hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
32406d8c3ce63b2c5e3474b9d13859c34525b0e0
| 135
|
py
|
Python
|
src/utils/rpg/db/__init__.py
|
sakura-no-hana/hanalon-bot
|
528217a06e4113000903caa31141dae6e07b4979
|
[
"MIT"
] | null | null | null |
src/utils/rpg/db/__init__.py
|
sakura-no-hana/hanalon-bot
|
528217a06e4113000903caa31141dae6e07b4979
|
[
"MIT"
] | 7
|
2021-04-17T00:16:32.000Z
|
2021-05-13T22:49:43.000Z
|
src/utils/rpg/db/__init__.py
|
sakura-no-hana/hanalon-bot
|
528217a06e4113000903caa31141dae6e07b4979
|
[
"MIT"
] | 2
|
2021-04-18T21:49:52.000Z
|
2021-05-18T14:34:35.000Z
|
from utils.rpg.db.character import *
from utils.rpg.db.clan import *
from utils.rpg.db.money import *
from utils.rpg.db.party import *
| 27
| 36
| 0.762963
| 24
| 135
| 4.291667
| 0.375
| 0.349515
| 0.466019
| 0.543689
| 0.582524
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.118519
| 135
| 4
| 37
| 33.75
| 0.865546
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
328d98d74a32939d01846f23d12d69e2360628ba
| 113
|
py
|
Python
|
app/core/__init__.py
|
husv/JustList
|
57598b379e4f7416ae65751d13ac2b6298cbd18c
|
[
"MIT"
] | 1
|
2020-12-24T08:48:17.000Z
|
2020-12-24T08:48:17.000Z
|
app/core/__init__.py
|
husv/JustList
|
57598b379e4f7416ae65751d13ac2b6298cbd18c
|
[
"MIT"
] | null | null | null |
app/core/__init__.py
|
husv/JustList
|
57598b379e4f7416ae65751d13ac2b6298cbd18c
|
[
"MIT"
] | null | null | null |
from .args.main import *
from .cache.main import *
# from .onedrive.main import *
# from .cloud189.main import *
| 22.6
| 30
| 0.716814
| 16
| 113
| 5.0625
| 0.4375
| 0.493827
| 0.518519
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.031579
| 0.159292
| 113
| 4
| 31
| 28.25
| 0.821053
| 0.504425
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
3ebe3520595cac5d3dc8cb48376b87d24b7b5f82
| 3,158
|
py
|
Python
|
tripaware_2017/Uncleared Outputs Notebooks/stats_functions.py
|
jesbu1/e-mission-eval
|
f8068201bbaf87c53def5a860af165dcf0b603d3
|
[
"BSD-3-Clause"
] | null | null | null |
tripaware_2017/Uncleared Outputs Notebooks/stats_functions.py
|
jesbu1/e-mission-eval
|
f8068201bbaf87c53def5a860af165dcf0b603d3
|
[
"BSD-3-Clause"
] | 13
|
2020-06-27T03:41:07.000Z
|
2021-08-13T17:15:36.000Z
|
tripaware_2017/Uncleared Outputs Notebooks/stats_functions.py
|
corinne-hcr/e-mission-eval-private-data
|
3825bbcd36b431d0458bb9d0c6c671043861a32e
|
[
"BSD-3-Clause"
] | 4
|
2017-07-30T15:53:00.000Z
|
2018-07-03T06:01:20.000Z
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from math import sqrt
def perm_test(labels, response_vars, stat_func, n):
'''Labels: Series with two labels, Response_vars series in same order as labels
stat_func is a function that takes in two series and returns a statistic, n is permutation numnber'''
unique_label_counts = labels.value_counts()
label_0 = unique_label_counts.index[0]
label_1 = unique_label_counts.index[1]
label_0_count = unique_label_counts[0]
label_1_count = unique_label_counts[1]
vals_0 = response_vars[labels == label_0]
vals_1 = response_vars[labels == label_1]
observed_stat = stat_func(vals_0, vals_1)
sample_stats = np.array([])
ind = labels
for i in range(n):
sampler = np.random.permutation(label_0_count + label_1_count)
new_vals = response_vars.take(sampler).values
df = pd.DataFrame({'vals': new_vals}, index=ind)
vals_0 = df[df.index == label_0]['vals']
vals_1 = df[df.index == label_1]['vals']
stat = stat_func(vals_0, vals_1)
sample_stats = np.append(sample_stats, stat)
perm_mean = np.mean(sample_stats)
plt.hist(sample_stats)
plt.show()
if observed_stat > perm_mean:
return np.sum(sample_stats > observed_stat) / len(sample_stats)
return np.sum(sample_stats < observed_stat) / len(sample_stats)
def mean_diff(vals_0, vals_1):
return np.mean(vals_0) - np.mean(vals_1)
# Same as permutation testing but sampling is with replacement.
# Also don't include iteration if SD's of both groups are 0.
def bootstrap_test(labels, response_vars, stat_func, n):
'''Labels: Series with two labels, Response_vars series in same order as labels
stat_func is a function that takes in two series and returns a statistic, n is permutation numnber'''
unique_label_counts = labels.value_counts()
label_0 = unique_label_counts.index[0]
label_1 = unique_label_counts.index[1]
label_0_count = unique_label_counts[0]
label_1_count = unique_label_counts[1]
vals_0 = response_vars[labels == label_0]
vals_1 = response_vars[labels == label_1]
observed_stat = stat_func(vals_0, vals_1)
sample_stats = np.array([])
ind = labels
for i in range(n):
sampler = np.random.choice(np.random.permutation(label_0_count + label_1_count), label_0_count + label_1_count)
new_vals = response_vars.take(sampler).values
df = pd.DataFrame({'vals': new_vals}, index=ind)
vals_0 = df[df.index == label_0]['vals']
vals_1 = df[df.index == label_1]['vals']
if np.std(vals_0) == 0 and np.std(vals_1) == 0:
continue
stat = stat_func(vals_0, vals_1)
sample_stats = np.append(sample_stats, stat)
perm_mean = np.mean(sample_stats)
plt.hist(sample_stats)
plt.show()
if observed_stat > perm_mean:
return np.sum(sample_stats > observed_stat) / len(sample_stats)
return np.sum(sample_stats < observed_stat) / len(sample_stats)
def print_error_percent(p, n):
print("p value: ", p)
print("error percent: {0}%".format(sqrt(p * (1-p) / n) * 2 * 100))
| 43.260274
| 119
| 0.693794
| 497
| 3,158
| 4.140845
| 0.193159
| 0.09621
| 0.082604
| 0.024295
| 0.831876
| 0.831876
| 0.831876
| 0.831876
| 0.831876
| 0.806608
| 0
| 0.024168
| 0.20076
| 3,158
| 73
| 120
| 43.260274
| 0.791204
| 0.149778
| 0
| 0.754098
| 0
| 0
| 0.019505
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.065574
| false
| 0
| 0.065574
| 0.016393
| 0.213115
| 0.04918
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
3ec5716a7e828c6c97a2d9764f981be55fba6df3
| 84
|
py
|
Python
|
gobigger/server/__init__.py
|
luanshaotong/GoBigger
|
00c347a89a660134677d633f39c39123c5ab3deb
|
[
"Apache-2.0"
] | 189
|
2021-10-08T07:55:10.000Z
|
2022-03-31T23:49:43.000Z
|
gobigger/server/__init__.py
|
luanshaotong/GoBigger
|
00c347a89a660134677d633f39c39123c5ab3deb
|
[
"Apache-2.0"
] | 25
|
2021-11-01T06:59:30.000Z
|
2022-03-22T11:22:27.000Z
|
gobigger/server/__init__.py
|
luanshaotong/GoBigger
|
00c347a89a660134677d633f39c39123c5ab3deb
|
[
"Apache-2.0"
] | 28
|
2021-10-14T12:23:14.000Z
|
2022-03-31T23:49:45.000Z
|
from .server import Server
from .server_default_config import server_default_config
| 28
| 56
| 0.880952
| 12
| 84
| 5.833333
| 0.416667
| 0.285714
| 0.542857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.095238
| 84
| 2
| 57
| 42
| 0.921053
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
f5b4da1faf335e37f5df5acb3cc3c42d3d5d2195
| 10,191
|
py
|
Python
|
unit_tests/test_tlslite_utils_aescbc.py
|
tomato42/tlslite-1
|
4631799cdfac8f90b567d455e698b05d7a917599
|
[
"Unlicense"
] | 121
|
2015-05-28T18:14:37.000Z
|
2020-11-18T11:23:59.000Z
|
unit_tests/test_tlslite_utils_aescbc.py
|
tomato42/tlslite-1
|
4631799cdfac8f90b567d455e698b05d7a917599
|
[
"Unlicense"
] | 340
|
2015-05-28T15:56:11.000Z
|
2020-11-04T11:40:45.000Z
|
unit_tests/test_tlslite_utils_aescbc.py
|
tomato42/tlslite-1
|
4631799cdfac8f90b567d455e698b05d7a917599
|
[
"Unlicense"
] | 60
|
2015-07-10T20:07:02.000Z
|
2020-10-22T08:04:20.000Z
|
# compatibility with Python 2.6, for that we need unittest2 package,
# which is not available on 3.3 or 3.4
try:
import unittest2 as unittest
except ImportError:
import unittest
from tlslite.utils.rijndael import Rijndael
from tlslite.utils.python_aes import Python_AES
class TestAESCBC(unittest.TestCase):
def test___init__(self):
key = bytearray(16)
aesCBC = Python_AES(key, 2, bytearray(b'\x00' * 16))
self.assertIsNotNone(aesCBC)
def test___init___with_invalid_key(self):
key = bytearray(8)
with self.assertRaises(AssertionError):
aesCBC = Python_AES(key, 2, bytearray(b'\x00' * 16))
def test___init___with_invalid_iv(self):
key = bytearray(16)
with self.assertRaises(AssertionError):
aesCBC = Python_AES(key, 2, bytearray(b'\x00' * 8))
def test_encrypt_with_test_vector_1(self):
key = bytearray(b'\x2b\x7e\x15\x16\x28\xae\xd2'
b'\xa6\xab\xf7\x15\x88\x09\xcf\x4f\x3c')
IV = bytearray(b'\x00\x01\x02\x03\x04\x05\x06\x07'
b'\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f')
plaintext = bytearray(b'\x6b\xc1\xbe\xe2\x2e\x40\x9f'
b'\x96\xe9\x3d\x7e\x11\x73\x93'
b'\x17\x2a\xae\x2d\x8a\x57\x1e'
b'\x03\xac\x9c\x9e\xb7\x6f\xac'
b'\x45\xaf\x8e\x51\x30\xc8\x1c'
b'\x46\xa3\x5c\xe4\x11\xe5\xfb'
b'\xc1\x19\x1a\x0a\x52\xef\xf6'
b'\x9f\x24\x45\xdf\x4f\x9b\x17'
b'\xad\x2b\x41\x7b\xe6\x6c\x37\x10')
ciphertext = bytearray(b'\x76\x49\xab\xac\x81\x19\xb2\x46'
b'\xce\xe9\x8e\x9b\x12\xe9\x19\x7d'
b'\x50\x86\xcb\x9b\x50\x72\x19\xee'
b'\x95\xdb\x11\x3a\x91\x76\x78\xb2'
b'\x73\xbe\xd6\xb8\xe3\xc1\x74\x3b'
b'\x71\x16\xe6\x9e\x22\x22\x95\x16'
b'\x3f\xf1\xca\xa1\x68\x1f\xac\x09'
b'\x12\x0e\xca\x30\x75\x86\xe1\xa7')
aesCBC = Python_AES(key, 2, IV)
self.assertEqual(aesCBC.encrypt(plaintext), ciphertext)
def test_decrypt_with_test_vector_1(self):
key = bytearray(b'\x2b\x7e\x15\x16\x28\xae\xd2'
b'\xa6\xab\xf7\x15\x88\x09\xcf\x4f\x3c')
IV = bytearray(b'\x00\x01\x02\x03\x04\x05\x06\x07'
b'\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f')
plaintext = bytearray(b'\x6b\xc1\xbe\xe2\x2e\x40\x9f'
b'\x96\xe9\x3d\x7e\x11\x73\x93'
b'\x17\x2a\xae\x2d\x8a\x57\x1e'
b'\x03\xac\x9c\x9e\xb7\x6f\xac'
b'\x45\xaf\x8e\x51\x30\xc8\x1c'
b'\x46\xa3\x5c\xe4\x11\xe5\xfb'
b'\xc1\x19\x1a\x0a\x52\xef\xf6'
b'\x9f\x24\x45\xdf\x4f\x9b\x17'
b'\xad\x2b\x41\x7b\xe6\x6c\x37\x10')
ciphertext = bytearray(b'\x76\x49\xab\xac\x81\x19\xb2\x46'
b'\xce\xe9\x8e\x9b\x12\xe9\x19\x7d'
b'\x50\x86\xcb\x9b\x50\x72\x19\xee'
b'\x95\xdb\x11\x3a\x91\x76\x78\xb2'
b'\x73\xbe\xd6\xb8\xe3\xc1\x74\x3b'
b'\x71\x16\xe6\x9e\x22\x22\x95\x16'
b'\x3f\xf1\xca\xa1\x68\x1f\xac\x09'
b'\x12\x0e\xca\x30\x75\x86\xe1\xa7')
aesCBC = Python_AES(key, 2, IV)
self.assertEqual(aesCBC.decrypt(ciphertext), plaintext)
def test_encrypt_with_test_vector_2(self):
key = bytearray(b'\x8e\x73\xb0\xf7\xda\x0e\x64\x52'
b'\xc8\x10\xf3\x2b\x80\x90\x79\xe5'
b'\x62\xf8\xea\xd2\x52\x2c\x6b\x7b')
IV = bytearray(b'\x00\x01\x02\x03\x04\x05\x06\x07'
b'\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f')
plaintext = bytearray(b'\x6b\xc1\xbe\xe2\x2e\x40\x9f'
b'\x96\xe9\x3d\x7e\x11\x73\x93'
b'\x17\x2a\xae\x2d\x8a\x57\x1e'
b'\x03\xac\x9c\x9e\xb7\x6f\xac'
b'\x45\xaf\x8e\x51\x30\xc8\x1c'
b'\x46\xa3\x5c\xe4\x11\xe5\xfb'
b'\xc1\x19\x1a\x0a\x52\xef\xf6'
b'\x9f\x24\x45\xdf\x4f\x9b\x17'
b'\xad\x2b\x41\x7b\xe6\x6c\x37\x10')
ciphertext = bytearray(b'\x4f\x02\x1d\xb2\x43\xbc\x63\x3d'
b'\x71\x78\x18\x3a\x9f\xa0\x71\xe8'
b'\xb4\xd9\xad\xa9\xad\x7d\xed\xf4'
b'\xe5\xe7\x38\x76\x3f\x69\x14\x5a'
b'\x57\x1b\x24\x20\x12\xfb\x7a\xe0'
b'\x7f\xa9\xba\xac\x3d\xf1\x02\xe0'
b'\x08\xb0\xe2\x79\x88\x59\x88\x81'
b'\xd9\x20\xa9\xe6\x4f\x56\x15\xcd')
aesCBC = Python_AES(key, 2, IV)
self.assertEqual(aesCBC.encrypt(plaintext), ciphertext)
def test_decrypt_with_test_vector_2(self):
key = bytearray(b'\x8e\x73\xb0\xf7\xda\x0e\x64\x52'
b'\xc8\x10\xf3\x2b\x80\x90\x79\xe5'
b'\x62\xf8\xea\xd2\x52\x2c\x6b\x7b')
IV = bytearray(b'\x00\x01\x02\x03\x04\x05\x06\x07'
b'\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f')
plaintext = bytearray(b'\x6b\xc1\xbe\xe2\x2e\x40\x9f'
b'\x96\xe9\x3d\x7e\x11\x73\x93'
b'\x17\x2a\xae\x2d\x8a\x57\x1e'
b'\x03\xac\x9c\x9e\xb7\x6f\xac'
b'\x45\xaf\x8e\x51\x30\xc8\x1c'
b'\x46\xa3\x5c\xe4\x11\xe5\xfb'
b'\xc1\x19\x1a\x0a\x52\xef\xf6'
b'\x9f\x24\x45\xdf\x4f\x9b\x17'
b'\xad\x2b\x41\x7b\xe6\x6c\x37\x10')
ciphertext = bytearray(b'\x4f\x02\x1d\xb2\x43\xbc\x63\x3d'
b'\x71\x78\x18\x3a\x9f\xa0\x71\xe8'
b'\xb4\xd9\xad\xa9\xad\x7d\xed\xf4'
b'\xe5\xe7\x38\x76\x3f\x69\x14\x5a'
b'\x57\x1b\x24\x20\x12\xfb\x7a\xe0'
b'\x7f\xa9\xba\xac\x3d\xf1\x02\xe0'
b'\x08\xb0\xe2\x79\x88\x59\x88\x81'
b'\xd9\x20\xa9\xe6\x4f\x56\x15\xcd')
aesCBC = Python_AES(key, 2, IV)
self.assertEqual(aesCBC.decrypt(ciphertext), plaintext)
def test_encrypt_with_test_vector_3(self):
key = bytearray(b'\x60\x3d\xeb\x10\x15\xca\x71\xbe'
b'\x2b\x73\xae\xf0\x85\x7d\x77\x81'
b'\x1f\x35\x2c\x07\x3b\x61\x08\xd7'
b'\x2d\x98\x10\xa3\x09\x14\xdf\xf4')
IV = bytearray(b'\x00\x01\x02\x03\x04\x05\x06\x07'
b'\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f')
plaintext = bytearray(b'\x6b\xc1\xbe\xe2\x2e\x40\x9f'
b'\x96\xe9\x3d\x7e\x11\x73\x93'
b'\x17\x2a\xae\x2d\x8a\x57\x1e'
b'\x03\xac\x9c\x9e\xb7\x6f\xac'
b'\x45\xaf\x8e\x51\x30\xc8\x1c'
b'\x46\xa3\x5c\xe4\x11\xe5\xfb'
b'\xc1\x19\x1a\x0a\x52\xef\xf6'
b'\x9f\x24\x45\xdf\x4f\x9b\x17'
b'\xad\x2b\x41\x7b\xe6\x6c\x37\x10')
ciphertext = bytearray(b'\xf5\x8c\x4c\x04\xd6\xe5\xf1\xba'
b'\x77\x9e\xab\xfb\x5f\x7b\xfb\xd6'
b'\x9c\xfc\x4e\x96\x7e\xdb\x80\x8d'
b'\x67\x9f\x77\x7b\xc6\x70\x2c\x7d'
b'\x39\xf2\x33\x69\xa9\xd9\xba\xcf'
b'\xa5\x30\xe2\x63\x04\x23\x14\x61'
b'\xb2\xeb\x05\xe2\xc3\x9b\xe9\xfc'
b'\xda\x6c\x19\x07\x8c\x6a\x9d\x1b')
aesCBC = Python_AES(key, 2, IV)
self.assertEqual(aesCBC.encrypt(plaintext), ciphertext)
def test_decrypt_with_test_vector_3(self):
key = bytearray(b'\x60\x3d\xeb\x10\x15\xca\x71\xbe'
b'\x2b\x73\xae\xf0\x85\x7d\x77\x81'
b'\x1f\x35\x2c\x07\x3b\x61\x08\xd7'
b'\x2d\x98\x10\xa3\x09\x14\xdf\xf4')
IV = bytearray(b'\x00\x01\x02\x03\x04\x05\x06\x07'
b'\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f')
plaintext = bytearray(b'\x6b\xc1\xbe\xe2\x2e\x40\x9f'
b'\x96\xe9\x3d\x7e\x11\x73\x93'
b'\x17\x2a\xae\x2d\x8a\x57\x1e'
b'\x03\xac\x9c\x9e\xb7\x6f\xac'
b'\x45\xaf\x8e\x51\x30\xc8\x1c'
b'\x46\xa3\x5c\xe4\x11\xe5\xfb'
b'\xc1\x19\x1a\x0a\x52\xef\xf6'
b'\x9f\x24\x45\xdf\x4f\x9b\x17'
b'\xad\x2b\x41\x7b\xe6\x6c\x37\x10')
ciphertext = bytearray(b'\xf5\x8c\x4c\x04\xd6\xe5\xf1\xba'
b'\x77\x9e\xab\xfb\x5f\x7b\xfb\xd6'
b'\x9c\xfc\x4e\x96\x7e\xdb\x80\x8d'
b'\x67\x9f\x77\x7b\xc6\x70\x2c\x7d'
b'\x39\xf2\x33\x69\xa9\xd9\xba\xcf'
b'\xa5\x30\xe2\x63\x04\x23\x14\x61'
b'\xb2\xeb\x05\xe2\xc3\x9b\xe9\xfc'
b'\xda\x6c\x19\x07\x8c\x6a\x9d\x1b')
aesCBC = Python_AES(key, 2, IV)
self.assertEqual(aesCBC.decrypt(ciphertext), plaintext)
| 47.180556
| 68
| 0.465116
| 1,421
| 10,191
| 3.292048
| 0.164673
| 0.057717
| 0.030782
| 0.03463
| 0.926464
| 0.917059
| 0.914066
| 0.914066
| 0.914066
| 0.90637
| 0
| 0.209979
| 0.386419
| 10,191
| 215
| 69
| 47.4
| 0.538142
| 0.010107
| 0
| 0.887574
| 0
| 0
| 0.400992
| 0.399802
| 0
| 0
| 0
| 0
| 0.053254
| 1
| 0.053254
| false
| 0
| 0.029586
| 0
| 0.088757
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
eb28863f51b7367ca4b919e46a5907f9bc69d342
| 3,580
|
py
|
Python
|
kernels/Matern_11half.py
|
yifanc96/HighDimPDEs-GPsolver
|
fbe557422954addca022197f655e1a9ff49c8026
|
[
"MIT"
] | 2
|
2021-10-16T21:18:32.000Z
|
2021-10-17T01:52:53.000Z
|
kernels/Matern_11half.py
|
yifanc96/HighDimPDEs-GPsolver
|
fbe557422954addca022197f655e1a9ff49c8026
|
[
"MIT"
] | null | null | null |
kernels/Matern_11half.py
|
yifanc96/HighDimPDEs-GPsolver
|
fbe557422954addca022197f655e1a9ff49c8026
|
[
"MIT"
] | null | null | null |
import jax.numpy as jnp
from jax import grad, jvp, hessian
eps = 0.0
def kappa(x,y,d,sigma):
dist = jnp.sqrt(jnp.sum((x-y)**2 + eps))
val = (945*sigma**5+945*jnp.sqrt(11)*sigma**4*dist+4620*sigma**3*dist**2+1155*jnp.sqrt(11)*sigma**2*dist**3+1815*sigma*dist**4+121*jnp.sqrt(11)*dist**5)/(945*sigma**5)*jnp.exp(-jnp.sqrt(11)*dist/sigma)
return val
def D_wy_kappa(x,y,d, sigma,w):
t = jnp.sqrt(jnp.sum((x-y)**2 + eps))
a = sigma
DF = -11*(105*a**4+105*jnp.sqrt(11)*a**3*t+495*a**2*t**2+110*jnp.sqrt(11)*a*t**3+121*t**4)*jnp.exp(-jnp.sqrt(11)*t/a)/(945*a**6)
val = -DF*sum((x-y)*w)
return val
def Delta_y_kappa(x,y,d,sigma):
t = jnp.sqrt(jnp.sum((x-y)**2 + eps))
a = sigma
D2F = -11*(105*d*a**5+105*d*jnp.sqrt(11)*a**4*t+165*(3*d-1)*a**3*t**2+55*jnp.sqrt(11)*(2*d-3)*a**2*t**3+121*(d-6)*a*t**4-121*jnp.sqrt(11)*t**5)/(945*a**7)*jnp.exp(-jnp.sqrt(11)*t/a)
val = D2F
return val
def D_wx_kappa(x,y,d, sigma,w):
t = jnp.sqrt(jnp.sum((x-y)**2 + eps))
a = sigma
DF = -11*(105*a**4+105*jnp.sqrt(11)*a**3*t+495*a**2*t**2+110*jnp.sqrt(11)*a*t**3+121*t**4)*jnp.exp(-jnp.sqrt(11)*t/a)/(945*a**6)
val = DF*sum((x-y)*w)
return val
# Dx vector
def D_x_kappa(x,y,d, sigma):
t = jnp.sqrt(jnp.sum((x-y)**2 + eps))
a = sigma
DF = -11*(105*a**4+105*jnp.sqrt(11)*a**3*t+495*a**2*t**2+110*jnp.sqrt(11)*a*t**3+121*t**4)*jnp.exp(-jnp.sqrt(11)*t/a)/(945*a**6)
val = DF*(x-y)
return val
def D_wx_D_wy_kappa(x,y,d,sigma,wx,wy):
t = jnp.sqrt(jnp.sum((x-y)**2 + eps))
a = sigma
DF = -11*(105*a**4+105*jnp.sqrt(11)*a**3*t+495*a**2*t**2+110*jnp.sqrt(11)*a*t**3+121*t**4)*jnp.exp(-jnp.sqrt(11)*t/a)/(945*a**6)
DDF = 121*jnp.exp(-jnp.sqrt(11)*t/a)*(15*a**3+15*jnp.sqrt(11)*a**2*t+66*a*t**2+11*jnp.sqrt(11)*t**3)/(945*a**7)
vec = x-y
val = sum(-wx*wy)*DF+sum(wx*vec)*sum(-wy*vec)*DDF
return val
# # DxDwy vector
def D_x_D_wy_kappa(x,y,d,sigma,wy):
t = jnp.sqrt(jnp.sum((x-y)**2 + eps))
a = sigma
DF = -11*(105*a**4+105*jnp.sqrt(11)*a**3*t+495*a**2*t**2+110*jnp.sqrt(11)*a*t**3+121*t**4)*jnp.exp(-jnp.sqrt(11)*t/a)/(945*a**6)
DDF = 121*jnp.exp(-jnp.sqrt(11)*t/a)*(15*a**3+15*jnp.sqrt(11)*a**2*t+66*a*t**2+11*jnp.sqrt(11)*t**3)/(945*a**7)
vec = x-y
val = -wy*DF + vec*sum(-wy*vec)*DDF
return val
def D_wx_Delta_y_kappa(x,y, d,sigma,w):
t = jnp.sqrt(jnp.sum((x-y)**2 + eps))
a = sigma
D3F = 121*jnp.exp(-jnp.sqrt(11)*t/a)*(15*a**4*(2+d)+15*jnp.sqrt(11)*a**3*(2+d)*t+33*a**2*(3+2*d)*t**2+11*jnp.sqrt(11)*a*(d-1)*t**3-121*t**4)/(945*a**8)
vec = x-y
val = D3F*sum(vec*w)
return val
# # Delta
def Delta_x_kappa(x,y,d,sigma):
t = jnp.sqrt(jnp.sum((x-y)**2 + eps))
a = sigma
D2F = -11*(105*d*a**5+105*d*jnp.sqrt(11)*a**4*t+165*(3*d-1)*a**3*t**2+55*jnp.sqrt(11)*(2*d-3)*a**2*t**3+121*(d-6)*a*t**4-121*jnp.sqrt(11)*t**5)/(945*a**7)*jnp.exp(-jnp.sqrt(11)*t/a)
val = D2F
return val
def Delta_x_D_wy_kappa(x,y, d,sigma,w):
t = jnp.sqrt(jnp.sum((x-y)**2 + eps))
a = sigma
D3F = 121*jnp.exp(-jnp.sqrt(11)*t/a)*(15*a**4*(2+d)+15*jnp.sqrt(11)*a**3*(2+d)*t+33*a**2*(3+2*d)*t**2+11*jnp.sqrt(11)*a*(d-1)*t**3-121*t**4)/(945*a**8)
vec = x-y
val = -D3F*sum(vec*w)
return val
def Delta_x_Delta_y_kappa(x,y,d,sigma):
t = jnp.sqrt(jnp.sum((x-y)**2 + eps))
a = sigma
D4F = 121*(15*d*(2+d)*a**5+15*d*(2+d)*jnp.sqrt(11)*a**4*t+66*(d**2+d-2)*a**3*t**2+11*jnp.sqrt(11)*a**2*(d**2-4*d-12)*t**3-121*(3+2*d)*a*t**4+121*jnp.sqrt(11)*t**5)/(945*a**9)*jnp.exp(-jnp.sqrt(11)*t/a)
val = D4F
return val
| 39.777778
| 205
| 0.551117
| 885
| 3,580
| 2.19548
| 0.070057
| 0.194545
| 0.199177
| 0.102934
| 0.849202
| 0.806485
| 0.806485
| 0.757591
| 0.728255
| 0.728255
| 0
| 0.156787
| 0.13771
| 3,580
| 89
| 206
| 40.224719
| 0.472627
| 0.00838
| 0
| 0.657534
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.150685
| false
| 0
| 0.027397
| 0
| 0.328767
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
eb3407b468847a360ac2e4134326f51a24d40d9a
| 188
|
py
|
Python
|
grr/test_lib/__init__.py
|
tsehori/grr
|
048506f22f74642bfe61749069a45ddf496fdab3
|
[
"Apache-2.0"
] | 1
|
2019-08-28T23:48:20.000Z
|
2019-08-28T23:48:20.000Z
|
grr/test_lib/__init__.py
|
tsehori/grr
|
048506f22f74642bfe61749069a45ddf496fdab3
|
[
"Apache-2.0"
] | 44
|
2021-05-14T22:49:24.000Z
|
2022-03-13T21:54:02.000Z
|
grr/test_lib/__init__.py
|
tsehori/grr
|
048506f22f74642bfe61749069a45ddf496fdab3
|
[
"Apache-2.0"
] | 2
|
2022-02-25T08:34:51.000Z
|
2022-03-16T17:29:44.000Z
|
#!/usr/bin/env python
"""A collection of utilities for writing test code."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
| 31.333333
| 54
| 0.819149
| 26
| 188
| 5.384615
| 0.730769
| 0.214286
| 0.342857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117021
| 188
| 5
| 55
| 37.6
| 0.843373
| 0.367021
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
eb445ae14be7d18491b8e632e33c7c292ce9c648
| 329
|
py
|
Python
|
gql_schema_codegen/constants/__init__.py
|
sauldom102/gql_schema_codegen
|
f3bb813874760a8495e67c770d4622674fef0632
|
[
"MIT"
] | 2
|
2022-03-26T20:33:17.000Z
|
2022-03-26T23:15:17.000Z
|
gql_schema_codegen/constants/__init__.py
|
sauldom102/gql_schema_codegen
|
f3bb813874760a8495e67c770d4622674fef0632
|
[
"MIT"
] | null | null | null |
gql_schema_codegen/constants/__init__.py
|
sauldom102/gql_schema_codegen
|
f3bb813874760a8495e67c770d4622674fef0632
|
[
"MIT"
] | null | null | null |
from .constants import DIRECTIVE_PATTERN, DIRECTIVE_USAGE_PATTERN, SCALAR_PATTERN, UNION_PATTERN, VALUE_TYPES, BLOCK_PATTERN, FIELD_PATTERN, RESOLVER_TYPES
__all__ = ['DIRECTIVE_PATTERN', 'DIRECTIVE_USAGE_PATTERN', 'SCALAR_PATTERN',
'UNION_PATTERN', 'VALUE_TYPES', 'BLOCK_PATTERN', 'FIELD_PATTERN', 'RESOLVER_TYPES']
| 65.8
| 155
| 0.796353
| 38
| 329
| 6.315789
| 0.368421
| 0.133333
| 0.208333
| 0.25
| 0.908333
| 0.908333
| 0.908333
| 0.908333
| 0.908333
| 0.908333
| 0
| 0
| 0.100304
| 329
| 4
| 156
| 82.25
| 0.810811
| 0
| 0
| 0
| 0
| 0
| 0.358663
| 0.069909
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 10
|
de6329741ff7f2e3e7c536cb61f00ac41d9ee6cb
| 764
|
py
|
Python
|
logo.py
|
Szczurowsky/PythonRandomPasswordGenerator
|
786df12b4f797f324d8df7701a48806fce1475d5
|
[
"MIT"
] | null | null | null |
logo.py
|
Szczurowsky/PythonRandomPasswordGenerator
|
786df12b4f797f324d8df7701a48806fce1475d5
|
[
"MIT"
] | null | null | null |
logo.py
|
Szczurowsky/PythonRandomPasswordGenerator
|
786df12b4f797f324d8df7701a48806fce1475d5
|
[
"MIT"
] | null | null | null |
def generate_logo():
print('__________ __ .__ __________ ____ ___ __ .__.__ \n'
'\______ \___.__._/ |_| |__ ____ ____\______ \_____ ______ _____| | \_/ |_|__| | ______\n'
' | ___< | |\ __\ | \ / _ \ / \| ___/\__ \ / ___// ___/ | /\ __\ | | / ___/\n'
' | | \___ | | | | Y ( <_> ) | \ | / __ \_\___ \ \___ \| | / | | | | |__\___ \ \n'
' |____| / ____| |__| |___| /\____/|___| /____| (____ /____ >____ >______/ |__| |__|____/____ >\n'
' \/ \/ \/ \/ \/ \/ \/ \n\n\n')
| 95.5
| 127
| 0.282723
| 13
| 764
| 2.230769
| 0.461538
| 0.413793
| 0.413793
| 0.275862
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.528796
| 764
| 7
| 128
| 109.142857
| 0.080556
| 0
| 0
| 0
| 1
| 0.571429
| 0.86911
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| true
| 0
| 0
| 0
| 0.142857
| 0.142857
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 12
|
de84af9ac0a3f3a1282fd887c9f0fc92e5efb8c7
| 36,812
|
py
|
Python
|
virgmo/vi_sbm.py
|
mozzhorin/VIRGMo
|
d1cc25a35e6513c46ffd1f2797dfea6789f3075d
|
[
"MIT"
] | 2
|
2020-10-28T21:29:09.000Z
|
2020-10-29T08:08:28.000Z
|
virgmo/vi_sbm.py
|
mozzhorin/VIRGMo
|
d1cc25a35e6513c46ffd1f2797dfea6789f3075d
|
[
"MIT"
] | null | null | null |
virgmo/vi_sbm.py
|
mozzhorin/VIRGMo
|
d1cc25a35e6513c46ffd1f2797dfea6789f3075d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Variational inference for stochastic block models.
"""
#import time
import itertools
#import #warnings
#import pickle
#import copy
#import numpy as np
#import matplotlib.pyplot as plt
#import networkx as nx
import torch
from torch.autograd import Variable
from torch.distributions.dirichlet import Dirichlet
from torch.distributions.beta import Beta
#from torch.distributions.normal import Normal
#from torch.utils.data import Dataset, DataLoader
from virgmo.utils import diriKL, gammaKL, normKL, softmax
from virgmo.vi_rg import VI_RG
class VI_SBM(VI_RG):
'''
Variational inference for stochastic block models.
PARAMETERS:
num_classes (int): number of classes K
num_nodes (int): number of nodes N
etas (torch.Tensor, size: K*N):
posterior class assignment probabilities of each node.
thetas (torch.Tensor, size: K):
posterior classes probabilities.
Bs (torch.Tensor, size: K*K*2):
posterior parameters of Beta probability disrtibutions of edges
between nodes of specific classes.
theta_p (torch.Tensor, size: K):
prior classes probabilities.
B_p (torch.Tensor, size: K*K*2):
prior parameters of Beta probability disrtibutions of edges
between nodes of specific classes.
EXAMPLE:
N = 75
p = torch.tensor([0.2, 0.3, 0.5])
b = torch.tensor([
[0.8, 0.1, 0.4],
[0.1, 0.9, 0.1],
[0.4, 0.1, 0.8]])
sbm = SBM(p, b)
gen_z, gen_A = sbm.generate(N)
sbm.show(sorted=True)
dataloader = DataLoader(EdgesDataset(gen_A),
batch_size=10, shuffle=True, num_workers=0)
vi = VI_SBM(num_nodes=N, num_classes=3)
vi.train(dataloader, epochs=10)
vi.summary(gen_A, gen_z)
'''
def __init__(self, num_classes=2, num_nodes=50,
priors={'theta_p':None,
'B_p':None},
init_values={'etas':None,
'thetas':None,
'Bs':None},
device=None):
''' Initialize VI_SBM model.
ARGUMENTS:
num_classes (int): number of classes K
num_nodes (int): number of nodes N
priors (dict of torch.float): priors
init_values (dict of torch.float): initial values of the variational
distribution's parameters.
'''
super(VI_SBM, self).__init__(num_nodes, priors,
init_values, device)
self.num_classes = num_classes
# Initialize parameters of variational distribution
self.params_reset()
# Initialize the priors fron the priors' dictionary or with
# default values
if priors['theta_p'] is None:
# Default flat prior for Dirichlet distribution
theta_p = torch.ones([self.num_classes])
else:
theta_p = priors['theta_p']
if priors['B_p'] is None:
# Default flat prior for Beta distribution
B_p = torch.ones([self.num_classes, self.num_classes, 2])
else:
B_p = priors['B_p']
self.theta_p = Variable(theta_p, requires_grad=False).to(self.device)
self.B_p = Variable(B_p, requires_grad=False).to(self.device)
def params_reset(self):
    ''' Reset parameters of the variational distribution from the
    init_values dictionary or with random values.'''
    init_etas = self.init_values['etas']
    if init_etas is None:
        etas = torch.rand([self.num_classes, self.num_nodes]).to(self.device)
    elif isinstance(init_etas, str):
        # String flag (e.g. 'SHORTEST_PATH'): initialize from the
        # shortest-path heuristic.  Checking string-ness first avoids
        # comparing a tensor against a str.  The dataloader may not be
        # attached yet, in which case fall back to random initialization.
        try:
            etas = self.etas_init().to(self.device)
            print('Initialize etas with shortest path algorithm')
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt and
            # SystemExit are no longer swallowed.
            etas = torch.rand([self.num_classes, self.num_nodes]).to(self.device)
    else:
        etas = init_etas.to(self.device)
    if self.init_values['thetas'] is None:
        thetas = torch.rand([self.num_classes]).to(self.device)
    else:
        thetas = self.init_values['thetas'].to(self.device)
    if self.init_values['Bs'] is None:
        Bs = torch.rand([self.num_classes, self.num_classes, 2]).to(self.device)
    else:
        Bs = self.init_values['Bs'].to(self.device)
    # Register as trainable variational parameters.
    self.etas = torch.nn.Parameter(etas)
    self.thetas = torch.nn.Parameter(thetas)
    self.Bs = torch.nn.Parameter(Bs)
def constrained_params(self):
    ''' Return the posterior parameters mapped onto their valid domains. '''
    eta_x = softmax(self.etas)                        # class probabilities per node
    theta_x = torch.exp(self.thetas) + self.epsilon   # strictly positive Dirichlet params
    B_x = torch.exp(self.Bs)                          # positive Beta params
    return eta_x, theta_x, B_x
def elbo(self, idx1, idx2, weights, debug=False):
    ''' Return evidence lower bound (ELBO) calculated for a nodes batch
    of size L; also the loss for the training.
    ARGUMENTS:
    idx1 (torch.int, size: L): start nodes.
    idx2 (torch.int, size: L): finish nodes.
    weights (torch.float, size: L): edges weights.
    '''
    L = len(weights) # Batch size
    eta_x, theta_x, B_x = self.constrained_params()
    # Calculate and sum different parts of ELBO.
    # KL terms are scaled by L/N^2 — presumably so that, summed over all
    # batches covering the N^2 node pairs, each KL is counted once.
    elbo = - L / self.num_nodes**2 * diriKL(theta_x, self.theta_p)
    # diriKL applied per class pair (Beta is a 2-parameter Dirichlet),
    # hence the .sum() over the K*K grid.
    elbo += - L / self.num_nodes**2 * diriKL(B_x, self.B_p).sum()
    # Class-assignment term for the batch's start nodes.
    elbo += 1 / self.num_nodes * self.phi(idx1, eta_x, theta_x)
    # Edge-likelihood term for the batch's node pairs.
    elbo += self.omega(B_x, eta_x, idx1, idx2, weights)
    return elbo
def qmean(self):
    ''' Return mean values of posterior variational distributions:
    (class assignments, class probabilities, edge probabilities). '''
    eta_x, theta_x, B_x = self.constrained_params()
    class_probs = Dirichlet(theta_x.data).mean
    n_cls = self.num_classes
    edge_probs = torch.zeros([n_cls, n_cls])
    # Posterior mean edge probability for every (i, j) class pair.
    for i, j in itertools.product(range(n_cls), range(n_cls)):
        edge_probs[i, j] = Beta(B_x[i, j, 0].data, B_x[i, j, 1].data).mean
    return eta_x.data, class_probs, edge_probs
def summary(self, A, z=None):
    ''' Print the summary and plot the sorted adjacency matrix and
    the loss for one fit.
    ARGUMENTS:
    A (torch.Tensor, size: N*N): adjacency matrix.
    z (torch.Tensor, size: N*K): binary matrix indicating the true class
    assignment for each data point.
    '''
    # Base-class summary does the plotting and returns this model's
    # posterior means tuple (etas, thetas, Bs, ...).
    qmean = super(VI_SBM, self).summary(A,z)
    print('Classes probability', qmean[1].numpy())
    print('Edges probability:\n', qmean[2].numpy())
    # NOTE(review): the tuple is only propagated when it carries more than
    # the three SBM entries — confirm threshold against the base class.
    if len(qmean)>3:
        return qmean
def class_accuracy(self, z, eta=None):
    ''' Return the best accuracy of nodes' class assignments over all
    permutations of the class labels (labels are identifiable only up
    to permutation).
    ARGUMENTS:
    z (torch.Tensor, size: N*K): binary matrix indicating the true class
        assignment for each data point.
    eta (torch.Tensor, size: K*N):
        posterior class assignment probabilities of each node.
    '''
    if eta is None:
        eta = self.qmean()[0]
    pred = eta.cpu().argmax(dim=0)
    truth = z.cpu().argmax(dim=-1).float()
    total = len(truth)
    # For each relabeling, map predicted labels through the permutation
    # and score the fraction of nodes that match the ground truth.
    scores = []
    for perm in itertools.permutations(range(self.num_classes)):
        relabeled = torch.tensor([float(perm[c]) for c in pred])
        scores.append((relabeled == truth).sum().div(total))
    return torch.stack(scores).max()
###############################################################################
class VI_DCSBM(VI_SBM):
    '''
    Variational inference for degree-corrected stochastic block models.
    Parameters:
    num_classes (int): number of classes K
    num_nodes (int): number of nodes N
    etas (torch.Tensor, size: K*N):
        posterior class assignment probabilities of each node.
    thetas (torch.Tensor, size: K):
        posterior classes probabilities.
    Bs (torch.Tensor, size: K*K*2):
        posterior parameters of Beta probability distributions corresponding to
        probabilities of edges between nodes of specific classes.
    deltas (torch.Tensor, size: N*2):
        posterior parameters of Normal probability distributions corresponding
        to expected degree of each node.
    theta_p (torch.Tensor, size: K):
        prior classes probabilities.
    B_p (torch.Tensor, size: K*K*2):
        prior parameters of Beta probability distributions corresponding to
        probabilities of edges between nodes of specific classes.
    delta_p (torch.Tensor, size: N*2):
        prior parameters of Normal probability distributions corresponding to
        expected degree of each node.
    EXAMPLE:
    N = 75
    p = torch.tensor([0.2, 0.3, 0.5])
    b = torch.tensor([
        [0.8, 0.1, 0.4],
        [0.1, 0.9, 0.1],
        [0.4, 0.1, 0.8]])
    delta = torch.tensor([[0.,1.], [2.,1.], [-2.,4.]])
    dcsbm = DCSBM(p, b, delta)
    z, A = dcsbm.generate(N)
    dcsbm.show(sorted=True)
    dataloader = DataLoader(EdgesDataset(A),
        batch_size=10, shuffle=True, num_workers=0)
    vi = VI_DCSBM(num_nodes=N, num_classes=3)
    vi.train(dataloader, epochs=10)
    vi.summary(A,z)
    '''
    def __init__(self, num_classes=2, num_nodes=50, num_samples=10,
                 priors=None, init_values=None, device=None):
        ''' Initialize VI_DCSBM model.
        ARGUMENTS:
        num_classes (int): number of classes K
        num_nodes (int): number of nodes N
        num_samples (int): number of samples to calculate the expectation of
            ELBO's parts, when it cannot be done analytically.
        priors (dict of torch.float): priors; missing keys use defaults.
        init_values (dict of torch.float): initial values of the variational
            distribution's parameters; missing keys use random init.
        '''
        # Avoid mutable default arguments: merge user dicts over defaults.
        priors = {**{'theta_p': None, 'B_p': None, 'delta_p': None},
                  **(priors or {})}
        init_values = {**{'etas': None, 'thetas': None, 'Bs': None,
                          'deltas': None},
                       **(init_values or {})}
        super(VI_DCSBM, self).__init__(num_classes, num_nodes, priors,
                                       init_values, device)
        self.num_samples = num_samples
        if priors['delta_p'] is None:
            # Default standard-normal prior per node: mean 0, scale 1.
            delta_p = torch.ones([self.num_nodes, 2])*torch.tensor([0.,1])
        else:
            delta_p = priors['delta_p']
        self.delta_p = Variable(delta_p, requires_grad=False).to(self.device)
    def params_reset(self):
        ''' Reset parameters of the variational distribution from the
        init_values dictionary or with random values.
        '''
        super(VI_DCSBM, self).params_reset()
        if self.init_values['deltas'] is None:
            deltas = torch.rand([self.num_nodes, 2]).to(self.device)
        else:
            deltas = self.init_values['deltas'].to(self.device)
        self.deltas = torch.nn.Parameter(deltas)
    def constrained_params(self):
        ''' Return constrained posterior parameters. '''
        # epsilon keeps the Dirichlet parameters strictly positive even if
        # exp underflows — consistent with the sibling VI_* models.
        return (softmax(self.etas), torch.exp(self.thetas)+self.epsilon,
                torch.exp(self.Bs), self.deltas)
    def elbo(self, idx1, idx2, weights, debug=False):
        ''' Return evidence lower bound (ELBO) calculated for a nodes batch
        of size L; also the loss for the training.
        ARGUMENTS:
        idx1 (torch.int, size: L): start nodes.
        idx2 (torch.int, size: L): finish nodes.
        weights (torch.float, size: L): edges weights.
        '''
        L = len(weights) # Batch size
        eta_x, theta_x, B_x, delta_x = self.constrained_params()
        # KL terms scaled by L/N^2 so each divergence is counted once over
        # a full pass of the N^2 node pairs.
        elbo = - L / self.num_nodes**2 * diriKL(theta_x, self.theta_p)
        elbo += - L / self.num_nodes**2 * diriKL(B_x, self.B_p).sum()
        elbo += 1 / self.num_nodes * self.phi(idx1, eta_x, theta_x)
        elbo += - 1 / self.num_nodes * normKL(delta_x, self.delta_p).sum()
        # Degree-corrected edge term has no closed form; sampled approx.
        elbo += self.omega_approx(B_x, eta_x, delta_x, idx1, idx2, weights)
        return elbo
    def qmean(self):
        ''' Return mean values of posterior variational distributions.
        '''
        eta_x, theta_x, B_x, delta_x = self.constrained_params()
        thetas = Dirichlet(theta_x.data).mean
        Bs = torch.zeros([self.num_classes, self.num_classes])
        for i in range(self.num_classes):
            for j in range(self.num_classes):
                Bs[i,j] = Beta(B_x[i,j,0].data, B_x[i,j,1].data).mean
        # Mean of the Normal posterior is its location parameter.
        deltas = delta_x[:,0].data
        return eta_x.data, thetas, Bs, deltas
    def summary(self, A, z=None):
        ''' Print the summary and plot the sorted adjacency matrix and
        the loss for one fit.
        ARGUMENTS:
        A (torch.Tensor, size: N*N): adjacency matrix.
        z (torch.Tensor, size: N*K): binary matrix indicating the true class
        assignment for each data point.
        '''
        qmean = super(VI_DCSBM, self).summary(A,z)
        print('Expected degree:\n', qmean[3].numpy())
        if len(qmean)>4:
            return qmean
###############################################################################
class VI_WDCSBM(VI_DCSBM):
    '''
    Variational inference for weighted degree-corrected stochastic block models.
    Parameters:
    num_classes (int): number of classes K
    num_nodes (int): number of nodes N
    etas (torch.Tensor, size: K*N):
        posterior class assignment probabilities of each node.
    thetas (torch.Tensor, size: K):
        posterior classes probabilities.
    Bs (torch.Tensor, size: K*K*2):
        posterior parameters of Beta probability distributions corresponding to
        probabilities of edges between nodes of specific classes.
    deltas (torch.Tensor, size: N*2):
        posterior parameters of Normal probability distributions corresponding
        to expected degree of each node.
    mus (torch.Tensor, size: K*K*2):
        posterior parameters of Normal probability distributions corresponding
        to mean of weights
    taus (torch.Tensor, size: K*K*2):
        posterior parameters of Gamma probability distributions corresponding
        to precision of weights
    theta_p (torch.Tensor, size: K):
        prior classes probabilities.
    B_p (torch.Tensor, size: K*K*2):
        prior parameters of Beta probability distributions corresponding to
        probabilities of edges between nodes of specific classes.
    delta_p (torch.Tensor, size: N*2):
        prior parameters of Normal probability distributions corresponding to
        expected degree of each node.
    mu_p (torch.Tensor, size: K*K*2):
        prior parameters of Normal probability distributions corresponding
        to mean of weights
    tau_p (torch.Tensor, size: K*K*2):
        prior parameters of Gamma probability distributions corresponding
        to precision of weights
    EXAMPLE:
    N = 75
    p = torch.tensor([0.2, 0.3, 0.5])
    b = torch.tensor([
        [0.8, 0.1, 0.4],
        [0.1, 0.9, 0.1],
        [0.4, 0.1, 0.8]])
    delta = torch.tensor([[0.,1.], [2.,1.], [-2.,4.]])
    g_mu = torch.tensor([
        [10., 5., 2.],
        [5., 10., 2.],
        [2., 2., 20.]])
    g_tau = torch.ones([3,3])*2
    model = WDCSBM(p, b, delta, g_mu.log(), g_tau)
    z, A = model.generate(N, directed=True)
    model.show(sorted=True)
    dataloader = DataLoader(EdgesDataset(A),
        batch_size=25, shuffle=True, num_workers=0)
    vi = VI_WDCSBM(num_nodes=N, num_classes=3)
    vi.train(dataloader, epochs=10)
    vi.summary(A, z)
    '''
    def __init__(self, num_classes=2, num_nodes=50, num_samples=10,
                 priors=None, init_values=None, device=None):
        ''' Initialize VI_WDCSBM model.
        ARGUMENTS:
        num_classes (int): number of classes K
        num_nodes (int): number of nodes N
        num_samples (int): number of samples to calculate the expectation of
            ELBO's parts, when it cannot be done analytically.
        priors (dict of torch.float): priors; missing keys use defaults.
        init_values (dict of torch.float): initial values of the variational
            distribution's parameters; missing keys use random init.
        '''
        # Avoid mutable default arguments: merge user dicts over defaults.
        priors = {**{'theta_p': None, 'B_p': None, 'delta_p': None,
                     'mu_p': None, 'tau_p': None},
                  **(priors or {})}
        init_values = {**{'etas': None, 'thetas': None, 'Bs': None,
                          'deltas': None, 'mus': None, 'taus': None},
                       **(init_values or {})}
        super(VI_WDCSBM, self).__init__(num_classes, num_nodes, num_samples,
                                        priors, init_values, device)
        if priors['mu_p'] is None:
            # Default Normal prior parameters (all ones)
            mu_p = torch.ones([self.num_classes, self.num_classes, 2])
        else:
            mu_p = priors['mu_p']
        if priors['tau_p'] is None:
            # Default Gamma prior parameters (all ones)
            tau_p = torch.ones([self.num_classes, self.num_classes, 2])
        else:
            tau_p = priors['tau_p']
        self.mu_p = Variable(mu_p, requires_grad=False).to(self.device)
        self.tau_p = Variable(tau_p, requires_grad=False).to(self.device)
    def params_reset(self):
        ''' Reset parameters of the variational distribution from the
        init_values dictionary or with random values.
        '''
        super(VI_WDCSBM, self).params_reset()
        if self.init_values['mus'] is None:
            mus = torch.rand([self.num_classes, self.num_classes, 2]).to(self.device)
        else:
            mus = self.init_values['mus'].to(self.device)
        if self.init_values['taus'] is None:
            taus = torch.rand([self.num_classes, self.num_classes, 2]).to(self.device)
        else:
            taus = self.init_values['taus'].to(self.device)
        self.mus = torch.nn.Parameter(mus)
        self.taus = torch.nn.Parameter(taus)
    def constrained_params(self):
        ''' Return constrained posterior parameters. '''
        return (softmax(self.etas), torch.exp(self.thetas)+self.epsilon,
                torch.exp(self.Bs), self.deltas,
                torch.exp(self.mus), torch.exp(self.taus))
    def elbo(self, idx1, idx2, weights, debug=False):
        ''' Return evidence lower bound (ELBO) calculated for a nodes batch
        of size L; also the loss for the training.
        ARGUMENTS:
        idx1 (torch.int, size: L): start nodes.
        idx2 (torch.int, size: L): finish nodes.
        weights (torch.float, size: L): edges weights.
        '''
        L = len(weights) # Batch size
        eta_x, theta_x, B_x, delta_x, mu_x, tau_x = self.constrained_params()
        # KL terms scaled so each divergence is counted once per full pass.
        elbo = - L / self.num_nodes**2 * diriKL(theta_x, self.theta_p)
        elbo += - L / self.num_nodes**2 * diriKL(B_x, self.B_p).sum()
        elbo += 1 / self.num_nodes * self.phi(idx1, eta_x, theta_x)
        elbo += - 1 / self.num_nodes * normKL(delta_x, self.delta_p).sum()
        elbo += - L / self.num_nodes**2 * normKL(mu_x, self.mu_p).sum()
        elbo += - L / self.num_nodes**2 * gammaKL(tau_x, self.tau_p).sum()
        elbo += self.omega_approx(B_x, eta_x, delta_x, idx1, idx2, weights)
        # Weight-likelihood term (Normal-Gamma model of edge weights).
        elbo += self.psi(eta_x, mu_x, tau_x, idx1, idx2, weights)
        return elbo
    def qmean(self):
        ''' Return mean values of posterior variational distributions.
        '''
        eta_x, theta_x, B_x, delta_x, mu_x, tau_x = self.constrained_params()
        thetas = Dirichlet(theta_x.data).mean
        Bs = torch.zeros([self.num_classes, self.num_classes])
        for i in range(self.num_classes):
            for j in range(self.num_classes):
                Bs[i,j] = Beta(B_x[i,j,0].data, B_x[i,j,1].data).mean
        deltas = delta_x[:,0].data
        mus = mu_x[:,:,0].data
        # NOTE(review): reported "precision" is the product of the two
        # Gamma parameters; the Gamma mean would be a/b — confirm the
        # intended parameterization.
        taus = tau_x.data.prod(dim=-1)
        return eta_x.data, thetas, Bs, deltas, mus, taus
    def summary(self, A, z=None):
        ''' Print the summary and plot the sorted adjacency matrix and
        the loss for one fit.
        ARGUMENTS:
        A (torch.Tensor, size: N*N): adjacency matrix.
        z (torch.Tensor, size: N*K): binary matrix indicating the true class
        assignment for each data point.
        '''
        qmean = super(VI_WDCSBM, self).summary(A,z)
        print('Expected mean weight:\n', qmean[4].numpy())
        print('Expected precision weight:\n', qmean[5].numpy())
        if len(qmean)>6:
            return qmean
###############################################################################
class VI_WSBM(VI_SBM):
    '''
    Variational inference for weighted stochastic block models.
    Parameters:
    num_classes (int): number of classes K
    num_nodes (int): number of nodes N
    etas (torch.Tensor, size: K*N):
        posterior class assignment probabilities of each node.
    thetas (torch.Tensor, size: K):
        posterior classes probabilities.
    Bs (torch.Tensor, size: K*K*2):
        posterior parameters of Beta probability distributions corresponding to
        probabilities of edges between nodes of specific classes.
    mus (torch.Tensor, size: K*K*2):
        posterior parameters of Normal probability distributions corresponding
        to mean of weights
    taus (torch.Tensor, size: K*K*2):
        posterior parameters of Gamma probability distributions corresponding
        to precision of weights
    theta_p (torch.Tensor, size: K):
        prior classes probabilities.
    B_p (torch.Tensor, size: K*K*2):
        prior parameters of Beta probability distributions corresponding to
        probabilities of edges between nodes of specific classes.
    mu_p (torch.Tensor, size: K*K*2):
        prior parameters of Normal probability distributions corresponding
        to mean of weights
    tau_p (torch.Tensor, size: K*K*2):
        prior parameters of Gamma probability distributions corresponding
        to precision of weights
    EXAMPLE:
    N = 75
    p = torch.tensor([0.2, 0.3, 0.5])
    b = torch.tensor([
        [0.8, 0.1, 0.4],
        [0.1, 0.9, 0.1],
        [0.4, 0.1, 0.8]])
    delta = torch.tensor([[100.,1.], [100.,1.], [100.,1.]])
    g_mu = torch.tensor([
        [10., 5., 2.],
        [5., 10., 2.],
        [2., 2., 20.]])
    g_tau = torch.ones([3,3])*2
    model = WDCSBM(p, b, delta, g_mu.log(), g_tau)
    z, A = model.generate(N, directed=True)
    model.show(sorted=True)
    dataloader = DataLoader(EdgesDataset(A),
        batch_size=25, shuffle=True, num_workers=0)
    vi = VI_WSBM(num_nodes=N, num_classes=3)
    vi.train(dataloader, epochs=10)
    vi.summary(A, z)
    '''
    def __init__(self, num_classes=2, num_nodes=50,
                 priors=None, init_values=None, device=None):
        ''' Initialize VI_WSBM model.
        ARGUMENTS:
        num_classes (int): number of classes K
        num_nodes (int): number of nodes N
        priors (dict of torch.float): priors; missing keys use defaults.
        init_values (dict of torch.float): initial values of the variational
            distribution's parameters; missing keys use random init.
        '''
        # Avoid mutable default arguments: merge user dicts over defaults.
        priors = {**{'theta_p': None, 'B_p': None,
                     'mu_p': None, 'tau_p': None},
                  **(priors or {})}
        init_values = {**{'etas': None, 'thetas': None, 'Bs': None,
                          'mus': None, 'taus': None},
                       **(init_values or {})}
        super(VI_WSBM, self).__init__(num_classes, num_nodes,
                                      priors, init_values, device)
        if priors['mu_p'] is None:
            # Default Normal prior parameters (all ones)
            mu_p = torch.ones([self.num_classes, self.num_classes, 2])
        else:
            mu_p = priors['mu_p']
        if priors['tau_p'] is None:
            # Default Gamma prior parameters (all ones)
            tau_p = torch.ones([self.num_classes, self.num_classes, 2])
        else:
            tau_p = priors['tau_p']
        self.mu_p = Variable(mu_p, requires_grad=False).to(self.device)
        self.tau_p = Variable(tau_p, requires_grad=False).to(self.device)
    def params_reset(self):
        ''' Reset parameters of the variational distribution from the
        init_values dictionary or with random values.
        '''
        super(VI_WSBM, self).params_reset()
        if self.init_values['mus'] is None:
            mus = torch.rand([self.num_classes, self.num_classes, 2]).to(self.device)
        else:
            mus = self.init_values['mus'].to(self.device)
        if self.init_values['taus'] is None:
            taus = torch.rand([self.num_classes, self.num_classes, 2]).to(self.device)
        else:
            taus = self.init_values['taus'].to(self.device)
        self.mus = torch.nn.Parameter(mus)
        self.taus = torch.nn.Parameter(taus)
    def constrained_params(self):
        ''' Return constrained posterior parameters. '''
        return (softmax(self.etas), torch.exp(self.thetas)+self.epsilon,
                torch.exp(self.Bs), torch.exp(self.mus), torch.exp(self.taus))
    def elbo(self, idx1, idx2, weights, debug=False):
        ''' Return evidence lower bound (ELBO) calculated for a nodes batch
        of size L; also the loss for the training.
        ARGUMENTS:
        idx1 (torch.int, size: L): start nodes.
        idx2 (torch.int, size: L): finish nodes.
        weights (torch.float, size: L): edges weights.
        '''
        L = len(weights) # Batch size
        eta_x, theta_x, B_x, mu_x, tau_x = self.constrained_params()
        # KL terms scaled so each divergence is counted once per full pass.
        elbo = - L / self.num_nodes**2 * diriKL(theta_x, self.theta_p)
        elbo += - L / self.num_nodes**2 * diriKL(B_x, self.B_p).sum()
        elbo += 1 / self.num_nodes * self.phi(idx1, eta_x, theta_x)
        elbo += - L / self.num_nodes**2 * normKL(mu_x, self.mu_p).sum()
        elbo += - L / self.num_nodes**2 * gammaKL(tau_x, self.tau_p).sum()
        elbo += self.omega(B_x, eta_x, idx1, idx2, weights)
        # Weight-likelihood term (Normal-Gamma model of edge weights).
        elbo += self.psi(eta_x, mu_x, tau_x, idx1, idx2, weights)
        return elbo
    def qmean(self):
        ''' Return mean values of posterior variational distributions.
        '''
        eta_x, theta_x, B_x, mu_x, tau_x = self.constrained_params()
        thetas = Dirichlet(theta_x.data).mean
        Bs = torch.zeros([self.num_classes, self.num_classes])
        for i in range(self.num_classes):
            for j in range(self.num_classes):
                Bs[i,j] = Beta(B_x[i,j,0].data, B_x[i,j,1].data).mean
        mus = mu_x[:,:,0].data
        # NOTE(review): "precision" reported as the product of the two
        # Gamma parameters — confirm the intended parameterization.
        taus = tau_x.data.prod(dim=-1)
        return eta_x.data, thetas, Bs, mus, taus
    def summary(self, A, z=None):
        ''' Print the summary and plot the sorted adjacency matrix and
        the loss for one fit.
        ARGUMENTS:
        A (torch.Tensor, size: N*N): adjacency matrix.
        z (torch.Tensor, size: N*K): binary matrix indicating the true class
        assignment for each data point.
        '''
        qmean = super(VI_WSBM, self).summary(A,z)
        print('Expected mean weight:\n', qmean[3].numpy())
        print('Expected precision weight:\n', qmean[4].numpy())
        if len(qmean)>5:
            return qmean
###############################################################################
class VI_WCRG(VI_RG):
    '''
    Variational inference for weighted stochastic block models for complete graphs.
    Parameters:
    num_classes (int): number of classes K
    num_nodes (int): number of nodes N
    etas (torch.Tensor, size: K*N):
        posterior class assignment probabilities of each node.
    thetas (torch.Tensor, size: K):
        posterior classes probabilities.
    mus (torch.Tensor, size: K*K*2):
        posterior parameters of Normal probability distributions corresponding
        to mean of weights
    taus (torch.Tensor, size: K*K*2):
        posterior parameters of Gamma probability distributions corresponding
        to precision of weights
    theta_p (torch.Tensor, size: K):
        prior classes probabilities.
    mu_p (torch.Tensor, size: K*K*2):
        prior parameters of Normal probability distributions corresponding
        to mean of weights
    tau_p (torch.Tensor, size: K*K*2):
        prior parameters of Gamma probability distributions corresponding
        to precision of weights
    EXAMPLE:
    N = 75
    p = torch.tensor([0.2, 0.3, 0.5])
    g_mu = torch.tensor([
        [10., 5., 2.],
        [5., 50., 2.],
        [2., 2., 20.]])
    g_tau = torch.ones([3,3])*2
    model = WCRG(p, g_mu.log(), g_tau)
    z, A = model.generate(N, directed=True)
    model.show(sorted=True)
    dataloader = DataLoader(EdgesDataset(A),
        batch_size=25, shuffle=True, num_workers=0)
    vi = VI_WCRG(num_nodes=N, num_classes=3)
    vi.train(dataloader, epochs=10)
    vi.summary(A, z)
    '''
    def __init__(self, num_classes=2, num_nodes=50,
                 priors=None, init_values=None, device=None):
        ''' Initialize VI_WCRG model.
        ARGUMENTS:
        num_classes (int): number of classes K
        num_nodes (int): number of nodes N
        priors (dict of torch.float): priors; missing keys use defaults.
        init_values (dict of torch.float): initial values of the variational
            distribution's parameters; missing keys use random init.
        '''
        # Avoid mutable default arguments: merge user dicts over defaults.
        priors = {**{'theta_p': None, 'mu_p': None, 'tau_p': None},
                  **(priors or {})}
        init_values = {**{'etas': None, 'thetas': None,
                          'mus': None, 'taus': None},
                       **(init_values or {})}
        super(VI_WCRG, self).__init__(num_nodes, priors, init_values, device)
        self.num_classes = num_classes
        self.params_reset()
        if priors['theta_p'] is None:
            # Default flat prior for Dirichlet distribution
            theta_p = torch.ones([self.num_classes])
        else:
            theta_p = priors['theta_p']
        if priors['mu_p'] is None:
            # Default Normal prior parameters (all ones)
            mu_p = torch.ones([self.num_classes, self.num_classes, 2])
        else:
            mu_p = priors['mu_p']
        if priors['tau_p'] is None:
            # Default Gamma prior parameters (all ones)
            tau_p = torch.ones([self.num_classes, self.num_classes, 2])
        else:
            tau_p = priors['tau_p']
        self.theta_p = Variable(theta_p, requires_grad=False).to(self.device)
        self.mu_p = Variable(mu_p, requires_grad=False).to(self.device)
        self.tau_p = Variable(tau_p, requires_grad=False).to(self.device)
    def params_reset(self):
        ''' Reset parameters of the variational distribution from the
        init_values dictionary or with random values.
        '''
        init_etas = self.init_values['etas']
        if init_etas is None:
            etas = torch.rand([self.num_classes, self.num_nodes]).to(self.device)
        elif isinstance(init_etas, str):
            # String flag (e.g. 'SHORTEST_PATH'); checking string-ness
            # first avoids comparing a tensor against a str.
            # Dataloader can be not specified yet.
            try:
                etas = self.etas_init().to(self.device)
                print('Initialize etas with shortest path algorithm')
            except Exception:
                # Narrowed from a bare `except:`.
                etas = torch.rand([self.num_classes, self.num_nodes]).to(self.device)
        else:
            etas = init_etas.to(self.device)
        if self.init_values['thetas'] is None:
            thetas = torch.rand([self.num_classes]).to(self.device)
        else:
            thetas = self.init_values['thetas'].to(self.device)
        if self.init_values['mus'] is None:
            mus = torch.rand([self.num_classes, self.num_classes, 2]).to(self.device)
        else:
            mus = self.init_values['mus'].to(self.device)
        if self.init_values['taus'] is None:
            taus = torch.rand([self.num_classes, self.num_classes, 2]).to(self.device)
        else:
            taus = self.init_values['taus'].to(self.device)
        self.etas = torch.nn.Parameter(etas)
        self.thetas = torch.nn.Parameter(thetas)
        self.mus = torch.nn.Parameter(mus)
        self.taus = torch.nn.Parameter(taus)
    def constrained_params(self):
        ''' Return constrained posterior parameters. '''
        return (softmax(self.etas), torch.exp(self.thetas)+self.epsilon,
                torch.exp(self.mus), torch.exp(self.taus))
    def elbo(self, idx1, idx2, weights, debug=False):
        ''' Return evidence lower bound (ELBO) calculated for a nodes batch
        of size L; also the loss for the training.
        ARGUMENTS:
        idx1 (torch.int, size: L): start nodes.
        idx2 (torch.int, size: L): finish nodes.
        weights (torch.float, size: L): edges weights.
        '''
        L = len(weights) # Batch size
        eta_x, theta_x, mu_x, tau_x = self.constrained_params()
        # KL terms scaled so each divergence is counted once per full pass.
        elbo = - L / self.num_nodes**2 * diriKL(theta_x, self.theta_p)
        if debug: print('D_KL(theta)>>', str(elbo))
        elbo += - L / self.num_nodes**2 * normKL(mu_x, self.mu_p).sum()
        if debug: print('+D_KL(mu) >>', str(elbo))
        elbo += - L / self.num_nodes**2 * gammaKL(tau_x, self.tau_p).sum()
        if debug: print('+D_KL(tau) >>', str(elbo))
        elbo += 1 / self.num_nodes * self.phi(idx1, eta_x, theta_x)
        if debug: print('+Phi >>', str(elbo))
        elbo += self.psi(eta_x, mu_x, tau_x, idx1, idx2, weights)
        if debug: print('+Psi >>', str(elbo))
        return elbo
    def qmean(self):
        ''' Return mean values of posterior variational distributions.
        '''
        eta_x, theta_x, mu_x, tau_x = self.constrained_params()
        thetas = Dirichlet(theta_x.data).mean
        mus = mu_x[:,:,0].data
        # NOTE(review): "precision" reported as the product of the two
        # Gamma parameters — confirm the intended parameterization.
        taus = tau_x.data.prod(dim=-1)
        return eta_x.data, thetas, mus, taus
    def summary(self, A, z=None):
        ''' Print the summary and plot the sorted adjacency matrix and
        the loss for one fit.
        ARGUMENTS:
        A (torch.Tensor, size: N*N): adjacency matrix.
        z (torch.Tensor, size: N*K): binary matrix indicating the true class
        assignment for each data point.
        '''
        qmean = super(VI_WCRG, self).summary(A,z)
        print('Classes probability', qmean[1].numpy())
        print('Expected mean weight:\n', qmean[2].numpy())
        print('Expected precision weight:\n', qmean[3].numpy())
        if len(qmean)>4:
            return qmean
| 40.231694
| 87
| 0.56525
| 4,718
| 36,812
| 4.288046
| 0.055108
| 0.031832
| 0.043596
| 0.030053
| 0.904206
| 0.897187
| 0.885077
| 0.868074
| 0.866097
| 0.857298
| 0
| 0.014753
| 0.31688
| 36,812
| 914
| 88
| 40.275711
| 0.789756
| 0.407476
| 0
| 0.748649
| 0
| 0
| 0.041915
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083784
| false
| 0
| 0.018919
| 0
| 0.172973
| 0.045946
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ded347677c3809c6d9e545beecd0ff3b58075f4f
| 141
|
py
|
Python
|
python-package/ssm/src/ssm/__init__.py
|
kkholst/target
|
a63f3121efeae2c3441d7d2d2261fdf85038868e
|
[
"Apache-2.0"
] | 1
|
2021-09-17T19:01:21.000Z
|
2021-09-17T19:01:21.000Z
|
python-package/ssm/src/ssm/__init__.py
|
kkholst/target
|
a63f3121efeae2c3441d7d2d2261fdf85038868e
|
[
"Apache-2.0"
] | null | null | null |
python-package/ssm/src/ssm/__init__.py
|
kkholst/target
|
a63f3121efeae2c3441d7d2d2261fdf85038868e
|
[
"Apache-2.0"
] | null | null | null |
from .ssm import Kalman # noqa: F401
from .__about__ import __version__ # noqa: F401
from .__ssm_c__ import CPP_Kalman # noqa: F401,E999
| 28.2
| 53
| 0.751773
| 21
| 141
| 4.380952
| 0.52381
| 0.26087
| 0.304348
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.103448
| 0.177305
| 141
| 4
| 54
| 35.25
| 0.689655
| 0.262411
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
724747d91a2bdd1a359f0582c9539099a8cba832
| 140
|
py
|
Python
|
metatron/__init__.py
|
harel/metatron
|
e8d82aa3e000c0682d215396112900ede54a96d6
|
[
"MIT"
] | 4
|
2018-02-16T10:54:45.000Z
|
2021-07-12T20:41:48.000Z
|
metatron/__init__.py
|
harel/metatron
|
e8d82aa3e000c0682d215396112900ede54a96d6
|
[
"MIT"
] | 1
|
2020-10-03T15:23:37.000Z
|
2020-10-03T15:23:37.000Z
|
metatron/__init__.py
|
harel/metatron
|
e8d82aa3e000c0682d215396112900ede54a96d6
|
[
"MIT"
] | null | null | null |
from .metatron import Metatron, add_schema_spec
from .version import __version__
__all__ = ['Metatron', 'add_schema_spec', '__version__']
| 23.333333
| 56
| 0.785714
| 17
| 140
| 5.529412
| 0.470588
| 0.234043
| 0.361702
| 0.446809
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114286
| 140
| 5
| 57
| 28
| 0.758065
| 0
| 0
| 0
| 0
| 0
| 0.242857
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
a0dec97f14a784a3b828de5abd6ba7ae84e8afc9
| 180
|
py
|
Python
|
bot/utils/filters.py
|
TrixiS/base-tg-bot
|
f795724d670eca06ea1436264f7960e5fef6a2eb
|
[
"MIT"
] | null | null | null |
bot/utils/filters.py
|
TrixiS/base-tg-bot
|
f795724d670eca06ea1436264f7960e5fef6a2eb
|
[
"MIT"
] | null | null | null |
bot/utils/filters.py
|
TrixiS/base-tg-bot
|
f795724d670eca06ea1436264f7960e5fef6a2eb
|
[
"MIT"
] | null | null | null |
from aiogram import types
from bot.bot import bot
def is_admin(message: types.Message):
    """Tell whether the message sender is the configured bot admin."""
    sender = message.from_user
    return sender and sender.username == bot.config.admin_username
| 22.5
| 88
| 0.794444
| 28
| 180
| 4.964286
| 0.5
| 0.158273
| 0.215827
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.133333
| 180
| 7
| 89
| 25.714286
| 0.891026
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.5
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
19c90650913d611fd65187a25c35a63941b38419
| 196
|
py
|
Python
|
tests/molecule_generation/generation/test_lignin.py
|
ZimmermanGroup/conformer-rl
|
beb98cbee6ba6efba686d7c6eebbf33fd737f279
|
[
"MIT"
] | 9
|
2021-09-03T18:46:46.000Z
|
2022-03-22T05:47:20.000Z
|
tests/molecule_generation/generation/test_lignin.py
|
ZimmermanGroup/conformer-rl
|
beb98cbee6ba6efba686d7c6eebbf33fd737f279
|
[
"MIT"
] | 4
|
2021-07-15T03:57:26.000Z
|
2021-08-03T06:27:28.000Z
|
tests/molecule_generation/generation/test_lignin.py
|
ZimmermanGroup/conformer-rl
|
beb98cbee6ba6efba686d7c6eebbf33fd737f279
|
[
"MIT"
] | 1
|
2022-03-17T01:59:36.000Z
|
2022-03-17T01:59:36.000Z
|
from conformer_rl.molecule_generation.generation.generate_lignin import generate_lignin
# additional testing done in jupyter notebook
def test_lignin(mocker):
    # Smoke test: building a 5-monomer lignin polymer must not raise.
    # NOTE(review): the result is never asserted on and the `mocker`
    # fixture is unused here — presumably kept for fixture parity with
    # the other generation tests; confirm.
    mol = generate_lignin(5)
| 21.777778
| 87
| 0.806122
| 25
| 196
| 6.08
| 0.76
| 0.276316
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005952
| 0.142857
| 196
| 8
| 88
| 24.5
| 0.89881
| 0.219388
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
19e9598655f7f38277d1469c361ddd1b07135e39
| 96
|
py
|
Python
|
ronald_bdl/__init__.py
|
ronaldseoh/ronald_bdl
|
9485d2ac4ffa18684d279bd2cdffd24888719f5f
|
[
"MIT"
] | 2
|
2020-08-19T14:12:55.000Z
|
2020-10-19T05:35:25.000Z
|
ronald_bdl/__init__.py
|
ronaldseoh/ronald_bdl
|
9485d2ac4ffa18684d279bd2cdffd24888719f5f
|
[
"MIT"
] | 1
|
2020-09-25T15:37:52.000Z
|
2020-10-22T22:23:55.000Z
|
ronald_bdl/__init__.py
|
ronaldseoh/ronald_bdl
|
9485d2ac4ffa18684d279bd2cdffd24888719f5f
|
[
"MIT"
] | 1
|
2022-03-18T17:13:29.000Z
|
2022-03-18T17:13:29.000Z
|
__all__ = ['models', 'datasets']
from ronald_bdl import models
from ronald_bdl import datasets
| 19.2
| 32
| 0.78125
| 13
| 96
| 5.307692
| 0.538462
| 0.289855
| 0.376812
| 0.550725
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.135417
| 96
| 4
| 33
| 24
| 0.831325
| 0
| 0
| 0
| 0
| 0
| 0.145833
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
c21e44a084215ac310bc204835316f1b51efef52
| 1,193
|
pyde
|
Python
|
listing_22/listing_22.pyde
|
tiranderel/2019-fall-polytech-cs
|
67f0482a0f143381f9b494a4348d6436ce8f8c1e
|
[
"MIT"
] | null | null | null |
listing_22/listing_22.pyde
|
tiranderel/2019-fall-polytech-cs
|
67f0482a0f143381f9b494a4348d6436ce8f8c1e
|
[
"MIT"
] | null | null | null |
listing_22/listing_22.pyde
|
tiranderel/2019-fall-polytech-cs
|
67f0482a0f143381f9b494a4348d6436ce8f8c1e
|
[
"MIT"
] | null | null | null |
def setup():
    """Processing sketch setup: create the canvas and configure one-shot drawing."""
    size(590, 750)  # 590x750 px canvas
    smooth()  # enable anti-aliasing
    background(255)  # white background
    noStroke()  # draw shapes without outlines
    noLoop()  # run draw() exactly once instead of looping
def draw():
    """Draw a 10x10 grid of 35x35 squares in alternating greyscale gradients.

    Rows sit at y = 220, 260, ..., 580. Even-numbered rows (0-based) fade
    dark-to-light left to right (fill = col*20 for col 1..10); odd-numbered
    rows fade the opposite way (fill = 200 - col*20). Squares sit at
    x = col*40 + 50.

    This replaces ten copy-pasted loops (and a dead `i = range(0, 10)`
    assignment) from the original with one nested loop; the fill/rect call
    sequence is identical.
    """
    for row in range(10):
        y = 220 + 40 * row
        # col runs 1..10, matching the original's `i = i + 1` inside each loop.
        for col in range(1, 11):
            if row % 2 == 0:
                fill(col * 20)
            else:
                fill(200 - col * 20)
            rect(col * 40 + 50, y, 35, 35)
| 23.392157
| 35
| 0.404023
| 211
| 1,193
| 2.28436
| 0.165877
| 0.136929
| 0.182573
| 0.228216
| 0.790456
| 0.790456
| 0.790456
| 0.790456
| 0.790456
| 0.790456
| 0
| 0.29491
| 0.440067
| 1,193
| 50
| 36
| 23.86
| 0.426647
| 0
| 0
| 0.625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041667
| false
| 0
| 0
| 0
| 0.041667
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
c2291b4f8e52c49ce23bb3f39bfb0948fa638ffd
| 155
|
py
|
Python
|
code/archs/__init__.py
|
ThmCuong/IIC-tmp
|
7029050db45aaec0099dca67b31b613f1e5ecf10
|
[
"MIT"
] | null | null | null |
code/archs/__init__.py
|
ThmCuong/IIC-tmp
|
7029050db45aaec0099dca67b31b613f1e5ecf10
|
[
"MIT"
] | null | null | null |
code/archs/__init__.py
|
ThmCuong/IIC-tmp
|
7029050db45aaec0099dca67b31b613f1e5ecf10
|
[
"MIT"
] | null | null | null |
# Package initializer: import the architecture submodules so that
# `code.archs.cluster`, `.segmentation` and `.semisup` are available after
# importing the package. (Removed stale commented-out absolute-import
# variants that duplicated this line.)
from . import cluster, segmentation, semisup
| 31
| 44
| 0.767742
| 20
| 155
| 5.95
| 0.35
| 0.201681
| 0.327731
| 0.319328
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.135484
| 155
| 4
| 45
| 38.75
| 0.88806
| 0.664516
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
dfa6a6e48a6f2ee44f6e2cbb14f26fe44941cec7
| 3,481
|
py
|
Python
|
functional_tests/translate/test_microsoft.py
|
Rested/multi-translate
|
565ef2ac7e8b5f94595cecc78b4076a3bc9be45e
|
[
"MIT"
] | 1
|
2021-08-22T14:43:11.000Z
|
2021-08-22T14:43:11.000Z
|
functional_tests/translate/test_microsoft.py
|
Rested/multi-translate
|
565ef2ac7e8b5f94595cecc78b4076a3bc9be45e
|
[
"MIT"
] | null | null | null |
functional_tests/translate/test_microsoft.py
|
Rested/multi-translate
|
565ef2ac7e8b5f94595cecc78b4076a3bc9be45e
|
[
"MIT"
] | null | null | null |
import httpx
from functional_tests.translate.test_translate import translate_url
def test_translate_microsoft_basic():
    """en -> es 'hello' via the microsoft engine returns 'Hola', no alignment."""
    response = httpx.get(
        translate_url(),
        params={
            "from_language": "en",
            "to_language": "es",
            "source_text": "hello",
            "preferred_engine": "microsoft",
            "with_alignment": False,
        },
    )
    assert response.status_code == 200
    expected = {
        "translated_text": "Hola",
        "engine": "microsoft",
        "engine_version": "3.0",
        "from_language": "en",
        "to_language": "es",
        "source_text": "hello",
        "detected_language_confidence": None,
        "alignment": None,
    }
    assert response.json() == expected
def test_translate_microsoft_detection():
    """Omitting from_language triggers detection (confidence 1.0 for 'hello')."""
    response = httpx.get(
        translate_url(),
        params={
            "to_language": "es",
            "source_text": "hello",
            "preferred_engine": "microsoft",
            "with_alignment": False,
        },
    )
    assert response.status_code == 200
    expected = {
        "translated_text": "Hola",
        "engine": "microsoft",
        "engine_version": "3.0",
        "from_language": "en",
        "to_language": "es",
        "source_text": "hello",
        "detected_language_confidence": 1.0,
        "alignment": None,
    }
    assert response.json() == expected
def test_translate_microsoft_alignment():
    """with_alignment=True yields per-word src/dest character spans."""
    response = httpx.get(
        translate_url(),
        params={
            "from_language": "en",
            "to_language": "es",
            "source_text": "hello",
            "preferred_engine": "microsoft",
            "with_alignment": True,
        },
    )
    assert response.status_code == 200
    expected = {
        "translated_text": "Hola",
        "engine": "microsoft",
        "engine_version": "3.0",
        "from_language": "en",
        "to_language": "es",
        "source_text": "hello",
        "detected_language_confidence": None,
        "alignment": [
            {
                "dest": {"end": "3", "start": "0", "text": "Hola"},
                "src": {"end": "4", "start": "0", "text": "hello"},
            }
        ],
    }
    assert response.json() == expected
def test_translate_microsoft_alignment_with_bad_pair():
    """Alignment for an unsupported pair (es -> fr) is rejected with a 400."""
    response = httpx.get(
        translate_url(),
        params={
            "from_language": "es",
            "to_language": "fr",
            "source_text": "hola",
            "preferred_engine": "microsoft",
            "with_alignment": True,
        },
    )
    assert response.status_code == 400
    expected = {
        "detail": "microsoft (3.0) engine does not support alignment between es and fr"
    }
    assert response.json() == expected
def test_falls_back_when_alignment_required_even_though_not_best():
    """With fallback=True, the alignment requirement forces the microsoft engine."""
    response = httpx.get(
        translate_url(),
        params={
            "from_language": "en",
            "to_language": "ko",
            "source_text": "hello",
            "fallback": True,
            "with_alignment": True,
        },
    )
    assert response.status_code == 200
    expected = {
        "translated_text": "안녕하세요",
        "engine": "microsoft",
        "engine_version": "3.0",
        "from_language": "en",
        "to_language": "ko",
        "source_text": "hello",
        "detected_language_confidence": None,
        "alignment": [
            {
                "dest": {"end": "4", "start": "0", "text": "안녕하세요"},
                "src": {"end": "4", "start": "0", "text": "hello"},
            }
        ],
    }
    assert response.json() == expected
| 28.532787
| 87
| 0.552427
| 352
| 3,481
| 5.184659
| 0.198864
| 0.060274
| 0.065753
| 0.06137
| 0.814795
| 0.770959
| 0.734247
| 0.70411
| 0.70411
| 0.680548
| 0
| 0.014164
| 0.290147
| 3,481
| 121
| 88
| 28.768595
| 0.724403
| 0
| 0
| 0.715596
| 0
| 0
| 0.308245
| 0.032175
| 0
| 0
| 0
| 0
| 0.091743
| 1
| 0.045872
| false
| 0
| 0.018349
| 0
| 0.06422
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
dfb2804002e566ec24700ba7d81e31166d93a292
| 2,170
|
py
|
Python
|
api/models.py
|
RicardoLamb/TCC_SIGO_ms_Consultorias
|
274efc756ea59cc99bcef200cf9b3218439aab79
|
[
"MIT"
] | null | null | null |
api/models.py
|
RicardoLamb/TCC_SIGO_ms_Consultorias
|
274efc756ea59cc99bcef200cf9b3218439aab79
|
[
"MIT"
] | null | null | null |
api/models.py
|
RicardoLamb/TCC_SIGO_ms_Consultorias
|
274efc756ea59cc99bcef200cf9b3218439aab79
|
[
"MIT"
] | null | null | null |
# from django.db import models
# from django.contrib.auth.models import User
# from django.utils import timezone
# class Consultorias(models.Model):
# class ConsultoriasObjects(models.Manager):
# def get_queryset(self):
# return super().get_queryset().filter(modalidade='vigente')
# options = ('assessoria', 'Assessoria'), ('consultoria', 'Consultoria'), ('assessoria e consultoria', 'Assessoria e Consultoria')
# empresa = models.CharField(max_length=80)
# cnpj = models.CharField(max_length=14)
# fantasia = models.CharField(max_length=50)
# endereco = models.CharField(max_length=50)
# iniciodecontrato = models.CharField(max_length=20)
# vigenciadecontrato = models.CharField(max_length=20)
# modalidade = models.CharField(max_length=25, choices=options, default='assessoria')
# area = models.TextField(null=True)
# normas = models.TextField(null=True)
# objects = models.Manager()
# consultoriasobjects = ConsultoriasObjects()
# class Meta:
# ordering = ('-assessoria',)
# def __str__(self):
# return self.empresa
from django.db import models
from django.contrib.auth.models import User
from django.utils import timezone
class Consultorias(models.Model):
    """Django model for a consultoria/assessoria service contract."""

    class ConsultoriasObjects(models.Manager):
        # Custom manager restricted to rows whose modalidade is 'assessoria'.
        def get_queryset(self):
            return super().get_queryset().filter(modalidade='assessoria')

    # (db value, human-readable label) pairs for the `modalidade` choices.
    # Note the trailing comma: this is a 3-tuple of 2-tuples.
    options = ('assessoria', 'Assessoria'), ('consultoria', 'Consultoria'), ('assessoria e consultoria', 'Assessoria e Consultoria'),
    empresa = models.CharField(max_length=80)  # company name
    cnpj = models.CharField(max_length=14)  # CNPJ company registration number (presumably digits only — confirm)
    fantasia = models.CharField(max_length=50)  # trade name
    endereco = models.CharField(max_length=50)  # address
    iniciodecontrato = models.CharField(max_length=20)  # contract start (stored as text, not a DateField)
    vigenciadecontrato = models.CharField(max_length=20)  # contract validity/end (stored as text)
    modalidade = models.CharField(max_length=25, choices=options, default='assessoria')
    area = models.TextField(null=True)
    normas = models.TextField(null=True)
    objects = models.Manager()  # default, unfiltered manager
    consultoriasobjects = ConsultoriasObjects()  # filtered manager (assessoria only)

    class Meta:
        # NOTE(review): '-assessoria' does not match any declared field name on
        # this model; Django Meta.ordering expects field names — confirm intent.
        ordering = ('-assessoria',)

    def __str__(self):
        # Human-readable representation: the company name.
        return self.empresa
| 38.070175
| 134
| 0.71659
| 236
| 2,170
| 6.478814
| 0.224576
| 0.137345
| 0.164814
| 0.219751
| 0.988882
| 0.988882
| 0.988882
| 0.988882
| 0.988882
| 0.988882
| 0
| 0.015376
| 0.160829
| 2,170
| 57
| 135
| 38.070175
| 0.824272
| 0.484793
| 0
| 0
| 0
| 0
| 0.110502
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086957
| false
| 0
| 0.130435
| 0.086957
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 8
|
dfba54bfa6ce0cfdf6ffa3e9c82aea0f4896d7b2
| 4,503
|
py
|
Python
|
Traitment_images/tests/test_sample.py
|
KenN7/KorreKthor
|
58511e1f500482fd127f6e178efaad32f3df05c1
|
[
"MIT"
] | null | null | null |
Traitment_images/tests/test_sample.py
|
KenN7/KorreKthor
|
58511e1f500482fd127f6e178efaad32f3df05c1
|
[
"MIT"
] | 3
|
2022-02-11T09:22:01.000Z
|
2022-02-22T20:16:37.000Z
|
Traitment_images/tests/test_sample.py
|
KenN7/KorreKthor
|
58511e1f500482fd127f6e178efaad32f3df05c1
|
[
"MIT"
] | null | null | null |
import main
import pprint
def test_compute():
    """End-to-end check of main.compute() on a known two-page answer-sheet PDF.

    Runs the full pipeline on a fixture PDF and asserts the exact result:
    a zip file name plus one entry per scanned page, each carrying the decoded
    QR payload and the detected answer grid.
    """
    res = main.compute("tests/82318c24-3f36-424f-9b29-c1e7e05acfe5.pdf", "82318c24-3f36-424f-9b29-c1e7e05acfe5")
    pprint.pprint(res)  # printed so a failing diff is easier to inspect
    assert res == {
        "zipFile": "82318c24-3f36-424f-9b29-c1e7e05acfe5.zip",
        "data": [
            {
                # Page 1.
                "qrcode": {
                    "matricule": 20072,
                    "version": "A",
                    "lessonId": "82318c24-3f36-424f-9b29-c1e7e05acfe5",
                },
                # Questions 1-20 have 6 answer boxes; questions 21-29 have 5.
                "answers": [
                    [False, False, False, False, True, False],
                    [False, False, False, False, True, False],
                    [False, True, False, False, False, False],
                    [False, False, True, False, False, False],
                    [False, True, False, False, False, False],
                    [True, False, False, False, False, False],
                    [False, False, False, False, True, False],
                    [False, True, False, False, False, False],
                    [False, False, False, False, True, False],
                    [False, False, False, True, False, False],
                    [True, False, False, False, False, False],
                    [False, False, False, False, False, True],
                    [False, False, False, False, True, False],
                    [False, False, False, True, False, False],
                    [False, False, True, False, False, False],
                    [True, False, False, False, False, False],
                    [False, False, False, False, False, True],
                    [False, False, False, False, True, False],
                    [False, False, True, False, False, False],
                    [True, False, False, False, False, False],
                    [False, False, False, True, False],
                    [False, True, False, False, False],
                    [False, False, True, False, False],
                    [False, False, False, True, False],
                    [False, False, False, False, True],
                    [False, False, True, False, False],
                    [False, True, False, False, False],
                    [True, False, False, False, False],
                    [True, False, True, True, True],
                ],
                "file": "1.png",
                # NOTE(review): the string "None", not the None singleton — confirm intended.
                "error": "None",
            },
            {
                # Page 2: same student/lesson; answers match page 1 except question 29.
                "qrcode": {
                    "matricule": 20072,
                    "version": "A",
                    "lessonId": "82318c24-3f36-424f-9b29-c1e7e05acfe5",
                },
                "answers": [
                    [False, False, False, False, True, False],
                    [False, False, False, False, True, False],
                    [False, True, False, False, False, False],
                    [False, False, True, False, False, False],
                    [False, True, False, False, False, False],
                    [True, False, False, False, False, False],
                    [False, False, False, False, True, False],
                    [False, True, False, False, False, False],
                    [False, False, False, False, True, False],
                    [False, False, False, True, False, False],
                    [True, False, False, False, False, False],
                    [False, False, False, False, False, True],
                    [False, False, False, False, True, False],
                    [False, False, False, True, False, False],
                    [False, False, True, False, False, False],
                    [True, False, False, False, False, False],
                    [False, False, False, False, False, True],
                    [False, False, False, False, True, False],
                    [False, False, True, False, False, False],
                    [True, False, False, False, False, False],
                    [False, False, False, True, False],
                    [False, True, False, False, False],
                    [False, False, True, False, False],
                    [False, False, False, True, False],
                    [False, False, False, False, True],
                    [False, False, True, False, False],
                    [False, True, False, False, False],
                    [True, False, False, False, False],
                    [False, False, False, True, False],
                ],
                "file": "2.png",
                "error": "None",
            },
        ],
    }
| 48.419355
| 112
| 0.436598
| 399
| 4,503
| 4.924812
| 0.082707
| 1.063613
| 1.152672
| 1.048346
| 0.927735
| 0.87888
| 0.87888
| 0.87888
| 0.87888
| 0.8743
| 0
| 0.04581
| 0.432823
| 4,503
| 92
| 113
| 48.945652
| 0.723571
| 0
| 0
| 0.8
| 0
| 0
| 0.070398
| 0.043082
| 0
| 0
| 0
| 0
| 0.011111
| 1
| 0.011111
| false
| 0
| 0.022222
| 0
| 0.033333
| 0.022222
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
a03e949fcec67a3ac669f02f2c62f84ef444df0a
| 153
|
py
|
Python
|
RiskChangesDesktop/__init__.py
|
ashokdahal/RiskChangesDesktop
|
e2ccd0c72f9199312c2d4d6ba36efd77aeff524c
|
[
"MIT"
] | 2
|
2021-10-08T17:02:36.000Z
|
2022-01-04T17:50:45.000Z
|
RiskChangesDesktop/__init__.py
|
ashokdahal/RiskChangesDesktop
|
e2ccd0c72f9199312c2d4d6ba36efd77aeff524c
|
[
"MIT"
] | 1
|
2022-03-08T06:43:50.000Z
|
2022-03-08T06:45:35.000Z
|
RiskChangesDesktop/__init__.py
|
ashokdahal/RiskChangesDesktop
|
e2ccd0c72f9199312c2d4d6ba36efd77aeff524c
|
[
"MIT"
] | 1
|
2022-03-08T02:18:23.000Z
|
2022-03-08T02:18:23.000Z
|
from RiskChangesDesktop import Exposure
from RiskChangesDesktop import Loss
from RiskChangesDesktop import Risk
from RiskChangesDesktop import DataManage
| 38.25
| 41
| 0.901961
| 16
| 153
| 8.625
| 0.4375
| 0.637681
| 0.811594
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.098039
| 153
| 4
| 41
| 38.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
a0402f3738760db40d9bdd841cdb71e4a33efada
| 78,360
|
py
|
Python
|
tests/adapters/switches/juniper_mx_test.py
|
joseph2rs/netman
|
69181e71b63ccd1ddb1497e3e7049d420a54e73b
|
[
"Apache-2.0"
] | null | null | null |
tests/adapters/switches/juniper_mx_test.py
|
joseph2rs/netman
|
69181e71b63ccd1ddb1497e3e7049d420a54e73b
|
[
"Apache-2.0"
] | null | null | null |
tests/adapters/switches/juniper_mx_test.py
|
joseph2rs/netman
|
69181e71b63ccd1ddb1497e3e7049d420a54e73b
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015 Internap.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import textwrap
import unittest
from flexmock import flexmock, flexmock_teardown
from hamcrest import assert_that, equal_to, instance_of, contains_string, has_length
from ncclient.operations import RPCError
from ncclient.xml_ import to_ele
from netaddr import IPAddress, IPNetwork
from netman.adapters.switches.juniper.base import Juniper
from netman.adapters.switches.juniper.mx import netconf
from netman.core.objects.access_groups import IN, OUT
from netman.core.objects.exceptions import VlanAlreadyExist, BadVlanNumber, BadVlanName, UnknownVlan, \
IPAlreadySet, UnknownIP, InterfaceInWrongPortMode, AccessVlanNotSet, UnknownInterface
from netman.core.objects.exceptions import VrrpDoesNotExistForVlan
from netman.core.objects.port_modes import ACCESS
from netman.core.objects.switch_descriptor import SwitchDescriptor
from netman.core.switch_factory import RealSwitchFactory
from tests.adapters.switches.juniper_test import an_ok_response, is_xml, a_configuration, an_rpc_response
def test_factory():
    """RealSwitchFactory builds a Juniper adapter for the 'juniper_mx' model
    and carries every descriptor attribute through unchanged."""
    descriptor = SwitchDescriptor(hostname='hostname', model='juniper_mx',
                                  username='username', password='password', port=22)
    switch = RealSwitchFactory().get_switch_by_descriptor(descriptor)

    assert_that(switch, instance_of(Juniper))
    for attribute, expected in [('hostname', 'hostname'),
                                ('model', 'juniper_mx'),
                                ('username', 'username'),
                                ('password', 'password'),
                                ('port', 22)]:
        assert_that(getattr(switch.switch_descriptor, attribute), equal_to(expected))
class JuniperMXTest(unittest.TestCase):
def setUp(self):
self.switch = netconf(SwitchDescriptor(model='juniper_mx', hostname="toto"))
self.netconf_mock = flexmock()
self.switch.netconf = self.netconf_mock
self.switch.in_transaction = True
def tearDown(self):
flexmock_teardown()
def test_add_vlan(self):
self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
<filter>
<configuration>
<bridge-domains />
</configuration>
</filter>
""")).and_return(a_configuration("""
<bridge-domains>
<domain>
<name>PATATE</name>
<vlan-id>900</vlan-id>
</domain>
</bridge-domains>
"""))
self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
<config>
<configuration>
<bridge-domains>
<domain>
<name>VLAN1000</name>
<vlan-id>1000</vlan-id>
<description>Shizzle</description>
</domain>
</bridge-domains>
</configuration>
</config>
""")).and_return(an_ok_response())
self.switch.add_vlan(1000, name="Shizzle")
def test_add_vlan_already_in_use_raises(self):
self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
<filter>
<configuration>
<bridge-domains />
</configuration>
</filter>
""")).and_return(a_configuration("""
<bridge-domains>
<domain>
<name>PATATE</name>
<vlan-id>1000</vlan-id>
</domain>
</bridge-domains>
"""))
self.netconf_mock.should_receive("edit_config").never()
with self.assertRaises(VlanAlreadyExist) as expect:
self.switch.add_vlan(1000)
assert_that(str(expect.exception), contains_string("Vlan 1000 already exist"))
def test_add_existing_vlan_raises(self):
self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
<filter>
<configuration>
<bridge-domains />
</configuration>
</filter>
""")).and_return(a_configuration("""
<bridge-domains>
<domain>
<name>VLAN1000</name>
<vlan-id>1000</vlan-id>
</domain>
</bridge-domains>
"""))
self.netconf_mock.should_receive("edit_config").never()
with self.assertRaises(VlanAlreadyExist) as expect:
self.switch.add_vlan(1000)
assert_that(str(expect.exception), contains_string("Vlan 1000 already exist"))
def test_add_vlan_bad_vlan_id(self):
self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
<filter>
<configuration>
<bridge-domains />
</configuration>
</filter>
""")).and_return(a_configuration(""))
self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
<config>
<configuration>
<bridge-domains>
<domain>
<name>VLAN9000</name>
<vlan-id>9000</vlan-id>
</domain>
</bridge-domains>
</configuration>
</config>
""")).and_raise(RPCError(to_ele(textwrap.dedent("""
<rpc-error xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" xmlns:junos="http://xml.juniper.net/junos/15.1R4/junos" xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0">>
<error-severity>error</error-severity>
<error-info>
<bad-element>domain</bad-element>
</error-info>
<error-message>Value 9000 is not within range (1..4094)</error-message>
</rpc-error>
"""))))
with self.assertRaises(BadVlanNumber) as expect:
self.switch.add_vlan(9000)
assert_that(str(expect.exception), equal_to("Vlan number is invalid"))
def test_add_vlan_empty_vlan_name(self):
self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
<filter>
<configuration>
<bridge-domains />
</configuration>
</filter>
""")).and_return(a_configuration(""))
self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
<config>
<configuration>
<bridge-domains>
<domain>
<name>VLAN1000</name>
<vlan-id>1000</vlan-id>
<description></description>
</domain>
</bridge-domains>
</configuration>
</config>
""")).and_raise(RPCError(to_ele(textwrap.dedent("""
<rpc-error xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"
xmlns:junos="http://xml.juniper.net/junos/15.1R4/junos"
xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0">
<error-type>protocol</error-type>
<error-tag>operation-failed</error-tag>
<error-severity>error</error-severity>
<error-message>description: '': Must be a string of 255 characters or less</error-message>
<error-info>
<bad-element>domain</bad-element>
</error-info>
</rpc-error>
"""))))
with self.assertRaises(BadVlanName) as expect:
self.switch.add_vlan(1000, "")
assert_that(str(expect.exception), equal_to("Vlan name is invalid"))
def test_add_vlan_too_long_vlan_name(self):
long_string = 'a' * 256
self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
<filter>
<configuration>
<bridge-domains />
</configuration>
</filter>
""")).and_return(a_configuration(""))
self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
<config>
<configuration>
<bridge-domains>
<domain>
<name>VLAN1000</name>
<vlan-id>1000</vlan-id>
<description>{}</description>
</domain>
</bridge-domains>
</configuration>
</config>
""".format(long_string))).and_raise(RPCError(to_ele(textwrap.dedent("""
<rpc-error xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"
xmlns:junos="http://xml.juniper.net/junos/15.1R4/junos"
xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0">
<error-type>protocol</error-type>
<error-tag>operation-failed</error-tag>
<error-severity>error</error-severity>
<error-message>description: '{}': Must be a string of 255 characters or less</error-message>
<error-info>
<bad-element>domain</bad-element>
</error-info>
</rpc-error>
""".format(long_string)))))
with self.assertRaises(BadVlanName) as expect:
self.switch.add_vlan(1000, long_string)
assert_that(str(expect.exception), equal_to("Vlan name is invalid"))
def test_add_vlan_raises_RPCError(self):
self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
<filter>
<configuration>
<bridge-domains />
</configuration>
</filter>
""")).and_return(a_configuration(""))
self.netconf_mock.should_receive("edit_config").once().and_raise(RPCError(to_ele(textwrap.dedent("""
<rpc-error xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"
xmlns:junos="http://xml.juniper.net/junos/15.1R4/junos"
xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0">
<error-type>protocol</error-type>
<error-tag>operation-failed</error-tag>
<error-severity>error</error-severity>
<error-message>There's another problem</error-message>
<error-info>
<bad-element>domain</bad-element>
</error-info>
</rpc-error>
"""))))
with self.assertRaises(RPCError):
self.switch.add_vlan(1000, 'a' * 256)
def test_remove_vlan_ignores_removing_interface_not_created(self):
self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
<filter>
<configuration>
<bridge-domains />
<interfaces />
</configuration>
</filter>
""")).and_return(a_configuration("""
<bridge-domains>
<domain>
<name>STANDARD</name>
<vlan-id>10</vlan-id>
</domain>
</bridge-domains>
"""))
self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
<config>
<configuration>
<bridge-domains>
<domain operation="delete">
<name>STANDARD</name>
</domain>
</bridge-domains>
</configuration>
</config>
""")).and_return(an_ok_response())
self.switch.remove_vlan(10)
def test_remove_vlan_invalid_vlan_raises(self):
self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
<filter>
<configuration>
<bridge-domains />
<interfaces />
</configuration>
</filter>
""")).and_return(a_configuration("""
<bridge-domains>
<domain>
<name>ANOTHER</name>
<vlan-id>10</vlan-id>
</domain>
</bridge-domains>
"""))
with self.assertRaises(UnknownVlan) as expect:
self.switch.remove_vlan(20)
assert_that(str(expect.exception), equal_to("Vlan 20 not found"))
def test_get_vlans(self):
self.switch.in_transaction = False
self.netconf_mock.should_receive("get_config").with_args(source="running", filter=is_xml("""
<filter>
<configuration>
<bridge-domains />
<interfaces />
</configuration>
</filter>
""")).and_return(a_configuration("""
<bridge-domains>
<domain>
<name>STANDARD</name>
<vlan-id>10</vlan-id>
<description>my-description</description>
</domain>
<domain>
<name>NO-VLAN-ID</name>
<description>shizzle</description>
</domain>
<domain>
<name>WITH-IF</name>
<vlan-id>20</vlan-id>
<routing-interface>irb.20</routing-interface>
</domain>
<domain>
<name>WITH-IF-MULTI-IP</name>
<vlan-id>40</vlan-id>
<routing-interface>irb.70</routing-interface>
</domain>
</bridge-domains>
<interfaces>
<interface>
<name>xe-0/0/1</name>
<unit>
<name>0</name>
<family>
<bridge>
</bridge>
</family>
</unit>
</interface>
<interface>
<name>irb</name>
<unit>
<name>20</name>
<family>
<inet>
<address>
<name>1.1.1.1/24</name>
</address>
<filter>
<input>
<filter-name>AC-IN</filter-name>
</input>
<output>
<filter-name>AC-OUT</filter-name>
</output>
</filter>
</inet>
</family>
</unit>
<unit>
<name>40</name>
</unit>
<unit>
<name>70</name>
<family>
<inet>
<address>
<name>2.1.1.1/24</name>
</address>
<address>
<name>4.1.1.1/24</name>
</address>
<address>
<name>3.1.1.1/24</name>
</address>
</inet>
</family>
</unit>
</interface>
</interfaces>
"""))
vlan10, vlan20, vlan40 = self.switch.get_vlans()
assert_that(vlan10.number, equal_to(10))
assert_that(vlan10.name, equal_to("my-description"))
assert_that(vlan10.access_groups[IN], equal_to(None))
assert_that(vlan10.access_groups[OUT], equal_to(None))
assert_that(vlan10.ips, has_length(0))
assert_that(vlan20.number, equal_to(20))
assert_that(vlan20.name, equal_to(None))
assert_that(vlan20.access_groups[IN], equal_to("AC-IN"))
assert_that(vlan20.access_groups[OUT], equal_to("AC-OUT"))
assert_that(vlan20.ips, has_length(1))
vlan20ip1 = vlan20.ips[0]
assert_that(str(vlan20ip1.ip), equal_to("1.1.1.1"))
assert_that(vlan20ip1.prefixlen, equal_to(24))
assert_that(vlan40.number, equal_to(40))
assert_that(vlan40.name, equal_to(None))
assert_that(vlan40.access_groups[IN], equal_to(None))
assert_that(vlan40.access_groups[OUT], equal_to(None))
vlan40ip1, vlan40ip2, vlan40ip3 = vlan40.ips
assert_that(str(vlan40ip1.ip), equal_to("2.1.1.1"))
assert_that(vlan40ip1.prefixlen, equal_to(24))
assert_that(str(vlan40ip2.ip), equal_to("3.1.1.1"))
assert_that(vlan40ip2.prefixlen, equal_to(24))
assert_that(str(vlan40ip3.ip), equal_to("4.1.1.1"))
assert_that(vlan40ip3.prefixlen, equal_to(24))
def test_get_vlan_with_interface_multi_ip(self):
self.switch.in_transaction = False
self.netconf_mock.should_receive("get_config").with_args(source="running", filter=is_xml("""
<filter>
<configuration>
<bridge-domains />
<interfaces />
</configuration>
</filter>
""")).and_return(a_configuration("""
<bridge-domains>
<domain>
<name>This-another-clam</name>
<vlan-id>39</vlan-id>
<routing-interface>irb.20</routing-interface>
</domain>
<domain>
<name>WITH-IF-MULTI-IP</name>
<vlan-id>40</vlan-id>
<routing-interface>irb.70</routing-interface>
</domain>
<domain>
<name>This-yet-another-clam</name>
<vlan-id>41</vlan-id>
<routing-interface>irb.40</routing-interface>
</domain>
</bridge-domains>
<interfaces>
<interface>
<name>xe-0/0/1</name>
</interface>
<interface>
<name>irb</name>
<unit>
<name>20</name>
<family>
<inet>
<address>
<name>1.1.1.1/24</name>
</address>
<filter>
<input>
<filter-name>AC-IN</filter-name>
</input>
<output>
<filter-name>AC-OUT</filter-name>
</output>
</filter>
</inet>
</family>
</unit>
<unit>
<name>40</name>
</unit>
<unit>
<name>70</name>
<family>
<inet>
<address>
<name>2.1.1.1/24</name>
</address>
<address>
<name>4.1.1.1/24</name>
</address>
<address>
<name>3.1.1.1/24</name>
</address>
</inet>
</family>
</unit>
</interface>
</interfaces>
"""))
vlan = self.switch.get_vlan(40)
assert_that(vlan.number, equal_to(40))
assert_that(vlan.name, equal_to(None))
assert_that(vlan.access_groups[IN], equal_to(None))
assert_that(vlan.access_groups[OUT], equal_to(None))
vlanip1, vlanip2, vlanip3 = vlan.ips
assert_that(str(vlanip1.ip), equal_to("2.1.1.1"))
assert_that(vlanip1.prefixlen, equal_to(24))
assert_that(str(vlanip2.ip), equal_to("3.1.1.1"))
assert_that(vlanip2.prefixlen, equal_to(24))
assert_that(str(vlanip3.ip), equal_to("4.1.1.1"))
assert_that(vlanip3.prefixlen, equal_to(24))
def test_get_vlan_with_no_interface(self):
self.switch.in_transaction = False
self.netconf_mock.should_receive("get_config").with_args(source="running", filter=is_xml("""
<filter>
<configuration>
<bridge-domains />
<interfaces />
</configuration>
</filter>
""")).and_return(a_configuration("""
<bridge-domains>
<domain>
<name>STANDARD</name>
<vlan-id>10</vlan-id>
<description>my-description</description>
</domain>
</bridge-domains>
"""))
vlan = self.switch.get_vlan(10)
assert_that(vlan.number, equal_to(10))
assert_that(vlan.name, equal_to("my-description"))
assert_that(vlan.access_groups[IN], equal_to(None))
assert_that(vlan.access_groups[OUT], equal_to(None))
assert_that(vlan.ips, has_length(0))
def test_get_vlan_with_unknown_vlan(self):
self.switch.in_transaction = False
self.netconf_mock.should_receive("get_config").with_args(source="running", filter=is_xml("""
<filter>
<configuration>
<bridge-domains />
<interfaces />
</configuration>
</filter>
""")).and_return(a_configuration("""
<bridge-domains>
<domain>
<name>This-another-clam</name>
<vlan-id>39</vlan-id>
</domain>
</bridge-domains>
"""))
with self.assertRaises(UnknownVlan) as expect:
self.switch.get_vlan(10)
assert_that(str(expect.exception), equal_to("Vlan 10 not found"))
def test_get_vlan_with_interface(self):
self.switch.in_transaction = False
self.netconf_mock.should_receive("get_config").with_args(source="running", filter=is_xml("""
<filter>
<configuration>
<bridge-domains />
<interfaces />
</configuration>
</filter>
""")).and_return(a_configuration("""
<bridge-domains>
<domain>
<name>WITH-IF</name>
<vlan-id>20</vlan-id>
<routing-interface>irb.20</routing-interface>
</domain>
</bridge-domains>
<interfaces>
<interface>
<name>xe-0/0/1</name>
<unit>
<name>0</name>
<family>
<bridge>
</bridge>
</family>
</unit>
</interface>
<interface>
<name>xe-0/0/1</name>
</interface>
<interface>
<name>irb</name>
<unit>
<name>20</name>
<family>
<inet>
<address>
<name>1.1.1.1/24</name>
</address>
<filter>
<input>
<filter-name>AC-IN</filter-name>
</input>
<output>
<filter-name>AC-OUT</filter-name>
</output>
</filter>
</inet>
</family>
</unit>
<unit>
<name>40</name>
</unit>
<unit>
<name>70</name>
<family>
<inet>
<address>
<name>2.1.1.1/24</name>
</address>
<address>
<name>4.1.1.1/24</name>
</address>
<address>
<name>3.1.1.1/24</name>
</address>
</inet>
</family>
</unit>
</interface>
</interfaces>
"""))
vlan = self.switch.get_vlan(20)
assert_that(vlan.number, equal_to(20))
assert_that(vlan.name, equal_to(None))
assert_that(vlan.access_groups[IN], equal_to("AC-IN"))
assert_that(vlan.access_groups[OUT], equal_to("AC-OUT"))
assert_that(vlan.ips, has_length(1))
vlan20ip1 = vlan.ips[0]
assert_that(str(vlan20ip1.ip), equal_to("1.1.1.1"))
assert_that(vlan20ip1.prefixlen, equal_to(24))
def test_add_vrrp_success(self):
self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
<filter>
<configuration>
<interfaces>
<interface>
<name>irb</name>
<unit>
<name>1234</name>
</unit>
</interface>
</interfaces>
</configuration>
</filter>
""")).and_return(a_configuration("""
<interfaces>
<interface>
<name>irb</name>
<unit>
<name>1234</name>
<family>
<inet>
<address>
<name>3.3.3.2/27</name>
</address>
</inet>
</family>
</unit>
</interface>
</interfaces>
"""))
self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
<config>
<configuration>
<interfaces>
<interface>
<name>irb</name>
<unit>
<name>1234</name>
<family>
<inet>
<address>
<name>3.3.3.2/27</name>
<vrrp-group>
<name>1</name>
<priority>110</priority>
<preempt>
<hold-time>60</hold-time>
</preempt>
<accept-data/>
<authentication-type>simple</authentication-type>
<authentication-key>VLAN1234</authentication-key>
<track>
<route>
<route_address>0.0.0.0/0</route_address>
<routing-instance>default</routing-instance>
<priority-cost>50</priority-cost>
</route>
</track>
<virtual-address>3.3.3.1</virtual-address>
</vrrp-group>
</address>
</inet>
</family>
</unit>
</interface>
</interfaces>
</configuration>
</config>""")).and_return(an_ok_response())
self.switch.add_vrrp_group(1234, 1, ips=[IPAddress("3.3.3.1")], priority=110, track_id="0.0.0.0/0",
track_decrement=50)
def test_add_vrrp_multiple_ips(self):
self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
<filter>
<configuration>
<interfaces>
<interface>
<name>irb</name>
<unit>
<name>1234</name>
</unit>
</interface>
</interfaces>
</configuration>
</filter>
""")).and_return(a_configuration("""
<interfaces>
<interface>
<name>irb</name>
<unit>
<name>1234</name>
<family>
<inet>
<address>
<name>3.3.3.2/27</name>
</address>
</inet>
</family>
</unit>
</interface>
</interfaces>
"""))
self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
<config>
<configuration>
<interfaces>
<interface>
<name>irb</name>
<unit>
<name>1234</name>
<family>
<inet>
<address>
<name>3.3.3.2/27</name>
<vrrp-group>
<name>1</name>
<priority>110</priority>
<preempt>
<hold-time>60</hold-time>
</preempt>
<accept-data/>
<authentication-type>simple</authentication-type>
<authentication-key>VLAN1234</authentication-key>
<track>
<route>
<route_address>0.0.0.0/0</route_address>
<routing-instance>default</routing-instance>
<priority-cost>50</priority-cost>
</route>
</track>
<virtual-address>3.3.3.1</virtual-address>
<virtual-address>3.3.3.3</virtual-address>
</vrrp-group>
</address>
</inet>
</family>
</unit>
</interface>
</interfaces>
</configuration>
</config>""")).and_return(an_ok_response())
self.switch.add_vrrp_group(1234, 1, ips=[IPAddress("3.3.3.1"), IPAddress("3.3.3.3")], priority=110,
track_id="0.0.0.0/0", track_decrement=50)
def test_add_vrrp_fails_when_vlan_not_found(self):
    """add_vrrp_group must raise UnknownVlan when irb unit 1234 is absent.

    The candidate datastore is queried for interface irb unit 1234; an
    empty configuration in the reply means the vlan has no routing
    interface, so the vlan is treated as unknown.
    """
    # Expect a scoped get_config against the candidate datastore and
    # answer with an empty configuration (vlan does not exist).
    self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
        <filter>
          <configuration>
            <interfaces>
              <interface>
                <name>irb</name>
                <unit>
                  <name>1234</name>
                </unit>
              </interface>
            </interfaces>
          </configuration>
        </filter>
        """)).and_return(a_configuration())
    with self.assertRaises(UnknownVlan):
        self.switch.add_vrrp_group(1234, 1, ips=[IPAddress("3.3.3.1")], priority=110, track_id="0.0.0.0/0",
                                   track_decrement=50)
def test_add_vrrp_adds_it_to_the_good_address(self):
self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
<filter>
<configuration>
<interfaces>
<interface>
<name>irb</name>
<unit>
<name>1234</name>
</unit>
</interface>
</interfaces>
</configuration>
</filter>
""")).and_return(a_configuration("""
<interfaces>
<interface>
<name>irb</name>
<unit>
<name>1234</name>
<family>
<inet>
<address>
<name>3.3.3.2/27</name>
</address>
<address>
<name>4.4.4.2/27</name>
</address>
<address>
<name>5.5.5.2/27</name>
</address>
</inet>
</family>
</unit>
</interface>
</interfaces>
"""))
self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
<config>
<configuration>
<interfaces>
<interface>
<name>irb</name>
<unit>
<name>1234</name>
<family>
<inet>
<address>
<name>4.4.4.2/27</name>
<vrrp-group>
<name>1</name>
<priority>110</priority>
<preempt>
<hold-time>60</hold-time>
</preempt>
<accept-data/>
<authentication-type>simple</authentication-type>
<authentication-key>VLAN1234</authentication-key>
<track>
<route>
<route_address>0.0.0.0/0</route_address>
<routing-instance>default</routing-instance>
<priority-cost>50</priority-cost>
</route>
</track>
<virtual-address>4.4.4.1</virtual-address>
</vrrp-group>
</address>
</inet>
</family>
</unit>
</interface>
</interfaces>
</configuration>
</config>""")).and_return(an_ok_response())
self.switch.add_vrrp_group(1234, 1, ips=[IPAddress("4.4.4.1")], priority=110, track_id="0.0.0.0/0",
track_decrement=50)
def test_add_vrrp_adds_it_if_all_ips_are_within_a_single_address(self):
self.netconf_mock.should_receive("get_config").and_return(a_configuration("""
<interfaces>
<interface>
<name>irb</name>
<unit>
<name>1234</name>
<family>
<inet>
<address>
<name>1.1.1.1/27</name>
</address>
</inet>
</family>
</unit>
</interface>
</interfaces>
"""))
self.netconf_mock.should_receive("edit_config").once()
self.switch.add_vrrp_group(1234, 1, ips=[IPAddress("1.1.1.2"), IPAddress("1.1.1.3")],
priority=110, track_id="0.0.0.0/0",
track_decrement=50)
def test_add_vrrp_fails_when_the_ips_doesnt_belong_to_an_existing_address(self):
self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
<filter>
<configuration>
<interfaces>
<interface>
<name>irb</name>
<unit>
<name>1234</name>
</unit>
</interface>
</interfaces>
</configuration>
</filter>
""")).and_return(a_configuration("""
<interfaces>
<interface>
<name>irb</name>
<unit>
<name>1234</name>
<family>
<inet>
<address>
<name>3.3.3.2/27</name>
</address>
</inet>
</family>
</unit>
</interface>
</interfaces>
"""))
with self.assertRaises(UnknownIP):
self.switch.add_vrrp_group(1234, 1, ips=[IPAddress("4.4.4.1")], priority=110, track_id="0.0.0.0/0",
track_decrement=50)
def test_add_vrrp_fail_if_all_ips_are_not_in_the_same_address(self):
self.netconf_mock.should_receive("get_config").and_return(a_configuration("""
<interfaces>
<interface>
<name>irb</name>
<unit>
<name>1234</name>
<family>
<inet>
<address>
<name>1.1.1.1/27</name>
</address>
<address>
<name>2.2.2.2/27</name>
</address>
</inet>
</family>
</unit>
</interface>
</interfaces>
"""))
with self.assertRaises(UnknownIP):
self.switch.add_vrrp_group(1234, 1, ips=[IPAddress("1.1.1.2"), IPAddress("2.2.2.3")],
priority=110, track_id="0.0.0.0/0",
track_decrement=50)
def test_add_vrrp_fails_when_any_of_the_ips_doesnt_belong_to_an_existing_address(self):
self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
<filter>
<configuration>
<interfaces>
<interface>
<name>irb</name>
<unit>
<name>1234</name>
</unit>
</interface>
</interfaces>
</configuration>
</filter>
""")).and_return(a_configuration("""
<interfaces>
<interface>
<name>irb</name>
<unit>
<name>1234</name>
<family>
<inet>
<address>
<name>3.3.3.2/27</name>
</address>
</inet>
</family>
</unit>
</interface>
</interfaces>
"""))
with self.assertRaises(UnknownIP):
self.switch.add_vrrp_group(1234, 1, ips=[IPAddress("3.3.3.1"), IPAddress("4.4.4.1")], priority=110, track_id="0.0.0.0/0",
track_decrement=50)
def test_remove_vrrp_success(self):
self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
<filter>
<configuration>
<interfaces>
<interface>
<name>irb</name>
<unit>
<name>1234</name>
</unit>
</interface>
</interfaces>
</configuration>
</filter>
""")).and_return(a_configuration("""
<interfaces>
<interface>
<name>irb</name>
<unit>
<name>1234</name>
<family>
<inet>
<address>
<name>192.0.1.1/27</name>
<vrrp-group>
<name>1</name>
<virtual-address>192.0.1.2</virtual-address>
</vrrp-group>
</address>
</inet>
</family>
</unit>
</interface>
</interfaces>
"""))
self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
<config>
<configuration>
<interfaces>
<interface>
<name>irb</name>
<unit>
<name>1234</name>
<family>
<inet>
<address>
<name>192.0.1.1/27</name>
<vrrp-group operation="delete">
<name>1</name>
</vrrp-group>
</address>
</inet>
</family>
</unit>
</interface>
</interfaces>
</configuration>
</config>""")).and_return(an_ok_response())
self.switch.remove_vrrp_group(1234, 1)
def test_remove_vrrp_with_invalid_group_id(self):
self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
<filter>
<configuration>
<interfaces>
<interface>
<name>irb</name>
<unit>
<name>1234</name>
</unit>
</interface>
</interfaces>
</configuration>
</filter>
""")).and_return(a_configuration("""
<interfaces>
<interface>
<name>irb</name>
<unit>
<name>1234</name>
<family>
<inet>
<address>
<name>192.0.1.1/27</name>
<vrrp-group>
<name>99</name>
<virtual-address>192.0.1.2</virtual-address>
</vrrp-group>
</address>
</inet>
</family>
</unit>
</interface>
</interfaces>"""))
self.netconf_mock.should_receive("edit_config").never()
with self.assertRaises(VrrpDoesNotExistForVlan) as expect:
self.switch.remove_vrrp_group(1234, 1)
assert_that(str(expect.exception), equal_to("Vrrp group 1 does not exist for vlan 1234"))
def test_remove_vrrp_from_unknown_vlan(self):
    """remove_vrrp_group must raise UnknownVlan and never write config.

    An empty candidate configuration for irb unit 1234 means the vlan is
    unknown; edit_config is explicitly expected never to be called.
    """
    self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
        <filter>
          <configuration>
            <interfaces>
              <interface>
                <name>irb</name>
                <unit>
                  <name>1234</name>
                </unit>
              </interface>
            </interfaces>
          </configuration>
        </filter>
        """)).and_return(a_configuration())
    # The device must not be modified when the vlan lookup fails.
    self.netconf_mock.should_receive("edit_config").never()
    with self.assertRaises(UnknownVlan) as expect:
        self.switch.remove_vrrp_group(1234, 2)
    assert_that(str(expect.exception), equal_to("Vlan 1234 not found"))
def test_add_ip_to_vlan(self):
self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
<filter>
<configuration>
<bridge-domains>
<domain>
<vlan-id>1234</vlan-id>
</domain>
</bridge-domains>
<interfaces>
<interface>
<name>irb</name>
<unit>
<name>1234</name>
</unit>
</interface>
</interfaces>
</configuration>
</filter>
""")).and_return(a_configuration("""
<bridge-domains>
<domain>
<name>VLAN1234</name>
<vlan-id>1234</vlan-id>
</domain>
</bridge-domains>
<interfaces/>
"""))
self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
<config>
<configuration>
<bridge-domains>
<domain>
<name>VLAN1234</name>
<vlan-id>1234</vlan-id>
<routing-interface>irb.1234</routing-interface>
</domain>
</bridge-domains>
<interfaces>
<interface>
<name>irb</name>
<unit>
<name>1234</name>
<family>
<inet>
<address>
<name>3.3.3.2/27</name>
</address>
</inet>
</family>
</unit>
</interface>
</interfaces>
</configuration>
</config>
""")).and_return(an_ok_response())
self.switch.add_ip_to_vlan(vlan_number=1234, ip_network=IPNetwork("3.3.3.2/27"))
def test_add_ip_to_vlan_unknown_vlan_raises(self):
self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
<filter>
<configuration>
<bridge-domains>
<domain>
<vlan-id>1234</vlan-id>
</domain>
</bridge-domains>
<interfaces>
<interface>
<name>irb</name>
<unit>
<name>1234</name>
</unit>
</interface>
</interfaces>
</configuration>
</filter>
""")).and_return(a_configuration())
with self.assertRaises(UnknownVlan):
self.switch.add_ip_to_vlan(vlan_number=1234, ip_network=IPNetwork("3.3.3.2/27"))
def test_add_ip_to_vlan_ip_already_exists_in_vlan_raises(self):
self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
<filter>
<configuration>
<bridge-domains>
<domain>
<vlan-id>1234</vlan-id>
</domain>
</bridge-domains>
<interfaces>
<interface>
<name>irb</name>
<unit>
<name>1234</name>
</unit>
</interface>
</interfaces>
</configuration>
</filter>
""")).and_return(a_configuration("""
<bridge-domains>
<domain>
<name>VLAN1234</name>
<vlan-id>1234</vlan-id>
</domain>
</bridge-domains>
<interfaces>
<interface>
<name>irb</name>
<unit>
<name>1234</name>
<family>
<inet>
<address>
<name>3.3.3.2/27</name>
</address>
</inet>
</family>
</unit>
</interface>
</interfaces>
"""))
with self.assertRaises(IPAlreadySet):
self.switch.add_ip_to_vlan(vlan_number=1234, ip_network=IPNetwork("3.3.3.2/27"))
def test_port_mode_access(self):
self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
<filter>
<configuration>
<interfaces/>
<bridge-domains/>
</configuration>
</filter>
""")).and_return(a_configuration("""
<interfaces>
<interface>
<name>xe-0/0/6</name>
<unit>
<name>0</name>
<family>
<bridge>
</bridge>
</family>
</unit>
</interface>
</interfaces>
<bridge-domains/>
"""))
self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
<config>
<configuration>
<interfaces>
<interface>
<name>xe-0/0/6</name>
<unit>
<name>0</name>
<family>
<bridge>
<interface-mode>access</interface-mode>
</bridge>
</family>
</unit>
</interface>
</interfaces>
</configuration>
</config>
""")).and_return(an_ok_response())
self.switch.set_access_mode("xe-0/0/6")
def test_port_mode_access_with_no_mode_and_1_vlan_does_not_remove_it(self):
self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
<filter>
<configuration>
<interfaces/>
<bridge-domains/>
</configuration>
</filter>
""")).and_return(a_configuration("""
<interfaces>
<interface>
<name>xe-0/0/6</name>
<unit>
<name>0</name>
<family>
<bridge>
<vlan-id>2998</vlan-id>
</bridge>
</family>
</unit>
</interface>
</interfaces>
<bridge-domains/>
"""))
self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
<config>
<configuration>
<interfaces>
<interface>
<name>xe-0/0/6</name>
<unit>
<name>0</name>
<family>
<bridge>
<interface-mode>access</interface-mode>
</bridge>
</family>
</unit>
</interface>
</interfaces>
</configuration>
</config>
""")).and_return(an_ok_response())
self.switch.set_access_mode("xe-0/0/6")
def test_set_access_vlan_on_interface_with_access_mode_and_no_vlan_succeeds_easily(self):
self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
<filter>
<configuration>
<interfaces/>
<bridge-domains/>
</configuration>
</filter>
""")).and_return(a_configuration("""
<bridge-domains>
<domain>
<name>PATATE</name>
<vlan-id>1000</vlan-id>
</domain>
</bridge-domains>
<interfaces>
<interface>
<name>xe-0/0/6</name>
<unit>
<name>0</name>
<family>
<bridge>
<interface-mode>access</interface-mode>
</bridge>
</family>
</unit>
</interface>
</interfaces>
"""))
self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
<config>
<configuration>
<interfaces>
<interface>
<name>xe-0/0/6</name>
<unit>
<name>0</name>
<family>
<bridge>
<vlan-id>1000</vlan-id>
</bridge>
</family>
</unit>
</interface>
</interfaces>
</configuration>
</config>
""")).and_return(an_ok_response())
self.switch.set_access_vlan("xe-0/0/6", 1000)
def test_set_access_vlan_on_interface_that_already_has_it_does_nothing(self):
self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
<filter>
<configuration>
<interfaces/>
<bridge-domains/>
</configuration>
</filter>
""")).and_return(a_configuration("""
<bridge-domains>
<domain>
<name>PATATE</name>
<vlan-id>1000</vlan-id>
</domain>
</bridge-domains>
<interfaces>
<interface>
<name>xe-0/0/6</name>
<unit>
<name>0</name>
<family>
<bridge>
<interface-mode>access</interface-mode>
<vlan-id>1000</vlan-id>
</bridge>
</family>
</unit>
</interface>
</interfaces>
"""))
self.netconf_mock.should_receive("edit_config").never()
self.switch.set_access_vlan("xe-0/0/6", 1000)
def test_port_mode_trunk_with_no_port_mode_or_vlan_set_just_sets_the_port_mode(self):
self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
<filter>
<configuration>
<interfaces>
<interface>
<name>xe-0/0/6</name>
</interface>
</interfaces>
<bridge-domains/>
</configuration>
</filter>
""")).and_return(a_configuration("""
<interfaces>
<interface>
<name>xe-0/0/6</name>
<unit>
<name>0</name>
<family>
<bridge>
</bridge>
</family>
</unit>
</interface>
</interfaces>
<bridge-domains/>
"""))
self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
<config>
<configuration>
<interfaces>
<interface>
<name>xe-0/0/6</name>
<unit>
<name>0</name>
<family>
<bridge>
<interface-mode>trunk</interface-mode>
</bridge>
</family>
</unit>
</interface>
</interfaces>
</configuration>
</config>
""")).and_return(an_ok_response())
self.switch.set_trunk_mode("xe-0/0/6")
def test_add_trunk_vlan_on_interface_adds_to_the_list(self):
self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
<filter>
<configuration>
<interfaces/>
<bridge-domains/>
</configuration>
</filter>
""")).and_return(a_configuration("""
<bridge-domains>
<domain>
<name>PATATE</name>
<vlan-id>1000</vlan-id>
</domain>
</bridge-domains>
<interfaces>
<interface>
<name>xe-0/0/6</name>
<unit>
<name>0</name>
<family>
<bridge>
<interface-mode>trunk</interface-mode>
<vlan-id-list>2000</vlan-id-list>
<vlan-id-list>2100-2200</vlan-id-list>
</bridge>
</family>
</unit>
</interface>
</interfaces>
"""))
self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
<config>
<configuration>
<interfaces>
<interface>
<name>xe-0/0/6</name>
<unit>
<name>0</name>
<family>
<bridge>
<vlan-id-list>1000</vlan-id-list>
</bridge>
</family>
</unit>
</interface>
</interfaces>
</configuration>
</config>
""")).and_return(an_ok_response())
self.switch.add_trunk_vlan("xe-0/0/6", 1000)
def test_add_trunk_vlan_on_interface_that_has_no_port_mode_and_no_vlan_sets_it(self):
self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
<filter>
<configuration>
<interfaces/>
<bridge-domains/>
</configuration>
</filter>
""")).and_return(a_configuration("""
<bridge-domains>
<domain>
<name>PATATE</name>
<vlan-id>1000</vlan-id>
</domain>
</bridge-domains>
<interfaces>
<interface>
<name>xe-0/0/6</name>
<unit>
<name>0</name>
<family>
<bridge>
</bridge>
</family>
</unit>
</interface>
</interfaces>
"""))
self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
<config>
<configuration>
<interfaces>
<interface>
<name>xe-0/0/6</name>
<unit>
<name>0</name>
<family>
<bridge>
<interface-mode>trunk</interface-mode>
<vlan-id-list>1000</vlan-id-list>
</bridge>
</family>
</unit>
</interface>
</interfaces>
</configuration>
</config>
""")).and_return(an_ok_response())
self.switch.add_trunk_vlan("xe-0/0/6", 1000)
def test_add_trunk_vlan_on_interface_in_access_mode_raises(self):
self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
<filter>
<configuration>
<interfaces/>
<bridge-domains/>
</configuration>
</filter>
""")).and_return(a_configuration("""
<bridge-domains>
<domain>
<name>PATATE</name>
<vlan-id>1000</vlan-id>
</domain>
</bridge-domains>
<interfaces>
<interface>
<name>xe-0/0/6</name>
<unit>
<name>0</name>
<family>
<bridge>
<vlan-id-list>500</vlan-id-list>
<interface-mode>access</interface-mode>
</bridge>
</family>
</unit>
</interface>
</interfaces>
"""))
self.netconf_mock.should_receive("edit_config").never()
with self.assertRaises(InterfaceInWrongPortMode) as expect:
self.switch.add_trunk_vlan("xe-0/0/6", 1000)
assert_that(str(expect.exception), contains_string("Operation cannot be performed on a access mode interface"))
def test_add_trunk_vlan_on_unknown_vlan_raises(self):
self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
<filter>
<configuration>
<interfaces/>
<bridge-domains/>
</configuration>
</filter>
""")).and_return(a_configuration("""
<interfaces>
<interface>
<name>xe-0/0/6</name>
<unit>
<name>0</name>
<family>
<bridge>
<interface-mode>trunk</interface-mode>
</bridge>
</family>
</unit>
</interface>
</interfaces>
"""))
self.netconf_mock.should_receive("edit_config").never()
with self.assertRaises(UnknownVlan) as expect:
self.switch.add_trunk_vlan("xe-0/0/6", 1000)
assert_that(str(expect.exception), contains_string("Vlan 1000 not found"))
def test_remove_ip_from_vlan(self):
self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
<filter>
<configuration>
<interfaces>
<interface>
<name>irb</name>
<unit>
<name>1234</name>
</unit>
</interface>
</interfaces>
</configuration>
</filter>
""")).and_return(a_configuration("""
<interfaces>
<interface>
<name>irb</name>
<unit>
<name>1234</name>
<family>
<inet>
<address>
<name>3.3.3.2/27</name>
</address>
</inet>
</family>
</unit>
</interface>
</interfaces>
"""))
self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
<config>
<configuration>
<interfaces>
<interface>
<name>irb</name>
<unit>
<name>1234</name>
<family>
<inet>
<address operation="delete">
<name>3.3.3.2/27</name>
</address>
</inet>
</family>
</unit>
</interface>
</interfaces>
</configuration>
</config>""")).and_return(an_ok_response())
self.switch.remove_ip_from_vlan(vlan_number=1234, ip_network=IPNetwork("3.3.3.2/27"))
def test_remove_ip_from_vlan_ip_not_found(self):
self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
<filter>
<configuration>
<interfaces>
<interface>
<name>irb</name>
<unit>
<name>1234</name>
</unit>
</interface>
</interfaces>
</configuration>
</filter>
""")).and_return(a_configuration("""
<interfaces>
<interface>
<name>irb</name>
<unit>
<name>1234</name>
<family>
<inet>
<address>
<name>4.4.4.2/27</name>
</address>
</inet>
</family>
</unit>
</interface>
</interfaces>
"""))
with self.assertRaises(UnknownIP):
self.switch.remove_ip_from_vlan(vlan_number=1234, ip_network=IPNetwork("3.3.3.2/27"))
def test_remove_ip_from_vlan_unknown_vlan_raises(self):
    """remove_ip_from_vlan must raise UnknownVlan when irb.1234 is absent.

    Same irb unit 1234 candidate lookup as the other vlan tests; an empty
    configuration reply means the vlan is unknown.
    """
    self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
        <filter>
          <configuration>
            <interfaces>
              <interface>
                <name>irb</name>
                <unit>
                  <name>1234</name>
                </unit>
              </interface>
            </interfaces>
          </configuration>
        </filter>
        """)).and_return(a_configuration())
    with self.assertRaises(UnknownVlan):
        self.switch.remove_ip_from_vlan(vlan_number=1234, ip_network=IPNetwork("3.3.3.2/27"))
def test_unset_interface_access_vlan_removes_the_vlan_id(self):
self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
<filter>
<configuration>
<interfaces>
<interface>
<name>xe-0/0/1</name>
</interface>
</interfaces>
</configuration>
</filter>
""")).and_return(a_configuration("""
<interfaces>
<interface>
<name>xe-0/0/1</name>
<unit>
<name>0</name>
<family>
<bridge>
<interface-mode>access</interface-mode>
<vlan-id>999</vlan-id>
</bridge>
</family>
</unit>
</interface>
</interfaces>
"""))
self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
<config>
<configuration>
<interfaces>
<interface>
<name>xe-0/0/1</name>
<unit>
<name>0</name>
<family>
<bridge>
<vlan-id operation="delete" />
</bridge>
</family>
</unit>
</interface>
</interfaces>
</configuration>
</config>""")).and_return(an_ok_response())
self.switch.unset_interface_access_vlan("xe-0/0/1")
def test_unset_interface_access_vlan_fails_when_not_set(self):
self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
<filter>
<configuration>
<interfaces>
<interface>
<name>xe-0/0/1</name>
</interface>
</interfaces>
</configuration>
</filter>
""")).and_return(a_configuration("""
<interfaces>
<interface>
<name>xe-0/0/1</name>
<unit>
<name>0</name>
<family>
<bridge>
<interface-mode>access</interface-mode>
</bridge>
</family>
</unit>
</interface>
</interfaces>
"""))
self.netconf_mock.should_receive("edit_config").never()
with self.assertRaises(AccessVlanNotSet) as expect:
self.switch.unset_interface_access_vlan("xe-0/0/1")
assert_that(str(expect.exception), equal_to("Access Vlan is not set on interface xe-0/0/1"))
def test_unset_interface_access_vlan_unknown_interface_raises(self):
    """unset_interface_access_vlan on a missing interface raises.

    The candidate config is filtered on interface xe-0/0/1; an empty
    reply means the interface is unknown, and no edit_config may occur.
    """
    self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
        <filter>
          <configuration>
            <interfaces>
              <interface>
                <name>xe-0/0/1</name>
              </interface>
            </interfaces>
          </configuration>
        </filter>
        """)).and_return(a_configuration())
    # Failure must happen before any write to the device.
    self.netconf_mock.should_receive("edit_config").never()
    with self.assertRaises(UnknownInterface) as expect:
        self.switch.unset_interface_access_vlan("xe-0/0/1")
    assert_that(str(expect.exception), equal_to("Unknown interface xe-0/0/1"))
def test_get_interface(self):
    """An interface whose bridge family is empty yields all defaults.

    Outside a transaction the running datastore is consulted
    (source="running").  With no interface-mode, vlan ids, mtu or
    negotiation configured, the returned interface reports access mode
    and every other attribute unset.
    """
    self.switch.in_transaction = False
    self.netconf_mock.should_receive("get_config").with_args(source="running", filter=is_xml("""
        <filter>
          <configuration>
            <interfaces>
              <interface>
                <name>xe-0/0/1</name>
              </interface>
            </interfaces>
            <bridge-domains/>
          </configuration>
        </filter>
        """)).and_return(a_configuration("""
        <interfaces>
          <interface>
            <name>xe-0/0/1</name>
            <unit>
              <name>0</name>
              <family>
                <bridge>
                </bridge>
              </family>
            </unit>
          </interface>
        </interfaces>
        <bridge-domains/>
        """))
    interface = self.switch.get_interface('xe-0/0/1')
    # Defaults for a bare bridge family: enabled, access mode, no vlans.
    assert_that(interface.name, equal_to("xe-0/0/1"))
    assert_that(interface.shutdown, equal_to(False))
    assert_that(interface.port_mode, equal_to(ACCESS))
    assert_that(interface.access_vlan, equal_to(None))
    assert_that(interface.trunk_native_vlan, equal_to(None))
    assert_that(interface.trunk_vlans, equal_to([]))
    assert_that(interface.auto_negotiation, equal_to(None))
    assert_that(interface.mtu, equal_to(None))
def test_get_interfaces_lists_configuration_less_interfaces(self):
self.switch.in_transaction = False
self.netconf_mock.should_receive("rpc").with_args(is_xml("""
<get-interface-information>
<terse/>
</get-interface-information>
""")).and_return(an_rpc_response(textwrap.dedent("""
<interface-information style="terse">
<physical-interface>
<name>
xe-0/0/1
</name>
<admin-status>
up
</admin-status>
<oper-status>
down
</oper-status>
</physical-interface>
<physical-interface>
<name>
xe-0/0/2
</name>
<admin-status>
down
</admin-status>
<oper-status>
down
</oper-status>
</physical-interface>
</interface-information>
""")))
self.netconf_mock.should_receive("get_config").with_args(source="running", filter=is_xml("""
<filter>
<configuration>
<interfaces />
<bridge-domains />
</configuration>
</filter>
""")).and_return(a_configuration("""
<interfaces />
<bridge-domains/>
"""))
if1, if2 = self.switch.get_interfaces()
assert_that(if1.name, equal_to("xe-0/0/1"))
assert_that(if1.shutdown, equal_to(False))
assert_that(if1.port_mode, equal_to(ACCESS))
assert_that(if1.access_vlan, equal_to(None))
assert_that(if1.trunk_native_vlan, equal_to(None))
assert_that(if1.trunk_vlans, equal_to([]))
assert_that(if2.name, equal_to("xe-0/0/2"))
assert_that(if2.shutdown, equal_to(True))
def test_get_nonexistent_interface_raises(self):
self.switch.in_transaction = False
self.netconf_mock.should_receive("get_config").with_args(source="running", filter=is_xml("""
<filter>
<configuration>
<interfaces>
<interface>
<name>xe-0/0/INEXISTENT</name>
</interface>
</interfaces>
<bridge-domains/>
</configuration>
</filter>
""")).and_return(a_configuration("""
<interfaces/>
<bridge-domains/>
"""))
self.netconf_mock.should_receive("rpc").with_args(is_xml("""
<get-interface-information>
<terse/>
</get-interface-information>
""")).and_return(an_rpc_response(textwrap.dedent("""
<interface-information style="terse">
<physical-interface>
<name>
xe-0/0/1
</name>
<admin-status>
down
</admin-status>
<oper-status>
down
</oper-status>
</physical-interface>
</interface-information>
""")))
with self.assertRaises(UnknownInterface) as expect:
self.switch.get_interface('xe-0/0/INEXISTENT')
assert_that(str(expect.exception), equal_to("Unknown interface xe-0/0/INEXISTENT"))
def test_get_unconfigured_interface_could_be_disabled(self):
self.switch.in_transaction = False
self.netconf_mock.should_receive("get_config").with_args(source="running", filter=is_xml("""
<filter>
<configuration>
<interfaces>
<interface>
<name>xe-0/0/27</name>
</interface>
</interfaces>
<bridge-domains/>
</configuration>
</filter>
""")).and_return(a_configuration("""
<interfaces/>
<bridge-domains/>
"""))
self.netconf_mock.should_receive("rpc").with_args(is_xml("""
<get-interface-information>
<terse/>
</get-interface-information>
""")).and_return(an_rpc_response(textwrap.dedent("""
<interface-information style="terse">
<physical-interface>
<name>
xe-0/0/27
</name>
<admin-status>
down
</admin-status>
<oper-status>
down
</oper-status>
</physical-interface>
</interface-information>
""")))
assert_that(self.switch.get_interface('xe-0/0/27').shutdown, equal_to(True))
| 37.474892
| 183
| 0.432338
| 6,526
| 78,360
| 5.007202
| 0.054091
| 0.006059
| 0.037182
| 0.05077
| 0.891483
| 0.874438
| 0.852771
| 0.836521
| 0.816354
| 0.799829
| 0
| 0.031689
| 0.449081
| 78,360
| 2,090
| 184
| 37.492823
| 0.725249
| 0.006968
| 0
| 0.886635
| 0
| 0.00468
| 0.703766
| 0.081375
| 0
| 0
| 0
| 0
| 0.054602
| 1
| 0.026001
| false
| 0.00104
| 0.00832
| 0
| 0.034841
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
a04820fdd3501c63c9c740593a7cb269b0ba3cb9
| 2,570
|
py
|
Python
|
myproject/core/tests/test_views_product.py
|
rg3915/django-example
|
67e82767a748f9ab1ae23079f2b34b86ce77cf06
|
[
"MIT"
] | 2
|
2015-10-24T10:50:36.000Z
|
2019-02-05T18:31:30.000Z
|
myproject/core/tests/test_views_product.py
|
rg3915/django-example
|
67e82767a748f9ab1ae23079f2b34b86ce77cf06
|
[
"MIT"
] | null | null | null |
myproject/core/tests/test_views_product.py
|
rg3915/django-example
|
67e82767a748f9ab1ae23079f2b34b86ce77cf06
|
[
"MIT"
] | 2
|
2018-06-27T14:28:27.000Z
|
2019-02-03T17:36:18.000Z
|
from django.test import TestCase
from django.core.urlresolvers import reverse as r
class ProductTest(TestCase):
    """Smoke tests for the product creation view."""

    def setUp(self):
        """Fetch the product_add page once before each test."""
        url = r('product_add')
        self.resp = self.client.get(url)

    def test_get(self):
        """GET /product/add/ must return status code 200."""
        self.assertEqual(200, self.resp.status_code)

    def test_template(self):
        """Response should be a rendered template."""
        template = 'core/product/product_create_form.html'
        self.assertTemplateUsed(self.resp, template)

    def test_html(self):
        """Html must contain input controls."""
        response = self.resp
        self.assertContains(response, '<form')
        self.assertContains(response, '<input', 10)
        self.assertContains(response, 'type="text"', 2)
        self.assertContains(response, 'type="submit"')

    def test_csrf(self):
        """Html must contain csrf token."""
        self.assertContains(self.resp, 'csrfmiddlewaretoken')
class BrandTest(TestCase):
    """Smoke tests for the brand creation view."""

    def setUp(self):
        """Fetch the brand_add page once before each test."""
        url = r('brand_add')
        self.resp = self.client.get(url)

    def test_get(self):
        """GET /brand/add/ must return status code 200."""
        self.assertEqual(200, self.resp.status_code)

    def test_template(self):
        """Response should be a rendered template."""
        template = 'core/product/brand_create_form.html'
        self.assertTemplateUsed(self.resp, template)

    def test_html(self):
        """Html must contain input controls."""
        response = self.resp
        self.assertContains(response, '<form')
        self.assertContains(response, '<input', 2)
        self.assertContains(response, 'type="text"', 1)
        self.assertContains(response, 'type="submit"')

    def test_csrf(self):
        """Html must contain csrf token."""
        self.assertContains(self.resp, 'csrfmiddlewaretoken')
class CategoryTest(TestCase):
    """Smoke tests for the category creation view."""

    def setUp(self):
        """Fetch the category_add page once before each test."""
        url = r('category_add')
        self.resp = self.client.get(url)

    def test_get(self):
        """GET /category/add/ must return status code 200."""
        self.assertEqual(200, self.resp.status_code)

    def test_template(self):
        """Response should be a rendered template."""
        template = 'core/product/category_create_form.html'
        self.assertTemplateUsed(self.resp, template)

    def test_html(self):
        """Html must contain input controls."""
        response = self.resp
        self.assertContains(response, '<form')
        self.assertContains(response, '<input', 2)
        self.assertContains(response, 'type="text"', 1)
        self.assertContains(response, 'type="submit"')

    def test_csrf(self):
        """Html must contain csrf token."""
        self.assertContains(self.resp, 'csrfmiddlewaretoken')
| 31.728395
| 64
| 0.651751
| 313
| 2,570
| 5.27476
| 0.169329
| 0.116293
| 0.199879
| 0.23622
| 0.897638
| 0.897638
| 0.840097
| 0.840097
| 0.840097
| 0.840097
| 0
| 0.012563
| 0.225681
| 2,570
| 80
| 65
| 32.125
| 0.817085
| 0.175486
| 0
| 0.728814
| 0
| 0
| 0.289494
| 0.042802
| 0
| 0
| 0
| 0
| 0.355932
| 1
| 0.254237
| false
| 0
| 0.033898
| 0
| 0.338983
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
a0c2ce742622015499d1e0163a0906061ffe4f9f
| 3,706
|
py
|
Python
|
AATCC/lab-report/w2/anagram-solution-test.py
|
kancheng/kan-cs-report-in-2022
|
2a1e1eaa515349d59803c7831a7bd4cbea890a44
|
[
"MIT"
] | null | null | null |
AATCC/lab-report/w2/anagram-solution-test.py
|
kancheng/kan-cs-report-in-2022
|
2a1e1eaa515349d59803c7831a7bd4cbea890a44
|
[
"MIT"
] | null | null | null |
AATCC/lab-report/w2/anagram-solution-test.py
|
kancheng/kan-cs-report-in-2022
|
2a1e1eaa515349d59803c7831a7bd4cbea890a44
|
[
"MIT"
] | null | null | null |
import time

# Time EX 1 (the checking-off strategy) using CPU process time.
start = time.process_time()

def anagramSolution1(s1, s2):
    """Return True when s1 and s2 are anagrams (checking-off strategy).

    Each character of s1 is linearly searched for in a mutable copy of
    s2 and crossed out (set to None) when found, so the cost is O(n^2).
    """
    remaining = list(s2)
    matched_all = True
    i = 0
    while i < len(s1) and matched_all:
        j = 0
        hit = False
        while j < len(remaining) and not hit:
            if remaining[j] == s1[i]:
                hit = True
            else:
                j += 1
        if hit:
            remaining[j] = None
            i += 1
        else:
            matched_all = False
    # Every character of s2 must also have been crossed off.
    return matched_all and not [c for c in remaining if c]

print(anagramSolution1('eat', 'eat'))
print(anagramSolution1('eat', 'ade'))
end = time.process_time()
print("Process Time: time of EX 1 is %.5f" % float(end-start))
# Time EX 1 again using the wall-clock performance counter.
start = time.perf_counter()

def anagramSolution1(s1, s2):
    """Checking-off strategy: cross characters of s1 off a copy of s2.

    O(n^2): every character of s1 triggers a linear scan of the copy.
    """
    pool = list(s2)
    ok = True
    k = 0
    while ok and k < len(s1):
        target = s1[k]
        idx = None
        for p, ch in enumerate(pool):
            if ch == target:
                idx = p
                break
        if idx is None:
            ok = False
        else:
            pool[idx] = None
            k += 1
    # Anagrams also require that no character of s2 is left over.
    leftover = [ch for ch in pool if ch]
    return ok and len(leftover) == 0

print(anagramSolution1('eat', 'eat'))
print(anagramSolution1('eat', 'ade'))
end = time.perf_counter()
print("Perf Counter: time of EX 1 is %.5f" % float(end-start))
# Time EX 2 (the sort-and-compare strategy) using CPU process time.
start = time.process_time()

def anagramSolution2(s1, s2):
    """Return True when s1 and s2 sort to the same character sequence.

    Both strings are sorted and then compared position-by-position over
    len(s1) entries, so the two sorts dominate: O(n log n).
    """
    left = sorted(s1)
    right = sorted(s2)
    idx = 0
    same = True
    while idx < len(s1) and same:
        if left[idx] == right[idx]:
            idx += 1
        else:
            same = False
    return same

print(anagramSolution2('eat', 'eat'))
print(anagramSolution2('eat', 'ade'))
end = time.process_time()
print("Process Time: time of EX 2 is %.5f" % float(end-start))
# Time EX 2 again using the wall-clock performance counter.
start = time.perf_counter()

def anagramSolution2(s1, s2):
    """Sort both strings and compare the first len(s1) positions."""
    a = sorted(s1)
    b = sorted(s2)
    for k in range(len(s1)):
        if a[k] != b[k]:
            return False
    return True

print(anagramSolution2('eat', 'eat'))
print(anagramSolution2('eat', 'ade'))
end = time.perf_counter()
print("Perf Counter: time of EX 2 is %.5f" % float(end-start))
# Time EX 3 (the letter-counting strategy) using CPU process time.
start = time.process_time()

def anagramSolution3(s1, s2):
    """Return True when s1 and s2 have identical a-z letter tallies.

    One 26-bucket count per string, then a positional comparison: O(n).
    Assumes lowercase ASCII letters only.
    """
    base = ord('a')
    tally1 = [0] * 26
    tally2 = [0] * 26
    for ch in s1:
        tally1[ord(ch) - base] += 1
    for ch in s2:
        tally2[ord(ch) - base] += 1
    for a, b in zip(tally1, tally2):
        if a != b:
            return False
    return True

print(anagramSolution3('eat', 'eat'))
print(anagramSolution3('eat', 'ade'))
end = time.process_time()
print("Process Time: time of EX 3 is %.5f" % float(end-start))
# Time EX 3 again using the wall-clock performance counter.
start = time.perf_counter()

def anagramSolution3(s1, s2):
    """Counting strategy: the two 26-bucket tallies must match exactly."""
    counts = [[0] * 26, [0] * 26]
    for slot, text in enumerate((s1, s2)):
        for ch in text:
            counts[slot][ord(ch) - ord('a')] += 1
    return counts[0] == counts[1]

print(anagramSolution3('eat', 'eat'))
print(anagramSolution3('eat', 'ade'))
end = time.perf_counter()
print("Perf Counter: time of EX 3 is %.5f" % float(end-start))
| 25.916084
| 62
| 0.546681
| 506
| 3,706
| 3.980237
| 0.116601
| 0.049156
| 0.044687
| 0.038729
| 0.990566
| 0.990566
| 0.956306
| 0.950348
| 0.950348
| 0.922046
| 0
| 0.059929
| 0.311117
| 3,706
| 142
| 63
| 26.098592
| 0.728946
| 0.011063
| 0
| 0.944882
| 0
| 0
| 0.076482
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047244
| false
| 0
| 0.007874
| 0
| 0.102362
| 0.141732
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2623061f127ff7a54c8a9c5cf8b1cb727d97ee30
| 68,811
|
py
|
Python
|
matchzoo/inputs/pair_generator.py
|
Guzpenha/DomainRegularizedDeepMatchingNetworks
|
c40a12df1db5eaa5d7f6e7ddb1321ca901f20a8f
|
[
"MIT"
] | 3
|
2020-05-24T04:46:45.000Z
|
2022-03-29T08:28:01.000Z
|
matchzoo/inputs/pair_generator.py
|
Guzpenha/DomainRegularizedDeepMatchingNetworks
|
c40a12df1db5eaa5d7f6e7ddb1321ca901f20a8f
|
[
"MIT"
] | null | null | null |
matchzoo/inputs/pair_generator.py
|
Guzpenha/DomainRegularizedDeepMatchingNetworks
|
c40a12df1db5eaa5d7f6e7ddb1321ca901f20a8f
|
[
"MIT"
] | 1
|
2020-05-24T04:46:50.000Z
|
2020-05-24T04:46:50.000Z
|
# -*- coding: utf-8 -*-
import sys
import random
import numpy as np
from utils.rank_io import *
from layers import DynamicMaxPooling
import scipy.sparse as sp
from IPython import embed
import pandas as pd
class PairBasicGenerator(object):
    """Base class for pairwise training-data generators.

    Reads a relevance relation file and expands it into
    (query, higher-ranked doc, lower-ranked doc) triplets for pairwise
    ranking. Subclasses override get_batch_static / get_batch_iter to
    turn the triplets into model-ready arrays.

    NOTE(review): this module uses Python 2 syntax (print statements,
    generator ``.next()``); it will not run under Python 3 as-is.
    """

    def __init__(self, config):
        # Required config keys are accumulated in self.check_list and
        # validated later by subclasses via self.check().
        self.__name = 'PairBasicGenerator'
        self.config = config
        rel_file = config['relation_file']
        # self.rel holds (label, d1, d2) triples (see iteration below).
        self.rel = read_relation(filename=rel_file)
        self.batch_size = config['batch_size']
        self.check_list = ['relation_file', 'batch_size']
        self.point = 0
        # Static mode materializes every pair once; iter mode resamples
        # a fresh pair pool lazily per iteration.
        if config['use_iter']:
            self.pair_list_iter = self.make_pair_iter(self.rel)
            self.pair_list = []
        else:
            self.pair_list = self.make_pair_static(self.rel)
            self.pair_list_iter = None
        print('len pair_list', len(self.pair_list))

    def check(self):
        """Return False (and log) if any required config key is missing."""
        for e in self.check_list:
            if e not in self.config:
                print '[%s] Error %s not in config' % (self.__name, e)
                return False
        return True

    def make_pair_static(self, rel):
        """Expand relation triples into every (d1, higher_d2, lower_d2) pair.

        Groups docs per query by label, then crosses each higher-label
        doc with each lower-label doc.
        """
        rel_set = {}
        pair_list = []
        for label, d1, d2 in rel:
            if d1 not in rel_set:
                rel_set[d1] = {}
            if label not in rel_set[d1]:
                rel_set[d1][label] = []
            rel_set[d1][label].append(d2)
        for d1 in rel_set:
            # Labels sorted descending so label_list[:-1] are the "higher" ones.
            label_list = sorted(rel_set[d1].keys(), reverse = True)
            for hidx, high_label in enumerate(label_list[:-1]):
                for low_label in label_list[hidx+1:]:
                    for high_d2 in rel_set[d1][high_label]:
                        for low_d2 in rel_set[d1][low_label]:
                            pair_list.append( (d1, high_d2, low_d2) )
        print 'Pair Instance Count:', len(pair_list)
        return pair_list

    def make_pair_iter(self, rel):
        """Generator variant: each step samples query_per_iter queries and
        yields the (growing) pair list built from that sample.

        NOTE(review): pair_list is never cleared between yields, so it
        accumulates across iterations — confirm this is intended.
        """
        rel_set = {}
        pair_list = []
        for label, d1, d2 in rel:
            if d1 not in rel_set:
                rel_set[d1] = {}
            if label not in rel_set[d1]:
                rel_set[d1][label] = []
            rel_set[d1][label].append(d2)
        while True:
            rel_set_sample = random.sample(rel_set.keys(), self.config['query_per_iter'])
            # prepare the sample pool
            for d1 in rel_set_sample:
                label_list = sorted(rel_set[d1].keys(), reverse = True)
                for hidx, high_label in enumerate(label_list[:-1]):
                    for low_label in label_list[hidx+1:]:
                        for high_d2 in rel_set[d1][high_label]:
                            for low_d2 in rel_set[d1][low_label]:
                                pair_list.append( (d1, high_d2, low_d2) )
            #print 'Pair Instance Count:', len(pair_list)
            yield pair_list

    def get_batch_static(self):
        # Overridden by subclasses.
        pass

    def get_batch_iter(self):
        # Overridden by subclasses.
        pass

    def get_batch(self):
        """Dispatch to the iterator or static batch path per config."""
        if self.config['use_iter']:
            return self.batch_iter.next()
        else:
            return self.get_batch_static()

    def get_batch_generator(self):
        # Overridden by subclasses.
        pass

    @property
    def num_pairs(self):
        # Number of materialized training pairs (0 in use_iter mode
        # until the first iteration replaces pair_list).
        return len(self.pair_list)

    def reset(self):
        """Reset the read cursor."""
        self.point = 0
class PairGenerator(PairBasicGenerator):
    """Pairwise generator producing fixed-length padded word-id batches.

    Each batch interleaves positive/negative rows: even rows hold the
    (query, positive doc) pair, odd rows the (query, negative doc) pair,
    matching Y = [1, 0, 1, 0, ...].
    """

    def __init__(self, config):
        super(PairGenerator, self).__init__(config=config)
        self.__name = 'PairGenerator'
        self.config = config
        # data1/data2: mappings from doc id to a sequence of word ids.
        self.data1 = config['data1']
        self.data2 = config['data2']
        self.data1_maxlen = config['text1_maxlen']
        self.data2_maxlen = config['text2_maxlen']
        # Last vocab index is reserved as the PAD word.
        self.fill_word = config['vocab_size'] - 1
        self.check_list.extend(['data1', 'data2', 'text1_maxlen', 'text2_maxlen'])
        if config['use_iter']:
            self.batch_iter = self.get_batch_iter()
        if not self.check():
            raise TypeError('[PairGenerator] parameter check wrong.')
        print '[PairGenerator] init done'

    def get_batch_static(self):
        """Sample batch_size pairs and return padded (X1, X1_len, X2, X2_len, Y)."""
        X1 = np.zeros((self.batch_size*2, self.data1_maxlen), dtype=np.int32)
        X1_len = np.zeros((self.batch_size*2,), dtype=np.int32)
        X2 = np.zeros((self.batch_size*2, self.data2_maxlen), dtype=np.int32)
        X2_len = np.zeros((self.batch_size*2,), dtype=np.int32)
        Y = np.zeros((self.batch_size*2,), dtype=np.int32)
        Y[::2] = 1
        # Pre-fill with the PAD word id.
        X1[:] = self.fill_word
        X2[:] = self.fill_word
        for i in range(self.batch_size):
            d1, d2p, d2n = random.choice(self.pair_list)
            d1_len = min(self.data1_maxlen, len(self.data1[d1]))
            d2p_len = min(self.data2_maxlen, len(self.data2[d2p]))
            d2n_len = min(self.data2_maxlen, len(self.data2[d2n]))
            #print 'test self.data1[d1], d1_len: ', self.data1[d1], d1_len
            # Row 2i: (query, positive); row 2i+1: (query, negative).
            X1[i*2, :d1_len], X1_len[i*2] = self.data1[d1][:d1_len], d1_len
            X2[i*2, :d2p_len], X2_len[i*2] = self.data2[d2p][:d2p_len], d2p_len
            X1[i*2+1, :d1_len], X1_len[i*2+1] = self.data1[d1][:d1_len], d1_len
            X2[i*2+1, :d2n_len], X2_len[i*2+1] = self.data2[d2n][:d2n_len], d2n_len
        return X1, X1_len, X2, X2_len, Y

    def get_batch_iter(self):
        """Yield batch_per_iter batches per refresh of the pair pool."""
        while True:
            self.pair_list = self.pair_list_iter.next()
            for _ in range(self.config['batch_per_iter']):
                X1 = np.zeros((self.batch_size*2, self.data1_maxlen), dtype=np.int32)
                X1_len = np.zeros((self.batch_size*2,), dtype=np.int32)
                X2 = np.zeros((self.batch_size*2, self.data2_maxlen), dtype=np.int32)
                X2_len = np.zeros((self.batch_size*2,), dtype=np.int32)
                Y = np.zeros((self.batch_size*2,), dtype=np.int32)
                Y[::2] = 1
                X1[:] = self.fill_word
                X2[:] = self.fill_word
                for i in range(self.batch_size):
                    d1, d2p, d2n = random.choice(self.pair_list)
                    d1_len = min(self.data1_maxlen, len(self.data1[d1]))
                    d2p_len = min(self.data2_maxlen, len(self.data2[d2p]))
                    d2n_len = min(self.data2_maxlen, len(self.data2[d2n]))
                    X1[i*2, :d1_len], X1_len[i*2] = self.data1[d1][:d1_len], d1_len
                    X2[i*2, :d2p_len], X2_len[i*2] = self.data2[d2p][:d2p_len], d2p_len
                    X1[i*2+1, :d1_len], X1_len[i*2+1] = self.data1[d1][:d1_len], d1_len
                    X2[i*2+1, :d2n_len], X2_len[i*2+1] = self.data2[d2n][:d2n_len], d2n_len
                yield X1, X1_len, X2, X2_len, Y

    def get_batch_generator(self):
        """Wrap get_batch() output in the Keras-style feed dict."""
        while True:
            X1, X1_len, X2, X2_len, Y = self.get_batch()
            if self.config['use_dpool']:
                yield ({'query': X1, 'query_len': X1_len, 'doc': X2, 'doc_len': X2_len, 'dpool_index': DynamicMaxPooling.dynamic_pooling_index(X1_len, X2_len, self.config['text1_maxlen'], self.config['text2_maxlen'])}, Y)
            else:
                yield ({'query': X1, 'query_len': X1_len, 'doc': X2, 'doc_len': X2_len}, Y)
class Triletter_PairGenerator(PairBasicGenerator):
    """Pairwise generator for DSSM/CDSSM-style tri-letter features.

    Word ids are expanded into tri-letter ids via a mapping file.
    dtype == 'dssm' yields dense bag-of-triletter vectors (from a sparse
    count matrix); dtype == 'cdssm' yields fixed-length padded id
    sequences.
    """

    def __init__(self, config):
        super(Triletter_PairGenerator, self).__init__(config=config)
        self.__name = 'Triletter_PairGenerator'
        self.data1 = config['data1']
        self.data2 = config['data2']
        self.dtype = config['dtype'].lower()
        if self.dtype == 'cdssm':
            # Only the sequence variant needs fixed max lengths.
            self.data1_maxlen = config['text1_maxlen']
            self.data2_maxlen = config['text2_maxlen']
        self.vocab_size = config['vocab_size']
        # Last vocab index is reserved as the PAD word.
        self.fill_word = self.vocab_size - 1
        self.check_list.extend(['data1', 'data2', 'dtype', 'vocab_size', 'word_triletter_map_file'])
        if config['use_iter']:
            self.batch_iter = self.get_batch_iter()
        if not self.check():
            raise TypeError('[Triletter_PairGenerator] parameter check wrong.')
        self.word_triletter_map = self.read_word_triletter_map(self.config['word_triletter_map_file'])
        print('[Triletter_PairGenerator] init done')

    def read_word_triletter_map(self, wt_map_file):
        """Parse 'word_id tri_id tri_id ...' lines into {word_id: [tri_ids]}."""
        word_triletter_map = {}
        # with-block fixes the file-handle leak of the bare open() loop.
        with open(wt_map_file) as fin:
            for line in fin:
                r = line.strip().split()
                word_triletter_map[int(r[0])] = map(int, r[1:])
        return word_triletter_map

    def map_word_to_triletter(self, words):
        """Concatenate the tri-letter ids of every word id in `words`."""
        triletters = []
        for wid in words:
            triletters.extend(self.word_triletter_map[wid])
        return triletters

    def transfer_feat2sparse(self, dense_feat):
        """Build a (n_docs, vocab_size) CSR count matrix of tri-letter ids."""
        data = []
        indices = []
        indptr = [0]
        for feat in dense_feat:
            for val in feat:
                indices.append(val)
                data.append(1)
            indptr.append(indptr[-1] + len(feat))
        # Fix: previously this matrix was constructed twice and the
        # first copy discarded; build it once and return it.
        return sp.csr_matrix((data, indices, indptr), shape=(len(dense_feat), self.vocab_size), dtype="float32")

    def transfer_feat2fixed(self, feats, max_len, fill_val):
        """Pad/truncate each feature list to max_len, filling with fill_val."""
        num_feat = len(feats)
        nfeat = np.zeros((num_feat, max_len), dtype=np.int32)
        nfeat[:] = fill_val
        for i in range(num_feat):
            rlen = min(max_len, len(feats[i]))
            nfeat[i,:rlen] = feats[i][:rlen]
        return nfeat

    def get_batch_static(self):
        """Sample batch_size pairs; rows alternate positive/negative.

        Returns (X1, X1_len, X2, X2_len, Y) in the representation chosen
        by self.dtype ('dssm' dense counts or 'cdssm' padded ids).
        """
        X1_len = np.zeros((self.batch_size*2,), dtype=np.int32)
        X2_len = np.zeros((self.batch_size*2,), dtype=np.int32)
        Y = np.zeros((self.batch_size*2,), dtype=np.int32)
        Y[::2] = 1
        X1, X2 = [], []
        for i in range(self.batch_size):
            d1, d2p, d2n = random.choice(self.pair_list)
            d1_len = len(self.data1[d1])
            d2p_len = len(self.data2[d2p])
            d2n_len = len(self.data2[d2n])
            X1_len[i*2], X1_len[i*2+1] = d1_len, d1_len
            X2_len[i*2], X2_len[i*2+1] = d2p_len, d2n_len
            X1.append(self.map_word_to_triletter(self.data1[d1]))
            X1.append(self.map_word_to_triletter(self.data1[d1]))
            X2.append(self.map_word_to_triletter(self.data2[d2p]))
            X2.append(self.map_word_to_triletter(self.data2[d2n]))
        if self.dtype == 'dssm':
            return self.transfer_feat2sparse(X1).toarray(), X1_len, self.transfer_feat2sparse(X2).toarray(), X2_len, Y
        elif self.dtype == 'cdssm':
            return self.transfer_feat2fixed(X1, self.data1_maxlen, self.fill_word), X1_len, \
                   self.transfer_feat2fixed(X2, self.data2_maxlen, self.fill_word), X2_len, Y

    def get_batch_iter(self):
        """Yield batch_per_iter batches per refresh of the pair pool."""
        while True:
            # next() works for both Python 2 and Python 3 iterators.
            self.pair_list = next(self.pair_list_iter)
            for _ in range(self.config['batch_per_iter']):
                X1_len = np.zeros((self.batch_size*2,), dtype=np.int32)
                X2_len = np.zeros((self.batch_size*2,), dtype=np.int32)
                Y = np.zeros((self.batch_size*2,), dtype=np.int32)
                Y[::2] = 1
                X1, X2 = [], []
                for i in range(self.batch_size):
                    d1, d2p, d2n = random.choice(self.pair_list)
                    d1_len = len(self.data1[d1])
                    d2p_len = len(self.data2[d2p])
                    d2n_len = len(self.data2[d2n])
                    X1_len[i*2], X1_len[i*2+1] = d1_len, d1_len
                    X2_len[i*2], X2_len[i*2+1] = d2p_len, d2n_len
                    X1.append(self.map_word_to_triletter(self.data1[d1]))
                    X1.append(self.map_word_to_triletter(self.data1[d1]))
                    X2.append(self.map_word_to_triletter(self.data2[d2p]))
                    X2.append(self.map_word_to_triletter(self.data2[d2n]))
                if self.dtype == 'dssm':
                    yield self.transfer_feat2sparse(X1).toarray(), X1_len, self.transfer_feat2sparse(X2).toarray(), X2_len, Y
                elif self.dtype == 'cdssm':
                    yield self.transfer_feat2fixed(X1, self.data1_maxlen, self.fill_word), X1_len, \
                          self.transfer_feat2fixed(X2, self.data2_maxlen, self.fill_word), X2_len, Y

    def get_batch_generator(self):
        """Wrap get_batch() output in the Keras-style feed dict."""
        while True:
            X1, X1_len, X2, X2_len, Y = self.get_batch()
            yield ({'query': X1, 'query_len': X1_len, 'doc': X2, 'doc_len': X2_len}, Y)
class DRMM_PairGenerator(PairBasicGenerator):
    """Pairwise generator for DRMM: the doc side is a matching histogram.

    For each (query, doc) pair the doc is represented as a
    (data1_maxlen, hist_size) histogram of query-term/doc-term embedding
    similarities (or precomputed features from hist_feats_file).
    """

    def __init__(self, config):
        super(DRMM_PairGenerator, self).__init__(config=config)
        self.__name = 'DRMM_PairGenerator'
        self.data1 = config['data1']
        self.data2 = config['data2']
        self.data1_maxlen = config['text1_maxlen']
        self.data2_maxlen = config['text2_maxlen']
        # Embedding matrix, indexed by word id (used when no
        # precomputed histogram features are supplied).
        self.embed = config['embed']
        # 'bin_num' is the preferred key; fall back to 'hist_size'.
        if 'bin_num' in config:
            self.hist_size = config['bin_num']
        else:
            self.hist_size = config['hist_size']
        self.fill_word = config['vocab_size'] - 1
        self.check_list.extend(['data1', 'data2', 'text1_maxlen', 'text2_maxlen', 'embed'])
        self.use_hist_feats = False
        if 'hist_feats_file' in config:
            # Precomputed histograms, aligned by position with self.rel.
            hist_feats = read_features_without_id(config['hist_feats_file'])
            self.hist_feats = {}
            for idx, (label, d1, d2) in enumerate(self.rel):
                self.hist_feats[(d1, d2)] = hist_feats[idx]
            self.use_hist_feats = True
        if config['use_iter']:
            self.batch_iter = self.get_batch_iter()
        if not self.check():
            raise TypeError('[DRMM_PairGenerator] parameter check wrong.')
        print '[DRMM_PairGenerator] init done'

    def cal_hist(self, t1, t2, data1_maxlen, hist_size):
        """Return the (data1_maxlen, hist_size) matching histogram for (t1, t2).

        Precomputed path: reshape stored features, truncating/padding to
        data1_maxlen rows. Computed path: bucket cosine-like similarities
        in [-1, 1] into hist_size bins, then log10(1 + count).
        """
        mhist = np.zeros((data1_maxlen, hist_size), dtype=np.float32)
        d1len = len(self.data1[t1])
        if self.use_hist_feats:
            assert (t1, t2) in self.hist_feats
            caled_hist = np.reshape(self.hist_feats[(t1, t2)], (d1len, hist_size))
            if d1len < data1_maxlen:
                mhist[:d1len, :] = caled_hist[:, :]
            else:
                mhist[:, :] = caled_hist[:data1_maxlen, :]
        else:
            t1_rep = self.embed[self.data1[t1]]
            t2_rep = self.embed[self.data2[t2]]
            # Similarity matrix between every query term and doc term.
            mm = t1_rep.dot(np.transpose(t2_rep))
            for (i,j), v in np.ndenumerate(mm):
                if i >= data1_maxlen:
                    break
                # Map v in [-1, 1] to a bin index in [0, hist_size-1].
                vid = int((v + 1.) / 2. * ( hist_size - 1.))
                mhist[i][vid] += 1.
            # Log-count transform (add-one to avoid log10(0)).
            mhist += 1.
            mhist = np.log10(mhist)
        return mhist

    def get_batch_static(self):
        """Sample batch_size pairs; rows alternate positive/negative."""
        X1 = np.zeros((self.batch_size*2, self.data1_maxlen), dtype=np.int32)
        X1_len = np.zeros((self.batch_size*2,), dtype=np.int32)
        # Doc side is the histogram tensor, not word ids.
        X2 = np.zeros((self.batch_size*2, self.data1_maxlen, self.hist_size), dtype=np.float32)
        X2_len = np.zeros((self.batch_size*2,), dtype=np.int32)
        Y = np.zeros((self.batch_size*2,), dtype=np.int32)
        Y[::2] = 1
        X1[:] = self.fill_word
        for i in range(self.batch_size):
            d1, d2p, d2n = random.choice(self.pair_list)
            d1_len = min(self.data1_maxlen, len(self.data1[d1]))
            d2p_len = len(self.data2[d2p])
            d2n_len = len(self.data2[d2n])
            X1[i*2, :d1_len], X1_len[i*2] = self.data1[d1][:d1_len], d1_len
            X1[i*2+1, :d1_len], X1_len[i*2+1] = self.data1[d1][:d1_len], d1_len
            X2[i*2], X2_len[i*2] = self.cal_hist(d1, d2p, self.data1_maxlen, self.hist_size), d2p_len
            X2[i*2+1], X2_len[i*2+1] = self.cal_hist(d1, d2n, self.data1_maxlen, self.hist_size), d2n_len
        return X1, X1_len, X2, X2_len, Y

    def get_batch_iter(self):
        """Yield batch_per_iter batches per refresh of the pair pool."""
        while True:
            self.pair_list = self.pair_list_iter.next()
            for _ in range(self.config['batch_per_iter']):
                X1 = np.zeros((self.batch_size*2, self.data1_maxlen), dtype=np.int32)
                X1_len = np.zeros((self.batch_size*2,), dtype=np.int32)
                X2 = np.zeros((self.batch_size*2, self.data1_maxlen, self.hist_size), dtype=np.float32)
                X2_len = np.zeros((self.batch_size*2,), dtype=np.int32)
                Y = np.zeros((self.batch_size*2,), dtype=np.int32)
                Y[::2] = 1
                X1[:] = self.fill_word
                #X2[:] = 0.
                for i in range(self.batch_size):
                    d1, d2p, d2n = random.choice(self.pair_list)
                    d1_len = min(self.data1_maxlen, len(self.data1[d1]))
                    d2p_len = len(self.data2[d2p])
                    d2n_len = len(self.data2[d2n])
                    X1[i*2, :d1_len], X1_len[i*2] = self.data1[d1][:d1_len], d1_len
                    X1[i*2+1, :d1_len], X1_len[i*2+1] = self.data1[d1][:d1_len], d1_len
                    X2[i*2], X2_len[i*2] = self.cal_hist(d1, d2p, self.data1_maxlen, self.hist_size), d2p_len
                    X2[i*2+1], X2_len[i*2+1] = self.cal_hist(d1, d2n, self.data1_maxlen, self.hist_size), d2n_len
                yield X1, X1_len, X2, X2_len, Y

    def get_batch_generator(self):
        """Wrap get_batch() output in the Keras-style feed dict."""
        while True:
            X1, X1_len, X2, X2_len, Y = self.get_batch()
            yield ({'query': X1, 'query_len': X1_len, 'doc': X2, 'doc_len': X2_len}, Y)
# Pair generator for DMN_KD (qa_comat) for conversational response ranking (2D query + doc as input)
class DMN_KD_PairGenerator(PairBasicGenerator):
    """Pair generator for DMN_KD (qa_comat) conversational response ranking.

    The query side is a 2-D dialogue context (utterances x words); an
    extra tensor X3 carries sparse QA co-occurrence values looked up in
    config['qa_comat'].
    """

    def __init__(self, config):
        super(DMN_KD_PairGenerator, self).__init__(config=config)
        self.__name = 'DMN_KD_PairGenerator'
        self.config = config
        self.data1 = config['data1']
        self.data2 = config['data2']
        # qa_comat: key 'd1_turn_d2' -> (row_idx, col_idx, value).
        self.qa_comat = config['qa_comat']
        self.data1_maxlen = config['text1_maxlen']
        self.data1_max_utt_num = int(config['text1_max_utt_num'])
        self.data2_maxlen = config['text2_maxlen']
        self.fill_word = config['vocab_size'] - 1
        self.check_list.extend(['data1', 'data2', 'text1_maxlen', 'text2_maxlen', 'text1_max_utt_num', 'qa_comat'])
        if config['use_iter']:
            self.batch_iter = self.get_batch_iter()
        if not self.check():
            raise TypeError('[DMN_KD_PairGenerator] parameter check wrong.')
        print '[DMN_KD_PairGenerator] init done'

    def get_batch_static(self):
        """Sample batch_size pairs; returns (X1, X1_len, X2, X2_len, Y, X3)."""
        X1 = np.zeros((self.batch_size*2, self.data1_max_utt_num, self.data1_maxlen), dtype=np.int32) # max 10 turns
        X1_len = np.zeros((self.batch_size*2, self.data1_max_utt_num), dtype=np.int32) # max 10 turns
        X2 = np.zeros((self.batch_size*2, self.data2_maxlen), dtype=np.int32)
        X2_len = np.zeros((self.batch_size*2,), dtype=np.int32)
        X3 = np.zeros((self.batch_size * 2, self.data1_max_utt_num, self.data1_maxlen, self.data2_maxlen), dtype=np.float32) # max 10 turns (did, uid) -> 2d matrix
        Y = np.zeros((self.batch_size*2,), dtype=np.int32)
        Y[::2] = 1 # [1,0,1,0,1,0,...]
        X1[:] = self.fill_word # the default word index is the last word, which is the added PAD word
        X2[:] = self.fill_word
        for i in range(self.batch_size):
            #print 'get_batch_static test i = ', i
            d1, d2p, d2n = random.choice(self.pair_list)
            # print 'd1, d2p, d2n = ', d1, d2p, d2n
            # print 'self.data2[d2p] = ', self.data2[d2p]
            # Responses are stored as a single whitespace-joined string;
            # empty responses fall back to a single PAD token.
            if len(self.data2[d2p]) == 0:
                d2p_ws = [self.fill_word]
            else:
                d2p_ws = self.data2[d2p][0].split()
            if len(self.data2[d2n]) == 0:
                d2n_ws = [self.fill_word]
            else:
                d2n_ws = self.data2[d2n][0].split()
            d2p_len = min(self.data2_maxlen, len(d2p_ws))
            d2n_len = min(self.data2_maxlen, len(d2n_ws))
            # print 'self.data1[d1] = ', self.data1[d1]
            # print 'd2p_len, d2n_len = ', d2p_len, d2n_len
            # print 'data2[d2p], data2[d2n] = ', self.data2[d2p], self.data2[d2n]
            X2[i * 2, :d2p_len], X2_len[i * 2] = d2p_ws[:d2p_len], d2p_len
            X2[i * 2 + 1, :d2n_len], X2_len[i * 2 + 1] = d2n_ws[:d2n_len], d2n_len
            # if len(self.data1[d1]) > 10, we only keep the most recent 10 utterances
            utt_start = 0 if len(self.data1[d1]) < self.data1_max_utt_num else (len(self.data1[d1])-self.data1_max_utt_num)
            # print 'test utt_start ', utt_start
            # print 'test len(self.data1[d1]) ', len(self.data1[d1])
            for j in range(utt_start, len(self.data1[d1])):
                # print 'test j ', j
                # print 'test utt_start ', utt_start
                d1_ws = self.data1[d1][j].split()
                d1_len = min(self.data1_maxlen, len(d1_ws))
                X1[i*2, j-utt_start, :d1_len], X1_len[i*2, j-utt_start] = d1_ws[:d1_len], d1_len
                X1[i*2+1, j-utt_start, :d1_len], X1_len[i*2+1, j-utt_start] = d1_ws[:d1_len], d1_len
                # Scatter the co-occurrence value for (context turn, response).
                key = d1 + '_' + str(j-utt_start) + '_' + d2p
                if key in self.qa_comat:
                    mp = self.qa_comat[key]
                    X3[i*2, j-utt_start][mp[0],mp[1]] = mp[2]
                key = d1 + '_' + str(j-utt_start) + '_' + d2n
                if key in self.qa_comat:
                    mn = self.qa_comat[key]
                    X3[i * 2+1, j - utt_start][mn[0], mn[1]] = mn[2]
        return X1, X1_len, X2, X2_len, Y, X3

    def get_batch_iter(self):
        """Iterator batch path.

        NOTE(review): this yields 5 values (no X3) while
        get_batch_generator unpacks 6 from get_batch(); the use_iter
        path would therefore fail to unpack — confirm/repair upstream.
        """
        while True:
            self.pair_list = self.pair_list_iter.next()
            for _ in range(self.config['batch_per_iter']):
                X1 = np.zeros((self.batch_size*2, self.data1_maxlen), dtype=np.int32)
                X1_len = np.zeros((self.batch_size*2,), dtype=np.int32)
                X2 = np.zeros((self.batch_size*2, self.data2_maxlen), dtype=np.int32)
                X2_len = np.zeros((self.batch_size*2,), dtype=np.int32)
                Y = np.zeros((self.batch_size*2,), dtype=np.int32)
                Y[::2] = 1
                X1[:] = self.fill_word
                X2[:] = self.fill_word
                for i in range(self.batch_size):
                    d1, d2p, d2n = random.choice(self.pair_list)
                    d1_len = min(self.data1_maxlen, len(self.data1[d1]))
                    d2p_len = min(self.data2_maxlen, len(self.data2[d2p]))
                    d2n_len = min(self.data2_maxlen, len(self.data2[d2n]))
                    X1[i*2, :d1_len], X1_len[i*2] = self.data1[d1][:d1_len], d1_len
                    X2[i*2, :d2p_len], X2_len[i*2] = self.data2[d2p][:d2p_len], d2p_len
                    X1[i*2+1, :d1_len], X1_len[i*2+1] = self.data1[d1][:d1_len], d1_len
                    X2[i*2+1, :d2n_len], X2_len[i*2+1] = self.data2[d2n][:d2n_len], d2n_len
                yield X1, X1_len, X2, X2_len, Y

    def get_batch_generator(self):
        """Wrap get_batch() output in the Keras-style feed dict.

        NOTE(review): the use_dpool branch silently drops X3.
        """
        while True:
            X1, X1_len, X2, X2_len, Y, X3 = self.get_batch()
            if self.config['use_dpool']:
                yield ({'query': X1, 'query_len': X1_len, 'doc': X2, 'doc_len': X2_len, 'dpool_index': DynamicMaxPooling.dynamic_pooling_index(X1_len, X2_len, self.config['text1_maxlen'], self.config['text2_maxlen'])}, Y)
            else:
                yield ({'query': X1, 'query_len': X1_len, 'doc': X2, 'doc_len': X2_len, 'qa_comat':X3}, Y)
# Pair generator for DMN for conversational response ranking (2D query + doc as input)
class DMN_PairGenerator(PairBasicGenerator):
    """Pair generator for DMN conversational response ranking.

    The query side is a 2-D dialogue context: up to data1_max_utt_num
    utterances, each padded to data1_maxlen word ids.
    """

    def __init__(self, config):
        super(DMN_PairGenerator, self).__init__(config=config)
        self.__name = 'DMN_PairGenerator'
        self.config = config
        self.data1 = config['data1']
        self.data2 = config['data2']
        self.data1_maxlen = config['text1_maxlen']
        self.data1_max_utt_num = int(config['text1_max_utt_num'])
        self.data2_maxlen = config['text2_maxlen']
        self.fill_word = config['vocab_size'] - 1
        self.check_list.extend(['data1', 'data2', 'text1_maxlen', 'text2_maxlen', 'text1_max_utt_num'])
        if config['use_iter']:
            self.batch_iter = self.get_batch_iter()
        if not self.check():
            raise TypeError('[DMN_PairGenerator] parameter check wrong.')
        print '[DMN_PairGenerator] init done'

    def get_batch_static(self):
        """Sample batch_size pairs; rows alternate positive/negative."""
        X1 = np.zeros((self.batch_size*2, self.data1_max_utt_num, self.data1_maxlen), dtype=np.int32) # max 10 turns
        X1_len = np.zeros((self.batch_size*2, self.data1_max_utt_num), dtype=np.int32) # max 10 turns
        X2 = np.zeros((self.batch_size*2, self.data2_maxlen), dtype=np.int32)
        X2_len = np.zeros((self.batch_size*2,), dtype=np.int32)
        Y = np.zeros((self.batch_size*2,), dtype=np.int32)
        Y[::2] = 1 # [1,0,1,0,1,0,...]
        X1[:] = self.fill_word # the default word index is the last word, which is the added PAD word
        X2[:] = self.fill_word
        for i in range(self.batch_size):
            #print 'get_batch_static test i = ', i
            d1, d2p, d2n = random.choice(self.pair_list)
            # print 'd1, d2p, d2n = ', d1, d2p, d2n
            # print 'self.data2[d2p] = ', self.data2[d2p]
            # Responses are stored as a single whitespace-joined string;
            # empty responses fall back to a single PAD token.
            if len(self.data2[d2p]) == 0:
                d2p_ws = [self.fill_word]
            else:
                d2p_ws = self.data2[d2p][0].split()
            if len(self.data2[d2n]) == 0:
                d2n_ws = [self.fill_word]
            else:
                d2n_ws = self.data2[d2n][0].split()
            d2p_len = min(self.data2_maxlen, len(d2p_ws))
            d2n_len = min(self.data2_maxlen, len(d2n_ws))
            # print 'self.data1[d1] = ', self.data1[d1]
            # print 'd2p_len, d2n_len = ', d2p_len, d2n_len
            # print 'data2[d2p], data2[d2n] = ', self.data2[d2p], self.data2[d2n]
            X2[i * 2, :d2p_len], X2_len[i * 2] = d2p_ws[:d2p_len], d2p_len
            X2[i * 2 + 1, :d2n_len], X2_len[i * 2 + 1] = d2n_ws[:d2n_len], d2n_len
            # if len(self.data1[d1]) > 10, we only keep the most recent 10 utterances
            utt_start = 0 if len(self.data1[d1]) < self.data1_max_utt_num else (len(self.data1[d1])-self.data1_max_utt_num)
            # print 'test utt_start ', utt_start
            # print 'test len(self.data1[d1]) ', len(self.data1[d1])
            for j in range(utt_start, len(self.data1[d1])):
                # print 'test j ', j
                # print 'test utt_start ', utt_start
                d1_ws = self.data1[d1][j].split()
                d1_len = min(self.data1_maxlen, len(d1_ws))
                X1[i*2, j-utt_start, :d1_len], X1_len[i*2, j-utt_start] = d1_ws[:d1_len], d1_len
                X1[i*2+1, j-utt_start, :d1_len], X1_len[i*2+1, j-utt_start] = d1_ws[:d1_len], d1_len
        return X1, X1_len, X2, X2_len, Y

    def get_batch_iter(self):
        """Iterator batch path.

        NOTE(review): unlike get_batch_static, this yields a flat
        (batch, data1_maxlen) query tensor, not the per-utterance 3-D
        layout — confirm which shape the DMN model expects here.
        """
        while True:
            self.pair_list = self.pair_list_iter.next()
            for _ in range(self.config['batch_per_iter']):
                X1 = np.zeros((self.batch_size*2, self.data1_maxlen), dtype=np.int32)
                X1_len = np.zeros((self.batch_size*2,), dtype=np.int32)
                X2 = np.zeros((self.batch_size*2, self.data2_maxlen), dtype=np.int32)
                X2_len = np.zeros((self.batch_size*2,), dtype=np.int32)
                Y = np.zeros((self.batch_size*2,), dtype=np.int32)
                Y[::2] = 1
                X1[:] = self.fill_word
                X2[:] = self.fill_word
                for i in range(self.batch_size):
                    d1, d2p, d2n = random.choice(self.pair_list)
                    d1_len = min(self.data1_maxlen, len(self.data1[d1]))
                    d2p_len = min(self.data2_maxlen, len(self.data2[d2p]))
                    d2n_len = min(self.data2_maxlen, len(self.data2[d2n]))
                    X1[i*2, :d1_len], X1_len[i*2] = self.data1[d1][:d1_len], d1_len
                    X2[i*2, :d2p_len], X2_len[i*2] = self.data2[d2p][:d2p_len], d2p_len
                    X1[i*2+1, :d1_len], X1_len[i*2+1] = self.data1[d1][:d1_len], d1_len
                    X2[i*2+1, :d2n_len], X2_len[i*2+1] = self.data2[d2n][:d2n_len], d2n_len
                yield X1, X1_len, X2, X2_len, Y

    def get_batch_generator(self):
        """Wrap get_batch() output in the Keras-style feed dict."""
        while True:
            X1, X1_len, X2, X2_len, Y = self.get_batch()
            if self.config['use_dpool']:
                yield ({'query': X1, 'query_len': X1_len, 'doc': X2, 'doc_len': X2_len, 'dpool_index': DynamicMaxPooling.dynamic_pooling_index(X1_len, X2_len, self.config['text1_maxlen'], self.config['text2_maxlen'])}, Y)
            else:
                yield ({'query': X1, 'query_len': X1_len, 'doc': X2, 'doc_len': X2_len}, Y)
class DMN_PairGeneratorMultipleDomains(PairBasicGenerator):
def __init__(self, config):
super(DMN_PairGeneratorMultipleDomains, self).__init__(config=config)
self.__name = 'DMN_PairGeneratorMultipleDomains'
self.config = config
self.domain_to_train = config['domain_to_train']
self.data1 = config['data1']
self.data2 = config['data2']
self.data1_maxlen = config['text1_maxlen']
self.data1_max_utt_num = int(config['text1_max_utt_num'])
self.data2_maxlen = config['text2_maxlen']
self.fill_word = config['vocab_size'] - 1
self.check_list.extend(['data1', 'data2', 'text1_maxlen', 'text2_maxlen', 'text1_max_utt_num'])
if config['use_iter']:
self.batch_iter = self.get_batch_iter()
if not self.check():
raise TypeError('[DMN_PairGeneratorMultipleDomains] parameter check wrong.')
path = config['domain_splits_folder']
with open(path+'domain_splits_train') as f:
size = int(f.read().split("Q")[1])
self.train_domain_division = size
self.balanced_domain_batches = config['balanced_domain_batches']
if(self.balanced_domain_batches):
self.d1_pair_list = []
self.d2_pair_list = []
for triplet in self.pair_list:
domain = int(triplet[0].split("Q")[1])<self.train_domain_division
# qid = triplet[0]
# dialogue_len = len(self.data1[qid])
# all_utt_higher = True
# for utt in self.data1[qid]:
# if(len(utt)<30):
# all_utt_higher=False
# if(dialogue_len>=2 and all_utt_higher):
if(domain):
self.d1_pair_list.append(triplet)
else:
self.d2_pair_list.append(triplet)
# d1_utt_turns = {}
# for i in range(0,len(self.d1_pair_list),10):
# qid = self.d1_pair_list[i][0]
# d1_utt_turns[qid] = len(self.data1[qid])
# d2_utt_turns = {}
# for i in range(0,len(self.d2_pair_list),10):
# qid = self.d2_pair_list[i][0]
# d2_utt_turns[qid] = len(self.data1[qid])
# d1_utt_length = {}
# for i in range(0,len(self.d1_pair_list),10):
# qid = self.d1_pair_list[i][0]
# d1_utt_length[qid] = np.mean([len(utt) for utt in self.data1[qid]])
# d2_utt_length = {}
# for i in range(0,len(self.d2_pair_list),10):
# qid = self.d2_pair_list[i][0]
# d2_utt_length[qid] = np.mean([len(utt) for utt in self.data1[qid]])
print('d1 pair_list size', str(len(self.d1_pair_list)))
print('d2 pair_list size', str(len(self.d2_pair_list)))
print '[DMN_PairGeneratorMultipleDomains] init done'
def get_batch_static(self):
X1 = np.zeros((self.batch_size*2, self.data1_max_utt_num, self.data1_maxlen), dtype=np.int32) # max 10 turns
X1_len = np.zeros((self.batch_size*2, self.data1_max_utt_num), dtype=np.int32) # max 10 turns
X2 = np.zeros((self.batch_size*2, self.data2_maxlen), dtype=np.int32)
X2_len = np.zeros((self.batch_size*2,), dtype=np.int32)
Y = np.zeros((self.batch_size*2,), dtype=np.int32)
Y[::2] = 1 # [1,0,1,0,1,0,...]
X1[:] = self.fill_word # the default word index is the last word, which is the added PAD word
X2[:] = self.fill_word
Y_domain = []
for i in range(self.batch_size):
#print 'get_batch_static test i = ', i
if(self.domain_to_train == 0):
rand_idx = random.choice(range(len(self.d1_pair_list)))
d1, d2p, d2n = self.d1_pair_list[rand_idx]
elif(self.domain_to_train == 1):
rand_idx = random.choice(range(len(self.d2_pair_list)))
d1, d2p, d2n = self.d2_pair_list[rand_idx]
elif(not self.balanced_domain_batches):
rand_idx = random.choice(range(len(self.pair_list)))
d1, d2p, d2n = self.pair_list[rand_idx]
elif(i<self.batch_size/2.0):
rand_idx = random.choice(range(len(self.d1_pair_list)))
d1, d2p, d2n = self.d1_pair_list[rand_idx]
else:
rand_idx = random.choice(range(len(self.d2_pair_list)))
d1, d2p, d2n = self.d2_pair_list[rand_idx]
#10 because we have 9 candidates for each true response
domain = int(int(d1.split("Q")[1])<self.train_domain_division)
Y_domain.append(domain)
# print 'd1, d2p, d2n = ', d1, d2p, d2n
# print 'self.data2[d2p] = ', self.data2[d2p]
if len(self.data2[d2p]) == 0:
d2p_ws = [self.fill_word]
else:
d2p_ws = self.data2[d2p][0].split()
if len(self.data2[d2n]) == 0:
d2n_ws = [self.fill_word]
else:
d2n_ws = self.data2[d2n][0].split()
d2p_len = min(self.data2_maxlen, len(d2p_ws))
d2n_len = min(self.data2_maxlen, len(d2n_ws))
# print 'self.data1[d1] = ', self.data1[d1]
# print 'd2p_len, d2n_len = ', d2p_len, d2n_len
# print 'data2[d2p], data2[d2n] = ', self.data2[d2p], self.data2[d2n]
X2[i * 2, :d2p_len], X2_len[i * 2] = d2p_ws[:d2p_len], d2p_len
X2[i * 2 + 1, :d2n_len], X2_len[i * 2 + 1] = d2n_ws[:d2n_len], d2n_len
# if len(self.data1[d1]) > 10, we only keep the most recent 10 utterances
utt_start = 0 if len(self.data1[d1]) < self.data1_max_utt_num else (len(self.data1[d1])-self.data1_max_utt_num)
# print 'test utt_start ', utt_start
# print 'test len(self.data1[d1]) ', len(self.data1[d1])
for j in range(utt_start, len(self.data1[d1])):
# print 'test j ', j
# print 'test utt_start ', utt_start
d1_ws = self.data1[d1][j].split()
d1_len = min(self.data1_maxlen, len(d1_ws))
X1[i*2, j-utt_start, :d1_len], X1_len[i*2, j-utt_start] = d1_ws[:d1_len], d1_len
X1[i*2+1, j-utt_start, :d1_len], X1_len[i*2+1, j-utt_start] = d1_ws[:d1_len], d1_len
return X1, X1_len, X2, X2_len, Y
def get_batch_iter(self):
    """Generator-mode batch construction (used when `use_iter` is on).

    Pulls a fresh pair list from `pair_list_iter`, then yields
    `batch_per_iter` batches built from it before pulling the next one.
    Rows are interleaved (positive, negative), so every array has
    2 * batch_size rows and Y is [1, 0, 1, 0, ...].
    """
    while True:
        self.pair_list = self.pair_list_iter.next()
        for _ in range(self.config['batch_per_iter']):
            n_rows = self.batch_size * 2
            # Pre-fill the id tensors with the PAD word index.
            X1 = np.full((n_rows, self.data1_maxlen), self.fill_word, dtype=np.int32)
            X2 = np.full((n_rows, self.data2_maxlen), self.fill_word, dtype=np.int32)
            X1_len = np.zeros((n_rows,), dtype=np.int32)
            X2_len = np.zeros((n_rows,), dtype=np.int32)
            Y = np.zeros((n_rows,), dtype=np.int32)
            Y[::2] = 1
            for i in range(self.batch_size):
                ctx, pos, neg = random.choice(self.pair_list)
                ctx_len = min(self.data1_maxlen, len(self.data1[ctx]))
                pos_len = min(self.data2_maxlen, len(self.data2[pos]))
                neg_len = min(self.data2_maxlen, len(self.data2[neg]))
                # Both rows of the pair share the same query/context.
                for row in (2 * i, 2 * i + 1):
                    X1[row, :ctx_len] = self.data1[ctx][:ctx_len]
                    X1_len[row] = ctx_len
                X2[2 * i, :pos_len] = self.data2[pos][:pos_len]
                X2_len[2 * i] = pos_len
                X2[2 * i + 1, :neg_len] = self.data2[neg][:neg_len]
                X2_len[2 * i + 1] = neg_len
            yield X1, X1_len, X2, X2_len, Y
def get_batch_generator(self):
    """Yield Keras-style ({inputs}, Y) tuples built from get_batch()."""
    while True:
        X1, X1_len, X2, X2_len, Y = self.get_batch()
        feed = {'query': X1, 'query_len': X1_len, 'doc': X2, 'doc_len': X2_len}
        if self.config['use_dpool']:
            # Dynamic pooling additionally needs the precomputed pooling index.
            feed['dpool_index'] = DynamicMaxPooling.dynamic_pooling_index(
                X1_len, X2_len, self.config['text1_maxlen'], self.config['text2_maxlen'])
        yield (feed, Y)
class DMN_PairGeneratorMultipleDomainsWithLabels(PairBasicGenerator):
def __init__(self, config):
super(DMN_PairGeneratorMultipleDomainsWithLabels, self).__init__(config=config)
self.__name = 'DMN_PairGeneratorMultipleDomainsWithLabels'
self.config = config
self.data1 = config['data1']
self.data2 = config['data2']
self.data1_maxlen = config['text1_maxlen']
self.data1_max_utt_num = int(config['text1_max_utt_num'])
self.data2_maxlen = config['text2_maxlen']
self.fill_word = config['vocab_size'] - 1
self.check_list.extend(['data1', 'data2', 'text1_maxlen', 'text2_maxlen', 'text1_max_utt_num'])
if config['use_iter']:
self.batch_iter = self.get_batch_iter()
if not self.check():
raise TypeError('[DMN_PairGeneratorMultipleDomainsWithLabels] parameter check wrong.')
path = config['domain_splits_folder']
with open(path+'domain_splits_train') as f:
size = int(f.read().split("Q")[1])
self.train_domain_division = size
self.balanced_domain_batches = config['balanced_domain_batches'] # does not work with 3 domains and False
if(self.balanced_domain_batches):
self.d1_pair_list = []
self.d2_pair_list = []
for triplet in self.pair_list:
domain = int(triplet[0].split("Q")[1])<self.train_domain_division
# qid = triplet[0]
# dialogue_len = len(self.data1[qid])
# all_utt_higher = True
# for utt in self.data1[qid]:
# if(len(utt)<30):
# all_utt_higher=False
# if(dialogue_len>=2 and all_utt_higher):
if(domain):
self.d1_pair_list.append(triplet)
else:
self.d2_pair_list.append(triplet)
print('d1 pair_list size', str(len(self.d1_pair_list)))
print('d2 pair_list size', str(len(self.d2_pair_list)))
if('train_clf_with_ood' in config and config['train_clf_with_ood']):
self.rel_ood = read_relation(filename=config['relation_file_ood'])
self.pair_list_ood = self.make_pair_static(self.rel_ood)
self.data1_ood = config['data1_ood']
self.data2_ood = config['data2_ood']
print '[DMN_PairGeneratorMultipleDomainsWithLabels] init done'
def get_batch_static(self):
X1 = np.zeros((self.batch_size*2, self.data1_max_utt_num, self.data1_maxlen), dtype=np.int32) # max 10 turns
X1_len = np.zeros((self.batch_size*2, self.data1_max_utt_num), dtype=np.int32) # max 10 turns
X2 = np.zeros((self.batch_size*2, self.data2_maxlen), dtype=np.int32)
X2_len = np.zeros((self.batch_size*2,), dtype=np.int32)
Y = np.zeros((self.batch_size*2,), dtype=np.int32)
Y[::2] = 1 # [1,0,1,0,1,0,...]
X1[:] = self.fill_word # the default word index is the last word, which is the added PAD word
X2[:] = self.fill_word
number_of_domains = 2
if('train_clf_with_ood' in self.config and \
self.config['train_clf_with_ood']):
number_of_domains = 3
Y_domain = np.zeros((self.batch_size*2, number_of_domains), dtype=np.int32)
for i in range(self.batch_size):
#print 'get_batch_static test i = ', i
if(not self.balanced_domain_batches):
rand_idx = random.choice(range(len(self.pair_list)))
d1, d2p, d2n = self.pair_list[rand_idx]
domain = int(int(d1.split("Q")[1])<self.train_domain_division)
elif('train_clf_with_ood' in self.config and self.config['train_clf_with_ood']):
list_to_use = random.choice(range(3))
if list_to_use == 0:
rand_idx = random.choice(range(len(self.d1_pair_list)))
d1, d2p, d2n = self.d1_pair_list[rand_idx]
elif list_to_use == 1:
rand_idx = random.choice(range(len(self.d2_pair_list)))
d1, d2p, d2n = self.d2_pair_list[rand_idx]
else:
rand_idx = random.choice(range(len(self.pair_list_ood)))
d1, d2p, d2n = self.pair_list_ood[rand_idx]
domain = list_to_use
else:
if(i<self.batch_size/2.0):
rand_idx = random.choice(range(len(self.d1_pair_list)))
d1, d2p, d2n = self.d1_pair_list[rand_idx]
else:
rand_idx = random.choice(range(len(self.d2_pair_list)))
d1, d2p, d2n = self.d2_pair_list[rand_idx]
domain = int(int(d1.split("Q")[1])<self.train_domain_division)
if('train_clf_with_ood' in self.config and \
self.config['train_clf_with_ood'] and list_to_use == 2):
data1 = self.data1_ood
data2 = self.data2_ood
else:
data1 = self.data1
data2 = self.data2
if(domain == 2):
while(d1 not in data1 or d2p not in data2 or d2n not in data2):
rand_idx = random.choice(range(len(self.pair_list_ood)))
d1, d2p, d2n = self.pair_list_ood[rand_idx]
# print 'd1, d2p, d2n = ', d1, d2p, d2n
# print 'self.data2[d2p] = ', self.data2[d2p]
if len(data2[d2p]) == 0:
d2p_ws = [self.fill_word]
else:
d2p_ws = data2[d2p][0].split()
if len(data2[d2n]) == 0:
d2n_ws = [self.fill_word]
else:
d2n_ws = data2[d2n][0].split()
d2p_len = min(self.data2_maxlen, len(d2p_ws))
d2n_len = min(self.data2_maxlen, len(d2n_ws))
# print 'data1[d1] = ', data1[d1]
# print 'd2p_len, d2n_len = ', d2p_len, d2n_len
# print 'data2[d2p], data2[d2n] = ', data2[d2p], data2[d2n]
X2[i * 2, :d2p_len], X2_len[i * 2] = d2p_ws[:d2p_len], d2p_len
X2[i * 2 + 1, :d2n_len], X2_len[i * 2 + 1] = d2n_ws[:d2n_len], d2n_len
# if len(data1[d1]) > 10, we only keep the most recent 10 utterances
utt_start = 0 if len(data1[d1]) < self.data1_max_utt_num else (len(data1[d1])-self.data1_max_utt_num)
# print 'test utt_start ', utt_start
# print 'test len(data1[d1]) ', len(data1[d1])
for j in range(utt_start, len(data1[d1])):
# print 'test j ', j
# print 'test utt_start ', utt_start
d1_ws = data1[d1][j].split()
d1_len = min(self.data1_maxlen, len(d1_ws))
X1[i*2, j-utt_start, :d1_len], X1_len[i*2, j-utt_start] = d1_ws[:d1_len], d1_len
X1[i*2+1, j-utt_start, :d1_len], X1_len[i*2+1, j-utt_start] = d1_ws[:d1_len], d1_len
Y_domain[i*2, domain] = 1.
Y_domain[i*2+1, domain] = 1.
return X1, X1_len, X2, X2_len, Y_domain
def get_batch_iter(self):
while True:
self.pair_list = self.pair_list_iter.next()
for _ in range(self.config['batch_per_iter']):
X1 = np.zeros((self.batch_size*2, self.data1_maxlen), dtype=np.int32)
X1_len = np.zeros((self.batch_size*2,), dtype=np.int32)
X2 = np.zeros((self.batch_size*2, self.data2_maxlen), dtype=np.int32)
X2_len = np.zeros((self.batch_size*2,), dtype=np.int32)
Y = np.zeros((self.batch_size*2,), dtype=np.int32)
Y[::2] = 1
X1[:] = self.fill_word
X2[:] = self.fill_word
for i in range(self.batch_size):
d1, d2p, d2n = random.choice(self.pair_list)
d1_len = min(self.data1_maxlen, len(self.data1[d1]))
d2p_len = min(self.data2_maxlen, len(self.data2[d2p]))
d2n_len = min(self.data2_maxlen, len(self.data2[d2n]))
X1[i*2, :d1_len], X1_len[i*2] = self.data1[d1][:d1_len], d1_len
X2[i*2, :d2p_len], X2_len[i*2] = self.data2[d2p][:d2p_len], d2p_len
X1[i*2+1, :d1_len], X1_len[i*2+1] = self.data1[d1][:d1_len], d1_len
X2[i*2+1, :d2n_len], X2_len[i*2+1] = self.data2[d2n][:d2n_len], d2n_len
yield X1, X1_len, X2, X2_len, Y
def get_batch_generator(self):
while True:
X1, X1_len, X2, X2_len, Y = self.get_batch()
if self.config['use_dpool']:
yield ({'query': X1, 'query_len': X1_len, 'doc': X2, 'doc_len': X2_len, 'dpool_index': DynamicMaxPooling.dynamic_pooling_index(X1_len, X2_len, self.config['text1_maxlen'], self.config['text2_maxlen'])}, Y)
else:
yield ({'query': X1, 'query_len': X1_len, 'doc': X2, 'doc_len': X2_len}, Y)
class DMN_PairGeneratorTopicDomainsWithLabels(PairBasicGenerator):
    """Pair generator that attaches a one-hot topic-category label per row.

    Categories come from a CSV mapping query id ('Q') to 'category'; integer
    ids are assigned in sorted order over the categories seen in the CSV.
    """
    def __init__(self, config):
        super(DMN_PairGeneratorTopicDomainsWithLabels, self).__init__(config=config)
        self.__name = 'DMN_PairGeneratorTopicDomainsWithLabels'
        self.config = config
        self.data1 = config['data1']  # query id -> list of context utterances
        self.data2 = config['data2']  # doc id -> list holding the response text
        self.data1_maxlen = config['text1_maxlen']
        self.data1_max_utt_num = int(config['text1_max_utt_num'])
        self.data2_maxlen = config['text2_maxlen']
        # Last vocabulary index is the added PAD word used to pre-fill tensors.
        self.fill_word = config['vocab_size'] - 1
        self.check_list.extend(['data1', 'data2', 'text1_maxlen', 'text2_maxlen', 'text1_max_utt_num'])
        cat_df = pd.read_csv(config['query_to_category_file'])
        self.queries_to_cat = {}
        for idx, row in cat_df.iterrows():
            self.queries_to_cat[row['Q']]=row['category']
        # Deterministic category -> integer id mapping (sorted category names).
        self.cat_to_id = {}
        i=0
        for cat in sorted(cat_df['category'].unique()):
            self.cat_to_id[cat] = i
            i+=1
        # NOTE(review): presumably equals len(self.cat_to_id) -- confirm config.
        self.num_cat = config['number_of_categories']
        if config['use_iter']:
            self.batch_iter = self.get_batch_iter()
        if not self.check():
            raise TypeError('[DMN_PairGeneratorTopicDomainsWithLabels] parameter check wrong.')
        print '[DMN_PairGeneratorTopicDomainsWithLabels] init done'
    def get_batch_static(self):
        """Build one batch; return (X1, X1_len, X2, X2_len, Y_domain) where
        Y_domain is the (2B, num_cat) one-hot topic label shared by the
        positive/negative rows of each pair."""
        X1 = np.zeros((self.batch_size*2, self.data1_max_utt_num, self.data1_maxlen), dtype=np.int32) # max 10 turns
        X1_len = np.zeros((self.batch_size*2, self.data1_max_utt_num), dtype=np.int32) # max 10 turns
        X2 = np.zeros((self.batch_size*2, self.data2_maxlen), dtype=np.int32)
        X2_len = np.zeros((self.batch_size*2,), dtype=np.int32)
        Y = np.zeros((self.batch_size*2,), dtype=np.int32)
        Y_domain = np.zeros((self.batch_size*2, self.num_cat), dtype=np.int32)
        Y[::2] = 1 # [1,0,1,0,1,0,...]
        X1[:] = self.fill_word # the default word index is the last word, which is the added PAD word
        X2[:] = self.fill_word
        for i in range(self.batch_size):
            d1, d2p, d2n = random.choice(self.pair_list)
            domain = self.cat_to_id[self.queries_to_cat[d1]]
            # An empty response is replaced by a single PAD token.
            if len(self.data2[d2p]) == 0:
                d2p_ws = [self.fill_word]
            else:
                d2p_ws = self.data2[d2p][0].split()
            if len(self.data2[d2n]) == 0:
                d2n_ws = [self.fill_word]
            else:
                d2n_ws = self.data2[d2n][0].split()
            d2p_len = min(self.data2_maxlen, len(d2p_ws))
            d2n_len = min(self.data2_maxlen, len(d2n_ws))
            # Even row: positive response; odd row: sampled negative.
            X2[i * 2, :d2p_len], X2_len[i * 2] = d2p_ws[:d2p_len], d2p_len
            X2[i * 2 + 1, :d2n_len], X2_len[i * 2 + 1] = d2n_ws[:d2n_len], d2n_len
            # if len(self.data1[d1]) > max_utt_num, keep the most recent ones
            utt_start = 0 if len(self.data1[d1]) < self.data1_max_utt_num else (len(self.data1[d1])-self.data1_max_utt_num)
            for j in range(utt_start, len(self.data1[d1])):
                d1_ws = self.data1[d1][j].split()
                d1_len = min(self.data1_maxlen, len(d1_ws))
                # Positive and negative rows share the same context.
                X1[i*2, j-utt_start, :d1_len], X1_len[i*2, j-utt_start] = d1_ws[:d1_len], d1_len
                X1[i*2+1, j-utt_start, :d1_len], X1_len[i*2+1, j-utt_start] = d1_ws[:d1_len], d1_len
            Y_domain[i*2, domain] = 1.
            Y_domain[i*2+1, domain] = 1.
        return X1, X1_len, X2, X2_len, Y_domain
    def get_batch_iter(self):
        # NOTE(review): iterator mode packs X1 as 2-D (no utterance axis),
        # unlike get_batch_static -- looks legacy; confirm before enabling use_iter.
        while True:
            self.pair_list = self.pair_list_iter.next()
            for _ in range(self.config['batch_per_iter']):
                X1 = np.zeros((self.batch_size*2, self.data1_maxlen), dtype=np.int32)
                X1_len = np.zeros((self.batch_size*2,), dtype=np.int32)
                X2 = np.zeros((self.batch_size*2, self.data2_maxlen), dtype=np.int32)
                X2_len = np.zeros((self.batch_size*2,), dtype=np.int32)
                Y = np.zeros((self.batch_size*2,), dtype=np.int32)
                Y[::2] = 1
                X1[:] = self.fill_word
                X2[:] = self.fill_word
                for i in range(self.batch_size):
                    d1, d2p, d2n = random.choice(self.pair_list)
                    d1_len = min(self.data1_maxlen, len(self.data1[d1]))
                    d2p_len = min(self.data2_maxlen, len(self.data2[d2p]))
                    d2n_len = min(self.data2_maxlen, len(self.data2[d2n]))
                    X1[i*2, :d1_len], X1_len[i*2] = self.data1[d1][:d1_len], d1_len
                    X2[i*2, :d2p_len], X2_len[i*2] = self.data2[d2p][:d2p_len], d2p_len
                    X1[i*2+1, :d1_len], X1_len[i*2+1] = self.data1[d1][:d1_len], d1_len
                    X2[i*2+1, :d2n_len], X2_len[i*2+1] = self.data2[d2n][:d2n_len], d2n_len
                yield X1, X1_len, X2, X2_len, Y
    def get_batch_generator(self):
        """Yield Keras-style ({inputs}, Y) tuples built from get_batch()."""
        while True:
            X1, X1_len, X2, X2_len, Y = self.get_batch()
            if self.config['use_dpool']:
                yield ({'query': X1, 'query_len': X1_len, 'doc': X2, 'doc_len': X2_len, 'dpool_index': DynamicMaxPooling.dynamic_pooling_index(X1_len, X2_len, self.config['text1_maxlen'], self.config['text2_maxlen'])}, Y)
            else:
                yield ({'query': X1, 'query_len': X1_len, 'doc': X2, 'doc_len': X2_len}, Y)
class DMN_PairGeneratorFilterMultipleTargetTopic(PairBasicGenerator):
    """Pair generator that skips queries whose topic category is in the
    comma-separated ``test_categories`` list (leave-topics-out training)."""
    def __init__(self, config):
        super(DMN_PairGeneratorFilterMultipleTargetTopic, self).__init__(config=config)
        self.__name = 'DMN_PairGeneratorFilterMultipleTargetTopic'
        self.config = config
        self.data1 = config['data1']  # query id -> list of context utterances
        self.data2 = config['data2']  # doc id -> list holding the response text
        self.data1_maxlen = config['text1_maxlen']
        self.data1_max_utt_num = int(config['text1_max_utt_num'])
        self.data2_maxlen = config['text2_maxlen']
        # Last vocabulary index is the added PAD word used to pre-fill tensors.
        self.fill_word = config['vocab_size'] - 1
        self.check_list.extend(['data1', 'data2', 'text1_maxlen', 'text2_maxlen', 'text1_max_utt_num'])
        cat_df = pd.read_csv(config['query_to_category_file'])
        self.queries_to_cat = {}
        # Categories to exclude from training batches.
        self.cat_to_filter = config['test_categories'].split(",")
        for idx, row in cat_df.iterrows():
            self.queries_to_cat[row['Q']]=row['category']
        if config['use_iter']:
            self.batch_iter = self.get_batch_iter()
        if not self.check():
            raise TypeError('[DMN_PairGeneratorFilterMultipleTargetTopic] parameter check wrong.')
        print '[DMN_PairGeneratorFilterMultipleTargetTopic] init done'
    def get_batch_static(self):
        """Build one batch, resampling any pair whose query category is
        filtered; return (X1, X1_len, X2, X2_len, Y)."""
        X1 = np.zeros((self.batch_size*2, self.data1_max_utt_num, self.data1_maxlen), dtype=np.int32) # max 10 turns
        X1_len = np.zeros((self.batch_size*2, self.data1_max_utt_num), dtype=np.int32) # max 10 turns
        X2 = np.zeros((self.batch_size*2, self.data2_maxlen), dtype=np.int32)
        X2_len = np.zeros((self.batch_size*2,), dtype=np.int32)
        Y = np.zeros((self.batch_size*2,), dtype=np.int32)
        Y[::2] = 1 # [1,0,1,0,1,0,...]
        X1[:] = self.fill_word # the default word index is the last word, which is the added PAD word
        X2[:] = self.fill_word
        for i in range(self.batch_size):
            d1, d2p, d2n = random.choice(self.pair_list)
            domain = self.queries_to_cat[d1]
            # NOTE(review): loops forever if every query's category is
            # filtered -- assumes at least one unfiltered query exists.
            while domain in self.cat_to_filter:
                d1, d2p, d2n = random.choice(self.pair_list)
                domain = self.queries_to_cat[d1]
            # An empty response is replaced by a single PAD token.
            if len(self.data2[d2p]) == 0:
                d2p_ws = [self.fill_word]
            else:
                d2p_ws = self.data2[d2p][0].split()
            if len(self.data2[d2n]) == 0:
                d2n_ws = [self.fill_word]
            else:
                d2n_ws = self.data2[d2n][0].split()
            d2p_len = min(self.data2_maxlen, len(d2p_ws))
            d2n_len = min(self.data2_maxlen, len(d2n_ws))
            # Even row: positive response; odd row: sampled negative.
            X2[i * 2, :d2p_len], X2_len[i * 2] = d2p_ws[:d2p_len], d2p_len
            X2[i * 2 + 1, :d2n_len], X2_len[i * 2 + 1] = d2n_ws[:d2n_len], d2n_len
            # if len(self.data1[d1]) > max_utt_num, keep the most recent ones
            utt_start = 0 if len(self.data1[d1]) < self.data1_max_utt_num else (len(self.data1[d1])-self.data1_max_utt_num)
            for j in range(utt_start, len(self.data1[d1])):
                d1_ws = self.data1[d1][j].split()
                d1_len = min(self.data1_maxlen, len(d1_ws))
                # Positive and negative rows share the same context.
                X1[i*2, j-utt_start, :d1_len], X1_len[i*2, j-utt_start] = d1_ws[:d1_len], d1_len
                X1[i*2+1, j-utt_start, :d1_len], X1_len[i*2+1, j-utt_start] = d1_ws[:d1_len], d1_len
        return X1, X1_len, X2, X2_len, Y
    def get_batch_iter(self):
        # NOTE(review): iterator mode packs X1 as 2-D (no utterance axis) and
        # applies no category filter -- looks legacy; confirm before use.
        while True:
            self.pair_list = self.pair_list_iter.next()
            for _ in range(self.config['batch_per_iter']):
                X1 = np.zeros((self.batch_size*2, self.data1_maxlen), dtype=np.int32)
                X1_len = np.zeros((self.batch_size*2,), dtype=np.int32)
                X2 = np.zeros((self.batch_size*2, self.data2_maxlen), dtype=np.int32)
                X2_len = np.zeros((self.batch_size*2,), dtype=np.int32)
                Y = np.zeros((self.batch_size*2,), dtype=np.int32)
                Y[::2] = 1
                X1[:] = self.fill_word
                X2[:] = self.fill_word
                for i in range(self.batch_size):
                    d1, d2p, d2n = random.choice(self.pair_list)
                    d1_len = min(self.data1_maxlen, len(self.data1[d1]))
                    d2p_len = min(self.data2_maxlen, len(self.data2[d2p]))
                    d2n_len = min(self.data2_maxlen, len(self.data2[d2n]))
                    X1[i*2, :d1_len], X1_len[i*2] = self.data1[d1][:d1_len], d1_len
                    X2[i*2, :d2p_len], X2_len[i*2] = self.data2[d2p][:d2p_len], d2p_len
                    X1[i*2+1, :d1_len], X1_len[i*2+1] = self.data1[d1][:d1_len], d1_len
                    X2[i*2+1, :d2n_len], X2_len[i*2+1] = self.data2[d2n][:d2n_len], d2n_len
                yield X1, X1_len, X2, X2_len, Y
    def get_batch_generator(self):
        """Yield Keras-style ({inputs}, Y) tuples built from get_batch()."""
        while True:
            X1, X1_len, X2, X2_len, Y = self.get_batch()
            if self.config['use_dpool']:
                yield ({'query': X1, 'query_len': X1_len, 'doc': X2, 'doc_len': X2_len, 'dpool_index': DynamicMaxPooling.dynamic_pooling_index(X1_len, X2_len, self.config['text1_maxlen'], self.config['text2_maxlen'])}, Y)
            else:
                yield ({'query': X1, 'query_len': X1_len, 'doc': X2, 'doc_len': X2_len}, Y)
class DMN_PairGeneratorFilterTargetTopic(PairBasicGenerator):
    """Pair generator that skips queries belonging to the single held-out
    ``test_category`` (leave-one-topic-out training)."""
    def __init__(self, config):
        super(DMN_PairGeneratorFilterTargetTopic, self).__init__(config=config)
        self.__name = 'DMN_PairGeneratorFilterTargetTopic'
        self.config = config
        self.data1 = config['data1']  # query id -> list of context utterances
        self.data2 = config['data2']  # doc id -> list holding the response text
        self.data1_maxlen = config['text1_maxlen']
        self.data1_max_utt_num = int(config['text1_max_utt_num'])
        self.data2_maxlen = config['text2_maxlen']
        # Last vocabulary index is the added PAD word used to pre-fill tensors.
        self.fill_word = config['vocab_size'] - 1
        self.check_list.extend(['data1', 'data2', 'text1_maxlen', 'text2_maxlen', 'text1_max_utt_num'])
        cat_df = pd.read_csv(config['query_to_category_file'])
        self.queries_to_cat = {}
        # Single category excluded from training batches.
        self.cat_to_filter = config['test_category']
        for idx, row in cat_df.iterrows():
            self.queries_to_cat[row['Q']]=row['category']
        if config['use_iter']:
            self.batch_iter = self.get_batch_iter()
        if not self.check():
            raise TypeError('[DMN_PairGeneratorFilterTargetTopic] parameter check wrong.')
        print '[DMN_PairGeneratorFilterTargetTopic] init done'
    def get_batch_static(self):
        """Build one batch, resampling any pair whose query category equals
        the filtered one; return (X1, X1_len, X2, X2_len, Y)."""
        X1 = np.zeros((self.batch_size*2, self.data1_max_utt_num, self.data1_maxlen), dtype=np.int32) # max 10 turns
        X1_len = np.zeros((self.batch_size*2, self.data1_max_utt_num), dtype=np.int32) # max 10 turns
        X2 = np.zeros((self.batch_size*2, self.data2_maxlen), dtype=np.int32)
        X2_len = np.zeros((self.batch_size*2,), dtype=np.int32)
        Y = np.zeros((self.batch_size*2,), dtype=np.int32)
        Y[::2] = 1 # [1,0,1,0,1,0,...]
        X1[:] = self.fill_word # the default word index is the last word, which is the added PAD word
        X2[:] = self.fill_word
        for i in range(self.batch_size):
            d1, d2p, d2n = random.choice(self.pair_list)
            domain = self.queries_to_cat[d1]
            # NOTE(review): loops forever if every query belongs to the
            # filtered category -- assumes other categories exist.
            while domain == self.cat_to_filter:
                d1, d2p, d2n = random.choice(self.pair_list)
                domain = self.queries_to_cat[d1]
            # An empty response is replaced by a single PAD token.
            if len(self.data2[d2p]) == 0:
                d2p_ws = [self.fill_word]
            else:
                d2p_ws = self.data2[d2p][0].split()
            if len(self.data2[d2n]) == 0:
                d2n_ws = [self.fill_word]
            else:
                d2n_ws = self.data2[d2n][0].split()
            d2p_len = min(self.data2_maxlen, len(d2p_ws))
            d2n_len = min(self.data2_maxlen, len(d2n_ws))
            # Even row: positive response; odd row: sampled negative.
            X2[i * 2, :d2p_len], X2_len[i * 2] = d2p_ws[:d2p_len], d2p_len
            X2[i * 2 + 1, :d2n_len], X2_len[i * 2 + 1] = d2n_ws[:d2n_len], d2n_len
            # if len(self.data1[d1]) > max_utt_num, keep the most recent ones
            utt_start = 0 if len(self.data1[d1]) < self.data1_max_utt_num else (len(self.data1[d1])-self.data1_max_utt_num)
            for j in range(utt_start, len(self.data1[d1])):
                d1_ws = self.data1[d1][j].split()
                d1_len = min(self.data1_maxlen, len(d1_ws))
                # Positive and negative rows share the same context.
                X1[i*2, j-utt_start, :d1_len], X1_len[i*2, j-utt_start] = d1_ws[:d1_len], d1_len
                X1[i*2+1, j-utt_start, :d1_len], X1_len[i*2+1, j-utt_start] = d1_ws[:d1_len], d1_len
        return X1, X1_len, X2, X2_len, Y
    def get_batch_iter(self):
        # NOTE(review): iterator mode packs X1 as 2-D (no utterance axis) and
        # applies no category filter -- looks legacy; confirm before use.
        while True:
            self.pair_list = self.pair_list_iter.next()
            for _ in range(self.config['batch_per_iter']):
                X1 = np.zeros((self.batch_size*2, self.data1_maxlen), dtype=np.int32)
                X1_len = np.zeros((self.batch_size*2,), dtype=np.int32)
                X2 = np.zeros((self.batch_size*2, self.data2_maxlen), dtype=np.int32)
                X2_len = np.zeros((self.batch_size*2,), dtype=np.int32)
                Y = np.zeros((self.batch_size*2,), dtype=np.int32)
                Y[::2] = 1
                X1[:] = self.fill_word
                X2[:] = self.fill_word
                for i in range(self.batch_size):
                    d1, d2p, d2n = random.choice(self.pair_list)
                    d1_len = min(self.data1_maxlen, len(self.data1[d1]))
                    d2p_len = min(self.data2_maxlen, len(self.data2[d2p]))
                    d2n_len = min(self.data2_maxlen, len(self.data2[d2n]))
                    X1[i*2, :d1_len], X1_len[i*2] = self.data1[d1][:d1_len], d1_len
                    X2[i*2, :d2p_len], X2_len[i*2] = self.data2[d2p][:d2p_len], d2p_len
                    X1[i*2+1, :d1_len], X1_len[i*2+1] = self.data1[d1][:d1_len], d1_len
                    X2[i*2+1, :d2n_len], X2_len[i*2+1] = self.data2[d2n][:d2n_len], d2n_len
                yield X1, X1_len, X2, X2_len, Y
    def get_batch_generator(self):
        """Yield Keras-style ({inputs}, Y) tuples built from get_batch()."""
        while True:
            X1, X1_len, X2, X2_len, Y = self.get_batch()
            if self.config['use_dpool']:
                yield ({'query': X1, 'query_len': X1_len, 'doc': X2, 'doc_len': X2_len, 'dpool_index': DynamicMaxPooling.dynamic_pooling_index(X1_len, X2_len, self.config['text1_maxlen'], self.config['text2_maxlen'])}, Y)
            else:
                yield ({'query': X1, 'query_len': X1_len, 'doc': X2, 'doc_len': X2_len}, Y)
class PairGenerator_Feats(PairBasicGenerator):
def __init__(self, config):
super(PairGenerator_Feats, self).__init__(config=config)
self.__name = 'PairGenerator'
self.config = config
self.check_list.extend(['data1', 'data2', 'text1_maxlen', 'text2_maxlen', 'pair_feat_size', 'pair_feat_file', 'query_feat_size', 'query_feat_file'])
if not self.check():
raise TypeError('[PairGenerator] parameter check wrong.')
self.data1 = config['data1']
self.data2 = config['data2']
self.data1_maxlen = config['text1_maxlen']
self.data2_maxlen = config['text2_maxlen']
self.fill_word = config['vocab_size'] - 1
self.pair_feat_size = config['pair_feat_size']
self.query_feat_size = config['query_feat_size']
pair_feats = read_features_without_id(config['pair_feat_file'])
self.query_feats = read_features_with_id(config['query_feat_file'])
self.pair_feats = {}
for idx, (label, d1, d2) in enumerate(self.rel):
self.pair_feats[(d1, d2)] = pair_feats[idx]
if config['use_iter']:
self.batch_iter = self.get_batch_iter()
print '[PairGenerator] init done'
def get_batch_static(self):
X1 = np.zeros((self.batch_size*2, self.data1_maxlen), dtype=np.int32)
X1_len = np.zeros((self.batch_size*2,), dtype=np.int32)
X2 = np.zeros((self.batch_size*2, self.data2_maxlen), dtype=np.int32)
X2_len = np.zeros((self.batch_size*2,), dtype=np.int32)
X3 = np.zeros((self.batch_size * 2, self.pair_feat_size), dtype=np.float32)
X4 = np.zeros((self.batch_size * 2, self.query_feat_size), dtype=np.float32)
Y = np.zeros((self.batch_size*2,), dtype=np.int32)
Y[::2] = 1
X1[:] = self.fill_word
X2[:] = self.fill_word
for i in range(self.batch_size):
d1, d2p, d2n = random.choice(self.pair_list)
d1_len = min(self.data1_maxlen, len(self.data1[d1]))
d2p_len = min(self.data2_maxlen, len(self.data2[d2p]))
d2n_len = min(self.data2_maxlen, len(self.data2[d2n]))
X1[i*2, :d1_len], X1_len[i*2] = self.data1[d1][:d1_len], d1_len
X2[i*2, :d2p_len], X2_len[i*2] = self.data2[d2p][:d2p_len], d2p_len
X3[i*2, :self.pair_feat_size] = self.pair_feats[(d1, d2p)][:self.pair_feat_size]
X4[i*2, :self.query_feat_size] = self.query_feats[d1][:self.query_feat_size]
X1[i*2+1, :d1_len], X1_len[i*2+1] = self.data1[d1][:d1_len], d1_len
X2[i*2+1, :d2n_len], X2_len[i*2+1] = self.data2[d2n][:d2n_len], d2n_len
X3[i*2+1, :self.pair_feat_size] = self.pair_feats[(d1, d2n)][:self.pair_feat_size]
X4[i*2+1, :self.query_feat_size] = self.query_feats[d1][:self.query_feat_size]
return X1, X1_len, X2, X2_len, X3, X4, Y
def get_batch_iter(self):
while True:
self.pair_list = self.pair_list_iter.next()
for _ in range(self.config['batch_per_iter']):
X1 = np.zeros((self.batch_size*2, self.data1_maxlen), dtype=np.int32)
X1_len = np.zeros((self.batch_size*2,), dtype=np.int32)
X2 = np.zeros((self.batch_size*2, self.data2_maxlen), dtype=np.int32)
X2_len = np.zeros((self.batch_size*2,), dtype=np.int32)
X3 = np.zeros((self.batch_size*2, self.pair_feat_size), dtype=np.float32)
X4 = np.zeros((self.batch_size*2, self.query_feat_size), dtype=np.int32)
Y = np.zeros((self.batch_size*2,), dtype=np.int32)
Y[::2] = 1
X1[:] = self.fill_word
X2[:] = self.fill_word
for i in range(self.batch_size):
d1, d2p, d2n = random.choice(self.pair_list)
d1_len = min(self.data1_maxlen, len(self.data1[d1]))
d2p_len = min(self.data2_maxlen, len(self.data2[d2p]))
d2n_len = min(self.data2_maxlen, len(self.data2[d2n]))
X1[i*2, :d1_len], X1_len[i*2] = self.data1[d1][:d1_len], d1_len
X2[i*2, :d2p_len], X2_len[i*2] = self.data2[d2p][:d2p_len], d2p_len
X3[i*2, :self.pair_feat_size] = self.pair_feats[(d1, d2p)][:self.pair_feat_size]
X4[i*2, :d1_len] = self.query_feats[d1][:self.query_feat_size]
X1[i*2+1, :d1_len], X1_len[i*2+1] = self.data1[d1][:d1_len], d1_len
X2[i*2+1, :d2n_len], X2_len[i*2+1] = self.data2[d2n][:d2n_len], d2n_len
X3[i*2+1, :self.pair_feat_size] = self.pair_feats[(d1, d2n)][:self.pair_feat_size]
X4[i*2+1, :d1_len] = self.query_feats[d1][:self.query_feat_size]
yield X1, X1_len, X2, X2_len, X3, X4, Y
def get_batch_generator(self):
while True:
X1, X1_len, X2, X2_len, X3, X4, Y = self.get_batch()
yield ({'query': X1, 'query_len': X1_len, 'doc': X2, 'doc_len': X2_len, 'query_feats': X4, 'pair_feats': X3}, Y)
| 51.893665
| 221
| 0.56661
| 9,956
| 68,811
| 3.673463
| 0.02732
| 0.054384
| 0.049053
| 0.044022
| 0.884806
| 0.867498
| 0.858393
| 0.83723
| 0.829574
| 0.8258
| 0
| 0.061342
| 0.294241
| 68,811
| 1,325
| 222
| 51.93283
| 0.691747
| 0.083693
| 0
| 0.779614
| 0
| 0
| 0.070521
| 0.013704
| 0
| 0
| 0
| 0
| 0.000918
| 0
| null | null | 0.002755
| 0.007346
| null | null | 0.016529
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
2683750aef396583076a39b51df8b2aa5d956e20
| 12,121
|
py
|
Python
|
tests/test_cv.py
|
akuhnregnier/empirical-fire-modelling
|
4187f5bfce0595d98361a9264793c25607043047
|
[
"MIT"
] | null | null | null |
tests/test_cv.py
|
akuhnregnier/empirical-fire-modelling
|
4187f5bfce0595d98361a9264793c25607043047
|
[
"MIT"
] | null | null | null |
tests/test_cv.py
|
akuhnregnier/empirical-fire-modelling
|
4187f5bfce0595d98361a9264793c25607043047
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from empirical_fire_modelling.data.cv import apply_structure
@pytest.mark.parametrize(
"array, structure, expected",
[
(
np.array(
[
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
],
dtype=np.bool_,
),
np.array([[1]], dtype=np.bool_),
np.array(
[
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
],
dtype=np.bool_,
),
),
(
np.array(
[
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
],
dtype=np.bool_,
),
np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]], dtype=np.bool_),
np.array(
[
[0, 0, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 1, 1, 1, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 0, 0],
],
dtype=np.bool_,
),
),
(
np.array(
[
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
],
dtype=np.bool_,
),
np.array(
[
[0, 1, 1, 1, 0],
[0, 1, 1, 1, 0],
[1, 1, 1, 1, 1],
[0, 1, 1, 1, 0],
[0, 1, 1, 1, 0],
],
dtype=np.bool_,
),
np.array(
[
[0, 1, 1, 1, 0],
[0, 1, 1, 1, 0],
[1, 1, 1, 1, 1],
[0, 1, 1, 1, 0],
[0, 1, 1, 1, 0],
],
dtype=np.bool_,
),
),
(
np.array(
[
[0, 0, 1, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
],
dtype=np.bool_,
),
np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]], dtype=np.bool_),
np.array(
[
[0, 1, 1, 1, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
],
dtype=np.bool_,
),
),
(
np.array(
[
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 1, 0, 0],
],
dtype=np.bool_,
),
np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]], dtype=np.bool_),
np.array(
[
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 1, 1, 1, 0],
],
dtype=np.bool_,
),
),
(
np.array(
[
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[1, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
],
dtype=np.bool_,
),
np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]], dtype=np.bool_),
np.array(
[
[0, 0, 0, 0, 0],
[1, 0, 0, 0, 0],
[1, 1, 0, 0, 1],
[1, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
],
dtype=np.bool_,
),
),
(
np.array(
[
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
],
dtype=np.bool_,
),
np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]], dtype=np.bool_),
np.array(
[
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 1],
[1, 0, 0, 1, 1],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 0],
],
dtype=np.bool_,
),
),
(
np.array(
[
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
],
dtype=np.bool_,
),
np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]], dtype=np.bool_),
np.array(
[
[1, 0, 0, 1, 1],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
],
dtype=np.bool_,
),
),
(
np.array(
[
[1, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
],
dtype=np.bool_,
),
np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]], dtype=np.bool_),
np.array(
[
[1, 1, 0, 0, 1],
[1, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
],
dtype=np.bool_,
),
),
(
np.array(
[
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[1, 0, 0, 0, 0],
],
dtype=np.bool_,
),
np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]], dtype=np.bool_),
np.array(
[
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[1, 0, 0, 0, 0],
[1, 1, 0, 0, 1],
],
dtype=np.bool_,
),
),
(
np.array(
[
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 1],
],
dtype=np.bool_,
),
np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]], dtype=np.bool_),
np.array(
[
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 1],
[1, 0, 0, 1, 1],
],
dtype=np.bool_,
),
),
(
np.array(
[
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 0],
],
dtype=np.bool_,
),
np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]], dtype=np.bool_),
np.array(
[
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 1],
[1, 0, 0, 1, 1],
[0, 0, 0, 0, 1],
],
dtype=np.bool_,
),
),
(
np.array(
[
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
],
dtype=np.bool_,
),
np.array(
[
[0, 0, 0, 1, 0],
[0, 0, 0, 1, 0],
[1, 1, 1, 1, 1],
[0, 0, 1, 1, 0],
[0, 0, 1, 1, 0],
],
dtype=np.bool_,
),
np.array(
[
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 1, 1, 0, 0],
[0, 0, 0, 0, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
],
dtype=np.bool_,
),
),
(
np.array(
[
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
],
dtype=np.bool_,
),
np.array(
[
[0, 0, 0, 1, 0],
[0, 0, 0, 1, 0],
[1, 1, 1, 1, 1],
[0, 0, 1, 1, 0],
[0, 0, 1, 1, 0],
],
dtype=np.bool_,
),
np.array(
[
[0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 1, 1],
[0, 0, 0, 0, 0, 0, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
],
dtype=np.bool_,
),
),
(
np.array(
[
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0],
],
dtype=np.bool_,
),
np.array(
[
[0, 0, 0, 1, 0],
[0, 0, 0, 1, 0],
[1, 1, 1, 1, 1],
[0, 0, 1, 1, 0],
[0, 0, 1, 1, 0],
],
dtype=np.bool_,
),
np.array(
[
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 1, 1, 1, 1],
],
dtype=np.bool_,
),
),
],
)
def test_apply_structure(array, structure, expected):
    """Check that ``apply_structure(array, structure)`` equals *expected*.

    Uses ``np.testing.assert_array_equal`` instead of
    ``assert np.all(result == expected)``: the latter can pass spuriously
    when broadcasting makes mismatched shapes comparable, or when the
    element-wise comparison is empty (``np.all([])`` is True).  The
    testing helper also verifies shapes and prints a readable diff of
    the mismatching elements on failure.
    """
    np.testing.assert_array_equal(apply_structure(array, structure), expected)
| 29.708333
| 72
| 0.182328
| 1,391
| 12,121
| 1.552121
| 0.020129
| 0.718851
| 0.955998
| 1.141269
| 0.930523
| 0.89208
| 0.89208
| 0.89208
| 0.89208
| 0.89069
| 0
| 0.263983
| 0.647471
| 12,121
| 407
| 73
| 29.781327
| 0.241282
| 0.001733
| 0
| 0.848635
| 0
| 0
| 0.002149
| 0
| 0
| 0
| 0
| 0
| 0.002481
| 1
| 0.002481
| false
| 0
| 0.007444
| 0
| 0.009926
| 0
| 0
| 0
| 1
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 14
|
cd19ce56dc74834b266c021060f7f8d8e5ecce11
| 95,331
|
py
|
Python
|
nesi/bootup/conf/bootstraps/create_alcatel_7360.py
|
inexio/NESi
|
920b23ccaf293733b4b571e4df27929c036257f7
|
[
"BSD-2-Clause"
] | 30
|
2020-09-03T06:02:38.000Z
|
2022-03-11T16:34:18.000Z
|
nesi/bootup/conf/bootstraps/create_alcatel_7360.py
|
inexio/NESi
|
920b23ccaf293733b4b571e4df27929c036257f7
|
[
"BSD-2-Clause"
] | 2
|
2021-01-15T10:33:23.000Z
|
2021-02-21T21:04:37.000Z
|
nesi/bootup/conf/bootstraps/create_alcatel_7360.py
|
inexio/NESi
|
920b23ccaf293733b4b571e4df27929c036257f7
|
[
"BSD-2-Clause"
] | 3
|
2020-12-19T09:11:19.000Z
|
2022-02-07T22:15:34.000Z
|
# This file is part of the NESi software.
#
# Copyright (c) 2020
# Original Software Design by Ilya Etingof <https://github.com/etingof>.
#
# Software adapted by inexio <https://github.com/inexio>.
# - Janis Groß <https://github.com/unkn0wn-user>
# - Philip Konrath <https://github.com/Connyko65>
# - Alexander Dincher <https://github.com/Dinker1996>
#
# License: https://github.com/inexio/NESi/LICENSE.rst
#
# Example NESi REST API server bootstrapping
#
from .function import create_resource
import time
# --------------------------------------------------------#
# #
# Subrack 1/1 #
# |---> Card 1/1/1 (xdsl) #
# | |-> Port 1/1/1/1 #
# | | |-> Cpe 1/1/1/1/1 #
# | | |-> CpePort 1/1/1/1/1/1 #
# | |-> Port 1/1/1/2 #
# | | |-> Cpe 1/1/1/2/1 #
# | | |-> CpePort 1/1/1/2/1/1 #
# | |-> Port 1/1/1/3 #
# | #
# |---> Card 1/1/2 (vdsl) #
# | |-> Port 1/1/2/1 #
# | | |-> Cpe 1/1/2/1/1 #
# | | |-> CpePort 1/1/2/1/1/1 #
# | |-> Port 1/1/2/2 #
# | | |-> Cpe 1/1/2/2/1 #
# | | |-> CpePort 1/1/2/2/1/1 #
# | |-> Port 1/1/2/3 #
# | #
# |---> Card 1/1/3 (adsl) #
# | |-> Port 1/1/3/1 #
# | | |-> Cpe 1/1/3/1/1 #
# | | |-> CpePort 1/1/3/1/1/1 #
# | |-> Port 1/1/3/2 #
# | | |-> Cpe 1/1/3/2/1 #
# | | |-> CpePort 1/1/3/2/1/1 #
# | |-> Port 1/1/3/3 #
# | #
# |---> Card 1/1/4 (ftth) #
# | |-> Port 1/1/4/1 #
# | | |-> Ont 1/1/4/1/1 #
# | | |-> OntPort 1/1/4/1/1/1/1 #
# | | |-> Cpe 1/1/4/1/1/1/1/1 #
# | | |-> CpePort 1/1/4/1/1/1/1/1/1 #
# | |-> Port 1/1/4/2 #
# | | |-> Ont 1/1/4/2/1 #
# | | |-> OntPort 1/1/4/2/1/1/1 #
# | | |-> Cpe 1/1/4/2/1/1/1/1 #
# | | |-> CpePort 1/1/4/2/1/1/1/1/1 #
# | |-> Port 1/1/4/3 #
# | #
# |---> Card 1/1/5 (ftth-pon) #
# |-> Port 1/1/5/1 #
# |-> Ont 1/1/5/1/1 #
# | |-> OntPort 1/1/5/1/1/1/1 #
# | |-> Cpe 1/1/5/1/1/1/1/1 #
# | |-> CpePort 1/1/5/1/1/1/1/1/1 #
# |-> Ont 1/1/5/1/2 #
# |-> OntPort 1/1/5/1/2/1/1 #
# | |-> Cpe 1/1/5/1/2/1/1/1 #
# | |-> CpePort 1/1/5/1/2/1/1/1/1 #
# |-> OntPort 1/1/5/1/2/1/2 #
# |-> Cpe 1/1/5/1/2/1/2/1 #
# |-> CpePort 1/1/5/1/2/1/2/1/1 #
# #
# --------------------------------------------------------#
def create_alcatel():
endpoint = 'http://localhost:5000/nesi/v1'
time.sleep(1)
# Create a network device (admin operation)
req = {
"vendor": "Alcatel",
"model": "7360",
"version": "FX-4",
"description": "Aclatel Switch",
"hostname": "Alcatel_7360",
"mgmt_address": "10.0.0.1",
"network_protocol": "telnet",
"network_address": "127.0.0.1",
"network_port": 9023,
"software_version": "R5.5.02",
"login_banner": " ___ __ ______ ___ .___________. _______ __\r\n / \\ | | / | / \\ | || ____|| |\r\n / ^ \\ | | | ,----` / ^ \\ `---| |----`| |__ | |\r\n / /_\\ \\ | | | | / /_\\ \\ | | | __| | |\r\n / _____ \\ | `----.| `----./ _____ \\ | | | |____ | `----.\r\n/__/ \\__\\ |_______| \\______/__/ \\__\\ |__| |_______||_______|",
"welcome_banner": "Welcome to Alcatel_7360",
"uuid": "alcatel"
}
box_id = create_resource(req, (endpoint + '/boxen'))
# Admin user
req = {
"name": "Admin"
}
admin_id = create_resource(req, (endpoint + '/boxen/' + box_id + '/users'))
# Create login credentials at the switch (admin operation)
req = {
"username": "admin",
"password": "secret",
"user_id": admin_id
}
admin_credential_id = create_resource(req, (endpoint + '/boxen/' + box_id + '/credentials'))
# PortProfile 1
req = {
"name": "TEST_DSL_16000",
"description": "PortProfile #1",
"type": "service"
}
test_dsl_16000_profile = create_resource(req, (endpoint + '/boxen/' + box_id + '/port_profiles'))
# PortProfile 2
req = {
"name": "PSD_036",
"description": "PortProfile #2",
"type": "spectrum"
}
psd_036_profile = create_resource(req, (endpoint + '/boxen/' + box_id + '/port_profiles'))
# PortProfile 3
req = {
"name": "VECT_US_DS",
"description": "PortProfile #3",
"type": "vect"
}
vect_us_ds_profile = create_resource(req, (endpoint + '/boxen/' + box_id + '/port_profiles'))
# PortProfile 4
req = {
"name": "DPBO_3310",
"description": "PortProfile #4",
"type": "dpbo"
}
dpbo_3310_profile = create_resource(req, (endpoint + '/boxen/' + box_id + '/port_profiles'))
# PortProfile 5
req = {
"name": "TEST_FTTH_500M",
"description": "PortProfile #5",
"type": "qos",
"up_policer": "name:50M_CIR",
"down_policer": "name:500M_CIR",
"logical_flow_type": "generic"
}
test_ftth_500m_profile = create_resource(req, (endpoint + '/boxen/' + box_id + '/port_profiles'))
# PortProfile 6
req = {
"name": "TEST_FTTH_1G",
"description": "PortProfile #6",
"type": "qos",
"up_policer": "name:500M_CIR",
"down_policer": "name:1G_CIR",
"logical_flow_type": "generic"
}
test_ftth_1g_profile = create_resource(req, (endpoint + '/boxen/' + box_id + '/port_profiles'))
# PortProfile 7
req = {
"name": "VDSL_VECT_FALLBACK",
"description": "PortProfile #7",
"type": "vect"
}
tvdsl_vect_fallback_profile = create_resource(req, (endpoint + '/boxen/' + box_id + '/port_profiles'))
# PortProfile 6
req = {
"name": "vce-default",
"description": "Default vce profile",
"type": "vce"
}
vce_default_profile = create_resource(req, (endpoint + '/boxen/' + box_id + '/port_profiles'))
# Policer 1
req = {
"name": "1G_CIR",
"description": "Policer #1",
"type": "policer",
"committed_info_rate": 1050000,
"committed_burst_size": 2560000
}
_1g_cir_profile = create_resource(req, (endpoint + '/boxen/' + box_id + '/port_profiles'))
# Policer 2
req = {
"name": "1M_CIR",
"description": "Policer #2",
"type": "policer",
"committed_info_rate": 1050,
"committed_burst_size": 256000
}
_1m_cir_profile = create_resource(req, (endpoint + '/boxen/' + box_id + '/port_profiles'))
# Policer 3
req = {
"name": "2M_CIR",
"description": "Policer #3",
"type": "policer",
"committed_info_rate": 2100,
"committed_burst_size": 256000
}
_2m_cir_profile = create_resource(req, (endpoint + '/boxen/' + box_id + '/port_profiles'))
# Policer 4
req = {
"name": "50M_CIR",
"description": "Policer #4",
"type": "policer",
"committed_info_rate": 52500,
"committed_burst_size": 256000
}
_50m_cir_profile = create_resource(req, (endpoint + '/boxen/' + box_id + '/port_profiles'))
# Policer 5
req = {
"name": "100M_CIR",
"description": "Policer #5",
"type": "policer",
"committed_info_rate": 105000,
"committed_burst_size": 256000
}
_100m_cir_profile = create_resource(req, (endpoint + '/boxen/' + box_id + '/port_profiles'))
# Policer 6
req = {
"name": "500M_CIR",
"description": "Policer #6",
"type": "policer",
"committed_info_rate": 550000,
"committed_burst_size": 312500
}
_500m_cir_profile = create_resource(req, (endpoint + '/boxen/' + box_id + '/port_profiles'))
### PPPoE Vlan ###
req = {
"number": 2620,
"name": "PPPoE",
"description": "PPPoE Vlan",
"status": "learned",
"fdb_id": 2620,
"role": "access",
"shutdown": False,
"mtu": 1495,
"access_group_in": "",
"access_group_out": "",
"ip_redirect": False,
"ip_proxy_arp": False,
"unicast_reverse_path_forwarding": False,
"load_interval": 100,
"mpls_ip": "10.1.1.12",
"protocol_filter": "pass-pppoe",
"pppoe_relay_tag": "configurable",
"pppoe_linerate": "addactuallinerate",
"circuit_id_pppoe": "physical-id",
"remote_id_pppoe": "customer-id",
"in_qos_prof_name": "name:Default_TC0",
"new_broadcast": "disable",
"new_secure_fwd": "disable",
"aging_time": None,
"dhcp_opt82_ext": "disable"
}
vlan_pppoe = create_resource(req, (endpoint + '/boxen/' + box_id + '/vlans'))
### CPE Management Vlan ###
req = {
"number": 3320,
"name": "CPE Management",
"description": "CPE Management Vlan",
"status": "learned",
"fdb_id": 2620,
"role": "access",
"shutdown": False,
"mtu": 1495,
"access_group_in": "",
"access_group_out": "",
"ip_redirect": False,
"ip_proxy_arp": False,
"unicast_reverse_path_forwarding": False,
"load_interval": 100,
"mpls_ip": "10.1.1.12",
"protocol_filter": None,
"pppoe_relay_tag": None,
"pppoe_linerate": None,
"circuit_id_pppoe": None,
"remote_id_pppoe": None,
"circuit_id_dhcp": "physical-id",
"remote_id_dhcp": "customer-id",
"in_qos_prof_name": None,
"new_broadcast": "enable",
"new_secure_fwd": "enable",
"aging_time": 21600,
"dhcp_opt82_ext": "enable"
}
vlan_cpem = create_resource(req, (endpoint + '/boxen/' + box_id + '/vlans'))
### Subrack 1/1 ###
# Create a physical subrack at the network device (admin operation)
req = {
"name": "1/1",
"description": "Physical subrack #1",
"planned_type": "rvxs-a",
"actual_type": "rvxs-a",
"operational_state": "1",
"admin_state": "1",
"err_state": "no-error",
"availability": "available",
"mode": "no-extended-lt-slots",
"subrack_class": "main-ethernet",
"serial_no": "CN1646MAGDGF",
"variant": "3FE68313CDCDE",
"ics": "04"
}
subrack_id = create_resource(req, (endpoint + '/boxen/' + box_id + '/subracks'))
### Management Card ###
# Create a physical card at the network device (admin operation)
req = {
"name": "nt-a",
"position": "network:0",
"subrack_id": subrack_id,
"description": "Physical Management card",
"planned_type": "rant-a",
"actual_type": "rant-a",
"operational_state": "1",
"err_state": "no-error",
"availability": "available",
"alarm_profile": "none",
"capab_profile": "not_applicable",
"manufacturer": "ALCL",
"mnemonic": "RANT-A",
"pba_code": "3FE68863GGFL",
"fpba_code": "3FE68863GGFL",
"fpba_ics": "03",
"clei_code": "VAUCAMZKAA",
"serial_no": "YP1819F4025",
"failed_test": "00:00:00:00",
"lt_restart_time": "1970-01-01:00:00:00",
"lt_restart_cause": "other",
"lt_restart_num": 0,
"mgnt_entity_oamipaddr": "0.0.0.0",
"mgnt_entity_pairnum": 0,
"dual_host_ip": "0.0.0.0",
"dual_host_loc": "none",
"product": "mgnt"
}
card_nt_a = create_resource(req, (endpoint + '/boxen/' + box_id + '/cards'))
### Management Port 1 ###
# Create a physical port at the network device (admin operation)
req = {
"card_id": card_nt_a,
"name": "nt-a:xfp:1",
"position": "nt-a:xfp:1",
"description": "Management port #1",
"operational_state": "1",
"admin_state": "1",
"upstream": 0,
"downstream": 0,
"upstream_max": "100000",
"downstream_max": "100000",
"noise_margin_up": 0,
"noise_margin_down": 0,
"tgt_noise_margin_up": 0,
"tgt_noise_margin_down": 0,
"attenuation_up": 0,
"attenuation_down": 0,
"attained_upstream": 0,
"attained_downstream": 0,
"threshold_upstream": 0,
"threshold_downstream": 0,
"max_delay_upstream": 0,
"max_delay_downsteam": 0,
"if_index": 94502912,
"type": "ethernet-line",
"high_speed": 0,
"connector_present": "not-applicable",
"media": 0.0,
"largest_pkt_size": 0,
"curr_bandwith": 1244000000,
"phy_addr": " ",
"last_chg_opr_stat": "352-02:55:19",
"pkts_unknown_proto": 0,
"in_octets": 0,
"out_octets": 0,
"in_ucast_pkts": 0,
"out_ucast_pkts": 0,
"in_mcast_pkts": 0,
"out_mcast_pkts": 0,
"in_broadcast_pkts": 0,
"out_broadcast_pkts": 0,
"in_discard_pkts": 0,
"out_discard_pkts": 0,
"in_err_pkts": 0,
"out_err_pkts": 0,
"in_octets_hc": 0,
"out_octets_hc": 0,
"in_ucast_pkts_hc": 0,
"out_ucast_pkts_hc": 0,
"in_mcast_pkts_hc": 0,
"out_mcast_pkts_hc": 0,
"in_broadcast_pkts_hc": 0,
"out_broadcast_pkts_hc": 0,
"diag_avail_status": "no-error",
"los": "not-available",
"tx_fault": "no-tx-fault",
"tx_power": "3.85 dBm",
"rx_power": "not-available",
"tx_bias_current": "16.17 mA",
"supply_voltage": "3.23 VDC",
"temperature": "57.39 degrees Celsius",
"temperature_tca": "normal-value",
"voltage_tca": "normal-value",
"bias_current_tca": "normal-value",
"tx_power_tca": "normal-value",
"rx_power_tca": "normal-value",
"rssi_profile_id": 65535,
"rssi_state": "enable",
"inp_up": 0,
"inp_dn": 0,
"interl_us": 0,
"interl_dn": 0,
"cur_op_mode": "default",
"rinit_1d": 0,
"actual_tps_tc_mode": "ptm",
"rtx_mode_up": "unknown",
"rtx_mode_dn": "unknown",
"total_reset_attempt": 0,
"success_reset_attempt": 0,
"cur_init_state": "down",
"shutdown": False,
"speed": "1G",
"auto_negotiation": True,
"mtu": 1495
}
port_mgmt = create_resource(req, (endpoint + '/boxen/' + box_id + '/ports'))
### ServicePort nt-a:xfp:1 ####
req = {
"connected_id": port_mgmt,
"connected_type": "port",
"name": "nt-a:xfp:1",
"admin_state": "1",
"operational_state": "1"
}
service_port_1_1_1_1 = create_resource(req, (endpoint + '/boxen/' + box_id + '/service_ports'))
### Card 1/1/1 ###
# Create a physical card at the network device (admin operation)
req = {
"subrack_id": subrack_id,
"description": "Physical card 1/1/1",
"planned_type": "rdlt-c",
"actual_type": "rdlt-c",
"operational_state": "1",
"admin_state": "1",
"err_state": "no-error",
"availability": "available",
"alarm_profile": "none",
"capab_profile": "32port_xDSL",
"manufacturer": "ALCL",
"mnemonic": "RDLT-C",
"pba_code": "3FE68863GGFL",
"fpba_code": "3FE68863GGFL",
"fpba_ics": "02",
"clei_code": "VBIUAALBAB",
"serial_no": "AA1815FSE1CG",
"failed_test": "00:00:00:00",
"lt_restart_time": "1970-01-01:00:00:00",
"lt_restart_cause": "poweron",
"lt_restart_num": 0,
"mgnt_entity_oamipaddr": "0.0.0.0",
"mgnt_entity_pairnum": 0,
"dual_host_ip": "0.0.0.0",
"dual_host_loc": "none",
"product": "xdsl"
}
card_1_1_1 = create_resource(req, (endpoint + '/boxen/' + box_id + '/cards'))
### Port 1/1/1/1 ###
# Create a physical port at the network device (admin operation)
req = {
"card_id": card_1_1_1,
"description": "Physical port 1/1/1/1",
"operational_state": "1",
"admin_state": "1",
"upstream": 10000,
"downstream": 25000,
"upstream_max": "100000",
"downstream_max": "100000",
"noise_margin_up": 0,
"noise_margin_down": 0,
"tgt_noise_margin_up": 0,
"tgt_noise_margin_down": 0,
"attenuation_up": 0,
"attenuation_down": 0,
"attained_upstream": 0,
"attained_downstream": 0,
"threshold_upstream": 0,
"threshold_downstream": 0,
"max_delay_upstream": 0,
"max_delay_downsteam": 0,
"if_index": 94502912,
"type": "ethernet-line",
"high_speed": 0,
"connector_present": "not-applicable",
"media": 0.0,
"largest_pkt_size": 0,
"curr_bandwith": 1244000000,
"phy_addr": " ",
"last_chg_opr_stat": "352-02:55:19",
"pkts_unknown_proto": 0,
"in_octets": 0,
"out_octets": 0,
"in_ucast_pkts": 0,
"out_ucast_pkts": 0,
"in_mcast_pkts": 0,
"out_mcast_pkts": 0,
"in_broadcast_pkts": 0,
"out_broadcast_pkts": 0,
"in_discard_pkts": 0,
"out_discard_pkts": 0,
"in_err_pkts": 0,
"out_err_pkts": 0,
"in_octets_hc": 0,
"out_octets_hc": 0,
"in_ucast_pkts_hc": 0,
"out_ucast_pkts_hc": 0,
"in_mcast_pkts_hc": 0,
"out_mcast_pkts_hc": 0,
"in_broadcast_pkts_hc": 0,
"out_broadcast_pkts_hc": 0,
"diag_avail_status": "no-error",
"los": "not-available",
"tx_fault": "no-tx-fault",
"tx_power": "3.85 dBm",
"rx_power": "not-available",
"tx_bias_current": "16.17 mA",
"supply_voltage": "3.23 VDC",
"temperature": "57.39 degrees Celsius",
"temperature_tca": "normal-value",
"voltage_tca": "normal-value",
"bias_current_tca": "normal-value",
"tx_power_tca": "normal-value",
"rx_power_tca": "normal-value",
"rssi_profile_id": 65535,
"rssi_state": "enable",
"inp_up": 0,
"inp_dn": 0,
"interl_us": 0,
"interl_dn": 0,
"cur_op_mode": "default",
"rinit_1d": 0,
"actual_tps_tc_mode": "ptm",
"rtx_mode_up": "unknown",
"rtx_mode_dn": "unknown",
"total_reset_attempt": 0,
"success_reset_attempt": 0,
"cur_init_state": "down",
"shutdown": False,
"speed": "1G",
"auto_negotiation": True,
"mtu": 1495
}
port_1_1_1_1 = create_resource(req, (endpoint + '/boxen/' + box_id + '/ports'))
### ServicePort 1/1/1/1 ####
req = {
"connected_id": port_1_1_1_1,
"connected_type": "port",
"name": "1/1/1/1",
"admin_state": "1",
"operational_state": "1"
}
service_port_1_1_1_1 = create_resource(req, (endpoint + '/boxen/' + box_id + '/service_ports'))
### Service PPPoE Vlan at ServicePort 1/1/1/1 ###
req = {
"name": "2620",
"service_port_id": service_port_1_1_1_1,
"vlan_id": vlan_pppoe,
"card_id": card_1_1_1
}
service_vlan_ = create_resource(req, (endpoint + '/boxen/' + box_id + '/service_vlans'))
### Service CPE Management Vlan at ServicePort 1/1/1/1 ###
req = {
"name": "3320",
"service_port_id": service_port_1_1_1_1,
"vlan_id": vlan_cpem,
"card_id": card_1_1_1
}
service_vlan_ = create_resource(req, (endpoint + '/boxen/' + box_id + '/service_vlans'))
### ServicePort 1/1/1/1:1:32 ####
req = {
"name": "1/1/1/1:1:32",
"connected_id": port_1_1_1_1,
"connected_type": "port",
"admin_state": "1",
"operational_state": "1"
}
service_port_1_1_1_1_1_32 = create_resource(req, (endpoint + '/boxen/' + box_id + '/service_ports'))
### Service PPPoE Vlan at ServicePort 1/1/1/1:1:32 ###
req = {
"name": "2620",
"service_port_id": service_port_1_1_1_1_1_32,
"vlan_id": vlan_pppoe,
"card_id": card_1_1_1
}
service_vlan_ = create_resource(req, (endpoint + '/boxen/' + box_id + '/service_vlans'))
### Cpe 1/1/1/1/1 ###
# Create a physical cpe at the ont-port (admin operation)
req = {
"port_id": port_1_1_1_1,
"description": "Cpe 1/1/1/1/1",
"serial_no": "ABCD123456EF",
"admin_state": "1",
"mac": "8f:db:82:ef:ea:17"
}
cpe_1_1_1_1_1 = create_resource(req, (endpoint + '/boxen/' + box_id + '/cpes'))
### CpePort 1/1/1/1/1/1 ###
# Create a physical cpe-port at the cpe (admin operation)
req = {
"cpe_id": cpe_1_1_1_1_1,
"description": "CpePort 1/1/1/1/1/1"
}
cpe_port_1_1_1_1_1_1 = create_resource(req, (endpoint + '/boxen/' + box_id + '/cpe_ports'))
### ServicePort 1/1/1/1/1/1 ####
req = {
"connected_id": cpe_port_1_1_1_1_1_1,
"connected_type": "cpe",
"name": "1/1/1/1/1/1",
"admin_state": "1",
"operational_state": "1"
}
service_port_1_1_1_1_1_1 = create_resource(req, (endpoint + '/boxen/' + box_id + '/service_ports'))
### Service PPPoE Vlan at ServicePort 1/1/1/1/1/1 ###
req = {
"name": "2620",
"service_port_id": service_port_1_1_1_1_1_1,
"vlan_id": vlan_pppoe,
"card_id": card_1_1_1
}
service_vlan_ = create_resource(req, (endpoint + '/boxen/' + box_id + '/service_vlans'))
### Service CPE Management Vlan at ServicePort 1/1/1/1/1/1 ###
req = {
"name": "3320",
"service_port_id": service_port_1_1_1_1_1_1,
"vlan_id": vlan_cpem,
"card_id": card_1_1_1
}
service_vlan_ = create_resource(req, (endpoint + '/boxen/' + box_id + '/service_vlans'))
### Port 1/1/1/2 ###
# Create a physical port at the network device (admin operation)
req = {
"card_id": card_1_1_1,
"description": "Physical port 1/1/1/2",
"operational_state": "0",
"admin_state": "1",
"upstream": 0,
"downstream": 0,
"upstream_max": "100000",
"downstream_max": "100000",
"noise_margin_up": 0,
"noise_margin_down": 0,
"tgt_noise_margin_up": 0,
"tgt_noise_margin_down": 0,
"attenuation_up": 0,
"attenuation_down": 0,
"attained_upstream": 0,
"attained_downstream": 0,
"threshold_upstream": 0,
"threshold_downstream": 0,
"max_delay_upstream": 0,
"max_delay_downsteam": 0,
"if_index": 94502912,
"type": "ethernet-line",
"high_speed": 0,
"connector_present": "not-applicable",
"media": 0.0,
"largest_pkt_size": 0,
"curr_bandwith": 1244000000,
"phy_addr": " ",
"last_chg_opr_stat": "352-02:55:19",
"pkts_unknown_proto": 0,
"in_octets": 0,
"out_octets": 0,
"in_ucast_pkts": 0,
"out_ucast_pkts": 0,
"in_mcast_pkts": 0,
"out_mcast_pkts": 0,
"in_broadcast_pkts": 0,
"out_broadcast_pkts": 0,
"in_discard_pkts": 0,
"out_discard_pkts": 0,
"in_err_pkts": 0,
"out_err_pkts": 0,
"in_octets_hc": 0,
"out_octets_hc": 0,
"in_ucast_pkts_hc": 0,
"out_ucast_pkts_hc": 0,
"in_mcast_pkts_hc": 0,
"out_mcast_pkts_hc": 0,
"in_broadcast_pkts_hc": 0,
"out_broadcast_pkts_hc": 0,
"diag_avail_status": "no-error",
"los": "not-available",
"tx_fault": "no-tx-fault",
"tx_power": "3.85 dBm",
"rx_power": "not-available",
"tx_bias_current": "16.17 mA",
"supply_voltage": "3.23 VDC",
"temperature": "57.39 degrees Celsius",
"temperature_tca": "normal-value",
"voltage_tca": "normal-value",
"bias_current_tca": "normal-value",
"tx_power_tca": "normal-value",
"rx_power_tca": "normal-value",
"rssi_profile_id": 65535,
"rssi_state": "enable",
"inp_up": 0,
"inp_dn": 0,
"interl_us": 0,
"interl_dn": 0,
"cur_op_mode": "default",
"rinit_1d": 0,
"actual_tps_tc_mode": "ptm",
"rtx_mode_up": "unknown",
"rtx_mode_dn": "unknown",
"total_reset_attempt": 0,
"success_reset_attempt": 0,
"cur_init_state": "down",
"shutdown": False,
"speed": "1G",
"auto_negotiation": True,
"mtu": 1495
}
port_1_1_1_2 = create_resource(req, (endpoint + '/boxen/' + box_id + '/ports'))
### ServicePort 1/1/1/2 ####
req = {
"connected_id": port_1_1_1_2,
"connected_type": "port",
"name": "1/1/1/1",
"admin_state": "1",
"operational_state": "0"
}
service_port_1_1_1_2 = create_resource(req, (endpoint + '/boxen/' + box_id + '/service_ports'))
### Service PPPoE Vlan at ServicePort 1/1/1/2 ###
req = {
"name": "2620",
"service_port_id": service_port_1_1_1_2,
"vlan_id": vlan_pppoe,
"card_id": card_1_1_1
}
service_vlan_ = create_resource(req, (endpoint + '/boxen/' + box_id + '/service_vlans'))
### Service CPE Management Vlan at ServicePort 1/1/1/2 ###
req = {
"name": "3320",
"service_port_id": service_port_1_1_1_2,
"vlan_id": vlan_cpem,
"card_id": card_1_1_1
}
service_vlan_ = create_resource(req, (endpoint + '/boxen/' + box_id + '/service_vlans'))
### Cpe 1/1/1/2/1 ###
# Create a physical cpe at the ont-port (admin operation)
req = {
"port_id": port_1_1_1_2,
"description": "Cpe 1/1/1/2/1",
"serial_no": "ABCD654321FE",
"admin_state": "0",
"mac": "8d:dc:81:ea:fe:12"
}
cpe_1_1_1_2_1 = create_resource(req, (endpoint + '/boxen/' + box_id + '/cpes'))
### CpePort 1/1/1/1/2/1 ###
# Create a physical cpe-port at the cpe (admin operation)
req = {
"cpe_id": cpe_1_1_1_2_1,
"description": "CpePort 1/1/1/2/1/1"
}
cpe_port_1_1_1_2_1_1 = create_resource(req, (endpoint + '/boxen/' + box_id + '/cpe_ports'))
### Port 1/1/1/3 ###
# Create a physical port at the network device (admin operation)
req = {
"card_id": card_1_1_1,
"description": "Physical port 1/1/1/3",
"operational_state": "0",
"admin_state": "0",
"upstream": 0,
"downstream": 0,
"upstream_max": "100000",
"downstream_max": "100000",
"noise_margin_up": 0,
"noise_margin_down": 0,
"tgt_noise_margin_up": 0,
"tgt_noise_margin_down": 0,
"attenuation_up": 0,
"attenuation_down": 0,
"attained_upstream": 0,
"attained_downstream": 0,
"threshold_upstream": 0,
"threshold_downstream": 0,
"max_delay_upstream": 0,
"max_delay_downsteam": 0,
"if_index": 94502912,
"type": "ethernet-line",
"high_speed": 0,
"connector_present": "not-applicable",
"media": 0.0,
"largest_pkt_size": 0,
"curr_bandwith": 1244000000,
"phy_addr": " ",
"last_chg_opr_stat": "352-02:55:19",
"pkts_unknown_proto": 0,
"in_octets": 0,
"out_octets": 0,
"in_ucast_pkts": 0,
"out_ucast_pkts": 0,
"in_mcast_pkts": 0,
"out_mcast_pkts": 0,
"in_broadcast_pkts": 0,
"out_broadcast_pkts": 0,
"in_discard_pkts": 0,
"out_discard_pkts": 0,
"in_err_pkts": 0,
"out_err_pkts": 0,
"in_octets_hc": 0,
"out_octets_hc": 0,
"in_ucast_pkts_hc": 0,
"out_ucast_pkts_hc": 0,
"in_mcast_pkts_hc": 0,
"out_mcast_pkts_hc": 0,
"in_broadcast_pkts_hc": 0,
"out_broadcast_pkts_hc": 0,
"diag_avail_status": "no-error",
"los": "not-available",
"tx_fault": "no-tx-fault",
"tx_power": "3.85 dBm",
"rx_power": "not-available",
"tx_bias_current": "16.17 mA",
"supply_voltage": "3.23 VDC",
"temperature": "57.39 degrees Celsius",
"temperature_tca": "normal-value",
"voltage_tca": "normal-value",
"bias_current_tca": "normal-value",
"tx_power_tca": "normal-value",
"rx_power_tca": "normal-value",
"rssi_profile_id": 65535,
"rssi_state": "enable",
"inp_up": 0,
"inp_dn": 0,
"interl_us": 0,
"interl_dn": 0,
"cur_op_mode": "default",
"rinit_1d": 0,
"actual_tps_tc_mode": "ptm",
"rtx_mode_up": "unknown",
"rtx_mode_dn": "unknown",
"total_reset_attempt": 0,
"success_reset_attempt": 0,
"cur_init_state": "down",
"shutdown": False,
"speed": "1G",
"auto_negotiation": True,
"mtu": 1495
}
port_1_1_1_3 = create_resource(req, (endpoint + '/boxen/' + box_id + '/ports'))
### ServicePort 1/1/1/3 ####
req = {
"connected_id": port_1_1_1_3,
"connected_type": "port",
"name": "1/1/1/3",
"admin_state": "0",
"operational_state": "0"
}
service_port_1_1_1_3 = create_resource(req, (endpoint + '/boxen/' + box_id + '/service_ports'))
### Service PPPoE Vlan at ServicePort 1/1/1/3 ###
req = {
"name": "2620",
"service_port_id": service_port_1_1_1_3,
"vlan_id": vlan_pppoe,
"card_id": card_1_1_1
}
service_vlan_ = create_resource(req, (endpoint + '/boxen/' + box_id + '/service_vlans'))
### Service CPE Management Vlan at ServicePort 1/1/1/3 ###
req = {
"name": "3320",
"service_port_id": service_port_1_1_1_3,
"vlan_id": vlan_cpem,
"card_id": card_1_1_1
}
service_vlan_ = create_resource(req, (endpoint + '/boxen/' + box_id + '/service_vlans'))
### Card 1/1/2 ###
# Create a physical card at the network device (admin operation)
req = {
"subrack_id": subrack_id,
"description": "Physical card 1/1/2",
"planned_type": "rdlt-c",
"actual_type": "rdlt-c",
"operational_state": "1",
"admin_state": "1",
"err_state": "no-error",
"availability": "available",
"alarm_profile": "none",
"capab_profile": "32port_xDSL",
"manufacturer": "ALCL",
"mnemonic": "FANT-F",
"pba_code": "3FE68863GGFL",
"fpba_code": "3FE68863GGFL",
"fpba_ics": "02",
"clei_code": "VBIUAALBAB",
"serial_no": "AA1815FSE1CG",
"failed_test": "00:00:00:00",
"lt_restart_time": "1970-01-01:00:00:00",
"lt_restart_cause": "poweron",
"lt_restart_num": 0,
"mgnt_entity_oamipaddr": "0.0.0.0",
"mgnt_entity_pairnum": 0,
"dual_host_ip": "0.0.0.0",
"dual_host_loc": "none",
"product": "vdsl"
}
card_1_1_2 = create_resource(req, (endpoint + '/boxen/' + box_id + '/cards'))
### Port 1/1/2/1 ###
# Create a physical port at the network device (admin operation)
req = {
"card_id": card_1_1_2,
"description": "Physical port 1/1/2/1",
"operational_state": "1",
"admin_state": "1",
"upstream": 10000,
"downstream": 25000,
"upstream_max": "100000",
"downstream_max": "100000",
"noise_margin_up": 0,
"noise_margin_down": 0,
"tgt_noise_margin_up": 0,
"tgt_noise_margin_down": 0,
"attenuation_up": 0,
"attenuation_down": 0,
"attained_upstream": 0,
"attained_downstream": 0,
"threshold_upstream": 0,
"threshold_downstream": 0,
"max_delay_upstream": 0,
"max_delay_downsteam": 0,
"if_index": 94502912,
"type": "ethernet-line",
"high_speed": 0,
"connector_present": "not-applicable",
"media": 0.0,
"largest_pkt_size": 0,
"curr_bandwith": 1244000000,
"phy_addr": " ",
"last_chg_opr_stat": "352-02:55:19",
"pkts_unknown_proto": 0,
"in_octets": 0,
"out_octets": 0,
"in_ucast_pkts": 0,
"out_ucast_pkts": 0,
"in_mcast_pkts": 0,
"out_mcast_pkts": 0,
"in_broadcast_pkts": 0,
"out_broadcast_pkts": 0,
"in_discard_pkts": 0,
"out_discard_pkts": 0,
"in_err_pkts": 0,
"out_err_pkts": 0,
"in_octets_hc": 0,
"out_octets_hc": 0,
"in_ucast_pkts_hc": 0,
"out_ucast_pkts_hc": 0,
"in_mcast_pkts_hc": 0,
"out_mcast_pkts_hc": 0,
"in_broadcast_pkts_hc": 0,
"out_broadcast_pkts_hc": 0,
"diag_avail_status": "no-error",
"los": "not-available",
"tx_fault": "no-tx-fault",
"tx_power": "3.85 dBm",
"rx_power": "not-available",
"tx_bias_current": "16.17 mA",
"supply_voltage": "3.23 VDC",
"temperature": "57.39 degrees Celsius",
"temperature_tca": "normal-value",
"voltage_tca": "normal-value",
"bias_current_tca": "normal-value",
"tx_power_tca": "normal-value",
"rx_power_tca": "normal-value",
"rssi_profile_id": 65535,
"rssi_state": "enable",
"inp_up": 0,
"inp_dn": 0,
"interl_us": 0,
"interl_dn": 0,
"cur_op_mode": "default",
"rinit_1d": 0,
"actual_tps_tc_mode": "ptm",
"rtx_mode_up": "unknown",
"rtx_mode_dn": "unknown",
"total_reset_attempt": 0,
"success_reset_attempt": 0,
"cur_init_state": "down",
"shutdown": False,
"speed": "1G",
"auto_negotiation": True,
"mtu": 1495
}
port_1_1_2_1 = create_resource(req, (endpoint + '/boxen/' + box_id + '/ports'))
### ServicePort 1/1/2/1 ####
req = {
"connected_id": port_1_1_2_1,
"connected_type": "port",
"name": "1/1/2/1",
"admin_state": "1",
"operational_state": "1"
}
service_port_1_1_2_1 = create_resource(req, (endpoint + '/boxen/' + box_id + '/service_ports'))
### Service PPPoE Vlan at ServicePort 1/1/2/1 ###
req = {
"name": "2620",
"service_port_id": service_port_1_1_2_1,
"vlan_id": vlan_pppoe,
"card_id": card_1_1_2
}
service_vlan_ = create_resource(req, (endpoint + '/boxen/' + box_id + '/service_vlans'))
### Service CPE Management Vlan at ServicePort 1/1/2/1 ###
req = {
"name": "3320",
"service_port_id": service_port_1_1_2_1,
"vlan_id": vlan_cpem,
"card_id": card_1_1_2
}
service_vlan_ = create_resource(req, (endpoint + '/boxen/' + box_id + '/service_vlans'))
### Cpe 1/1/2/1/1 ###
# Create a physical cpe at the ont-port (admin operation)
req = {
"port_id": port_1_1_2_1,
"description": "Cpe 1/1/2/1/1",
"serial_no": "GFED123456BA",
"admin_state": "1",
"mac": "2a:87:19:09:ae:2f"
}
cpe_1_1_2_1_1 = create_resource(req, (endpoint + '/boxen/' + box_id + '/cpes'))
### CpePort 1/1/2/1/1/1 ##
# Create a physical cpe-port at the cpe (admin operation)
req = {
"cpe_id": cpe_1_1_2_1_1,
"description": "CpePort 1/1/2/1/1/1"
}
cpe_port_1_1_1_1_1_1 = create_resource(req, (endpoint + '/boxen/' + box_id + '/cpe_ports'))
### Port 1/1/2/2 ###
# Create a physical port at the network device (admin operation)
req = {
"card_id": card_1_1_2,
"description": "Physical port 1/1/2/2",
"operational_state": "0",
"admin_state": "1",
"upstream": 0,
"downstream": 0,
"upstream_max": "100000",
"downstream_max": "100000",
"noise_margin_up": 0,
"noise_margin_down": 0,
"tgt_noise_margin_up": 0,
"tgt_noise_margin_down": 0,
"attenuation_up": 0,
"attenuation_down": 0,
"attained_upstream": 0,
"attained_downstream": 0,
"threshold_upstream": 0,
"threshold_downstream": 0,
"max_delay_upstream": 0,
"max_delay_downsteam": 0,
"if_index": 94502912,
"type": "ethernet-line",
"high_speed": 0,
"connector_present": "not-applicable",
"media": 0.0,
"largest_pkt_size": 0,
"curr_bandwith": 1244000000,
"phy_addr": " ",
"last_chg_opr_stat": "352-02:55:19",
"pkts_unknown_proto": 0,
"in_octets": 0,
"out_octets": 0,
"in_ucast_pkts": 0,
"out_ucast_pkts": 0,
"in_mcast_pkts": 0,
"out_mcast_pkts": 0,
"in_broadcast_pkts": 0,
"out_broadcast_pkts": 0,
"in_discard_pkts": 0,
"out_discard_pkts": 0,
"in_err_pkts": 0,
"out_err_pkts": 0,
"in_octets_hc": 0,
"out_octets_hc": 0,
"in_ucast_pkts_hc": 0,
"out_ucast_pkts_hc": 0,
"in_mcast_pkts_hc": 0,
"out_mcast_pkts_hc": 0,
"in_broadcast_pkts_hc": 0,
"out_broadcast_pkts_hc": 0,
"diag_avail_status": "no-error",
"los": "not-available",
"tx_fault": "no-tx-fault",
"tx_power": "3.85 dBm",
"rx_power": "not-available",
"tx_bias_current": "16.17 mA",
"supply_voltage": "3.23 VDC",
"temperature": "57.39 degrees Celsius",
"temperature_tca": "normal-value",
"voltage_tca": "normal-value",
"bias_current_tca": "normal-value",
"tx_power_tca": "normal-value",
"rx_power_tca": "normal-value",
"rssi_profile_id": 65535,
"rssi_state": "enable",
"inp_up": 0,
"inp_dn": 0,
"interl_us": 0,
"interl_dn": 0,
"cur_op_mode": "default",
"rinit_1d": 0,
"actual_tps_tc_mode": "ptm",
"rtx_mode_up": "unknown",
"rtx_mode_dn": "unknown",
"total_reset_attempt": 0,
"success_reset_attempt": 0,
"cur_init_state": "down",
"shutdown": False,
"speed": "1G",
"auto_negotiation": True,
"mtu": 1495
}
port_1_1_2_2 = create_resource(req, (endpoint + '/boxen/' + box_id + '/ports'))
### ServicePort 1/1/2/2 ####
# Service port bound to physical port 1/1/2/2.
# FIX: "name" was "1/1/2/1" (copy/paste from the previous port section),
# which duplicated the ServicePort 1/1/2/1 name; every other service port
# in this file is named after its own port path.
req = {
"connected_id": port_1_1_2_2,
"connected_type": "port",
"name": "1/1/2/2",
"admin_state": "1",
"operational_state": "0"
}
service_port_1_1_2_2 = create_resource(req, (endpoint + '/boxen/' + box_id + '/service_ports'))
### Service PPPoE Vlan at ServicePort 1/1/2/2 ###
# Attach the PPPoE VLAN (name "2620") to this service port.
req = {
"name": "2620",
"service_port_id": service_port_1_1_2_2,
"vlan_id": vlan_pppoe,
"card_id": card_1_1_2
}
service_vlan_ = create_resource(req, (endpoint + '/boxen/' + box_id + '/service_vlans'))
### Service CPE Management Vlan at ServicePort 1/1/2/2 ###
# Attach the CPE-management VLAN (name "3320") to this service port.
req = {
"name": "3320",
"service_port_id": service_port_1_1_2_2,
"vlan_id": vlan_cpem,
"card_id": card_1_1_2
}
service_vlan_ = create_resource(req, (endpoint + '/boxen/' + box_id + '/service_vlans'))
### Cpe 1/1/2/2/1 ###
# Create a physical cpe at the ont-port (admin operation)
# CPE attached directly to DSL port 1/1/2/2 (admin_state "0" = down).
req = {
"port_id": port_1_1_2_2,
"description": "Cpe 1/1/2/2/1",
"serial_no": "DEFG654321AB",
"admin_state": "0",
"mac": "2e:78:09:e6:dc:4e"
}
cpe_1_1_2_2_1 = create_resource(req, (endpoint + '/boxen/' + box_id + '/cpes'))
### CpePort 1/1/2/2/1/1 ###
# Create a physical cpe-port at the cpe (admin operation)
req = {
"cpe_id": cpe_1_1_2_2_1,
"description": "CpePort 1/1/2/2/1/1"
}
cpe_port_1_1_2_2_1_1 = create_resource(req, (endpoint + '/boxen/' + box_id + '/cpe_ports'))
### Port 1/1/2/3 ###
# Create a physical port at the network device (admin operation)
# Unused port on card 1/1/2: admin/operational down, zero rates, and the
# same static counter/diagnostic defaults used for every port in this file.
# NOTE(review): keys "max_delay_downsteam" and "curr_bandwith" look
# misspelled but are used consistently file-wide — presumably they must
# match the simulator's API schema, so they are left as-is.
req = {
"card_id": card_1_1_2,
"description": "Physical port 1/1/2/3",
"operational_state": "0",
"admin_state": "0",
"upstream": 0,
"downstream": 0,
"upstream_max": "100000",
"downstream_max": "100000",
"noise_margin_up": 0,
"noise_margin_down": 0,
"tgt_noise_margin_up": 0,
"tgt_noise_margin_down": 0,
"attenuation_up": 0,
"attenuation_down": 0,
"attained_upstream": 0,
"attained_downstream": 0,
"threshold_upstream": 0,
"threshold_downstream": 0,
"max_delay_upstream": 0,
"max_delay_downsteam": 0,
"if_index": 94502912,
"type": "ethernet-line",
"high_speed": 0,
"connector_present": "not-applicable",
"media": 0.0,
"largest_pkt_size": 0,
"curr_bandwith": 1244000000,
"phy_addr": " ",
"last_chg_opr_stat": "352-02:55:19",
"pkts_unknown_proto": 0,
"in_octets": 0,
"out_octets": 0,
"in_ucast_pkts": 0,
"out_ucast_pkts": 0,
"in_mcast_pkts": 0,
"out_mcast_pkts": 0,
"in_broadcast_pkts": 0,
"out_broadcast_pkts": 0,
"in_discard_pkts": 0,
"out_discard_pkts": 0,
"in_err_pkts": 0,
"out_err_pkts": 0,
"in_octets_hc": 0,
"out_octets_hc": 0,
"in_ucast_pkts_hc": 0,
"out_ucast_pkts_hc": 0,
"in_mcast_pkts_hc": 0,
"out_mcast_pkts_hc": 0,
"in_broadcast_pkts_hc": 0,
"out_broadcast_pkts_hc": 0,
"diag_avail_status": "no-error",
"los": "not-available",
"tx_fault": "no-tx-fault",
"tx_power": "3.85 dBm",
"rx_power": "not-available",
"tx_bias_current": "16.17 mA",
"supply_voltage": "3.23 VDC",
"temperature": "57.39 degrees Celsius",
"temperature_tca": "normal-value",
"voltage_tca": "normal-value",
"bias_current_tca": "normal-value",
"tx_power_tca": "normal-value",
"rx_power_tca": "normal-value",
"rssi_profile_id": 65535,
"rssi_state": "enable",
"inp_up": 0,
"inp_dn": 0,
"interl_us": 0,
"interl_dn": 0,
"cur_op_mode": "default",
"rinit_1d": 0,
"actual_tps_tc_mode": "ptm",
"rtx_mode_up": "unknown",
"rtx_mode_dn": "unknown",
"total_reset_attempt": 0,
"success_reset_attempt": 0,
"cur_init_state": "down",
"shutdown": False,
"speed": "1G",
"auto_negotiation": True,
"mtu": 1495
}
port_1_1_2_3 = create_resource(req, (endpoint + '/boxen/' + box_id + '/ports'))
### ServicePort 1/1/2/3 ####
# Service port bound to physical port 1/1/2/3 (both states down).
req = {
"connected_id": port_1_1_2_3,
"connected_type": "port",
"name": "1/1/2/3",
"admin_state": "0",
"operational_state": "0"
}
service_port_1_1_2_3 = create_resource(req, (endpoint + '/boxen/' + box_id + '/service_ports'))
### Service PPPoE Vlan at ServicePort 1/1/2/3 ###
# Attach the PPPoE VLAN (name "2620") to this service port.
req = {
"name": "2620",
"service_port_id": service_port_1_1_2_3,
"vlan_id": vlan_pppoe,
"card_id": card_1_1_2
}
service_vlan_ = create_resource(req, (endpoint + '/boxen/' + box_id + '/service_vlans'))
### Service CPE Management Vlan at ServicePort 1/1/2/3 ###
# Attach the CPE-management VLAN (name "3320") to this service port.
req = {
"name": "3320",
"service_port_id": service_port_1_1_2_3,
"vlan_id": vlan_cpem,
"card_id": card_1_1_2
}
service_vlan_ = create_resource(req, (endpoint + '/boxen/' + box_id + '/service_vlans'))
### Card 1/1/3 ###
# Create a physical card at the network device (admin operation)
# xDSL line card in slot 1/1/3 (product "adsl"), up and available.
# NOTE(review): planned/actual type "nant-a" vs mnemonic "FANT-F" looks
# inconsistent, but sibling cards mix these fields too — confirm intent.
req = {
"subrack_id": subrack_id,
"description": "Physical card 1/1/3",
"planned_type": "nant-a",
"actual_type": "nant-a",
"operational_state": "1",
"admin_state": "1",
"err_state": "no-error",
"availability": "available",
"alarm_profile": "none",
"capab_profile": "32port_xDSL",
"manufacturer": "ALCL",
"mnemonic": "FANT-F",
"pba_code": "3FE68863GGFL",
"fpba_code": "3FE68863GGFL",
"fpba_ics": "02",
"clei_code": "VBIUAALBAB",
"serial_no": "AA1815FSE1CG",
"failed_test": "00:00:00:00",
"lt_restart_time": "1970-01-01:00:00:00",
"lt_restart_cause": "poweron",
"lt_restart_num": 0,
"mgnt_entity_oamipaddr": "0.0.0.0",
"mgnt_entity_pairnum": 0,
"dual_host_ip": "0.0.0.0",
"dual_host_loc": "none",
"product": "adsl"
}
card_1_1_3 = create_resource(req, (endpoint + '/boxen/' + box_id + '/cards'))
### Port 1/1/3/1 ###
# Create a physical port at the network device (admin operation)
# Active port on card 1/1/3: admin/operational up, synced at
# 10000/25000 (up/down); remaining fields are the shared port defaults.
# NOTE(review): keys "max_delay_downsteam" and "curr_bandwith" look
# misspelled but are used consistently file-wide — presumably they must
# match the simulator's API schema, so they are left as-is.
req = {
"card_id": card_1_1_3,
"description": "Physical port 1/1/3/1",
"operational_state": "1",
"admin_state": "1",
"upstream": 10000,
"downstream": 25000,
"upstream_max": "100000",
"downstream_max": "100000",
"noise_margin_up": 0,
"noise_margin_down": 0,
"tgt_noise_margin_up": 0,
"tgt_noise_margin_down": 0,
"attenuation_up": 0,
"attenuation_down": 0,
"attained_upstream": 0,
"attained_downstream": 0,
"threshold_upstream": 0,
"threshold_downstream": 0,
"max_delay_upstream": 0,
"max_delay_downsteam": 0,
"if_index": 94502912,
"type": "ethernet-line",
"high_speed": 0,
"connector_present": "not-applicable",
"media": 0.0,
"largest_pkt_size": 0,
"curr_bandwith": 1244000000,
"phy_addr": " ",
"last_chg_opr_stat": "352-02:55:19",
"pkts_unknown_proto": 0,
"in_octets": 0,
"out_octets": 0,
"in_ucast_pkts": 0,
"out_ucast_pkts": 0,
"in_mcast_pkts": 0,
"out_mcast_pkts": 0,
"in_broadcast_pkts": 0,
"out_broadcast_pkts": 0,
"in_discard_pkts": 0,
"out_discard_pkts": 0,
"in_err_pkts": 0,
"out_err_pkts": 0,
"in_octets_hc": 0,
"out_octets_hc": 0,
"in_ucast_pkts_hc": 0,
"out_ucast_pkts_hc": 0,
"in_mcast_pkts_hc": 0,
"out_mcast_pkts_hc": 0,
"in_broadcast_pkts_hc": 0,
"out_broadcast_pkts_hc": 0,
"diag_avail_status": "no-error",
"los": "not-available",
"tx_fault": "no-tx-fault",
"tx_power": "3.85 dBm",
"rx_power": "not-available",
"tx_bias_current": "16.17 mA",
"supply_voltage": "3.23 VDC",
"temperature": "57.39 degrees Celsius",
"temperature_tca": "normal-value",
"voltage_tca": "normal-value",
"bias_current_tca": "normal-value",
"tx_power_tca": "normal-value",
"rx_power_tca": "normal-value",
"rssi_profile_id": 65535,
"rssi_state": "enable",
"inp_up": 0,
"inp_dn": 0,
"interl_us": 0,
"interl_dn": 0,
"cur_op_mode": "default",
"rinit_1d": 0,
"actual_tps_tc_mode": "ptm",
"rtx_mode_up": "unknown",
"rtx_mode_dn": "unknown",
"total_reset_attempt": 0,
"success_reset_attempt": 0,
"cur_init_state": "down",
"shutdown": False,
"speed": "1G",
"auto_negotiation": True,
"mtu": 1495
}
port_1_1_3_1 = create_resource(req, (endpoint + '/boxen/' + box_id + '/ports'))
### ServicePort 1/1/3/1 ####
# Service port bound to physical port 1/1/3/1 (both states up).
req = {
"connected_id": port_1_1_3_1,
"connected_type": "port",
"name": "1/1/3/1",
"admin_state": "1",
"operational_state": "1"
}
service_port_1_1_3_1 = create_resource(req, (endpoint + '/boxen/' + box_id + '/service_ports'))
### Service PPPoE Vlan at ServicePort 1/1/3/1 ###
# Attach the PPPoE VLAN (name "2620") to this service port.
req = {
"name": "2620",
"service_port_id": service_port_1_1_3_1,
"vlan_id": vlan_pppoe,
"card_id": card_1_1_3
}
service_vlan_ = create_resource(req, (endpoint + '/boxen/' + box_id + '/service_vlans'))
### Service CPE Management Vlan at ServicePort 1/1/3/1 ###
# Attach the CPE-management VLAN (name "3320") to this service port.
req = {
"name": "3320",
"service_port_id": service_port_1_1_3_1,
"vlan_id": vlan_cpem,
"card_id": card_1_1_3
}
service_vlan_ = create_resource(req, (endpoint + '/boxen/' + box_id + '/service_vlans'))
### Cpe 1/1/3/1/1 ###
# Create a physical cpe at the ont-port (admin operation)
# CPE attached directly to DSL port 1/1/3/1 (admin_state "1" = up).
req = {
"port_id": port_1_1_3_1,
"description": "Cpe 1/1/3/1/1",
"serial_no": "WXYZ123456BA",
"admin_state": "1",
"mac": "fd:28:2e:25:a2:99"
}
cpe_1_1_3_1_1 = create_resource(req, (endpoint + '/boxen/' + box_id + '/cpes'))
### CpePort 1/1/3/1/1/1 ###
# Create a physical cpe-port at the cpe (admin operation)
# FIX: the result variable was misnamed "cpe_port_1_1_1_1_1_1", breaking
# the file's path-based naming pattern (cf. cpe_port_1_1_2_2_1_1,
# cpe_port_1_1_3_2_1_1) and shadowing any identically named variable for
# CpePort 1/1/1/1/1/1 created earlier in the script.
req = {
"cpe_id": cpe_1_1_3_1_1,
"description": "CpePort 1/1/3/1/1/1"
}
cpe_port_1_1_3_1_1_1 = create_resource(req, (endpoint + '/boxen/' + box_id + '/cpe_ports'))
### Port 1/1/3/2 ###
# Create a physical port at the network device (admin operation)
# Port on card 1/1/3 that is enabled (admin "1") but not in sync
# (operational "0", zero rates); remaining fields are the shared defaults.
# NOTE(review): keys "max_delay_downsteam" and "curr_bandwith" look
# misspelled but are used consistently file-wide — presumably they must
# match the simulator's API schema, so they are left as-is.
req = {
"card_id": card_1_1_3,
"description": "Physical port 1/1/3/2",
"operational_state": "0",
"admin_state": "1",
"upstream": 0,
"downstream": 0,
"upstream_max": "100000",
"downstream_max": "100000",
"noise_margin_up": 0,
"noise_margin_down": 0,
"tgt_noise_margin_up": 0,
"tgt_noise_margin_down": 0,
"attenuation_up": 0,
"attenuation_down": 0,
"attained_upstream": 0,
"attained_downstream": 0,
"threshold_upstream": 0,
"threshold_downstream": 0,
"max_delay_upstream": 0,
"max_delay_downsteam": 0,
"if_index": 94502912,
"type": "ethernet-line",
"high_speed": 0,
"connector_present": "not-applicable",
"media": 0.0,
"largest_pkt_size": 0,
"curr_bandwith": 1244000000,
"phy_addr": " ",
"last_chg_opr_stat": "352-02:55:19",
"pkts_unknown_proto": 0,
"in_octets": 0,
"out_octets": 0,
"in_ucast_pkts": 0,
"out_ucast_pkts": 0,
"in_mcast_pkts": 0,
"out_mcast_pkts": 0,
"in_broadcast_pkts": 0,
"out_broadcast_pkts": 0,
"in_discard_pkts": 0,
"out_discard_pkts": 0,
"in_err_pkts": 0,
"out_err_pkts": 0,
"in_octets_hc": 0,
"out_octets_hc": 0,
"in_ucast_pkts_hc": 0,
"out_ucast_pkts_hc": 0,
"in_mcast_pkts_hc": 0,
"out_mcast_pkts_hc": 0,
"in_broadcast_pkts_hc": 0,
"out_broadcast_pkts_hc": 0,
"diag_avail_status": "no-error",
"los": "not-available",
"tx_fault": "no-tx-fault",
"tx_power": "3.85 dBm",
"rx_power": "not-available",
"tx_bias_current": "16.17 mA",
"supply_voltage": "3.23 VDC",
"temperature": "57.39 degrees Celsius",
"temperature_tca": "normal-value",
"voltage_tca": "normal-value",
"bias_current_tca": "normal-value",
"tx_power_tca": "normal-value",
"rx_power_tca": "normal-value",
"rssi_profile_id": 65535,
"rssi_state": "enable",
"inp_up": 0,
"inp_dn": 0,
"interl_us": 0,
"interl_dn": 0,
"cur_op_mode": "default",
"rinit_1d": 0,
"actual_tps_tc_mode": "ptm",
"rtx_mode_up": "unknown",
"rtx_mode_dn": "unknown",
"total_reset_attempt": 0,
"success_reset_attempt": 0,
"cur_init_state": "down",
"shutdown": False,
"speed": "1G",
"auto_negotiation": True,
"mtu": 1495
}
port_1_1_3_2 = create_resource(req, (endpoint + '/boxen/' + box_id + '/ports'))
### ServicePort 1/1/3/2 ####
# Service port bound to physical port 1/1/3/2 (admin up, operational down).
req = {
"connected_id": port_1_1_3_2,
"connected_type": "port",
"name": "1/1/3/2",
"admin_state": "1",
"operational_state": "0"
}
service_port_1_1_3_2 = create_resource(req, (endpoint + '/boxen/' + box_id + '/service_ports'))
### Service PPPoE Vlan at ServicePort 1/1/3/2 ###
# Attach the PPPoE VLAN (name "2620") to this service port.
req = {
"name": "2620",
"service_port_id": service_port_1_1_3_2,
"vlan_id": vlan_pppoe,
"card_id": card_1_1_3
}
service_vlan_ = create_resource(req, (endpoint + '/boxen/' + box_id + '/service_vlans'))
### Service CPE Management Vlan at ServicePort 1/1/3/2 ###
# Attach the CPE-management VLAN (name "3320") to this service port.
req = {
"name": "3320",
"service_port_id": service_port_1_1_3_2,
"vlan_id": vlan_cpem,
"card_id": card_1_1_3
}
service_vlan_ = create_resource(req, (endpoint + '/boxen/' + box_id + '/service_vlans'))
### Cpe 1/1/3/2/1 ###
# Create a physical cpe at the ont-port (admin operation)
# CPE attached directly to DSL port 1/1/3/2 (admin_state "0" = down).
req = {
"port_id": port_1_1_3_2,
"description": "Cpe 1/1/3/2/1",
"serial_no": "DEFG654321AB",
"admin_state": "0",
"mac": "c3:3e:81:30:3d:10"
}
cpe_1_1_3_2_1 = create_resource(req, (endpoint + '/boxen/' + box_id + '/cpes'))
### CpePort 1/1/3/2/1/1 ###
# Create a physical cpe-port at the cpe (admin operation)
req = {
"cpe_id": cpe_1_1_3_2_1,
"description": "CpePort 1/1/3/2/1/1"
}
cpe_port_1_1_3_2_1_1 = create_resource(req, (endpoint + '/boxen/' + box_id + '/cpe_ports'))
### Port 1/1/3/3 ###
# Create a physical port at the network device (admin operation)
# Unused port on card 1/1/3: admin/operational down, zero rates, and the
# shared static counter/diagnostic defaults.
# NOTE(review): keys "max_delay_downsteam" and "curr_bandwith" look
# misspelled but are used consistently file-wide — presumably they must
# match the simulator's API schema, so they are left as-is.
req = {
"card_id": card_1_1_3,
"description": "Physical port 1/1/3/3",
"operational_state": "0",
"admin_state": "0",
"upstream": 0,
"downstream": 0,
"upstream_max": "100000",
"downstream_max": "100000",
"noise_margin_up": 0,
"noise_margin_down": 0,
"tgt_noise_margin_up": 0,
"tgt_noise_margin_down": 0,
"attenuation_up": 0,
"attenuation_down": 0,
"attained_upstream": 0,
"attained_downstream": 0,
"threshold_upstream": 0,
"threshold_downstream": 0,
"max_delay_upstream": 0,
"max_delay_downsteam": 0,
"if_index": 94502912,
"type": "ethernet-line",
"high_speed": 0,
"connector_present": "not-applicable",
"media": 0.0,
"largest_pkt_size": 0,
"curr_bandwith": 1244000000,
"phy_addr": " ",
"last_chg_opr_stat": "352-02:55:19",
"pkts_unknown_proto": 0,
"in_octets": 0,
"out_octets": 0,
"in_ucast_pkts": 0,
"out_ucast_pkts": 0,
"in_mcast_pkts": 0,
"out_mcast_pkts": 0,
"in_broadcast_pkts": 0,
"out_broadcast_pkts": 0,
"in_discard_pkts": 0,
"out_discard_pkts": 0,
"in_err_pkts": 0,
"out_err_pkts": 0,
"in_octets_hc": 0,
"out_octets_hc": 0,
"in_ucast_pkts_hc": 0,
"out_ucast_pkts_hc": 0,
"in_mcast_pkts_hc": 0,
"out_mcast_pkts_hc": 0,
"in_broadcast_pkts_hc": 0,
"out_broadcast_pkts_hc": 0,
"diag_avail_status": "no-error",
"los": "not-available",
"tx_fault": "no-tx-fault",
"tx_power": "3.85 dBm",
"rx_power": "not-available",
"tx_bias_current": "16.17 mA",
"supply_voltage": "3.23 VDC",
"temperature": "57.39 degrees Celsius",
"temperature_tca": "normal-value",
"voltage_tca": "normal-value",
"bias_current_tca": "normal-value",
"tx_power_tca": "normal-value",
"rx_power_tca": "normal-value",
"rssi_profile_id": 65535,
"rssi_state": "enable",
"inp_up": 0,
"inp_dn": 0,
"interl_us": 0,
"interl_dn": 0,
"cur_op_mode": "default",
"rinit_1d": 0,
"actual_tps_tc_mode": "ptm",
"rtx_mode_up": "unknown",
"rtx_mode_dn": "unknown",
"total_reset_attempt": 0,
"success_reset_attempt": 0,
"cur_init_state": "down",
"shutdown": False,
"speed": "1G",
"auto_negotiation": True,
"mtu": 1495
}
port_1_1_3_3 = create_resource(req, (endpoint + '/boxen/' + box_id + '/ports'))
### ServicePort 1/1/3/3 ####
# Service port bound to physical port 1/1/3/3 (both states down).
req = {
"connected_id": port_1_1_3_3,
"connected_type": "port",
"name": "1/1/3/3",
"admin_state": "0",
"operational_state": "0"
}
service_port_1_1_3_3 = create_resource(req, (endpoint + '/boxen/' + box_id + '/service_ports'))
### Service PPPoE Vlan at ServicePort 1/1/3/3 ###
# Attach the PPPoE VLAN (name "2620") to this service port.
req = {
"name": "2620",
"service_port_id": service_port_1_1_3_3,
"vlan_id": vlan_pppoe,
"card_id": card_1_1_3
}
service_vlan_ = create_resource(req, (endpoint + '/boxen/' + box_id + '/service_vlans'))
### Service CPE Management Vlan at ServicePort 1/1/3/3 ###
# Attach the CPE-management VLAN (name "3320") to this service port.
req = {
"name": "3320",
"service_port_id": service_port_1_1_3_3,
"vlan_id": vlan_cpem,
"card_id": card_1_1_3
}
service_vlan_ = create_resource(req, (endpoint + '/boxen/' + box_id + '/service_vlans'))
### Card 1/1/4 ###
# Create a physical card at the network device (admin operation)
# FTTH line card in slot 1/1/4 (product "ftth"), up and available; its
# ports below carry ONTs rather than direct CPEs.
req = {
"subrack_id": subrack_id,
"description": "Physical card 1/1/4",
"planned_type": "relt-a",
"actual_type": "relt-a",
"operational_state": "1",
"admin_state": "1",
"err_state": "no-error",
"availability": "available",
"alarm_profile": "none",
"capab_profile": "32port_xDSL",
"manufacturer": "ALCL",
"mnemonic": "RDLT-C",
"pba_code": "3FE68863GGFL",
"fpba_code": "3FE68863GGFL",
"fpba_ics": "02",
"clei_code": "VBIUAALBAB",
"serial_no": "AA1815FSE1CG",
"failed_test": "00:00:00:00",
"lt_restart_time": "1970-01-01:00:00:00",
"lt_restart_cause": "poweron",
"lt_restart_num": 0,
"mgnt_entity_oamipaddr": "0.0.0.0",
"mgnt_entity_pairnum": 0,
"dual_host_ip": "0.0.0.0",
"dual_host_loc": "none",
"product": "ftth"
}
card_1_1_4 = create_resource(req, (endpoint + '/boxen/' + box_id + '/cards'))
### Port 1/1/4/1 ###
# Create a physical port at the network device (admin operation)
# Active port on FTTH card 1/1/4: admin/operational up, 10000/25000
# (up/down) rates; remaining fields are the shared port defaults.
# NOTE(review): keys "max_delay_downsteam" and "curr_bandwith" look
# misspelled but are used consistently file-wide — presumably they must
# match the simulator's API schema, so they are left as-is.
req = {
"card_id": card_1_1_4,
"description": "Physical port 1/1/4/1",
"operational_state": "1",
"admin_state": "1",
"upstream": 10000,
"downstream": 25000,
"upstream_max": "100000",
"downstream_max": "100000",
"noise_margin_up": 0,
"noise_margin_down": 0,
"tgt_noise_margin_up": 0,
"tgt_noise_margin_down": 0,
"attenuation_up": 0,
"attenuation_down": 0,
"attained_upstream": 0,
"attained_downstream": 0,
"threshold_upstream": 0,
"threshold_downstream": 0,
"max_delay_upstream": 0,
"max_delay_downsteam": 0,
"if_index": 94502912,
"type": "ethernet-line",
"high_speed": 0,
"connector_present": "not-applicable",
"media": 0.0,
"largest_pkt_size": 0,
"curr_bandwith": 1244000000,
"phy_addr": " ",
"last_chg_opr_stat": "352-02:55:19",
"pkts_unknown_proto": 0,
"in_octets": 0,
"out_octets": 0,
"in_ucast_pkts": 0,
"out_ucast_pkts": 0,
"in_mcast_pkts": 0,
"out_mcast_pkts": 0,
"in_broadcast_pkts": 0,
"out_broadcast_pkts": 0,
"in_discard_pkts": 0,
"out_discard_pkts": 0,
"in_err_pkts": 0,
"out_err_pkts": 0,
"in_octets_hc": 0,
"out_octets_hc": 0,
"in_ucast_pkts_hc": 0,
"out_ucast_pkts_hc": 0,
"in_mcast_pkts_hc": 0,
"out_mcast_pkts_hc": 0,
"in_broadcast_pkts_hc": 0,
"out_broadcast_pkts_hc": 0,
"diag_avail_status": "no-error",
"los": "not-available",
"tx_fault": "no-tx-fault",
"tx_power": "3.85 dBm",
"rx_power": "not-available",
"tx_bias_current": "16.17 mA",
"supply_voltage": "3.23 VDC",
"temperature": "57.39 degrees Celsius",
"temperature_tca": "normal-value",
"voltage_tca": "normal-value",
"bias_current_tca": "normal-value",
"tx_power_tca": "normal-value",
"rx_power_tca": "normal-value",
"rssi_profile_id": 65535,
"rssi_state": "enable",
"inp_up": 0,
"inp_dn": 0,
"interl_us": 0,
"interl_dn": 0,
"cur_op_mode": "default",
"rinit_1d": 0,
"actual_tps_tc_mode": "ptm",
"rtx_mode_up": "unknown",
"rtx_mode_dn": "unknown",
"total_reset_attempt": 0,
"success_reset_attempt": 0,
"cur_init_state": "down",
"shutdown": False,
"speed": "1G",
"auto_negotiation": True,
"mtu": 1495
}
port_1_1_4_1 = create_resource(req, (endpoint + '/boxen/' + box_id + '/ports'))
### ServicePort 1/1/4/1 ####
# Service port bound to physical port 1/1/4/1 (both states up).
req = {
"connected_id": port_1_1_4_1,
"connected_type": "port",
"name": "1/1/4/1",
"admin_state": "1",
"operational_state": "1"
}
service_port_1_1_4_1 = create_resource(req, (endpoint + '/boxen/' + box_id + '/service_ports'))
### Service PPPoE Vlan at ServicePort 1/1/4/1 ###
# Attach the PPPoE VLAN (name "2620") to this service port.
req = {
"name": "2620",
"service_port_id": service_port_1_1_4_1,
"vlan_id": vlan_pppoe,
"card_id": card_1_1_4
}
service_vlan_ = create_resource(req, (endpoint + '/boxen/' + box_id + '/service_vlans'))
### Service CPE Management Vlan at ServicePort 1/1/4/1 ###
# Attach the CPE-management VLAN (name "3320") to this service port.
req = {
"name": "3320",
"service_port_id": service_port_1_1_4_1,
"vlan_id": vlan_cpem,
"card_id": card_1_1_4
}
service_vlan_ = create_resource(req, (endpoint + '/boxen/' + box_id + '/service_vlans'))
### Ont 1/1/4/1/1 ###
# Create a physical ont at the network device (admin operation)
# ONT on FTTH port 1/1/4/1, admin up, with capability flags.
# FIX: added "auto_neg_supported"/"auto_neg_status", which the two sibling
# ONT definitions in this file include but this one omitted — same values
# as the siblings to keep the fixture consistent.
req = {
"port_id": port_1_1_4_1,
"description": "Ont 1/1/4/1/1",
"admin_state": "1",
"index": 1,
"type": "10gbaselr",
"basebx10d": "yes",
"media_available": "available",
"jabber_state": "jabber",
"b100basefxfd": "no",
"b100baselx10": "no",
"b100basebx10d": "no",
"b100basebx10u": "yes",
"b100basetxfd": "yes",
"b1000basetfd": "no",
"b10gbasetfd": "yes",
"b1000basesxfd": "no",
"b1000baselx10": "no",
"b1000baselxfd": "yes",
"b1000basebx10u": "yes",
"b1000basebx10d": "no",
"b10gbaser": "no",
"b10gbaselr": "yes",
"b10gbaseer": "no",
"b2500basex": "no",
"auto_neg_supported": True,
"auto_neg_status": "complete",
"cap100base_tfd": "no",
"cap1000base_xfd": "yes",
"cap1000base_tfd": "yes",
"cap10gbase_tfd": "no",
"act_num_data_ports": 1,
"act_num_voice_ports": 0,
"actual_card_type": "ethernet",
"actual_ont_integ": "integrated",
"actual_serial_num": "0168FC3C",
"actual_version_num": "G2110V1D0",
"actual_vendorid": "GNXS",
"actual_cardid": "FiberTwist-G2110",
"state": "enabled",
"sernum": "ALCLB140677C"
}
ont_1_1_4_1_1 = create_resource(req, (endpoint + '/boxen/' + box_id + '/onts'))
### OntPort 1/1/4/1/1/1/1 ###
# Create a physical ont-port at the ont (admin operation)
req = {
"ont_id": ont_1_1_4_1_1,
"description": "OntPort 1/1/4/1/1/1/1",
"admin_state": "1"
}
ont_port_1_1_4_1_1_1_1 = create_resource(req, (endpoint + '/boxen/' + box_id + '/ont_ports'))
### ServicePort 1/1/4/1/1/1/1 ####
# Service port bound to ONT port 1/1/4/1/1/1/1 (both states up).
req = {
"connected_id": ont_port_1_1_4_1_1_1_1,
"connected_type": "port",
"name": "1/1/4/1/1/1/1",
"admin_state": "1",
"operational_state": "1"
}
service_port_1_1_4_1_1_1_1 = create_resource(req, (endpoint + '/boxen/' + box_id + '/service_ports'))
### Service PPPoE Vlan at ServicePort 1/1/4/1/1/1/1 ###
# Attach the PPPoE VLAN (name "2620") to this service port.
req = {
"name": "2620",
"service_port_id": service_port_1_1_4_1_1_1_1,
"vlan_id": vlan_pppoe,
"card_id": card_1_1_4
}
service_vlan_ = create_resource(req, (endpoint + '/boxen/' + box_id + '/service_vlans'))
### Service CPE Management Vlan at ServicePort 1/1/4/1/1/1/1 ###
# Attach the CPE-management VLAN (name "3320") to this service port.
req = {
"name": "3320",
"service_port_id": service_port_1_1_4_1_1_1_1,
"vlan_id": vlan_cpem,
"card_id": card_1_1_4
}
service_vlan_ = create_resource(req, (endpoint + '/boxen/' + box_id + '/service_vlans'))
### Cpe 1/1/4/1/1/1/1/1 ###
# Create a physical cpe at the ont-port (admin operation)
# CPE behind ONT port 1/1/4/1/1/1/1 (admin_state "1" = up).
req = {
"ont_port_id": ont_port_1_1_4_1_1_1_1,
"description": "Cpe 1/1/4/1/1/1/1/1",
"serial_no": "GFED123456XY",
"admin_state": "1",
"mac": "a4:c9:21:bd:11:c3"
}
cpe_1_1_4_1_1_1_1_1 = create_resource(req, (endpoint + '/boxen/' + box_id + '/cpes'))
### CpePort 1/1/4/1/1/1/1/1/1 ###
# Create a physical cpe-port at the cpe (admin operation)
req = {
"cpe_id": cpe_1_1_4_1_1_1_1_1,
"description": "CpePort 1/1/4/1/1/1/1/1/1"
}
cpe_port_1_1_4_1_1_1_1_1_1 = create_resource(req, (endpoint + '/boxen/' + box_id + '/cpe_ports'))
### Port 1/1/4/2 ###
# Create a physical port at the network device (admin operation)
# Port on FTTH card 1/1/4 that is enabled (admin "1") but not in sync
# (operational "0", zero rates); remaining fields are the shared defaults.
# NOTE(review): keys "max_delay_downsteam" and "curr_bandwith" look
# misspelled but are used consistently file-wide — presumably they must
# match the simulator's API schema, so they are left as-is.
req = {
"card_id": card_1_1_4,
"description": "Physical port 1/1/4/2",
"operational_state": "0",
"admin_state": "1",
"upstream": 0,
"downstream": 0,
"upstream_max": "100000",
"downstream_max": "100000",
"noise_margin_up": 0,
"noise_margin_down": 0,
"tgt_noise_margin_up": 0,
"tgt_noise_margin_down": 0,
"attenuation_up": 0,
"attenuation_down": 0,
"attained_upstream": 0,
"attained_downstream": 0,
"threshold_upstream": 0,
"threshold_downstream": 0,
"max_delay_upstream": 0,
"max_delay_downsteam": 0,
"if_index": 94502912,
"type": "ethernet-line",
"high_speed": 0,
"connector_present": "not-applicable",
"media": 0.0,
"largest_pkt_size": 0,
"curr_bandwith": 1244000000,
"phy_addr": " ",
"last_chg_opr_stat": "352-02:55:19",
"pkts_unknown_proto": 0,
"in_octets": 0,
"out_octets": 0,
"in_ucast_pkts": 0,
"out_ucast_pkts": 0,
"in_mcast_pkts": 0,
"out_mcast_pkts": 0,
"in_broadcast_pkts": 0,
"out_broadcast_pkts": 0,
"in_discard_pkts": 0,
"out_discard_pkts": 0,
"in_err_pkts": 0,
"out_err_pkts": 0,
"in_octets_hc": 0,
"out_octets_hc": 0,
"in_ucast_pkts_hc": 0,
"out_ucast_pkts_hc": 0,
"in_mcast_pkts_hc": 0,
"out_mcast_pkts_hc": 0,
"in_broadcast_pkts_hc": 0,
"out_broadcast_pkts_hc": 0,
"diag_avail_status": "no-error",
"los": "not-available",
"tx_fault": "no-tx-fault",
"tx_power": "3.85 dBm",
"rx_power": "not-available",
"tx_bias_current": "16.17 mA",
"supply_voltage": "3.23 VDC",
"temperature": "57.39 degrees Celsius",
"temperature_tca": "normal-value",
"voltage_tca": "normal-value",
"bias_current_tca": "normal-value",
"tx_power_tca": "normal-value",
"rx_power_tca": "normal-value",
"rssi_profile_id": 65535,
"rssi_state": "enable",
"inp_up": 0,
"inp_dn": 0,
"interl_us": 0,
"interl_dn": 0,
"cur_op_mode": "default",
"rinit_1d": 0,
"actual_tps_tc_mode": "ptm",
"rtx_mode_up": "unknown",
"rtx_mode_dn": "unknown",
"total_reset_attempt": 0,
"success_reset_attempt": 0,
"cur_init_state": "down",
"shutdown": False,
"speed": "1G",
"auto_negotiation": True,
"mtu": 1495
}
port_1_1_4_2 = create_resource(req, (endpoint + '/boxen/' + box_id + '/ports'))
### ServicePort 1/1/4/2 ####
# Service port bound to physical port 1/1/4/2 (admin up, operational down).
req = {
"connected_id": port_1_1_4_2,
"connected_type": "port",
"name": "1/1/4/2",
"admin_state": "1",
"operational_state": "0"
}
service_port_1_1_4_2 = create_resource(req, (endpoint + '/boxen/' + box_id + '/service_ports'))
### Service PPPoE Vlan at ServicePort 1/1/4/2 ###
# Attach the PPPoE VLAN (name "2620") to this service port.
req = {
"name": "2620",
"service_port_id": service_port_1_1_4_2,
"vlan_id": vlan_pppoe,
"card_id": card_1_1_4
}
service_vlan_ = create_resource(req, (endpoint + '/boxen/' + box_id + '/service_vlans'))
### Service CPE Management Vlan at ServicePort 1/1/4/2 ###
# Attach the CPE-management VLAN (name "3320") to this service port.
req = {
"name": "3320",
"service_port_id": service_port_1_1_4_2,
"vlan_id": vlan_cpem,
"card_id": card_1_1_4
}
service_vlan_ = create_resource(req, (endpoint + '/boxen/' + box_id + '/service_vlans'))
### Ont 1/1/4/2/1 ###
# Create a physical ont at the network device (admin operation)
# ONT on FTTH port 1/1/4/2, admin up, with capability flags; its ONT port
# below is created admin-down.
req = {
"port_id": port_1_1_4_2,
"description": "Ont 1/1/4/2/1",
"admin_state": "1",
"index": 1,
"type": "10gbaselr",
"basebx10d": "yes",
"media_available": "available",
"jabber_state": "jabber",
"b100basefxfd": "no",
"b100baselx10": "no",
"b100basebx10d": "no",
"b100basebx10u": "yes",
"b100basetxfd": "yes",
"b1000basetfd": "no",
"b10gbasetfd": "yes",
"b1000basesxfd": "no",
"b1000baselx10": "no",
"b1000baselxfd": "yes",
"b1000basebx10u": "yes",
"b1000basebx10d": "no",
"b10gbaser": "no",
"b10gbaselr": "yes",
"b10gbaseer": "no",
"b2500basex": "no",
"auto_neg_supported": True,
"auto_neg_status": "complete",
"cap100base_tfd": "no",
"cap1000base_xfd": "yes",
"cap1000base_tfd": "yes",
"cap10gbase_tfd": "no",
"act_num_data_ports": 1,
"act_num_voice_ports": 0,
"actual_card_type": "ethernet",
"actual_ont_integ": "integrated",
"actual_serial_num": "0168FC3C",
"actual_version_num": "G2110V1D0",
"actual_vendorid": "GNXS",
"actual_cardid": "FiberTwist-G2110",
"state": "enabled",
"sernum": "ALCLB140677C"
}
ont_1_1_4_2_1 = create_resource(req, (endpoint + '/boxen/' + box_id + '/onts'))
### OntPort 1/1/4/2/1/1/1 ###
# Create a physical ont-port at the ont (admin operation)
req = {
"ont_id": ont_1_1_4_2_1,
"description": "OntPort 1/1/4/2/1/1/1",
"admin_state": "0"
}
ont_port_1_1_4_2_1_1_1 = create_resource(req, (endpoint + '/boxen/' + box_id + '/ont_ports'))
### ServicePort 1/1/4/2/1/1/1 ####
# Service port bound to ONT port 1/1/4/2/1/1/1.
# NOTE(review): both states are "1" here although the ONT port above is
# created with admin_state "0" — confirm this asymmetry is intended.
req = {
"connected_id": ont_port_1_1_4_2_1_1_1,
"connected_type": "port",
"name": "1/1/4/2/1/1/1",
"admin_state": "1",
"operational_state": "1"
}
service_port_1_1_4_2_1_1_1 = create_resource(req, (endpoint + '/boxen/' + box_id + '/service_ports'))
### Service PPPoE Vlan at ServicePort 1/1/4/2/1/1/1 ###
# Attach the PPPoE VLAN (name "2620") to this service port.
req = {
"name": "2620",
"service_port_id": service_port_1_1_4_2_1_1_1,
"vlan_id": vlan_pppoe,
"card_id": card_1_1_4
}
service_vlan_ = create_resource(req, (endpoint + '/boxen/' + box_id + '/service_vlans'))
### Service CPE Management Vlan at ServicePort 1/1/4/2/1/1/1 ###
# Attach the CPE-management VLAN (name "3320") to this service port.
req = {
"name": "3320",
"service_port_id": service_port_1_1_4_2_1_1_1,
"vlan_id": vlan_cpem,
"card_id": card_1_1_4
}
service_vlan_ = create_resource(req, (endpoint + '/boxen/' + box_id + '/service_vlans'))
### Cpe 1/1/4/2/1/1/1/1 ###
# Create a physical cpe at the ont-port (admin operation)
# CPE behind ONT port 1/1/4/2/1/1/1 (admin_state "0" = down).
req = {
"ont_port_id": ont_port_1_1_4_2_1_1_1,
"description": "Cpe 1/1/4/2/1/1/1/1",
"serial_no": "GFED123456YZ",
"admin_state": "0",
"mac": "04:1f:1a:14:fc:35"
}
cpe_1_1_4_2_1_1_1_1 = create_resource(req, (endpoint + '/boxen/' + box_id + '/cpes'))
### CpePort 1/1/4/2/1/1/1/1/1 ###
# Create a physical cpe-port at the cpe (admin operation)
req = {
"cpe_id": cpe_1_1_4_2_1_1_1_1,
"description": "CpePort 1/1/4/2/1/1/1/1/1"
}
cpe_port_1_1_4_2_1_1_1_1_1 = create_resource(req, (endpoint + '/boxen/' + box_id + '/cpe_ports'))
### Port 1/1/4/3 ###
# Create a physical port at the network device (admin operation)
# Unused port on FTTH card 1/1/4: admin/operational down, zero rates, and
# the shared static counter/diagnostic defaults.
# NOTE(review): keys "max_delay_downsteam" and "curr_bandwith" look
# misspelled but are used consistently file-wide — presumably they must
# match the simulator's API schema, so they are left as-is.
req = {
"card_id": card_1_1_4,
"description": "Physical port 1/1/4/3",
"operational_state": "0",
"admin_state": "0",
"upstream": 0,
"downstream": 0,
"upstream_max": "100000",
"downstream_max": "100000",
"noise_margin_up": 0,
"noise_margin_down": 0,
"tgt_noise_margin_up": 0,
"tgt_noise_margin_down": 0,
"attenuation_up": 0,
"attenuation_down": 0,
"attained_upstream": 0,
"attained_downstream": 0,
"threshold_upstream": 0,
"threshold_downstream": 0,
"max_delay_upstream": 0,
"max_delay_downsteam": 0,
"if_index": 94502912,
"type": "ethernet-line",
"high_speed": 0,
"connector_present": "not-applicable",
"media": 0.0,
"largest_pkt_size": 0,
"curr_bandwith": 1244000000,
"phy_addr": " ",
"last_chg_opr_stat": "352-02:55:19",
"pkts_unknown_proto": 0,
"in_octets": 0,
"out_octets": 0,
"in_ucast_pkts": 0,
"out_ucast_pkts": 0,
"in_mcast_pkts": 0,
"out_mcast_pkts": 0,
"in_broadcast_pkts": 0,
"out_broadcast_pkts": 0,
"in_discard_pkts": 0,
"out_discard_pkts": 0,
"in_err_pkts": 0,
"out_err_pkts": 0,
"in_octets_hc": 0,
"out_octets_hc": 0,
"in_ucast_pkts_hc": 0,
"out_ucast_pkts_hc": 0,
"in_mcast_pkts_hc": 0,
"out_mcast_pkts_hc": 0,
"in_broadcast_pkts_hc": 0,
"out_broadcast_pkts_hc": 0,
"diag_avail_status": "no-error",
"los": "not-available",
"tx_fault": "no-tx-fault",
"tx_power": "3.85 dBm",
"rx_power": "not-available",
"tx_bias_current": "16.17 mA",
"supply_voltage": "3.23 VDC",
"temperature": "57.39 degrees Celsius",
"temperature_tca": "normal-value",
"voltage_tca": "normal-value",
"bias_current_tca": "normal-value",
"tx_power_tca": "normal-value",
"rx_power_tca": "normal-value",
"rssi_profile_id": 65535,
"rssi_state": "enable",
"inp_up": 0,
"inp_dn": 0,
"interl_us": 0,
"interl_dn": 0,
"cur_op_mode": "default",
"rinit_1d": 0,
"actual_tps_tc_mode": "ptm",
"rtx_mode_up": "unknown",
"rtx_mode_dn": "unknown",
"total_reset_attempt": 0,
"success_reset_attempt": 0,
"cur_init_state": "down",
"shutdown": False,
"speed": "1G",
"auto_negotiation": True,
"mtu": 1495
}
port_1_1_4_3 = create_resource(req, (endpoint + '/boxen/' + box_id + '/ports'))
### ServicePort 1/1/4/3 ####
# Service port bound to physical port 1/1/4/3.
# FIX: "name" was "1/1/4/2" (copy/paste from the previous section), which
# duplicated the ServicePort 1/1/4/2 name; it now matches the port path,
# consistent with every other service port in this file.
req = {
"connected_id": port_1_1_4_3,
"connected_type": "port",
"name": "1/1/4/3",
"admin_state": "0",
"operational_state": "0"
}
service_port_1_1_4_3 = create_resource(req, (endpoint + '/boxen/' + box_id + '/service_ports'))
### Service PPPoE Vlan at ServicePort 1/1/4/3 ###
# Attach the PPPoE VLAN (name "2620") to this service port.
req = {
"name": "2620",
"service_port_id": service_port_1_1_4_3,
"vlan_id": vlan_pppoe,
"card_id": card_1_1_4
}
service_vlan_ = create_resource(req, (endpoint + '/boxen/' + box_id + '/service_vlans'))
### Service CPE Management Vlan at ServicePort 1/1/4/3 ###
# Attach the CPE-management VLAN (name "3320") to this service port.
req = {
"name": "3320",
"service_port_id": service_port_1_1_4_3,
"vlan_id": vlan_cpem,
"card_id": card_1_1_4
}
service_vlan_ = create_resource(req, (endpoint + '/boxen/' + box_id + '/service_vlans'))
### Ont 1/1/4/3/1 ###
# Create a physical ont at the network device (admin operation)
# ONT on FTTH port 1/1/4/3, created admin-down, with capability flags;
# its ONT port below is also admin-down.
req = {
"port_id": port_1_1_4_3,
"description": "Ont 1/1/4/3/1",
"admin_state": "0",
"index": 1,
"type": "10gbaselr",
"basebx10d": "yes",
"media_available": "available",
"jabber_state": "jabber",
"b100basefxfd": "no",
"b100baselx10": "no",
"b100basebx10d": "no",
"b100basebx10u": "yes",
"b100basetxfd": "yes",
"b1000basetfd": "no",
"b10gbasetfd": "yes",
"b1000basesxfd": "no",
"b1000baselx10": "no",
"b1000baselxfd": "yes",
"b1000basebx10u": "yes",
"b1000basebx10d": "no",
"b10gbaser": "no",
"b10gbaselr": "yes",
"b10gbaseer": "no",
"b2500basex": "no",
"auto_neg_supported": True,
"auto_neg_status": "complete",
"cap100base_tfd": "no",
"cap1000base_xfd": "yes",
"cap1000base_tfd": "yes",
"cap10gbase_tfd": "no",
"act_num_data_ports": 1,
"act_num_voice_ports": 0,
"actual_card_type": "ethernet",
"actual_ont_integ": "integrated",
"actual_serial_num": "0168FC3C",
"actual_version_num": "G2110V1D0",
"actual_vendorid": "GNXS",
"actual_cardid": "FiberTwist-G2110",
"state": "enabled",
"sernum": "ALCLB140677C"
}
ont_1_1_4_3_1 = create_resource(req, (endpoint + '/boxen/' + box_id + '/onts'))
### OntPort 1/1/4/3/1/1/1 ###
# Create a physical ont-port at the ont (admin operation)
req = {
"ont_id": ont_1_1_4_3_1,
"description": "OntPort 1/1/4/3/1/1/1",
"admin_state": "0"
}
ont_port_1_1_4_3_1_1_1 = create_resource(req, (endpoint + '/boxen/' + box_id + '/ont_ports'))
### ServicePort 1/1/4/3/1/1/1 ####
# Service port bound to ONT port 1/1/4/3/1/1/1 (both states down).
req = {
"connected_id": ont_port_1_1_4_3_1_1_1,
"connected_type": "port",
"name": "1/1/4/3/1/1/1",
"admin_state": "0",
"operational_state": "0"
}
service_port_1_1_4_3_1_1_1 = create_resource(req, (endpoint + '/boxen/' + box_id + '/service_ports'))
### Service PPPoE Vlan at ServicePort 1/1/4/3/1/1/1 ###
# Attach the PPPoE VLAN (name "2620") to this service port.
req = {
"name": "2620",
"service_port_id": service_port_1_1_4_3_1_1_1,
"vlan_id": vlan_pppoe,
"card_id": card_1_1_4
}
service_vlan_ = create_resource(req, (endpoint + '/boxen/' + box_id + '/service_vlans'))
### Service CPE Management Vlan at ServicePort 1/1/4/3/1/1/1 ###
# Attach the CPE-management VLAN (name "3320") to this service port.
req = {
"name": "3320",
"service_port_id": service_port_1_1_4_3_1_1_1,
"vlan_id": vlan_cpem,
"card_id": card_1_1_4
}
service_vlan_ = create_resource(req, (endpoint + '/boxen/' + box_id + '/service_vlans'))
### Cpe 1/1/4/3/1/1/1/1 ###
# Create a physical cpe at the ont-port (admin operation)
req = {
"ont_port_id": ont_port_1_1_4_3_1_1_1,
"description": "Cpe 1/1/4/3/1/1/1/1",
"serial_no": "GFED123456WQ",
"admin_state": "0",
"mac": "5b:8a:36:50:d4:8b"
}
cpe_1_1_4_3_1_1_1_1 = create_resource(req, (endpoint + '/boxen/' + box_id + '/cpes'))
### CpePort 1/1/4/3/1/1 ###
# Create a physical cpe-port at the cpe (admin operation)
req = {
"cpe_id": cpe_1_1_4_3_1_1_1_1,
"description": "CpePort 1/1/4/3/1/1/1/1/1"
}
cpe_port_1_1_4_3_1_1_1_1_1 = create_resource(req, (endpoint + '/boxen/' + box_id + '/cpe_ports'))
### Card 1/1/5 ###
# Create a physical card at the network device (admin operation)
req = {
"subrack_id": subrack_id,
"description": "Physical card 1/1/5",
"planned_type": "fant-f",
"actual_type": "fant-f",
"operational_state": "1",
"admin_state": "1",
"err_state": "no-error",
"availability": "available",
"alarm_profile": "none",
"capab_profile": "32port_xDSL",
"manufacturer": "ALCL",
"mnemonic": "RDLT-C",
"pba_code": "3FE68863GGFL",
"fpba_code": "3FE68863GGFL",
"fpba_ics": "02",
"clei_code": "VBIUAALBAB",
"serial_no": "AA1815FSE1CG",
"failed_test": "00:00:00:00",
"lt_restart_time": "1970-01-01:00:00:00",
"lt_restart_cause": "poweron",
"lt_restart_num": 0,
"mgnt_entity_oamipaddr": "0.0.0.0",
"mgnt_entity_pairnum": 0,
"dual_host_ip": "0.0.0.0",
"dual_host_loc": "none",
"product": "ftth-pon"
}
card_1_1_5 = create_resource(req, (endpoint + '/boxen/' + box_id + '/cards'))
### Port 1/1/5/1 ###
# Create a physical port at the network device (admin operation)
req = {
"card_id": card_1_1_5,
"description": "Physical port 1/1/5/1",
"operational_state": "1",
"admin_state": "1",
"upstream": 10000,
"downstream": 25000,
"upstream_max": "100000",
"downstream_max": "100000",
"noise_margin_up": 0,
"noise_margin_down": 0,
"tgt_noise_margin_up": 0,
"tgt_noise_margin_down": 0,
"attenuation_up": 0,
"attenuation_down": 0,
"attained_upstream": 0,
"attained_downstream": 0,
"threshold_upstream": 0,
"threshold_downstream": 0,
"max_delay_upstream": 0,
"max_delay_downsteam": 0,
"if_index": 94502912,
"type": "ethernet-line",
"high_speed": 0,
"connector_present": "not-applicable",
"media": 0.0,
"largest_pkt_size": 0,
"curr_bandwith": 1244000000,
"phy_addr": " ",
"last_chg_opr_stat": "352-02:55:19",
"pkts_unknown_proto": 0,
"in_octets": 0,
"out_octets": 0,
"in_ucast_pkts": 0,
"out_ucast_pkts": 0,
"in_mcast_pkts": 0,
"out_mcast_pkts": 0,
"in_broadcast_pkts": 0,
"out_broadcast_pkts": 0,
"in_discard_pkts": 0,
"out_discard_pkts": 0,
"in_err_pkts": 0,
"out_err_pkts": 0,
"in_octets_hc": 0,
"out_octets_hc": 0,
"in_ucast_pkts_hc": 0,
"out_ucast_pkts_hc": 0,
"in_mcast_pkts_hc": 0,
"out_mcast_pkts_hc": 0,
"in_broadcast_pkts_hc": 0,
"out_broadcast_pkts_hc": 0,
"diag_avail_status": "no-error",
"los": "not-available",
"tx_fault": "no-tx-fault",
"tx_power": "3.85 dBm",
"rx_power": "not-available",
"tx_bias_current": "16.17 mA",
"supply_voltage": "3.23 VDC",
"temperature": "57.39 degrees Celsius",
"temperature_tca": "normal-value",
"voltage_tca": "normal-value",
"bias_current_tca": "normal-value",
"tx_power_tca": "normal-value",
"rx_power_tca": "normal-value",
"rssi_profile_id": 65535,
"rssi_state": "enable",
"inp_up": 0,
"inp_dn": 0,
"interl_us": 0,
"interl_dn": 0,
"cur_op_mode": "default",
"rinit_1d": 0,
"actual_tps_tc_mode": "ptm",
"rtx_mode_up": "unknown",
"rtx_mode_dn": "unknown",
"total_reset_attempt": 0,
"success_reset_attempt": 0,
"cur_init_state": "down",
"shutdown": False,
"speed": "1G",
"auto_negotiation": True,
"mtu": 1495
}
port_1_1_5_1 = create_resource(req, (endpoint + '/boxen/' + box_id + '/ports'))
### ServicePort 1/1/5/1 ####
req = {
"connected_id": port_1_1_5_1,
"connected_type": "port",
"name": "1/1/5/1",
"admin_state": "1",
"operational_state": "1"
}
service_port_1_1_5_1 = create_resource(req, (endpoint + '/boxen/' + box_id + '/service_ports'))
### Service PPPoE Vlan at ServicePort 1/1/5/1 ###
req = {
"name": "2620",
"service_port_id": service_port_1_1_5_1,
"vlan_id": vlan_pppoe,
"card_id": card_1_1_5
}
service_vlan_ = create_resource(req, (endpoint + '/boxen/' + box_id + '/service_vlans'))
### Service CPE Management Vlan at ServicePort 1/1/5/1 ###
req = {
"name": "3320",
"service_port_id": service_port_1_1_5_1,
"vlan_id": vlan_cpem,
"card_id": card_1_1_5
}
service_vlan_ = create_resource(req, (endpoint + '/boxen/' + box_id + '/service_vlans'))
### Ont 1/1/5/1/1 ###
# Create a physical ont at the network device (admin operation)
req = {
"port_id": port_1_1_5_1,
"description": "Ont 1/1/5/1/1",
"admin_state": "1",
"index": 1,
"type": "10gbaselr",
"basebx10d": "yes",
"media_available": "available",
"jabber_state": "jabber",
"b100basefxfd": "no",
"b100baselx10": "no",
"b100basebx10d": "no",
"b100basebx10u": "yes",
"b100basetxfd": "yes",
"b1000basetfd": "no",
"b10gbasetfd": "yes",
"b1000basesxfd": "no",
"b1000baselx10": "no",
"b1000baselxfd": "yes",
"b1000basebx10u": "yes",
"b1000basebx10d": "no",
"b10gbaser": "no",
"b10gbaselr": "yes",
"b10gbaseer": "no",
"b2500basex": "no",
"auto_neg_supported": True,
"auto_neg_status": "complete",
"cap100base_tfd": "no",
"cap1000base_xfd": "yes",
"cap1000base_tfd": "yes",
"cap10gbase_tfd": "no",
"act_num_data_ports": 1,
"act_num_voice_ports": 0,
"actual_card_type": "pon",
"actual_ont_integ": "integrated",
"actual_serial_num": "0168FC3C",
"actual_version_num": "G2110V1D0",
"actual_vendorid": "GNXS",
"actual_cardid": "FiberTwist-G2110",
"state": "enabled",
"sernum": "ALCLB140677C"
}
ont_1_1_5_1_1 = create_resource(req, (endpoint + '/boxen/' + box_id + '/onts'))
### OntPort 1/1/5/1/1/1/1 ###
# Create a physical ont-port at the ont (admin operation)
req = {
"ont_id": ont_1_1_5_1_1,
"description": "OntPort 1/1/5/1/1/1/1",
"admin_state": "1",
"operational_state": "1"
}
ont_port_1_1_5_1_1_1_1 = create_resource(req, (endpoint + '/boxen/' + box_id + '/ont_ports'))
### ServicePort 1/1/5/1/1/1/1 ####
req = {
"connected_id": ont_port_1_1_5_1_1_1_1,
"connected_type": "ont",
"name": "1/1/5/1/1/1/1",
"admin_state": "1",
"operational_state": "1"
}
service_port_1_1_5_1_1_1_1 = create_resource(req, (endpoint + '/boxen/' + box_id + '/service_ports'))
### Service PPPoE Vlan at ServicePort 1/1/5/1/1/1/1 ###
req = {
"name": "2620",
"service_port_id": service_port_1_1_5_1_1_1_1,
"vlan_id": vlan_pppoe,
"card_id": card_1_1_5
}
service_vlan_ = create_resource(req, (endpoint + '/boxen/' + box_id + '/service_vlans'))
### Service CPE Management Vlan at ServicePort 1/1/5/1/1/1/1 ###
req = {
"name": "3320",
"service_port_id": service_port_1_1_5_1_1_1_1,
"vlan_id": vlan_cpem,
"card_id": card_1_1_5
}
service_vlan_ = create_resource(req, (endpoint + '/boxen/' + box_id + '/service_vlans'))
### Cpe 1/1/5/1/1/1/1/1 ###
# Create a physical cpe at the ont-port (admin operation)
req = {
"ont_port_id": ont_port_1_1_5_1_1_1_1,
"description": "Cpe 1/1/5/1/1/1/1/1",
"serial_no": "GFED135790XY",
"admin_state": "1",
"mac": "29:62:57:a6:60:69"
}
cpe_1_1_5_1_1_1_1_1 = create_resource(req, (endpoint + '/boxen/' + box_id + '/cpes'))
### CpePort 1/1/5/1/1/1 ###
# Create a physical cpe-port at the cpe (admin operation)
req = {
"cpe_id": cpe_1_1_5_1_1_1_1_1,
"description": "CpePort 1/1/5/1/1/1/1/1/1"
}
cpe_port_1_1_5_1_1_1_1_1_1 = create_resource(req, (endpoint + '/boxen/' + box_id + '/cpe_ports'))
### Ont 1/1/5/1/2 ###
# Create a physical ont at the network device (admin operation)
req = {
"port_id": port_1_1_5_1,
"description": "Ont 1/1/5/1/2",
"admin_state": "1",
"index": 1,
"type": "10gbaselr",
"basebx10d": "yes",
"media_available": "available",
"jabber_state": "jabber",
"b100basefxfd": "no",
"b100baselx10": "no",
"b100basebx10d": "no",
"b100basebx10u": "yes",
"b100basetxfd": "yes",
"b1000basetfd": "no",
"b10gbasetfd": "yes",
"b1000basesxfd": "no",
"b1000baselx10": "no",
"b1000baselxfd": "yes",
"b1000basebx10u": "yes",
"b1000basebx10d": "no",
"b10gbaser": "no",
"b10gbaselr": "yes",
"b10gbaseer": "no",
"b2500basex": "no",
"auto_neg_supported": True,
"auto_neg_status": "complete",
"cap100base_tfd": "no",
"cap1000base_xfd": "yes",
"cap1000base_tfd": "yes",
"cap10gbase_tfd": "no",
"act_num_data_ports": 1,
"act_num_voice_ports": 0,
"actual_card_type": "pon",
"actual_ont_integ": "integrated",
"actual_serial_num": "0168FC3C",
"actual_version_num": "G2110V1D0",
"actual_vendorid": "GNXS",
"actual_cardid": "FiberTwist-G2110",
"state": "enabled",
"sernum": "ALCLB140677C"
}
ont_1_1_5_1_2 = create_resource(req, (endpoint + '/boxen/' + box_id + '/onts'))
### OntPort 1/1/5/1/2/1/1 ###
# Create a physical ont-port at the ont (admin operation)
req = {
"ont_id": ont_1_1_5_1_2,
"description": "OntPort 1/1/5/1/2/1/1",
"admin_state": "1"
}
ont_port_1_1_5_1_2_1_1 = create_resource(req, (endpoint + '/boxen/' + box_id + '/ont_ports'))
### ServicePort 1/1/5/1/2/1/1 ####
req = {
"connected_id": ont_port_1_1_5_1_2_1_1,
"connected_type": "ont",
"name": "1/1/5/1/2/1/1",
"admin_state": "1",
"operational_state": "1"
}
service_port_1_1_5_1_2_1_1 = create_resource(req, (endpoint + '/boxen/' + box_id + '/service_ports'))
### Service PPPoE Vlan at ServicePort 1/1/5/1/2/1/1 ###
req = {
"name": "2620",
"service_port_id": service_port_1_1_5_1_2_1_1,
"vlan_id": vlan_pppoe,
"card_id": card_1_1_5
}
service_vlan_ = create_resource(req, (endpoint + '/boxen/' + box_id + '/service_vlans'))
### Service CPE Management Vlan at ServicePort 1/1/5/1/2/1/1 ###
req = {
"name": "3320",
"service_port_id": service_port_1_1_5_1_2_1_1,
"vlan_id": vlan_cpem,
"card_id": card_1_1_5
}
service_vlan_ = create_resource(req, (endpoint + '/boxen/' + box_id + '/service_vlans'))
### Cpe 1/1/5/1/2/1/1/1 ###
# Create a physical cpe at the ont-port (admin operation)
req = {
"ont_port_id": ont_port_1_1_5_1_2_1_1,
"description": "Cpe 1/1/5/1/2/1/1/1",
"serial_no": "GFED132546XY",
"admin_state": "1",
"mac": "08:97:dc:ca:07:8e"
}
cpe_1_1_5_1_2_1_1_1 = create_resource(req, (endpoint + '/boxen/' + box_id + '/cpes'))
### CpePort 1/1/5/1/2/1 ###
# Create a physical cpe-port at the cpe (admin operation)
req = {
"cpe_id": cpe_1_1_5_1_2_1_1_1,
"description": "CpePort 1/1/5/1/2/1/1/1/1"
}
cpe_port_1_1_5_1_2_1_1_1_1 = create_resource(req, (endpoint + '/boxen/' + box_id + '/cpe_ports'))
### OntPort 1/1/5/1/2/1/2 ###
# Create a physical ont-port at the ont (admin operation)
req = {
"ont_id": ont_1_1_5_1_2,
"description": "OntPort 1/1/5/1/2/1/2",
"admin_state": "1"
}
ont_port_1_1_5_1_2_1_2 = create_resource(req, (endpoint + '/boxen/' + box_id + '/ont_ports'))
### ServicePort 1/1/5/1/1/1/1 ####
req = {
"connected_id": ont_port_1_1_5_1_2_1_2,
"connected_type": "ont",
"name": "1/1/5/1/2/1/2",
"admin_state": "1",
"operational_state": "1"
}
service_port_1_1_5_1_2_1_2 = create_resource(req, (endpoint + '/boxen/' + box_id + '/service_ports'))
### Service PPPoE Vlan at ServicePort 1/1/5/1/2/1/2 ###
req = {
"name": "2620",
"service_port_id": service_port_1_1_5_1_2_1_2,
"vlan_id": vlan_pppoe,
"card_id": card_1_1_5
}
service_vlan_ = create_resource(req, (endpoint + '/boxen/' + box_id + '/service_vlans'))
### Service CPE Management Vlan at ServicePort 1/1/5/1/2/1/2 ###
req = {
"name": "3320",
"service_port_id": service_port_1_1_5_1_2_1_2,
"vlan_id": vlan_cpem,
"card_id": card_1_1_5
}
service_vlan_ = create_resource(req, (endpoint + '/boxen/' + box_id + '/service_vlans'))
### Cpe 1/1/5/1/2/1/2/1 ###
# Create a physical cpe at the ont-port (admin operation)
req = {
"ont_port_id": ont_port_1_1_5_1_2_1_2,
"description": "Cpe 1/1/5/1/2/1/2/1",
"serial_no": "GFED213465XY",
"admin_state": "1",
"mac": "6f:4a:1e:b4:51:f5"
}
cpe_1_1_5_1_2_1_2_1 = create_resource(req, (endpoint + '/boxen/' + box_id + '/cpes'))
### CpePort 1/1/5/1/2/1 ###
# Create a physical cpe-port at the cpe (admin operation)
req = {
"cpe_id": cpe_1_1_5_1_2_1_2_1,
"description": "CpePort 1/1/5/1/2/1/2/1/1"
}
cpe_port_1_1_5_1_2_1_2_1_1 = create_resource(req, (endpoint + '/boxen/' + box_id + '/cpe_ports'))
return
| 33.555438
| 479
| 0.534422
| 12,180
| 95,331
| 3.831527
| 0.041215
| 0.041527
| 0.022692
| 0.014228
| 0.929909
| 0.919109
| 0.909188
| 0.893567
| 0.880561
| 0.870682
| 0
| 0.082437
| 0.300144
| 95,331
| 2,840
| 480
| 33.567254
| 0.617045
| 0.113552
| 0
| 0.792593
| 0
| 0.000412
| 0.420825
| 0.012806
| 0
| 0
| 0
| 0
| 0
| 1
| 0.000412
| false
| 0.000823
| 0.000823
| 0
| 0.001646
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
cd71f4d982e27b566f3e25f4bfc34d9536d4937b
| 220
|
py
|
Python
|
src/testcase/GN_F1331/input_case/GN_F1331_Unbind.py
|
maiyajj/AutoTest_script-Appium_Connect
|
f9c2c42c281a9e2f984acb4a72dda0694b053f22
|
[
"Apache-2.0"
] | 28
|
2017-11-10T00:19:16.000Z
|
2022-02-19T16:42:05.000Z
|
src/testcase/GN_F1331/input_case/GN_F1331_Unbind.py
|
maiyajj/AutoTest_script-Appium_Connect
|
f9c2c42c281a9e2f984acb4a72dda0694b053f22
|
[
"Apache-2.0"
] | null | null | null |
src/testcase/GN_F1331/input_case/GN_F1331_Unbind.py
|
maiyajj/AutoTest_script-Appium_Connect
|
f9c2c42c281a9e2f984acb4a72dda0694b053f22
|
[
"Apache-2.0"
] | 23
|
2017-08-22T06:12:19.000Z
|
2021-09-18T05:45:41.000Z
|
# coding=utf-8
try:
from src.testcase.GN_F1331.case.GN_F1331_UNBIND.GN_F1331_UNBIND_001 import *
from src.testcase.GN_F1331.case.GN_F1331_UNBIND.GN_F1331_UNBIND_002 import *
except ImportError as e:
print(e)
| 31.428571
| 80
| 0.786364
| 38
| 220
| 4.236842
| 0.5
| 0.26087
| 0.322981
| 0.21118
| 0.645963
| 0.645963
| 0.645963
| 0.645963
| 0.645963
| 0.645963
| 0
| 0.161458
| 0.127273
| 220
| 6
| 81
| 36.666667
| 0.677083
| 0.054545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.6
| 0
| 0.6
| 0.2
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
cd721633dfc356e1884f6f2553bf7b7741fffcc8
| 2,909
|
py
|
Python
|
openapi_server/controllers/bulk_controller.py
|
havardhuns/graphsense-REST
|
e2b2c851fc6fd7bba06de66a7abdb82cb76ad1d0
|
[
"MIT"
] | null | null | null |
openapi_server/controllers/bulk_controller.py
|
havardhuns/graphsense-REST
|
e2b2c851fc6fd7bba06de66a7abdb82cb76ad1d0
|
[
"MIT"
] | null | null | null |
openapi_server/controllers/bulk_controller.py
|
havardhuns/graphsense-REST
|
e2b2c851fc6fd7bba06de66a7abdb82cb76ad1d0
|
[
"MIT"
] | null | null | null |
from typing import List, Dict
from aiohttp import web
import traceback
import json
import gsrest.service.bulk_service as service
from openapi_server import util
async def bulk_csv(request: web.Request, currency, operation, num_pages, body) -> web.Response:
"""Get data as CSV in bulk
:param currency: The cryptocurrency code (e.g., btc)
:type currency: str
:param operation: The operation to execute in bulk
:type operation: str
:param num_pages: Number of pages to retrieve for operations with list response
:type num_pages: int
:param body: Map of the operation's parameter names to (arrays of) values
:type body:
"""
try:
if 'currency' in ['','currency','operation','num_pages','body']:
if currency is not None:
currency = currency.lower()
result = service.bulk_csv(request
,currency=currency,operation=operation,num_pages=num_pages,body=body)
return result
except RuntimeError as e:
traceback.print_exception(type(e), e, e.__traceback__)
raise web.HTTPNotFound(text=str(e))
except ValueError as e:
traceback.print_exception(type(e), e, e.__traceback__)
raise web.HTTPBadRequest(text=str(e))
except TypeError as e:
traceback.print_exception(type(e), e, e.__traceback__)
raise web.HTTPBadRequest(text=str(e))
except Exception as e:
traceback.print_exception(type(e), e, e.__traceback__)
raise web.HTTPInternalServerError()
async def bulk_json(request: web.Request, currency, operation, num_pages, body) -> web.Response:
"""Get data as JSON in bulk
:param currency: The cryptocurrency code (e.g., btc)
:type currency: str
:param operation: The operation to execute in bulk
:type operation: str
:param num_pages: Number of pages to retrieve for operations with list response
:type num_pages: int
:param body: Map of the operation's parameter names to (arrays of) values
:type body:
"""
try:
if 'currency' in ['','currency','operation','num_pages','body']:
if currency is not None:
currency = currency.lower()
result = service.bulk_json(request
,currency=currency,operation=operation,num_pages=num_pages,body=body)
return result
except RuntimeError as e:
traceback.print_exception(type(e), e, e.__traceback__)
raise web.HTTPNotFound(text=str(e))
except ValueError as e:
traceback.print_exception(type(e), e, e.__traceback__)
raise web.HTTPBadRequest(text=str(e))
except TypeError as e:
traceback.print_exception(type(e), e, e.__traceback__)
raise web.HTTPBadRequest(text=str(e))
except Exception as e:
traceback.print_exception(type(e), e, e.__traceback__)
raise web.HTTPInternalServerError()
| 36.3625
| 96
| 0.672396
| 381
| 2,909
| 4.981627
| 0.191601
| 0.084299
| 0.05058
| 0.071654
| 0.905163
| 0.905163
| 0.905163
| 0.905163
| 0.905163
| 0.905163
| 0
| 0.00179
| 0.231695
| 2,909
| 79
| 97
| 36.822785
| 0.847427
| 0
| 0
| 0.782609
| 0
| 0
| 0.036433
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.130435
| 0
| 0.173913
| 0.173913
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
26e93e250a213379dab863e3acffb6d04bc5af85
| 48
|
py
|
Python
|
ginjinn2_pollentube/core/__init__.py
|
TankredO/ginjinn2_pollentube
|
417b805e5aa54c51fafc97bc15ae78feddcad9b8
|
[
"Apache-2.0"
] | null | null | null |
ginjinn2_pollentube/core/__init__.py
|
TankredO/ginjinn2_pollentube
|
417b805e5aa54c51fafc97bc15ae78feddcad9b8
|
[
"Apache-2.0"
] | null | null | null |
ginjinn2_pollentube/core/__init__.py
|
TankredO/ginjinn2_pollentube
|
417b805e5aa54c51fafc97bc15ae78feddcad9b8
|
[
"Apache-2.0"
] | null | null | null |
from . import conversion
from . import measure
| 12
| 24
| 0.770833
| 6
| 48
| 6.166667
| 0.666667
| 0.540541
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1875
| 48
| 3
| 25
| 16
| 0.948718
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
f8063e11cc02510487d7bea00839160c0dfef3b5
| 24,288
|
py
|
Python
|
tasks-deploy/python-vm/check.py
|
HackerDom/qctf-starter-2018
|
f4eef0fd41d777661b9fbcc61dcee9709d9f6268
|
[
"MIT"
] | 8
|
2018-03-15T12:07:11.000Z
|
2020-12-01T15:02:46.000Z
|
tasks-deploy/python-vm/check.py
|
HackerDom/qctf-starter-2018
|
f4eef0fd41d777661b9fbcc61dcee9709d9f6268
|
[
"MIT"
] | 17
|
2020-01-28T22:17:42.000Z
|
2022-03-11T23:18:09.000Z
|
tasks-deploy/python-vm/check.py
|
HackerDom/qctf-starter-2018
|
f4eef0fd41d777661b9fbcc61dcee9709d9f6268
|
[
"MIT"
] | 2
|
2018-11-26T18:54:27.000Z
|
2018-12-05T17:37:32.000Z
|
#!/usr/bin/env python3
flags = ["QCTF{017c710d3e27fbdb267fc9ce475e2c}", "QCTF{6776a5bfa03f98eb2d91756d13f71d}", "QCTF{86c4a4929862876029335a57207391}", "QCTF{8940da8fbe89939a35fcc1489ea375}", "QCTF{f8583603c1ff8c10695dab27691002}", "QCTF{1c4c005c17d26c5abafde3a16a6451}", "QCTF{e44d3a1a411d086565f9e614467f16}", "QCTF{8993537f1c88e989eb53749a475a07}", "QCTF{660d5599511f5be03606ecf04f3795}", "QCTF{5b9adfc5793b095adf57096a50ff51}", "QCTF{217ae4ecc8949a11c2ead9a56a462c}", "QCTF{452b020629377f4b56d2b182ee7beb}", "QCTF{99ef77554897a980232f265d55080f}", "QCTF{9497eb3f1623693adae1edaa682ee9}", "QCTF{ae442091a204856997b2492dab5f3b}", "QCTF{7ac1cd110f9be056819e6c0ccee4f7}", "QCTF{0bcb7b1553e0f15427592aa92a0a56}", "QCTF{d5258021f548cf05ff72a27890f330}", "QCTF{8f09e8ff251d9d5df5bc6af68c7719}", "QCTF{9d25d8d60ae6eb65d3b9b8a6ab1eae}", "QCTF{b69551e435440363c5c9b9e33f68a1}", "QCTF{2bf33d0a7257501ffcd63c71f96e20}", "QCTF{8e04d54b6106fa17d67fe55c9644d7}", "QCTF{ac0f1570afc86803ca8b03b8a751e4}", "QCTF{b7c90491ea47c726c571121df0c94c}", "QCTF{ae66181b003e162a6f8288ddc820fa}", "QCTF{9f5bb6f2adc440a95758b773548300}", "QCTF{fff57a1ef22ecfa9e711a6ea4b4eba}", "QCTF{2f44bdab63a8ed0f848548fa584bce}", "QCTF{cac90b59bf7b3eb11034562ec38639}", "QCTF{1dceee7a9f1ceae5f00dd98a5d99e4}", "QCTF{7e1a1d22c8af7f5b9fab13678b8521}", "QCTF{6ac2ceda09faca7727488ad014dd7a}", "QCTF{3933cf51e07878bcdba8c0fc0b3f6c}", "QCTF{f0e3fe8e5052a19121eaca3131d662}", "QCTF{df2e6c1d0c503c26bea4e529da52ae}", "QCTF{5ce6cfbbf40b7122d960cb20ae0e7f}", "QCTF{1b85210294c7acbb680d9459841ca5}", "QCTF{0598fd0e691f87dd48ccb41a9c7b03}", "QCTF{2508bbd4298db14303a09a8ba82e8a}", "QCTF{cbfe8b730bbc2000b0ad13825cdd19}", "QCTF{f3ea5316273c7382d50b5359f910c0}", "QCTF{a1c8883b2f738611b9084db99ed05e}", "QCTF{add11f81f77b763bb53458fe68ec5c}", "QCTF{db0ada0afafb1b8a5e891b8736d69c}", "QCTF{39e010ce495acfc8d86b86c75fbabe}", "QCTF{64fc83dee504f4a92dc6b38b627fcd}", "QCTF{125155c8cc8661e49bf2047b92a796}", "QCTF{d6267475170de5249d7d30d3a7d0fe}", 
"QCTF{4e57dbcdc0ea6ca92b067a3e06cc63}", "QCTF{32da4052565e79fbc174838c6bbfb0}", "QCTF{b37851e83cdbfa8e66d430602c05cb}", "QCTF{2d37068b8dc1aec2e48efcfca453dd}", "QCTF{192f1f0d0638cbb9490ea72423fc34}", "QCTF{3c83f494230ed49416355a8e2cebff}", "QCTF{77ffc2388d618292a10661b1889b80}", "QCTF{43d3feb3e86e6d2ddba74213588cc8}", "QCTF{b573b34184f19e8f96c178680db914}", "QCTF{9c36c127c6299ad913885f9a2a36fc}", "QCTF{d76c1787aea100a6fa6e44f18b37c4}", "QCTF{86f6fbd048b9531cd30bf97e2d6080}", "QCTF{dbf30230dbd8df069b3f83e08ca5d8}", "QCTF{56e11e961f23b577dbaedeb7ae1aee}", "QCTF{3f051b7963a0ad9490768ba45590f5}", "QCTF{3f5fbdb7528f644b231df44dfd9b65}", "QCTF{413fe658ce429b822dab365cfc6914}", "QCTF{b54b8285428f5148ad55e5695e1003}", "QCTF{8ca778e87ece67554770055b49a58a}", "QCTF{27a4c2b9cb7460c8ab5bf79b11e483}", "QCTF{7f08927a3fad27f3bc3e602f15cf76}", "QCTF{9de060a8ccfe56985e4497076c6aad}", "QCTF{f306920b4a403ed61ebf7dd48e0355}", "QCTF{d89847f799ca8ed8d8922eff918532}", "QCTF{81d180cf58c1fb0de4c42c0a29ef8f}", "QCTF{7fddb8ff79a6c7af85c726f42d6871}", "QCTF{453cc10b8ae8fa7f7888f03ae34bc1}", "QCTF{6a9e299705487a509be0c2d9c28a78}", "QCTF{ce4b9c355ebefe76f26a929b80d4b2}", "QCTF{5463be0174e277561758dde7122c7a}", "QCTF{b44175cb4cb57964e2e0c7457b46ab}", "QCTF{910f273eed319ef403dca94a30cfe6}", "QCTF{37f5cda95f6e5189eb0fb4a5063af7}", "QCTF{284a79b105969506d34ffead2111df}", "QCTF{1911e596dbf02d74f46a549704a6df}", "QCTF{e803799cb203de1f993f6a4068ab5b}", "QCTF{323023ef6c2ae17e663542e62bd2dc}", "QCTF{9fc8c15844e60e827cbbc51cf5b7d4}", "QCTF{070dfeca64bfb8d3f7281f2f655790}", "QCTF{df1a7b2637e97420f013e602025b45}", "QCTF{695f7ad81eb48cdda21c735766bdd2}", "QCTF{0789f625796f033f824763b1c2d83a}", "QCTF{5a7c60f4b6fafe335fd220debfbda7}", "QCTF{bdb5ad88847c8b853a280fe479123c}", "QCTF{5dbe7f0d47aafb69750491dfc7235e}", "QCTF{5724996f731b086c4b422513ed3fac}", "QCTF{46a7409f05e002579ef75dc7cc8569}", "QCTF{c5c2d2da5017750546594d770c6b76}", "QCTF{c0f447e130b74d1174e53883925590}", "QCTF{2c32dd75d55c98bf2fc00b8552dca9}", 
"QCTF{2a0bae17264ee7f18caf464c9d03f8}", "QCTF{cfdd3f4f53c8e1c01117783a185a35}", "QCTF{daa68e54ee358ee10727223dd55832}", "QCTF{bf241b10fb3d3d92e2bfefaa83b589}", "QCTF{a56d96b625862b28430949d096470f}", "QCTF{58a80b1f9ba94c1959c35acd22779a}", "QCTF{cc4e1d5ec09a7759c23ebc3985029b}", "QCTF{7866c61b9e82907fcb981890e26415}", "QCTF{ef3997583c7b0f2fc3d463194d93c0}", "QCTF{1ed7131df3a8b168c12f3dbc61e3b6}", "QCTF{0d9f1da91c60b7b3c0873f9f9563e6}", "QCTF{98cdd7e87b7e42c65c3d47d0a6bd15}", "QCTF{891a03fc865a438c7ec082a17035fe}", "QCTF{ce68a6de0c0cdbfd9652fcf6c5d902}", "QCTF{37793013a8abc95aa80c2ba37b6215}", "QCTF{f104689e91d726b7a5c552c2d0e09a}", "QCTF{324b75b63777f9f8eb2ebf35790968}", "QCTF{0cd581aca4b1f67f5e756ab660b362}", "QCTF{dbf6f791622624aaef0d71f7a9862c}", "QCTF{25dbd61d686e4d188e1e10b807b087}", "QCTF{b0852e0a2400ba09789c398ab6e114}", "QCTF{ae4cea2ecf901776f142f1e0bb866a}", "QCTF{094de6379f80c72f7e50e64ee52b8d}", "QCTF{3034de73c2e055bdfc8aff0b4f1a57}", "QCTF{00eab5298195239feeb6822d69ee25}", "QCTF{8c27f11ae7a4acc7ae7883adac0f24}", "QCTF{df2d6293bb2b63a2c5973994c692e3}", "QCTF{bb98b6c6f419725d1d854d546d4250}", "QCTF{bf557d830395a3424f2d31d26b3586}", "QCTF{9bb05164c676dfedc878baa31e2657}", "QCTF{7dd2ffc4cafa7e1d903cbd309e0725}", "QCTF{235e741a22f4ac081e2efd5d0a2ebb}", "QCTF{5dbee3f20743399b76722f1fea4044}", "QCTF{c67c154a69292c8e50efa81dca89e0}", "QCTF{62925553df92c4a7c31c34404555d0}", "QCTF{2432b81ea0ef720497e7015d384f55}", "QCTF{5eb4da4cb8cce59607a5fe2d515cea}", "QCTF{9af20de0f6e4398b3f053fb87aa6c2}", "QCTF{4bec079b69687aab7d25b951ce885c}", "QCTF{76acf07ff980f9be85a6525421d53d}", "QCTF{eb10cba87dabecaeaf21e70e7a4d2b}", "QCTF{388e229277107df581a505e7454cff}", "QCTF{3cfd1da4deecf5a0cdaa9a7a86d6bb}", "QCTF{a4c950c0e7e968e177187b856f6dec}", "QCTF{bbdcec7ceaf832b98f89842e3e89d8}", "QCTF{a3eb61cc46ee56178b37b12cf4299f}", "QCTF{7f48d19a9a25ca7d26ed796c8aad76}", "QCTF{4e56d012266454188677051ec6c797}", "QCTF{f5ea57a9a3ca9011c2b839475b5815}", "QCTF{bdc23ab3de80af98559777dfa488a3}", 
"QCTF{be2747e8cc45d5c115e422fa5ef62a}", "QCTF{f81527b2921034f48f76af42cca98f}", "QCTF{a22f2f4e23f806e154c4789f98cdd5}", "QCTF{fb4e43e18a6cd54ed2ec44cf85bf8f}", "QCTF{7ee6c79cca08a7096bda654c05765f}", "QCTF{0012978383595f9644c46894431611}", "QCTF{b7d8a7b3283a781add56dd10b909df}", "QCTF{75ddeb0c338ed03c2ad20a74b7cc76}", "QCTF{402ddc0d270f14e5e11ef3de2abb6c}", "QCTF{cb9791d2b7a2ad910be6416300050e}", "QCTF{6ac295139cefd8f1bf8393d0ff479f}", "QCTF{d2de4ff46de4edbaf0af0f0d7a7110}", "QCTF{201e6c23543eaf67a8714f4f705e8e}", "QCTF{68eb77bc1c514de206886b2674f885}", "QCTF{3879de698b1fa277cc216a17821199}", "QCTF{16334aabc219e2ea2a51adb37b12a5}", "QCTF{485831b7e1fcd7c97630cf542b2204}", "QCTF{8902b2d453e26796d6beb97ff61d94}", "QCTF{2f11e062f1c09a72fc2f87b5b52efc}", "QCTF{84b9f88a103f3b50169a0ce5c4d81b}", "QCTF{0fd38afd4c04dd5f5b9fe871b211be}", "QCTF{57cd5c9c5c2dd3f457f7e801cd1df3}", "QCTF{e38b924d9338c90e2021b509d348a6}", "QCTF{36d66ff7cefb31303f5cb83bbaa8c8}", "QCTF{3682c84d3368c7e1d59770bc21a3bc}", "QCTF{ad3ecfa4b5bb85b5bb3a36997743c9}", "QCTF{6e8c7aca84dc31b90a60a53335d8ec}", "QCTF{7e4ee58dc30d8e78eb39e3d8a07581}", "QCTF{8a6abc832a18d9301d4f1faf608a3b}", "QCTF{d9ba6319c5d653aaeb11c7aca99f51}", "QCTF{872e22c1a8f7074d1aeabe99607d05}", "QCTF{0b9d5b0ef06544ffcf2bd675591d21}", "QCTF{6b2abb25e9e4fb9456a2a5d148b353}", "QCTF{bced13a83e377db74c81d0bfd257cd}", "QCTF{4b63c056a66749437043e88d66921b}", "QCTF{9bc1b0c596b9158ee3e1e83b24d742}", "QCTF{f6f7d64f7d055e56094a35811cf7a5}", "QCTF{8816453472a465ad564847ca81fca6}", "QCTF{32ce15c976e4627aff2c8652747cea}", "QCTF{4c76d33c2f9c41b0b5cb5d469ff50d}", "QCTF{9f7da4fbc93b02bb745a2616a92194}", "QCTF{cd0893a8555badfe953e73de2fec10}", "QCTF{bb8906c451643f99aedd526667d92a}", "QCTF{b6529a8b9ed2c7b7bf553c29f704a5}", "QCTF{1573a3c2a8316f27e6382fc48c4f33}", "QCTF{6c918e3d1ad6a632fc1a77b19f7a55}", "QCTF{c0e21f16330e8764b82e81dc9a3b56}", "QCTF{097ea21da40a2ea1cfd3b7053cc708}", "QCTF{482010f130554f0847864272d4c2c2}", "QCTF{748dc0d28fe677d9bc89546ec6b2c0}", 
"QCTF{ed1aa5c6a13bb1fdd22b7a0703fc16}", "QCTF{9c53c42158989d92625dd3572bbc95}", "QCTF{70c9e96b5d81acf43bce7755da303c}", "QCTF{a309dad413164cfb1b45c5256a3882}", "QCTF{e37c98bde0533db31adef4faa563f3}", "QCTF{7a0798e41ddccde6c8a75f225b94d3}", "QCTF{9811cae3ae3a81472beb8103b7ab82}", "QCTF{c87deed5b546c6cc32d1ffaefdb0f0}", "QCTF{83fb574eacf4b08c5ac3f6d3f74da7}", "QCTF{ba222d3be72a84885758308cc61ea1}", "QCTF{8a2b5ca1d91cb4ea01c007abdb6c2c}", "QCTF{53eaf49f8c54216f05e36fe8d5fafb}", "QCTF{8c858b7855f02198aab807d1edf9d2}", "QCTF{89a0e1095489c0cb284c1875756155}", "QCTF{b21ea97634dc697fd09399af837067}", "QCTF{dc5cb89bb542ff7e7975f4d15c4255}", "QCTF{48d5150d4be6b635e143d89aabf1a3}", "QCTF{922dd4c967bb9d69d956773afeab1e}", "QCTF{e5cd2fa153499c13ca49236de6adc4}", "QCTF{4d37247598052c9e6f2b83b8bcd952}", "QCTF{3e46e6cd60ccae54e82e87a8dc34e1}", "QCTF{25f36eb8e42259b2ff06cde1bd5605}", "QCTF{da1944f7edc9498ca4c1bfecf42ff2}", "QCTF{ce94baeff00cf8e73ce11137c59a28}", "QCTF{1bbbeef2c2fdebe8541d0bf18a50e1}", "QCTF{90721ce916e1311f2bb68bbae2d7ac}", "QCTF{80c7cd511153577b4fa91c5e2e4ad1}", "QCTF{603b395026bc8cc09dc97a38cb2280}", "QCTF{fbe22feb97e7c80d66a591602d415f}", "QCTF{a6aaadf2f0334f4af4cb9fe73c8302}", "QCTF{65d7b5da8e189b49e4b3da77e12e25}", "QCTF{9b6d2160ceae6028b64ce27e7b51a5}", "QCTF{d05b7dfdb049802ae7e6080f3fc79a}", "QCTF{a4c46d359b5b7350cc4fc4a936d825}", "QCTF{44bd838408c67fbca2ecbbedebf1cf}", "QCTF{26383798c460970fa6098989bb95d9}", "QCTF{c59e86a008d83ed3e972e046ca719a}", "QCTF{b04b985d1025efde0040beaf7dc5f0}", "QCTF{3d5bfc3671fdc8002b302cd1453655}", "QCTF{e306a59a226859cdaeef0fdfeeee79}", "QCTF{572862ce77246de5b91a5b0af46779}", "QCTF{4150dedcdd9bcc390834a75b697611}", "QCTF{c481a75551098eeeba7ed388449eb7}", "QCTF{f055b24f5171d68a8b4e81bd01792c}", "QCTF{dd756862a9e242b56196b96e62995a}", "QCTF{10b968626f2c5e7ddc159d9a693612}", "QCTF{0f081c1477b585763880bf4d47f90f}", "QCTF{312342427f1eabd39543fea814cbc7}", "QCTF{fffbe712ecc52d538e76d9d04abc40}", "QCTF{50263e0f1e44032652198b75d7cbb5}", 
"QCTF{c6e8f5eaf5fd39a400e78ab3e2faf0}", "QCTF{231f24f3b30f85a36ab5a1e3361646}", "QCTF{a2483150dd3fcc4c31beca9105b340}", "QCTF{4f64a8d96f193af89a1654c32a2e6e}", "QCTF{eece52ebe7c2d8c22d84b113805c29}", "QCTF{d90e6048879dbc95a751424e3dae80}", "QCTF{62e3d5152dcdf3ae665c5ab2551038}", "QCTF{eafca4f2e14ab780593d4daac5eb6a}", "QCTF{793f8267453ad28b7e42115b406dc2}", "QCTF{fd0c6135bbed7dbe929c072d4dd8a2}", "QCTF{b417e9cc4fccd1d8c9dbbda9dd9085}", "QCTF{056776924bea62f6c59faec02af84c}", "QCTF{07d2be0685063f1a479cc149c87469}", "QCTF{14747745e46430a990a2ca58ba4710}", "QCTF{d4a261d171447d6d3590d7d363967a}", "QCTF{001bcc7a06e5b7ee207650a4737d3d}", "QCTF{be0253ee5c2ebb31de5e53f5f4dfdf}", "QCTF{459ad7f0482f8373a225a140a82d93}", "QCTF{dac483918e43e63e77ee71bd0b3660}", "QCTF{dbf8d2a94520fe94d4ba6abb76b4e5}", "QCTF{39879724a07fe4db6a17d4c4044416}", "QCTF{6343cd7a9f1dd2280ecb3f4b2ddbcf}", "QCTF{4d6ed64d4ed4bf5a1aadc54dd29d6f}", "QCTF{a2a4c5060805b78e2e297fd516c64e}", "QCTF{43eba861767ac9fc8b687552f58ee1}", "QCTF{494a895de00ae4fa073bf60f6b0e8f}", "QCTF{169f7666e0e26a7e8e114ec55292fd}", "QCTF{ca8a500216e213f50f0ef9e2d5bc34}", "QCTF{e6d5c5948657d47967d05942756c22}", "QCTF{3c09c3994bb1e5ab3d40bf8b830e67}", "QCTF{3f8e87cf77592b7e6f248562688cd6}", "QCTF{b99d3d372dc0fd47ec8fa90586ea58}", "QCTF{3d4096238ed5189be1e3e4ab74c739}", "QCTF{c39eb6e1bbd14d9f066bd5c066e82d}", "QCTF{abe256a7bc3b786bcda9e7297c2271}", "QCTF{96ef74ad6f2096122b8d1a21b0b901}", "QCTF{14b79fdedb85fd168547152534a4e7}", "QCTF{6f7c2501e5085f7a703a2c306373c0}", "QCTF{3bf165aa5fd6ffc1c18724bbaf49c1}", "QCTF{1e0f2df613c8b7b6a0fc03232b216d}", "QCTF{d0b596863c036f0366fe15b9482efb}", "QCTF{cf17729c4c9e4a1b4ac214f99075f2}", "QCTF{aef70477ab2f637cc467c05418be2a}", "QCTF{4c58ad6faf885ba3ce0450031eca86}", "QCTF{9924566f66ac2c8bd1870750ec91ce}", "QCTF{b03147b934ea2b65e10547e2f71dd2}", "QCTF{7383f90f0c162f808997d1d0b3d9f6}", "QCTF{a6213c8a1073869525030f119283a8}", "QCTF{528e60191f04a586cf7b7795f79351}", "QCTF{5163fe3e98e2434533fd99483789be}", 
"QCTF{a69722554c0f86fff82bbe8c96974c}", "QCTF{8714276066181ac98523a27eb12d7e}", "QCTF{bdfe5870281bfafe37e1fdf5826d35}", "QCTF{ec71c293b03a36371bff6771baca22}", "QCTF{de122f99685a53e02e93cdc7ace813}", "QCTF{97c89b11befd75a4a89358e2b21e0d}", "QCTF{5d96880f78d827c12752a1d1d49334}", "QCTF{eb6351bd50687eb6aec3a156247c92}", "QCTF{10319d85149939307ee0bab8a2cf7e}", "QCTF{9022630cc835ddab640c4d3b11a34d}", "QCTF{505c2a0c8d0af2e30c5a1d57b7593b}", "QCTF{8db605a31f42465c877550809fb66c}", "QCTF{31ce06e68c3f09b64dcd90ac0998f6}", "QCTF{7e8a45ea2833348afc110719d3703b}", "QCTF{b6792242a13e1a722dbad6e46a94b3}", "QCTF{92e9c3f6c177ee5870083c1bb1450c}", "QCTF{ab837ee7e08c3ce1c3c0d89864b422}", "QCTF{b7873f9f2d0f27045d47a247c0c5c1}", "QCTF{d8f57b380a70f6222c927f403106a2}", "QCTF{f18a4fa8f3f3defd8f01a73457203c}", "QCTF{d516a9be009ff98d9c63308a7a0e4d}", "QCTF{b964e904c6d036903272b25c220ee2}", "QCTF{e422a73d21b3ae915dfc5a47f7d838}", "QCTF{c6bfbb696841d3a06d3b53c34497c2}", "QCTF{f67ad8000ac7c58e61e4e28ab87739}", "QCTF{58373d3bed3d618379f34d5c3c341a}", "QCTF{80d28b846c2755b8b8f661a70fbf70}", "QCTF{b2df1bd63f4db7cc7f49be4b77a068}", "QCTF{a923afa69dcb41e93a837f67c00f18}", "QCTF{2de524d407ab57442ca00f57fd12a4}", "QCTF{590102d15180aee4b22ad084adb766}", "QCTF{f03ff7f79befb402f9445b4ad1fbd2}", "QCTF{64aea5fb4eb710050bb41a0defb859}", "QCTF{3d3e53bc593ec6fa48513f771bbbcf}", "QCTF{56de9b6eade78bba0dd6c2dee4d075}", "QCTF{d5a4e1abc6634529ad3e2cfe1b05c0}", "QCTF{7914b002a76ee41b72d790245a3309}", "QCTF{882d7a1e28ca2e44992d026f60db4a}", "QCTF{5669e13d6e3a6016371af54eeac733}", "QCTF{16e34c119d1b5109037d4a705b71f1}", "QCTF{6ba4b9a3a7a64a03f0d0e7dde068ee}", "QCTF{0ccd6de5a838b402d81707ade417c7}", "QCTF{9df853d162dd62a4aa13ef401d46d8}", "QCTF{f04a20c6e2b0cbc31a19829b1f5cf4}", "QCTF{254ff9c01fe46a2f8f350f73c3644a}", "QCTF{19ad245959a9bde19f65f6fabc1c1c}", "QCTF{9f03d51394c306b02268db2f445fb9}", "QCTF{f16adc3e83565eb03b92fcbff58f78}", "QCTF{c31f5544164eed21f4ed8d5ebb5b47}", "QCTF{53e93ff2975f9894f791484e6fe4c6}", 
"QCTF{eaddcd7d1d117732822599efb3e5c2}", "QCTF{58eaa4b0368d70f55f00ef05442a70}", "QCTF{3bbccb85f4217288a9320324b907b6}", "QCTF{9d0ce53a2dbd5ff7ca6e2454a606c5}", "QCTF{cb4c95ff31da615d6dd904691b546e}", "QCTF{0db9b3c65b39df955fcb26b249da79}", "QCTF{586d5582208fa9aef02b607f56717a}", "QCTF{7cc36c86e3209fd13889e9fbfd6f3f}", "QCTF{9146062005aaaa2c616a18d67475cc}", "QCTF{740648bc89554f8400f44a4de4fbdc}", "QCTF{6479d95dd67c28a5174c0424647a6a}", "QCTF{22dac7d2410f4a3e29091e02e844d8}", "QCTF{3a63f5c202ece28b406dd5f34ffc7d}", "QCTF{635369be62946632adf9beb9393143}", "QCTF{e4ec9204852997b951c98e7d618b5d}", "QCTF{e91e932638d5218e6ee87db153ce94}", "QCTF{f2546d8d2a83fe7248a93173f4d535}", "QCTF{d4064d8f1645b99bd1055d4b805e2b}", "QCTF{01ed14267023ee5610b7a76938510a}", "QCTF{ed4132e517b396be80eecbb507cefb}", "QCTF{18e018d98126cddff774f7531ce68d}", "QCTF{aa5bcba5ad836510619b5cb1e4a08e}", "QCTF{e7167819a8e008fecd208559ab7e70}", "QCTF{a852a51aa1e5566ee5014c539614ca}", "QCTF{1f9b0b54eeddd410b44b27436b8c38}", "QCTF{536a7ae0218ccf6a5507eb7a34ee06}", "QCTF{53c9cb624f4321f29f3437c36c7b26}", "QCTF{5262548069c5a0bf22759ec388e999}", "QCTF{5bb1b17fb04c417ef77b732373342d}", "QCTF{cbe6c3db33a137d26ed82f54e8fcc3}", "QCTF{6050612d7a113661b0ed9412e35960}", "QCTF{6b2b2b3c89b83916fbf58e5b11f028}", "QCTF{5004671ee9150b8531aaa481181209}", "QCTF{1b82488d18cf79c26369a7c254119e}", "QCTF{1b60c7f5adeae36e19062ef627cfc4}", "QCTF{eca5ee6966ce7f1d28ce100a61e37b}", "QCTF{da41c09049fd3da77c82d678539a97}", "QCTF{8d9d3bb6d2459f0d8586f891cd5e68}", "QCTF{7ec2c8a38e1019edf336c04cc9c64b}", "QCTF{7c6bb65611c9bed59f66faf12c5fb7}", "QCTF{882eb9877ece55e1edde1614452fe5}", "QCTF{8d97c5f36f30d957ccbf792d775c83}", "QCTF{7a19b54c24517a112d1bafda1fe63f}", "QCTF{f4ad29671a78225c5156cd8167f486}", "QCTF{9857a2ddfd65119b9f534688519740}", "QCTF{e0db3ba08fdcff7a9ec42c5ff40d2e}", "QCTF{c5fd8fec36e875dda43a867c9fe537}", "QCTF{68662a546d41eb070f47ad8ffa5feb}", "QCTF{cfbb4a43a85f451a41e28eb249c89e}", "QCTF{f11cb89aba4b24716f5ea6df1f6456}", 
"QCTF{c2696c88aaa8d645de1bbee35af36e}", "QCTF{ac2bdab61efa8bb9b406e2f78c09d7}", "QCTF{88fb6396cccc7d4176b1fe731436e4}", "QCTF{98e3866b7f0718ffa594a3f33d3b44}", "QCTF{b5150f2be9b26d034e4690fc68f712}", "QCTF{8e1b949a2187cccfaf9f36b5ccdd9c}", "QCTF{eb94d1971dcb62127374a4628667ec}", "QCTF{a4b14b074eaebe07648266d5178ecc}", "QCTF{0ffd94c87bacfb7bb8010e1b41c83e}", "QCTF{d926c89592c95271b214a3af98de24}", "QCTF{8aaca152c7a4d081ae269d83e7132c}", "QCTF{ca0cdc2524d83e7586e39ed49c1241}", "QCTF{d0885d76c1c08c85f45eb96761af33}", "QCTF{013f254a971bd257261fcad457a2e3}", "QCTF{25d4de91eb78279240e6d80be2e698}", "QCTF{e53ab395cafab1b9529867bfdbfc1d}", "QCTF{32df02fa1d9b0fd00abab1dd565ff8}", "QCTF{0fa362ef46093d985dbdd62410b3ac}", "QCTF{abf3bd52fb5db6460356cb75cbadb2}", "QCTF{10a7c5e2839d5b5721acc4bd7dc685}", "QCTF{c95c1c4b9046bde3a7ca6d090c47fc}", "QCTF{bc7ebacc670be6b5cca0a9b0804012}", "QCTF{9749f30bb5c36121be0c2e70e484ec}", "QCTF{6ba00df829ecd5ea6489d95763876a}", "QCTF{717eb5ac1206150ff0702cb4fe86f0}", "QCTF{d5e1f18fdd6ba044817f76aa6e4d5e}", "QCTF{60f8358d9acdbdd0c690881f73d5fe}", "QCTF{a48df82d23e8ea7c8269b9f9c28d21}", "QCTF{1715bc35e96aee6d523b0f6dd67841}", "QCTF{a75fc2f83a2135aabbc5b2ddb859d3}", "QCTF{c62ebd632d680cb7d9ec25134bea22}", "QCTF{c643eb2f21c5284918089d5b383ad4}", "QCTF{4647bfdb1d4a62c240b765e9c91c79}", "QCTF{63807485b170cda640b7b62a410d48}", "QCTF{8c8b034cae88ab73d5b3b3a96bc2d2}", "QCTF{a5daed61fd0033ce47213cf5697e46}", "QCTF{94d7f1a9b0aace1c8f9a1ff22a146c}", "QCTF{c2f63e55532bec9ab9df72d33cfeec}", "QCTF{8bffe0983b67b044ef79e18f3c72da}", "QCTF{b41bff3cd041a48ace323b26bc9e9f}", "QCTF{ad9f9a63b0f25ec0b10a2cb305388e}", "QCTF{13e78c6cf24f6beec8481cdadb57b8}", "QCTF{9010313369af3a60fc553b82da9c1a}", "QCTF{5f0ea4a76664b284e9f63278accc07}", "QCTF{b78c9ec90055c370e22187baf2997b}", "QCTF{5674913dbeb3dfe49dc047af5005ab}", "QCTF{565855545b83d5581fc5be23050478}", "QCTF{0119d1c9cdf8b76c63fc131511cb91}", "QCTF{2446806ab6b75204994334152591fc}", "QCTF{5b7a1abbd24d4fa4d4ff8fdbf08d54}", 
"QCTF{85195cb9e8e20150bc36407ebd1afd}", "QCTF{ecbb22283f7819d1e23ac23740478d}", "QCTF{79fa3a0558ce092e543f062a947d07}", "QCTF{b78541e4ccf3757fd0c3393e6eb25f}", "QCTF{43abe69da62136ae82aa4c99d8d528}", "QCTF{e7801e0a2593bbe13393ba1ab86c04}", "QCTF{f134ff863da79fb458d8a1f90b1f41}", "QCTF{2b33036f6d4c0b4b623a750a4fc31c}", "QCTF{2cd5a7ce94912d0913e58156b27990}", "QCTF{5d8ab33cd0182c5a66067635fe3da4}", "QCTF{7eef1fbecc08683090b6de0207205f}", "QCTF{45a0d9a403a17dfdac5f5da65532eb}", "QCTF{7f1253f295258e934d4e6ac56aea15}", "QCTF{e98c5e97b373b380c0bee114127e25}", "QCTF{ba4c693790f04c1a5f6b0d21f67570}", "QCTF{7cfcdae9dadec26e29643f7e4d72aa}", "QCTF{8788bf8b293e18bc5a9e0b01d6bffb}", "QCTF{eff23f208639852567194e0355716f}", "QCTF{87187edd381c24b39bac1f2a75a2d1}", "QCTF{eaed5a8cfa7ab5dd1fcd05620d5489}", "QCTF{06eb121757eeec04ae97639dc24bd4}", "QCTF{a223a3fdb0a1366a1f77325307415d}", "QCTF{58e065d9d00c7bdd42e86d0b904312}", "QCTF{28a91fc6bfa4465b700f6b286929ac}", "QCTF{014f8b9593678f8d12617b5c357f25}", "QCTF{9fc4786d3b64e6db431eead755639f}", "QCTF{ec7a7e0611271cbb03242c581fcc04}", "QCTF{b98dae9862beb100fcc9be6bb90d82}", "QCTF{7fe3d3a16198edef3c7447b361e6a0}", "QCTF{504b6869a67144ee7106bc0d7be603}", "QCTF{29c455f53735a19af732e4bfd84be3}", "QCTF{1f8c566fed343aaf5e3dcedfe90c56}", "QCTF{905006b1ed2031bb2d56f2001bbfb2}", "QCTF{ba3499e97aef940661066d7b5e7504}", "QCTF{cef06edc8fe40aec51e5e347ab187b}", "QCTF{4a78ea740a7ec60ecb61768f30aa12}", "QCTF{76b4a296a4020079746c4486fd777d}", "QCTF{762b5dc8b4cb4f5b451f4f73a01607}", "QCTF{d5e73d2a50b37f8368ba15b88d6678}", "QCTF{c01504b6eba74bed0838d3a4efa972}", "QCTF{684727c1eb75066ab707b0d6dcc7dd}", "QCTF{191501cd548cee9c8e9a68be833b50}", "QCTF{a3c6fb7dfe72f42b208c3470bd4573}", "QCTF{d6f4fb48b850a2116505d591f89100}", "QCTF{2c24293aa001f08e05ba4c903dfbc9}", "QCTF{59688da319f97dc6aebde6b067958d}", "QCTF{381fe3b4d0a8e1cc53d28629534d7c}", "QCTF{16e8716d735853e3e246f5a6ad2e43}", "QCTF{4c932b72265105c5be87a9230e6e13}", "QCTF{6d59d6152d5cf54b7c681062e71367}", 
"QCTF{839f5097f25721ff9142068dd9c2bc}", "QCTF{7dd6d3d4bb374ea5bccd3ee4d0ef54}", "QCTF{0e624ed4e3142a3d31ca5c5a7c9391}", "QCTF{d7ef1fc3649f0c85e29d9c923c0f4b}", "QCTF{52a9d7c17ed7943f129682bfe27714}", "QCTF{186551d33463e4d4d2c5941076ea59}", "QCTF{7fd1e5549e958c277e6fa405506fd1}", "QCTF{5359f353219f7302ca42d4476763c7}", "QCTF{2ca409c449145ef04cca67d666cd45}", "QCTF{5ac7df369c2aa3851fdd217f7615c6}", "QCTF{ef1e9152dc233786f647e25d5b20fd}", "QCTF{f2614f4a7237a2c797d7cb632f2fda}", "QCTF{c9d540463875352ee6f13d0e0ed938}", "QCTF{4be41d85cd9e99e80624b6feb8a37f}", "QCTF{2ecead67ba4d48a188316470353ad8}", "QCTF{c5b026f24ab9712e27c0efd64dc331}", "QCTF{271dcc80a87955650ef4fcae70a68a}", "QCTF{14e338ad0cc422ec8a46228577cde4}", "QCTF{f881060e716a4d97554603757eee64}", "QCTF{973df6784ea30de84c9fe3bebad69a}", "QCTF{68f133bf7ee549416ebee23632b150}", "QCTF{6a654b9cba1e50faf2297e7c2ce8c4}", "QCTF{35c2ae5fd4ebb4eb68fb5751ce1682}", "QCTF{c3f87c659eb8500c0c4a1803d334b1}", "QCTF{43898c3975ee0bdfdb75873aef63a9}", "QCTF{99551b2d619359c339d4c5bdf3c00b}", "QCTF{cd90a69205d56a5fd24338efe7624a}", "QCTF{a100669e8530f2d800f2341eebd597}", "QCTF{b5a6abb251305171cc0181bca44dbf}", "QCTF{8f8d98c17aa9c52d31233acc1343ad}", "QCTF{e720103fda74e59ee45a7f68d1c51a}", "QCTF{5b8150b2ad264d3a943f03aa1032fd}", "QCTF{8779b403c7a79afb63978f5e1d30ff}", "QCTF{0a8fe6369109f23bead330b825fcaf}", "QCTF{eefbd29a360850420fc90694c718b3}", "QCTF{915f4a3cb8c8673d39d4ccb76e3331}", "QCTF{d7257f017df94d1e9368a1ed42fb89}", "QCTF{fdab2886b744d6489afa1502210b38}", "QCTF{d3bde16b5a7bbe00fea329f89acbf7}", "QCTF{6621382f1b9322f972baf0619c1210}", "QCTF{95ee77b85ff3721d1f42c060e4a883}", "QCTF{d58ab393535ec37c6a1b8abffc067a}", "QCTF{119b38e3ae1f15d095747d0fcd0b4d}", "QCTF{4cddc42015a4e19d2a60151f3c97a2}", "QCTF{03107aad90c5db341c0d00a0663b1c}", "QCTF{c029a6a8146dcd1747898cd6494969}", "QCTF{668031e35cc6b4734bd39712253b8c}", "QCTF{d33dcab69d61e41a7fa64d64f314fc}", "QCTF{f5a498adb0eaac2dd794a706bf26d4}", "QCTF{2d7c8910c329ec3f6071f551581592}", 
"QCTF{1b4af0d07aa05805949ad14f1f84b7}", "QCTF{c9a458727caa8948bcb7b528e43d09}", "QCTF{200427185b10891e5699eae02ba408}", "QCTF{dca214833cba53d45d870ef25efc84}", "QCTF{a359d07a3046a44c49658a4a109122}", "QCTF{a51334445620729f83c1aa80e1abeb}", "QCTF{6b19932e0d469c287006111ed0e485}", "QCTF{64af07bd50739f942e5b5f66eeec47}", "QCTF{5dee74d06f1a6c79d1e91d76a69f81}", "QCTF{9e1487b2dc8a0dea1b3fc7cda35a73}", "QCTF{c6d71b9f71994db2e07fa2c167a915}", "QCTF{1aab2dd00548d510d5b402ed215054}", "QCTF{f7d2a947f051d7a73cb6ba46dfb760}", "QCTF{e30aede2414e1ba21e02bdd54607e2}", "QCTF{98e9640db1544488361e6bd6f905fd}", "QCTF{b6f6252ee28ee7a9219c44597e1d03}", "QCTF{7e67ea676161069da81c7a865fbe95}", "QCTF{45a3350c303d4df1a965048589ff09}", "QCTF{25d877c31aabbbb15f5c1787920e5a}", "QCTF{6597809135beb88ae7e51df8034d2a}", "QCTF{6ecf87c315cbb26671541c46cf7a88}", "QCTF{246f7d3b7970f7cd68f00a8ca110c1}", "QCTF{e5f291a71f7e0737ff23e3ad6abd91}", "QCTF{cec4a53ab3babc7ec110d5652e114e}", "QCTF{2ee018c57516a9d2cb208054ecdc3a}", "QCTF{d6ab5b219514f7139bc19e9e4887cc}", "QCTF{598100bb05865f95efcdcbc4574e13}", "QCTF{42a6cf6d325a461582602128cc98b3}", "QCTF{a0623b26c45722a56e64f63dc7b150}", "QCTF{2e18a138e41eee9634c2d348af3c20}", "QCTF{bc42c82d43f140061067f330d7dd06}", "QCTF{b70a008179e39fad91834802ea409a}", "QCTF{a49356c6fbfb47c6559b9ade5dbd6e}", "QCTF{f9f999d0c7add6b76468a0ccb32c18}", "QCTF{bc4f08f3bd4f6a2098a3ad614d978f}", "QCTF{4ec4d379f9beb8a09d5797f74be21f}", "QCTF{09d701f6c5d9013514b3ea93ccd703}", "QCTF{8b04a97b9635683dcc4a3a3a050fc7}", "QCTF{f9f8281ae5c8c55a8fa66076c7b075}", "QCTF{e1bb9bbedd42570b0f2fe5c9efc254}", "QCTF{5f1b54ad7a3a0268b1dbb3396c7585}", "QCTF{f9ed129e37c0b6ddd2ae7b17b7c382}", "QCTF{5897beb5986f66bae61b902452e377}", "QCTF{bc0186c7fbf4639fbcd36e9066a456}", "QCTF{69e610ab77aa53383d15e6e6d2e403}", "QCTF{2e62079a3ce362e4d79b9ddc757bae}", "QCTF{ceb757d388c31072898a46a234c2fd}", "QCTF{721c1e3d91d1976745a324c104ab02}", "QCTF{cc2c12287f29ba8ce68351f96b902f}", "QCTF{13302b4057d048a08fa4b66608329c}", 
"QCTF{b672ed517251f9cb42aa10360669b1}"]
def check(attempt, context):
    """Validate a participant's submitted flag.

    Each participant is assigned one flag from the module-level ``flags``
    pool, selected by participant id modulo the pool size.  A submission
    that matches some *other* flag in the pool is reported as plagiarism
    together with the index of the flag that was copied.
    """
    own_flag = flags[attempt.participant.id % len(flags)]
    if attempt.answer == own_flag:
        return Checked(True)
    try:
        # EAFP: one scan via .index() instead of `in` followed by .index().
        stolen_index = flags.index(attempt.answer)
    except ValueError:
        return Checked(False)
    return CheckedPlagiarist(False, stolen_index)
| 2,208
| 24,008
| 0.848114
| 1,236
| 24,288
| 16.665858
| 0.503236
| 0.001893
| 0.001456
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.472667
| 0.026927
| 24,288
| 11
| 24,009
| 2,208
| 0.398917
| 0.000865
| 0
| 0
| 0
| 0
| 0.890098
| 0.890098
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0
| 0
| 0.571429
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 8
|
f80a9e20f3882b568f0b2c67b5501a6676d89db8
| 154
|
py
|
Python
|
tasks/R2R/speaker/vocab.py
|
sunqiang85/DASA
|
c4fdc61db77f59f84c68abec3b985fbd7dc29323
|
[
"MIT-0",
"MIT"
] | 117
|
2018-09-17T22:40:26.000Z
|
2021-12-29T14:44:42.000Z
|
tasks/R2R/speaker/vocab.py
|
sunqiang85/DASA
|
c4fdc61db77f59f84c68abec3b985fbd7dc29323
|
[
"MIT-0",
"MIT"
] | 25
|
2018-09-25T06:03:34.000Z
|
2022-03-11T23:29:13.000Z
|
tasks/R2R/speaker/vocab.py
|
sunqiang85/DASA
|
c4fdc61db77f59f84c68abec3b985fbd7dc29323
|
[
"MIT-0",
"MIT"
] | 38
|
2018-10-12T02:59:05.000Z
|
2021-08-24T05:14:28.000Z
|
# Locations of the pre-built vocabulary files for the R2R speaker model.
# Names suggest one file per dataset split (sub-train / train / train+val)
# -- presumably generated by an accompanying build script; confirm there.
SUBTRAIN_VOCAB = 'tasks/R2R/data/sub_train_vocab.txt'
TRAIN_VOCAB = 'tasks/R2R/data/train_vocab.txt'
TRAINVAL_VOCAB = 'tasks/R2R/data/trainval_vocab.txt'
| 38.5
| 53
| 0.805195
| 25
| 154
| 4.68
| 0.36
| 0.25641
| 0.333333
| 0.435897
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.02069
| 0.058442
| 154
| 3
| 54
| 51.333333
| 0.786207
| 0
| 0
| 0
| 0
| 0
| 0.62987
| 0.62987
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f834f39f43edf6f05def41b93e4c69bef21d494c
| 146
|
py
|
Python
|
atv021.py
|
luismontei/Atividade---python-3
|
c29c81623b8e8c630c0e16cc22db9a7489f3cea7
|
[
"Apache-2.0"
] | null | null | null |
atv021.py
|
luismontei/Atividade---python-3
|
c29c81623b8e8c630c0e16cc22db9a7489f3cea7
|
[
"Apache-2.0"
] | null | null | null |
atv021.py
|
luismontei/Atividade---python-3
|
c29c81623b8e8c630c0e16cc22db9a7489f3cea7
|
[
"Apache-2.0"
] | null | null | null |
# Keep prompting until the user enters a grade inside the accepted range.
while True:
    nota = float(input('Informe uma nota de 0 a 10: '))
    # Same acceptance condition as rejecting nota > 10 or nota < 0.
    if not (nota > 10 or nota < 0):
        break
| 20.857143
| 56
| 0.589041
| 26
| 146
| 3.307692
| 0.423077
| 0.209302
| 0.325581
| 0.488372
| 0.790698
| 0.790698
| 0.790698
| 0.790698
| 0.790698
| 0.790698
| 0
| 0.082569
| 0.253425
| 146
| 6
| 57
| 24.333333
| 0.706422
| 0
| 0
| 0.666667
| 0
| 0
| 0.405797
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
f8375b2995e7a504c833b792acea4a6e01ca1570
| 1,565
|
py
|
Python
|
controller/gatt/models.py
|
helena-project/beetle
|
6f07d864c38ea6aed962263eca20ecf8436cfb4e
|
[
"Apache-2.0"
] | 16
|
2016-06-27T08:08:04.000Z
|
2020-10-24T21:20:27.000Z
|
controller/gatt/models.py
|
helena-project/beetle
|
6f07d864c38ea6aed962263eca20ecf8436cfb4e
|
[
"Apache-2.0"
] | 1
|
2018-01-23T19:18:06.000Z
|
2018-01-23T19:18:06.000Z
|
controller/gatt/models.py
|
helena-project/beetle
|
6f07d864c38ea6aed962263eca20ecf8436cfb4e
|
[
"Apache-2.0"
] | 2
|
2018-03-16T08:49:10.000Z
|
2019-02-14T04:30:03.000Z
|
from __future__ import unicode_literals
from django.db import models
# Create your models here.
class Service(models.Model):
    """A GATT service.

    Keyed by its 16- or 128-bit UUID; ``verified`` distinguishes entries
    from the standard registry from ones added manually by a human (per
    the field's own help text).
    """

    # Human-readable name; may be empty for unknown services.
    name = models.CharField(max_length=200)
    # UUID doubles as the primary key, so each service is stored once.
    uuid = models.CharField(
        max_length=36,
        blank=True,
        primary_key=True,
        help_text="16 or 128-bit UUID.")
    # Stored as "ttype" to avoid the built-in name; displayed as "type".
    ttype = models.CharField(
        max_length=200,
        blank=True,
        verbose_name="type")
    verified = models.BooleanField(
        default=False,
        help_text="Is this a standard service or added manually by a human?")

    def __unicode__(self):
        # Fall back to a placeholder plus the UUID when no name is known.
        if self.name != "":
            return self.name
        else:
            return "<unk serv>." + self.uuid
class Characteristic(models.Model):
    """A GATT characteristic.

    Same shape as ``Service``: UUID primary key, optional name/type, and
    a ``verified`` flag separating registry entries from manual ones.
    """

    # Human-readable name; may be empty for unknown characteristics.
    name = models.CharField(max_length=200)
    # UUID doubles as the primary key.
    uuid = models.CharField(
        max_length=36,
        blank=True,
        primary_key=True,
        help_text="16 or 128-bit UUID.")
    # Stored as "ttype" to avoid the built-in name; displayed as "type".
    ttype = models.CharField(
        max_length=200,
        blank=True,
        verbose_name="type")
    verified = models.BooleanField(
        default=False,
        help_text="Is this a standard characteristic or added manually by a human?")

    def __unicode__(self):
        # Fall back to a placeholder plus the UUID when no name is known.
        if self.name != "":
            return self.name
        else:
            return "<unk char>." + self.uuid
class Descriptor(models.Model):
    """A GATT descriptor.

    Like ``Service``/``Characteristic`` but without a ``verified`` flag.
    """

    # Human-readable name; may be empty for unknown descriptors.
    name = models.CharField(max_length=200)
    # UUID doubles as the primary key.
    uuid = models.CharField(
        max_length=36,
        blank=True,
        primary_key=True,
        help_text="16 or 128-bit UUID.")
    # Stored as "ttype" to avoid the built-in name; displayed as "type".
    ttype = models.CharField(
        max_length=200,
        blank=True,
        verbose_name="type")

    def __unicode__(self):
        # Fall back to a placeholder plus the UUID when no name is known.
        if self.name != "":
            return self.name
        else:
            return "<unk desc>." + self.uuid
| 22.042254
| 78
| 0.703514
| 221
| 1,565
| 4.81448
| 0.262443
| 0.12688
| 0.152256
| 0.203008
| 0.758459
| 0.758459
| 0.758459
| 0.758459
| 0.758459
| 0.758459
| 0
| 0.029977
| 0.16869
| 1,565
| 70
| 79
| 22.357143
| 0.787856
| 0.051118
| 0
| 0.821429
| 0
| 0
| 0.150442
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.053571
| false
| 0
| 0.035714
| 0
| 0.446429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f8686b6673e38abb5f10e6b313ce4b6def11be27
| 802
|
py
|
Python
|
calculate_spf_CC_BASE.py
|
palmtop/SFP-reprogrammer
|
8a5d608c81f266c99e76c6f22a141c40f6991bd6
|
[
"MIT"
] | null | null | null |
calculate_spf_CC_BASE.py
|
palmtop/SFP-reprogrammer
|
8a5d608c81f266c99e76c6f22a141c40f6991bd6
|
[
"MIT"
] | null | null | null |
calculate_spf_CC_BASE.py
|
palmtop/SFP-reprogrammer
|
8a5d608c81f266c99e76c6f22a141c40f6991bd6
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# Python 2 script: read an SFP module EEPROM over I2C and recompute the
# CC_BASE / CC_EXT check codes (running byte sum, reported modulo 256).
# NOTE(review): device address 80 (0x50) is presumably the SFP A0h
# EEPROM address -- confirm against the target hardware.
import sys
import smbus
import time

# --- CC_BASE: sum of bytes 0..62; the stored check code is byte 63 ---
count=0
base_sum=0
bus = smbus.SMBus(0)
while 1:
    if count>62:
        break
    byte=bus.read_byte_data(80,count)
    count=count+1
    base_sum=base_sum+byte
    # Dump each byte in hex, 16 per row (Py2 trailing comma = no newline).
    print("{0:02x}".format(byte)),
    if (count % 16) == 0:
        print
bus.close()
print
# NOTE(review): "Hex" prints the full running sum while "Dec" prints the
# low byte (sum % 256) -- the two labels show different values; confirm
# this is intended before relying on the output.
print "CC_BASE (0x3F,63) Hex:{0:04x} Dec:{1}".format(base_sum,base_sum%256)

# --- CC_EXT: same procedure over bytes 64..94; check code is byte 95 ---
count=64
base_sum=0
bus = smbus.SMBus(0)
while 1:
    if count>94:
        break
    byte=bus.read_byte_data(80,count)
    count=count+1
    base_sum=base_sum+byte
    print("{0:02x}".format(byte)),
    if (count % 16) == 0:
        print
bus.close()
print
print "CC_EXT (0x5F,95) Hex:{0:04x} Dec:{1}".format(base_sum,base_sum%256)
| 20.05
| 75
| 0.679551
| 156
| 802
| 3.384615
| 0.275641
| 0.132576
| 0.060606
| 0.106061
| 0.825758
| 0.825758
| 0.825758
| 0.825758
| 0.825758
| 0.825758
| 0
| 0.09732
| 0.11596
| 802
| 39
| 76
| 20.564103
| 0.647391
| 0.182045
| 0
| 0.727273
| 0
| 0
| 0.133846
| 0
| 0
| 0
| 0.012308
| 0
| 0
| 0
| null | null | 0
| 0.090909
| null | null | 0.242424
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
6ef5687064f3841e3ad67facc6993e6c30c66e42
| 45,060
|
py
|
Python
|
heat/core/linalg/tests/test_basics.py
|
bhagemeier/heat
|
b362b61a558c4a69cd9a884051b5efcc74f494da
|
[
"MIT"
] | null | null | null |
heat/core/linalg/tests/test_basics.py
|
bhagemeier/heat
|
b362b61a558c4a69cd9a884051b5efcc74f494da
|
[
"MIT"
] | null | null | null |
heat/core/linalg/tests/test_basics.py
|
bhagemeier/heat
|
b362b61a558c4a69cd9a884051b5efcc74f494da
|
[
"MIT"
] | 1
|
2020-04-07T11:27:05.000Z
|
2020-04-07T11:27:05.000Z
|
import torch
import os
import unittest
import warnings
import heat as ht
import numpy as np
# Test-device selection, driven by the DEVICE environment variable.
# "gpu": make the GPU heat's default device for everything below.
if os.environ.get("DEVICE") == "gpu" and torch.cuda.is_available():
    ht.use_device("gpu")
    torch.cuda.set_device(torch.device(ht.get_device().torch_device))
else:
    ht.use_device("cpu")

# torch-side handle of whatever device heat ended up on.
device = ht.get_device().torch_device
# Explicit device argument passed to every ht.* call in the tests;
# None means "use heat's default device".
ht_device = None

# "lgpu": keep heat's default on CPU but pass the GPU handle explicitly
# through ht_device / device instead.
if os.environ.get("DEVICE") == "lgpu" and torch.cuda.is_available():
    device = ht.gpu.torch_device
    ht_device = ht.gpu
    torch.cuda.set_device(device)
class TestLinalgBasics(unittest.TestCase):
def test_dot(self):
# ONLY TESTING CORRECTNESS! ALL CALLS IN DOT ARE PREVIOUSLY TESTED
# cases to test:
data2d = np.ones((10, 10))
data3d = np.ones((10, 10, 10))
data1d = np.arange(10)
a1d = ht.array(data1d, dtype=ht.float32, split=0, device=ht_device)
b1d = ht.array(data1d, dtype=ht.float32, split=0, device=ht_device)
# 2 1D arrays,
self.assertEqual(ht.dot(a1d, b1d), np.dot(data1d, data1d))
ret = []
self.assertEqual(ht.dot(a1d, b1d, out=ret), np.dot(data1d, data1d))
a1d = ht.array(data1d, dtype=ht.float32, split=None, device=ht_device)
b1d = ht.array(data1d, dtype=ht.float32, split=0, device=ht_device)
self.assertEqual(ht.dot(a1d, b1d), np.dot(data1d, data1d))
a1d = ht.array(data1d, dtype=ht.float32, split=None, device=ht_device)
b1d = ht.array(data1d, dtype=ht.float32, split=None, device=ht_device)
self.assertEqual(ht.dot(a1d, b1d), np.dot(data1d, data1d))
a1d = ht.array(data1d, dtype=ht.float32, split=0, device=ht_device)
b1d = ht.array(data1d, dtype=ht.float32, split=0, device=ht_device)
self.assertEqual(ht.dot(a1d, b1d), np.dot(data1d, data1d))
# 2 1D arrays,
a2d = ht.array(data2d, split=1, device=ht_device)
b2d = ht.array(data2d, split=1, device=ht_device)
# 2 2D arrays,
res = ht.dot(a2d, b2d) - ht.array(np.dot(data2d, data2d), device=ht_device)
self.assertEqual(ht.equal(res, ht.zeros(res.shape, device=ht_device)), 1)
ret = ht.array(data2d, split=1, device=ht_device)
ht.dot(a2d, b2d, out=ret)
# print(ht.dot(a2d, b2d, out=ret))
res = ret - ht.array(np.dot(data2d, data2d), device=ht_device)
self.assertEqual(ht.equal(res, ht.zeros(res.shape, device=ht_device)), 1)
const1 = 5
const2 = 6
# a is const,
res = ht.dot(const1, b2d) - ht.array(np.dot(const1, data2d), device=ht_device)
ret = 0
ht.dot(const1, b2d, out=ret)
self.assertEqual(ht.equal(res, ht.zeros(res.shape, device=ht_device)), 1)
# b is const,
res = ht.dot(a2d, const2) - ht.array(np.dot(data2d, const2), device=ht_device)
self.assertEqual(ht.equal(res, ht.zeros(res.shape, device=ht_device)), 1)
# a and b and const
self.assertEqual(ht.dot(const2, const1), 5 * 6)
with self.assertRaises(NotImplementedError):
ht.dot(ht.array(data3d, device=ht_device), ht.array(data1d, device=ht_device))
    def test_matmul(self):
        """Exercise ht.matmul across split-axis combinations.

        Every stanza builds the same deterministic operands (all-ones
        with the first row and one border column replaced by 1..len
        ranges), multiplies them under a given pair of split axes, and
        compares against the torch reference product, then checks the
        result's type, shape, dtype and split.
        """
        # Mismatched inner dimensions must be rejected.
        with self.assertRaises(ValueError):
            ht.matmul(ht.ones((25, 25), device=ht_device), ht.ones((42, 42), device=ht_device))
        # cases to test:
        n, m = 21, 31
        j, k = m, 45
        # torch reference operands
        a_torch = torch.ones((n, m), device=device)
        a_torch[0] = torch.arange(1, m + 1, device=device)
        a_torch[:, -1] = torch.arange(1, n + 1, device=device)
        b_torch = torch.ones((j, k), device=device)
        b_torch[0] = torch.arange(1, k + 1, device=device)
        b_torch[:, 0] = torch.arange(1, j + 1, device=device)
        # splits None None
        a = ht.ones((n, m), split=None, device=ht_device)
        b = ht.ones((j, k), split=None, device=ht_device)
        a[0] = ht.arange(1, m + 1, device=ht_device)
        a[:, -1] = ht.arange(1, n + 1, device=ht_device)
        b[0] = ht.arange(1, k + 1, device=ht_device)
        b[:, 0] = ht.arange(1, j + 1, device=ht_device)
        ret00 = ht.matmul(a, b)
        self.assertEqual(ht.all(ret00 == ht.array(a_torch @ b_torch, device=ht_device)), 1)
        self.assertIsInstance(ret00, ht.DNDarray)
        self.assertEqual(ret00.shape, (n, k))
        self.assertEqual(ret00.dtype, ht.float)
        self.assertEqual(ret00.split, None)
        self.assertEqual(a.split, None)
        self.assertEqual(b.split, None)
        # splits None None -- allow_resplit may redistribute a (split becomes 0)
        a = ht.ones((n, m), split=None, device=ht_device)
        b = ht.ones((j, k), split=None, device=ht_device)
        a[0] = ht.arange(1, m + 1, device=ht_device)
        a[:, -1] = ht.arange(1, n + 1, device=ht_device)
        b[0] = ht.arange(1, k + 1, device=ht_device)
        b[:, 0] = ht.arange(1, j + 1, device=ht_device)
        ret00 = ht.matmul(a, b, allow_resplit=True)
        self.assertEqual(ht.all(ret00 == ht.array(a_torch @ b_torch, device=ht_device)), 1)
        self.assertIsInstance(ret00, ht.DNDarray)
        self.assertEqual(ret00.shape, (n, k))
        self.assertEqual(ret00.dtype, ht.float)
        self.assertEqual(ret00.split, None)
        self.assertEqual(a.split, 0)
        self.assertEqual(b.split, None)
        # NOTE(review): indentation was reconstructed from a whitespace-
        # stripped dump; the extent of this comm.size guard (here: the
        # both-operands-split cases 00/01/10/11) should be confirmed
        # against upstream.
        if a.comm.size > 1:
            # splits 00
            a = ht.ones((n, m), split=0, dtype=ht.float64, device=ht_device)
            b = ht.ones((j, k), split=0, device=ht_device)
            a[0] = ht.arange(1, m + 1, device=ht_device)
            a[:, -1] = ht.arange(1, n + 1, device=ht_device)
            b[0] = ht.arange(1, k + 1, device=ht_device)
            b[:, 0] = ht.arange(1, j + 1, device=ht_device)
            ret00 = a @ b
            ret_comp00 = ht.array(a_torch @ b_torch, split=0, device=ht_device)
            self.assertTrue(ht.equal(ret00, ret_comp00))
            self.assertIsInstance(ret00, ht.DNDarray)
            self.assertEqual(ret00.shape, (n, k))
            self.assertEqual(ret00.dtype, ht.float64)
            self.assertEqual(ret00.split, 0)
            # splits 00 (numpy)
            a = ht.array(np.ones((n, m)), split=0, device=ht_device)
            b = ht.array(np.ones((j, k)), split=0, device=ht_device)
            a[0] = ht.arange(1, m + 1, device=ht_device)
            a[:, -1] = ht.arange(1, n + 1, device=ht_device)
            b[0] = ht.arange(1, k + 1, device=ht_device)
            b[:, 0] = ht.arange(1, j + 1, device=ht_device)
            ret00 = a @ b
            ret_comp00 = ht.array(a_torch @ b_torch, split=0, device=ht_device)
            self.assertTrue(ht.equal(ret00, ret_comp00))
            self.assertIsInstance(ret00, ht.DNDarray)
            self.assertEqual(ret00.shape, (n, k))
            self.assertEqual(ret00.dtype, ht.float64)
            self.assertEqual(ret00.split, 0)
            # splits 01
            a = ht.ones((n, m), split=0, device=ht_device)
            b = ht.ones((j, k), split=1, dtype=ht.float64, device=ht_device)
            a[0] = ht.arange(1, m + 1, device=ht_device)
            a[:, -1] = ht.arange(1, n + 1, device=ht_device)
            b[0] = ht.arange(1, k + 1, device=ht_device)
            b[:, 0] = ht.arange(1, j + 1, device=ht_device)
            ret00 = ht.matmul(a, b)
            ret_comp01 = ht.array(a_torch @ b_torch, split=0, device=ht_device)
            self.assertTrue(ht.equal(ret00, ret_comp01))
            self.assertIsInstance(ret00, ht.DNDarray)
            self.assertEqual(ret00.shape, (n, k))
            self.assertEqual(ret00.dtype, ht.float64)
            self.assertEqual(ret00.split, 0)
            # splits 10
            a = ht.ones((n, m), split=1, device=ht_device)
            b = ht.ones((j, k), split=0, device=ht_device)
            a[0] = ht.arange(1, m + 1, device=ht_device)
            a[:, -1] = ht.arange(1, n + 1, device=ht_device)
            b[0] = ht.arange(1, k + 1, device=ht_device)
            b[:, 0] = ht.arange(1, j + 1, device=ht_device)
            ret00 = ht.matmul(a, b)
            ret_comp10 = ht.array(a_torch @ b_torch, split=1, device=ht_device)
            self.assertTrue(ht.equal(ret00, ret_comp10))
            self.assertIsInstance(ret00, ht.DNDarray)
            self.assertEqual(ret00.shape, (n, k))
            self.assertEqual(ret00.dtype, ht.float)
            self.assertEqual(ret00.split, 1)
            # splits 11
            a = ht.ones((n, m), split=1, device=ht_device)
            b = ht.ones((j, k), split=1, device=ht_device)
            a[0] = ht.arange(1, m + 1, device=ht_device)
            a[:, -1] = ht.arange(1, n + 1, device=ht_device)
            b[0] = ht.arange(1, k + 1, device=ht_device)
            b[:, 0] = ht.arange(1, j + 1, device=ht_device)
            ret00 = ht.matmul(a, b)
            ret_comp11 = ht.array(a_torch @ b_torch, split=1, device=ht_device)
            self.assertTrue(ht.equal(ret00, ret_comp11))
            self.assertIsInstance(ret00, ht.DNDarray)
            self.assertEqual(ret00.shape, (n, k))
            self.assertEqual(ret00.dtype, ht.float)
            self.assertEqual(ret00.split, 1)
            # splits 11 (torch)
            a = ht.array(torch.ones((n, m), device=device), split=1, device=ht_device)
            b = ht.array(torch.ones((j, k), device=device), split=1, device=ht_device)
            a[0] = ht.arange(1, m + 1, device=ht_device)
            a[:, -1] = ht.arange(1, n + 1, device=ht_device)
            b[0] = ht.arange(1, k + 1, device=ht_device)
            b[:, 0] = ht.arange(1, j + 1, device=ht_device)
            ret00 = ht.matmul(a, b)
            ret_comp11 = ht.array(a_torch @ b_torch, split=1, device=ht_device)
            self.assertTrue(ht.equal(ret00, ret_comp11))
            self.assertIsInstance(ret00, ht.DNDarray)
            self.assertEqual(ret00.shape, (n, k))
            self.assertEqual(ret00.dtype, ht.float)
            self.assertEqual(ret00.split, 1)
        # splits 0 None
        a = ht.ones((n, m), split=0, device=ht_device)
        b = ht.ones((j, k), split=None, device=ht_device)
        a[0] = ht.arange(1, m + 1, device=ht_device)
        a[:, -1] = ht.arange(1, n + 1, device=ht_device)
        b[0] = ht.arange(1, k + 1, device=ht_device)
        b[:, 0] = ht.arange(1, j + 1, device=ht_device)
        ret00 = ht.matmul(a, b)
        ret_comp0 = ht.array(a_torch @ b_torch, split=0, device=ht_device)
        self.assertTrue(ht.equal(ret00, ret_comp0))
        self.assertIsInstance(ret00, ht.DNDarray)
        self.assertEqual(ret00.shape, (n, k))
        self.assertEqual(ret00.dtype, ht.float)
        self.assertEqual(ret00.split, 0)
        # splits 1 None
        a = ht.ones((n, m), split=1, device=ht_device)
        b = ht.ones((j, k), split=None, device=ht_device)
        a[0] = ht.arange(1, m + 1, device=ht_device)
        a[:, -1] = ht.arange(1, n + 1, device=ht_device)
        b[0] = ht.arange(1, k + 1, device=ht_device)
        b[:, 0] = ht.arange(1, j + 1, device=ht_device)
        ret00 = ht.matmul(a, b)
        ret_comp1 = ht.array(a_torch @ b_torch, split=1, device=ht_device)
        self.assertTrue(ht.equal(ret00, ret_comp1))
        self.assertIsInstance(ret00, ht.DNDarray)
        self.assertEqual(ret00.shape, (n, k))
        self.assertEqual(ret00.dtype, ht.float)
        self.assertEqual(ret00.split, 1)
        # splits None 0
        a = ht.ones((n, m), split=None, device=ht_device)
        b = ht.ones((j, k), split=0, device=ht_device)
        a[0] = ht.arange(1, m + 1, device=ht_device)
        a[:, -1] = ht.arange(1, n + 1, device=ht_device)
        b[0] = ht.arange(1, k + 1, device=ht_device)
        b[:, 0] = ht.arange(1, j + 1, device=ht_device)
        ret00 = ht.matmul(a, b)
        ret_comp = ht.array(a_torch @ b_torch, split=0, device=ht_device)
        self.assertTrue(ht.equal(ret00, ret_comp))
        self.assertIsInstance(ret00, ht.DNDarray)
        self.assertEqual(ret00.shape, (n, k))
        self.assertEqual(ret00.dtype, ht.float)
        self.assertEqual(ret00.split, 0)
        # splits None 1
        a = ht.ones((n, m), split=None, device=ht_device)
        b = ht.ones((j, k), split=1, device=ht_device)
        a[0] = ht.arange(1, m + 1, device=ht_device)
        a[:, -1] = ht.arange(1, n + 1, device=ht_device)
        b[0] = ht.arange(1, k + 1, device=ht_device)
        b[:, 0] = ht.arange(1, j + 1, device=ht_device)
        ret00 = ht.matmul(a, b)
        ret_comp = ht.array(a_torch @ b_torch, split=1, device=ht_device)
        self.assertTrue(ht.equal(ret00, ret_comp))
        self.assertIsInstance(ret00, ht.DNDarray)
        self.assertEqual(ret00.shape, (n, k))
        self.assertEqual(ret00.dtype, ht.float)
        self.assertEqual(ret00.split, 1)
        # vector matrix mult:
        # a -> vector
        a_torch = torch.ones((m), device=device)
        b_torch = torch.ones((j, k), device=device)
        b_torch[0] = torch.arange(1, k + 1, device=device)
        b_torch[:, 0] = torch.arange(1, j + 1, device=device)
        # splits None None
        a = ht.ones((m), split=None, device=ht_device)
        b = ht.ones((j, k), split=None, device=ht_device)
        b[0] = ht.arange(1, k + 1, device=ht_device)
        b[:, 0] = ht.arange(1, j + 1, device=ht_device)
        ret00 = ht.matmul(a, b)
        ret_comp = ht.array(a_torch @ b_torch, split=None, device=ht_device)
        self.assertTrue(ht.equal(ret00, ret_comp))
        self.assertIsInstance(ret00, ht.DNDarray)
        self.assertEqual(ret00.shape, (k,))
        self.assertEqual(ret00.dtype, ht.float)
        self.assertEqual(ret00.split, None)
        # splits None 0
        a = ht.ones((m), split=None, device=ht_device)
        b = ht.ones((j, k), split=0, device=ht_device)
        b[0] = ht.arange(1, k + 1, device=ht_device)
        b[:, 0] = ht.arange(1, j + 1, device=ht_device)
        ret00 = ht.matmul(a, b)
        ret_comp = ht.array(a_torch @ b_torch, split=None, device=ht_device)
        self.assertTrue(ht.equal(ret00, ret_comp))
        self.assertIsInstance(ret00, ht.DNDarray)
        self.assertEqual(ret00.shape, (k,))
        self.assertEqual(ret00.dtype, ht.float)
        self.assertEqual(ret00.split, 0)
        # splits None 1
        a = ht.ones((m), split=None, device=ht_device)
        b = ht.ones((j, k), split=1, device=ht_device)
        b[0] = ht.arange(1, k + 1, device=ht_device)
        b[:, 0] = ht.arange(1, j + 1, device=ht_device)
        ret00 = ht.matmul(a, b)
        ret_comp = ht.array(a_torch @ b_torch, split=0, device=ht_device)
        self.assertTrue(ht.equal(ret00, ret_comp))
        self.assertIsInstance(ret00, ht.DNDarray)
        self.assertEqual(ret00.shape, (k,))
        self.assertEqual(ret00.dtype, ht.float)
        self.assertEqual(ret00.split, 0)
        # splits 0 None
        # NOTE(review): comment says "0 None" but the code below builds
        # a with split=None and b with split=0 -- kept as-is.
        a = ht.ones((m), split=None, device=ht_device)
        b = ht.ones((j, k), split=0, device=ht_device)
        b[0] = ht.arange(1, k + 1, device=ht_device)
        b[:, 0] = ht.arange(1, j + 1, device=ht_device)
        ret00 = ht.matmul(a, b)
        ret_comp = ht.array(a_torch @ b_torch, split=None, device=ht_device)
        self.assertTrue(ht.equal(ret00, ret_comp))
        self.assertIsInstance(ret00, ht.DNDarray)
        self.assertEqual(ret00.shape, (k,))
        self.assertEqual(ret00.dtype, ht.float)
        self.assertEqual(ret00.split, 0)
        # splits 0 0
        a = ht.ones((m), split=0, device=ht_device)
        b = ht.ones((j, k), split=0, device=ht_device)
        b[0] = ht.arange(1, k + 1, device=ht_device)
        b[:, 0] = ht.arange(1, j + 1, device=ht_device)
        ret00 = ht.matmul(a, b)
        ret_comp = ht.array(a_torch @ b_torch, split=None, device=ht_device)
        self.assertTrue(ht.equal(ret00, ret_comp))
        self.assertIsInstance(ret00, ht.DNDarray)
        self.assertEqual(ret00.shape, (k,))
        self.assertEqual(ret00.dtype, ht.float)
        self.assertEqual(ret00.split, 0)
        # splits 0 1
        a = ht.ones((m), split=0, device=ht_device)
        b = ht.ones((j, k), split=1, device=ht_device)
        b[0] = ht.arange(1, k + 1, device=ht_device)
        b[:, 0] = ht.arange(1, j + 1, device=ht_device)
        ret00 = ht.matmul(a, b)
        ret_comp = ht.array(a_torch @ b_torch, split=None, device=ht_device)
        self.assertTrue(ht.equal(ret00, ret_comp))
        self.assertIsInstance(ret00, ht.DNDarray)
        self.assertEqual(ret00.shape, (k,))
        self.assertEqual(ret00.dtype, ht.float)
        self.assertEqual(ret00.split, 0)
        # b -> vector
        a_torch = torch.ones((n, m), device=device)
        a_torch[0] = torch.arange(1, m + 1, device=device)
        a_torch[:, -1] = torch.arange(1, n + 1, device=device)
        b_torch = torch.ones((j), device=device)
        # splits None None
        a = ht.ones((n, m), split=None, device=ht_device)
        b = ht.ones((j), split=None, device=ht_device)
        a[0] = ht.arange(1, m + 1, device=ht_device)
        a[:, -1] = ht.arange(1, n + 1, device=ht_device)
        ret00 = ht.matmul(a, b)
        ret_comp = ht.array(a_torch @ b_torch, split=None, device=ht_device)
        self.assertTrue(ht.equal(ret00, ret_comp))
        self.assertIsInstance(ret00, ht.DNDarray)
        self.assertEqual(ret00.shape, (n,))
        self.assertEqual(ret00.dtype, ht.float)
        self.assertEqual(ret00.split, None)
        # splits 0 None
        a = ht.ones((n, m), split=0, device=ht_device)
        b = ht.ones((j), split=None, device=ht_device)
        a[0] = ht.arange(1, m + 1, device=ht_device)
        a[:, -1] = ht.arange(1, n + 1, device=ht_device)
        ret00 = ht.matmul(a, b)
        ret_comp = ht.array((a_torch @ b_torch), split=None, device=ht_device)
        self.assertTrue(ht.equal(ret00, ret_comp))
        self.assertIsInstance(ret00, ht.DNDarray)
        self.assertEqual(ret00.shape, (n,))
        self.assertEqual(ret00.dtype, ht.float)
        self.assertEqual(ret00.split, 0)
        # splits 1 None
        a = ht.ones((n, m), split=1, device=ht_device)
        b = ht.ones((j), split=None, device=ht_device)
        a[0] = ht.arange(1, m + 1, device=ht_device)
        a[:, -1] = ht.arange(1, n + 1, device=ht_device)
        ret00 = ht.matmul(a, b)
        ret_comp = ht.array((a_torch @ b_torch), split=None, device=ht_device)
        self.assertTrue(ht.equal(ret00, ret_comp))
        self.assertIsInstance(ret00, ht.DNDarray)
        self.assertEqual(ret00.shape, (n,))
        self.assertEqual(ret00.dtype, ht.float)
        self.assertEqual(ret00.split, 0)
        # splits None 0
        a = ht.ones((n, m), split=None, device=ht_device)
        b = ht.ones((j), split=0, device=ht_device)
        a[0] = ht.arange(1, m + 1, device=ht_device)
        a[:, -1] = ht.arange(1, n + 1, device=ht_device)
        ret00 = ht.matmul(a, b)
        ret_comp = ht.array((a_torch @ b_torch), split=None, device=ht_device)
        self.assertTrue(ht.equal(ret00, ret_comp))
        self.assertIsInstance(ret00, ht.DNDarray)
        self.assertEqual(ret00.shape, (n,))
        self.assertEqual(ret00.dtype, ht.float)
        self.assertEqual(ret00.split, 0)
        # splits 0 0
        a = ht.ones((n, m), split=0, device=ht_device)
        b = ht.ones((j), split=0, device=ht_device)
        a[0] = ht.arange(1, m + 1, device=ht_device)
        a[:, -1] = ht.arange(1, n + 1, device=ht_device)
        ret00 = ht.matmul(a, b)
        ret_comp = ht.array((a_torch @ b_torch), split=None, device=ht_device)
        self.assertTrue(ht.equal(ret00, ret_comp))
        self.assertIsInstance(ret00, ht.DNDarray)
        self.assertEqual(ret00.shape, (n,))
        self.assertEqual(ret00.dtype, ht.float)
        self.assertEqual(ret00.split, 0)
        # splits 1 0
        a = ht.ones((n, m), split=1, device=ht_device)
        b = ht.ones((j), split=0, device=ht_device)
        a[0] = ht.arange(1, m + 1, device=ht_device)
        a[:, -1] = ht.arange(1, n + 1, device=ht_device)
        ret00 = ht.matmul(a, b)
        ret_comp = ht.array((a_torch @ b_torch), split=None, device=ht_device)
        self.assertTrue(ht.equal(ret00, ret_comp))
        self.assertIsInstance(ret00, ht.DNDarray)
        self.assertEqual(ret00.shape, (n,))
        self.assertEqual(ret00.dtype, ht.float)
        self.assertEqual(ret00.split, 0)
        # 3D operands are unsupported
        with self.assertRaises(NotImplementedError):
            a = ht.zeros((3, 3, 3), split=2)
            b = a.copy()
            a @ b
def test_transpose(self):
    """Transpose of vectors, matrices and 4D arrays, both local and distributed."""
    # vector transpose, not distributed
    vector = ht.arange(10, device=ht_device)
    vector_t = vector.T
    self.assertIsInstance(vector_t, ht.DNDarray)
    self.assertEqual(vector_t.dtype, ht.int32)
    self.assertEqual(vector_t.split, None)
    self.assertEqual(vector_t.shape, (10,))
    # simple matrix transpose, not distributed
    simple_matrix = ht.zeros((2, 4), device=ht_device)
    simple_matrix_t = simple_matrix.transpose()
    self.assertIsInstance(simple_matrix_t, ht.DNDarray)
    self.assertEqual(simple_matrix_t.dtype, ht.float32)
    self.assertEqual(simple_matrix_t.split, None)
    self.assertEqual(simple_matrix_t.shape, (4, 2))
    self.assertEqual(simple_matrix_t._DNDarray__array.shape, (4, 2))
    # 4D array, not distributed, with given axis
    array_4d = ht.zeros((2, 3, 4, 5), device=ht_device)
    array_4d_t = ht.transpose(array_4d, axes=(-1, 0, 2, 1))
    self.assertIsInstance(array_4d_t, ht.DNDarray)
    self.assertEqual(array_4d_t.dtype, ht.float32)
    self.assertEqual(array_4d_t.split, None)
    self.assertEqual(array_4d_t.shape, (5, 2, 4, 3))
    self.assertEqual(array_4d_t._DNDarray__array.shape, (5, 2, 4, 3))
    # vector transpose, distributed
    vector_split = ht.arange(10, split=0, device=ht_device)
    vector_split_t = vector_split.T
    self.assertIsInstance(vector_split_t, ht.DNDarray)
    self.assertEqual(vector_split_t.dtype, ht.int32)
    self.assertEqual(vector_split_t.split, 0)
    self.assertEqual(vector_split_t.shape, (10,))
    self.assertLessEqual(vector_split_t.lshape[0], 10)
    # matrix transpose, distributed
    matrix_split = ht.ones((10, 20), split=1, device=ht_device)
    matrix_split_t = matrix_split.transpose()
    self.assertIsInstance(matrix_split_t, ht.DNDarray)
    self.assertEqual(matrix_split_t.dtype, ht.float32)
    self.assertEqual(matrix_split_t.split, 0)
    self.assertEqual(matrix_split_t.shape, (20, 10))
    self.assertLessEqual(matrix_split_t.lshape[0], 20)
    self.assertEqual(matrix_split_t.lshape[1], 10)
    # 4D array, distributed
    array_4d_split = ht.ones((3, 4, 5, 6), split=3, device=ht_device)
    array_4d_split_t = ht.transpose(array_4d_split, axes=(1, 0, 3, 2))
    # fixed: this previously asserted on the stale local `array_4d_t` from
    # the non-distributed case above instead of `array_4d_split_t`
    self.assertIsInstance(array_4d_split_t, ht.DNDarray)
    self.assertEqual(array_4d_split_t.dtype, ht.float32)
    self.assertEqual(array_4d_split_t.split, 2)
    self.assertEqual(array_4d_split_t.shape, (4, 3, 6, 5))
    self.assertEqual(array_4d_split_t.lshape[0], 4)
    self.assertEqual(array_4d_split_t.lshape[1], 3)
    self.assertLessEqual(array_4d_split_t.lshape[2], 6)
    self.assertEqual(array_4d_split_t.lshape[3], 5)
    # exceptions
    with self.assertRaises(TypeError):
        ht.transpose(1)
    with self.assertRaises(ValueError):
        ht.transpose(ht.zeros((2, 3), device=ht_device), axes=1.0)
    with self.assertRaises(ValueError):
        ht.transpose(ht.zeros((2, 3), device=ht_device), axes=(-1,))
    with self.assertRaises(TypeError):
        ht.zeros((2, 3), device=ht_device).transpose(axes="01")
    with self.assertRaises(TypeError):
        ht.zeros((2, 3), device=ht_device).transpose(axes=(0, 1.0))
    with self.assertRaises((ValueError, IndexError)):
        ht.zeros((2, 3), device=ht_device).transpose(axes=(0, 3))
def test_tril(self):
    """Lower-triangular extraction (ht.tril) for 1D/2D/ND data, local and split."""
    local_ones = ht.ones((5,), device=ht_device)
    # 1D case, no offset, data is not split, module-level call
    result = ht.tril(local_ones)
    comparison = torch.ones((5, 5), device=device).tril()
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.shape, (5, 5))
    self.assertEqual(result.lshape, (5, 5))
    self.assertEqual(result.split, None)
    self.assertTrue((result._DNDarray__array == comparison).all())
    # 1D case, positive offset, data is not split, module-level call
    result = ht.tril(local_ones, k=2)
    comparison = torch.ones((5, 5), device=device).tril(diagonal=2)
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.shape, (5, 5))
    self.assertEqual(result.lshape, (5, 5))
    self.assertEqual(result.split, None)
    self.assertTrue((result._DNDarray__array == comparison).all())
    # 1D case, negative offset, data is not split, module-level call
    result = ht.tril(local_ones, k=-2)
    comparison = torch.ones((5, 5), device=device).tril(diagonal=-2)
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.shape, (5, 5))
    self.assertEqual(result.lshape, (5, 5))
    self.assertEqual(result.split, None)
    self.assertTrue((result._DNDarray__array == comparison).all())
    local_ones = ht.ones((4, 5), device=ht_device)
    # 2D case, no offset, data is not split, method
    result = local_ones.tril()
    comparison = torch.ones((4, 5), device=device).tril()
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.shape, (4, 5))
    self.assertEqual(result.lshape, (4, 5))
    self.assertEqual(result.split, None)
    self.assertTrue((result._DNDarray__array == comparison).all())
    # 2D case, positive offset, data is not split, method
    result = local_ones.tril(k=2)
    comparison = torch.ones((4, 5), device=device).tril(diagonal=2)
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.shape, (4, 5))
    self.assertEqual(result.lshape, (4, 5))
    self.assertEqual(result.split, None)
    self.assertTrue((result._DNDarray__array == comparison).all())
    # 2D case, negative offset, data is not split, method
    result = local_ones.tril(k=-2)
    comparison = torch.ones((4, 5), device=device).tril(diagonal=-2)
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.shape, (4, 5))
    self.assertEqual(result.lshape, (4, 5))
    self.assertEqual(result.split, None)
    self.assertTrue((result._DNDarray__array == comparison).all())
    local_ones = ht.ones((3, 4, 5, 6), device=ht_device)
    # 2D+ case, no offset, data is not split, module-level call
    result = local_ones.tril()
    comparison = torch.ones((5, 6), device=device).tril()
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.shape, (3, 4, 5, 6))
    self.assertEqual(result.lshape, (3, 4, 5, 6))
    self.assertEqual(result.split, None)
    for i in range(3):
        for j in range(4):
            self.assertTrue((result._DNDarray__array[i, j] == comparison).all())
    # 2D+ case, positive offset, data is not split, module-level call
    result = local_ones.tril(k=2)
    comparison = torch.ones((5, 6), device=device).tril(diagonal=2)
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.shape, (3, 4, 5, 6))
    self.assertEqual(result.lshape, (3, 4, 5, 6))
    self.assertEqual(result.split, None)
    for i in range(3):
        for j in range(4):
            self.assertTrue((result._DNDarray__array[i, j] == comparison).all())
    # 2D+ case, negative offset, data is not split, module-level call
    result = local_ones.tril(k=-2)
    comparison = torch.ones((5, 6), device=device).tril(diagonal=-2)
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.shape, (3, 4, 5, 6))
    self.assertEqual(result.lshape, (3, 4, 5, 6))
    self.assertEqual(result.split, None)
    for i in range(3):
        for j in range(4):
            self.assertTrue((result._DNDarray__array[i, j] == comparison).all())
    distributed_ones = ht.ones((5,), split=0, device=ht_device)
    # 1D case, no offset, data is split, method
    result = distributed_ones.tril()
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.shape, (5, 5))
    self.assertEqual(result.split, 1)
    self.assertTrue(result.lshape[0] == 5 or result.lshape[0] == 0)
    self.assertLessEqual(result.lshape[1], 5)
    # fixed: was assertTrue(result.sum(), 15) -- a two-argument assertTrue
    # treats 15 as the failure message and never actually compares
    self.assertEqual(result.sum(), 15)
    if result.comm.rank == 0:
        self.assertTrue(result._DNDarray__array[-1, 0] == 1)
    if result.comm.rank == result.shape[0] - 1:
        self.assertTrue(result._DNDarray__array[0, -1] == 0)
    # 1D case, positive offset, data is split, method
    result = distributed_ones.tril(k=2)
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.shape, (5, 5))
    self.assertEqual(result.split, 1)
    self.assertEqual(result.lshape[0], 5)
    self.assertLessEqual(result.lshape[1], 5)
    self.assertEqual(result.sum(), 22)
    if result.comm.rank == 0:
        self.assertTrue(result._DNDarray__array[-1, 0] == 1)
    if result.comm.rank == result.shape[0] - 1:
        self.assertTrue(result._DNDarray__array[0, -1] == 0)
    # 1D case, negative offset, data is split, method
    result = distributed_ones.tril(k=-2)
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.shape, (5, 5))
    self.assertEqual(result.split, 1)
    self.assertEqual(result.lshape[0], 5)
    self.assertLessEqual(result.lshape[1], 5)
    self.assertEqual(result.sum(), 6)
    if result.comm.rank == 0:
        self.assertTrue(result._DNDarray__array[-1, 0] == 1)
    if result.comm.rank == result.shape[0] - 1:
        self.assertTrue(result._DNDarray__array[0, -1] == 0)
    distributed_ones = ht.ones((4, 5), split=0, device=ht_device)
    # 2D case, no offset, data is horizontally split, method
    result = distributed_ones.tril()
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.shape, (4, 5))
    self.assertEqual(result.split, 0)
    self.assertLessEqual(result.lshape[0], 4)
    self.assertEqual(result.lshape[1], 5)
    self.assertEqual(result.sum(), 10)
    if result.comm.rank == 0:
        self.assertTrue(result._DNDarray__array[0, -1] == 0)
    if result.comm.rank == result.shape[0] - 1:
        self.assertTrue(result._DNDarray__array[-1, 0] == 1)
    # 2D case, positive offset, data is horizontally split, method
    result = distributed_ones.tril(k=2)
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.shape, (4, 5))
    self.assertEqual(result.split, 0)
    self.assertLessEqual(result.lshape[0], 4)
    self.assertEqual(result.lshape[1], 5)
    self.assertEqual(result.sum(), 17)
    if result.comm.rank == 0:
        self.assertTrue(result._DNDarray__array[0, -1] == 0)
    if result.comm.rank == result.shape[0] - 1:
        self.assertTrue(result._DNDarray__array[-1, 0] == 1)
    # 2D case, negative offset, data is horizontally split, method
    result = distributed_ones.tril(k=-2)
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.shape, (4, 5))
    self.assertEqual(result.split, 0)
    self.assertLessEqual(result.lshape[0], 4)
    self.assertEqual(result.lshape[1], 5)
    self.assertEqual(result.sum(), 3)
    if result.comm.rank == 0:
        self.assertTrue(result._DNDarray__array[0, -1] == 0)
    if result.comm.rank == result.shape[0] - 1:
        self.assertTrue(result._DNDarray__array[-1, 0] == 1)
    distributed_ones = ht.ones((4, 5), split=1, device=ht_device)
    # 2D case, no offset, data is vertically split, method
    result = distributed_ones.tril()
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.shape, (4, 5))
    self.assertEqual(result.split, 1)
    self.assertEqual(result.lshape[0], 4)
    self.assertLessEqual(result.lshape[1], 5)
    self.assertEqual(result.sum(), 10)
    if result.comm.rank == 0:
        self.assertTrue(result._DNDarray__array[-1, 0] == 1)
    if result.comm.rank == result.shape[0] - 1:
        self.assertTrue(result._DNDarray__array[0, -1] == 0)
    # 2D case, positive offset, data is vertically split, method
    result = distributed_ones.tril(k=2)
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.shape, (4, 5))
    self.assertEqual(result.split, 1)
    self.assertEqual(result.lshape[0], 4)
    self.assertLessEqual(result.lshape[1], 5)
    self.assertEqual(result.sum(), 17)
    if result.comm.rank == 0:
        self.assertTrue(result._DNDarray__array[-1, 0] == 1)
    if result.comm.rank == result.shape[0] - 1:
        self.assertTrue(result._DNDarray__array[0, -1] == 0)
    # 2D case, negative offset, data is vertically split, method
    result = distributed_ones.tril(k=-2)
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.shape, (4, 5))
    self.assertEqual(result.split, 1)
    self.assertEqual(result.lshape[0], 4)
    self.assertLessEqual(result.lshape[1], 5)
    self.assertEqual(result.sum(), 3)
    if result.comm.rank == 0:
        self.assertTrue(result._DNDarray__array[-1, 0] == 1)
    if result.comm.rank == result.shape[0] - 1:
        self.assertTrue(result._DNDarray__array[0, -1] == 0)
    with self.assertRaises(TypeError):
        ht.tril("asdf")
    with self.assertRaises(TypeError):
        ht.tril(distributed_ones, m=["sdf", "sf"])
def test_triu(self):
    """Upper-triangular extraction (ht.triu) for 1D/2D/ND data, local and split."""
    local_ones = ht.ones((5,), device=ht_device)
    # 1D case, no offset, data is not split, module-level call
    result = ht.triu(local_ones)
    comparison = torch.ones((5, 5), device=device).triu()
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.shape, (5, 5))
    self.assertEqual(result.lshape, (5, 5))
    self.assertEqual(result.split, None)
    self.assertTrue((result._DNDarray__array == comparison).all())
    # 1D case, positive offset, data is not split, module-level call
    result = ht.triu(local_ones, k=2)
    comparison = torch.ones((5, 5), device=device).triu(diagonal=2)
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.shape, (5, 5))
    self.assertEqual(result.lshape, (5, 5))
    self.assertEqual(result.split, None)
    self.assertTrue((result._DNDarray__array == comparison).all())
    # 1D case, negative offset, data is not split, module-level call
    result = ht.triu(local_ones, k=-2)
    comparison = torch.ones((5, 5), device=device).triu(diagonal=-2)
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.shape, (5, 5))
    self.assertEqual(result.lshape, (5, 5))
    self.assertEqual(result.split, None)
    self.assertTrue((result._DNDarray__array == comparison).all())
    local_ones = ht.ones((4, 5), device=ht_device)
    # 2D case, no offset, data is not split, method
    result = local_ones.triu()
    comparison = torch.ones((4, 5), device=device).triu()
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.shape, (4, 5))
    self.assertEqual(result.lshape, (4, 5))
    self.assertEqual(result.split, None)
    self.assertTrue((result._DNDarray__array == comparison).all())
    # 2D case, positive offset, data is not split, method
    result = local_ones.triu(k=2)
    comparison = torch.ones((4, 5), device=device).triu(diagonal=2)
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.shape, (4, 5))
    self.assertEqual(result.lshape, (4, 5))
    self.assertEqual(result.split, None)
    self.assertTrue((result._DNDarray__array == comparison).all())
    # 2D case, negative offset, data is not split, method
    result = local_ones.triu(k=-2)
    comparison = torch.ones((4, 5), device=device).triu(diagonal=-2)
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.shape, (4, 5))
    self.assertEqual(result.lshape, (4, 5))
    self.assertEqual(result.split, None)
    self.assertTrue((result._DNDarray__array == comparison).all())
    local_ones = ht.ones((3, 4, 5, 6), device=ht_device)
    # 2D+ case, no offset, data is not split, module-level call
    result = local_ones.triu()
    comparison = torch.ones((5, 6), device=device).triu()
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.shape, (3, 4, 5, 6))
    self.assertEqual(result.lshape, (3, 4, 5, 6))
    self.assertEqual(result.split, None)
    for i in range(3):
        for j in range(4):
            self.assertTrue((result._DNDarray__array[i, j] == comparison).all())
    # 2D+ case, positive offset, data is not split, module-level call
    result = local_ones.triu(k=2)
    comparison = torch.ones((5, 6), device=device).triu(diagonal=2)
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.shape, (3, 4, 5, 6))
    self.assertEqual(result.lshape, (3, 4, 5, 6))
    self.assertEqual(result.split, None)
    for i in range(3):
        for j in range(4):
            self.assertTrue((result._DNDarray__array[i, j] == comparison).all())
    # 2D+ case, negative offset, data is not split, module-level call
    result = local_ones.triu(k=-2)
    comparison = torch.ones((5, 6), device=device).triu(diagonal=-2)
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.shape, (3, 4, 5, 6))
    self.assertEqual(result.lshape, (3, 4, 5, 6))
    self.assertEqual(result.split, None)
    for i in range(3):
        for j in range(4):
            self.assertTrue((result._DNDarray__array[i, j] == comparison).all())
    distributed_ones = ht.ones((5,), split=0, device=ht_device)
    # 1D case, no offset, data is split, method
    result = distributed_ones.triu()
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.shape, (5, 5))
    self.assertEqual(result.split, 1)
    self.assertEqual(result.lshape[0], 5)
    self.assertLessEqual(result.lshape[1], 5)
    # fixed: was assertTrue(result.sum(), 15) -- a two-argument assertTrue
    # treats 15 as the failure message and never actually compares
    self.assertEqual(result.sum(), 15)
    if result.comm.rank == 0:
        self.assertTrue(result._DNDarray__array[-1, 0] == 0)
    if result.comm.rank == result.shape[0] - 1:
        self.assertTrue(result._DNDarray__array[0, -1] == 1)
    # 1D case, positive offset, data is split, method
    result = distributed_ones.triu(k=2)
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.shape, (5, 5))
    self.assertEqual(result.split, 1)
    self.assertEqual(result.lshape[0], 5)
    self.assertLessEqual(result.lshape[1], 5)
    self.assertEqual(result.sum(), 6)
    if result.comm.rank == 0:
        self.assertTrue(result._DNDarray__array[-1, 0] == 0)
    if result.comm.rank == result.shape[0] - 1:
        self.assertTrue(result._DNDarray__array[0, -1] == 1)
    # 1D case, negative offset, data is split, method
    result = distributed_ones.triu(k=-2)
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.shape, (5, 5))
    self.assertEqual(result.split, 1)
    self.assertEqual(result.lshape[0], 5)
    self.assertLessEqual(result.lshape[1], 5)
    self.assertEqual(result.sum(), 22)
    if result.comm.rank == 0:
        self.assertTrue(result._DNDarray__array[-1, 0] == 0)
    if result.comm.rank == result.shape[0] - 1:
        self.assertTrue(result._DNDarray__array[0, -1] == 1)
    distributed_ones = ht.ones((4, 5), split=0, device=ht_device)
    # 2D case, no offset, data is horizontally split, method
    result = distributed_ones.triu()
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.shape, (4, 5))
    self.assertEqual(result.split, 0)
    self.assertLessEqual(result.lshape[0], 4)
    self.assertEqual(result.lshape[1], 5)
    self.assertEqual(result.sum(), 14)
    if result.comm.rank == 0:
        self.assertTrue(result._DNDarray__array[0, -1] == 1)
    if result.comm.rank == result.shape[0] - 1:
        self.assertTrue(result._DNDarray__array[-1, 0] == 0)
    # 2D case, positive offset, data is horizontally split, method
    result = distributed_ones.triu(k=2)
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.shape, (4, 5))
    self.assertEqual(result.split, 0)
    self.assertLessEqual(result.lshape[0], 4)
    self.assertEqual(result.lshape[1], 5)
    self.assertEqual(result.sum(), 6)
    if result.comm.rank == 0:
        self.assertTrue(result._DNDarray__array[0, -1] == 1)
    if result.comm.rank == result.shape[0] - 1:
        self.assertTrue(result._DNDarray__array[-1, 0] == 0)
    # 2D case, negative offset, data is horizontally split, method
    result = distributed_ones.triu(k=-2)
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.shape, (4, 5))
    self.assertEqual(result.split, 0)
    self.assertLessEqual(result.lshape[0], 4)
    self.assertEqual(result.lshape[1], 5)
    self.assertEqual(result.sum(), 19)
    if result.comm.rank == 0:
        self.assertTrue(result._DNDarray__array[0, -1] == 1)
    if result.comm.rank == result.shape[0] - 1:
        self.assertTrue(result._DNDarray__array[-1, 0] == 0)
    distributed_ones = ht.ones((4, 5), split=1, device=ht_device)
    # 2D case, no offset, data is vertically split, method
    result = distributed_ones.triu()
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.shape, (4, 5))
    self.assertEqual(result.split, 1)
    self.assertEqual(result.lshape[0], 4)
    self.assertLessEqual(result.lshape[1], 5)
    self.assertEqual(result.sum(), 14)
    if result.comm.rank == 0:
        self.assertTrue(result._DNDarray__array[-1, 0] == 0)
    if result.comm.rank == result.shape[0] - 1:
        self.assertTrue(result._DNDarray__array[0, -1] == 1)
    # 2D case, positive offset, data is vertically split, method
    result = distributed_ones.triu(k=2)
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.shape, (4, 5))
    self.assertEqual(result.split, 1)
    self.assertEqual(result.lshape[0], 4)
    self.assertLessEqual(result.lshape[1], 5)
    self.assertEqual(result.sum(), 6)
    if result.comm.rank == 0:
        self.assertTrue(result._DNDarray__array[-1, 0] == 0)
    if result.comm.rank == result.shape[0] - 1:
        self.assertTrue(result._DNDarray__array[0, -1] == 1)
    # 2D case, negative offset, data is vertically split, method
    result = distributed_ones.triu(k=-2)
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.shape, (4, 5))
    self.assertEqual(result.split, 1)
    self.assertEqual(result.lshape[0], 4)
    self.assertLessEqual(result.lshape[1], 5)
    self.assertEqual(result.sum(), 19)
    if result.comm.rank == 0:
        self.assertTrue(result._DNDarray__array[-1, 0] == 0)
    if result.comm.rank == result.shape[0] - 1:
        self.assertTrue(result._DNDarray__array[0, -1] == 1)
| 45.932722
| 95
| 0.594363
| 6,138
| 45,060
| 4.261486
| 0.02737
| 0.134763
| 0.102764
| 0.053905
| 0.930573
| 0.90148
| 0.881867
| 0.87143
| 0.861681
| 0.860152
| 0
| 0.045032
| 0.264225
| 45,060
| 980
| 96
| 45.979592
| 0.743922
| 0.062428
| 0
| 0.81178
| 0
| 0
| 0.000854
| 0
| 0
| 0
| 0
| 0
| 0.527529
| 1
| 0.006402
| false
| 0
| 0.007682
| 0
| 0.015365
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
3e24378f1f3b09285002c85d10ff3fa2f8b2de30
| 29,352
|
py
|
Python
|
WinLsLoad/lib/db/tab_history_tickets.py
|
Svolcano/python_exercise
|
a50e05891cc7f1fbb40ebcae324b09b6a14473d2
|
[
"MIT"
] | 6
|
2015-07-09T08:47:08.000Z
|
2020-05-16T10:47:31.000Z
|
WinLsLoad/lib/db/tab_history_tickets.py
|
Svolcano/python_exercise
|
a50e05891cc7f1fbb40ebcae324b09b6a14473d2
|
[
"MIT"
] | 7
|
2019-03-27T04:13:12.000Z
|
2022-03-02T14:54:56.000Z
|
WinLsLoad/lib/db/tab_history_tickets.py
|
Svolcano/python_exercise
|
a50e05891cc7f1fbb40ebcae324b09b6a14473d2
|
[
"MIT"
] | 2
|
2019-06-21T06:46:28.000Z
|
2019-12-23T09:31:09.000Z
|
import MySQLdb
import logging
from mysql_basic_c import mysql_basic_c as database
logger = logging.getLogger(__name__)
class tab_history_tickets():
'''
Manage tab_history_tickets table
'''
def __init__(self, host, user, passwd, db):
    '''
    Open the database connection used by this table manager.

    Args:
        host: mysql server host.
        user: mysql user name.
        passwd: mysql password.
        db: database (schema) to use.
    Return:
    Raise:
    '''
    self.db = database(host, user, passwd, db)
def insert_one_record_from_ticket_record(self, ticket_record):
'''
insert one record from ticket record.
Args:
ticket_record:tab_tickets record
Return:
True/False
Raise:
'''
if None == ticket_record[0]:
ticket_id = 'NULL'
else:
ticket_id = '%s' % ticket_record[0]
if None == ticket_record[1]:
applicant_id = 'NULL'
else:
applicant_id = '%s' % ticket_record[1]
if None == ticket_record[2]:
applicant_name = 'NULL'
else:
applicant_name = "'%s'" % ticket_record[2]
if None == ticket_record[3]:
application_time = 'NULL'
else:
application_time = "'%s'" % ticket_record[3]
if None == ticket_record[4]:
application_method = 'NULL'
else:
application_method = '%s' % ticket_record[4]
if None == ticket_record[5]:
type = 'NULL'
else:
type = '%s' % ticket_record[5]
if None == ticket_record[6]:
content = 'NULL'
else:
content = "'%s'" % ticket_record[6]
if None == ticket_record[7]:
begin_time = 'NULL'
else:
begin_time = "'%s'" % ticket_record[7]
if None == ticket_record[8]:
end_time = 'NULL'
else:
end_time = "'%s'" % ticket_record[8]
if None == ticket_record[9]:
state = 'NULL'
else:
state = '%s' % ticket_record[9]
if None == ticket_record[10]:
inner_state = 'NULL'
else:
inner_state = '%s' % ticket_record[10]
if None == ticket_record[11]:
approver_id = 'NULL'
else:
approver_id = '%s' % ticket_record[11]
if None == ticket_record[12]:
approver_name = 'NULL'
else:
approver_name = "'%s'" % ticket_record[12]
if None == ticket_record[13]:
approval_time = 'NULL'
else:
approval_time = "'%s'" % ticket_record[13]
if None == ticket_record[14]:
approval_description = 'NULL'
else:
approval_description = "'%s'" % ticket_record[14]
if None == ticket_record[15]:
param_1 = 'NULL'
else:
param_1 = "%s" % ticket_record[15]
if None == ticket_record[16]:
param_2 = 'NULL'
else:
param_2 = "%s" % ticket_record[16]
if None == ticket_record[17]:
param_3 = 'NULL'
else:
param_3 = "'%s'" % ticket_record[17]
if None == ticket_record[18]:
param_4 = 'NULL'
else:
param_4 = "%s" % ticket_record[18]
if None == ticket_record[19]:
param_5 = 'NULL'
else:
param_5 = "%s" % ticket_record[19]
if None == ticket_record[20]:
param_6 = 'NULL'
else:
param_6 = "'%s'" % ticket_record[20]
if None == ticket_record[21]:
last_operation_time = 'NULL'
else:
last_operation_time = "'%s'" % ticket_record[21]
if None == ticket_record[22]:
operation_record = 'NULL'
else:
operation_record = "'%s'" % ticket_record[22]
sql = "insert into tab_history_tickets(ticket_id,applicant_id,applicant_name,application_time,application_method,type,content,begin_time,end_time,state,inner_state,approver_id,approver_name,approval_time,approval_description,param_1,param_2,param_3,param_4,param_5,param_6,last_operation_time,operation_record) values(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)" % (ticket_id, applicant_id, applicant_name, application_time, application_method, type, content, begin_time, end_time, state, inner_state, approver_id, approver_name, approval_time, approval_description, param_1, param_2, param_3, param_4, param_5, param_6, last_operation_time, operation_record)
return self.db.execute(sql)
def get_sum_files_num_about_user_one(self, user_id, type, source_zone_id, dest_zone_id):
    '''
    Sum param_4 over one user's tab_history_tickets rows with state=90.

    Args:
        user_id: applicant id to filter on.
        type: 11 -> filter on both zones, 1 -> destination zone only,
            2 -> source zone only; anything else is rejected.
        source_zone_id: source zone filter (types 11 and 2).
        dest_zone_id: destination zone filter (types 11 and 1).
    Return:
        (err, rows): (False, []) on unknown type; otherwise err/rows as
        produced by self.db.query, rows copied into a fresh list.
    Raise:
    '''
    # Dispatch table: ticket type -> (sql template, interpolation args).
    dispatch = {
        11: ("select sum(param_4) from tab_history_tickets where state=90 and applicant_id=%d and param_1=%s and param_2=%s and type=%d ",
             (user_id, source_zone_id, dest_zone_id, type)),
        1: ("select sum(param_4) from tab_history_tickets where state=90 and applicant_id=%d and param_2=%s and type=%d ",
            (user_id, dest_zone_id, type)),
        2: ("select sum(param_4) from tab_history_tickets where state=90 and applicant_id=%d and param_1=%s and type=%d ",
            (user_id, source_zone_id, type)),
    }
    if type not in dispatch:
        logger.error('type:%d is error' % type)
        return (False, [])
    template, args = dispatch[type]
    sql = template % args
    logger.info('sql:%s' % sql)
    err, result = self.db.query(sql)
    # db.query signals failure with err == False
    if False == err:
        return (err, result)
    return (err, [row for row in result])
def get_sum_files_num_about_user_two(self,user_id,type,source_zone_id,dest_zone_id,end_time):
    '''
    Sum param_4 over one user's tab_history_tickets rows with state=90,
    restricted to application_time <= end_time.

    Args:
        user_id: applicant id to filter on.
        type: 11 -> filter on both zones, 1 -> destination zone only,
            2 -> source zone only; anything else is rejected.
        source_zone_id: source zone filter (types 11 and 2).
        dest_zone_id: destination zone filter (types 11 and 1).
        end_time: inclusive upper bound on application_time.
    Return:
        (err, rows): (False, []) on unknown type; otherwise err/rows as
        produced by self.db.query, rows copied into a fresh list.
    Raise:
    '''
    sql = ''
    # NOTE(review): arguments are interpolated straight into the SQL text
    # (no escaping) -- unsafe if any of them can come from untrusted input.
    if 11 == type :
        sql = "select sum(param_4) from tab_history_tickets where state=90 and applicant_id=%d and param_1=%s and param_2=%s and type=%d and \
              application_time <= '%s' " %(user_id,source_zone_id,dest_zone_id,type,end_time)
    elif 1 == type :
        sql = "select sum(param_4) from tab_history_tickets where state=90 and applicant_id=%d and param_2=%s and type=%d and \
              application_time <= '%s' " %(user_id,dest_zone_id,type,end_time)
    elif 2 == type :
        sql = "select sum(param_4) from tab_history_tickets where state=90 and applicant_id=%d and param_1=%s and type=%d and \
              application_time <= '%s' " %(user_id,source_zone_id,type,end_time)
    else :
        logger.error('type:%d is error' %type)
        return (False,[])
    logger.info('sql:%s' %sql)
    err,result = self.db.query(sql)
    # db.query signals failure with err == False
    if False == err:
        return (err,result)
    result_list = []
    for row in result:
        result_list.append(row)
    return (err, result_list)
def get_sum_files_num_about_user_three(self,user_id,type,source_zone_id,dest_zone_id,begin_time):
    '''
    Sum param_4 over one user's tab_history_tickets rows with state=90,
    restricted to application_time >= begin_time.

    Args:
        user_id: applicant id to filter on.
        type: 11 -> filter on both zones, 1 -> destination zone only,
            2 -> source zone only; anything else is rejected.
        source_zone_id: source zone filter (types 11 and 2).
        dest_zone_id: destination zone filter (types 11 and 1).
        begin_time: inclusive lower bound on application_time.
    Return:
        (err, rows): (False, []) on unknown type; otherwise err/rows as
        produced by self.db.query, rows copied into a fresh list.
    Raise:
    '''
    sql = ''
    # NOTE(review): arguments are interpolated straight into the SQL text
    # (no escaping) -- unsafe if any of them can come from untrusted input.
    if 11 == type :
        sql = "select sum(param_4) from tab_history_tickets where state=90 and applicant_id=%d and param_1=%s and param_2=%s and type=%d and \
              application_time >= '%s' " %(user_id,source_zone_id,dest_zone_id,type,begin_time)
    elif 1 == type :
        sql = "select sum(param_4) from tab_history_tickets where state=90 and applicant_id=%d and param_2=%s and type=%d and \
              application_time >= '%s' " %(user_id,dest_zone_id,type,begin_time)
    elif 2 == type :
        sql = "select sum(param_4) from tab_history_tickets where state=90 and applicant_id=%d and param_1=%s and type=%d and \
              application_time >= '%s' " %(user_id,source_zone_id,type,begin_time)
    else :
        logger.error('type:%d is error' %type)
        return (False,[])
    logger.info('sql:%s' %sql)
    err,result = self.db.query(sql)
    # db.query signals failure with err == False
    if False == err:
        return (err,result)
    result_list = []
    for row in result:
        result_list.append(row)
    return (err, result_list)
def get_sum_files_num_about_user_four(self,user_id,type,source_zone_id,dest_zone_id,begin_time,end_time):
    '''
    Sum param_4 over one user's tab_history_tickets rows with state=90,
    restricted to begin_time <= application_time <= end_time.

    Args:
        user_id: applicant id to filter on.
        type: 11 -> filter on both zones, 1 -> destination zone only,
            2 -> source zone only; anything else is rejected.
        source_zone_id: source zone filter (types 11 and 2).
        dest_zone_id: destination zone filter (types 11 and 1).
        begin_time: inclusive lower bound on application_time.
        end_time: inclusive upper bound on application_time.
    Return:
        (err, rows): (False, []) on unknown type; otherwise err/rows as
        produced by self.db.query, rows copied into a fresh list.
    Raise:
    '''
    sql = ''
    # NOTE(review): arguments are interpolated straight into the SQL text
    # (no escaping) -- unsafe if any of them can come from untrusted input.
    if 11 == type :
        sql = "select sum(param_4) from tab_history_tickets where state=90 and applicant_id=%d and param_1=%s and param_2=%s and type=%d and application_time>='%s' and \
              application_time<='%s' " %(user_id,source_zone_id,dest_zone_id,type,begin_time,end_time)
    elif 1 == type :
        sql = "select sum(param_4) from tab_history_tickets where state=90 and applicant_id=%d and param_2=%s and type=%d and application_time>='%s' and \
              application_time<='%s' " %(user_id,dest_zone_id,type,begin_time,end_time)
    elif 2 == type :
        sql = "select sum(param_4) from tab_history_tickets where state=90 and applicant_id=%d and param_1=%s and type=%d and application_time>='%s' and \
              application_time<='%s' " %(user_id,source_zone_id,type,begin_time,end_time)
    else :
        logger.error('type:%d is error' %type)
        return (False,[])
    logger.info('sql:%s' %sql)
    err,result = self.db.query(sql)
    # db.query signals failure with err == False
    if False == err:
        return (err,result)
    result_list = []
    for row in result:
        result_list.append(row)
    return (err, result_list)
def get_sum_files_num_about_zone_one(self, type, source_zone_id, dest_zone_id):
    '''
    Sum param_4 over all tab_history_tickets rows with state=90 for the
    given zone filter (no user restriction).

    Args:
        type: 11 -> filter on both zones, 1 -> destination zone only,
            2 -> source zone only; anything else is rejected.
        source_zone_id: source zone filter (types 11 and 2).
        dest_zone_id: destination zone filter (types 11 and 1).
    Return:
        (err, rows): (False, []) on unknown type; otherwise err/rows as
        produced by self.db.query, rows copied into a fresh list.
    Raise:
    '''
    # Dispatch table: ticket type -> (sql template, interpolation args).
    dispatch = {
        11: ("select sum(param_4) from tab_history_tickets where state=90 and param_1=%s and param_2=%s and type = %d ",
             (source_zone_id, dest_zone_id, type)),
        1: ("select sum(param_4) from tab_history_tickets where state=90 and param_2=%s and type = %d ",
            (dest_zone_id, type)),
        2: ("select sum(param_4) from tab_history_tickets where state=90 and param_1=%s and type = %d ",
            (source_zone_id, type)),
    }
    if type not in dispatch:
        logger.error('type:%d is error' % type)
        return (False, [])
    template, args = dispatch[type]
    sql = template % args
    logger.info('sql:%s' % sql)
    err, result = self.db.query(sql)
    # db.query signals failure with err == False
    if False == err:
        return (err, result)
    return (err, [row for row in result])
def get_sum_files_num_about_zone_two(self,type,source_zone_id,dest_zone_id,end_time):
    '''
    Sum param_4 over all tab_history_tickets rows with state=90 for the
    given zone filter, restricted to application_time <= end_time.

    Args:
        type: 11 -> filter on both zones, 1 -> destination zone only,
            2 -> source zone only; anything else is rejected.
        source_zone_id: source zone filter (types 11 and 2).
        dest_zone_id: destination zone filter (types 11 and 1).
        end_time: inclusive upper bound on application_time.
    Return:
        (err, rows): (False, []) on unknown type; otherwise err/rows as
        produced by self.db.query, rows copied into a fresh list.
    Raise:
    '''
    sql = ''
    # NOTE(review): arguments are interpolated straight into the SQL text
    # (no escaping) -- unsafe if any of them can come from untrusted input.
    if 11 == type :
        sql = "select sum(param_4) from tab_history_tickets where state=90 and param_1=%s and param_2=%s and type=%d and \
              application_time <= '%s' " %(source_zone_id,dest_zone_id,type,end_time)
    elif 1 == type :
        sql = "select sum(param_4) from tab_history_tickets where state=90 and param_2=%s and type=%d and \
              application_time <= '%s' " %(dest_zone_id,type,end_time)
    elif 2 == type :
        sql = "select sum(param_4) from tab_history_tickets where state=90 and param_1=%s and type=%d and \
              application_time <= '%s' " %(source_zone_id,type,end_time)
    else :
        logger.error('type:%d is error' %type)
        return (False,[])
    logger.info('sql:%s' %sql)
    err,result = self.db.query(sql)
    # db.query signals failure with err == False
    if False == err:
        return (err,result)
    result_list = []
    for row in result:
        result_list.append(row)
    return (err, result_list)
def get_sum_files_num_about_zone_three(self,type,source_zone_id,dest_zone_id,begin_time):
    '''
    Sum param_4 over all tab_history_tickets rows with state=90 for the
    given zone filter, restricted to application_time >= begin_time.

    Args:
        type: 11 -> filter on both zones, 1 -> destination zone only,
            2 -> source zone only; anything else is rejected.
        source_zone_id: source zone filter (types 11 and 2).
        dest_zone_id: destination zone filter (types 11 and 1).
        begin_time: inclusive lower bound on application_time.
    Return:
        (err, rows): (False, []) on unknown type; otherwise err/rows as
        produced by self.db.query, rows copied into a fresh list.
    Raise:
    '''
    sql = ''
    # NOTE(review): arguments are interpolated straight into the SQL text
    # (no escaping) -- unsafe if any of them can come from untrusted input.
    if 11 == type :
        sql = "select sum(param_4) from tab_history_tickets where state=90 and param_1=%s and param_2=%s and type=%d and \
              application_time >= '%s' " %(source_zone_id,dest_zone_id,type,begin_time)
    elif 1 == type :
        sql = "select sum(param_4) from tab_history_tickets where state=90 and param_2=%s and type=%d and \
              application_time >= '%s' " %(dest_zone_id,type,begin_time)
    elif 2 == type :
        sql = "select sum(param_4) from tab_history_tickets where state=90 and param_1=%s and type=%d and \
              application_time >= '%s' " %(source_zone_id,type,begin_time)
    else :
        logger.error('type:%d is error' %type)
        return (False,[])
    logger.info('sql:%s' %sql)
    err,result = self.db.query(sql)
    # db.query signals failure with err == False
    if False == err:
        return (err,result)
    result_list = []
    for row in result:
        result_list.append(row)
    return (err, result_list)
def get_sum_files_num_about_zone_four(self,type,source_zone_id,dest_zone_id,begin_time,end_time):
    '''
    Sum param_4 over all tab_history_tickets rows with state=90 for the
    given zone filter, restricted to begin_time <= application_time <= end_time.

    Args:
        type: 11 -> filter on both zones, 1 -> destination zone only,
            2 -> source zone only; anything else is rejected.
        source_zone_id: source zone filter (types 11 and 2).
        dest_zone_id: destination zone filter (types 11 and 1).
        begin_time: inclusive lower bound on application_time.
        end_time: inclusive upper bound on application_time.
    Return:
        (err, rows): (False, []) on unknown type; otherwise err/rows as
        produced by self.db.query, rows copied into a fresh list.
    Raise:
    '''
    sql = ''
    # NOTE(review): arguments are interpolated straight into the SQL text
    # (no escaping) -- unsafe if any of them can come from untrusted input.
    if 11 == type :
        sql = "select sum(param_4) from tab_history_tickets where state=90 and param_1=%s and param_2=%s and type=%d and application_time>='%s' and \
              application_time<='%s' " %(source_zone_id,dest_zone_id,type,begin_time,end_time)
    elif 1 == type :
        sql = "select sum(param_4) from tab_history_tickets where state=90 and param_2=%s and type=%d and application_time>='%s' and \
              application_time<='%s' " %(dest_zone_id,type,begin_time,end_time)
    elif 2 == type :
        sql = "select sum(param_4) from tab_history_tickets where state=90 and param_1=%s and type=%d and application_time>='%s' and \
              application_time<='%s' " %(source_zone_id,type,begin_time,end_time)
    else :
        logger.error('type:%d is error' %type)
        return (False,[])
    logger.info('sql:%s' %sql)
    err,result = self.db.query(sql)
    # db.query signals failure with err == False
    if False == err:
        return (err,result)
    result_list = []
    for row in result:
        result_list.append(row)
    return (err, result_list)
def get_sum_files_size_about_user_one(self,user_id,type,source_zone_id,dest_zone_id):
    """Sum param_5 of state=90 tickets for one applicant, filtered by zone.

    type 11: match both param_1 (source zone) and param_2 (dest zone);
    type 1:  match param_2 only; type 2: match param_1 only.
    Returns (err, rows); (False, []) for an unknown type, and the raw
    (err, result) pair when the query itself reports failure.
    """
    sql = ''
    # NOTE(review): SQL is assembled with %-formatting -- injection-prone if
    # the arguments are untrusted; confirm callers sanitize them.
    # Fix: all three branches used a redundant chained assignment
    # (`sql = sql = "..."`); collapsed to a single assignment.
    if 11 == type :
        sql = "select sum(param_5) from tab_history_tickets where state=90 and applicant_id=%d and param_1=%s and param_2=%s and type=%d " \
              %(user_id,source_zone_id,dest_zone_id,type)
    elif 1 == type :
        sql = "select sum(param_5) from tab_history_tickets where state=90 and applicant_id=%d and param_2=%s and type=%d " \
              %(user_id,dest_zone_id,type)
    elif 2 == type :
        sql = "select sum(param_5) from tab_history_tickets where state=90 and applicant_id=%d and param_1=%s and type=%d " \
              %(user_id,source_zone_id,type)
    else :
        logger.error('type:%d is error' %type)
        return (False,[])
    logger.info('sql:%s' %sql)
    err,result = self.db.query(sql)
    if False == err:
        return (err,result)
    # materialize the query result into a plain list before returning
    result_list = []
    for row in result:
        result_list.append(row)
    return (err, result_list)
def get_sum_files_size_about_user_two(self,user_id,type,source_zone_id,dest_zone_id,end_time):
    """Sum param_5 of state=90 tickets for one applicant, up to end_time.

    type 11: match both param_1 (source zone) and param_2 (dest zone);
    type 1:  match param_2 only; type 2: match param_1 only.
    Returns (err, rows); (False, []) for an unknown type, and the raw
    (err, result) pair when the query itself reports failure.
    """
    sql = ''
    # NOTE(review): SQL is assembled with %-formatting -- injection-prone if
    # the arguments are untrusted; confirm callers sanitize them.
    if 11 == type :
        sql = "select sum(param_5) from tab_history_tickets where state=90 and applicant_id=%d and param_1=%s and param_2=%s and type=%d and \
              application_time <= '%s' " %(user_id,source_zone_id,dest_zone_id,type,end_time)
    elif 1 == type :
        sql = "select sum(param_5) from tab_history_tickets where state=90 and applicant_id=%d and param_2=%s and type=%d and \
              application_time <= '%s' " %(user_id,dest_zone_id,type,end_time)
    elif 2 == type :
        sql = "select sum(param_5) from tab_history_tickets where state=90 and applicant_id=%d and param_1=%s and type=%d and \
              application_time <= '%s' " %(user_id,source_zone_id,type,end_time)
    else :
        logger.error('type:%d is error' %type)
        return (False,[])
    logger.info('sql:%s' %sql)
    err,result = self.db.query(sql)
    if False == err:
        return (err,result)
    # materialize the query result into a plain list before returning
    result_list = []
    for row in result:
        result_list.append(row)
    return (err, result_list)
def get_sum_files_size_about_user_three(self,user_id,type,source_zone_id,dest_zone_id,begin_time):
    """Sum param_5 of state=90 tickets for one applicant, from begin_time onward.

    type 11: match both param_1 (source zone) and param_2 (dest zone);
    type 1:  match param_2 only; type 2: match param_1 only.
    Returns (err, rows); (False, []) for an unknown type, and the raw
    (err, result) pair when the query itself reports failure.
    """
    sql = ''
    # NOTE(review): SQL is assembled with %-formatting -- injection-prone if
    # the arguments are untrusted; confirm callers sanitize them.
    if 11 == type :
        sql = "select sum(param_5) from tab_history_tickets where state=90 and applicant_id=%d and param_1=%s and param_2=%s and type=%d and \
              application_time >= '%s' " %(user_id,source_zone_id,dest_zone_id,type,begin_time)
    elif 1 == type :
        sql = "select sum(param_5) from tab_history_tickets where state=90 and applicant_id=%d and param_2=%s and type=%d and \
              application_time >= '%s' " %(user_id,dest_zone_id,type,begin_time)
    elif 2 == type :
        sql = "select sum(param_5) from tab_history_tickets where state=90 and applicant_id=%d and param_1=%s and type=%d and \
              application_time >= '%s' " %(user_id,source_zone_id,type,begin_time)
    else :
        logger.error('type:%d is error' %type)
        return (False,[])
    logger.info('sql:%s' %sql)
    err,result = self.db.query(sql)
    if False == err:
        return (err,result)
    # materialize the query result into a plain list before returning
    result_list = []
    for row in result:
        result_list.append(row)
    return (err, result_list)
def get_sum_files_size_about_user_four(self,user_id,type,source_zone_id,dest_zone_id,begin_time,end_time):
    """Sum param_5 of state=90 tickets for one applicant within [begin_time, end_time].

    type 11: match both param_1 (source zone) and param_2 (dest zone);
    type 1:  match param_2 only; type 2: match param_1 only.
    Returns (err, rows); (False, []) for an unknown type, and the raw
    (err, result) pair when the query itself reports failure.
    """
    sql = ''
    # NOTE(review): SQL is assembled with %-formatting -- injection-prone if
    # the arguments are untrusted; confirm callers sanitize them.
    if 11 == type :
        sql = "select sum(param_5) from tab_history_tickets where state=90 and applicant_id=%d and param_1=%s and param_2=%s and type=%d and application_time>='%s' and \
              application_time<='%s' " %(user_id,source_zone_id,dest_zone_id,type,begin_time,end_time)
    elif 1 == type :
        sql = "select sum(param_5) from tab_history_tickets where state=90 and applicant_id=%d and param_2=%s and type=%d and application_time>='%s' and \
              application_time<='%s' " %(user_id,dest_zone_id,type,begin_time,end_time)
    elif 2 == type :
        sql = "select sum(param_5) from tab_history_tickets where state=90 and applicant_id=%d and param_1=%s and type=%d and application_time>='%s' and \
              application_time<='%s' " %(user_id,source_zone_id,type,begin_time,end_time)
    else :
        logger.error('type:%d is error' %type)
        return (False,[])
    logger.info('sql:%s' %sql)
    err,result = self.db.query(sql)
    if False == err:
        return (err,result)
    # materialize the query result into a plain list before returning
    result_list = []
    for row in result:
        result_list.append(row)
    return (err, result_list)
def get_sum_files_size_about_zone_one(self,type,source_zone_id,dest_zone_id):
    """Sum param_5 of state=90 tickets between zones, with no time window.

    type 11: match both param_1 (source zone) and param_2 (dest zone);
    type 1:  match param_2 only; type 2: match param_1 only.
    Returns (err, rows); (False, []) for an unknown type, and the raw
    (err, result) pair when the query itself reports failure.
    """
    if type == 11:
        sql = "select sum(param_5) from tab_history_tickets where state=90 and param_1=%s and param_2=%s and type = %d " %(source_zone_id,dest_zone_id,type)
    elif type == 1:
        sql = "select sum(param_5) from tab_history_tickets where state=90 and param_2=%s and type = %d " %(dest_zone_id,type)
    elif type == 2:
        sql = "select sum(param_5) from tab_history_tickets where state=90 and param_1=%s and type = %d " %(source_zone_id,type)
    else:
        logger.error('type:%d is error' %type)
        return (False,[])
    logger.info('sql:%s' %sql)
    err, rows = self.db.query(sql)
    if err == False:
        # propagate the failed query result untouched
        return (err, rows)
    # materialize the query result into a plain list before returning
    return (err, [row for row in rows])
def get_sum_files_size_about_zone_two(self,type,source_zone_id,dest_zone_id,end_time):
    """Sum param_5 of state=90 tickets between zones, up to end_time.

    type 11: match both param_1 (source zone) and param_2 (dest zone);
    type 1:  match param_2 only; type 2: match param_1 only.
    Returns (err, rows); (False, []) for an unknown type, and the raw
    (err, result) pair when the query itself reports failure.
    """
    sql = ''
    # NOTE(review): SQL is assembled with %-formatting -- injection-prone if
    # the arguments are untrusted; confirm callers sanitize them.
    if 11 == type :
        sql = "select sum(param_5) from tab_history_tickets where state=90 and param_1=%s and param_2=%s and type=%d and \
              application_time <= '%s' " %(source_zone_id,dest_zone_id,type,end_time)
    elif 1 == type :
        sql = "select sum(param_5) from tab_history_tickets where state=90 and param_2=%s and type=%d and \
              application_time <= '%s' " %(dest_zone_id,type,end_time)
    elif 2 == type :
        sql = "select sum(param_5) from tab_history_tickets where state=90 and param_1=%s and type=%d and \
              application_time <= '%s' " %(source_zone_id,type,end_time)
    else :
        logger.error('type:%d is error' %type)
        return (False,[])
    logger.info('sql:%s' %sql)
    err,result = self.db.query(sql)
    if False == err:
        return (err,result)
    # materialize the query result into a plain list before returning
    result_list = []
    for row in result:
        result_list.append(row)
    return (err, result_list)
def get_sum_files_size_about_zone_three(self,type,source_zone_id,dest_zone_id,begin_time):
    """Sum param_5 of state=90 tickets between zones, from begin_time onward.

    type 11: match both param_1 (source zone) and param_2 (dest zone);
    type 1:  match param_2 only; type 2: match param_1 only.
    Returns (err, rows); (False, []) for an unknown type, and the raw
    (err, result) pair when the query itself reports failure.
    """
    sql = ''
    # NOTE(review): SQL is assembled with %-formatting -- injection-prone if
    # the arguments are untrusted; confirm callers sanitize them.
    if 11 == type :
        sql = "select sum(param_5) from tab_history_tickets where state=90 and param_1=%s and param_2=%s and type=%d and \
              application_time >= '%s' " %(source_zone_id,dest_zone_id,type,begin_time)
    elif 1 == type :
        sql = "select sum(param_5) from tab_history_tickets where state=90 and param_2=%s and type=%d and \
              application_time >= '%s' " %(dest_zone_id,type,begin_time)
    elif 2 == type :
        sql = "select sum(param_5) from tab_history_tickets where state=90 and param_1=%s and type=%d and \
              application_time >= '%s' " %(source_zone_id,type,begin_time)
    else :
        logger.error('type:%d is error' %type)
        return (False,[])
    logger.info('sql:%s' %sql)
    err,result = self.db.query(sql)
    if False == err:
        return (err,result)
    # materialize the query result into a plain list before returning
    result_list = []
    for row in result:
        result_list.append(row)
    return (err, result_list)
def get_sum_files_size_about_zone_four(self,type,source_zone_id,dest_zone_id,begin_time,end_time):
    """Sum param_5 of state=90 tickets between zones within [begin_time, end_time].

    type 11: match both param_1 (source zone) and param_2 (dest zone);
    type 1:  match param_2 only; type 2: match param_1 only.
    Returns (err, rows); (False, []) for an unknown type, and the raw
    (err, result) pair when the query itself reports failure.
    """
    sql = ''
    # NOTE(review): SQL is assembled with %-formatting -- injection-prone if
    # the arguments are untrusted; confirm callers sanitize them.
    if 11 == type :
        sql = "select sum(param_5) from tab_history_tickets where state=90 and param_1=%s and param_2=%s and type=%d and application_time>='%s' and \
              application_time<='%s' " %(source_zone_id,dest_zone_id,type,begin_time,end_time)
    elif 1 == type :
        sql = "select sum(param_5) from tab_history_tickets where state=90 and param_2=%s and type=%d and application_time>='%s' and \
              application_time<='%s' " %(dest_zone_id,type,begin_time,end_time)
    elif 2 == type :
        sql = "select sum(param_5) from tab_history_tickets where state=90 and param_1=%s and type=%d and application_time>='%s' and \
              application_time<='%s' " %(source_zone_id,type,begin_time,end_time)
    else :
        logger.error('type:%d is error' %type)
        return (False,[])
    logger.info('sql:%s' %sql)
    err,result = self.db.query(sql)
    if False == err:
        return (err,result)
    # materialize the query result into a plain list before returning
    result_list = []
    for row in result:
        result_list.append(row)
    return (err, result_list)
if __name__ == '__main__':
    # Ad-hoc manual smoke tests (Python 2 print statements).  Most calls are
    # disabled by wrapping them in bare triple-quoted string blocks.
    # NOTE(review): DB host and credentials are hard-coded in source --
    # should be moved to configuration.
    db = tab_history_tickets('192.168.1.18','sboxweb','Sbox123456xZ','sbox_db')
    '''
    print 'get_sum_files_num_about_user_one'
    print db.get_sum_files_num_about_user_one(112,1,'',11)
    print db.get_sum_files_num_about_user_one(112,2,11,'')
    print db.get_sum_files_num_about_user_one(112,3,11,12)
    print db.get_sum_files_num_about_user_one(112,11,11,12)
    print 'get_sum_files_num_about_zone_one'
    print db.get_sum_files_num_about_zone_one(1,None,11)
    print db.get_sum_files_num_about_zone_one(2,11,None)
    print db.get_sum_files_num_about_zone_one(3,12,11)
    print db.get_sum_files_num_about_zone_one(11,11,12)
    print 'get_sum_files_num_about_user_two'
    print db.get_sum_files_num_about_user_two(112,1,None,11,'2015-02-13 10:10:10')
    print db.get_sum_files_num_about_user_two(112,2,11,None,'2015-02-13 10:10:10')
    print db.get_sum_files_num_about_user_two(112,3,11,12,'2015-02-13 10:10:10')
    print db.get_sum_files_num_about_user_two(112,11,11,12,'2015-02-13 10:10:10')
    print 'get_sum_files_num_about_zone_two'
    print db.get_sum_files_num_about_zone_two(1,None,11,'2015-02-13 10:10:10')
    print db.get_sum_files_num_about_zone_two(2,11,None,'2015-02-13 10:10:10')
    print db.get_sum_files_num_about_zone_two(3,11,12,'2015-02-13 10:10:10')
    print db.get_sum_files_num_about_zone_two(11,11,12,'2015-02-13 10:10:10')
    print 'get_sum_files_num_about_user_three'
    print db.get_sum_files_num_about_user_three(112,1,None,11,'2015-02-13 10:10:10')
    print db.get_sum_files_num_about_user_three(112,2,11,None,'2015-02-13 10:10:10')
    print db.get_sum_files_num_about_user_three(112,3,11,12,'2015-02-13 10:10:10')
    print db.get_sum_files_num_about_user_three(112,11,11,12,'2015-02-13 10:10:10')
    print 'get_sum_files_num_about_zone_three'
    print db.get_sum_files_num_about_zone_three(1,None,11,'2015-02-13 10:10:10')
    print db.get_sum_files_num_about_zone_three(2,11,None,'2015-02-13 10:10:10')
    print db.get_sum_files_num_about_zone_three(3,11,12,'2015-02-13 10:10:10')
    print db.get_sum_files_num_about_zone_three(11,11,12,'2015-02-13 10:10:10')
    '''
    print 'get_sum_files_num_about_user_four'
    print db.get_sum_files_num_about_user_four(2,1,None,11,'20150212 00:00:00','20150214 00:00:00')
    #print db.get_sum_files_num_about_user_four(112,2,11,None,'2015-02-13 10:10:10','2015-02-15 10:10:10')
    #print db.get_sum_files_num_about_user_four(112,3,11,12,'2015-02-13 10:10:10','2015-02-15 10:10:10')
    #print db.get_sum_files_num_about_user_four(112,11,11,12,'2015-02-13 10:10:10','2015-02-15 10:10:10')
    '''
    print 'get_sum_files_num_about_zone_four'
    print db.get_sum_files_num_about_zone_four(1,None,11,'2015-02-13 10:10:10','2015-02-15 10:10:10')
    print db.get_sum_files_num_about_zone_four(2,11,None,'2015-02-13 10:10:10','2015-02-15 10:10:10')
    print db.get_sum_files_num_about_zone_four(3,11,12,'2015-02-13 10:10:10','2015-02-15 10:10:10')
    print db.get_sum_files_num_about_zone_four(11,11,12,'2015-02-13 10:10:10','2015-02-15 10:10:10')
    print 'get_sum_files_size_about_user_one'
    print db.get_sum_files_size_about_user_one(112,1,'',11)
    print db.get_sum_files_size_about_user_one(112,2,11,'')
    print db.get_sum_files_size_about_user_one(112,3,11,12)
    print db.get_sum_files_size_about_user_one(112,11,11,12)
    print 'get_sum_files_size_about_zone_one'
    print db.get_sum_files_size_about_zone_one(1,None,11)
    print db.get_sum_files_size_about_zone_one(2,11,None)
    print db.get_sum_files_size_about_zone_one(3,12,11)
    print db.get_sum_files_size_about_zone_one(11,11,12)
    print 'get_sum_files_size_about_user_two'
    print db.get_sum_files_size_about_user_two(112,1,None,11,'2015-02-13 10:10:10')
    print db.get_sum_files_size_about_user_two(112,2,11,None,'2015-02-13 10:10:10')
    print db.get_sum_files_size_about_user_two(112,3,11,12,'2015-02-13 10:10:10')
    print db.get_sum_files_size_about_user_two(112,11,11,12,'2015-02-13 10:10:10')
    print 'get_sum_files_size_about_zone_two'
    print db.get_sum_files_size_about_zone_two(1,None,11,'2015-02-13 10:10:10')
    print db.get_sum_files_size_about_zone_two(2,11,None,'2015-02-13 10:10:10')
    print db.get_sum_files_size_about_zone_two(3,11,12,'2015-02-13 10:10:10')
    print db.get_sum_files_size_about_zone_two(11,11,12,'2015-02-13 10:10:10')
    print 'get_sum_files_size_about_user_three'
    print db.get_sum_files_size_about_user_three(112,1,None,11,'2015-02-13 10:10:10')
    print db.get_sum_files_size_about_user_three(112,2,11,None,'2015-02-13 10:10:10')
    print db.get_sum_files_size_about_user_three(112,3,11,12,'2015-02-13 10:10:10')
    print db.get_sum_files_size_about_user_three(112,11,11,12,'2015-02-13 10:10:10')
    print 'get_sum_files_size_about_zone_three'
    print db.get_sum_files_size_about_zone_three(1,None,11,'2015-02-13 10:10:10')
    print db.get_sum_files_size_about_zone_three(2,11,None,'2015-02-13 10:10:10')
    print db.get_sum_files_size_about_zone_three(3,11,12,'2015-02-13 10:10:10')
    print db.get_sum_files_size_about_zone_three(11,11,12,'2015-02-13 10:10:10')
    '''
    print 'get_sum_files_size_about_user_four'
    print db.get_sum_files_size_about_user_four(112,1,None,11,'2015-02-13 10:10:10','2015-02-15 10:10:10')
    print db.get_sum_files_size_about_user_four(112,2,11,None,'2015-02-13 10:10:10','2015-02-15 10:10:10')
    print db.get_sum_files_size_about_user_four(112,3,11,12,'2015-02-13 10:10:10','2015-02-15 10:10:10')
    print db.get_sum_files_size_about_user_four(112,11,11,12,'2015-02-13 10:10:10','2015-02-15 10:10:10')
    print 'get_sum_files_size_about_zone_four'
    print db.get_sum_files_size_about_zone_four(1,None,11,'2015-02-13 10:10:10','2015-02-15 10:10:10')
    print db.get_sum_files_size_about_zone_four(2,11,None,'2015-02-13 10:10:10','2015-02-15 10:10:10')
    print db.get_sum_files_size_about_zone_four(3,11,12,'2015-02-13 10:10:10','2015-02-15 10:10:10')
    print db.get_sum_files_size_about_zone_four(11,11,12,'2015-02-13 10:10:10','2015-02-15 10:10:10')
| 46.664547
| 715
| 0.62609
| 4,641
| 29,352
| 3.666882
| 0.027796
| 0.029146
| 0.062052
| 0.048889
| 0.885533
| 0.885533
| 0.88189
| 0.879774
| 0.87478
| 0.860266
| 0
| 0.07497
| 0.252453
| 29,352
| 628
| 716
| 46.738854
| 0.70062
| 0.010221
| 0
| 0.703704
| 0
| 0.098039
| 0.113858
| 0.016313
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.004357
| 0.006536
| null | null | 0.026144
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
3e8c0358f580389b69e5c5b1a4952c4ae4a8d079
| 4,527
|
py
|
Python
|
tests/libraries/position/test_oi.py
|
overlay-market/v1-core
|
e18fabd242f21c243a555712d3f08ca059941f41
|
[
"MIT"
] | 3
|
2022-02-17T16:11:39.000Z
|
2022-03-10T23:46:19.000Z
|
tests/libraries/position/test_oi.py
|
overlay-market/v1-core
|
e18fabd242f21c243a555712d3f08ca059941f41
|
[
"MIT"
] | 10
|
2022-01-25T21:49:20.000Z
|
2022-03-31T00:32:29.000Z
|
tests/libraries/position/test_oi.py
|
overlay-market/v1-core
|
e18fabd242f21c243a555712d3f08ca059941f41
|
[
"MIT"
] | 2
|
2022-01-21T01:04:54.000Z
|
2022-02-23T08:38:20.000Z
|
def test_oi_current(position):
    """oiCurrent should return the position's pro-rata share of total oi."""
    WAD = 1_000_000_000_000_000_000  # fixed-point 1.0 (1e18)

    mid_price = 100 * WAD
    entry_price = 101 * WAD
    notional = 10 * WAD
    debt = 2 * WAD
    fraction = WAD  # full position

    # NOTE: the oi field of pos holds the position's oi *shares*
    # NOTE: mid_ratio tests in test_entry_price.py
    oi_shares = int((notional / mid_price) * WAD)  # 0.1
    mid_ratio = position.calcEntryToMidRatio(entry_price, mid_price)
    pos = (notional, debt, mid_ratio, True, False, oi_shares)

    # total oi lost 3 due to funding, against 15 outstanding shares
    total_oi = 12 * WAD
    total_oi_shares = 15 * WAD

    # oi should be the position's pro-rata share of total oi
    expect = int((total_oi * oi_shares / total_oi_shares) * (fraction / 1e18))
    actual = position.oiCurrent(pos, fraction, total_oi, total_oi_shares)
    assert expect == actual
def test_oi_current_when_fraction_less_than_one(position):
    """oiCurrent should scale the pro-rata oi by a fractional unwind amount."""
    WAD = 1_000_000_000_000_000_000  # fixed-point 1.0 (1e18)

    mid_price = 100 * WAD
    entry_price = 101 * WAD
    notional = 10 * WAD
    debt = 2 * WAD
    fraction = WAD // 4  # 0.25 of the position

    # NOTE: the oi field of pos holds the position's oi *shares*
    # NOTE: mid_ratio tests in test_entry_price.py
    oi_shares = int((notional / mid_price) * WAD)  # 0.1
    mid_ratio = position.calcEntryToMidRatio(entry_price, mid_price)
    pos = (notional, debt, mid_ratio, True, False, oi_shares)

    # total oi lost 3 due to funding, against 15 outstanding shares
    total_oi = 12 * WAD
    total_oi_shares = 15 * WAD

    # oi should be the pro-rata share of total oi, scaled by fraction
    expect = int((total_oi * oi_shares / total_oi_shares) * (fraction / 1e18))
    actual = position.oiCurrent(pos, fraction, total_oi, total_oi_shares)
    assert expect == actual
def test_oi_current_when_total_oi_or_oi_shares_zero(position):
    """
    Tests four possible cases of when oi should return 0

    Cases:
    1. total_oi = 0; oi_shares, total_oi_shares != 0
    2. oi_shares = 0; total_oi, total_oi_shares != 0
    3. oi_shares, total_oi, total_oi_shares = 0
    4. oi_shares, total_oi = 0; total_oi_shares != 0
    """
    # NOTE: oi = pos.oi_shares
    # Shared fixed-point inputs (18-decimal wad values)
    is_long = True
    liquidated = False
    mid_price = 100000000000000000000  # 100
    entry_price = 101000000000000000000  # 101
    debt = 2000000000000000000  # 2
    fraction = 1000000000000000000  # 1
    # 1. lost it all due to funding (t -> infty)
    notional = 10000000000000000000  # 10
    total_oi = 0  # 0
    total_oi_shares = 15000000000000000000  # 15
    # NOTE: mid_ratio tests in test_entry_price.py
    oi = int((notional / mid_price) * 1000000000000000000)  # 0.1
    mid_ratio = position.calcEntryToMidRatio(entry_price, mid_price)
    pos = (notional, debt, mid_ratio, is_long, liquidated, oi)
    # check oi is zero
    expect = 0
    actual = position.oiCurrent(pos, fraction, total_oi, total_oi_shares)
    assert expect == actual
    # 2. unwound all of position oi
    notional = 0  # 0
    total_oi = 4000000000000000000  # 4
    total_oi_shares = 5000000000000000000  # 5
    # NOTE: mid_ratio tests in test_entry_price.py
    oi = int((notional / mid_price) * 1000000000000000000)  # 0 (notional == 0)
    mid_ratio = position.calcEntryToMidRatio(entry_price, mid_price)
    pos = (notional, debt, mid_ratio, is_long, liquidated, oi)
    expect = 0
    actual = position.oiCurrent(pos, fraction, total_oi, total_oi_shares)
    assert expect == actual
    # 3. all oi has been unwound
    notional = 0  # 0
    total_oi = 0  # 0
    total_oi_shares = 0  # 0
    # NOTE: mid_ratio tests in test_entry_price.py
    oi = int((notional / mid_price) * 1000000000000000000)  # 0 (notional == 0)
    mid_ratio = position.calcEntryToMidRatio(entry_price, mid_price)
    pos = (notional, debt, mid_ratio, is_long, liquidated, oi)
    expect = 0
    actual = position.oiCurrent(pos, fraction, total_oi, total_oi_shares)
    assert expect == actual
    # 4. position has been liquidated
    notional = 0  # 0
    total_oi = 0  # 0
    total_oi_shares = 5000000000000000000  # 5
    liquidated = True
    # NOTE: mid_ratio tests in test_entry_price.py
    oi = int((notional / mid_price) * 1000000000000000000)  # 0 (notional == 0)
    mid_ratio = position.calcEntryToMidRatio(entry_price, mid_price)
    pos = (notional, debt, mid_ratio, is_long, liquidated, oi)
    expect = 0
    actual = position.oiCurrent(pos, fraction, total_oi, total_oi_shares)
    assert expect == actual
| 37.106557
| 73
| 0.694941
| 598
| 4,527
| 5.023411
| 0.133779
| 0.095539
| 0.077896
| 0.049933
| 0.875499
| 0.839547
| 0.801598
| 0.794274
| 0.794274
| 0.794274
| 0
| 0.190436
| 0.223989
| 4,527
| 121
| 74
| 37.413223
| 0.664674
| 0.222001
| 0
| 0.907895
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.078947
| 1
| 0.039474
| false
| 0
| 0
| 0
| 0.039474
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e4e97cb8daf1a8ac70ce6bc5212015bee80082d8
| 184
|
py
|
Python
|
src/htmxl/compose/__init__.py
|
schireson/htmxl
|
d4adef9fe0630f39c3664d3913bbbbe3db3ec069
|
[
"MIT"
] | 2
|
2021-08-11T15:15:29.000Z
|
2022-03-20T04:04:54.000Z
|
src/htmxl/compose/__init__.py
|
schireson/htmxl
|
d4adef9fe0630f39c3664d3913bbbbe3db3ec069
|
[
"MIT"
] | 5
|
2021-08-18T20:54:32.000Z
|
2022-01-24T20:06:46.000Z
|
src/htmxl/compose/__init__.py
|
schireson/htmxl
|
d4adef9fe0630f39c3664d3913bbbbe3db3ec069
|
[
"MIT"
] | null | null | null |
# flake8: noqa
from htmxl.compose.cell import Cell
from htmxl.compose.style import Styler
from htmxl.compose.workbook import Workbook, Worksheet
from htmxl.compose.write import Writer
| 30.666667
| 54
| 0.831522
| 27
| 184
| 5.666667
| 0.481481
| 0.235294
| 0.418301
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006098
| 0.108696
| 184
| 5
| 55
| 36.8
| 0.926829
| 0.065217
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
5fcaf15bdcf9e7f190b8316e5f0cb13b1dd48ed2
| 134
|
py
|
Python
|
vika/datasheet/__init__.py
|
Borye/vika.py
|
7b4ac29d308e00e2bbfc37dbcaa3f6c7a4a2236f
|
[
"MIT"
] | 39
|
2020-10-27T13:17:37.000Z
|
2022-03-17T11:04:39.000Z
|
vika/datasheet/__init__.py
|
Borye/vika.py
|
7b4ac29d308e00e2bbfc37dbcaa3f6c7a4a2236f
|
[
"MIT"
] | 9
|
2020-10-27T14:44:48.000Z
|
2022-01-19T04:46:58.000Z
|
vika/datasheet/__init__.py
|
Borye/vika.py
|
7b4ac29d308e00e2bbfc37dbcaa3f6c7a4a2236f
|
[
"MIT"
] | 8
|
2020-10-27T15:12:34.000Z
|
2022-01-19T14:23:15.000Z
|
from .datasheet import *
from .field_manager import *
from .view_manager import *
from .record import *
from .record_manager import *
| 22.333333
| 29
| 0.776119
| 18
| 134
| 5.611111
| 0.388889
| 0.39604
| 0.336634
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.149254
| 134
| 5
| 30
| 26.8
| 0.885965
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
3988a7d8e72864b065f9ee42e446eb06e5ebb9d8
| 2,826
|
py
|
Python
|
data/banners.py
|
federico123579/BigBrother
|
9b61dcf8fd84b6491690ae83bf695bc2b64b17b3
|
[
"MIT"
] | 29
|
2017-03-14T11:48:54.000Z
|
2022-02-22T02:01:25.000Z
|
data/banners.py
|
federico123579/BigBrother
|
9b61dcf8fd84b6491690ae83bf695bc2b64b17b3
|
[
"MIT"
] | null | null | null |
data/banners.py
|
federico123579/BigBrother
|
9b61dcf8fd84b6491690ae83bf695bc2b64b17b3
|
[
"MIT"
] | 10
|
2017-07-16T18:58:05.000Z
|
2021-09-27T23:45:24.000Z
|
#!/usr/bin/env python
# ASCII-art "eye" banner, built line by line with ANSI SGR escape codes:
# \033[01m = bold, \033[91m = bright red, \033[94m = bright blue,
# \033[0m = reset.  The exact spacing inside each literal is the artwork --
# do not reformat these strings.
eye_banner = '\n'
eye_banner += '''\033[01m w*"""^q_ 0 \033[91mp" F F _F p^^"_\033[0m\033[01m__jM j F \033[0m\n'''
eye_banner += '''\033[01m _,,__ q \033[91mx" [ F J_ J P w""""\033[0m\033[01m_ _," 9" \033[0m\n'''
eye_banner += '''\033[01m w" "M_ \033[91m@ `, ",_!u_9__L F # r^""^^\033[0m\033[01m" f j" \033[0m\n'''
eye_banner += '''\033[01m _,,__ \033[91mB 9_ "v,\033[0m\033[01m\033[94m_Zp*"""""^@u# P\033[0m\033[01m\033[91m _m^"^u\033[0m\033[01m,a*" j \033[0m\n'''
eye_banner += '''\033[01m _F `\033[91m4 A_ "*-\033[0m\033[01m\033[94map" ^Lj"\033[0m\033[01m\033[91m _smu,\033[0m\033[01m _* , \033[0m\n'''
eye_banner += '''\033[01m "__,,_ \033[91mjL -- \033[0m\033[01m\033[94mm< 5j!\033[0m\033[01m\033[91m ____*\033[0m\033[01m-*^ & \033[0m\n'''
eye_banner += '''\033[01m p" \033[91m9p`^u,\033[0m\033[01m\033[94mav' `,*\033[0m\033[01m\033[91m""""q\033[0m\033[01m_ _x" \033[0m\n'''
eye_banner += '''\033[01m q _____\033[91m!L___\033[0m\033[01m\033[94m,M Lsr\033[0m\033[01m\033[91m--x_\033[0m\033[01m"^^`""qP \033[0m\n'''
eye_banner += '''\033[01m y^ \033[91m"_ \033[0m\033[01m\033[94m_J L_,\033[0m\033[01m\033[91m,_ ?_\033[0m\033[01m _# \033[0m\n'''
eye_banner += '''\033[01m F __,_\033[91m`^---\033[0m\033[01m\033[94m"jr j__\033[0m\033[01m\033[91m_ ""y"\033[0m\033[01m"^^""_, \033[0m\n'''
eye_banner += '''\033[01m j! \033[91m?s_, \033[0m\033[01m\033[94m*"jp g""\033[0m\033[01m\033[91m""^q_b\033[0m\033[01m_ _F \033[0m\n'''
eye_banner += '''\033[01m L _,w\033[91mma_ _x\033[0m\033[01m\033[94m"jN__ __d""\033[0m\033[01m\033[91m"^c F\033[0m\033[01m "-^"" \033[0m\n'''
eye_banner += '''\033[01m " J" \033[91m""" _F \033[0m\033[01m\033[94m99Nu______g**L_"\033[0m\033[01m\033[91m"s 4\033[0m\033[01m A, _P \033[0m\n'''
eye_banner += '''\033[01m j_ _-^"\033[91m"mw^" _' \033[0m\033[01m\033[94m# 9"N""L ^,\033[0m\033[01m\033[91m "s b #\033[0m\033[01m "--^" \033[0m\n'''
eye_banner += '''\033[01m @ j" _\033[91mv-wa+" ," j # p r j qF\033[0m\033[01m "q_ _* \033[0m\n'''
eye_banner += '''\033[01m 0_ f _\033[91mm-**" _F _F L _FjP ?,\033[0m\033[01m "^"" \033[0m\n'''
eye_banner += '''\033[01m # J j" \033[91mp"""p-^ x^ p" d\033[0m\033[01m_ -q__a- \033[0m\n'''
eye_banner += '''\033[01m `q # f j 4 b ^, \033[0m\n'''
eye_banner += '''\033[01m F 9L_ b 1 4 `u_ "-^" \033[0m\n'''
eye_banner += '''\033[01m 0 `+a_ W__ 9,___"^^"+ \033[0m\n'''
eye_banner += '''\033[01m "" " \033[0m\n'''
| 117.75
| 161
| 0.5
| 511
| 2,826
| 2.528376
| 0.121331
| 0.278638
| 0.241486
| 0.332043
| 0.822755
| 0.708204
| 0.549536
| 0.504644
| 0.297214
| 0.236068
| 0
| 0.349745
| 0.238146
| 2,826
| 24
| 162
| 117.75
| 0.250348
| 0.007077
| 0
| 0
| 0
| 0.772727
| 0.836422
| 0.308624
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
39dc15c00e3b99c157480cb1f28edd6d8423eb30
| 37
|
py
|
Python
|
starry_process/app/__init__.py
|
arfon/starry_process
|
72b2a540e7e4fdb2e6af61507efa1c9861d5c919
|
[
"MIT"
] | 13
|
2020-04-14T17:47:28.000Z
|
2022-03-16T15:19:48.000Z
|
starry_process/app/__init__.py
|
arfon/starry_process
|
72b2a540e7e4fdb2e6af61507efa1c9861d5c919
|
[
"MIT"
] | 22
|
2020-09-23T20:33:22.000Z
|
2022-02-07T17:38:09.000Z
|
starry_process/app/__init__.py
|
arfon/starry_process
|
72b2a540e7e4fdb2e6af61507efa1c9861d5c919
|
[
"MIT"
] | 8
|
2020-04-14T17:47:44.000Z
|
2022-02-06T16:39:47.000Z
|
from .entry_point import entry_point
| 18.5
| 36
| 0.864865
| 6
| 37
| 5
| 0.666667
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108108
| 37
| 1
| 37
| 37
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
39e8d70d0db56e6782c597ee9248adef27ae5371
| 196
|
py
|
Python
|
src/alchemtest/namd/__init__.py
|
xiki-tempula/alchemtest
|
e85f7cb16233a91a2b4528da35abad65aaadef57
|
[
"BSD-3-Clause"
] | 7
|
2017-04-19T08:38:25.000Z
|
2019-12-18T18:50:42.000Z
|
src/alchemtest/namd/__init__.py
|
xiki-tempula/alchemtest
|
e85f7cb16233a91a2b4528da35abad65aaadef57
|
[
"BSD-3-Clause"
] | 51
|
2017-03-24T20:18:14.000Z
|
2021-12-29T00:10:06.000Z
|
src/alchemtest/namd/__init__.py
|
xiki-tempula/alchemtest
|
e85f7cb16233a91a2b4528da35abad65aaadef57
|
[
"BSD-3-Clause"
] | 10
|
2017-10-31T17:16:25.000Z
|
2021-08-19T19:46:49.000Z
|
"""NAMD molecular dynamics simulation datasets.
"""
from .access import load_tyr2ala
from .access import load_idws
from .access import load_restarted
from .access import load_restarted_reversed
| 21.777778
| 47
| 0.821429
| 26
| 196
| 6
| 0.5
| 0.25641
| 0.410256
| 0.512821
| 0.371795
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005814
| 0.122449
| 196
| 8
| 48
| 24.5
| 0.901163
| 0.22449
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
f2e9ede8d8fec3cea451b92b8c2eacbfae574796
| 5,549
|
py
|
Python
|
src/abaqus/StepOutput/IntegratedOutputSection.py
|
Haiiliin/PyAbaqus
|
f20db6ebea19b73059fe875a53be370253381078
|
[
"MIT"
] | 7
|
2022-01-21T09:15:45.000Z
|
2022-02-15T09:31:58.000Z
|
src/abaqus/StepOutput/IntegratedOutputSection.py
|
Haiiliin/PyAbaqus
|
f20db6ebea19b73059fe875a53be370253381078
|
[
"MIT"
] | null | null | null |
src/abaqus/StepOutput/IntegratedOutputSection.py
|
Haiiliin/PyAbaqus
|
f20db6ebea19b73059fe875a53be370253381078
|
[
"MIT"
] | null | null | null |
from abaqusConstants import *
from ..Region.Region import Region
class IntegratedOutputSection:
"""The IntegratedOutputSection object specifies parameters used for integrated output.
Notes
-----
This object can be accessed by:
.. code-block:: python
import step
mdb.models[name].integratedOutputSections[name]
"""
def __init__(self, name: str, surface: Region = Region(), refPoint: SymbolicConstant = None,
refPointAtCenter: Boolean = OFF, refPointMotion: SymbolicConstant = INDEPENDENT,
localCsys: str = None, projectOrientation: Boolean = OFF):
"""This method creates an IntegratedOutputSection object.
Notes
-----
This function can be accessed by:
.. code-block:: python
mdb.models[name].IntegratedOutputSection
Parameters
----------
name
A String specifying the repository key.
surface
A Region object specifying the surface over which the output is based.
refPoint
None or a Region object specifying the anchor point about which the integrated moment
over the output region is computed or the SymbolicConstant None representing the global
origin. The default value is None.
refPointAtCenter
A Boolean specifying that the *refPoint* be adjusted so that it coincides with the
center of the output region in the initial configuration. This argument is valid only
when you include the *refPoint* argument. The default value is OFF.
refPointMotion
A SymbolicConstant specifying how to relate the motion of *refPoint* to the average
motion of the output region. A value of INDEPENDENT will allow the *refPoint* to move
independent of the output region. A value of AVERAGE_TRANSLATION will set the
displacement of the *refPoint* equal to the average translation of the output region. A
value of AVERAGE will set the displacement and rotation of the *refPoint* equal to the
average translation of the output region. The default value is INDEPENDENT.This argument
is valid only when you include the *refPoint* argument.
localCsys
None or a DatumCsys object specifying the local coordinate system used to express vector
output. If *localCsys*=None, the degrees of freedom are defined in the global coordinate
system. The default value is None.
projectOrientation
A Boolean specifying that the coordinate system be projected onto the *surface* such
that the 1–axis is normal to the *surface*. Projection onto a planar *surface* is such
that the 1-axis is normal to the surface, and a projection onto a nonplanar *surface* is
such that a least-squares fit surface will be used. The default value is OFF.
Returns
-------
An IntegratedOutputSection object.
"""
pass
def setValues(self, surface: Region = Region(), refPoint: SymbolicConstant = None,
              refPointAtCenter: Boolean = OFF, refPointMotion: SymbolicConstant = INDEPENDENT,
              localCsys: str = None, projectOrientation: Boolean = OFF):
    """Modify the IntegratedOutputSection object.

    Parameters
    ----------
    surface
        A Region object specifying the surface over which the output is based.
    refPoint
        None or a Region object giving the anchor point about which the
        integrated moment over the output region is computed, or the
        SymbolicConstant None representing the global origin. The default
        value is None.
    refPointAtCenter
        A Boolean requesting that *refPoint* be adjusted to coincide with the
        center of the output region in the initial configuration. Valid only
        when the *refPoint* argument is included. The default value is OFF.
    refPointMotion
        A SymbolicConstant relating the motion of *refPoint* to the average
        motion of the output region: INDEPENDENT lets *refPoint* move freely;
        AVERAGE_TRANSLATION sets its displacement to the region's average
        translation; AVERAGE sets both its displacement and rotation from the
        region's average translation. Valid only when the *refPoint* argument
        is included. The default value is INDEPENDENT.
    localCsys
        None or a DatumCsys object giving the local coordinate system used to
        express vector output. With *localCsys*=None the degrees of freedom
        are defined in the global coordinate system. The default value is
        None.
    projectOrientation
        A Boolean requesting that the coordinate system be projected onto
        *surface* so that the 1-axis is normal to it; projection onto a
        nonplanar *surface* uses a least-squares fit surface. The default
        value is OFF.
    """
    pass
| 52.349057
| 101
| 0.663363
| 677
| 5,549
| 5.431315
| 0.186115
| 0.034267
| 0.048953
| 0.046233
| 0.867011
| 0.867011
| 0.867011
| 0.850694
| 0.850694
| 0.850694
| 0
| 0.001022
| 0.295008
| 5,549
| 105
| 102
| 52.847619
| 0.938395
| 0.764282
| 0
| 0.545455
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.181818
| false
| 0.181818
| 0.181818
| 0
| 0.454545
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
8438667409e1b0827b6bb77c2c7733a7f1288d86
| 4,861
|
py
|
Python
|
tests/session/providers/testsessionserviceprovider.py
|
LowieHuyghe/edmunds-python
|
236d087746cb8802a8854b2706b8d3ff009e9209
|
[
"Apache-2.0"
] | 4
|
2017-09-07T13:39:50.000Z
|
2018-05-31T16:14:50.000Z
|
tests/session/providers/testsessionserviceprovider.py
|
LowieHuyghe/edmunds-python
|
236d087746cb8802a8854b2706b8d3ff009e9209
|
[
"Apache-2.0"
] | 103
|
2017-03-19T15:58:21.000Z
|
2018-07-11T20:36:17.000Z
|
tests/session/providers/testsessionserviceprovider.py
|
LowieHuyghe/edmunds-python
|
236d087746cb8802a8854b2706b8d3ff009e9209
|
[
"Apache-2.0"
] | 2
|
2017-10-14T15:20:11.000Z
|
2018-04-20T09:55:44.000Z
|
from tests.testcase import TestCase
from flask.sessions import SecureCookieSession
from werkzeug.local import LocalProxy
from edmunds.session.sessionmanager import SessionManager
class TestSessionServiceProvider(TestCase):
    """
    Test the Session Service Provider
    """

    def _write_session_config(self, enabled):
        """
        Write an app config declaring a single SessionCookie session instance.
        Previously this 13-line config block was duplicated verbatim in each test.
        :param enabled: whether the session extension is enabled
        :return:        the generated secret key
        """
        secret_key = self.rand_str(24)
        self.write_config([
            "from edmunds.session.drivers.sessioncookie import SessionCookie \n",
            "SECRET_KEY = '%s'\n" % secret_key,
            "APP = { \n",
            " 'session': { \n",
            " 'enabled': %s, \n" % enabled,
            " 'instances': [ \n",
            " { \n",
            " 'name': 'sessioncookie',\n",
            " 'driver': SessionCookie,\n",
            " }, \n",
            " ], \n",
            " }, \n",
            "} \n",
        ])
        return secret_key

    def test_not_enabled(self):
        """
        Test not enabled
        :return: void
        """
        secret_key = self._write_session_config(False)
        # Create app
        app = self.create_application()
        self.assert_equal(secret_key, app.secret_key)
        # Extension must not be registered when the session config is disabled
        self.assert_not_in('edmunds.session', app.extensions)

    def test_outside_context(self):
        """
        Test outside context
        :return: void
        """
        secret_key = self._write_session_config(True)
        # Create app
        app = self.create_application()
        self.assert_equal(secret_key, app.secret_key)
        # Test extension
        self.assert_in('edmunds.session', app.extensions)
        self.assert_is_not_none(app.extensions['edmunds.session'])
        self.assert_is_instance(app.extensions['edmunds.session'], SessionManager)
        self.assert_is_instance(app.extensions['edmunds.session'].get(), LocalProxy)
        # Resolving the proxy outside a request context must raise
        with self.assert_raises_regexp(RuntimeError, 'Working outside of request context'):
            self.assert_is_instance(app.extensions['edmunds.session'].get()._get_current_object(), SecureCookieSession)
        self.assert_is_instance(app.extensions['edmunds.session'].get('sessioncookie'), LocalProxy)
        with self.assert_raises_regexp(RuntimeError, 'Working outside of request context'):
            self.assert_is_instance(app.extensions['edmunds.session'].get('sessioncookie')._get_current_object(), SecureCookieSession)
        # Unknown instance names must raise as well
        with self.assert_raises_regexp(RuntimeError, '[Nn]o instance'):
            app.extensions['edmunds.session'].get('sessioncookie2')

    def test_register(self):
        """
        Test register
        :return: void
        """
        rule = '/' + self.rand_str(20)
        secret_key = self._write_session_config(True)
        # Create app
        app = self.create_application()
        self.assert_equal(secret_key, app.secret_key)
        # Test extension
        self.assert_in('edmunds.session', app.extensions)
        self.assert_is_not_none(app.extensions['edmunds.session'])
        self.assert_is_instance(app.extensions['edmunds.session'], SessionManager)
        # Inside a request context the proxy resolves to a SecureCookieSession
        with app.test_request_context(rule):
            self.assert_is_instance(app.extensions['edmunds.session'].get(), LocalProxy)
            self.assert_is_instance(app.extensions['edmunds.session'].get()._get_current_object(), SecureCookieSession)
            self.assert_is_instance(app.extensions['edmunds.session'].get('sessioncookie'), LocalProxy)
            self.assert_is_instance(app.extensions['edmunds.session'].get('sessioncookie')._get_current_object(), SecureCookieSession)
            with self.assert_raises_regexp(RuntimeError, '[Nn]o instance'):
                app.extensions['edmunds.session'].get('sessioncookie2')
| 37.10687
| 134
| 0.545361
| 459
| 4,861
| 5.590414
| 0.152505
| 0.085737
| 0.109119
| 0.147311
| 0.840218
| 0.828917
| 0.828917
| 0.828917
| 0.828917
| 0.828917
| 0
| 0.00304
| 0.323185
| 4,861
| 130
| 135
| 37.392308
| 0.7769
| 0.054927
| 0
| 0.858824
| 0
| 0
| 0.297877
| 0.024804
| 0
| 0
| 0
| 0
| 0.258824
| 1
| 0.035294
| false
| 0
| 0.082353
| 0
| 0.129412
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ffbff062d388992e4dd93ccfbc4c5b9c6868f678
| 3,958
|
py
|
Python
|
isimip_data/metadata/tests/test_views.py
|
ISI-MIP/isimip-data
|
a0e4772362cc60db91e7689ec397840dcaaacddb
|
[
"MIT"
] | 3
|
2020-02-10T10:13:17.000Z
|
2021-12-21T09:10:50.000Z
|
isimip_data/metadata/tests/test_views.py
|
ISI-MIP/isimip-data
|
a0e4772362cc60db91e7689ec397840dcaaacddb
|
[
"MIT"
] | 17
|
2020-02-10T16:09:12.000Z
|
2021-07-02T09:03:37.000Z
|
isimip_data/metadata/tests/test_views.py
|
ISI-MIP/isimip-data
|
a0e4772362cc60db91e7689ec397840dcaaacddb
|
[
"MIT"
] | null | null | null |
from django.urls import reverse
from isimip_data.metadata.models import Dataset, File, Resource
def _search(client, query):
    """GET the metadata search view with *query* appended and return the response."""
    return client.get(reverse('metadata') + '?query=' + query)


def _view_status(client, view, arg):
    """GET the named *view* with a single URL argument; return the HTTP status code."""
    return client.get(reverse(view, args=[arg])).status_code


def test_metadata(db, client):
    assert client.get(reverse('metadata')).status_code == 200


def test_metadata_dataset(db, client):
    # Searching for a known dataset id redirects to the dataset page.
    dataset = Dataset.objects.using('metadata').first()
    response = _search(client, str(dataset.id))
    assert response.status_code == 302
    assert response.url == '/datasets/{}/'.format(dataset.id)


def test_metadata_file(db, client):
    # Searching for a known file id redirects to the file page.
    file = File.objects.using('metadata').first()
    response = _search(client, str(file.id))
    assert response.status_code == 302
    assert response.url == '/files/{}/'.format(file.id)


def test_metadata_not_found(db, client):
    # A well-formed UUID that matches nothing still renders (200, no redirect).
    assert _search(client, 'cf20cfa9-55ba-4c3c-9f00-486c8c259dd5').status_code == 200


def test_metadata_uuid_wrong(db, client):
    # A query that is not a UUID at all still renders (200, no redirect).
    assert _search(client, 'wrong').status_code == 200


def test_dataset_id(db, client):
    dataset = Dataset.objects.using('metadata').filter(target=None).first()
    assert _view_status(client, 'dataset', dataset.id) == 200


def test_dataset_id_target(db, client):
    # Datasets with a target respond 303 (see other).
    dataset = Dataset.objects.using('metadata').exclude(target=None).first()
    assert _view_status(client, 'dataset', dataset.id) == 303


def test_dataset_path(db, client):
    dataset = Dataset.objects.using('metadata').filter(target=None).first()
    assert _view_status(client, 'dataset', dataset.path) == 200


def test_dataset_path_target(db, client):
    dataset = Dataset.objects.using('metadata').exclude(target=None).first()
    assert _view_status(client, 'dataset', dataset.path) == 303


def test_file_id(db, client):
    file = File.objects.using('metadata').filter(dataset__target=None).first()
    assert _view_status(client, 'file', file.id) == 200


def test_file_id_target(db, client):
    # Files whose dataset has a target respond 303 (see other).
    file = File.objects.using('metadata').exclude(dataset__target=None).first()
    assert _view_status(client, 'file', file.id) == 303


def test_file_path(db, client):
    file = File.objects.using('metadata').filter(dataset__target=None).first()
    assert _view_status(client, 'file', file.path) == 200


def test_file_path_target(db, client):
    file = File.objects.using('metadata').exclude(dataset__target=None).first()
    assert _view_status(client, 'file', file.path) == 303


def test_attributes(db, client):
    assert client.get(reverse('attributes')).status_code == 200


def test_resources(db, client):
    assert client.get(reverse('resources')).status_code == 200


def test_resource(db, client):
    resource = Resource.objects.using('metadata').first()
    assert _view_status(client, 'resource', resource.doi) == 200


def test_resource_bibtex(db, client):
    resource = Resource.objects.using('metadata').first()
    assert _view_status(client, 'resource_bibtex', resource.doi) == 200


def test_resource_json(db, client):
    resource = Resource.objects.using('metadata').first()
    assert _view_status(client, 'resource_datacite_json', resource.doi) == 200


def test_resource_xml(db, client):
    resource = Resource.objects.using('metadata').first()
    assert _view_status(client, 'resource_datacite_xml', resource.doi) == 200
| 33.260504
| 81
| 0.711218
| 511
| 3,958
| 5.365949
| 0.109589
| 0.107221
| 0.117797
| 0.166302
| 0.876003
| 0.876003
| 0.818381
| 0.792123
| 0.709336
| 0.661926
| 0
| 0.022052
| 0.140728
| 3,958
| 118
| 82
| 33.542373
| 0.784181
| 0
| 0
| 0.544304
| 0
| 0
| 0.09424
| 0.021728
| 0
| 0
| 0
| 0
| 0.265823
| 1
| 0.240506
| false
| 0
| 0.025316
| 0
| 0.265823
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
ffc9b86b2ab5f4cfc310b34d9a1f8fec6be4b1ba
| 25,780
|
py
|
Python
|
src/data/simulation_setup.py
|
rganti/Optimal_Protocols_bnAbs
|
2b24325cd33749bbfaaadedd5c41b418ae55dc1d
|
[
"MIT"
] | null | null | null |
src/data/simulation_setup.py
|
rganti/Optimal_Protocols_bnAbs
|
2b24325cd33749bbfaaadedd5c41b418ae55dc1d
|
[
"MIT"
] | null | null | null |
src/data/simulation_setup.py
|
rganti/Optimal_Protocols_bnAbs
|
2b24325cd33749bbfaaadedd5c41b418ae55dc1d
|
[
"MIT"
] | null | null | null |
import numpy as np
from src.data.ssc_setup import BnabModel
class BnabGillespie(object):
    """Gillespie simulation setup for the bnAb affinity-maturation model.

    States 1..len(p_ini) form a 1-D chain; state 0 collects death events.
    ``self.prop`` holds one propensity lambda per reaction and ``self.tmat``
    the matching stoichiometry matrix (one column per reaction), filled by
    :meth:`define_tmat` in the same reaction order.
    """

    def __init__(self, p_ini, parameters):
        self.p_ini = p_ini
        # One extra slot for the absorbing "dead" state N0.
        self.len_ini = len(self.p_ini) + 1
        self.vars = ['N{0}'.format(i) for i in range(self.len_ini)]
        self.bnab = BnabModel(p_ini, parameters)
        self.mu_ij = self.bnab.define_mu_ij()
        # Propensities are generated for any chain length instead of being
        # hardcoded for 7 states (the FiniteSizeEffects subclasses previously
        # re-hardcoded the same pattern for 9/11/15/31 states).
        self.prop = self._build_prop()
        self.tmat = np.zeros((self.len_ini, len(self.prop)), dtype=int)
        self.count = 0

    def _build_prop(self):
        """Return the tuple of propensity lambdas.

        Order mirrors define_tmat(): one death reaction per state 1..n first,
        then, for each state i, replication (f_i) followed by the hop to the
        lower neighbour (mu_{i,i-1}, absent for i == 1) and the hop to the
        higher neighbour (mu_{i,i+1}, absent for i == n).
        """
        n = len(self.p_ini)
        # Death reactions: state i -> state 0 at rate mu_{i0} * N_i.
        # i is bound as a default argument to avoid the late-binding-closure trap.
        props = [lambda ini, i=i: self.mu_ij['mu{0}{1}'.format(i, 0)] * ini[i]
                 for i in range(1, n + 1)]
        for i in range(1, n + 1):
            # Replication of state i.
            props.append(lambda ini, i=i: self.mu_ij['f{0}'.format(i)] * ini[i])
            if i > 1:  # hop towards the lower edge
                props.append(lambda ini, i=i: self.mu_ij['mu{0}{1}'.format(i, i - 1)] * ini[i])
            if i < n:  # hop towards the upper edge
                props.append(lambda ini, i=i: self.mu_ij['mu{0}{1}'.format(i, i + 1)] * ini[i])
        return tuple(props)

    def replication(self, index):
        """Record a replication reaction: N_index increases by one."""
        rxn = np.zeros(self.len_ini, dtype=int)
        rxn[index] = 1
        self.tmat[:, self.count] = rxn
        self.count += 1

    def transition_to_lower(self, index):
        """Record a hop from state *index* to state *index - 1*."""
        rxn = np.zeros(self.len_ini, dtype=int)
        rxn[index] = -1
        rxn[index - 1] = 1
        self.tmat[:, self.count] = rxn
        self.count += 1

    def transition_to_higher(self, index):
        """Record a hop from state *index* to state *index + 1*."""
        rxn = np.zeros(self.len_ini, dtype=int)
        rxn[index] = -1
        rxn[index + 1] = 1
        self.tmat[:, self.count] = rxn
        self.count += 1

    def define_lower_edge(self, index):
        """Edge state: replication plus a hop to the right only."""
        self.replication(index)
        self.transition_to_higher(index)

    def define_middle(self, index):
        """Interior state: replication plus hops in both directions."""
        self.replication(index)
        self.transition_to_lower(index)
        self.transition_to_higher(index)

    def define_upper_edge(self, index):
        """Edge state: replication plus a hop to the left only."""
        self.replication(index)
        self.transition_to_lower(index)

    def define_tmat(self):
        """Fill self.tmat column by column, mirroring the order of self.prop."""
        # Death reactions first: one individual leaves state i + 1, counted in state 0.
        for i in range(0, len(self.p_ini)):
            rxn = np.zeros(self.len_ini, dtype=int)
            rxn[0] = 1
            rxn[i + 1] = -1
            self.tmat[:, self.count] = rxn
            self.count += 1
        self.define_lower_edge(1)
        for i in range(2, len(self.p_ini)):
            self.define_middle(i)
        self.define_upper_edge(len(self.p_ini))
class BnabFiniteSizeEffects9(BnabGillespie):
    """9-state variant of BnabGillespie.

    Rebuilds the propensity tuple for len(p_ini) == 9 states; the reaction
    order (all death reactions first, then per-state replication and
    neighbour hops) matches the hand-written tuple it replaces.
    """

    def __init__(self, p_ini, parameters):
        BnabGillespie.__init__(self, p_ini, parameters)
        n = len(self.p_ini)
        # Death reactions for states 1..n (i bound as a default argument to
        # avoid the late-binding-closure trap), then replication f_i and the
        # hops mu_{i,i-1} / mu_{i,i+1} for each state.
        props = [lambda ini, i=i: self.mu_ij['mu{0}{1}'.format(i, 0)] * ini[i]
                 for i in range(1, n + 1)]
        for i in range(1, n + 1):
            props.append(lambda ini, i=i: self.mu_ij['f{0}'.format(i)] * ini[i])
            if i > 1:
                props.append(lambda ini, i=i: self.mu_ij['mu{0}{1}'.format(i, i - 1)] * ini[i])
            if i < n:
                props.append(lambda ini, i=i: self.mu_ij['mu{0}{1}'.format(i, i + 1)] * ini[i])
        self.prop = tuple(props)
        self.tmat = np.zeros((self.len_ini, len(self.prop)), dtype=int)
class BnabFiniteSizeEffects11(BnabGillespie):
    """11-state variant of BnabGillespie.

    Rebuilds the propensity tuple for len(p_ini) == 11 states; the reaction
    order (all death reactions first, then per-state replication and
    neighbour hops) matches the hand-written tuple it replaces.
    """

    def __init__(self, p_ini, parameters):
        BnabGillespie.__init__(self, p_ini, parameters)
        n = len(self.p_ini)
        # Death reactions for states 1..n (i bound as a default argument to
        # avoid the late-binding-closure trap), then replication f_i and the
        # hops mu_{i,i-1} / mu_{i,i+1} for each state.
        props = [lambda ini, i=i: self.mu_ij['mu{0}{1}'.format(i, 0)] * ini[i]
                 for i in range(1, n + 1)]
        for i in range(1, n + 1):
            props.append(lambda ini, i=i: self.mu_ij['f{0}'.format(i)] * ini[i])
            if i > 1:
                props.append(lambda ini, i=i: self.mu_ij['mu{0}{1}'.format(i, i - 1)] * ini[i])
            if i < n:
                props.append(lambda ini, i=i: self.mu_ij['mu{0}{1}'.format(i, i + 1)] * ini[i])
        self.prop = tuple(props)
        self.tmat = np.zeros((self.len_ini, len(self.prop)), dtype=int)
class BnabFiniteSizeEffects15(BnabGillespie):
    """15-state variant of BnabGillespie.

    Rebuilds the propensity tuple for len(p_ini) == 15 states; the reaction
    order (all death reactions first, then per-state replication and
    neighbour hops) matches the hand-written tuple it replaces.
    """

    def __init__(self, p_ini, parameters):
        BnabGillespie.__init__(self, p_ini, parameters)
        n = len(self.p_ini)
        # Death reactions for states 1..n (i bound as a default argument to
        # avoid the late-binding-closure trap), then replication f_i and the
        # hops mu_{i,i-1} / mu_{i,i+1} for each state.
        props = [lambda ini, i=i: self.mu_ij['mu{0}{1}'.format(i, 0)] * ini[i]
                 for i in range(1, n + 1)]
        for i in range(1, n + 1):
            props.append(lambda ini, i=i: self.mu_ij['f{0}'.format(i)] * ini[i])
            if i > 1:
                props.append(lambda ini, i=i: self.mu_ij['mu{0}{1}'.format(i, i - 1)] * ini[i])
            if i < n:
                props.append(lambda ini, i=i: self.mu_ij['mu{0}{1}'.format(i, i + 1)] * ini[i])
        self.prop = tuple(props)
        self.tmat = np.zeros((self.len_ini, len(self.prop)), dtype=int)
class BnabFiniteSizeEffects31(BnabGillespie):
    """31-state variant of BnabGillespie.

    Rebuilds the propensity tuple for len(p_ini) == 31 states; the reaction
    order (all death reactions first, then per-state replication and
    neighbour hops) matches the hand-written tuple it replaces.
    """

    def __init__(self, p_ini, parameters):
        BnabGillespie.__init__(self, p_ini, parameters)
        n = len(self.p_ini)
        # Death reactions for states 1..n (i bound as a default argument to
        # avoid the late-binding-closure trap), then replication f_i and the
        # hops mu_{i,i-1} / mu_{i,i+1} for each state.
        props = [lambda ini, i=i: self.mu_ij['mu{0}{1}'.format(i, 0)] * ini[i]
                 for i in range(1, n + 1)]
        for i in range(1, n + 1):
            props.append(lambda ini, i=i: self.mu_ij['f{0}'.format(i)] * ini[i])
            if i > 1:
                props.append(lambda ini, i=i: self.mu_ij['mu{0}{1}'.format(i, i - 1)] * ini[i])
            if i < n:
                props.append(lambda ini, i=i: self.mu_ij['mu{0}{1}'.format(i, i + 1)] * ini[i])
        self.prop = tuple(props)
        self.tmat = np.zeros((self.len_ini, len(self.prop)), dtype=int)
| 54.273684
| 81
| 0.463033
| 4,046
| 25,780
| 2.858379
| 0.025457
| 0.172503
| 0.195763
| 0.365759
| 0.929788
| 0.928318
| 0.919758
| 0.913878
| 0.878772
| 0.86658
| 0
| 0.094323
| 0.333786
| 25,780
| 474
| 82
| 54.388186
| 0.579039
| 0.02692
| 0
| 0.714697
| 0
| 0
| 0.078513
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034582
| false
| 0
| 0.005764
| 0
| 0.054755
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
ffd5be2fe79cb326034afe73efbefd3ae1ae18fb
| 178
|
py
|
Python
|
pyremo/cmor/__init__.py
|
remo-rcm/pyremo2
|
59bd18d411944205db93c062439172f91ba99f83
|
[
"MIT"
] | null | null | null |
pyremo/cmor/__init__.py
|
remo-rcm/pyremo2
|
59bd18d411944205db93c062439172f91ba99f83
|
[
"MIT"
] | 2
|
2020-10-05T07:27:31.000Z
|
2020-12-10T22:59:54.000Z
|
pyremo/cmor/__init__.py
|
remo-rcm/pyremo2
|
59bd18d411944205db93c062439172f91ba99f83
|
[
"MIT"
] | null | null | null |
"""Public API of the ``pyremo.cmor`` subpackage (re-exports only)."""
from .derived import derivator
from .remo_cmor import cmorize_variable, prepare_variable, to_cftime

# Explicitly declare the public names re-exported from the submodules.
__all__ = ["derivator", "cmorize_variable", "prepare_variable", "to_cftime"]
| 35.6
| 76
| 0.803371
| 22
| 178
| 6
| 0.545455
| 0.227273
| 0.333333
| 0.454545
| 0.575758
| 0.575758
| 0
| 0
| 0
| 0
| 0
| 0
| 0.095506
| 178
| 4
| 77
| 44.5
| 0.819876
| 0
| 0
| 0
| 0
| 0
| 0.280899
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
084b3d5805d9f52b0fd3e748ab56f5b3fb3926a5
| 179
|
py
|
Python
|
tests/pylint_airflow/integration/scripts/test_airflowmodelsdagcontextmanager_filename.py
|
scribd/pylint-airflow
|
90cc4cb9d38cc846c64ebfec8cb9876d49980e4e
|
[
"MIT"
] | 70
|
2019-05-18T21:57:44.000Z
|
2022-03-28T16:28:12.000Z
|
tests/pylint_airflow/integration/scripts/test_airflowmodelsdagcontextmanager_filename.py
|
scribd/pylint-airflow
|
90cc4cb9d38cc846c64ebfec8cb9876d49980e4e
|
[
"MIT"
] | 7
|
2019-05-25T11:53:54.000Z
|
2022-01-20T20:09:11.000Z
|
tests/pylint_airflow/integration/scripts/test_airflowmodelsdagcontextmanager_filename.py
|
scribd/pylint-airflow
|
90cc4cb9d38cc846c64ebfec8cb9876d49980e4e
|
[
"MIT"
] | 10
|
2019-07-08T02:28:24.000Z
|
2022-03-11T09:39:00.000Z
|
# [match-dagid-filename]
"""DAG assignment via context manager, checking airflow.models.DAG()"""
import airflow.models

# NOTE: this is a pylint-airflow integration fixture; the dag_id "foobar"
# intentionally differs from the filename so the checker named in the
# marker comment on line 1 is expected to fire here.
with airflow.models.DAG(dag_id="foobar") as dag:
    pass
| 22.375
| 71
| 0.73743
| 25
| 179
| 5.24
| 0.68
| 0.29771
| 0.244275
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.122905
| 179
| 7
| 72
| 25.571429
| 0.834395
| 0.497207
| 0
| 0
| 0
| 0
| 0.071429
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 7
|
4b57203985739413db39fe74a0709a45c2c160cd
| 294
|
py
|
Python
|
lifelines/__init__.py
|
patrick-russell/lifelines
|
b5c79dc2fc4ab6cdcf7ff3df59b7697c1c3bb56b
|
[
"MIT"
] | null | null | null |
lifelines/__init__.py
|
patrick-russell/lifelines
|
b5c79dc2fc4ab6cdcf7ff3df59b7697c1c3bb56b
|
[
"MIT"
] | null | null | null |
lifelines/__init__.py
|
patrick-russell/lifelines
|
b5c79dc2fc4ab6cdcf7ff3df59b7697c1c3bb56b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Package entry point: re-export the survival-analysis fitter classes."""
from .estimation import KaplanMeierFitter, NelsonAalenFitter, \
    AalenAdditiveFitter, BreslowFlemingHarringtonFitter, CoxPHFitter

# Public API of the package, mirroring the imports above.
__all__ = ['KaplanMeierFitter', 'NelsonAalenFitter', 'AalenAdditiveFitter',
           'BreslowFlemingHarringtonFitter', 'CoxPHFitter']
| 42
| 75
| 0.765306
| 17
| 294
| 13
| 0.705882
| 0.307692
| 0.479638
| 0.751131
| 0.850679
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003891
| 0.12585
| 294
| 6
| 76
| 49
| 0.856031
| 0.071429
| 0
| 0
| 0
| 0
| 0.346863
| 0.110701
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 0.25
| 0
| 1
| 0
| 1
| null | 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4b8dd31dc79b37b22588fbe625c60cddd00b9358
| 46,981
|
py
|
Python
|
tests/dashboard/test_permissions.py
|
Fa67/saleor-shop
|
76110349162c54c8bfcae61983bb59ba8fb0f778
|
[
"BSD-3-Clause"
] | null | null | null |
tests/dashboard/test_permissions.py
|
Fa67/saleor-shop
|
76110349162c54c8bfcae61983bb59ba8fb0f778
|
[
"BSD-3-Clause"
] | 3
|
2020-03-24T16:21:02.000Z
|
2021-02-02T21:57:49.000Z
|
tests/dashboard/test_permissions.py
|
Fa67/saleor-shop
|
76110349162c54c8bfcae61983bb59ba8fb0f778
|
[
"BSD-3-Clause"
] | null | null | null |
from django.urls import reverse
from saleor.account.models import User
def test_admin_can_view_staff_list(admin_client):
    """Admin gets 200 on the staff list."""
    url = reverse('dashboard:staff-list')
    assert admin_client.get(url).status_code == 200
def test_staff_cant_view_staff_list(staff_client):
    """Plain staff is redirected (302) from the staff list."""
    url = reverse('dashboard:staff-list')
    assert staff_client.get(url).status_code == 302
def test_admin_can_view_staff_details(admin_client, admin_user):
    """Admin gets 200 on a staff member's detail page."""
    url = reverse('dashboard:staff-details', args=[admin_user.pk])
    assert admin_client.get(url).status_code == 200
def test_staff_cant_view_staff_details(staff_client, admin_user):
    """Plain staff is redirected (302) from a staff detail page."""
    url = reverse('dashboard:staff-details', args=[admin_user.pk])
    assert staff_client.get(url).status_code == 302
def test_admin_can_view_staff_create(admin_client):
    """Admin gets 200 on the staff creation page."""
    url = reverse('dashboard:staff-create')
    assert admin_client.get(url).status_code == 200
def test_staff_cant_view_staff_create(staff_client):
    """Plain staff is redirected (302) from the staff creation page."""
    url = reverse('dashboard:staff-create')
    assert staff_client.get(url).status_code == 302
def test_admin_can_view_product_types_list(admin_client):
    """Admin gets 200 on the product type list."""
    url = reverse('dashboard:product-type-list')
    assert admin_client.get(url).status_code == 200
def test_staff_cant_view_product_types_list(staff_client):
    """Plain staff is redirected (302) from the product type list."""
    url = reverse('dashboard:product-type-list')
    assert staff_client.get(url).status_code == 302
def test_admin_can_view_product_type_add(admin_client):
    """Admin gets 200 on the product type creation page."""
    url = reverse('dashboard:product-type-add')
    assert admin_client.get(url).status_code == 200
def test_staff_cant_view_product_type_add(staff_client):
    """Plain staff is redirected (302) from product type creation."""
    url = reverse('dashboard:product-type-add')
    assert staff_client.get(url).status_code == 302
def test_admin_can_view_product_type_update(admin_client, product_type):
    """Admin gets 200 on the product type update page."""
    url = reverse('dashboard:product-type-update', args=[product_type.pk])
    assert admin_client.get(url).status_code == 200
def test_staff_cant_view_product_type_update(staff_client, product_type):
    """Plain staff is redirected (302) from product type update."""
    url = reverse('dashboard:product-type-update', args=[product_type.pk])
    assert staff_client.get(url).status_code == 302
def test_admin_can_view_product_type_delete(admin_client, product_type):
    """Admin gets 200 on the product type delete page."""
    url = reverse('dashboard:product-type-delete', args=[product_type.pk])
    assert admin_client.get(url).status_code == 200
def test_staff_cant_view_product_type_delete(staff_client, product_type):
    """Plain staff is redirected (302) from product type delete."""
    url = reverse('dashboard:product-type-delete', args=[product_type.pk])
    assert staff_client.get(url).status_code == 302
def test_admin_can_view_products_attribute_list(admin_client):
    """Admin gets 200 on the product attribute list."""
    url = reverse('dashboard:product-attributes')
    assert admin_client.get(url).status_code == 200
def test_staff_cant_view_products_attribute_list(staff_client):
    """Plain staff is redirected (302) from the attribute list."""
    url = reverse('dashboard:product-attributes')
    assert staff_client.get(url).status_code == 302
def test_admin_can_view_products_attribute_add(admin_client):
    """Admin gets 200 on the attribute creation page."""
    url = reverse('dashboard:product-attribute-add')
    assert admin_client.get(url).status_code == 200
def test_staff_cant_view_products_attribute_add(staff_client):
    """Plain staff is redirected (302) from attribute creation."""
    url = reverse('dashboard:product-attribute-add')
    assert staff_client.get(url).status_code == 302
def test_admin_can_view_products_attribute_details(
        admin_client, color_attribute):
    """Admin gets 200 on an attribute detail page."""
    url = reverse(
        'dashboard:product-attribute-details', args=[color_attribute.pk])
    assert admin_client.get(url).status_code == 200
def test_staff_cant_view_products_attribute_details(
        staff_client, color_attribute):
    """Plain staff is redirected (302) from an attribute detail page."""
    url = reverse(
        'dashboard:product-attribute-details', args=[color_attribute.pk])
    assert staff_client.get(url).status_code == 302
def test_admin_can_view_products_attribute_update(
        admin_client, color_attribute):
    """Admin gets 200 on an attribute update page."""
    url = reverse(
        'dashboard:product-attribute-update', args=[color_attribute.pk])
    assert admin_client.get(url).status_code == 200
def test_staff_cant_view_products_attribute_update(
        staff_client, color_attribute):
    """Plain staff is redirected (302) from an attribute update page."""
    url = reverse(
        'dashboard:product-attribute-update', args=[color_attribute.pk])
    assert staff_client.get(url).status_code == 302
def test_admin_can_view_products_attribute_delete(
        admin_client, color_attribute):
    """Admin gets 200 on an attribute delete page."""
    url = reverse(
        'dashboard:product-attribute-delete', args=[color_attribute.pk])
    assert admin_client.get(url).status_code == 200
def test_staff_cant_view_products_attribute_delete(
        staff_client, color_attribute):
    """Plain staff is redirected (302) from an attribute delete page."""
    url = reverse(
        'dashboard:product-attribute-delete', args=[color_attribute.pk])
    assert staff_client.get(url).status_code == 302
def test_admin_can_view_shipping_methods_list(admin_client):
    """Admin gets 200 on the shipping method list."""
    url = reverse('dashboard:shipping-methods')
    assert admin_client.get(url).status_code == 200
def test_staff_cant_view_shipping_methods_list(staff_client):
    """Plain staff is redirected (302) from the shipping method list."""
    url = reverse('dashboard:shipping-methods')
    assert staff_client.get(url).status_code == 302
def test_admin_can_view_shipping_methods_add(admin_client):
    """Admin gets 200 on the shipping method creation page."""
    url = reverse('dashboard:shipping-method-add')
    assert admin_client.get(url).status_code == 200
def test_staff_cant_view_shipping_methods_add(staff_client):
    """Plain staff is redirected (302) from shipping method creation."""
    url = reverse('dashboard:shipping-method-add')
    assert staff_client.get(url).status_code == 302
def test_admin_can_view_shipping_methods_update(admin_client, shipping_method):
    """Admin gets 200 on a shipping method update page."""
    url = reverse(
        'dashboard:shipping-method-update', args=[shipping_method.pk])
    assert admin_client.get(url).status_code == 200
def test_staff_cant_view_shipping_methods_update(
        staff_client, shipping_method):
    """Plain staff is redirected (302) from a shipping method update page."""
    url = reverse(
        'dashboard:shipping-method-update', args=[shipping_method.pk])
    assert staff_client.get(url).status_code == 302
def test_admin_can_view_shipping_methods_details(
        admin_client, shipping_method):
    """Admin gets 200 on a shipping method detail page."""
    url = reverse(
        'dashboard:shipping-method-details', args=[shipping_method.pk])
    assert admin_client.get(url).status_code == 200
def test_staff_cant_view_shipping_methods_details(
        staff_client, shipping_method):
    """Plain staff is redirected (302) from a shipping method detail page."""
    url = reverse(
        'dashboard:shipping-method-details', args=[shipping_method.pk])
    assert staff_client.get(url).status_code == 302
def test_admin_can_view_shipping_methods_delete(admin_client, shipping_method):
    """Admin gets 200 on a shipping method delete page."""
    url = reverse(
        'dashboard:shipping-method-delete', args=[shipping_method.pk])
    assert admin_client.get(url).status_code == 200
def test_staff_cant_view_shipping_methods_delete(
        staff_client, shipping_method):
    """Plain staff is redirected (302) from a shipping method delete page."""
    url = reverse(
        'dashboard:shipping-method-delete', args=[shipping_method.pk])
    assert staff_client.get(url).status_code == 302
def test_admin_can_view_customers_list(admin_client):
    """Admin gets 200 on the customer list."""
    url = reverse('dashboard:customers')
    assert admin_client.get(url).status_code == 200
def test_admin_can_view_customer_detail_view(admin_client, customer_user):
    """Admin gets 200 on a customer's detail page."""
    url = reverse('dashboard:customer-details', args=[customer_user.pk])
    assert admin_client.get(url).status_code == 200
def test_admin_can_view_customer_create(admin_client):
    """Admin gets 200 on the customer creation page."""
    url = reverse('dashboard:customer-create')
    assert admin_client.get(url).status_code == 200
def test_staff_cant_view_customer_create(staff_client):
    """Plain staff is redirected (302) from customer creation."""
    url = reverse('dashboard:customer-create')
    assert staff_client.get(url).status_code == 302
def test_staff_cant_access_product_list(staff_client, staff_user):
    """Without view_product, the product list redirects staff away."""
    assert not staff_user.has_perm('product.view_product')
    url = reverse('dashboard:product-list')
    assert staff_client.get(url).status_code == 302
def test_staff_can_access_product_list(
        staff_client, staff_user, permission_view_product):
    """Granting view_product turns the product list 302 into a 200."""
    assert not staff_user.has_perm('product.view_product')
    staff_user.user_permissions.add(permission_view_product)
    staff_user = User.objects.get(pk=staff_user.pk)  # refresh perm cache
    assert staff_user.has_perm('product.view_product')
    url = reverse('dashboard:product-list')
    assert staff_client.get(url).status_code == 200
def test_staff_can_access_product_details(
        staff_client, staff_user, permission_view_product, product):
    """view_product gates the product detail page."""
    assert not staff_user.has_perm('product.view_product')
    url = reverse('dashboard:product-details', kwargs={'pk': product.pk})
    assert staff_client.get(url).status_code == 302
    staff_user.user_permissions.add(permission_view_product)
    staff_user = User.objects.get(pk=staff_user.pk)  # refresh perm cache
    assert staff_user.has_perm('product.view_product')
    assert staff_client.post(url).status_code == 200
def test_staff_can_access_product_toggle_is_published(
        staff_client, staff_user, permission_edit_product, product):
    """edit_product gates the publish-toggle endpoint (POST)."""
    assert not staff_user.has_perm('product.edit_product')
    url = reverse('dashboard:product-publish', kwargs={'pk': product.pk})
    assert staff_client.post(url).status_code == 302
    staff_user.user_permissions.add(permission_edit_product)
    staff_user = User.objects.get(pk=staff_user.pk)  # refresh perm cache
    assert staff_user.has_perm('product.edit_product')
    assert staff_client.post(url).status_code == 200
def test_staff_can_access_product_select_type(
        staff_client, staff_user, permission_edit_product):
    """edit_product gates the select-product-type step."""
    assert not staff_user.has_perm('product.edit_product')
    url = reverse('dashboard:product-add-select-type')
    assert staff_client.get(url).status_code == 302
    staff_user.user_permissions.add(permission_edit_product)
    staff_user = User.objects.get(pk=staff_user.pk)  # refresh perm cache
    assert staff_user.has_perm('product.edit_product')
    assert staff_client.post(url).status_code == 200
def test_staff_can_access_product_create(
        staff_client, staff_user, permission_edit_product, product_type):
    """edit_product gates product creation for a given type."""
    assert not staff_user.has_perm('product.edit_product')
    url = reverse('dashboard:product-add', kwargs={'type_pk': product_type.pk})
    assert staff_client.get(url).status_code == 302
    staff_user.user_permissions.add(permission_edit_product)
    staff_user = User.objects.get(pk=staff_user.pk)  # refresh perm cache
    assert staff_user.has_perm('product.edit_product')
    assert staff_client.post(url).status_code == 200
def test_staff_can_access_product_edit(
        staff_client, staff_user, permission_edit_product, product):
    """edit_product gates the product update page."""
    assert not staff_user.has_perm('product.edit_product')
    url = reverse('dashboard:product-update', kwargs={'pk': product.pk})
    assert staff_client.get(url).status_code == 302
    staff_user.user_permissions.add(permission_edit_product)
    staff_user = User.objects.get(pk=staff_user.pk)  # refresh perm cache
    assert staff_user.has_perm('product.edit_product')
    assert staff_client.post(url).status_code == 200
def test_staff_can_access_product_delete(
        staff_client, staff_user, permission_edit_product, product):
    """edit_product gates the product delete page."""
    assert not staff_user.has_perm('product.edit_product')
    url = reverse('dashboard:product-delete', kwargs={'pk': product.pk})
    assert staff_client.get(url).status_code == 302
    staff_user.user_permissions.add(permission_edit_product)
    staff_user = User.objects.get(pk=staff_user.pk)  # refresh perm cache
    assert staff_user.has_perm('product.edit_product')
    assert staff_client.get(url).status_code == 200
def test_staff_can_view_product_list(
        staff_client, staff_user, permission_view_product):
    """view_product gates the product list (302 before, 200 after)."""
    assert not staff_user.has_perm('product.view_product')
    url = reverse('dashboard:product-list')
    assert staff_client.get(url).status_code == 302
    staff_user.user_permissions.add(permission_view_product)
    staff_user = User.objects.get(pk=staff_user.pk)  # refresh perm cache
    assert staff_user.has_perm('product.view_product')
    assert staff_client.get(url).status_code == 200
def test_staff_can_view_category_list(
        staff_client, staff_user, permission_view_category):
    """view_category gates the category list."""
    assert not staff_user.has_perm('product.view_category')
    url = reverse('dashboard:category-list')
    assert staff_client.get(url).status_code == 302
    staff_user.user_permissions.add(permission_view_category)
    staff_user = User.objects.get(pk=staff_user.pk)  # refresh perm cache
    assert staff_user.has_perm('product.view_category')
    assert staff_client.get(url).status_code == 200
def test_staff_can_view_category_add_root(
        staff_client, staff_user, permission_edit_category):
    """edit_category gates root category creation."""
    assert not staff_user.has_perm('product.edit_category')
    url = reverse('dashboard:category-add')
    assert staff_client.get(url).status_code == 302
    staff_user.user_permissions.add(permission_edit_category)
    staff_user = User.objects.get(pk=staff_user.pk)  # refresh perm cache
    assert staff_user.has_perm('product.edit_category')
    assert staff_client.get(url).status_code == 200
def test_staff_can_view_category_add_subcategory(
        staff_client, staff_user, permission_edit_category, default_category):
    """edit_category gates subcategory creation under a parent."""
    assert not staff_user.has_perm('product.edit_category')
    url = reverse('dashboard:category-add', args=[default_category.pk])
    assert staff_client.get(url).status_code == 302
    staff_user.user_permissions.add(permission_edit_category)
    staff_user = User.objects.get(pk=staff_user.pk)  # refresh perm cache
    assert staff_user.has_perm('product.edit_category')
    assert staff_client.get(url).status_code == 200
def test_staff_can_view_category_edit(
        staff_client, staff_user, permission_edit_category, default_category):
    """edit_category gates the category edit page."""
    assert not staff_user.has_perm('product.edit_category')
    url = reverse('dashboard:category-edit', args=[default_category.pk])
    assert staff_client.get(url).status_code == 302
    staff_user.user_permissions.add(permission_edit_category)
    staff_user = User.objects.get(pk=staff_user.pk)  # refresh perm cache
    assert staff_user.has_perm('product.edit_category')
    assert staff_client.get(url).status_code == 200
def test_staff_can_view_category_delete(
        staff_client, staff_user, permission_edit_category, default_category):
    """edit_category gates the category delete page."""
    assert not staff_user.has_perm('product.edit_category')
    url = reverse('dashboard:category-delete', args=[default_category.pk])
    assert staff_client.get(url).status_code == 302
    staff_user.user_permissions.add(permission_edit_category)
    staff_user = User.objects.get(pk=staff_user.pk)  # refresh perm cache
    assert staff_user.has_perm('product.edit_category')
    assert staff_client.get(url).status_code == 200
def test_staff_can_view_sale_list(
        staff_client, staff_user, permission_view_sale):
    """view_sale gates the sale list."""
    assert not staff_user.has_perm('discount.view_sale')
    url = reverse('dashboard:sale-list')
    assert staff_client.get(url).status_code == 302
    staff_user.user_permissions.add(permission_view_sale)
    staff_user = User.objects.get(pk=staff_user.pk)  # refresh perm cache
    assert staff_user.has_perm('discount.view_sale')
    assert staff_client.get(url).status_code == 200
def test_staff_can_view_sale_update(
        staff_client, staff_user, permission_edit_sale, sale):
    """edit_sale gates the sale update page."""
    assert not staff_user.has_perm('discount.edit_sale')
    url = reverse('dashboard:sale-update', args=[sale.pk])
    assert staff_client.get(url).status_code == 302
    staff_user.user_permissions.add(permission_edit_sale)
    staff_user = User.objects.get(pk=staff_user.pk)  # refresh perm cache
    assert staff_user.has_perm('discount.edit_sale')
    assert staff_client.get(url).status_code == 200
def test_staff_can_view_sale_add(
        staff_client, staff_user, permission_edit_sale, sale):
    """edit_sale gates sale creation."""
    assert not staff_user.has_perm('discount.edit_sale')
    url = reverse('dashboard:sale-add')
    assert staff_client.get(url).status_code == 302
    staff_user.user_permissions.add(permission_edit_sale)
    staff_user = User.objects.get(pk=staff_user.pk)  # refresh perm cache
    assert staff_user.has_perm('discount.edit_sale')
    assert staff_client.get(url).status_code == 200
def test_staff_can_view_sale_delete(
        staff_client, staff_user, permission_edit_sale, sale):
    """edit_sale gates the sale delete page."""
    assert not staff_user.has_perm('discount.edit_sale')
    url = reverse('dashboard:sale-delete', args=[sale.pk])
    assert staff_client.get(url).status_code == 302
    staff_user.user_permissions.add(permission_edit_sale)
    staff_user = User.objects.get(pk=staff_user.pk)  # refresh perm cache
    assert staff_user.has_perm('discount.edit_sale')
    assert staff_client.get(url).status_code == 200
def test_staff_can_view_voucher_list(
        staff_client, staff_user, permission_view_voucher):
    """view_voucher gates the voucher list."""
    assert not staff_user.has_perm('discount.view_voucher')
    url = reverse('dashboard:voucher-list')
    assert staff_client.get(url).status_code == 302
    staff_user.user_permissions.add(permission_view_voucher)
    staff_user = User.objects.get(pk=staff_user.pk)  # refresh perm cache
    assert staff_user.has_perm('discount.view_voucher')
    assert staff_client.get(url).status_code == 200
def test_staff_can_view_voucher_update(
        staff_client, staff_user, permission_edit_voucher, voucher):
    """edit_voucher gates the voucher update page."""
    assert not staff_user.has_perm('discount.edit_voucher')
    url = reverse('dashboard:voucher-update', args=[voucher.pk])
    assert staff_client.get(url).status_code == 302
    staff_user.user_permissions.add(permission_edit_voucher)
    staff_user = User.objects.get(pk=staff_user.pk)  # refresh perm cache
    assert staff_user.has_perm('discount.edit_voucher')
    assert staff_client.get(url).status_code == 200
def test_staff_can_view_voucher_add(
        staff_client, staff_user, permission_edit_voucher):
    """edit_voucher gates voucher creation."""
    assert not staff_user.has_perm('discount.edit_voucher')
    url = reverse('dashboard:voucher-add')
    assert staff_client.get(url).status_code == 302
    staff_user.user_permissions.add(permission_edit_voucher)
    staff_user = User.objects.get(pk=staff_user.pk)  # refresh perm cache
    assert staff_user.has_perm('discount.edit_voucher')
    assert staff_client.get(url).status_code == 200
def test_staff_can_view_voucher_delete(
        staff_client, staff_user, permission_edit_voucher, voucher):
    """edit_voucher gates the voucher delete page."""
    assert not staff_user.has_perm('discount.edit_voucher')
    url = reverse('dashboard:voucher-delete', args=[voucher.pk])
    assert staff_client.get(url).status_code == 302
    staff_user.user_permissions.add(permission_edit_voucher)
    staff_user = User.objects.get(pk=staff_user.pk)  # refresh perm cache
    assert staff_user.has_perm('discount.edit_voucher')
    assert staff_client.get(url).status_code == 200
def test_staff_can_view_order_list(
        staff_client, staff_user, permission_view_order):
    """view_order gates the order list."""
    assert not staff_user.has_perm('order.view_order')
    url = reverse('dashboard:orders')
    assert staff_client.get(url).status_code == 302
    staff_user.user_permissions.add(permission_view_order)
    staff_user = User.objects.get(pk=staff_user.pk)  # refresh perm cache
    assert staff_user.has_perm('order.view_order')
    assert staff_client.get(url).status_code == 200
def test_staff_can_view_order_details(
        staff_client, staff_user, permission_view_order, order_with_lines):
    """view_order gates the order detail page."""
    assert not staff_user.has_perm('order.view_order')
    url = reverse('dashboard:order-details', args=[order_with_lines.pk])
    assert staff_client.get(url).status_code == 302
    staff_user.user_permissions.add(permission_view_order)
    staff_user = User.objects.get(pk=staff_user.pk)  # refresh perm cache
    assert staff_user.has_perm('order.view_order')
    assert staff_client.get(url).status_code == 200
def test_staff_can_view_order_add_note(
        staff_client, staff_user, permission_edit_order, order):
    """edit_order gates the add-note view."""
    assert not staff_user.has_perm('order.edit_order')
    url = reverse('dashboard:order-add-note', args=[order.pk])
    assert staff_client.get(url).status_code == 302
    staff_user.user_permissions.add(permission_edit_order)
    staff_user = User.objects.get(pk=staff_user.pk)  # refresh perm cache
    assert staff_user.has_perm('order.edit_order')
    assert staff_client.get(url).status_code == 200
def test_staff_can_view_cancel_order(
        staff_client, staff_user, permission_edit_order, order):
    """edit_order gates the cancel-order view."""
    assert not staff_user.has_perm('order.edit_order')
    url = reverse('dashboard:order-cancel', args=[order.pk])
    assert staff_client.get(url).status_code == 302
    staff_user.user_permissions.add(permission_edit_order)
    staff_user = User.objects.get(pk=staff_user.pk)  # refresh perm cache
    assert staff_user.has_perm('order.edit_order')
    assert staff_client.get(url).status_code == 200
def test_staff_can_view_billing_address_edit(
        staff_client, staff_user, permission_edit_order, order):
    """edit_order gates editing an order's billing address."""
    assert not staff_user.has_perm('order.edit_order')
    url = reverse('dashboard:address-edit', args=[order.pk, 'billing'])
    assert staff_client.get(url).status_code == 302
    staff_user.user_permissions.add(permission_edit_order)
    staff_user = User.objects.get(pk=staff_user.pk)  # refresh perm cache
    assert staff_user.has_perm('order.edit_order')
    assert staff_client.get(url).status_code == 200
def test_staff_can_view_customers_list(
        staff_client, staff_user, permission_view_user):
    """view_user gates the customer list."""
    assert not staff_user.has_perm('account.view_user')
    url = reverse('dashboard:customers')
    assert staff_client.get(url).status_code == 302
    staff_user.user_permissions.add(permission_view_user)
    staff_user = User.objects.get(pk=staff_user.pk)  # refresh perm cache
    assert staff_user.has_perm('account.view_user')
    assert staff_client.get(url).status_code == 200
def test_staff_can_view_customer_details(
        staff_client, staff_user, permission_view_user, customer_user,
        order_with_lines):
    """view_user opens customer details but not order details."""
    assert not staff_user.has_perm('account.view_user')
    customer_url = reverse(
        'dashboard:customer-details', args=[customer_user.pk])
    assert staff_client.get(customer_url).status_code == 302
    staff_user.user_permissions.add(permission_view_user)
    staff_user = User.objects.get(pk=staff_user.pk)  # refresh perm cache
    assert staff_user.has_perm('account.view_user')
    assert staff_client.get(customer_url).status_code == 200
    # Order pages still require the (absent) view_order permission.
    order_url = reverse(
        'dashboard:order-details', args=[order_with_lines.pk])
    assert staff_client.get(order_url).status_code == 302
def test_staff_can_view_staff_members_list(
        staff_client, staff_user, permission_view_staff):
    """view_staff gates the staff member list."""
    assert not staff_user.has_perm('account.view_staff')
    url = reverse('dashboard:staff-list')
    assert staff_client.get(url).status_code == 302
    staff_user.user_permissions.add(permission_view_staff)
    staff_user = User.objects.get(pk=staff_user.pk)  # refresh perm cache
    assert staff_user.has_perm('account.view_staff')
    assert staff_client.get(url).status_code == 200
def test_staff_can_view_detail_create_and_delete_staff_members(
        staff_client, staff_user, permission_edit_staff):
    """edit_staff gates staff create, delete and detail pages at once."""
    assert not staff_user.has_perm('account.edit_staff')
    urls = (
        reverse('dashboard:staff-create'),
        reverse('dashboard:staff-delete', args=[staff_user.pk]),
        reverse('dashboard:staff-details', args=[staff_user.pk]))
    for url in urls:
        assert staff_client.get(url).status_code == 302
    staff_user.user_permissions.add(permission_edit_staff)
    staff_user = User.objects.get(pk=staff_user.pk)  # refresh perm cache
    assert staff_user.has_perm('account.edit_staff')
    for url in urls:
        assert staff_client.get(url).status_code == 200
def test_staff_with_permissions_can_view_product_types_list(
        staff_client, staff_user, permission_view_properties):
    """view_properties gates the product type list."""
    assert not staff_user.has_perm('product.view_properties')
    url = reverse('dashboard:product-type-list')
    assert staff_client.get(url).status_code == 302
    staff_user.user_permissions.add(permission_view_properties)
    staff_user = User.objects.get(pk=staff_user.pk)  # refresh perm cache
    assert staff_user.has_perm('product.view_properties')
    assert staff_client.get(url).status_code == 200
def test_staff_with_permissions_can_edit_add_and_delete_product_types_list(
        staff_client, staff_user, permission_edit_properties, product_type):
    """edit_properties gates product type update, delete and add pages."""
    assert not staff_user.has_perm('product.edit_properties')
    urls = (
        reverse('dashboard:product-type-update', args=[product_type.pk]),
        reverse('dashboard:product-type-delete', args=[product_type.pk]),
        reverse('dashboard:product-type-add'))
    for url in urls:
        assert staff_client.get(url).status_code == 302
    staff_user.user_permissions.add(permission_edit_properties)
    staff_user = User.objects.get(pk=staff_user.pk)  # refresh perm cache
    assert staff_user.has_perm('product.edit_properties')
    for url in urls:
        assert staff_client.get(url).status_code == 200
def test_staff_can_access_variant_details(
        staff_client, staff_user, permission_view_product, product):
    """view_product gates the variant detail page (variants enabled)."""
    product_type = product.product_type
    product_type.has_variants = True
    product_type.save()
    variant = product.variants.get()
    assert not staff_user.has_perm('product.view_product')
    url = reverse(
        'dashboard:variant-details',
        kwargs={'product_pk': product.pk, 'variant_pk': variant.pk})
    assert staff_client.get(url).status_code == 302
    staff_user.user_permissions.add(permission_view_product)
    staff_user = User.objects.get(pk=staff_user.pk)  # refresh perm cache
    assert staff_user.has_perm('product.view_product')
    assert staff_client.get(url).status_code == 200
def test_staff_can_access_variant_create(
        staff_client, staff_user, permission_edit_product, product):
    """edit_product gates variant creation."""
    assert not staff_user.has_perm('product.edit_product')
    url = reverse('dashboard:variant-add', kwargs={'product_pk': product.pk})
    assert staff_client.get(url).status_code == 302
    staff_user.user_permissions.add(permission_edit_product)
    staff_user = User.objects.get(pk=staff_user.pk)  # refresh perm cache
    assert staff_user.has_perm('product.edit_product')
    assert staff_client.get(url).status_code == 200
def test_staff_can_access_variant_edit(
        staff_client, staff_user, permission_edit_product, product):
    """edit_product gates the variant update page."""
    variant = product.variants.get()
    assert not staff_user.has_perm('product.edit_product')
    url = reverse(
        'dashboard:variant-update',
        kwargs={'product_pk': product.pk, 'variant_pk': variant.pk})
    assert staff_client.get(url).status_code == 302
    staff_user.user_permissions.add(permission_edit_product)
    staff_user = User.objects.get(pk=staff_user.pk)  # refresh perm cache
    assert staff_user.has_perm('product.edit_product')
    assert staff_client.get(url).status_code == 200
def test_staff_can_access_variant_delete(
        staff_client, staff_user, permission_edit_product, product):
    """edit_product gates the variant delete page."""
    variant = product.variants.get()
    assert not staff_user.has_perm('product.edit_product')
    url = reverse(
        'dashboard:variant-delete',
        kwargs={'product_pk': product.pk, 'variant_pk': variant.pk})
    assert staff_client.get(url).status_code == 302
    staff_user.user_permissions.add(permission_edit_product)
    staff_user = User.objects.get(pk=staff_user.pk)  # refresh perm cache
    assert staff_user.has_perm('product.edit_product')
    assert staff_client.get(url).status_code == 200
def test_staff_can_access_variant_images(
        staff_client, staff_user, permission_view_product, product):
    """view_product gates the variant images page."""
    variant = product.variants.get()
    assert not staff_user.has_perm('product.view_product')
    url = reverse(
        'dashboard:variant-images',
        kwargs={'product_pk': product.pk, 'variant_pk': variant.pk})
    assert staff_client.get(url).status_code == 302
    staff_user.user_permissions.add(permission_view_product)
    staff_user = User.objects.get(pk=staff_user.pk)  # refresh perm cache
    assert staff_user.has_perm('product.view_product')
    assert staff_client.get(url).status_code == 200
def test_staff_can_access_product_image_list(
        staff_client, staff_user, permission_view_product, product):
    """Image list view is gated on the product.view_product permission."""
    assert not staff_user.has_perm('product.view_product')
    url = reverse(
        'dashboard:product-image-list', kwargs={'product_pk': product.pk})
    # Permission missing: expect a redirect.
    assert staff_client.get(url).status_code == 302
    staff_user.user_permissions.add(permission_view_product)
    # Reload to drop the cached permission set.
    staff_user = User.objects.get(pk=staff_user.pk)
    assert staff_user.has_perm('product.view_product')
    assert staff_client.get(url).status_code == 200
def test_staff_can_access_product_image_add(
        staff_client, staff_user, permission_edit_product, product):
    """Image add view is gated on the product.edit_product permission."""
    assert not staff_user.has_perm('product.edit_product')
    url = reverse(
        'dashboard:product-image-add', kwargs={'product_pk': product.pk})
    # Permission missing: expect a redirect.
    assert staff_client.get(url).status_code == 302
    staff_user.user_permissions.add(permission_edit_product)
    # Reload to drop the cached permission set.
    staff_user = User.objects.get(pk=staff_user.pk)
    assert staff_user.has_perm('product.edit_product')
    assert staff_client.get(url).status_code == 200
def test_staff_can_access_product_image_update(
        staff_client, staff_user, permission_edit_product, product_with_image):
    """Image update view is gated on the product.edit_product permission."""
    product_image = product_with_image.images.get()
    assert not staff_user.has_perm('product.edit_product')
    url = reverse(
        'dashboard:product-image-update',
        kwargs={
            'product_pk': product_with_image.pk,
            'img_pk': product_image.pk})
    # Permission missing: expect a redirect.
    assert staff_client.get(url).status_code == 302
    staff_user.user_permissions.add(permission_edit_product)
    # Reload to drop the cached permission set.
    staff_user = User.objects.get(pk=staff_user.pk)
    assert staff_user.has_perm('product.edit_product')
    assert staff_client.get(url).status_code == 200
def test_staff_can_access_product_image_delete(
        staff_client, staff_user, permission_edit_product, product_with_image):
    """Image delete view is gated on the product.edit_product permission."""
    product_image = product_with_image.images.get()
    assert not staff_user.has_perm('product.edit_product')
    url = reverse(
        'dashboard:product-image-delete',
        kwargs={
            'product_pk': product_with_image.pk,
            'img_pk': product_image.pk})
    # Permission missing: expect a redirect.
    assert staff_client.get(url).status_code == 302
    staff_user.user_permissions.add(permission_edit_product)
    # Reload to drop the cached permission set.
    staff_user = User.objects.get(pk=staff_user.pk)
    assert staff_user.has_perm('product.edit_product')
    assert staff_client.get(url).status_code == 200
def test_staff_with_permissions_can_view_products_attributes_list(
        staff_client, staff_user, permission_view_properties, color_attribute):
    """Attribute list/details views require product.view_properties."""
    assert not staff_user.has_perm('product.view_properties')
    urls = [
        reverse('dashboard:product-attributes'),
        reverse(
            'dashboard:product-attribute-details', args=[color_attribute.pk]),
    ]
    # Both views redirect until the permission is granted.
    for url in urls:
        assert staff_client.get(url).status_code == 302
    staff_user.user_permissions.add(permission_view_properties)
    # Reload to drop the cached permission set.
    staff_user = User.objects.get(pk=staff_user.pk)
    assert staff_user.has_perm('product.view_properties')
    for url in urls:
        assert staff_client.get(url).status_code == 200
def test_staff_with_permissions_can_update_add_and_delete_products_attributes(
        staff_client, staff_user, permission_edit_properties, color_attribute):
    """Attribute update/delete/add views require product.edit_properties."""
    assert not staff_user.has_perm('product.edit_properties')
    urls = [
        reverse(
            'dashboard:product-attribute-update', args=[color_attribute.pk]),
        reverse(
            'dashboard:product-attribute-delete', args=[color_attribute.pk]),
        reverse('dashboard:product-attribute-add'),
    ]
    # All three views redirect until the permission is granted.
    for url in urls:
        assert staff_client.get(url).status_code == 302
    staff_user.user_permissions.add(permission_edit_properties)
    # Reload to drop the cached permission set.
    staff_user = User.objects.get(pk=staff_user.pk)
    assert staff_user.has_perm('product.edit_properties')
    for url in urls:
        assert staff_client.get(url).status_code == 200
def test_staff_can_access_attribute_create(
        staff_client, staff_user, permission_edit_properties, color_attribute):
    """Attribute-value add view requires product.edit_properties."""
    assert not staff_user.has_perm('product.edit_properties')
    url = reverse(
        'dashboard:product-attribute-value-add',
        kwargs={'attribute_pk': color_attribute.pk})
    # Permission missing: expect a redirect.
    assert staff_client.get(url).status_code == 302
    staff_user.user_permissions.add(permission_edit_properties)
    # Reload to drop the cached permission set.
    staff_user = User.objects.get(pk=staff_user.pk)
    assert staff_user.has_perm('product.edit_properties')
    assert staff_client.get(url).status_code == 200
def test_staff_can_access_attribute_edit(
        staff_client, staff_user, permission_edit_properties, color_attribute):
    """Attribute-value update view requires product.edit_properties."""
    value = color_attribute.values.first()
    assert not staff_user.has_perm('product.edit_properties')
    url = reverse(
        'dashboard:product-attribute-value-update',
        kwargs={'attribute_pk': color_attribute.pk, 'value_pk': value.pk})
    # Permission missing: expect a redirect.
    assert staff_client.get(url).status_code == 302
    staff_user.user_permissions.add(permission_edit_properties)
    # Reload to drop the cached permission set.
    staff_user = User.objects.get(pk=staff_user.pk)
    assert staff_user.has_perm('product.edit_properties')
    assert staff_client.get(url).status_code == 200
def test_staff_can_access_attribute_delete(
        staff_client, staff_user, permission_edit_properties, color_attribute):
    """Attribute-value delete view requires product.edit_properties."""
    value = color_attribute.values.first()
    assert not staff_user.has_perm('product.edit_properties')
    url = reverse(
        'dashboard:product-attribute-value-delete',
        kwargs={'attribute_pk': color_attribute.pk, 'value_pk': value.pk})
    # Permission missing: expect a redirect.
    assert staff_client.get(url).status_code == 302
    staff_user.user_permissions.add(permission_edit_properties)
    # Reload to drop the cached permission set.
    staff_user = User.objects.get(pk=staff_user.pk)
    assert staff_user.has_perm('product.edit_properties')
    assert staff_client.get(url).status_code == 200
def test_staff_with_permissions_can_view_shipping_methods_and_details(
        staff_client, staff_user, permission_view_shipping, shipping_method):
    """Shipping list/details views require shipping.view_shipping."""
    assert not staff_user.has_perm('shipping.view_shipping')
    urls = [
        reverse('dashboard:shipping-methods'),
        reverse(
            'dashboard:shipping-method-details', args=[shipping_method.pk]),
    ]
    # Both views redirect until the permission is granted.
    for url in urls:
        assert staff_client.get(url).status_code == 302
    staff_user.user_permissions.add(permission_view_shipping)
    # Reload to drop the cached permission set.
    staff_user = User.objects.get(pk=staff_user.pk)
    assert staff_user.has_perm('shipping.view_shipping')
    for url in urls:
        assert staff_client.get(url).status_code == 200
def test_staff_with_permissions_can_update_add_and_delete_shipping_method(
        staff_client, staff_user, permission_edit_shipping, shipping_method):
    """Shipping update/delete/add views require shipping.edit_shipping."""
    assert not staff_user.has_perm('shipping.edit_shipping')
    urls = [
        reverse('dashboard:shipping-method-update', args=[shipping_method.pk]),
        reverse('dashboard:shipping-method-delete', args=[shipping_method.pk]),
        reverse('dashboard:shipping-method-add'),
    ]
    # All three views redirect until the permission is granted.
    for url in urls:
        assert staff_client.get(url).status_code == 302
    staff_user.user_permissions.add(permission_edit_shipping)
    # Reload to drop the cached permission set.
    staff_user = User.objects.get(pk=staff_user.pk)
    assert staff_user.has_perm('shipping.edit_shipping')
    for url in urls:
        assert staff_client.get(url).status_code == 200
def test_staff_with_permissions_can_edit_customer(
        staff_client, customer_user, staff_user, permission_edit_user,
        permission_view_user):
    """Staff holding edit+view user permissions can open and submit the
    customer update form; the POST actually changes the customer's email."""
    assert customer_user.email == 'test@example.com'
    url = reverse('dashboard:customer-update', args=[customer_user.pk])
    # No permissions yet: the dashboard redirects.
    assert staff_client.get(url).status_code == 302
    staff_user.user_permissions.add(permission_edit_user)
    staff_user.user_permissions.add(permission_view_user)
    # Reload to drop the cached permission set.
    staff_user = User.objects.get(pk=staff_user.pk)
    assert staff_user.has_perm('account.edit_user')
    assert staff_user.has_perm('account.view_user')
    assert staff_client.get(url).status_code == 200
    staff_client.post(
        url, {'email': 'newemail@example.com', 'is_active': True})
    # Re-fetch the customer to observe the persisted change.
    customer_user = User.objects.get(pk=customer_user.pk)
    assert customer_user.email == 'newemail@example.com'
    assert customer_user.is_active
def test_staff_with_permissions_can_add_customer(
        staff_client, staff_user, permission_edit_user, permission_view_user):
    """Staff granted edit+view user permissions can create a customer."""
    staff_user.user_permissions.add(permission_edit_user)
    staff_user.user_permissions.add(permission_view_user)
    # Reload to drop the cached permission set.
    staff_user = User.objects.get(pk=staff_user.pk)
    assert staff_user.has_perm('account.edit_user')
    assert staff_user.has_perm('account.view_user')
    url = reverse('dashboard:customer-create')
    assert staff_client.get(url).status_code == 200
    staff_client.post(
        url, {'email': 'newcustomer@example.com', 'is_active': True})
    # The new customer must exist and be active.
    customer = User.objects.get(email='newcustomer@example.com')
    assert customer.is_active
def test_staff_can_view_and_edit_site_settings(
        staff_client, staff_user, site_settings, permission_edit_settings):
    """Site settings update view requires the site.edit_settings permission."""
    assert not staff_user.has_perm('site.edit_settings')
    url = reverse('dashboard:site-update', args=[site_settings.pk])
    # Permission missing: expect a redirect.
    assert staff_client.get(url).status_code == 302
    staff_user.user_permissions.add(permission_edit_settings)
    # Reload to drop the cached permission set.
    staff_user = User.objects.get(pk=staff_user.pk)
    assert staff_user.has_perm('site.edit_settings')
    assert staff_client.get(url).status_code == 200
def test_staff_can_view_and_edit_taxes_settings(
        staff_client, staff_user, site_settings, permission_edit_settings):
    """Tax configuration view requires the site.edit_settings permission."""
    assert not staff_user.has_perm('site.edit_settings')
    url = reverse('dashboard:configure-taxes')
    response = staff_client.get(url)
    assert response.status_code == 302
    staff_user.user_permissions.add(permission_edit_settings)
    # Reload the user to clear the cached permission set.
    staff_user = User.objects.get(pk=staff_user.pk)
    assert staff_user.has_perm('site.edit_settings')
    response = staff_client.get(url)
    # BUG FIX: this final assertion was missing, so the test never verified
    # that granting the permission actually opens the taxes view.
    assert response.status_code == 200
def test_staff_can_view_menus_and_details(
        staff_client, staff_user, permission_view_menu, menu_item):
    """Menu list/details views require the menu.view_menu permission."""
    urls = [
        reverse('dashboard:menu-list'),
        reverse('dashboard:menu-details', args=[menu_item.menu.pk]),
        reverse(
            'dashboard:menu-item-details',
            args=[menu_item.menu.pk, menu_item.pk]),
    ]
    assert not staff_user.has_perm('menu.view_menu')
    # All three views redirect until the permission is granted.
    for url in urls:
        assert staff_client.get(url).status_code == 302
    staff_user.user_permissions.add(permission_view_menu)
    # Reload to drop the cached permission set.
    staff_user = User.objects.get(pk=staff_user.pk)
    assert staff_user.has_perm('menu.view_menu')
    for url in urls:
        assert staff_client.get(url).status_code == 200
def test_staff_can_edit_menus(
        staff_client, staff_user, permission_edit_menu, menu_item):
    """Menu add/edit/delete views require the menu.edit_menu permission."""
    menu_add_url = reverse('dashboard:menu-add')
    menu_edit_url = reverse('dashboard:menu-edit', args=[menu_item.menu.pk])
    menu_delete_url = reverse(
        'dashboard:menu-delete', args=[menu_item.menu.pk])
    # BUG FIX: the original asserted 'menu.view_menu' here, which is not the
    # permission these views check; the relevant precondition is that the
    # edit permission granted below is not yet held.
    assert not staff_user.has_perm('menu.edit_menu')
    response = staff_client.get(menu_add_url)
    assert response.status_code == 302
    response = staff_client.get(menu_edit_url)
    assert response.status_code == 302
    response = staff_client.get(menu_delete_url)
    assert response.status_code == 302
    staff_user.user_permissions.add(permission_edit_menu)
    # Reload the user to drop the cached permission set.
    staff_user = User.objects.get(pk=staff_user.pk)
    assert staff_user.has_perm('menu.edit_menu')
    response = staff_client.get(menu_add_url)
    assert response.status_code == 200
    response = staff_client.get(menu_edit_url)
    assert response.status_code == 200
    response = staff_client.get(menu_delete_url)
    assert response.status_code == 200
def test_staff_can_edit_menu_items(
        staff_client, staff_user, permission_edit_menu, menu_item):
    """Menu-item add/edit/delete views require the menu.edit_menu permission."""
    menu_item_add_url = reverse(
        'dashboard:menu-item-add', args=[menu_item.menu.pk, menu_item.pk])
    menu_item_edit_url = reverse(
        'dashboard:menu-item-edit', args=[menu_item.menu.pk, menu_item.pk])
    menu_item_delete_url = reverse(
        'dashboard:menu-item-delete', args=[menu_item.menu.pk, menu_item.pk])
    # BUG FIX: the original asserted 'menu.view_menu' here, which is not the
    # permission these views check; assert the edit permission instead.
    assert not staff_user.has_perm('menu.edit_menu')
    response = staff_client.get(menu_item_add_url)
    assert response.status_code == 302
    response = staff_client.get(menu_item_edit_url)
    assert response.status_code == 302
    response = staff_client.get(menu_item_delete_url)
    assert response.status_code == 302
    staff_user.user_permissions.add(permission_edit_menu)
    # Reload the user to drop the cached permission set.
    staff_user = User.objects.get(pk=staff_user.pk)
    assert staff_user.has_perm('menu.edit_menu')
    response = staff_client.get(menu_item_add_url)
    assert response.status_code == 200
    response = staff_client.get(menu_item_edit_url)
    assert response.status_code == 200
    response = staff_client.get(menu_item_delete_url)
    assert response.status_code == 200
def test_staff_can_remove_user(staff_client, staff_user, permission_edit_user):
    """Customer delete view requires the account.edit_user permission."""
    url = reverse('dashboard:customer-delete', args=[staff_user.pk])
    response = staff_client.get(url)
    assert response.status_code == 302
    staff_user.user_permissions.add(permission_edit_user)
    # Reload the user to drop the cached permission set.
    staff_user = User.objects.get(pk=staff_user.pk)
    # Consistency with the sibling tests: verify the grant took effect
    # before asserting the view is reachable.
    assert staff_user.has_perm('account.edit_user')
    response = staff_client.get(url)
    assert response.status_code == 200
| 39.513036
| 79
| 0.747856
| 6,276
| 46,981
| 5.26434
| 0.01689
| 0.093435
| 0.109567
| 0.131481
| 0.96834
| 0.957838
| 0.952057
| 0.930325
| 0.914646
| 0.898272
| 0
| 0.013616
| 0.151146
| 46,981
| 1,188
| 80
| 39.546296
| 0.814845
| 0
| 0
| 0.78172
| 0
| 0
| 0.138588
| 0.096571
| 0
| 0
| 0
| 0
| 0.31828
| 1
| 0.102151
| false
| 0
| 0.002151
| 0
| 0.104301
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4b8e25a8520883f72ecfb7ac7c9df69f992fb9fe
| 12,655
|
py
|
Python
|
ppdpy/tests/test_expression_compiler.py
|
vkinoshita/ppdpy
|
f22e2ee4f7f01e98e3ebf3e3a426333f22fedd39
|
[
"MIT"
] | null | null | null |
ppdpy/tests/test_expression_compiler.py
|
vkinoshita/ppdpy
|
f22e2ee4f7f01e98e3ebf3e3a426333f22fedd39
|
[
"MIT"
] | null | null | null |
ppdpy/tests/test_expression_compiler.py
|
vkinoshita/ppdpy
|
f22e2ee4f7f01e98e3ebf3e3a426333f22fedd39
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from ppdpy.expression_compiler import lex, compile
from ppdpy.exceptions import ExpressionSyntaxError
from ppdpy.nodes import *
class TestLex(TestCase):
    """Tokenizer tests for ppdpy.expression_compiler.lex."""

    def _tokens(self, source):
        # lex yields tokens lazily; materialize the stream for comparison.
        return list(lex(source))

    def test_ids(self):
        self.assertEqual(self._tokens('foo'), ['foo'])
        # Surrounding and internal whitespace is ignored between identifiers.
        for text in ('foo bar', 'foo bar', ' foo bar', 'foo bar ',
                     ' foo bar '):
            self.assertEqual(self._tokens(text), ['foo', 'bar'])

    def test_parens(self):
        # Parentheses tokenize regardless of spacing.
        for text in ('()', ' ()', '( )', '() ', ' ( ) ', ' ( ) ', ' ( ) '):
            self.assertEqual(self._tokens(text), ['(', ')'])
        for text in ('(())', '( ())', '(( ))', '(()) '):
            self.assertEqual(self._tokens(text), ['(', '(', ')', ')'])
        # The lexer does not balance parens; that is the parser's job.
        self.assertEqual(self._tokens(')()('), [')', '(', ')', '('])

    def test_expressions(self):
        cases = [
            ('a and b', ['a', 'and', 'b']),
            ('a and b or c', ['a', 'and', 'b', 'or', 'c']),
            ('(a)', ['(', 'a', ')']),
            ('(a and b) or c', ['(', 'a', 'and', 'b', ')', 'or', 'c']),
            ('(a and b)or c', ['(', 'a', 'and', 'b', ')', 'or', 'c']),
            ('not (a and b) or c',
             ['not', '(', 'a', 'and', 'b', ')', 'or', 'c']),
            ('not(a and b)or c',
             ['not', '(', 'a', 'and', 'b', ')', 'or', 'c']),
            ('a and (b or c)', ['a', 'and', '(', 'b', 'or', 'c', ')']),
            ('a and(b or c)', ['a', 'and', '(', 'b', 'or', 'c', ')']),
            ('a and not (b or c)',
             ['a', 'and', 'not', '(', 'b', 'or', 'c', ')']),
            ('a and not(b or c)',
             ['a', 'and', 'not', '(', 'b', 'or', 'c', ')']),
            (' a and (b or c) ', ['a', 'and', '(', 'b', 'or', 'c', ')']),
        ]
        for text, expected in cases:
            self.assertEqual(self._tokens(text), expected)
class TestParse(TestCase):
    """Parser tests for ppdpy.expression_compiler.compile."""

    def test_simple(self):
        self.assertEqual(compile('a'), Id('a'))
        self.assertEqual(compile('A'), Id('A'))
        self.assertEqual(compile('not a'), Not(Id('a')))
        self.assertEqual(compile('not A'), Not(Id('A')))
        self.assertEqual(compile('a and b'), And(Id('a'), Id('b')))
        # 'and' chains associate to the left ...
        self.assertEqual(
            compile('a and b and c'), And(And(Id('a'), Id('b')), Id('c')))
        self.assertEqual(
            compile('a and b and c and d'),
            And(And(And(Id('a'), Id('b')), Id('c')), Id('d')))
        # ... while 'or' chains associate to the right.
        self.assertEqual(compile('a or b'), Or(Id('a'), Id('b')))
        self.assertEqual(
            compile('a or b or c'), Or(Id('a'), Or(Id('b'), Id('c'))))
        self.assertEqual(
            compile('a or b or c or d'),
            Or(Id('a'), Or(Id('b'), Or(Id('c'), Id('d')))))

    def test_case_sensitivity(self):
        # Keywords are case-insensitive; identifiers keep their case.
        self.assertEqual(compile('NOT a'), Not(Id('a')))
        self.assertEqual(compile('a AND b'), And(Id('a'), Id('b')))
        self.assertEqual(compile('a OR b'), Or(Id('a'), Id('b')))

    def test_precedence(self):
        # Binding strength: not > and > or.
        self.assertEqual(
            compile('a or b and c'), Or(Id('a'), And(Id('b'), Id('c'))))
        self.assertEqual(
            compile('a and b or c'), Or(And(Id('a'), Id('b')), Id('c')))
        self.assertEqual(
            compile('a and b or c and d'),
            Or(And(Id('a'), Id('b')), And(Id('c'), Id('d'))))
        self.assertEqual(
            compile('not a or b and c'),
            Or(Not(Id('a')), And(Id('b'), Id('c'))))
        self.assertEqual(
            compile('a or not b and c'),
            Or(Id('a'), And(Not(Id('b')), Id('c'))))
        self.assertEqual(
            compile('a or b and not c'),
            Or(Id('a'), And(Id('b'), Not(Id('c')))))

    def test_nots(self):
        self.assertEqual(compile('not a and b'), And(Not(Id('a')), Id('b')))
        self.assertEqual(compile('a and not b'), And(Id('a'), Not(Id('b'))))
        self.assertEqual(
            compile('not a and not b'), And(Not(Id('a')), Not(Id('b'))))
        self.assertEqual(compile('not a or b'), Or(Not(Id('a')), Id('b')))
        self.assertEqual(compile('a or not b'), Or(Id('a'), Not(Id('b'))))
        self.assertEqual(
            compile('not a or not b'), Or(Not(Id('a')), Not(Id('b'))))

    def test_parens(self):
        self.assertEqual(compile('(a)'), Id('a'))
        self.assertEqual(compile('not (a)'), Not(Id('a')))
        self.assertEqual(compile('(a and b)'), And(Id('a'), Id('b')))
        self.assertEqual(compile('((a) and (b))'), And(Id('a'), Id('b')))
        self.assertEqual(compile('(((a)) and ((b)))'), And(Id('a'), Id('b')))
        self.assertEqual(
            compile('a or (b and c)'), Or(Id('a'), And(Id('b'), Id('c'))))
        self.assertEqual(
            compile('(a and b) or c'), Or(And(Id('a'), Id('b')), Id('c')))
        self.assertEqual(
            compile('a and (b or c)'), And(Id('a'), Or(Id('b'), Id('c'))))
        self.assertEqual(
            compile('(a or b) and c'), And(Or(Id('a'), Id('b')), Id('c')))
        self.assertEqual(
            compile('(a and b) or (c and d)'),
            Or(And(Id('a'), Id('b')), And(Id('c'), Id('d'))))
        self.assertEqual(
            compile('(a and b) or (c and d) or (e and f)'),
            Or(And(Id('a'), Id('b')),
               Or(And(Id('c'), Id('d')), And(Id('e'), Id('f')))))
        self.assertEqual(compile('not (a and b)'), Not(And(Id('a'), Id('b'))))
        self.assertEqual(
            compile('a and (not b or c)'),
            And(Id('a'), Or(Not(Id('b')), Id('c'))))
        self.assertEqual(
            compile('a and not (b or c)'),
            And(Id('a'), Not(Or(Id('b'), Id('c')))))
        self.assertEqual(
            compile('not (a and b) or c'),
            Or(Not(And(Id('a'), Id('b'))), Id('c')))

    def test_precedence_equivalences(self):
        # BUG FIX: this method was also named test_precedence, which silently
        # shadowed the method above so its assertions never ran. Renamed so
        # both sets of checks execute.
        self.assertEqual(compile('a and b or c'), compile('(a and b) or c'))
        self.assertEqual(compile('a or b and c'), compile('a or (b and c)'))
        self.assertEqual(compile('a or not b'), compile('a or (not b)'))
        self.assertEqual(compile('not a or b'), compile('(not a) or b'))

    def test_errors(self):
        # Every malformed expression must raise ExpressionSyntaxError.
        bad_expressions = [
            '', 'and', 'or', 'not', '()',
            'a and', 'and a', 'a or', 'or a', 'a not',
            'not and', 'not or',
            '(a and b', 'a (and b', 'a and (b', 'a and b (',
            ') a and b', 'a) and b', 'a and) b', 'a and b)',
            '(a or b', 'a (or b', 'a or (b', 'a or b (',
            ') a or b', 'a) or b', 'a or) b', 'a or b)',
            '(a and b or c', '((a and b) or c', '(a and b) or c)',
        ]
        for source in bad_expressions:
            with self.assertRaises(ExpressionSyntaxError):
                compile(source)
class TestEval(TestCase):
    """Evaluation tests for the expression node classes."""

    def _check(self, expression, cases):
        # cases: iterable of (identifier-set, expected-boolean) pairs.
        for identifiers, expected in cases:
            self.assertEqual(expression.eval(identifiers), expected)

    def test_eval_simple(self):
        self.assertEqual(Id('a').eval({'a'}), True)
        self.assertEqual(Id('a').eval(set()), False)
        self.assertEqual(Not(Id('a')).eval({'a'}), False)
        self.assertEqual(Not(Id('a')).eval(set()), True)
        # Identifier matching is case sensitive.
        self.assertEqual(Id('A').eval({'a'}), False)

    def test_eval_and(self):
        self._check(And(Id('a'), Id('b')), [
            ({'a', 'b'}, True), ({'a'}, False), ({'b'}, False),
            (set(), False)])
        self._check(And(Not(Id('a')), Id('b')), [
            ({'a', 'b'}, False), ({'a'}, False), ({'b'}, True),
            (set(), False)])
        self._check(Not(And(Id('a'), Id('b'))), [
            ({'a', 'b'}, False), ({'a'}, True), ({'b'}, True),
            (set(), True)])

    def test_eval_or(self):
        self._check(Or(Id('a'), Id('b')), [
            ({'a', 'b'}, True), ({'a'}, True), ({'b'}, True),
            (set(), False)])
        self._check(Or(Not(Id('a')), Id('b')), [
            ({'a', 'b'}, True), ({'a'}, False), ({'b'}, True),
            (set(), True)])
        self._check(Not(Or(Id('a'), Id('b'))), [
            ({'a', 'b'}, False), ({'a'}, False), ({'b'}, False),
            (set(), True)])

    def test_eval_and_or(self):
        self._check(And(Id('a'), Or(Id('b'), Id('c'))), [
            ({'a', 'b', 'c'}, True), ({'a', 'b'}, True), ({'a', 'c'}, True),
            ({'b', 'c'}, False), ({'a'}, False), ({'b'}, False),
            ({'c'}, False), (set(), False)])
        self._check(Or(Id('a'), And(Id('b'), Id('c'))), [
            ({'a', 'b', 'c'}, True), ({'a', 'b'}, True), ({'a', 'c'}, True),
            ({'b', 'c'}, True), ({'a'}, True), ({'b'}, False),
            ({'c'}, False), (set(), False)])
        self._check(Not(And(Id('a'), Or(Id('b'), Id('c')))), [
            ({'a', 'b', 'c'}, False), ({'a', 'b'}, False),
            ({'a', 'c'}, False), ({'b', 'c'}, True), ({'a'}, True),
            ({'b'}, True), ({'c'}, True), (set(), True)])
| 43.191126
| 149
| 0.547847
| 1,623
| 12,655
| 4.25878
| 0.029575
| 0.275608
| 0.173611
| 0.201389
| 0.940683
| 0.922888
| 0.847656
| 0.796007
| 0.764468
| 0.745949
| 0
| 0
| 0.218412
| 12,655
| 292
| 150
| 43.339041
| 0.698817
| 0
| 0
| 0.474886
| 0
| 0
| 0.115062
| 0
| 0
| 0
| 0
| 0
| 0.721461
| 1
| 0.063927
| false
| 0
| 0.018265
| 0
| 0.09589
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
4ba74557b9a63922e883b93f6a15aff30db635a2
| 191
|
py
|
Python
|
yalul/parsers/parse_response.py
|
OtavioHenrique/yalul
|
ce99e32365ed5607527b9f2f39705ad5d9e20ba2
|
[
"MIT"
] | 1
|
2021-04-01T20:22:36.000Z
|
2021-04-01T20:22:36.000Z
|
yalul/parsers/parse_response.py
|
OtavioHenrique/yalul
|
ce99e32365ed5607527b9f2f39705ad5d9e20ba2
|
[
"MIT"
] | 1
|
2020-11-20T22:24:38.000Z
|
2020-11-20T22:24:38.000Z
|
yalul/parsers/parse_response.py
|
OtavioHenrique/yalul
|
ce99e32365ed5607527b9f2f39705ad5d9e20ba2
|
[
"MIT"
] | null | null | null |
class ParseResponse:
    """Result of a parser run: the produced AST plus the error collector."""

    def __init__(self, ast, parse_errors):
        # Keep both the tree and the error container for later inspection.
        self.ast, self.parse_errors = ast, parse_errors

    def errors(self):
        """Return the list of errors gathered while parsing."""
        return self.parse_errors.errors
| 23.875
| 42
| 0.664921
| 24
| 191
| 4.958333
| 0.375
| 0.369748
| 0.252101
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.256545
| 191
| 7
| 43
| 27.285714
| 0.838028
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.166667
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
4bc5593b36453cfe5073d7fb45794c734cdea8ac
| 158,931
|
py
|
Python
|
smartrecruiters_python_client/apis/candidates_api.py
|
roksela/smartrecruiters-python-client
|
6d0849d173a3d6718b5f0769098f4c76857f637d
|
[
"MIT"
] | 5
|
2018-03-27T08:20:13.000Z
|
2022-03-30T06:23:38.000Z
|
smartrecruiters_python_client/apis/candidates_api.py
|
roksela/smartrecruiters-python-client
|
6d0849d173a3d6718b5f0769098f4c76857f637d
|
[
"MIT"
] | null | null | null |
smartrecruiters_python_client/apis/candidates_api.py
|
roksela/smartrecruiters-python-client
|
6d0849d173a3d6718b5f0769098f4c76857f637d
|
[
"MIT"
] | 2
|
2018-12-05T04:48:37.000Z
|
2020-12-17T12:12:12.000Z
|
# coding: utf-8
"""
Unofficial python library for the SmartRecruiters API
The SmartRecruiters API provides a platform to integrate services or applications, build apps and create fully customizable career sites. It exposes SmartRecruiters functionality and allows to connect and build software enhancing it.
OpenAPI spec version: 1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class CandidatesApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def candidates_add(self, candidate, **kwargs):
"""
Create a new candidate and assign to a Talent Pool
Create a new candidate and assign to a Talent Pool. **Tracking candidate source** When adding a new candidate, it's very important you track its source appropriately. In order to associate a source with your app/integration, add below object to a candidate body object for this endpoint: ``` { \"sourceDetails\": { \"sourceTypeId\": \"string\", \"sourceSubTypeId\": \"string\", \"sourceId\": \"string\" } } ``` **sourceTypeId** - it's a Source Type - available values can be found using [get /configuration/sources](https://dev.smartrecruiters.com/customer-api/live-docs/#!/configuration/configuration_getSourceTypes) endpoint **sourceSubTypeId** - it's a Source Subtype, an optional parameter - available values can be found using [get /configuration/sources](https://dev.smartrecruiters.com/customer-api/live-docs/#!/configuration/configuration_getSourceTypes) endpoint **sourceId** - it's a Source Id - available values for a given sourceTypeId can be found using [get /configuration/sources/:sourceTypeId/values](https://dev.smartrecruiters.com/customer-api/live-docs/#!/configuration/configuration_getSources) endpoint NOTE: Not defining the source will default to **API** source. NOTE II: In case you can't find an appropriate source to track against you can: * Create a custom source for each customer account separately on [this admin page](https://www.smartrecruiters.com/settings/configuration/custom-sources\\) (you need to be logged in as an admin to the customer account in order to view this page) * Request to [partners@smartrecruiters.com](mailto:partners@smartrecruiters.com) adding a standard source that will be available for all customers if your app/integration is productised (available to all SmartRecruiters customers)
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.candidates_add(candidate, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param CandidateInput candidate: Candidate object that needs to be created. (required)
:return: CandidateDetails
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.candidates_add_with_http_info(candidate, **kwargs)
else:
(data) = self.candidates_add_with_http_info(candidate, **kwargs)
return data
def candidates_add_with_http_info(self, candidate, **kwargs):
"""
Create a new candidate and assign to a Talent Pool
Create a new candidate and assign to a Talent Pool. **Tracking candidate source** When adding a new candidate, it's very important you track its source appropriately. In order to associate a source with your app/integration, add below object to a candidate body object for this endpoint: ``` { \"sourceDetails\": { \"sourceTypeId\": \"string\", \"sourceSubTypeId\": \"string\", \"sourceId\": \"string\" } } ``` **sourceTypeId** - it's a Source Type - available values can be found using [get /configuration/sources](https://dev.smartrecruiters.com/customer-api/live-docs/#!/configuration/configuration_getSourceTypes) endpoint **sourceSubTypeId** - it's a Source Subtype, an optional parameter - available values can be found using [get /configuration/sources](https://dev.smartrecruiters.com/customer-api/live-docs/#!/configuration/configuration_getSourceTypes) endpoint **sourceId** - it's a Source Id - available values for a given sourceTypeId can be found using [get /configuration/sources/:sourceTypeId/values](https://dev.smartrecruiters.com/customer-api/live-docs/#!/configuration/configuration_getSources) endpoint NOTE: Not defining the source will default to **API** source. NOTE II: In case you can't find an appropriate source to track against you can: * Create a custom source for each customer account separately on [this admin page](https://www.smartrecruiters.com/settings/configuration/custom-sources\\) (you need to be logged in as an admin to the customer account in order to view this page) * Request to [partners@smartrecruiters.com](mailto:partners@smartrecruiters.com) adding a standard source that will be available for all customers if your app/integration is productised (available to all SmartRecruiters customers)
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.candidates_add_with_http_info(candidate, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param CandidateInput candidate: Candidate object that needs to be created. (required)
:return: CandidateDetails
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['candidate']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method candidates_add" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'candidate' is set
if ('candidate' not in params) or (params['candidate'] is None):
raise ValueError("Missing the required parameter `candidate` when calling `candidates_add`")
collection_formats = {}
resource_path = '/candidates'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'candidate' in params:
body_params = params['candidate']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json; charset=utf-8'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['key']
return self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='CandidateDetails',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def candidates_add_to_job(self, candidate, job_id, **kwargs):
"""
Create a new candidate and assign to a job
Create a new candidate and assign to a job. **Tracking candidate source** When adding a new candidate, it's very important you track its source appropriately. In order to associate a source with your app / integration, add the below object to a candidate body object for this endpoint: ``` { \"sourceDetails\": { \"sourceTypeId\": \"string\", \"sourceSubTypeId\": \"string\", \"sourceId\": \"string\" } } ``` **sourceTypeId** - it's a Source Type - available values can be found using [get /configuration/sources](https://dev.smartrecruiters.com/customer-api/live-docs/#!/configuration/configuration_getSourceTypes) endpoint **sourceSubTypeId** - it's a Source Subtype, an optional parameter - available values can be found using [get /configuration/sources](https://dev.smartrecruiters.com/customer-api/live-docs/#!/configuration/configuration_getSourceTypes) endpoint **sourceId** - it's a Source Id - available values for a given sourceTypeId can be found using [get /configuration/sources/:sourceTypeId/values](https://dev.smartrecruiters.com/customer-api/live-docs/#!/configuration/configuration_getSources) endpoint NOTE: Not defining the source will default to **API** source. NOTE II: In case you can't find an appropriate source to track against you can: * Create a custom source for each customer account separately on [this admin page](https://www.smartrecruiters.com/settings/configuration/custom-sources\\) (you need to be logged in as an admin to the customer account in order to view this page) * Request to [partners@smartrecruiters.com](mailto:partners@smartrecruiters.com) adding a standard source that will be available for all customers if your app / integration is productised (available to all SmartRecruiters customers)
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.candidates_add_to_job(candidate, job_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param CandidateInput candidate: Candidate object that needs to be created. (required)
:param str job_id: job identifier (required)
:return: CandidateDetails
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.candidates_add_to_job_with_http_info(candidate, job_id, **kwargs)
else:
(data) = self.candidates_add_to_job_with_http_info(candidate, job_id, **kwargs)
return data
def candidates_add_to_job_with_http_info(self, candidate, job_id, **kwargs):
"""
Create a new candidate and assign to a job
Create a new candidate and assign to a job. **Tracking candidate source** When adding a new candidate, it's very important you track its source appropriately. In order to associate a source with your app / integration, add the below object to a candidate body object for this endpoint: ``` { \"sourceDetails\": { \"sourceTypeId\": \"string\", \"sourceSubTypeId\": \"string\", \"sourceId\": \"string\" } } ``` **sourceTypeId** - it's a Source Type - available values can be found using [get /configuration/sources](https://dev.smartrecruiters.com/customer-api/live-docs/#!/configuration/configuration_getSourceTypes) endpoint **sourceSubTypeId** - it's a Source Subtype, an optional parameter - available values can be found using [get /configuration/sources](https://dev.smartrecruiters.com/customer-api/live-docs/#!/configuration/configuration_getSourceTypes) endpoint **sourceId** - it's a Source Id - available values for a given sourceTypeId can be found using [get /configuration/sources/:sourceTypeId/values](https://dev.smartrecruiters.com/customer-api/live-docs/#!/configuration/configuration_getSources) endpoint NOTE: Not defining the source will default to **API** source. NOTE II: In case you can't find an appropriate source to track against you can: * Create a custom source for each customer account separately on [this admin page](https://www.smartrecruiters.com/settings/configuration/custom-sources\\) (you need to be logged in as an admin to the customer account in order to view this page) * Request to [partners@smartrecruiters.com](mailto:partners@smartrecruiters.com) adding a standard source that will be available for all customers if your app / integration is productised (available to all SmartRecruiters customers)
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.candidates_add_to_job_with_http_info(candidate, job_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param CandidateInput candidate: Candidate object that needs to be created. (required)
:param str job_id: job identifier (required)
:return: CandidateDetails
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['candidate', 'job_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method candidates_add_to_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'candidate' is set
if ('candidate' not in params) or (params['candidate'] is None):
raise ValueError("Missing the required parameter `candidate` when calling `candidates_add_to_job`")
# verify the required parameter 'job_id' is set
if ('job_id' not in params) or (params['job_id'] is None):
raise ValueError("Missing the required parameter `job_id` when calling `candidates_add_to_job`")
collection_formats = {}
resource_path = '/jobs/{jobId}/candidates'.replace('{format}', 'json')
path_params = {}
if 'job_id' in params:
path_params['jobId'] = params['job_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'candidate' in params:
body_params = params['candidate']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json; charset=utf-8'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['key']
return self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='CandidateDetails',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def candidates_all(self, **kwargs):
"""
Search candidates
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.candidates_all(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str q: keyword search on all candidate fields; case insensitive; e.g. java developer
:param int limit: number of elements to return. max value is 100
:param int offset: number of elements to skip while processing result
:param list[str] job_id: job filter to display candidates who applied for a job [id]; can be used repeatedly;
:param list[str] location: location keyword search which looks up a string in a candidate’s location data; can be used repeatedly; case insensitive; e.g. Krakow
:param list[int] average_rating: average rating filter to display candidates with a specific average rating (integer); can be used repeatedly; e.g. 4
:param list[str] status: candidate’s status filter in a context of a job; can be used repeatedly
:param str sub_status: candidate’s sub-status filter in a context of a job. Works only in a correlation with a set value for the \"status\" field.
:param list[str] tag: tag assigned to a candidate; can be used repeatedly; case insensitive; e.g. fluent english
:param datetime updated_after: ISO8601-formatted time boundaries for the candidate update time, Format: yyyy-MM-ddTHH:mm:ss.SSSZZ
:param str onboarding_status: candidate's onboarding status
:param list[str] property_id: candidate's property id (1-N)
:param list[str] property_value_id: candidate's property value id (1-N)
:return: Candidates
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.candidates_all_with_http_info(**kwargs)
else:
(data) = self.candidates_all_with_http_info(**kwargs)
return data
def candidates_all_with_http_info(self, **kwargs):
"""
Search candidates
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.candidates_all_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str q: keyword search on all candidate fields; case insensitive; e.g. java developer
:param int limit: number of elements to return. max value is 100
:param int offset: number of elements to skip while processing result
:param list[str] job_id: job filter to display candidates who applied for a job [id]; can be used repeatedly;
:param list[str] location: location keyword search which looks up a string in a candidate’s location data; can be used repeatedly; case insensitive; e.g. Krakow
:param list[int] average_rating: average rating filter to display candidates with a specific average rating (integer); can be used repeatedly; e.g. 4
:param list[str] status: candidate’s status filter in a context of a job; can be used repeatedly
:param str sub_status: candidate’s sub-status filter in a context of a job. Works only in a correlation with a set value for the \"status\" field.
:param list[str] tag: tag assigned to a candidate; can be used repeatedly; case insensitive; e.g. fluent english
:param datetime updated_after: ISO8601-formatted time boundaries for the candidate update time, Format: yyyy-MM-ddTHH:mm:ss.SSSZZ
:param str onboarding_status: candidate's onboarding status
:param list[str] property_id: candidate's property id (1-N)
:param list[str] property_value_id: candidate's property value id (1-N)
:return: Candidates
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['q', 'limit', 'offset', 'job_id', 'location', 'average_rating', 'status', 'sub_status', 'tag', 'updated_after', 'onboarding_status', 'property_id', 'property_value_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method candidates_all" % key
)
params[key] = val
del params['kwargs']
if 'limit' in params and params['limit'] > 100:
raise ValueError("Invalid value for parameter `limit` when calling `candidates_all`, must be a value less than or equal to `100`")
if 'limit' in params and params['limit'] < 1:
raise ValueError("Invalid value for parameter `limit` when calling `candidates_all`, must be a value greater than or equal to `1`")
if 'offset' in params and params['offset'] < 0:
raise ValueError("Invalid value for parameter `offset` when calling `candidates_all`, must be a value greater than or equal to `0`")
collection_formats = {}
resource_path = '/candidates'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'q' in params:
query_params['q'] = params['q']
if 'limit' in params:
query_params['limit'] = params['limit']
if 'offset' in params:
query_params['offset'] = params['offset']
if 'job_id' in params:
query_params['jobId'] = params['job_id']
collection_formats['jobId'] = 'multi'
if 'location' in params:
query_params['location'] = params['location']
collection_formats['location'] = 'multi'
if 'average_rating' in params:
query_params['averageRating'] = params['average_rating']
collection_formats['averageRating'] = 'multi'
if 'status' in params:
query_params['status'] = params['status']
collection_formats['status'] = 'multi'
if 'sub_status' in params:
query_params['subStatus'] = params['sub_status']
if 'tag' in params:
query_params['tag'] = params['tag']
collection_formats['tag'] = 'multi'
if 'updated_after' in params:
query_params['updatedAfter'] = params['updated_after']
if 'onboarding_status' in params:
query_params['onboardingStatus'] = params['onboarding_status']
if 'property_id' in params:
query_params['propertyId'] = params['property_id']
collection_formats['propertyId'] = 'multi'
if 'property_value_id' in params:
query_params['propertyValueId'] = params['property_value_id']
collection_formats['propertyValueId'] = 'multi'
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json; charset=utf-8'])
# Authentication setting
auth_settings = ['key']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Candidates',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def candidates_attachments_add(self, id, attachment_type, file, **kwargs):
"""
Attach files to a candidate.
Attach files to a candidate.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.candidates_attachments_add(id, attachment_type, file, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Identifier of a candidate (required)
:param str attachment_type: Type of attachment you want to upload. (required)
:param file file: The file to upload. (required)
:return: Attachment
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.candidates_attachments_add_with_http_info(id, attachment_type, file, **kwargs)
else:
(data) = self.candidates_attachments_add_with_http_info(id, attachment_type, file, **kwargs)
return data
def candidates_attachments_add_with_http_info(self, id, attachment_type, file, **kwargs):
"""
Attach files to a candidate.
Attach files to a candidate.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.candidates_attachments_add_with_http_info(id, attachment_type, file, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Identifier of a candidate (required)
:param str attachment_type: Type of attachment you want to upload. (required)
:param file file: The file to upload. (required)
:return: Attachment
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'attachment_type', 'file']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method candidates_attachments_add" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `candidates_attachments_add`")
# verify the required parameter 'attachment_type' is set
if ('attachment_type' not in params) or (params['attachment_type'] is None):
raise ValueError("Missing the required parameter `attachment_type` when calling `candidates_attachments_add`")
# verify the required parameter 'file' is set
if ('file' not in params) or (params['file'] is None):
raise ValueError("Missing the required parameter `file` when calling `candidates_attachments_add`")
collection_formats = {}
resource_path = '/candidates/{id}/attachments'.replace('{format}', 'json')
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
if 'attachment_type' in params:
form_params.append(('attachmentType', params['attachment_type']))
if 'file' in params:
local_var_files['file'] = params['file']
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json; charset=utf-8'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['multipart/form-data'])
# Authentication setting
auth_settings = ['key']
return self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Attachment',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def candidates_attachments_get(self, id, attachment_id, **kwargs):
"""
Get a candidate's attachment.
Get a candidate's attachment.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.candidates_attachments_get(id, attachment_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Identifier of a candidate (required)
:param str attachment_id: Identifier of an attachment (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.candidates_attachments_get_with_http_info(id, attachment_id, **kwargs)
else:
(data) = self.candidates_attachments_get_with_http_info(id, attachment_id, **kwargs)
return data
def candidates_attachments_get_with_http_info(self, id, attachment_id, **kwargs):
"""
Get a candidate's attachment.
Get a candidate's attachment.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.candidates_attachments_get_with_http_info(id, attachment_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Identifier of a candidate (required)
:param str attachment_id: Identifier of an attachment (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'attachment_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method candidates_attachments_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `candidates_attachments_get`")
# verify the required parameter 'attachment_id' is set
if ('attachment_id' not in params) or (params['attachment_id'] is None):
raise ValueError("Missing the required parameter `attachment_id` when calling `candidates_attachments_get`")
collection_formats = {}
resource_path = '/candidates/{id}/attachments/{attachmentId}'.replace('{format}', 'json')
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
if 'attachment_id' in params:
path_params['attachmentId'] = params['attachment_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json; charset=utf-8'])
# Authentication setting
auth_settings = ['key']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def candidates_attachments_list(self, id, **kwargs):
"""
Get list candidate's attachments.
Get list of candidate's attachments.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.candidates_attachments_list(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Identifier of a candidate (required)
:return: Attachments
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.candidates_attachments_list_with_http_info(id, **kwargs)
else:
(data) = self.candidates_attachments_list_with_http_info(id, **kwargs)
return data
def candidates_attachments_list_with_http_info(self, id, **kwargs):
"""
Get list candidate's attachments.
Get list of candidate's attachments.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.candidates_attachments_list_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Identifier of a candidate (required)
:return: Attachments
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method candidates_attachments_list" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `candidates_attachments_list`")
collection_formats = {}
resource_path = '/candidates/{id}/attachments'.replace('{format}', 'json')
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json; charset=utf-8'])
# Authentication setting
auth_settings = ['key']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Attachments',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def candidates_delete(self, id, **kwargs):
"""
Delete Candidate
Delete candidate
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.candidates_delete(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Identifier of a Candidate (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.candidates_delete_with_http_info(id, **kwargs)
else:
(data) = self.candidates_delete_with_http_info(id, **kwargs)
return data
def candidates_delete_with_http_info(self, id, **kwargs):
"""
Delete Candidate
Delete candidate
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.candidates_delete_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Identifier of a Candidate (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method candidates_delete" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `candidates_delete`")
collection_formats = {}
resource_path = '/candidates/{id}'.replace('{format}', 'json')
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json; charset=utf-8'])
# Authentication setting
auth_settings = ['key']
return self.api_client.call_api(resource_path, 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def candidates_get(self, id, **kwargs):
    """
    Get details of a candidate

    The request is synchronous by default; supply a `callback` keyword
    argument to run it asynchronously, in which case the request thread
    is returned and the callback receives the response.

    :param str id: Identifier of a Candidate (required)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :return: CandidateDetails
        If the method is called asynchronously,
        returns the request thread.
    """
    # This convenience wrapper always asks for just the deserialized data.
    kwargs['_return_http_data_only'] = True
    result = self.candidates_get_with_http_info(id, **kwargs)
    return result
def candidates_get_with_http_info(self, id, **kwargs):
    """
    Get details of a candidate

    Synchronous by default; supply a `callback` keyword argument to run
    the request asynchronously and receive the request thread back.

    :param str id: Identifier of a Candidate (required)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :return: CandidateDetails
        If the method is called asynchronously,
        returns the request thread.
    """
    allowed = ('id', 'callback', '_return_http_data_only',
               '_preload_content', '_request_timeout')
    # Reject keyword arguments this endpoint does not understand.
    for key in kwargs:
        if key not in allowed:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method candidates_get" % key
            )
    # verify the required parameter 'id' is set
    if id is None:
        raise ValueError("Missing the required parameter `id` when calling `candidates_get`")

    header_params = {
        # HTTP header `Accept`
        'Accept': self.api_client.select_header_accept(
            ['application/json; charset=utf-8']),
    }
    return self.api_client.call_api(
        '/candidates/{id}'.replace('{format}', 'json'), 'GET',
        {'id': id},   # path params
        {},           # query params
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='CandidateDetails',
        auth_settings=['key'],  # API-key authentication
        callback=kwargs.get('callback'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def candidates_onboarding_get(self, id, **kwargs):
    """
    Get Onboarding Status for a candidate

    Synchronous by default; supply a `callback` keyword argument to run
    the request asynchronously and receive the request thread back.

    :param str id: Identifier of a candidate (required)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :return: OnboardingStatus
        If the method is called asynchronously,
        returns the request thread.
    """
    # This convenience wrapper always asks for just the deserialized data.
    kwargs['_return_http_data_only'] = True
    result = self.candidates_onboarding_get_with_http_info(id, **kwargs)
    return result
def candidates_onboarding_get_with_http_info(self, id, **kwargs):
    """
    Get Onboarding Status for a candidate

    Synchronous by default; supply a `callback` keyword argument to run
    the request asynchronously and receive the request thread back.

    :param str id: Identifier of a candidate (required)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :return: OnboardingStatus
        If the method is called asynchronously,
        returns the request thread.
    """
    allowed = ('id', 'callback', '_return_http_data_only',
               '_preload_content', '_request_timeout')
    # Reject keyword arguments this endpoint does not understand.
    for key in kwargs:
        if key not in allowed:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method candidates_onboarding_get" % key
            )
    # verify the required parameter 'id' is set
    if id is None:
        raise ValueError("Missing the required parameter `id` when calling `candidates_onboarding_get`")

    header_params = {
        # HTTP header `Accept`
        'Accept': self.api_client.select_header_accept(
            ['application/json; charset=utf-8']),
    }
    return self.api_client.call_api(
        '/candidates/{id}/onboardingStatus'.replace('{format}', 'json'), 'GET',
        {'id': id},   # path params
        {},           # query params
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='OnboardingStatus',
        auth_settings=['key'],  # API-key authentication
        callback=kwargs.get('callback'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def candidates_onboarding_get_for_job(self, id, job_id, **kwargs):
    """
    Get Onboarding Status for a candidate associated with given job

    Synchronous by default; supply a `callback` keyword argument to run
    the request asynchronously and receive the request thread back.

    :param str id: Identifier of a candidate (required)
    :param str job_id: Identifier of a job (required)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :return: OnboardingStatus
        If the method is called asynchronously,
        returns the request thread.
    """
    # This convenience wrapper always asks for just the deserialized data.
    kwargs['_return_http_data_only'] = True
    result = self.candidates_onboarding_get_for_job_with_http_info(id, job_id, **kwargs)
    return result
def candidates_onboarding_get_for_job_with_http_info(self, id, job_id, **kwargs):
    """
    Get Onboarding Status for a candidate associated with given job

    Synchronous by default; supply a `callback` keyword argument to run
    the request asynchronously and receive the request thread back.

    :param str id: Identifier of a candidate (required)
    :param str job_id: Identifier of a job (required)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :return: OnboardingStatus
        If the method is called asynchronously,
        returns the request thread.
    """
    allowed = ('id', 'job_id', 'callback', '_return_http_data_only',
               '_preload_content', '_request_timeout')
    # Reject keyword arguments this endpoint does not understand.
    for key in kwargs:
        if key not in allowed:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method candidates_onboarding_get_for_job" % key
            )
    # verify the required parameter 'id' is set
    if id is None:
        raise ValueError("Missing the required parameter `id` when calling `candidates_onboarding_get_for_job`")
    # verify the required parameter 'job_id' is set
    if job_id is None:
        raise ValueError("Missing the required parameter `job_id` when calling `candidates_onboarding_get_for_job`")

    header_params = {
        # HTTP header `Accept`
        'Accept': self.api_client.select_header_accept(
            ['application/json; charset=utf-8']),
    }
    return self.api_client.call_api(
        '/candidates/{id}/jobs/{jobId}/onboardingStatus'.replace('{format}', 'json'), 'GET',
        {'id': id, 'jobId': job_id},   # path params
        {},                            # query params
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='OnboardingStatus',
        auth_settings=['key'],  # API-key authentication
        callback=kwargs.get('callback'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def candidates_onboarding_update(self, id, onboarding_status, **kwargs):
    """
    Set Onboarding Status for a candidate

    Synchronous by default; supply a `callback` keyword argument to run
    the request asynchronously and receive the request thread back.

    :param str id: Identifier of a candidate (required)
    :param OnboardingStatus onboarding_status: Onboarding status. (required)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :return: None
        If the method is called asynchronously,
        returns the request thread.
    """
    # This convenience wrapper always asks for just the deserialized data.
    kwargs['_return_http_data_only'] = True
    result = self.candidates_onboarding_update_with_http_info(id, onboarding_status, **kwargs)
    return result
def candidates_onboarding_update_with_http_info(self, id, onboarding_status, **kwargs):
    """
    Set Onboarding Status for a candidate

    Synchronous by default; supply a `callback` keyword argument to run
    the request asynchronously and receive the request thread back.

    :param str id: Identifier of a candidate (required)
    :param OnboardingStatus onboarding_status: Onboarding status. (required)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :return: None
        If the method is called asynchronously,
        returns the request thread.
    """
    allowed = ('id', 'onboarding_status', 'callback', '_return_http_data_only',
               '_preload_content', '_request_timeout')
    # Reject keyword arguments this endpoint does not understand.
    for key in kwargs:
        if key not in allowed:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method candidates_onboarding_update" % key
            )
    # verify the required parameter 'id' is set
    if id is None:
        raise ValueError("Missing the required parameter `id` when calling `candidates_onboarding_update`")
    # verify the required parameter 'onboarding_status' is set
    if onboarding_status is None:
        raise ValueError("Missing the required parameter `onboarding_status` when calling `candidates_onboarding_update`")

    header_params = {
        # HTTP header `Accept`
        'Accept': self.api_client.select_header_accept(
            ['application/json; charset=utf-8']),
        # HTTP header `Content-Type`
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json']),
    }
    return self.api_client.call_api(
        '/candidates/{id}/onboardingStatus'.replace('{format}', 'json'), 'PUT',
        {'id': id},   # path params
        {},           # query params
        header_params,
        body=onboarding_status,  # serialized as the JSON request body
        post_params=[],
        files={},
        response_type=None,
        auth_settings=['key'],  # API-key authentication
        callback=kwargs.get('callback'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def candidates_onboarding_update_for_job(self, id, job_id, onboarding_status, **kwargs):
    """
    Sets Onboarding Status for a candidate associated with given job

    Synchronous by default; supply a `callback` keyword argument to run
    the request asynchronously and receive the request thread back.

    :param str id: Identifier of a candidate (required)
    :param str job_id: Identifier of a job (required)
    :param OnboardingStatus onboarding_status: Onboarding status. (required)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :return: None
        If the method is called asynchronously,
        returns the request thread.
    """
    # This convenience wrapper always asks for just the deserialized data.
    kwargs['_return_http_data_only'] = True
    result = self.candidates_onboarding_update_for_job_with_http_info(
        id, job_id, onboarding_status, **kwargs)
    return result
def candidates_onboarding_update_for_job_with_http_info(self, id, job_id, onboarding_status, **kwargs):
    """
    Sets Onboarding Status for a candidate associated with given job

    Synchronous by default; supply a `callback` keyword argument to run
    the request asynchronously and receive the request thread back.

    :param str id: Identifier of a candidate (required)
    :param str job_id: Identifier of a job (required)
    :param OnboardingStatus onboarding_status: Onboarding status. (required)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :return: None
        If the method is called asynchronously,
        returns the request thread.
    """
    allowed = ('id', 'job_id', 'onboarding_status', 'callback',
               '_return_http_data_only', '_preload_content', '_request_timeout')
    # Reject keyword arguments this endpoint does not understand.
    for key in kwargs:
        if key not in allowed:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method candidates_onboarding_update_for_job" % key
            )
    # verify the required parameter 'id' is set
    if id is None:
        raise ValueError("Missing the required parameter `id` when calling `candidates_onboarding_update_for_job`")
    # verify the required parameter 'job_id' is set
    if job_id is None:
        raise ValueError("Missing the required parameter `job_id` when calling `candidates_onboarding_update_for_job`")
    # verify the required parameter 'onboarding_status' is set
    if onboarding_status is None:
        raise ValueError("Missing the required parameter `onboarding_status` when calling `candidates_onboarding_update_for_job`")

    header_params = {
        # HTTP header `Accept`
        'Accept': self.api_client.select_header_accept(
            ['application/json; charset=utf-8']),
        # HTTP header `Content-Type`
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json']),
    }
    return self.api_client.call_api(
        '/candidates/{id}/jobs/{jobId}/onboardingStatus'.replace('{format}', 'json'), 'PUT',
        {'id': id, 'jobId': job_id},   # path params
        {},                            # query params
        header_params,
        body=onboarding_status,  # serialized as the JSON request body
        post_params=[],
        files={},
        response_type=None,
        auth_settings=['key'],  # API-key authentication
        callback=kwargs.get('callback'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def candidates_properties_get(self, id, **kwargs):
    """
    Get candidate property values for a candidate

    Returns ``` {} ``` when there is no value set for a candidate property.
    Synchronous by default; supply a `callback` keyword argument to run
    the request asynchronously and receive the request thread back.

    :param str id: candidate identifier (required)
    :param str context: context for candidate properties to display
    :param callback function: The callback function
        for asynchronous request. (optional)
    :return: CandidateProperties
        If the method is called asynchronously,
        returns the request thread.
    """
    # This convenience wrapper always asks for just the deserialized data.
    kwargs['_return_http_data_only'] = True
    result = self.candidates_properties_get_with_http_info(id, **kwargs)
    return result
def candidates_properties_get_with_http_info(self, id, **kwargs):
    """
    Get candidate property values for a candidate

    Returns ``` {} ``` when there is no value set for a candidate property.
    Synchronous by default; supply a `callback` keyword argument to run
    the request asynchronously and receive the request thread back.

    :param str id: candidate identifier (required)
    :param str context: context for candidate properties to display
    :param callback function: The callback function
        for asynchronous request. (optional)
    :return: CandidateProperties
        If the method is called asynchronously,
        returns the request thread.
    """
    allowed = ('id', 'context', 'callback', '_return_http_data_only',
               '_preload_content', '_request_timeout')
    # Reject keyword arguments this endpoint does not understand.
    for key in kwargs:
        if key not in allowed:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method candidates_properties_get" % key
            )
    # verify the required parameter 'id' is set
    if id is None:
        raise ValueError("Missing the required parameter `id` when calling `candidates_properties_get`")

    # Optional `context` query parameter, forwarded only when supplied.
    query_params = {}
    if 'context' in kwargs:
        query_params['context'] = kwargs['context']

    header_params = {
        # HTTP header `Accept`
        'Accept': self.api_client.select_header_accept(
            ['application/json; charset=utf-8']),
    }
    return self.api_client.call_api(
        '/candidates/{id}/properties'.replace('{format}', 'json'), 'GET',
        {'id': id},   # path params
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='CandidateProperties',
        auth_settings=['key'],  # API-key authentication
        callback=kwargs.get('callback'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def candidates_properties_get_for_job(self, id, job_id, **kwargs):
    """
    Get candidate property values for a candidate's job

    Returns ``` {} ``` when there is no value set for a candidate property.
    Synchronous by default; supply a `callback` keyword argument to run
    the request asynchronously and receive the request thread back.

    :param str id: Identifier of a Candidate (required)
    :param str job_id: Identifier of a Job (required)
    :param str context: context for candidate properties to display
    :param callback function: The callback function
        for asynchronous request. (optional)
    :return: CandidateProperties
        If the method is called asynchronously,
        returns the request thread.
    """
    # This convenience wrapper always asks for just the deserialized data.
    kwargs['_return_http_data_only'] = True
    result = self.candidates_properties_get_for_job_with_http_info(id, job_id, **kwargs)
    return result
def candidates_properties_get_for_job_with_http_info(self, id, job_id, **kwargs):
    """
    Get candidate property values for a candidate's job

    Returns ``` {} ``` when there is no value set for a candidate property.
    Synchronous by default; supply a `callback` keyword argument to run
    the request asynchronously and receive the request thread back.

    :param str id: Identifier of a Candidate (required)
    :param str job_id: Identifier of a Job (required)
    :param str context: context for candidate properties to display
    :param callback function: The callback function
        for asynchronous request. (optional)
    :return: CandidateProperties
        If the method is called asynchronously,
        returns the request thread.
    """
    allowed = ('id', 'job_id', 'context', 'callback', '_return_http_data_only',
               '_preload_content', '_request_timeout')
    # Reject keyword arguments this endpoint does not understand.
    for key in kwargs:
        if key not in allowed:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method candidates_properties_get_for_job" % key
            )
    # verify the required parameter 'id' is set
    if id is None:
        raise ValueError("Missing the required parameter `id` when calling `candidates_properties_get_for_job`")
    # verify the required parameter 'job_id' is set
    if job_id is None:
        raise ValueError("Missing the required parameter `job_id` when calling `candidates_properties_get_for_job`")

    # Optional `context` query parameter, forwarded only when supplied.
    query_params = {}
    if 'context' in kwargs:
        query_params['context'] = kwargs['context']

    header_params = {
        # HTTP header `Accept`
        'Accept': self.api_client.select_header_accept(
            ['application/json; charset=utf-8']),
    }
    return self.api_client.call_api(
        '/candidates/{id}/jobs/{jobId}/properties'.replace('{format}', 'json'), 'GET',
        {'id': id, 'jobId': job_id},   # path params
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='CandidateProperties',
        auth_settings=['key'],  # API-key authentication
        callback=kwargs.get('callback'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def candidates_properties_values_update(self, id, property_id, **kwargs):
    """
    Add/update candidate property value

    Sets a candidate property value for the candidate; the accepted value
    shapes depend on the property type (see the API documentation), and
    passing ``` {} ``` resets the value.
    Synchronous by default; supply a `callback` keyword argument to run
    the request asynchronously and receive the request thread back.

    :param str id: Identifier of a Candidate (required)
    :param str property_id: Identifier of a Candidate Property (required)
    :param CandidatePropertyInputValue candidate_property_input_value: Input value of the candidate property.
    :param callback function: The callback function
        for asynchronous request. (optional)
    :return: None
        If the method is called asynchronously,
        returns the request thread.
    """
    # This convenience wrapper always asks for just the deserialized data.
    kwargs['_return_http_data_only'] = True
    result = self.candidates_properties_values_update_with_http_info(id, property_id, **kwargs)
    return result
def candidates_properties_values_update_with_http_info(self, id, property_id, **kwargs):
    """
    Add/update candidate property value

    Sets a candidate property value for the candidate; the accepted value
    shapes depend on the property type (see the API documentation), and
    passing ``` {} ``` resets the value.
    Synchronous by default; supply a `callback` keyword argument to run
    the request asynchronously and receive the request thread back.

    :param str id: Identifier of a Candidate (required)
    :param str property_id: Identifier of a Candidate Property (required)
    :param CandidatePropertyInputValue candidate_property_input_value: Input value of the candidate property.
    :param callback function: The callback function
        for asynchronous request. (optional)
    :return: None
        If the method is called asynchronously,
        returns the request thread.
    """
    allowed = ('id', 'property_id', 'candidate_property_input_value',
               'callback', '_return_http_data_only', '_preload_content',
               '_request_timeout')
    # Reject keyword arguments this endpoint does not understand.
    for key in kwargs:
        if key not in allowed:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method candidates_properties_values_update" % key
            )
    # verify the required parameter 'id' is set
    if id is None:
        raise ValueError("Missing the required parameter `id` when calling `candidates_properties_values_update`")
    # verify the required parameter 'property_id' is set
    if property_id is None:
        raise ValueError("Missing the required parameter `property_id` when calling `candidates_properties_values_update`")

    header_params = {
        # HTTP header `Accept`
        'Accept': self.api_client.select_header_accept(
            ['application/json; charset=utf-8']),
        # HTTP header `Content-Type`
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json']),
    }
    return self.api_client.call_api(
        '/candidates/{id}/properties/{propertyId}'.replace('{format}', 'json'), 'PUT',
        {'id': id, 'propertyId': property_id},   # path params
        {},                                      # query params
        header_params,
        # Optional request body; None when the caller did not supply a value.
        body=kwargs.get('candidate_property_input_value'),
        post_params=[],
        files={},
        response_type=None,
        auth_settings=['key'],  # API-key authentication
        callback=kwargs.get('callback'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def candidates_properties_values_update_for_job(self, id, job_id, property_id, **kwargs):
    """
    Add/update candidate property value for a specific job assignment.

    Synchronous by default; pass a `callback` keyword to make the
    request asynchronous, in which case the request thread is returned.
    See the SmartRecruiters docs for per-type value payload examples
    (BOOLEAN, COUNTRY, CURRENCY, DATE, NUMBER, PERCENT, REGION,
    SINGLE_SELECT, TEXT, USER); pass ``{}`` to reset a value.

    :param str id: Identifier of a Candidate (required)
    :param str job_id: Identifier of a Job (required)
    :param str property_id: Identifier of a Candidate Property (required)
    :param CandidatePropertyInputValue candidate_property_input_value: Input value of the candidate property.
    :param callback function: Callback for asynchronous requests (optional)
    :return: None, or the request thread when called asynchronously.
    """
    # Callers of this convenience wrapper always want the deserialized
    # body only, never the (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    # Both the sync and async paths simply forward the *_with_http_info
    # result, so a single return covers them.
    return self.candidates_properties_values_update_for_job_with_http_info(
        id, job_id, property_id, **kwargs)
def candidates_properties_values_update_for_job_with_http_info(self, id, job_id, property_id, **kwargs):
    """
    Add/update candidate property value for a specific job assignment.

    Issues ``PUT /candidates/{id}/jobs/{jobId}/properties/{propertyId}``
    with the optional ``candidate_property_input_value`` as JSON body.
    Pass a `callback` keyword to make the request asynchronous, in which
    case the request thread is returned.

    :param str id: Identifier of a Candidate (required)
    :param str job_id: Identifier of a Job (required)
    :param str property_id: Identifier of a Candidate Property (required)
    :param CandidatePropertyInputValue candidate_property_input_value: Input value of the candidate property.
    :param callback function: Callback for asynchronous requests (optional)
    :return: None, or the request thread when called asynchronously.
    """
    recognized = ['id', 'job_id', 'property_id',
                  'candidate_property_input_value', 'callback',
                  '_return_http_data_only', '_preload_content',
                  '_request_timeout']
    params = {'id': id, 'job_id': job_id, 'property_id': property_id}
    for name, value in iteritems(kwargs):
        # Reject keywords this endpoint does not understand.
        if name not in recognized:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method candidates_properties_values_update_for_job" % name
            )
        params[name] = value
    # All three path parameters are mandatory and may not be None.
    if params.get('id') is None:
        raise ValueError("Missing the required parameter `id` when calling `candidates_properties_values_update_for_job`")
    if params.get('job_id') is None:
        raise ValueError("Missing the required parameter `job_id` when calling `candidates_properties_values_update_for_job`")
    if params.get('property_id') is None:
        raise ValueError("Missing the required parameter `property_id` when calling `candidates_properties_values_update_for_job`")

    path_params = {
        'id': params['id'],
        'jobId': params['job_id'],
        'propertyId': params['property_id'],
    }
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json; charset=utf-8']),
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json']),
    }
    return self.api_client.call_api(
        '/candidates/{id}/jobs/{jobId}/properties/{propertyId}'.replace('{format}', 'json'),
        'PUT',
        path_params,
        {},
        header_params,
        # Absent body is sent as None, matching the optional kwarg.
        body=params.get('candidate_property_input_value'),
        post_params=[],
        files={},
        response_type=None,
        auth_settings=['key'],
        callback=params.get('callback'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def candidates_resume_add(self, file, **kwargs):
    """
    Parse a resume, create a candidate and assign to a Talent Pool.

    Synchronous by default; pass a `callback` keyword to make the
    request asynchronous, in which case the request thread is returned.

    :param file file: The resume file to parse. (required)
    :param str source_type_id: Candidate Source type id
    :param str source_sub_type_id: Candidate Source subtype id
    :param str source_id: Candidate Source id
    :param bool internal: Mark as company employee
    :param callback function: Callback for asynchronous requests (optional)
    :return: CandidateDetails, or the request thread when called asynchronously.
    """
    # Always unwrap the HTTP envelope for this convenience wrapper.
    kwargs['_return_http_data_only'] = True
    # Sync and async paths both forward the *_with_http_info result.
    return self.candidates_resume_add_with_http_info(file, **kwargs)
def candidates_resume_add_with_http_info(self, file, **kwargs):
    """
    Parse a resume, create a candidate and assign to a Talent Pool.

    Issues ``POST /candidates/cv`` as multipart/form-data with the
    resume file plus optional source metadata fields. Pass a `callback`
    keyword to make the request asynchronous, in which case the request
    thread is returned.

    :param file file: The resume file to parse. (required)
    :param str source_type_id: Candidate Source type id
    :param str source_sub_type_id: Candidate Source subtype id
    :param str source_id: Candidate Source id
    :param bool internal: Mark as company employee
    :param callback function: Callback for asynchronous requests (optional)
    :return: CandidateDetails, or the request thread when called asynchronously.
    """
    recognized = ['file', 'source_type_id', 'source_sub_type_id',
                  'source_id', 'internal', 'callback',
                  '_return_http_data_only', '_preload_content',
                  '_request_timeout']
    params = {'file': file}
    for name, value in iteritems(kwargs):
        # Reject keywords this endpoint does not understand.
        if name not in recognized:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method candidates_resume_add" % name
            )
        params[name] = value
    if params.get('file') is None:
        raise ValueError("Missing the required parameter `file` when calling `candidates_resume_add`")

    # Optional source metadata travels as multipart form fields; only
    # keys the caller actually supplied are sent.
    form_params = [
        (wire, params[name])
        for name, wire in (('source_type_id', 'sourceTypeId'),
                           ('source_sub_type_id', 'sourceSubTypeId'),
                           ('source_id', 'sourceId'),
                           ('internal', 'internal'))
        if name in params
    ]
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json; charset=utf-8']),
        'Content-Type': self.api_client.select_header_content_type(
            ['multipart/form-data']),
    }
    return self.api_client.call_api(
        '/candidates/cv'.replace('{format}', 'json'),
        'POST',
        {},
        {},
        header_params,
        body=None,
        post_params=form_params,
        files={'file': params['file']},
        response_type='CandidateDetails',
        auth_settings=['key'],
        callback=params.get('callback'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def candidates_resume_add_to_job(self, file, job_id, **kwargs):
    """
    Parse a resume, create a candidate and assign to a job.

    Synchronous by default; pass a `callback` keyword to make the
    request asynchronous, in which case the request thread is returned.

    :param file file: The resume file to parse. (required)
    :param str job_id: Identifier of a Job (required)
    :param str source_type_id: Candidate Source type id
    :param str source_sub_type_id: Candidate Source subtype id
    :param str source_id: Candidate Source id
    :param bool internal: Mark as company employee
    :param callback function: Callback for asynchronous requests (optional)
    :return: CandidateDetails, or the request thread when called asynchronously.
    """
    # Always unwrap the HTTP envelope for this convenience wrapper.
    kwargs['_return_http_data_only'] = True
    # Sync and async paths both forward the *_with_http_info result.
    return self.candidates_resume_add_to_job_with_http_info(file, job_id, **kwargs)
def candidates_resume_add_to_job_with_http_info(self, file, job_id, **kwargs):
    """
    Parse a resume, create a candidate and assign to a job.

    Issues ``POST /jobs/{jobId}/candidates/cv`` as multipart/form-data
    with the resume file plus optional source metadata fields. Pass a
    `callback` keyword to make the request asynchronous, in which case
    the request thread is returned.

    :param file file: The resume file to parse. (required)
    :param str job_id: Identifier of a Job (required)
    :param str source_type_id: Candidate Source type id
    :param str source_sub_type_id: Candidate Source subtype id
    :param str source_id: Candidate Source id
    :param bool internal: Mark as company employee
    :param callback function: Callback for asynchronous requests (optional)
    :return: CandidateDetails, or the request thread when called asynchronously.
    """
    recognized = ['file', 'job_id', 'source_type_id',
                  'source_sub_type_id', 'source_id', 'internal',
                  'callback', '_return_http_data_only',
                  '_preload_content', '_request_timeout']
    params = {'file': file, 'job_id': job_id}
    for name, value in iteritems(kwargs):
        # Reject keywords this endpoint does not understand.
        if name not in recognized:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method candidates_resume_add_to_job" % name
            )
        params[name] = value
    if params.get('file') is None:
        raise ValueError("Missing the required parameter `file` when calling `candidates_resume_add_to_job`")
    if params.get('job_id') is None:
        raise ValueError("Missing the required parameter `job_id` when calling `candidates_resume_add_to_job`")

    # Optional source metadata travels as multipart form fields; only
    # keys the caller actually supplied are sent.
    form_params = [
        (wire, params[name])
        for name, wire in (('source_type_id', 'sourceTypeId'),
                           ('source_sub_type_id', 'sourceSubTypeId'),
                           ('source_id', 'sourceId'),
                           ('internal', 'internal'))
        if name in params
    ]
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json; charset=utf-8']),
        'Content-Type': self.api_client.select_header_content_type(
            ['multipart/form-data']),
    }
    return self.api_client.call_api(
        '/jobs/{jobId}/candidates/cv'.replace('{format}', 'json'),
        'POST',
        {'jobId': params['job_id']},
        {},
        header_params,
        body=None,
        post_params=form_params,
        files={'file': params['file']},
        response_type='CandidateDetails',
        auth_settings=['key'],
        callback=params.get('callback'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def candidates_screening_answers_get(self, id, job_id, **kwargs):
    """
    Get candidate screening answers for a candidate's job.

    Returns screening question answers for the given candidate/job pair
    (an empty array when none exist). A UUID question category marks a
    custom question; other values mark predefined library questions.
    Use the label properties to render answers in a human-readable form.
    Synchronous by default; pass a `callback` keyword to make the
    request asynchronous, in which case the request thread is returned.

    :param str id: Identifier of a Candidate (required)
    :param str job_id: Identifier of a Job (required)
    :param callback function: Callback for asynchronous requests (optional)
    :return: ScreeningAnswers, or the request thread when called asynchronously.
    """
    # Always unwrap the HTTP envelope for this convenience wrapper.
    kwargs['_return_http_data_only'] = True
    # Sync and async paths both forward the *_with_http_info result.
    return self.candidates_screening_answers_get_with_http_info(id, job_id, **kwargs)
def candidates_screening_answers_get_with_http_info(self, id, job_id, **kwargs):
    """
    Get candidate screening answers for a candidate's job.

    Issues ``GET /candidates/{id}/jobs/{jobId}/screening-answers``.
    Returns an empty array when there are no screening answers for the
    given candidate's job. Pass a `callback` keyword to make the request
    asynchronous, in which case the request thread is returned.

    :param str id: Identifier of a Candidate (required)
    :param str job_id: Identifier of a Job (required)
    :param callback function: Callback for asynchronous requests (optional)
    :return: ScreeningAnswers, or the request thread when called asynchronously.
    """
    recognized = ['id', 'job_id', 'callback', '_return_http_data_only',
                  '_preload_content', '_request_timeout']
    params = {'id': id, 'job_id': job_id}
    for name, value in iteritems(kwargs):
        # Reject keywords this endpoint does not understand.
        if name not in recognized:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method candidates_screening_answers_get" % name
            )
        params[name] = value
    # Both path parameters are mandatory and may not be None.
    if params.get('id') is None:
        raise ValueError("Missing the required parameter `id` when calling `candidates_screening_answers_get`")
    if params.get('job_id') is None:
        raise ValueError("Missing the required parameter `job_id` when calling `candidates_screening_answers_get`")

    path_params = {'id': params['id'], 'jobId': params['job_id']}
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json; charset=utf-8']),
    }
    return self.api_client.call_api(
        '/candidates/{id}/jobs/{jobId}/screening-answers'.replace('{format}', 'json'),
        'GET',
        path_params,
        {},
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='ScreeningAnswers',
        auth_settings=['key'],
        callback=params.get('callback'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def candidates_source_update(self, id, job_id, candidate_source, **kwargs):
    """
    Update a candidate's source.

    Synchronous by default; pass a `callback` keyword to make the
    request asynchronous, in which case the request thread is returned.

    :param str id: Identifier of a Candidate (required)
    :param str job_id: Identifier of a Job (required)
    :param CandidateSource candidate_source: Candidate source to be set (required)
    :param callback function: Callback for asynchronous requests (optional)
    :return: None, or the request thread when called asynchronously.
    """
    # Always unwrap the HTTP envelope for this convenience wrapper.
    kwargs['_return_http_data_only'] = True
    # Sync and async paths both forward the *_with_http_info result.
    return self.candidates_source_update_with_http_info(
        id, job_id, candidate_source, **kwargs)
def candidates_source_update_with_http_info(self, id, job_id, candidate_source, **kwargs):
    """
    Update a candidate's source.

    Issues ``PUT /candidates/{id}/jobs/{jobId}/source`` with the
    candidate source as JSON body. Pass a `callback` keyword to make
    the request asynchronous, in which case the request thread is
    returned.

    :param str id: Identifier of a Candidate (required)
    :param str job_id: Identifier of a Job (required)
    :param CandidateSource candidate_source: Candidate source to be set (required)
    :param callback function: Callback for asynchronous requests (optional)
    :return: None, or the request thread when called asynchronously.
    """
    recognized = ['id', 'job_id', 'candidate_source', 'callback',
                  '_return_http_data_only', '_preload_content',
                  '_request_timeout']
    params = {'id': id, 'job_id': job_id,
              'candidate_source': candidate_source}
    for name, value in iteritems(kwargs):
        # Reject keywords this endpoint does not understand.
        if name not in recognized:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method candidates_source_update" % name
            )
        params[name] = value
    # All three parameters are mandatory and may not be None.
    if params.get('id') is None:
        raise ValueError("Missing the required parameter `id` when calling `candidates_source_update`")
    if params.get('job_id') is None:
        raise ValueError("Missing the required parameter `job_id` when calling `candidates_source_update`")
    if params.get('candidate_source') is None:
        raise ValueError("Missing the required parameter `candidate_source` when calling `candidates_source_update`")

    path_params = {'id': params['id'], 'jobId': params['job_id']}
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json; charset=utf-8']),
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json']),
    }
    return self.api_client.call_api(
        '/candidates/{id}/jobs/{jobId}/source'.replace('{format}', 'json'),
        'PUT',
        path_params,
        {},
        header_params,
        body=params['candidate_source'],
        post_params=[],
        files={},
        response_type=None,
        auth_settings=['key'],
        callback=params.get('callback'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def candidates_status_history_get(self, id, **kwargs):
    """
    Get candidate's status history.

    Synchronous by default; pass a `callback` keyword to make the
    request asynchronous, in which case the request thread is returned.

    :param str id: Identifier of a Candidate (required)
    :param callback function: Callback for asynchronous requests (optional)
    :return: CandidateStatusHistoryList, or the request thread when called asynchronously.
    """
    # Always unwrap the HTTP envelope for this convenience wrapper.
    kwargs['_return_http_data_only'] = True
    # Sync and async paths both forward the *_with_http_info result.
    return self.candidates_status_history_get_with_http_info(id, **kwargs)
def candidates_status_history_get_with_http_info(self, id, **kwargs):
    """
    Get candidate's status history.

    Issues ``GET /candidates/{id}/status/history``. Pass a `callback`
    keyword to make the request asynchronous, in which case the request
    thread is returned.

    :param str id: Identifier of a Candidate (required)
    :param callback function: Callback for asynchronous requests (optional)
    :return: CandidateStatusHistoryList, or the request thread when called asynchronously.
    """
    recognized = ['id', 'callback', '_return_http_data_only',
                  '_preload_content', '_request_timeout']
    params = {'id': id}
    for name, value in iteritems(kwargs):
        # Reject keywords this endpoint does not understand.
        if name not in recognized:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method candidates_status_history_get" % name
            )
        params[name] = value
    if params.get('id') is None:
        raise ValueError("Missing the required parameter `id` when calling `candidates_status_history_get`")

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json; charset=utf-8']),
    }
    return self.api_client.call_api(
        '/candidates/{id}/status/history'.replace('{format}', 'json'),
        'GET',
        {'id': params['id']},
        {},
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='CandidateStatusHistoryList',
        auth_settings=['key'],
        callback=params.get('callback'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def candidates_status_update(self, id, job_id, **kwargs):
    """
    Update a candidate's status.

    Synchronous by default; pass a `callback` keyword to make the
    request asynchronous, in which case the request thread is returned.

    :param str id: Identifier of a Candidate (required)
    :param str job_id: Identifier of a Job (required)
    :param CandidateStatus candidate_status: Candidate Status to be set
    :param callback function: Callback for asynchronous requests (optional)
    :return: None, or the request thread when called asynchronously.
    """
    # Always unwrap the HTTP envelope for this convenience wrapper.
    kwargs['_return_http_data_only'] = True
    # Sync and async paths both forward the *_with_http_info result.
    return self.candidates_status_update_with_http_info(id, job_id, **kwargs)
def candidates_status_update_with_http_info(self, id, job_id, **kwargs):
    """
    Update a candidate's status.

    Issues ``PUT /candidates/{id}/jobs/{jobId}/status`` with the
    optional ``candidate_status`` as JSON body. Pass a `callback`
    keyword to make the request asynchronous, in which case the request
    thread is returned.

    :param str id: Identifier of a Candidate (required)
    :param str job_id: Identifier of a Job (required)
    :param CandidateStatus candidate_status: Candidate Status to be set
    :param callback function: Callback for asynchronous requests (optional)
    :return: None, or the request thread when called asynchronously.
    """
    recognized = ['id', 'job_id', 'candidate_status', 'callback',
                  '_return_http_data_only', '_preload_content',
                  '_request_timeout']
    params = {'id': id, 'job_id': job_id}
    for name, value in iteritems(kwargs):
        # Reject keywords this endpoint does not understand.
        if name not in recognized:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method candidates_status_update" % name
            )
        params[name] = value
    # Both path parameters are mandatory and may not be None.
    if params.get('id') is None:
        raise ValueError("Missing the required parameter `id` when calling `candidates_status_update`")
    if params.get('job_id') is None:
        raise ValueError("Missing the required parameter `job_id` when calling `candidates_status_update`")

    path_params = {'id': params['id'], 'jobId': params['job_id']}
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json; charset=utf-8']),
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json']),
    }
    return self.api_client.call_api(
        '/candidates/{id}/jobs/{jobId}/status'.replace('{format}', 'json'),
        'PUT',
        path_params,
        {},
        header_params,
        # Absent body is sent as None, matching the optional kwarg.
        body=params.get('candidate_status'),
        post_params=[],
        files={},
        response_type=None,
        auth_settings=['key'],
        callback=params.get('callback'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def candidates_status_update_primary(self, id, **kwargs):
    """
    Update a candidate's status on primary assignment.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, pass a `callback` function to be invoked
    when the response is received; the request thread is then returned.

    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.candidates_status_update_primary(id, callback=callback_function)

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str id: Identifier of a Candidate (required)
    :param CandidateStatus candidate_status: Candidate Status to be set
    :return: None
        If the method is called asynchronously,
        returns the request thread.
    """
    # Callers of this convenience wrapper only want the payload, never
    # the (data, status, headers) triple, so force the flag and delegate.
    # Sync and async paths are handled identically by the helper: when a
    # callback is present it returns the request thread, otherwise the data.
    kwargs['_return_http_data_only'] = True
    return self.candidates_status_update_primary_with_http_info(id, **kwargs)
def candidates_status_update_primary_with_http_info(self, id, **kwargs):
    """
    Update a candidate's status on primary assignment
    Update a candidate's status on primary assignment
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.candidates_status_update_primary_with_http_info(id, callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str id: Identifier of a Candidate (required)
    :param CandidateStatus candidate_status: Candidate Status to be set
    :return: None
        If the method is called asynchronously,
        returns the request thread.
    """
    # Names accepted by this endpoint, plus the generic transport options
    # understood by the api_client.
    all_params = ['id', 'candidate_status']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # NOTE: locals() is captured before any other local is created, so
    # `params` holds exactly {self, id, kwargs}; unknown kwargs are
    # rejected, known ones folded into `params`.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method candidates_status_update_primary" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if ('id' not in params) or (params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `candidates_status_update_primary`")

    collection_formats = {}

    # Endpoint: PUT /candidates/{id}/status
    resource_path = '/candidates/{id}/status'.replace('{format}', 'json')
    path_params = {}
    if 'id' in params:
        path_params['id'] = params['id']

    query_params = {}

    header_params = {}

    form_params = []
    local_var_files = {}

    # Optional CandidateStatus payload is sent as the JSON request body.
    body_params = None
    if 'candidate_status' in params:
        body_params = params['candidate_status']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json; charset=utf-8'])

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['application/json'])

    # Authentication setting
    auth_settings = ['key']

    # response_type=None: this endpoint returns no deserialized body.
    return self.api_client.call_api(resource_path, 'PUT',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type=None,
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def candidates_tags_add(self, id, candidate_tags, **kwargs):
    """
    Add tags to a candidate.

    Add new tags to a given candidate. It doesn't replace existing tags.
    Synchronous by default; pass a `callback` function to make the
    request asynchronous, in which case the request thread is returned
    and the callback receives the response.

    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.candidates_tags_add(id, candidate_tags, callback=callback_function)

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str id: Identifier of a Candidate (required)
    :param CandidateTags candidate_tags: Tags to be added. (required)
    :return: CandidateTags
        If the method is called asynchronously,
        returns the request thread.
    """
    # Only the payload is wanted here, not the (data, status, headers)
    # triple; both sync and async cases are served by the same helper.
    kwargs['_return_http_data_only'] = True
    return self.candidates_tags_add_with_http_info(id, candidate_tags, **kwargs)
def candidates_tags_add_with_http_info(self, id, candidate_tags, **kwargs):
    """
    Add tags to a candidate
    Add new tags to a given candidate. It doesn't replace existing tags.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.candidates_tags_add_with_http_info(id, candidate_tags, callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str id: Identifier of a Candidate (required)
    :param CandidateTags candidate_tags: Tags to be added. (required)
    :return: CandidateTags
        If the method is called asynchronously,
        returns the request thread.
    """
    # Names accepted by this endpoint, plus the generic transport options
    # understood by the api_client.
    all_params = ['id', 'candidate_tags']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # NOTE: locals() is captured before any other local is created, so
    # `params` holds exactly {self, id, candidate_tags, kwargs}.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method candidates_tags_add" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if ('id' not in params) or (params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `candidates_tags_add`")
    # verify the required parameter 'candidate_tags' is set
    if ('candidate_tags' not in params) or (params['candidate_tags'] is None):
        raise ValueError("Missing the required parameter `candidate_tags` when calling `candidates_tags_add`")

    collection_formats = {}

    # Endpoint: POST /candidates/{id}/tags
    resource_path = '/candidates/{id}/tags'.replace('{format}', 'json')
    path_params = {}
    if 'id' in params:
        path_params['id'] = params['id']

    query_params = {}

    header_params = {}

    form_params = []
    local_var_files = {}

    # The CandidateTags payload is sent as the JSON request body.
    body_params = None
    if 'candidate_tags' in params:
        body_params = params['candidate_tags']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json; charset=utf-8'])

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['application/json'])

    # Authentication setting
    auth_settings = ['key']

    # Response is deserialized into a CandidateTags model.
    return self.api_client.call_api(resource_path, 'POST',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='CandidateTags',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def candidates_tags_delete(self, id, **kwargs):
    """
    Delete tags for a candidate.

    Delete tags for a given candidate. All tags associated with a
    candidate are removed. Synchronous by default; pass a `callback`
    function to make the request asynchronous, in which case the request
    thread is returned and the callback receives the response.

    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.candidates_tags_delete(id, callback=callback_function)

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str id: Identifier of a Candidate (required)
    :return: None
        If the method is called asynchronously,
        returns the request thread.
    """
    # Only the payload is wanted here, not the (data, status, headers)
    # triple; both sync and async cases are served by the same helper.
    kwargs['_return_http_data_only'] = True
    return self.candidates_tags_delete_with_http_info(id, **kwargs)
def candidates_tags_delete_with_http_info(self, id, **kwargs):
    """
    Delete tags for a candidate
    Delete tags for a given candidate. All tags associated with a candidate are removed.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.candidates_tags_delete_with_http_info(id, callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str id: Identifier of a Candidate (required)
    :return: None
        If the method is called asynchronously,
        returns the request thread.
    """
    # Names accepted by this endpoint, plus the generic transport options
    # understood by the api_client.
    all_params = ['id']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # NOTE: locals() is captured before any other local is created, so
    # `params` holds exactly {self, id, kwargs}.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method candidates_tags_delete" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if ('id' not in params) or (params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `candidates_tags_delete`")

    collection_formats = {}

    # Endpoint: DELETE /candidates/{id}/tags
    resource_path = '/candidates/{id}/tags'.replace('{format}', 'json')
    path_params = {}
    if 'id' in params:
        path_params['id'] = params['id']

    query_params = {}

    header_params = {}

    form_params = []
    local_var_files = {}

    # DELETE carries no request body, so no Content-Type header is set.
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json; charset=utf-8'])

    # Authentication setting
    auth_settings = ['key']

    # response_type=None: this endpoint returns no deserialized body.
    return self.api_client.call_api(resource_path, 'DELETE',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type=None,
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def candidates_tags_get(self, id, **kwargs):
    """
    Get tags for a candidate.

    Synchronous by default; pass a `callback` function to make the
    request asynchronous, in which case the request thread is returned
    and the callback receives the response.

    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.candidates_tags_get(id, callback=callback_function)

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str id: Identifier of a Candidate (required)
    :return: CandidateTags
        If the method is called asynchronously,
        returns the request thread.
    """
    # Only the payload is wanted here, not the (data, status, headers)
    # triple; both sync and async cases are served by the same helper.
    kwargs['_return_http_data_only'] = True
    return self.candidates_tags_get_with_http_info(id, **kwargs)
def candidates_tags_get_with_http_info(self, id, **kwargs):
    """
    Get tags for a candidate
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.candidates_tags_get_with_http_info(id, callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str id: Identifier of a Candidate (required)
    :return: CandidateTags
        If the method is called asynchronously,
        returns the request thread.
    """
    # Names accepted by this endpoint, plus the generic transport options
    # understood by the api_client.
    all_params = ['id']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # NOTE: locals() is captured before any other local is created, so
    # `params` holds exactly {self, id, kwargs}.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method candidates_tags_get" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if ('id' not in params) or (params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `candidates_tags_get`")

    collection_formats = {}

    # Endpoint: GET /candidates/{id}/tags
    resource_path = '/candidates/{id}/tags'.replace('{format}', 'json')
    path_params = {}
    if 'id' in params:
        path_params['id'] = params['id']

    query_params = {}

    header_params = {}

    form_params = []
    local_var_files = {}

    # GET carries no request body, so no Content-Type header is set.
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json; charset=utf-8'])

    # Authentication setting
    auth_settings = ['key']

    # Response is deserialized into a CandidateTags model.
    return self.api_client.call_api(resource_path, 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='CandidateTags',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def candidates_tags_replace(self, id, candidate_tags, **kwargs):
    """
    Update tags for a candidate.

    Update tags for a given candidate. It replaces all existing tags
    with the new array provided. Synchronous by default; pass a
    `callback` function to make the request asynchronous, in which case
    the request thread is returned and the callback receives the
    response.

    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.candidates_tags_replace(id, candidate_tags, callback=callback_function)

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str id: Identifier of a Candidate (required)
    :param CandidateTags candidate_tags: Tags to be set. (required)
    :return: CandidateTags
        If the method is called asynchronously,
        returns the request thread.
    """
    # Only the payload is wanted here, not the (data, status, headers)
    # triple; both sync and async cases are served by the same helper.
    kwargs['_return_http_data_only'] = True
    return self.candidates_tags_replace_with_http_info(id, candidate_tags, **kwargs)
def candidates_tags_replace_with_http_info(self, id, candidate_tags, **kwargs):
    """
    Update tags for a candidate
    Update tags for a given candidate. It replaces all existing tags with the new array provided.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.candidates_tags_replace_with_http_info(id, candidate_tags, callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str id: Identifier of a Candidate (required)
    :param CandidateTags candidate_tags: Tags to be set. (required)
    :return: CandidateTags
        If the method is called asynchronously,
        returns the request thread.
    """
    # Names accepted by this endpoint, plus the generic transport options
    # understood by the api_client.
    all_params = ['id', 'candidate_tags']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # NOTE: locals() is captured before any other local is created, so
    # `params` holds exactly {self, id, candidate_tags, kwargs}.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method candidates_tags_replace" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if ('id' not in params) or (params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `candidates_tags_replace`")
    # verify the required parameter 'candidate_tags' is set
    if ('candidate_tags' not in params) or (params['candidate_tags'] is None):
        raise ValueError("Missing the required parameter `candidate_tags` when calling `candidates_tags_replace`")

    collection_formats = {}

    # Endpoint: PUT /candidates/{id}/tags
    resource_path = '/candidates/{id}/tags'.replace('{format}', 'json')
    path_params = {}
    if 'id' in params:
        path_params['id'] = params['id']

    query_params = {}

    header_params = {}

    form_params = []
    local_var_files = {}

    # The CandidateTags payload is sent as the JSON request body.
    body_params = None
    if 'candidate_tags' in params:
        body_params = params['candidate_tags']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json; charset=utf-8'])

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['application/json'])

    # Authentication setting
    auth_settings = ['key']

    # Response is deserialized into a CandidateTags model.
    return self.api_client.call_api(resource_path, 'PUT',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='CandidateTags',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def candidates_update(self, id, **kwargs):
    """
    Update candidate personal information.

    Update candidate details. Synchronous by default; pass a `callback`
    function to make the request asynchronous, in which case the request
    thread is returned and the callback receives the response.

    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.candidates_update(id, callback=callback_function)

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str id: Identifier of a candidate (required)
    :param PersonalDetails personal_details: Candidate personal information
    :return: CandidateDetails
        If the method is called asynchronously,
        returns the request thread.
    """
    # Only the payload is wanted here, not the (data, status, headers)
    # triple; both sync and async cases are served by the same helper.
    kwargs['_return_http_data_only'] = True
    return self.candidates_update_with_http_info(id, **kwargs)
def candidates_update_with_http_info(self, id, **kwargs):
    """
    Update candidate personal information
    Update candidate details
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.candidates_update_with_http_info(id, callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str id: Identifier of a candidate (required)
    :param PersonalDetails personal_details: Candidate personal information
    :return: CandidateDetails
        If the method is called asynchronously,
        returns the request thread.
    """
    # Names accepted by this endpoint, plus the generic transport options
    # understood by the api_client.
    all_params = ['id', 'personal_details']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # NOTE: locals() is captured before any other local is created, so
    # `params` holds exactly {self, id, kwargs}.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method candidates_update" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if ('id' not in params) or (params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `candidates_update`")

    collection_formats = {}

    # Endpoint: PATCH /candidates/{id}
    resource_path = '/candidates/{id}'.replace('{format}', 'json')
    path_params = {}
    if 'id' in params:
        path_params['id'] = params['id']

    query_params = {}

    header_params = {}

    form_params = []
    local_var_files = {}

    # Optional PersonalDetails payload is sent as the JSON request body.
    body_params = None
    if 'personal_details' in params:
        body_params = params['personal_details']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json; charset=utf-8'])

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['application/json'])

    # Authentication setting
    auth_settings = ['key']

    # Response is deserialized into a CandidateDetails model.
    return self.api_client.call_api(resource_path, 'PATCH',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='CandidateDetails',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
| 49.158985
| 1,773
| 0.593295
| 17,038
| 158,931
| 5.332199
| 0.028466
| 0.049312
| 0.017259
| 0.02219
| 0.977953
| 0.971161
| 0.96656
| 0.957689
| 0.953704
| 0.946076
| 0
| 0.003819
| 0.321234
| 158,931
| 3,232
| 1,774
| 49.174196
| 0.838344
| 0.388892
| 0
| 0.806811
| 0
| 0.001858
| 0.188839
| 0.052519
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035294
| false
| 0
| 0.004334
| 0
| 0.09226
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
29e35d8e88efc3989c239a0f1635d3a3eb84a908
| 7,430
|
py
|
Python
|
nova/tests/unit/scheduler/filters/test_vcpu_model_filter.py
|
teresa-ho/stx-nova
|
1f82323439da2449edbbaed2fe1c8414a550c86f
|
[
"Apache-2.0"
] | null | null | null |
nova/tests/unit/scheduler/filters/test_vcpu_model_filter.py
|
teresa-ho/stx-nova
|
1f82323439da2449edbbaed2fe1c8414a550c86f
|
[
"Apache-2.0"
] | null | null | null |
nova/tests/unit/scheduler/filters/test_vcpu_model_filter.py
|
teresa-ho/stx-nova
|
1f82323439da2449edbbaed2fe1c8414a550c86f
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2016-2017 Wind River Systems, Inc.
#
from nova import objects
from nova.scheduler.filters import vcpu_model_filter
from nova import test
from nova.tests.unit.scheduler import fakes
class TestVCPUModelFilter(test.NoDBTestCase):
    """Unit tests for the VCpuModelFilter scheduler filter.

    The filter compares a requested vCPU model (from flavor extra spec
    ``hw:cpu_model`` or image property ``hw_cpu_model``) against the host's
    reported ``cpu_info``.
    """

    def setUp(self):
        """Create a fresh filter instance for each test."""
        super(TestVCPUModelFilter, self).setUp()
        self.filt_cls = vcpu_model_filter.VCpuModelFilter()

    def test_vcpu_model_not_specified(self):
        """A request with no cpu model constraint passes any host."""
        spec_obj = objects.RequestSpec(
            flavor=objects.Flavor(memory_mb=1024, extra_specs={}),
            image=objects.ImageMeta(properties=objects.ImageMetaProps()),
            scheduler_hints={'task_state': ['scheduling'], 'host': ['host1'],
                             'node': ['node1']})
        host = fakes.FakeHostState('host1', 'node1', {})
        self.assertTrue(self.filt_cls.host_passes(host, spec_obj))

    def test_vcpu_model_flavor_passes(self):
        """Flavor requests Nehalem; Broadwell host is new enough to pass."""
        spec_obj = objects.RequestSpec(
            flavor=objects.Flavor(extra_specs={'hw:cpu_model': 'Nehalem'}),
            image=objects.ImageMeta(properties=objects.ImageMetaProps()),
            scheduler_hints={'task_state': ['scheduling'], 'host': ['host1'],
                             'node': ['node1']})
        host = fakes.FakeHostState('host1', 'node1',
                                   {'cpu_info': '{"model": "Broadwell"}'})
        self.assertTrue(self.filt_cls.host_passes(host, spec_obj))

    def test_vcpu_model_flavor_fails(self):
        """Flavor requests Nehalem; older Conroe host must be rejected."""
        spec_obj = objects.RequestSpec(
            flavor=objects.Flavor(extra_specs={'hw:cpu_model': 'Nehalem'}),
            image=objects.ImageMeta(properties=objects.ImageMetaProps()),
            scheduler_hints={'task_state': ['scheduling'], 'host': ['host1'],
                             'node': ['node1']})
        host = fakes.FakeHostState('host1', 'node1',
                                   {'cpu_info': '{"model": "Conroe"}'})
        self.assertFalse(self.filt_cls.host_passes(host, spec_obj))

    def test_vcpu_model_image_passes(self):
        """Image property hw_cpu_model is honored: Broadwell host passes."""
        props = objects.ImageMetaProps(hw_cpu_model='Nehalem')
        spec_obj = objects.RequestSpec(
            flavor=objects.Flavor(memory_mb=1024, extra_specs={}),
            image=objects.ImageMeta(properties=props),
            scheduler_hints={'task_state': ['scheduling'], 'host': ['host1'],
                             'node': ['node1']})
        host = fakes.FakeHostState('host1', 'node1',
                                   {'cpu_info': '{"model": "Broadwell"}'})
        self.assertTrue(self.filt_cls.host_passes(host, spec_obj))

    def test_vcpu_model_image_fails(self):
        """Image property hw_cpu_model is honored: Conroe host is rejected."""
        props = objects.ImageMetaProps(hw_cpu_model='Nehalem')
        spec_obj = objects.RequestSpec(
            flavor=objects.Flavor(memory_mb=1024, extra_specs={}),
            image=objects.ImageMeta(properties=props),
            scheduler_hints={'task_state': ['scheduling'], 'host': ['host1'],
                             'node': ['node1']})
        host = fakes.FakeHostState('host1', 'node1',
                                   {'cpu_info': '{"model": "Conroe"}'})
        self.assertFalse(self.filt_cls.host_passes(host, spec_obj))

    def test_passthrough_vcpu_model_flavor_passes(self):
        """Passthrough model passes when the host exposes vmx (KVM capable)."""
        spec_obj = objects.RequestSpec(
            flavor=objects.Flavor(extra_specs={'hw:cpu_model': 'Passthrough'}),
            image=objects.ImageMeta(properties=objects.ImageMetaProps()),
            scheduler_hints={'task_state': ['scheduling'], 'host': ['host1'],
                             'node': ['node1']})
        host = fakes.FakeHostState('host1', 'node1',
            {'cpu_info': '{"model": "Broadwell", "features": ["vmx"]}'})
        self.assertTrue(self.filt_cls.host_passes(host, spec_obj))

    def test_passthrough_migrate_vcpu_model_flavor_passes(self):
        """Migration: destination matches the source node's model/features."""
        spec_obj = objects.RequestSpec(
            flavor=objects.Flavor(extra_specs={'hw:cpu_model': 'Passthrough'}),
            image=objects.ImageMeta(properties=objects.ImageMetaProps()),
            scheduler_hints={'task_state': ['migrating'], 'host': ['host1'],
                             'node': ['node1']})
        host = fakes.FakeHostState('host1', 'node1',
            {'cpu_info': '{"model": "Broadwell", '
                         '"features": ["pge", "avx", "vmx"]}'})
        # Source compute node lookup is stubbed to a fixed Broadwell node.
        self.stub_out('nova.objects.ComputeNode.get_by_host_and_nodename',
                      self._fake_compute_node_get_by_host_and_nodename)
        self.assertTrue(self.filt_cls.host_passes(host, spec_obj))

    def test_passthrough_migrate_vcpu_model_flavor_fails(self):
        """Migration: destination model differs from the source's → reject."""
        spec_obj = objects.RequestSpec(
            flavor=objects.Flavor(extra_specs={'hw:cpu_model': 'Passthrough'}),
            image=objects.ImageMeta(properties=objects.ImageMetaProps()),
            scheduler_hints={'task_state': ['migrating'], 'host': ['host1'],
                             'node': ['node1']})
        host = fakes.FakeHostState('host1', 'node1',
            {'cpu_info': '{"model": "IvyBridge", '
                         '"features": ["pge", "avx", "vmx"]}'})
        self.stub_out('nova.objects.ComputeNode.get_by_host_and_nodename',
                      self._fake_compute_node_get_by_host_and_nodename)
        self.assertFalse(self.filt_cls.host_passes(host, spec_obj))

    def test_passthrough_migrate_vcpu_model_flavor_features_fails(self):
        """Migration: same model but extra destination feature → reject."""
        spec_obj = objects.RequestSpec(
            flavor=objects.Flavor(extra_specs={'hw:cpu_model': 'Passthrough'}),
            image=objects.ImageMeta(properties=objects.ImageMetaProps()),
            scheduler_hints={'task_state': ['migrating'], 'host': ['host1'],
                             'node': ['node1']})
        host = fakes.FakeHostState('host1', 'node1',
            {'cpu_info': '{"model": "Broadwell", '
                         '"features": ["pge", "avx", "vmx", "clflush"]}'})
        self.stub_out('nova.objects.ComputeNode.get_by_host_and_nodename',
                      self._fake_compute_node_get_by_host_and_nodename)
        self.assertFalse(self.filt_cls.host_passes(host, spec_obj))

    def test_passthrough_migrate_vcpu_model_flavor_kvm_fails(self):
        """Passthrough requires vmx on the host; without it → reject."""
        spec_obj = objects.RequestSpec(
            flavor=objects.Flavor(extra_specs={'hw:cpu_model': 'Passthrough'}),
            image=objects.ImageMeta(properties=objects.ImageMetaProps()),
            scheduler_hints={'task_state': ['scheduling'], 'host': ['host1'],
                             'node': ['node1']})
        host = fakes.FakeHostState('host1', 'node1',
            {'cpu_info': '{"model": "Broadwell", '
                         '"features": ["pge", "avx"]}'})
        self.assertFalse(self.filt_cls.host_passes(host, spec_obj))

    def _fake_compute_node_get_by_host_and_nodename(self, cn, ctx, host, node):
        # Stand-in for ComputeNode.get_by_host_and_nodename: always returns
        # a Broadwell node with pge/avx/vmx, used as the migration source.
        cpu_info = '{"model": "Broadwell", "features": ["pge", "avx", "vmx"]}'
        compute_node = objects.ComputeNode(cpu_info=cpu_info)
        return compute_node
| 51.958042
| 79
| 0.616958
| 802
| 7,430
| 5.465087
| 0.179551
| 0.031942
| 0.027607
| 0.057039
| 0.807894
| 0.807894
| 0.807894
| 0.807894
| 0.798312
| 0.789414
| 0
| 0.011323
| 0.2393
| 7,430
| 142
| 80
| 52.323944
| 0.764154
| 0.080081
| 0
| 0.761062
| 0
| 0
| 0.187326
| 0.021564
| 0
| 0
| 0
| 0
| 0.088496
| 1
| 0.106195
| false
| 0.19469
| 0.035398
| 0
| 0.159292
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
d9f95fcadfdb0bbe4c6dfe3179093b0ecd208180
| 24,693
|
py
|
Python
|
back-end/object_detection/data_decoders/tf_example_decoder_test.py
|
scorelab/Elphas
|
be3e3906fa1f69155dc3f61f5c0bf21568e712c9
|
[
"Apache-2.0"
] | 59
|
2018-09-23T09:34:24.000Z
|
2020-03-10T04:31:27.000Z
|
swimmingpool_model/object_detection/data_decoders/tf_example_decoder_test.py
|
henriqueftogashi/object-detection-tensorflow-swimmingpool
|
70a2414a89a778a46b07ad8032df7aade3cb8edb
|
[
"Apache-2.0"
] | 5
|
2018-10-02T14:49:12.000Z
|
2020-07-14T02:54:30.000Z
|
back-end/object_detection/data_decoders/tf_example_decoder_test.py
|
scorelab/Elphas
|
be3e3906fa1f69155dc3f61f5c0bf21568e712c9
|
[
"Apache-2.0"
] | 58
|
2018-09-23T10:31:47.000Z
|
2021-11-08T11:34:40.000Z
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.data_decoders.tf_example_decoder."""
import os
import numpy as np
import tensorflow as tf
from object_detection.core import standard_fields as fields
from object_detection.data_decoders import tf_example_decoder
from object_detection.protos import input_reader_pb2
class TfExampleDecoderTest(tf.test.TestCase):
def _EncodeImage(self, image_tensor, encoding_type='jpeg'):
    """Encode a uint8 image array to JPEG or PNG bytes via a TF session."""
    encoders = {
        'jpeg': tf.image.encode_jpeg,
        'png': tf.image.encode_png,
    }
    if encoding_type not in encoders:
        raise ValueError('Invalid encoding type.')
    with self.test_session():
        image_encoded = encoders[encoding_type](
            tf.constant(image_tensor)).eval()
    return image_encoded
def _DecodeImage(self, image_encoded, encoding_type='jpeg'):
    """Decode JPEG or PNG bytes back to a pixel array via a TF session."""
    decoders = {
        'jpeg': tf.image.decode_jpeg,
        'png': tf.image.decode_png,
    }
    if encoding_type not in decoders:
        raise ValueError('Invalid encoding type.')
    with self.test_session():
        image_decoded = decoders[encoding_type](
            tf.constant(image_encoded)).eval()
    return image_decoded
def _Int64Feature(self, value):
    """Wrap an iterable of ints as a tf.train.Feature (int64_list)."""
    int_list = tf.train.Int64List(value=value)
    return tf.train.Feature(int64_list=int_list)
def _FloatFeature(self, value):
    """Wrap an iterable of floats as a tf.train.Feature (float_list)."""
    float_list = tf.train.FloatList(value=value)
    return tf.train.Feature(float_list=float_list)
def _BytesFeature(self, value):
    """Wrap a bytes value (or list of them) as a tf.train.Feature.

    A scalar value is promoted to a one-element list before wrapping.
    """
    values = value if isinstance(value, list) else [value]
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=values))
def testDecodeJpegImage(self):
    """A JPEG-encoded example decodes back to the expected pixels."""
    raw_image = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
    jpeg_bytes = self._EncodeImage(raw_image)
    expected_pixels = self._DecodeImage(jpeg_bytes)
    feature_map = {
        'image/encoded': self._BytesFeature(jpeg_bytes),
        'image/format': self._BytesFeature('jpeg'),
        'image/source_id': self._BytesFeature('image_id'),
    }
    serialized = tf.train.Example(
        features=tf.train.Features(feature=feature_map)).SerializeToString()
    decoder = tf_example_decoder.TfExampleDecoder()
    tensor_dict = decoder.decode(tf.convert_to_tensor(serialized))
    # Static shape: height/width are dynamic, channel count is fixed.
    self.assertAllEqual(
        tensor_dict[fields.InputDataFields.image].get_shape().as_list(),
        [None, None, 3])
    with self.test_session() as sess:
        decoded = sess.run(tensor_dict)
        self.assertAllEqual(expected_pixels,
                            decoded[fields.InputDataFields.image])
        self.assertEqual('image_id',
                         decoded[fields.InputDataFields.source_id])
def testDecodeImageKeyAndFilename(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg = self._EncodeImage(image_tensor)
example = tf.train.Example(features=tf.train.Features(feature={
'image/encoded': self._BytesFeature(encoded_jpeg),
'image/key/sha256': self._BytesFeature('abc'),
'image/filename': self._BytesFeature('filename')
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
tensor_dict = example_decoder.decode(tf.convert_to_tensor(example))
with self.test_session() as sess:
tensor_dict = sess.run(tensor_dict)
self.assertEqual('abc', tensor_dict[fields.InputDataFields.key])
self.assertEqual('filename', tensor_dict[fields.InputDataFields.filename])
def testDecodePngImage(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_png = self._EncodeImage(image_tensor, encoding_type='png')
decoded_png = self._DecodeImage(encoded_png, encoding_type='png')
example = tf.train.Example(features=tf.train.Features(feature={
'image/encoded': self._BytesFeature(encoded_png),
'image/format': self._BytesFeature('png'),
'image/source_id': self._BytesFeature('image_id')
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
tensor_dict = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((tensor_dict[fields.InputDataFields.image].
get_shape().as_list()), [None, None, 3])
with self.test_session() as sess:
tensor_dict = sess.run(tensor_dict)
self.assertAllEqual(decoded_png, tensor_dict[fields.InputDataFields.image])
self.assertEqual('image_id', tensor_dict[fields.InputDataFields.source_id])
def testDecodePngInstanceMasks(self):
image_tensor = np.random.randint(256, size=(10, 10, 3)).astype(np.uint8)
encoded_jpeg = self._EncodeImage(image_tensor)
mask_1 = np.random.randint(0, 2, size=(10, 10, 1)).astype(np.uint8)
mask_2 = np.random.randint(0, 2, size=(10, 10, 1)).astype(np.uint8)
encoded_png_1 = self._EncodeImage(mask_1, encoding_type='png')
decoded_png_1 = np.squeeze(mask_1.astype(np.float32))
encoded_png_2 = self._EncodeImage(mask_2, encoding_type='png')
decoded_png_2 = np.squeeze(mask_2.astype(np.float32))
encoded_masks = [encoded_png_1, encoded_png_2]
decoded_masks = np.stack([decoded_png_1, decoded_png_2])
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded': self._BytesFeature(encoded_jpeg),
'image/format': self._BytesFeature('jpeg'),
'image/object/mask': self._BytesFeature(encoded_masks)
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder(
load_instance_masks=True, instance_mask_type=input_reader_pb2.PNG_MASKS)
tensor_dict = example_decoder.decode(tf.convert_to_tensor(example))
with self.test_session() as sess:
tensor_dict = sess.run(tensor_dict)
self.assertAllEqual(
decoded_masks,
tensor_dict[fields.InputDataFields.groundtruth_instance_masks])
def testDecodeEmptyPngInstanceMasks(self):
image_tensor = np.random.randint(256, size=(10, 10, 3)).astype(np.uint8)
encoded_jpeg = self._EncodeImage(image_tensor)
encoded_masks = []
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded': self._BytesFeature(encoded_jpeg),
'image/format': self._BytesFeature('jpeg'),
'image/object/mask': self._BytesFeature(encoded_masks),
'image/height': self._Int64Feature([10]),
'image/width': self._Int64Feature([10]),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder(
load_instance_masks=True, instance_mask_type=input_reader_pb2.PNG_MASKS)
tensor_dict = example_decoder.decode(tf.convert_to_tensor(example))
with self.test_session() as sess:
tensor_dict = sess.run(tensor_dict)
self.assertAllEqual(
tensor_dict[fields.InputDataFields.groundtruth_instance_masks].shape,
[0, 10, 10])
def testDecodeBoundingBox(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg = self._EncodeImage(image_tensor)
bbox_ymins = [0.0, 4.0]
bbox_xmins = [1.0, 5.0]
bbox_ymaxs = [2.0, 6.0]
bbox_xmaxs = [3.0, 7.0]
example = tf.train.Example(features=tf.train.Features(feature={
'image/encoded': self._BytesFeature(encoded_jpeg),
'image/format': self._BytesFeature('jpeg'),
'image/object/bbox/ymin': self._FloatFeature(bbox_ymins),
'image/object/bbox/xmin': self._FloatFeature(bbox_xmins),
'image/object/bbox/ymax': self._FloatFeature(bbox_ymaxs),
'image/object/bbox/xmax': self._FloatFeature(bbox_xmaxs),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
tensor_dict = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((tensor_dict[fields.InputDataFields.groundtruth_boxes].
get_shape().as_list()), [None, 4])
with self.test_session() as sess:
tensor_dict = sess.run(tensor_dict)
expected_boxes = np.vstack([bbox_ymins, bbox_xmins,
bbox_ymaxs, bbox_xmaxs]).transpose()
self.assertAllEqual(expected_boxes,
tensor_dict[fields.InputDataFields.groundtruth_boxes])
self.assertAllEqual(
2, tensor_dict[fields.InputDataFields.num_groundtruth_boxes])
def testDecodeDefaultGroundtruthWeights(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg = self._EncodeImage(image_tensor)
bbox_ymins = [0.0, 4.0]
bbox_xmins = [1.0, 5.0]
bbox_ymaxs = [2.0, 6.0]
bbox_xmaxs = [3.0, 7.0]
example = tf.train.Example(features=tf.train.Features(feature={
'image/encoded': self._BytesFeature(encoded_jpeg),
'image/format': self._BytesFeature('jpeg'),
'image/object/bbox/ymin': self._FloatFeature(bbox_ymins),
'image/object/bbox/xmin': self._FloatFeature(bbox_xmins),
'image/object/bbox/ymax': self._FloatFeature(bbox_ymaxs),
'image/object/bbox/xmax': self._FloatFeature(bbox_xmaxs),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
tensor_dict = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((tensor_dict[fields.InputDataFields.groundtruth_boxes].
get_shape().as_list()), [None, 4])
with self.test_session() as sess:
tensor_dict = sess.run(tensor_dict)
self.assertAllClose(tensor_dict[fields.InputDataFields.groundtruth_weights],
np.ones(2, dtype=np.float32))
def testDecodeObjectLabel(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg = self._EncodeImage(image_tensor)
bbox_classes = [0, 1]
example = tf.train.Example(features=tf.train.Features(feature={
'image/encoded': self._BytesFeature(encoded_jpeg),
'image/format': self._BytesFeature('jpeg'),
'image/object/class/label': self._Int64Feature(bbox_classes),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
tensor_dict = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((tensor_dict[
fields.InputDataFields.groundtruth_classes].get_shape().as_list()),
[None])
with self.test_session() as sess:
tensor_dict = sess.run(tensor_dict)
self.assertAllEqual(bbox_classes,
tensor_dict[fields.InputDataFields.groundtruth_classes])
def testDecodeObjectLabelNoText(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg = self._EncodeImage(image_tensor)
bbox_classes = [1, 2]
example = tf.train.Example(features=tf.train.Features(feature={
'image/encoded': self._BytesFeature(encoded_jpeg),
'image/format': self._BytesFeature('jpeg'),
'image/object/class/label': self._Int64Feature(bbox_classes),
})).SerializeToString()
label_map_string = """
item {
id:1
name:'cat'
}
item {
id:2
name:'dog'
}
"""
label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')
with tf.gfile.Open(label_map_path, 'wb') as f:
f.write(label_map_string)
example_decoder = tf_example_decoder.TfExampleDecoder(
label_map_proto_file=label_map_path)
tensor_dict = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((tensor_dict[
fields.InputDataFields.groundtruth_classes].get_shape().as_list()),
[None])
init = tf.tables_initializer()
with self.test_session() as sess:
sess.run(init)
tensor_dict = sess.run(tensor_dict)
self.assertAllEqual(bbox_classes,
tensor_dict[fields.InputDataFields.groundtruth_classes])
def testDecodeObjectLabelUnrecognizedName(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg = self._EncodeImage(image_tensor)
bbox_classes_text = ['cat', 'cheetah']
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
self._BytesFeature(encoded_jpeg),
'image/format':
self._BytesFeature('jpeg'),
'image/object/class/text':
self._BytesFeature(bbox_classes_text),
})).SerializeToString()
label_map_string = """
item {
id:2
name:'cat'
}
item {
id:1
name:'dog'
}
"""
label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')
with tf.gfile.Open(label_map_path, 'wb') as f:
f.write(label_map_string)
example_decoder = tf_example_decoder.TfExampleDecoder(
label_map_proto_file=label_map_path)
tensor_dict = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((tensor_dict[fields.InputDataFields.groundtruth_classes]
.get_shape().as_list()), [None])
with self.test_session() as sess:
sess.run(tf.tables_initializer())
tensor_dict = sess.run(tensor_dict)
self.assertAllEqual([2, -1],
tensor_dict[fields.InputDataFields.groundtruth_classes])
def testDecodeObjectLabelWithMapping(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg = self._EncodeImage(image_tensor)
bbox_classes_text = ['cat', 'dog']
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
self._BytesFeature(encoded_jpeg),
'image/format':
self._BytesFeature('jpeg'),
'image/object/class/text':
self._BytesFeature(bbox_classes_text),
})).SerializeToString()
label_map_string = """
item {
id:3
name:'cat'
}
item {
id:1
name:'dog'
}
"""
label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')
with tf.gfile.Open(label_map_path, 'wb') as f:
f.write(label_map_string)
example_decoder = tf_example_decoder.TfExampleDecoder(
label_map_proto_file=label_map_path)
tensor_dict = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((tensor_dict[fields.InputDataFields.groundtruth_classes]
.get_shape().as_list()), [None])
with self.test_session() as sess:
sess.run(tf.tables_initializer())
tensor_dict = sess.run(tensor_dict)
self.assertAllEqual([3, 1],
tensor_dict[fields.InputDataFields.groundtruth_classes])
def testDecodeObjectArea(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg = self._EncodeImage(image_tensor)
object_area = [100., 174.]
example = tf.train.Example(features=tf.train.Features(feature={
'image/encoded': self._BytesFeature(encoded_jpeg),
'image/format': self._BytesFeature('jpeg'),
'image/object/area': self._FloatFeature(object_area),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
tensor_dict = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((tensor_dict[fields.InputDataFields.groundtruth_area].
get_shape().as_list()), [None])
with self.test_session() as sess:
tensor_dict = sess.run(tensor_dict)
self.assertAllEqual(object_area,
tensor_dict[fields.InputDataFields.groundtruth_area])
def testDecodeObjectIsCrowd(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg = self._EncodeImage(image_tensor)
object_is_crowd = [0, 1]
example = tf.train.Example(features=tf.train.Features(feature={
'image/encoded': self._BytesFeature(encoded_jpeg),
'image/format': self._BytesFeature('jpeg'),
'image/object/is_crowd': self._Int64Feature(object_is_crowd),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
tensor_dict = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((tensor_dict[
fields.InputDataFields.groundtruth_is_crowd].get_shape().as_list()),
[None])
with self.test_session() as sess:
tensor_dict = sess.run(tensor_dict)
self.assertAllEqual([bool(item) for item in object_is_crowd],
tensor_dict[
fields.InputDataFields.groundtruth_is_crowd])
def testDecodeObjectDifficult(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg = self._EncodeImage(image_tensor)
object_difficult = [0, 1]
example = tf.train.Example(features=tf.train.Features(feature={
'image/encoded': self._BytesFeature(encoded_jpeg),
'image/format': self._BytesFeature('jpeg'),
'image/object/difficult': self._Int64Feature(object_difficult),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
tensor_dict = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((tensor_dict[
fields.InputDataFields.groundtruth_difficult].get_shape().as_list()),
[None])
with self.test_session() as sess:
tensor_dict = sess.run(tensor_dict)
self.assertAllEqual([bool(item) for item in object_difficult],
tensor_dict[
fields.InputDataFields.groundtruth_difficult])
def testDecodeObjectGroupOf(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg = self._EncodeImage(image_tensor)
object_group_of = [0, 1]
example = tf.train.Example(features=tf.train.Features(
feature={
'image/encoded': self._BytesFeature(encoded_jpeg),
'image/format': self._BytesFeature('jpeg'),
'image/object/group_of': self._Int64Feature(object_group_of),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
tensor_dict = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((tensor_dict[
fields.InputDataFields.groundtruth_group_of].get_shape().as_list()),
[None])
with self.test_session() as sess:
tensor_dict = sess.run(tensor_dict)
self.assertAllEqual(
[bool(item) for item in object_group_of],
tensor_dict[fields.InputDataFields.groundtruth_group_of])
def testDecodeObjectWeight(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg = self._EncodeImage(image_tensor)
object_weights = [0.75, 1.0]
example = tf.train.Example(features=tf.train.Features(
feature={
'image/encoded': self._BytesFeature(encoded_jpeg),
'image/format': self._BytesFeature('jpeg'),
'image/object/weight': self._FloatFeature(object_weights),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
tensor_dict = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((tensor_dict[
fields.InputDataFields.groundtruth_weights].get_shape().as_list()),
[None])
with self.test_session() as sess:
tensor_dict = sess.run(tensor_dict)
self.assertAllEqual(
object_weights,
tensor_dict[fields.InputDataFields.groundtruth_weights])
def testDecodeInstanceSegmentation(self):
num_instances = 4
image_height = 5
image_width = 3
# Randomly generate image.
image_tensor = np.random.randint(256, size=(image_height,
image_width,
3)).astype(np.uint8)
encoded_jpeg = self._EncodeImage(image_tensor)
# Randomly generate instance segmentation masks.
instance_masks = (
np.random.randint(2, size=(num_instances,
image_height,
image_width)).astype(np.float32))
instance_masks_flattened = np.reshape(instance_masks, [-1])
# Randomly generate class labels for each instance.
object_classes = np.random.randint(
100, size=(num_instances)).astype(np.int64)
example = tf.train.Example(features=tf.train.Features(feature={
'image/encoded': self._BytesFeature(encoded_jpeg),
'image/format': self._BytesFeature('jpeg'),
'image/height': self._Int64Feature([image_height]),
'image/width': self._Int64Feature([image_width]),
'image/object/mask': self._FloatFeature(instance_masks_flattened),
'image/object/class/label': self._Int64Feature(
object_classes)})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder(
load_instance_masks=True)
tensor_dict = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((
tensor_dict[fields.InputDataFields.groundtruth_instance_masks].
get_shape().as_list()), [None, None, None])
self.assertAllEqual((
tensor_dict[fields.InputDataFields.groundtruth_classes].
get_shape().as_list()), [None])
with self.test_session() as sess:
tensor_dict = sess.run(tensor_dict)
self.assertAllEqual(
instance_masks.astype(np.float32),
tensor_dict[fields.InputDataFields.groundtruth_instance_masks])
self.assertAllEqual(
object_classes,
tensor_dict[fields.InputDataFields.groundtruth_classes])
def testInstancesNotAvailableByDefault(self):
num_instances = 4
image_height = 5
image_width = 3
# Randomly generate image.
image_tensor = np.random.randint(256, size=(image_height,
image_width,
3)).astype(np.uint8)
encoded_jpeg = self._EncodeImage(image_tensor)
# Randomly generate instance segmentation masks.
instance_masks = (
np.random.randint(2, size=(num_instances,
image_height,
image_width)).astype(np.float32))
instance_masks_flattened = np.reshape(instance_masks, [-1])
# Randomly generate class labels for each instance.
object_classes = np.random.randint(
100, size=(num_instances)).astype(np.int64)
example = tf.train.Example(features=tf.train.Features(feature={
'image/encoded': self._BytesFeature(encoded_jpeg),
'image/format': self._BytesFeature('jpeg'),
'image/height': self._Int64Feature([image_height]),
'image/width': self._Int64Feature([image_width]),
'image/object/mask': self._FloatFeature(instance_masks_flattened),
'image/object/class/label': self._Int64Feature(
object_classes)})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
tensor_dict = example_decoder.decode(tf.convert_to_tensor(example))
self.assertTrue(fields.InputDataFields.groundtruth_instance_masks
not in tensor_dict)
# Standard TensorFlow test entry point: runs the test cases defined above.
if __name__ == '__main__':
  tf.test.main()
| 42.35506
| 81
| 0.658891
| 2,797
| 24,693
| 5.562031
| 0.089739
| 0.057852
| 0.038054
| 0.073729
| 0.826573
| 0.807354
| 0.780485
| 0.766215
| 0.753551
| 0.742367
| 0
| 0.016271
| 0.223464
| 24,693
| 582
| 82
| 42.427835
| 0.795046
| 0.03912
| 0
| 0.728632
| 0
| 0
| 0.071799
| 0.016522
| 0
| 0
| 0
| 0
| 0.081197
| 1
| 0.049145
| false
| 0
| 0.012821
| 0.004274
| 0.076923
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8a43b479e7955dc23db52cb56d15294b2883f276
| 9,365
|
py
|
Python
|
quantizers.py
|
gcunhase/sampleQAT
|
67523343cb368634f8744ad425bad428ab3fddad
|
[
"Apache-2.0"
] | null | null | null |
quantizers.py
|
gcunhase/sampleQAT
|
67523343cb368634f8744ad425bad428ab3fddad
|
[
"Apache-2.0"
] | null | null | null |
quantizers.py
|
gcunhase/sampleQAT
|
67523343cb368634f8744ad425bad428ab3fddad
|
[
"Apache-2.0"
] | 1
|
2021-08-31T23:16:42.000Z
|
2021-08-31T23:16:42.000Z
|
import tensorflow as tf
from tensorflow.python.training import moving_averages
def MovingAvgQuantize(inputs,
                      min_var,
                      max_var,
                      per_channel=False,
                      ema_decay=0.999,
                      name_prefix='MovingAvgQuantize',
                      is_training=True,
                      num_bits=8,
                      narrow_range=False,
                      symmetric=False):
  """Adds a layer that collects quantization ranges as EMAs of input ranges.

  MovingAvgQuantize tracks the quantization interval in the supplied
  'min_var' and 'max_var' variables as exponential moving averages of the
  observed batch ranges, then fake-quantizes 'inputs' with that interval.

  Args:
    inputs: a tensor containing values to be quantized.
    min_var: variable holding the lower end of the quantization range.
    max_var: variable holding the upper end of the quantization range.
    per_channel: (default False) a boolean specifying whether to use different
      quantization ranges per output channel. Per-channel inputs must be
      rank 1 (one value per channel), rank 2 or rank 4 with channels last.
    ema_decay: EMA decay parameter.
    name_prefix: name_prefix for created nodes.
    is_training: Whether the op is applied to a training or eval graph.
    num_bits: Number of bits to use for quantization, must be between 2 and 8.
    narrow_range: Whether to use the narrow quantization range
      [1; 2^num_bits - 1] or wide range [0; 2^num_bits - 1].
    symmetric: If true, use symmetric quantization limits instead of training
      the minimum and maximum of each quantization range separately.

  Returns:
    a tensor containing quantized values.

  Raises:
    ValueError: if per_channel is True and the input rank is not 1, 2 or 4.
      (Previously this path crashed with a NameError on an unbound
      'reduce_dims'.)
  """
  with tf.name_scope(name_prefix):
    input_dim = len(inputs.get_shape())
    if not is_training:
      # Eval graphs quantize with the frozen stored ranges; no EMA update.
      return _FakeQuantWithMinMaxVars(
          inputs,
          min_var,
          max_var,
          per_channel=per_channel,
          num_bits=num_bits,
          narrow_range=narrow_range)
    # Compute the observed range of this batch, per channel if requested.
    # (Collapses the previously duplicated `if per_channel:` blocks.)
    if per_channel and input_dim >= 2:
      if input_dim == 2:
        reduce_dims = [0]
      elif input_dim == 4:
        reduce_dims = [0, 1, 2]
      else:
        raise ValueError(
            'per_channel quantization expects rank 1, 2 or 4 inputs, got '
            'rank %d.' % input_dim)
      batch_min = tf.math.reduce_min(inputs, axis=reduce_dims, name='BatchMin')
      batch_max = tf.math.reduce_max(inputs, axis=reduce_dims, name='BatchMax')
    elif per_channel:
      # Rank-1 input: already one value per channel, nothing to reduce.
      batch_min = inputs
      batch_max = inputs
    else:
      batch_min = tf.math.reduce_min(inputs, name='BatchMin')
      batch_max = tf.math.reduce_max(inputs, name='BatchMax')
    if symmetric:
      if narrow_range:
        min_max_ratio = -1
      else:
        # In two's complement notation, the negative range is slightly larger
        # than the positive range.
        min_max_ratio = -((1 << num_bits) - 2) / (1 << num_bits)
      # TFLite requires that 0.0 is always in the [min; max] range. Because
      # batch_min <= batch_max, it follows that range_min <= 0 <= range_max.
      range_min = tf.math.minimum(batch_min, batch_max / min_max_ratio)
      range_max = tf.math.maximum(batch_max, batch_min * min_max_ratio)
    else:
      # TFLite requires that 0.0 is always in the [min; max] range.
      range_min = tf.math.minimum(batch_min, 0.0)
      range_max = tf.math.maximum(batch_max, 0.0)
    # Fold this batch's range into the EMA-tracked range variables.
    assign_min = moving_averages.assign_moving_average(
        min_var, range_min, ema_decay, zero_debias=False, name='AssignMinEma')
    assign_max = moving_averages.assign_moving_average(
        max_var, range_max, ema_decay, zero_debias=False, name='AssignMaxEma')
    return _FakeQuantWithMinMaxVars(
        inputs,
        assign_min,
        assign_max,
        per_channel=per_channel,
        num_bits=num_bits,
        narrow_range=narrow_range)
def LastValueQuantize(inputs,
                      min_var,
                      max_var,
                      per_channel=False,
                      name_prefix='LastValueQuant',
                      is_training=True,
                      num_bits=8,
                      narrow_range=False,
                      symmetric=False):
  """Adds a layer that collects quantization ranges as last input ranges.

  LastValueQuantize overwrites the supplied 'min_var' and 'max_var'
  variables with the range observed on the current batch, then
  fake-quantizes 'inputs' with that interval.

  Args:
    inputs: a tensor containing values to be quantized.
    min_var: variable holding the lower end of the quantization range.
    max_var: variable holding the upper end of the quantization range.
    per_channel: (Optional) a boolean specifying whether to use different
      quantization ranges per output channel. Per-channel inputs must be
      rank 1 (one value per channel), rank 2 or rank 4 with channels last.
    name_prefix: name_prefix for created nodes.
    is_training: Whether the op is applied to a training or eval graph.
    num_bits: Number of bits to use for quantization, must be between 2 and 8.
    narrow_range: Whether to use the narrow quantization range
      [1; 2^num_bits - 1] or wide range [0; 2^num_bits - 1].
    symmetric: If true, use symmetric quantization limits instead of training
      the minimum and maximum of each quantization range separately.

  Returns:
    a tensor containing quantized values.

  Raises:
    ValueError: if per_channel is True and the input rank is not 1, 2 or 4.
      (Previously this path crashed with a NameError on an unbound
      'reduce_dims'.)
  """
  with tf.name_scope(name_prefix):
    input_dim = len(inputs.get_shape())
    if not is_training:
      # Eval graphs quantize with the frozen stored ranges; no update.
      return _FakeQuantWithMinMaxVars(
          inputs,
          min_var,
          max_var,
          per_channel=per_channel,
          num_bits=num_bits,
          narrow_range=narrow_range)
    # Compute the observed range of this batch, per channel if requested.
    # (Collapses the previously duplicated `if per_channel:` blocks.)
    if per_channel and input_dim >= 2:
      if input_dim == 2:
        reduce_dims = [0]
      elif input_dim == 4:
        reduce_dims = [0, 1, 2]
      else:
        raise ValueError(
            'per_channel quantization expects rank 1, 2 or 4 inputs, got '
            'rank %d.' % input_dim)
      batch_min = tf.math.reduce_min(inputs, axis=reduce_dims, name='BatchMin')
      batch_max = tf.math.reduce_max(inputs, axis=reduce_dims, name='BatchMax')
    elif per_channel:
      # Rank-1 input: already one value per channel, nothing to reduce.
      batch_min = inputs
      batch_max = inputs
    else:
      batch_min = tf.math.reduce_min(inputs, name='BatchMin')
      batch_max = tf.math.reduce_max(inputs, name='BatchMax')
    if symmetric:
      if narrow_range:
        min_max_ratio = -1
      else:
        # In two's complement notation, the negative range is slightly larger
        # than the positive range.
        min_max_ratio = -((1 << num_bits) - 2) / (1 << num_bits)
      # TFLite requires that 0.0 is always in the [min; max] range. Because
      # batch_min <= batch_max, it follows that range_min <= 0 <= range_max.
      range_min = tf.math.minimum(batch_min, batch_max / min_max_ratio)
      range_max = tf.math.maximum(batch_max, batch_min * min_max_ratio)
    else:
      # TFLite requires that 0.0 is always in the [min; max] range.
      range_min = tf.math.minimum(batch_min, 0.0)
      range_max = tf.math.maximum(batch_max, 0.0)
    # Unlike MovingAvgQuantize, the range variables are simply overwritten.
    assign_min = min_var.assign(range_min, name='AssignMinLast')
    assign_max = max_var.assign(range_max, name='AssignMaxLast')
    return _FakeQuantWithMinMaxVars(
        inputs,
        assign_min,
        assign_max,
        per_channel=per_channel,
        num_bits=num_bits,
        narrow_range=narrow_range)
def _FakeQuantWithMinMaxVars(inputs, min_var, max_var, per_channel, num_bits,
                             narrow_range):
  """Adds a fake quantization operation.

  Quantizes and immediately dequantizes 'inputs' using the range held in
  'min_var'/'max_var'. With per_channel quantization the range variables
  carry one entry per channel (shape [d], last axis); otherwise they are
  scalars and a single global range is used.

  Args:
    inputs: a tensor containing values to be quantized.
    min_var: a variable containing quantization range lower end(s).
    max_var: a variable containing quantization range upper end(s).
    per_channel: a boolean specifying whether to use per-channel quantization.
    num_bits: Number of bits to use for quantization, must be between 2 and 8.
    narrow_range: Whether to use the narrow quantization range
      [1; 2^num_bits - 1] or wide range [0; 2^num_bits - 1].

  Returns:
    a tensor containing quantized values.
  """
  quant_kwargs = {
      'num_bits': num_bits,
      'narrow_range': narrow_range,
      'range_given': True,
  }
  if per_channel:
    # Per-channel ranges: one min/max entry per output channel.
    assert len(min_var.get_shape()) == 1
    assert len(max_var.get_shape()) == 1
    quant_kwargs['axis'] = -1
  else:
    # Global range: both variables must be scalars.
    assert min_var.get_shape() == []  # pylint: disable=g-explicit-bool-comparison
    assert max_var.get_shape() == []  # pylint: disable=g-explicit-bool-comparison
  return tf.quantization.quantize_and_dequantize(
      inputs, min_var, max_var, **quant_kwargs)
| 39.348739
| 83
| 0.604058
| 1,167
| 9,365
| 4.636675
| 0.144816
| 0.036223
| 0.017742
| 0.019405
| 0.849196
| 0.836999
| 0.799852
| 0.787285
| 0.755683
| 0.72981
| 0
| 0.012035
| 0.325681
| 9,365
| 237
| 84
| 39.514768
| 0.844814
| 0.372664
| 0
| 0.808219
| 0
| 0
| 0.026335
| 0
| 0
| 0
| 0
| 0
| 0.027397
| 1
| 0.020548
| false
| 0
| 0.013699
| 0
| 0.075342
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8a60dbe8060ce7507fa00718664515c25515efa8
| 183
|
py
|
Python
|
python/src/Lib/distutils/tests/test_versionpredicate.py
|
vlinhd11/vlinhd11-android-scripting
|
c90f04eb26a3746f025a6a0beab92bb6aa88c084
|
[
"Apache-2.0"
] | 2,293
|
2015-01-02T12:46:10.000Z
|
2022-03-29T09:45:43.000Z
|
python/src/Lib/distutils/tests/test_versionpredicate.py
|
weiqiangzheng/sl4a
|
d3c17dca978cbeee545e12ea240a9dbf2a6999e9
|
[
"Apache-2.0"
] | 315
|
2015-05-31T11:55:46.000Z
|
2022-01-12T08:36:37.000Z
|
python/src/Lib/distutils/tests/test_versionpredicate.py
|
weiqiangzheng/sl4a
|
d3c17dca978cbeee545e12ea240a9dbf2a6999e9
|
[
"Apache-2.0"
] | 1,033
|
2015-01-04T07:48:40.000Z
|
2022-03-24T09:34:37.000Z
|
"""Tests harness for distutils.versionpredicate.
"""
import distutils.versionpredicate
import doctest
def test_suite():
    """Build the doctest-based suite for distutils.versionpredicate."""
    suite = doctest.DocTestSuite(distutils.versionpredicate)
    return suite
| 18.3
| 59
| 0.803279
| 18
| 183
| 8.111111
| 0.666667
| 0.513699
| 0.424658
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.10929
| 183
| 9
| 60
| 20.333333
| 0.895706
| 0.245902
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.5
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 9
|
8a6d6c210b433d575cc1c0ae3dab8386292e44db
| 945
|
py
|
Python
|
python--exercicios/ex043.py
|
Eliezer2000/python
|
12abb54c6536acb2f36b8f34bf51ec765857eb75
|
[
"MIT"
] | null | null | null |
python--exercicios/ex043.py
|
Eliezer2000/python
|
12abb54c6536acb2f36b8f34bf51ec765857eb75
|
[
"MIT"
] | null | null | null |
python--exercicios/ex043.py
|
Eliezer2000/python
|
12abb54c6536acb2f36b8f34bf51ec765857eb75
|
[
"MIT"
] | null | null | null |
# BMI (IMC) exercise: reads weight (kg) and height (m), computes
# IMC = peso / altura**2 and prints the classification band.
# NOTE: the whole flow is intentionally repeated twice below (two rounds
# of prompts), matching the original exercise; only user-facing string
# bugs were fixed ('abaxo' typo and the height prompt that asked for
# 'peso' instead of 'altura' in the second round).
peso = float(input('Digite seu peso : (KG) '))
altura = float(input('Digite sua altura (M) : '))
imc = peso / (altura ** 2)
print('O IMC dessa pessoa é de {:.1f}'.format(imc))
if imc <= 18.5:
    print('você esta abaixo do peso normal . ')
elif 18.5 < imc <= 25:
    print('Você está com o peso NORMAL')
elif 25 < imc <= 30:
    print('Você está com SOBREPESO')
elif 30 < imc <= 40:
    print('Você está com OBESIDADE')
elif imc >= 40:
    print('Você está com OBESIDADE MÓRBIDA')
# Second round: same computation, slightly different output wording.
peso = float(input('Digite seu peso : KG'))
# Fixed copy-paste bug: this prompt previously asked for 'peso' again.
altura = float(input('Digite sua altura : M '))
imc = peso / (altura ** 2)
print('O IMC dessa pessoa é {:.1f}'.format(imc))
if imc <= 18.5:
    print('Você está abaixo do peso normal!')
elif 18.5 < imc <= 25:
    print('Você está com peso normal!')
elif 25 < imc <= 30:
    print('Você está com sobrepeso!')
elif 30 < imc <= 40:
    print('VocÊ está OBESO!')
elif imc >= 40:
    print('Você está com OBESIDADE MÓRBIDA')
| 30.483871
| 51
| 0.621164
| 153
| 945
| 3.836601
| 0.248366
| 0.153322
| 0.199319
| 0.190801
| 0.938671
| 0.926746
| 0.926746
| 0.906303
| 0.906303
| 0.67121
| 0
| 0.053908
| 0.214815
| 945
| 30
| 52
| 31.5
| 0.737197
| 0
| 0
| 0.5
| 0
| 0
| 0.434322
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.428571
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 9
|
0a004e3cd0d415e6853672072bacb1c0cd8a472f
| 47,810
|
py
|
Python
|
tests/unit/test_session.py
|
zoercai/python-spanner
|
ed7152adc37290c63e59865265f36c593d9b8da3
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/test_session.py
|
zoercai/python-spanner
|
ed7152adc37290c63e59865265f36c593d9b8da3
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/test_session.py
|
zoercai/python-spanner
|
ed7152adc37290c63e59865265f36c593d9b8da3
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016 Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import google.api_core.gapic_v1.method
import mock
from tests._helpers import (
OpenTelemetryBase,
StatusCanonicalCode,
HAS_OPENTELEMETRY_INSTALLED,
)
def _make_rpc_error(error_cls, trailing_metadata=None):
    """Build an error_cls wrapping an autospec'd grpc.Call.

    The mocked call reports `trailing_metadata` from its
    trailing_metadata() method, mimicking a failed RPC.
    """
    import grpc

    fake_call = mock.create_autospec(grpc.Call, instance=True)
    fake_call.trailing_metadata.return_value = trailing_metadata
    return error_cls("error", errors=(fake_call,))
class _ConstantTime:
def time(self):
return 1
class TestSession(OpenTelemetryBase):
PROJECT_ID = "project-id"
INSTANCE_ID = "instance-id"
INSTANCE_NAME = "projects/" + PROJECT_ID + "/instances/" + INSTANCE_ID
DATABASE_ID = "database-id"
DATABASE_NAME = INSTANCE_NAME + "/databases/" + DATABASE_ID
SESSION_ID = "session-id"
SESSION_NAME = DATABASE_NAME + "/sessions/" + SESSION_ID
BASE_ATTRIBUTES = {
"db.type": "spanner",
"db.url": "spanner.googleapis.com",
"db.instance": DATABASE_NAME,
"net.host.name": "spanner.googleapis.com",
}
def _getTargetClass(self):
from google.cloud.spanner_v1.session import Session
return Session
def _make_one(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
@staticmethod
def _make_database(name=DATABASE_NAME):
from google.cloud.spanner_v1.database import Database
database = mock.create_autospec(Database, instance=True)
database.name = name
return database
@staticmethod
def _make_session_pb(name, labels=None):
from google.cloud.spanner_v1 import Session
return Session(name=name, labels=labels)
def _make_spanner_api(self):
from google.cloud.spanner_v1 import SpannerClient
return mock.Mock(autospec=SpannerClient, instance=True)
def test_constructor_wo_labels(self):
    """A new Session has no id, keeps its database, and has empty labels."""
    db = self._make_database()
    session = self._make_one(db)
    self.assertIsNone(session.session_id)
    self.assertIs(session._database, db)
    self.assertEqual(session.labels, {})
def test_constructor_w_labels(self):
    """Labels passed to the constructor are exposed unchanged."""
    db = self._make_database()
    expected_labels = {"foo": "bar"}
    session = self._make_one(db, labels=expected_labels)
    self.assertIsNone(session.session_id)
    self.assertIs(session._database, db)
    self.assertEqual(session.labels, expected_labels)
def test___lt___(self):
    """Sessions compare by ``_session_id``."""
    db = self._make_database()
    smaller = self._make_one(db)
    smaller._session_id = b"123"
    larger = self._make_one(db)
    larger._session_id = b"234"
    self.assertTrue(smaller < larger)
def test_name_property_wo_session_id(self):
    """Reading ``name`` before the session is created raises ValueError."""
    session = self._make_one(self._make_database())
    with self.assertRaises(ValueError):
        session.name
def test_name_property_w_session_id(self):
    """``name`` is the fully-qualified session resource path."""
    session = self._make_one(self._make_database())
    session._session_id = self.SESSION_ID
    self.assertEqual(session.name, self.SESSION_NAME)
def test_create_w_session_id(self):
    """Creating an already-created session raises and emits no span."""
    session = self._make_one(self._make_database())
    session._session_id = self.SESSION_ID
    with self.assertRaises(ValueError):
        session.create()
    self.assertNoSpans()
def test_create_ok(self):
    """``create()`` calls the API, stores the session id, and traces a span."""
    from google.cloud.spanner_v1 import CreateSessionRequest

    session_pb = self._make_session_pb(self.SESSION_NAME)
    gax_api = self._make_spanner_api()
    gax_api.create_session.return_value = session_pb
    database = self._make_database()
    database.spanner_api = gax_api
    session = self._make_one(database)
    session.create()
    # The id is parsed out of the full resource name returned by the API.
    self.assertEqual(session.session_id, self.SESSION_ID)
    request = CreateSessionRequest(database=database.name,)
    gax_api.create_session.assert_called_once_with(
        request=request, metadata=[("google-cloud-resource-prefix", database.name)]
    )
    self.assertSpanAttributes(
        "CloudSpanner.CreateSession", attributes=TestSession.BASE_ATTRIBUTES
    )
def test_create_w_labels(self):
    """``create()`` forwards labels in the request and onto the span."""
    from google.cloud.spanner_v1 import CreateSessionRequest
    from google.cloud.spanner_v1 import Session as SessionPB

    labels = {"foo": "bar"}
    session_pb = self._make_session_pb(self.SESSION_NAME, labels=labels)
    gax_api = self._make_spanner_api()
    gax_api.create_session.return_value = session_pb
    database = self._make_database()
    database.spanner_api = gax_api
    session = self._make_one(database, labels=labels)
    session.create()
    self.assertEqual(session.session_id, self.SESSION_ID)
    request = CreateSessionRequest(
        database=database.name, session=SessionPB(labels=labels),
    )
    gax_api.create_session.assert_called_once_with(
        request=request, metadata=[("google-cloud-resource-prefix", database.name)],
    )
    # The labels are merged into the base span attributes.
    self.assertSpanAttributes(
        "CloudSpanner.CreateSession",
        attributes=dict(TestSession.BASE_ATTRIBUTES, foo="bar"),
    )
def test_create_error(self):
    """An API error during ``create()`` propagates and marks the span."""
    from google.api_core.exceptions import Unknown

    gax_api = self._make_spanner_api()
    gax_api.create_session.side_effect = Unknown("error")
    database = self._make_database()
    database.spanner_api = gax_api
    session = self._make_one(database)
    with self.assertRaises(Unknown):
        session.create()
    self.assertSpanAttributes(
        "CloudSpanner.CreateSession",
        status=StatusCanonicalCode.UNKNOWN,
        attributes=TestSession.BASE_ATTRIBUTES,
    )
def test_exists_wo_session_id(self):
    """A session that was never created trivially does not exist."""
    session = self._make_one(self._make_database())
    self.assertFalse(session.exists())
    self.assertNoSpans()
def test_exists_hit(self):
    """``exists()`` is True when GetSession succeeds; span records the hit."""
    session_pb = self._make_session_pb(self.SESSION_NAME)
    gax_api = self._make_spanner_api()
    gax_api.get_session.return_value = session_pb
    database = self._make_database()
    database.spanner_api = gax_api
    session = self._make_one(database)
    session._session_id = self.SESSION_ID
    self.assertTrue(session.exists())
    gax_api.get_session.assert_called_once_with(
        name=self.SESSION_NAME,
        metadata=[("google-cloud-resource-prefix", database.name)],
    )
    self.assertSpanAttributes(
        "CloudSpanner.GetSession",
        attributes=dict(TestSession.BASE_ATTRIBUTES, session_found=True),
    )
@mock.patch(
    "google.cloud.spanner_v1._opentelemetry_tracing.HAS_OPENTELEMETRY_INSTALLED",
    False,
)
def test_exists_hit_wo_span(self):
    """``exists()`` hit with OpenTelemetry disabled emits no span."""
    session_pb = self._make_session_pb(self.SESSION_NAME)
    gax_api = self._make_spanner_api()
    gax_api.get_session.return_value = session_pb
    database = self._make_database()
    database.spanner_api = gax_api
    session = self._make_one(database)
    session._session_id = self.SESSION_ID
    self.assertTrue(session.exists())
    gax_api.get_session.assert_called_once_with(
        name=self.SESSION_NAME,
        metadata=[("google-cloud-resource-prefix", database.name)],
    )
    self.assertNoSpans()
def test_exists_miss(self):
    """``exists()`` is False on NOT_FOUND; span records the miss."""
    from google.api_core.exceptions import NotFound

    gax_api = self._make_spanner_api()
    gax_api.get_session.side_effect = NotFound("testing")
    database = self._make_database()
    database.spanner_api = gax_api
    session = self._make_one(database)
    session._session_id = self.SESSION_ID
    self.assertFalse(session.exists())
    gax_api.get_session.assert_called_once_with(
        name=self.SESSION_NAME,
        metadata=[("google-cloud-resource-prefix", database.name)],
    )
    self.assertSpanAttributes(
        "CloudSpanner.GetSession",
        attributes=dict(TestSession.BASE_ATTRIBUTES, session_found=False),
    )
@mock.patch(
    "google.cloud.spanner_v1._opentelemetry_tracing.HAS_OPENTELEMETRY_INSTALLED",
    False,
)
def test_exists_miss_wo_span(self):
    """``exists()`` miss with OpenTelemetry disabled emits no span."""
    from google.api_core.exceptions import NotFound

    gax_api = self._make_spanner_api()
    gax_api.get_session.side_effect = NotFound("testing")
    database = self._make_database()
    database.spanner_api = gax_api
    session = self._make_one(database)
    session._session_id = self.SESSION_ID
    self.assertFalse(session.exists())
    gax_api.get_session.assert_called_once_with(
        name=self.SESSION_NAME,
        metadata=[("google-cloud-resource-prefix", database.name)],
    )
    self.assertNoSpans()
def test_exists_error(self):
    """Errors other than NOT_FOUND propagate out of ``exists()``."""
    from google.api_core.exceptions import Unknown

    gax_api = self._make_spanner_api()
    gax_api.get_session.side_effect = Unknown("testing")
    database = self._make_database()
    database.spanner_api = gax_api
    session = self._make_one(database)
    session._session_id = self.SESSION_ID
    with self.assertRaises(Unknown):
        session.exists()
    gax_api.get_session.assert_called_once_with(
        name=self.SESSION_NAME,
        metadata=[("google-cloud-resource-prefix", database.name)],
    )
    self.assertSpanAttributes(
        "CloudSpanner.GetSession",
        status=StatusCanonicalCode.UNKNOWN,
        attributes=TestSession.BASE_ATTRIBUTES,
    )
def test_ping_wo_session_id(self):
    """Pinging before the session is created raises ValueError."""
    session = self._make_one(self._make_database())
    with self.assertRaises(ValueError):
        session.ping()
def test_ping_hit(self):
    """``ping()`` issues a ``SELECT 1`` against the session."""
    from google.cloud.spanner_v1 import ExecuteSqlRequest

    gax_api = self._make_spanner_api()
    gax_api.execute_sql.return_value = "1"
    database = self._make_database()
    database.spanner_api = gax_api
    session = self._make_one(database)
    session._session_id = self.SESSION_ID
    session.ping()
    request = ExecuteSqlRequest(session=self.SESSION_NAME, sql="SELECT 1",)
    gax_api.execute_sql.assert_called_once_with(
        request=request, metadata=[("google-cloud-resource-prefix", database.name)],
    )
def test_ping_miss(self):
    """NOT_FOUND from the ping query propagates to the caller."""
    from google.api_core.exceptions import NotFound
    from google.cloud.spanner_v1 import ExecuteSqlRequest

    gax_api = self._make_spanner_api()
    gax_api.execute_sql.side_effect = NotFound("testing")
    database = self._make_database()
    database.spanner_api = gax_api
    session = self._make_one(database)
    session._session_id = self.SESSION_ID
    with self.assertRaises(NotFound):
        session.ping()
    request = ExecuteSqlRequest(session=self.SESSION_NAME, sql="SELECT 1",)
    gax_api.execute_sql.assert_called_once_with(
        request=request, metadata=[("google-cloud-resource-prefix", database.name)],
    )
def test_ping_error(self):
    """Other API errors from the ping query also propagate."""
    from google.api_core.exceptions import Unknown
    from google.cloud.spanner_v1 import ExecuteSqlRequest

    gax_api = self._make_spanner_api()
    gax_api.execute_sql.side_effect = Unknown("testing")
    database = self._make_database()
    database.spanner_api = gax_api
    session = self._make_one(database)
    session._session_id = self.SESSION_ID
    with self.assertRaises(Unknown):
        session.ping()
    request = ExecuteSqlRequest(session=self.SESSION_NAME, sql="SELECT 1",)
    gax_api.execute_sql.assert_called_once_with(
        request=request, metadata=[("google-cloud-resource-prefix", database.name)],
    )
def test_delete_wo_session_id(self):
    """Deleting before creation raises and emits no span."""
    session = self._make_one(self._make_database())
    with self.assertRaises(ValueError):
        session.delete()
    self.assertNoSpans()
def test_delete_hit(self):
    """``delete()`` calls DeleteSession and traces a span."""
    gax_api = self._make_spanner_api()
    gax_api.delete_session.return_value = None
    database = self._make_database()
    database.spanner_api = gax_api
    session = self._make_one(database)
    session._session_id = self.SESSION_ID
    session.delete()
    gax_api.delete_session.assert_called_once_with(
        name=self.SESSION_NAME,
        metadata=[("google-cloud-resource-prefix", database.name)],
    )
    self.assertSpanAttributes(
        "CloudSpanner.DeleteSession", attributes=TestSession.BASE_ATTRIBUTES
    )
def test_delete_miss(self):
    """NOT_FOUND from ``delete()`` propagates and is recorded on the span."""
    # Import from google.api_core like every sibling test in this class;
    # the previous google.cloud.exceptions.NotFound is a legacy subclass
    # of the same exception.
    from google.api_core.exceptions import NotFound

    gax_api = self._make_spanner_api()
    gax_api.delete_session.side_effect = NotFound("testing")
    database = self._make_database()
    database.spanner_api = gax_api
    session = self._make_one(database)
    session._session_id = self.SESSION_ID
    with self.assertRaises(NotFound):
        session.delete()
    gax_api.delete_session.assert_called_once_with(
        name=self.SESSION_NAME,
        metadata=[("google-cloud-resource-prefix", database.name)],
    )
    self.assertSpanAttributes(
        "CloudSpanner.DeleteSession",
        status=StatusCanonicalCode.NOT_FOUND,
        attributes=TestSession.BASE_ATTRIBUTES,
    )
def test_delete_error(self):
    """Other API errors from ``delete()`` propagate and mark the span."""
    from google.api_core.exceptions import Unknown

    gax_api = self._make_spanner_api()
    gax_api.delete_session.side_effect = Unknown("testing")
    database = self._make_database()
    database.spanner_api = gax_api
    session = self._make_one(database)
    session._session_id = self.SESSION_ID
    with self.assertRaises(Unknown):
        session.delete()
    gax_api.delete_session.assert_called_once_with(
        name=self.SESSION_NAME,
        metadata=[("google-cloud-resource-prefix", database.name)],
    )
    self.assertSpanAttributes(
        "CloudSpanner.DeleteSession",
        status=StatusCanonicalCode.UNKNOWN,
        attributes=TestSession.BASE_ATTRIBUTES,
    )
def test_snapshot_not_created(self):
    """``snapshot()`` before creation raises ValueError."""
    session = self._make_one(self._make_database())
    with self.assertRaises(ValueError):
        session.snapshot()
def test_snapshot_created(self):
    """A created session yields a single-use, strong ``Snapshot``."""
    from google.cloud.spanner_v1.snapshot import Snapshot

    session = self._make_one(self._make_database())
    session._session_id = "DEADBEEF"  # emulate 'session.create()'
    result = session.snapshot()
    self.assertIsInstance(result, Snapshot)
    self.assertIs(result._session, session)
    self.assertTrue(result._strong)
    self.assertFalse(result._multi_use)
def test_snapshot_created_w_multi_use(self):
    """``snapshot(multi_use=True)`` yields a strong, multi-use Snapshot."""
    from google.cloud.spanner_v1.snapshot import Snapshot

    database = self._make_database()
    session = self._make_one(database)
    session._session_id = "DEADBEEF"  # emulate 'session.create()'
    snapshot = session.snapshot(multi_use=True)
    self.assertIsInstance(snapshot, Snapshot)
    # assertIs reports both operands on failure, unlike the previous
    # assertTrue(... is ...), and matches the sibling test's style.
    self.assertIs(snapshot._session, session)
    self.assertTrue(snapshot._strong)
    self.assertTrue(snapshot._multi_use)
def test_read_not_created(self):
    """``read()`` before creation raises ValueError."""
    from google.cloud.spanner_v1.keyset import KeySet

    table = "citizens"
    columns = ["email", "first_name", "last_name", "age"]
    keyset = KeySet(keys=["bharney@example.com", "phred@example.com"])
    session = self._make_one(self._make_database())
    with self.assertRaises(ValueError):
        session.read(table, columns, keyset)
def test_read(self):
    """``read()`` delegates to a fresh ``Snapshot`` with positional args."""
    from google.cloud.spanner_v1.keyset import KeySet

    TABLE_NAME = "citizens"
    COLUMNS = ["email", "first_name", "last_name", "age"]
    KEYS = ["bharney@example.com", "phred@example.com"]
    KEYSET = KeySet(keys=KEYS)
    INDEX = "email-address-index"
    LIMIT = 20
    database = self._make_database()
    session = self._make_one(database)
    session._session_id = "DEADBEEF"
    with mock.patch("google.cloud.spanner_v1.session.Snapshot") as snapshot:
        found = session.read(TABLE_NAME, COLUMNS, KEYSET, index=INDEX, limit=LIMIT)
    self.assertIs(found, snapshot().read.return_value)
    snapshot().read.assert_called_once_with(
        TABLE_NAME, COLUMNS, KEYSET, INDEX, LIMIT
    )
def test_execute_sql_not_created(self):
    """``execute_sql()`` before creation raises ValueError."""
    session = self._make_one(self._make_database())
    with self.assertRaises(ValueError):
        session.execute_sql("SELECT first_name, age FROM citizens")
def test_execute_sql_defaults(self):
    """``execute_sql()`` delegates to a Snapshot with library defaults."""
    SQL = "SELECT first_name, age FROM citizens"
    database = self._make_database()
    session = self._make_one(database)
    session._session_id = "DEADBEEF"
    with mock.patch("google.cloud.spanner_v1.session.Snapshot") as snapshot:
        found = session.execute_sql(SQL)
    self.assertIs(found, snapshot().execute_sql.return_value)
    snapshot().execute_sql.assert_called_once_with(
        SQL,
        None,
        None,
        None,
        query_options=None,
        timeout=google.api_core.gapic_v1.method.DEFAULT,
        retry=google.api_core.gapic_v1.method.DEFAULT,
    )
def test_execute_sql_non_default_retry(self):
    """Explicit ``retry=None``/``timeout=None`` pass through unchanged."""
    from google.protobuf.struct_pb2 import Struct, Value
    from google.cloud.spanner_v1 import TypeCode

    SQL = "SELECT first_name, age FROM citizens"
    database = self._make_database()
    session = self._make_one(database)
    session._session_id = "DEADBEEF"
    params = Struct(fields={"foo": Value(string_value="bar")})
    param_types = {"foo": TypeCode.STRING}
    with mock.patch("google.cloud.spanner_v1.session.Snapshot") as snapshot:
        found = session.execute_sql(
            SQL, params, param_types, "PLAN", retry=None, timeout=None
        )
    self.assertIs(found, snapshot().execute_sql.return_value)
    snapshot().execute_sql.assert_called_once_with(
        SQL,
        params,
        param_types,
        "PLAN",
        query_options=None,
        timeout=None,
        retry=None,
    )
def test_execute_sql_explicit(self):
    """Params, types, and query mode are forwarded to the Snapshot."""
    from google.protobuf.struct_pb2 import Struct, Value
    from google.cloud.spanner_v1 import TypeCode

    SQL = "SELECT first_name, age FROM citizens"
    database = self._make_database()
    session = self._make_one(database)
    session._session_id = "DEADBEEF"
    params = Struct(fields={"foo": Value(string_value="bar")})
    param_types = {"foo": TypeCode.STRING}
    with mock.patch("google.cloud.spanner_v1.session.Snapshot") as snapshot:
        found = session.execute_sql(SQL, params, param_types, "PLAN")
    self.assertIs(found, snapshot().execute_sql.return_value)
    snapshot().execute_sql.assert_called_once_with(
        SQL,
        params,
        param_types,
        "PLAN",
        query_options=None,
        timeout=google.api_core.gapic_v1.method.DEFAULT,
        retry=google.api_core.gapic_v1.method.DEFAULT,
    )
def test_batch_not_created(self):
    """``batch()`` before creation raises ValueError."""
    session = self._make_one(self._make_database())
    with self.assertRaises(ValueError):
        session.batch()
def test_batch_created(self):
    """A created session yields a ``Batch`` bound to itself."""
    from google.cloud.spanner_v1.batch import Batch

    session = self._make_one(self._make_database())
    session._session_id = "DEADBEEF"
    result = session.batch()
    self.assertIsInstance(result, Batch)
    self.assertIs(result._session, session)
def test_transaction_not_created(self):
    """``transaction()`` before creation raises ValueError."""
    session = self._make_one(self._make_database())
    with self.assertRaises(ValueError):
        session.transaction()
def test_transaction_created(self):
    """A created session yields a ``Transaction`` and caches it."""
    from google.cloud.spanner_v1.transaction import Transaction

    session = self._make_one(self._make_database())
    session._session_id = "DEADBEEF"
    txn = session.transaction()
    self.assertIsInstance(txn, Transaction)
    self.assertIs(txn._session, session)
    self.assertIs(session._transaction, txn)
def test_transaction_w_existing_txn(self):
    """Starting a new transaction rolls back any existing one."""
    session = self._make_one(self._make_database())
    session._session_id = "DEADBEEF"
    first = session.transaction()
    second = session.transaction()  # invalidates existing txn
    self.assertIs(session._transaction, second)
    self.assertTrue(first.rolled_back)
def test_run_in_transaction_callback_raises_non_gax_error(self):
    """A non-GAX callback error rolls back the transaction and propagates."""
    from google.cloud.spanner_v1 import (
        Transaction as TransactionPB,
        TransactionOptions,
    )
    from google.cloud.spanner_v1.transaction import Transaction

    TABLE_NAME = "citizens"
    COLUMNS = ["email", "first_name", "last_name", "age"]
    VALUES = [
        ["phred@exammple.com", "Phred", "Phlyntstone", 32],
        ["bharney@example.com", "Bharney", "Rhubble", 31],
    ]
    TRANSACTION_ID = b"FACEDACE"
    transaction_pb = TransactionPB(id=TRANSACTION_ID)
    gax_api = self._make_spanner_api()
    gax_api.begin_transaction.return_value = transaction_pb
    gax_api.rollback.return_value = None
    database = self._make_database()
    database.spanner_api = gax_api
    session = self._make_one(database)
    session._session_id = self.SESSION_ID
    called_with = []

    class Testing(Exception):
        # Arbitrary application-level error raised from the callback.
        pass

    def unit_of_work(txn, *args, **kw):
        called_with.append((txn, args, kw))
        txn.insert(TABLE_NAME, COLUMNS, VALUES)
        raise Testing()

    with self.assertRaises(Testing):
        session.run_in_transaction(unit_of_work)
    self.assertIsNone(session._transaction)
    self.assertEqual(len(called_with), 1)
    txn, args, kw = called_with[0]
    self.assertIsInstance(txn, Transaction)
    # Not committed, but rolled back (verified against the rollback RPC below).
    self.assertIsNone(txn.committed)
    self.assertTrue(txn.rolled_back)
    self.assertEqual(args, ())
    self.assertEqual(kw, {})
    expected_options = TransactionOptions(read_write=TransactionOptions.ReadWrite())
    gax_api.begin_transaction.assert_called_once_with(
        session=self.SESSION_NAME,
        options=expected_options,
        metadata=[("google-cloud-resource-prefix", database.name)],
    )
    gax_api.rollback.assert_called_once_with(
        session=self.SESSION_NAME,
        transaction_id=TRANSACTION_ID,
        metadata=[("google-cloud-resource-prefix", database.name)],
    )
def test_run_in_transaction_callback_raises_non_abort_rpc_error(self):
    """A non-Aborted RPC error propagates; no commit and no rollback."""
    from google.api_core.exceptions import Cancelled
    from google.cloud.spanner_v1 import (
        Transaction as TransactionPB,
        TransactionOptions,
    )
    from google.cloud.spanner_v1.transaction import Transaction

    TABLE_NAME = "citizens"
    COLUMNS = ["email", "first_name", "last_name", "age"]
    VALUES = [
        ["phred@exammple.com", "Phred", "Phlyntstone", 32],
        ["bharney@example.com", "Bharney", "Rhubble", 31],
    ]
    TRANSACTION_ID = b"FACEDACE"
    transaction_pb = TransactionPB(id=TRANSACTION_ID)
    gax_api = self._make_spanner_api()
    gax_api.begin_transaction.return_value = transaction_pb
    gax_api.rollback.return_value = None
    database = self._make_database()
    database.spanner_api = gax_api
    session = self._make_one(database)
    session._session_id = self.SESSION_ID
    called_with = []

    def unit_of_work(txn, *args, **kw):
        called_with.append((txn, args, kw))
        txn.insert(TABLE_NAME, COLUMNS, VALUES)
        raise Cancelled("error")

    with self.assertRaises(Cancelled):
        session.run_in_transaction(unit_of_work)
    self.assertIsNone(session._transaction)
    self.assertEqual(len(called_with), 1)
    txn, args, kw = called_with[0]
    self.assertIsInstance(txn, Transaction)
    self.assertIsNone(txn.committed)
    # Unlike a non-GAX error, the transaction is NOT rolled back here.
    self.assertFalse(txn.rolled_back)
    self.assertEqual(args, ())
    self.assertEqual(kw, {})
    expected_options = TransactionOptions(read_write=TransactionOptions.ReadWrite())
    gax_api.begin_transaction.assert_called_once_with(
        session=self.SESSION_NAME,
        options=expected_options,
        metadata=[("google-cloud-resource-prefix", database.name)],
    )
    gax_api.rollback.assert_not_called()
def test_run_in_transaction_w_args_w_kwargs_wo_abort(self):
    """Happy path: args/kwargs reach the callback; its result is returned."""
    import datetime
    from google.cloud.spanner_v1 import CommitResponse
    from google.cloud.spanner_v1 import (
        Transaction as TransactionPB,
        TransactionOptions,
    )
    from google.cloud._helpers import UTC
    from google.cloud._helpers import _datetime_to_pb_timestamp
    from google.cloud.spanner_v1.transaction import Transaction

    TABLE_NAME = "citizens"
    COLUMNS = ["email", "first_name", "last_name", "age"]
    VALUES = [
        ["phred@exammple.com", "Phred", "Phlyntstone", 32],
        ["bharney@example.com", "Bharney", "Rhubble", 31],
    ]
    TRANSACTION_ID = b"FACEDACE"
    transaction_pb = TransactionPB(id=TRANSACTION_ID)
    now = datetime.datetime.utcnow().replace(tzinfo=UTC)
    now_pb = _datetime_to_pb_timestamp(now)
    response = CommitResponse(commit_timestamp=now_pb)
    gax_api = self._make_spanner_api()
    gax_api.begin_transaction.return_value = transaction_pb
    gax_api.commit.return_value = response
    database = self._make_database()
    database.spanner_api = gax_api
    session = self._make_one(database)
    session._session_id = self.SESSION_ID
    called_with = []

    def unit_of_work(txn, *args, **kw):
        called_with.append((txn, args, kw))
        txn.insert(TABLE_NAME, COLUMNS, VALUES)
        return 42

    return_value = session.run_in_transaction(unit_of_work, "abc", some_arg="def")
    self.assertIsNone(session._transaction)
    self.assertEqual(len(called_with), 1)
    txn, args, kw = called_with[0]
    self.assertIsInstance(txn, Transaction)
    # The callback's return value is passed straight through.
    self.assertEqual(return_value, 42)
    self.assertEqual(args, ("abc",))
    self.assertEqual(kw, {"some_arg": "def"})
    expected_options = TransactionOptions(read_write=TransactionOptions.ReadWrite())
    gax_api.begin_transaction.assert_called_once_with(
        session=self.SESSION_NAME,
        options=expected_options,
        metadata=[("google-cloud-resource-prefix", database.name)],
    )
    gax_api.commit.assert_called_once_with(
        session=self.SESSION_NAME,
        mutations=txn._mutations,
        transaction_id=TRANSACTION_ID,
        metadata=[("google-cloud-resource-prefix", database.name)],
    )
def test_run_in_transaction_w_commit_error(self):
    """A non-Aborted commit error propagates; the cached txn is reused."""
    from google.api_core.exceptions import Unknown
    from google.cloud.spanner_v1.transaction import Transaction

    TABLE_NAME = "citizens"
    COLUMNS = ["email", "first_name", "last_name", "age"]
    VALUES = [
        ["phred@exammple.com", "Phred", "Phlyntstone", 32],
        ["bharney@example.com", "Bharney", "Rhubble", 31],
    ]
    TRANSACTION_ID = b"FACEDACE"
    gax_api = self._make_spanner_api()
    gax_api.commit.side_effect = Unknown("error")
    database = self._make_database()
    database.spanner_api = gax_api
    session = self._make_one(database)
    session._session_id = self.SESSION_ID
    # Seed an already-begun transaction on the session so that
    # run_in_transaction reuses it instead of calling BeginTransaction
    # (verified by begin_transaction.assert_not_called below).
    begun_txn = session._transaction = Transaction(session)
    begun_txn._transaction_id = TRANSACTION_ID
    assert session._transaction._transaction_id
    called_with = []

    def unit_of_work(txn, *args, **kw):
        called_with.append((txn, args, kw))
        txn.insert(TABLE_NAME, COLUMNS, VALUES)

    with self.assertRaises(Unknown):
        session.run_in_transaction(unit_of_work)
    self.assertIsNone(session._transaction)
    self.assertEqual(len(called_with), 1)
    txn, args, kw = called_with[0]
    self.assertIs(txn, begun_txn)
    self.assertEqual(txn.committed, None)
    self.assertEqual(args, ())
    self.assertEqual(kw, {})
    gax_api.begin_transaction.assert_not_called()
    gax_api.commit.assert_called_once_with(
        session=self.SESSION_NAME,
        mutations=txn._mutations,
        transaction_id=TRANSACTION_ID,
        metadata=[("google-cloud-resource-prefix", database.name)],
    )
def test_run_in_transaction_w_abort_no_retry_metadata(self):
    """An Aborted commit without retry metadata is retried once and succeeds."""
    import datetime
    from google.api_core.exceptions import Aborted
    from google.cloud.spanner_v1 import CommitResponse
    from google.cloud.spanner_v1 import (
        Transaction as TransactionPB,
        TransactionOptions,
    )
    from google.cloud._helpers import UTC
    from google.cloud._helpers import _datetime_to_pb_timestamp
    from google.cloud.spanner_v1.transaction import Transaction

    TABLE_NAME = "citizens"
    COLUMNS = ["email", "first_name", "last_name", "age"]
    VALUES = [
        ["phred@exammple.com", "Phred", "Phlyntstone", 32],
        ["bharney@example.com", "Bharney", "Rhubble", 31],
    ]
    TRANSACTION_ID = b"FACEDACE"
    transaction_pb = TransactionPB(id=TRANSACTION_ID)
    now = datetime.datetime.utcnow().replace(tzinfo=UTC)
    now_pb = _datetime_to_pb_timestamp(now)
    # Empty trailing metadata: no server-suggested retry delay.
    aborted = _make_rpc_error(Aborted, trailing_metadata=[])
    response = CommitResponse(commit_timestamp=now_pb)
    gax_api = self._make_spanner_api()
    gax_api.begin_transaction.return_value = transaction_pb
    # First commit aborts, second succeeds.
    gax_api.commit.side_effect = [aborted, response]
    database = self._make_database()
    database.spanner_api = gax_api
    session = self._make_one(database)
    session._session_id = self.SESSION_ID
    called_with = []

    def unit_of_work(txn, *args, **kw):
        called_with.append((txn, args, kw))
        txn.insert(TABLE_NAME, COLUMNS, VALUES)
        return "answer"

    return_value = session.run_in_transaction(unit_of_work, "abc", some_arg="def")
    # The callback ran twice: once per commit attempt.
    self.assertEqual(len(called_with), 2)
    for index, (txn, args, kw) in enumerate(called_with):
        self.assertIsInstance(txn, Transaction)
        self.assertEqual(return_value, "answer")
        self.assertEqual(args, ("abc",))
        self.assertEqual(kw, {"some_arg": "def"})
    expected_options = TransactionOptions(read_write=TransactionOptions.ReadWrite())
    self.assertEqual(
        gax_api.begin_transaction.call_args_list,
        [
            mock.call(
                session=self.SESSION_NAME,
                options=expected_options,
                metadata=[("google-cloud-resource-prefix", database.name)],
            )
        ]
        * 2,
    )
    self.assertEqual(
        gax_api.commit.call_args_list,
        [
            mock.call(
                session=self.SESSION_NAME,
                mutations=txn._mutations,
                transaction_id=TRANSACTION_ID,
                metadata=[("google-cloud-resource-prefix", database.name)],
            )
        ]
        * 2,
    )
def test_run_in_transaction_w_abort_w_retry_metadata(self):
    """An Aborted commit with RetryInfo sleeps the suggested delay, then retries."""
    import datetime
    from google.api_core.exceptions import Aborted
    from google.protobuf.duration_pb2 import Duration
    from google.rpc.error_details_pb2 import RetryInfo
    from google.cloud.spanner_v1 import CommitResponse
    from google.cloud.spanner_v1 import (
        Transaction as TransactionPB,
        TransactionOptions,
    )
    from google.cloud._helpers import UTC
    from google.cloud._helpers import _datetime_to_pb_timestamp
    from google.cloud.spanner_v1.transaction import Transaction

    TABLE_NAME = "citizens"
    COLUMNS = ["email", "first_name", "last_name", "age"]
    VALUES = [
        ["phred@exammple.com", "Phred", "Phlyntstone", 32],
        ["bharney@example.com", "Bharney", "Rhubble", 31],
    ]
    TRANSACTION_ID = b"FACEDACE"
    RETRY_SECONDS = 12
    RETRY_NANOS = 3456
    # Server-suggested retry delay, delivered via trailing metadata.
    retry_info = RetryInfo(
        retry_delay=Duration(seconds=RETRY_SECONDS, nanos=RETRY_NANOS)
    )
    trailing_metadata = [
        ("google.rpc.retryinfo-bin", retry_info.SerializeToString())
    ]
    aborted = _make_rpc_error(Aborted, trailing_metadata=trailing_metadata)
    transaction_pb = TransactionPB(id=TRANSACTION_ID)
    now = datetime.datetime.utcnow().replace(tzinfo=UTC)
    now_pb = _datetime_to_pb_timestamp(now)
    response = CommitResponse(commit_timestamp=now_pb)
    gax_api = self._make_spanner_api()
    gax_api.begin_transaction.return_value = transaction_pb
    # First commit aborts, second succeeds.
    gax_api.commit.side_effect = [aborted, response]
    database = self._make_database()
    database.spanner_api = gax_api
    session = self._make_one(database)
    session._session_id = self.SESSION_ID
    called_with = []

    def unit_of_work(txn, *args, **kw):
        called_with.append((txn, args, kw))
        txn.insert(TABLE_NAME, COLUMNS, VALUES)

    with mock.patch("time.sleep") as sleep_mock:
        session.run_in_transaction(unit_of_work, "abc", some_arg="def")
    # The sleep must equal the delay decoded from RetryInfo.
    sleep_mock.assert_called_once_with(RETRY_SECONDS + RETRY_NANOS / 1.0e9)
    self.assertEqual(len(called_with), 2)
    for index, (txn, args, kw) in enumerate(called_with):
        self.assertIsInstance(txn, Transaction)
        if index == 1:
            self.assertEqual(txn.committed, now)
        else:
            self.assertIsNone(txn.committed)
        self.assertEqual(args, ("abc",))
        self.assertEqual(kw, {"some_arg": "def"})
    expected_options = TransactionOptions(read_write=TransactionOptions.ReadWrite())
    self.assertEqual(
        gax_api.begin_transaction.call_args_list,
        [
            mock.call(
                session=self.SESSION_NAME,
                options=expected_options,
                metadata=[("google-cloud-resource-prefix", database.name)],
            )
        ]
        * 2,
    )
    self.assertEqual(
        gax_api.commit.call_args_list,
        [
            mock.call(
                session=self.SESSION_NAME,
                mutations=txn._mutations,
                transaction_id=TRANSACTION_ID,
                metadata=[("google-cloud-resource-prefix", database.name)],
            )
        ]
        * 2,
    )
def test_run_in_transaction_w_callback_raises_abort_wo_metadata(self):
    """An Aborted error raised *inside the callback* triggers one retry."""
    import datetime
    from google.api_core.exceptions import Aborted
    from google.protobuf.duration_pb2 import Duration
    from google.rpc.error_details_pb2 import RetryInfo
    from google.cloud.spanner_v1 import CommitResponse
    from google.cloud.spanner_v1 import (
        Transaction as TransactionPB,
        TransactionOptions,
    )
    from google.cloud._helpers import UTC
    from google.cloud._helpers import _datetime_to_pb_timestamp
    from google.cloud.spanner_v1.transaction import Transaction

    TABLE_NAME = "citizens"
    COLUMNS = ["email", "first_name", "last_name", "age"]
    VALUES = [
        ["phred@exammple.com", "Phred", "Phlyntstone", 32],
        ["bharney@example.com", "Bharney", "Rhubble", 31],
    ]
    TRANSACTION_ID = b"FACEDACE"
    RETRY_SECONDS = 1
    RETRY_NANOS = 3456
    transaction_pb = TransactionPB(id=TRANSACTION_ID)
    now = datetime.datetime.utcnow().replace(tzinfo=UTC)
    now_pb = _datetime_to_pb_timestamp(now)
    response = CommitResponse(commit_timestamp=now_pb)
    retry_info = RetryInfo(
        retry_delay=Duration(seconds=RETRY_SECONDS, nanos=RETRY_NANOS)
    )
    trailing_metadata = [
        ("google.rpc.retryinfo-bin", retry_info.SerializeToString())
    ]
    gax_api = self._make_spanner_api()
    gax_api.begin_transaction.return_value = transaction_pb
    # Only one commit happens: the first attempt aborts in the callback,
    # before commit is ever reached.
    gax_api.commit.side_effect = [response]
    database = self._make_database()
    database.spanner_api = gax_api
    session = self._make_one(database)
    session._session_id = self.SESSION_ID
    called_with = []

    def unit_of_work(txn, *args, **kw):
        called_with.append((txn, args, kw))
        # Abort on the first invocation only.
        if len(called_with) < 2:
            raise _make_rpc_error(Aborted, trailing_metadata)
        txn.insert(TABLE_NAME, COLUMNS, VALUES)

    with mock.patch("time.sleep") as sleep_mock:
        session.run_in_transaction(unit_of_work)
    sleep_mock.assert_called_once_with(RETRY_SECONDS + RETRY_NANOS / 1.0e9)
    self.assertEqual(len(called_with), 2)
    for index, (txn, args, kw) in enumerate(called_with):
        self.assertIsInstance(txn, Transaction)
        if index == 0:
            self.assertIsNone(txn.committed)
        else:
            self.assertEqual(txn.committed, now)
        self.assertEqual(args, ())
        self.assertEqual(kw, {})
    expected_options = TransactionOptions(read_write=TransactionOptions.ReadWrite())
    self.assertEqual(
        gax_api.begin_transaction.call_args_list,
        [
            mock.call(
                session=self.SESSION_NAME,
                options=expected_options,
                metadata=[("google-cloud-resource-prefix", database.name)],
            )
        ]
        * 2,
    )
    gax_api.commit.assert_called_once_with(
        session=self.SESSION_NAME,
        mutations=txn._mutations,
        transaction_id=TRANSACTION_ID,
        metadata=[("google-cloud-resource-prefix", database.name)],
    )
def test_run_in_transaction_w_abort_w_retry_metadata_deadline(self):
    """An abort whose retry delay would exceed ``timeout_secs`` re-raises."""
    import datetime
    from google.api_core.exceptions import Aborted
    from google.protobuf.duration_pb2 import Duration
    from google.rpc.error_details_pb2 import RetryInfo
    from google.cloud.spanner_v1 import CommitResponse
    from google.cloud.spanner_v1 import (
        Transaction as TransactionPB,
        TransactionOptions,
    )
    from google.cloud.spanner_v1.transaction import Transaction
    from google.cloud._helpers import UTC
    from google.cloud._helpers import _datetime_to_pb_timestamp

    TABLE_NAME = "citizens"
    COLUMNS = ["email", "first_name", "last_name", "age"]
    VALUES = [
        ["phred@exammple.com", "Phred", "Phlyntstone", 32],
        ["bharney@example.com", "Bharney", "Rhubble", 31],
    ]
    TRANSACTION_ID = b"FACEDACE"
    RETRY_SECONDS = 1
    RETRY_NANOS = 3456
    transaction_pb = TransactionPB(id=TRANSACTION_ID)
    now = datetime.datetime.utcnow().replace(tzinfo=UTC)
    now_pb = _datetime_to_pb_timestamp(now)
    response = CommitResponse(commit_timestamp=now_pb)
    retry_info = RetryInfo(
        retry_delay=Duration(seconds=RETRY_SECONDS, nanos=RETRY_NANOS)
    )
    trailing_metadata = [
        ("google.rpc.retryinfo-bin", retry_info.SerializeToString())
    ]
    aborted = _make_rpc_error(Aborted, trailing_metadata=trailing_metadata)
    gax_api = self._make_spanner_api()
    gax_api.begin_transaction.return_value = transaction_pb
    gax_api.commit.side_effect = [aborted, response]
    database = self._make_database()
    database.spanner_api = gax_api
    session = self._make_one(database)
    session._session_id = self.SESSION_ID
    called_with = []

    def unit_of_work(txn, *args, **kw):
        called_with.append((txn, args, kw))
        txn.insert(TABLE_NAME, COLUMNS, VALUES)

    # retry once w/ timeout_secs=1
    # Deliberate mutable default: successive time.time() calls pop 1, then
    # 1.5, so the 1-second deadline is already passed on the retry check.
    def _time(_results=[1, 1.5]):
        return _results.pop(0)

    with mock.patch("time.time", _time):
        if HAS_OPENTELEMETRY_INSTALLED:
            # Patch OpenTelemetry's clock separately so its own time()
            # calls do not pop entries from the two-value _time sequence.
            with mock.patch("opentelemetry.util.time", _ConstantTime()):
                with mock.patch("time.sleep") as sleep_mock:
                    with self.assertRaises(Aborted):
                        session.run_in_transaction(
                            unit_of_work, "abc", timeout_secs=1
                        )
        else:
            with mock.patch("time.sleep") as sleep_mock:
                with self.assertRaises(Aborted):
                    session.run_in_transaction(unit_of_work, "abc", timeout_secs=1)
    # Deadline exceeded: no sleep, no second attempt.
    sleep_mock.assert_not_called()
    self.assertEqual(len(called_with), 1)
    txn, args, kw = called_with[0]
    self.assertIsInstance(txn, Transaction)
    self.assertIsNone(txn.committed)
    self.assertEqual(args, ("abc",))
    self.assertEqual(kw, {})
    expected_options = TransactionOptions(read_write=TransactionOptions.ReadWrite())
    gax_api.begin_transaction.assert_called_once_with(
        session=self.SESSION_NAME,
        options=expected_options,
        metadata=[("google-cloud-resource-prefix", database.name)],
    )
    gax_api.commit.assert_called_once_with(
        session=self.SESSION_NAME,
        mutations=txn._mutations,
        transaction_id=TRANSACTION_ID,
        metadata=[("google-cloud-resource-prefix", database.name)],
    )
def test_run_in_transaction_w_timeout(self):
    """Commit aborts on every attempt: ``run_in_transaction`` must retry
    with a growing backoff until ``timeout_secs`` is exhausted, then
    surface the final ``Aborted`` error to the caller.
    """
    from google.api_core.exceptions import Aborted
    from google.cloud.spanner_v1 import (
        Transaction as TransactionPB,
        TransactionOptions,
    )
    from google.cloud.spanner_v1.transaction import Transaction

    TABLE_NAME = "citizens"
    COLUMNS = ["email", "first_name", "last_name", "age"]
    VALUES = [
        ["phred@exammple.com", "Phred", "Phlyntstone", 32],
        ["bharney@example.com", "Bharney", "Rhubble", 31],
    ]
    TRANSACTION_ID = b"FACEDACE"
    transaction_pb = TransactionPB(id=TRANSACTION_ID)
    # No RetryInfo in the trailing metadata, so the client falls back to
    # its own backoff schedule for every retry.
    aborted = _make_rpc_error(Aborted, trailing_metadata=[])
    gax_api = self._make_spanner_api()
    gax_api.begin_transaction.return_value = transaction_pb
    gax_api.commit.side_effect = aborted
    database = self._make_database()
    database.spanner_api = gax_api
    session = self._make_one(database)
    session._session_id = self.SESSION_ID
    called_with = []

    def unit_of_work(txn, *args, **kw):
        called_with.append((txn, args, kw))
        txn.insert(TABLE_NAME, COLUMNS, VALUES)

    # retry several times to check backoff
    def _time(_results=[1, 2, 4, 8]):
        # NOTE: the mutable default is deliberate -- it holds the fake
        # clock readings consumed across successive time.time() calls.
        return _results.pop(0)

    with mock.patch("time.time", _time):
        if HAS_OPENTELEMETRY_INSTALLED:
            with mock.patch("opentelemetry.util.time", _ConstantTime()):
                with mock.patch("time.sleep") as sleep_mock:
                    with self.assertRaises(Aborted):
                        session.run_in_transaction(unit_of_work, timeout_secs=8)
        else:
            with mock.patch("time.sleep") as sleep_mock:
                with self.assertRaises(Aborted):
                    session.run_in_transaction(unit_of_work, timeout_secs=8)

    # unpacking call args into list
    call_args = [call_[0][0] for call_ in sleep_mock.call_args_list]
    call_args = list(map(int, call_args))
    # Two sleeps happen between the three attempts.
    assert call_args == [2, 4]
    assert sleep_mock.call_count == 2
    self.assertEqual(len(called_with), 3)
    for txn, args, kw in called_with:
        self.assertIsInstance(txn, Transaction)
        self.assertIsNone(txn.committed)
        self.assertEqual(args, ())
        self.assertEqual(kw, {})
    expected_options = TransactionOptions(read_write=TransactionOptions.ReadWrite())
    self.assertEqual(
        gax_api.begin_transaction.call_args_list,
        [
            mock.call(
                session=self.SESSION_NAME,
                options=expected_options,
                metadata=[("google-cloud-resource-prefix", database.name)],
            )
        ]
        * 3,
    )
    self.assertEqual(
        gax_api.commit.call_args_list,
        [
            mock.call(
                session=self.SESSION_NAME,
                mutations=txn._mutations,
                transaction_id=TRANSACTION_ID,
                metadata=[("google-cloud-resource-prefix", database.name)],
            )
        ]
        * 3,
    )
def test_delay_helper_w_no_delay(self):
    """``_delay_until_retry``: re-raise once past the deadline, and never
    sleep when no retry delay can be extracted from the error metadata.
    """
    from google.cloud.spanner_v1.session import _delay_until_retry

    error_detail = mock.Mock()
    error_detail.trailing_metadata.return_value = {}
    fake_exc = mock.Mock(errors=[error_detail])

    def _frozen_clock():
        return 3

    # check if current time > deadline
    with mock.patch("time.time", _frozen_clock):
        with self.assertRaises(Exception):
            _delay_until_retry(fake_exc, 2, 1)

    # Deadline not yet reached, but no delay hint: must not sleep at all.
    with mock.patch("time.time", _frozen_clock), mock.patch(
        "google.cloud.spanner_v1.session._get_retry_delay"
    ) as get_retry_delay_mock, mock.patch("time.sleep") as sleep_mock:
        get_retry_delay_mock.return_value = None
        _delay_until_retry(fake_exc, 6, 1)
        sleep_mock.assert_not_called()
| 36.329787
| 88
| 0.635787
| 5,233
| 47,810
| 5.515192
| 0.063061
| 0.033263
| 0.027026
| 0.033263
| 0.860123
| 0.842556
| 0.829666
| 0.815668
| 0.794463
| 0.78438
| 0
| 0.005501
| 0.269944
| 47,810
| 1,315
| 89
| 36.357414
| 0.821367
| 0.016231
| 0
| 0.714822
| 0
| 0
| 0.072001
| 0.033076
| 0
| 0
| 0
| 0
| 0.155722
| 1
| 0.061914
| false
| 0.000938
| 0.077861
| 0.00469
| 0.161351
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0a662524641e092a1f7be399ea62a3cfb19d7eee
| 9,764
|
py
|
Python
|
WebHacking/100-Rolodex/tests/t_app.py
|
Probely/CTF-Challenges
|
9fb708d1b7a64ebc8309e3559b80d96915c6ada6
|
[
"Apache-2.0"
] | 42
|
2016-10-10T15:10:29.000Z
|
2022-03-20T09:09:53.000Z
|
WebHacking/100-Rolodex/tests/t_app.py
|
Probely/CTF-Challenges
|
9fb708d1b7a64ebc8309e3559b80d96915c6ada6
|
[
"Apache-2.0"
] | null | null | null |
WebHacking/100-Rolodex/tests/t_app.py
|
Probely/CTF-Challenges
|
9fb708d1b7a64ebc8309e3559b80d96915c6ada6
|
[
"Apache-2.0"
] | 18
|
2016-10-08T17:50:38.000Z
|
2022-03-15T17:01:59.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016, Bright Pixel
#
# For these tests to work, remember to start the service
# from a clean slate. Otherwise, if a test fails, it may
# leave stuff around and mess up the next testing round.
#
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from __future__ import absolute_import
import binascii
import json
import os
import unittest

import requests
# Credentials of the pre-provisioned participant account used by the tests.
TEST_USERNAME = "team0"
TEST_PASSWORD = "abcd1234-1"
# In this challenge, setting the participant's position attribute
# to "admin" (case-insensitive) gives it privileged access...
PRIVILEGED_POSITION = "admin"
# Valid employee UID...
EMPLOYEE_UID = 1001
class TestApp(unittest.TestCase):
    """End-to-end tests against the Rolodex API at 127.0.0.1:30878.

    The service must be started from a clean slate: these tests mutate the
    participant account, and a failed run may leave state behind.
    """

    BASE_URL = "http://127.0.0.1:30878"

    def setUp(self):
        """Fetch a fresh API token (and our uid) for the test participant."""
        r = requests.get(self.BASE_URL + "/token",
                         auth=(TEST_USERNAME, TEST_PASSWORD))
        response = r.json()
        self.token = response["token"]
        self.uid = response["uid"]

    # -- helpers ----------------------------------------------------------

    def _assert_error(self, r, status):
        """Assert an error response: HTTP status, echoed status, error key."""
        self.assertEqual(r.status_code, status)
        response = r.json()
        self.assertEqual(response["status"], r.status_code)
        self.assertTrue("error" in response)

    def _set_position(self, position):
        """Set our own "position" attribute, which drives privilege checks."""
        r = requests.put("%s/users/%d" % (self.BASE_URL, self.uid),
                         headers={"X-API-Token": self.token},
                         json={"position": position})
        self.assertTrue(r.status_code in (200, 304))

    # -- authentication ---------------------------------------------------

    def test_failed_auth(self):
        r = requests.get(self.BASE_URL + "/token",
                         auth=(TEST_USERNAME, TEST_PASSWORD + "--bad--"))
        self._assert_error(r, 401)
        r = requests.get(self.BASE_URL + "/token",
                         auth=(TEST_USERNAME + "--bad--", TEST_PASSWORD))
        self._assert_error(r, 401)

    def test_empty_auth(self):
        for username, password in ((TEST_USERNAME, ""),
                                   ("", TEST_PASSWORD),
                                   ("", "")):
            r = requests.get(self.BASE_URL + "/token",
                             auth=(username, password))
            self._assert_error(r, 401)

    def test_invalid_auth_header(self):
        # NOTE: the accented value is encoded at runtime because an accented
        # bytes literal (b"--áççêntèd--") is a syntax error on Python 3.
        for auth in (b"", b"--garbage--", "--áççêntèd--".encode("utf-8")):
            r = requests.get(self.BASE_URL + "/token",
                             headers={"Authorization": auth})
            self._assert_error(r, 401)

    def test_successful_auth(self):
        r = requests.get(self.BASE_URL + "/token",
                         auth=(TEST_USERNAME, TEST_PASSWORD))
        self.assertEqual(r.status_code, 200)
        response = r.json()
        self.assertEqual(response["status"], r.status_code)
        self.assertFalse("error" in response)
        self.assertTrue("uid" in response)
        self.assertTrue("token" in response)
        self.assertEqual(len(response["token"]), 64)  # tokens are 64 chars

    # -- token handling ---------------------------------------------------

    def test_missing_token(self):
        r = requests.get(self.BASE_URL + "/users/1")
        self._assert_error(r, 403)

    def test_failed_token_header(self):
        for token in (b"", b"--garbage--", "--áççêntèd--".encode("utf-8")):
            r = requests.get(self.BASE_URL + "/users/1",
                             headers={"X-API-Token": token})
            self._assert_error(r, 403)

    def test_successful_token_header(self):
        r = requests.get("%s/users/%d" % (self.BASE_URL, self.uid),
                         headers={"X-API-Token": self.token})
        self.assertEqual(r.status_code, 200)
        response = r.json()
        self.assertEqual(response["status"], r.status_code)
        self.assertFalse("error" in response)

    def test_failed_token_qs(self):
        for token in ("", "--garbage--", "--áççêntèd--"):
            r = requests.get("%s/users/1?token=%s" % (self.BASE_URL, token))
            self._assert_error(r, 403)

    def test_successful_token_qs(self):
        r = requests.get("%s/users/%s?token=%s"
                         % (self.BASE_URL, self.uid, self.token))
        self.assertEqual(r.status_code, 200)
        response = r.json()
        self.assertEqual(response["status"], r.status_code)
        self.assertFalse("error" in response)

    # -- attribute updates ------------------------------------------------

    def test_set_participant_attributes(self):
        # BUG FIX: os.urandom(10).encode("hex") is Python 2 only;
        # binascii.hexlify works on both Python 2 and 3.
        random_name = binascii.hexlify(os.urandom(10)).decode("ascii")
        r = requests.put("%s/users/%d" % (self.BASE_URL, self.uid),
                         headers={"X-API-Token": self.token},
                         json={"name": random_name})
        self.assertTrue(r.status_code in (200, 304))
        r = requests.get("%s/users/%s?token=%s"
                         % (self.BASE_URL, self.uid, self.token))
        self.assertEqual(r.status_code, 200)
        response = r.json()
        self.assertEqual(response["user"]["name"], random_name)

    def test_set_unknown_participant_attributes(self):
        r = requests.put("%s/users/%d" % (self.BASE_URL, self.uid),
                         headers={"X-API-Token": self.token},
                         json={"whatisthisattribute": "Testing Set"})
        self.assertEqual(r.status_code, 400)

    def test_set_employee_attributes(self):
        r = requests.put("%s/users/%d" % (self.BASE_URL, EMPLOYEE_UID),
                         headers={"X-API-Token": self.token},
                         json={"name": "Testing Set"})
        self.assertEqual(r.status_code, 403)  # ...cannot change employees.

    # -- privilege checks -------------------------------------------------

    def test_get_all_users_unprivileged(self):
        self._set_position("unprivileged")
        r = requests.get(self.BASE_URL + "/users",
                         headers={"X-API-Token": self.token})
        self.assertEqual(r.status_code, 200)
        response = r.json()
        self.assertEqual(response["status"], r.status_code)
        self.assertFalse("error" in response)
        self.assertTrue("users" in response)
        self.assertTrue(isinstance(response["users"], list))
        self.assertTrue(len(response["users"]) > 0)
        for user in response["users"]:
            self.assertTrue("uid" in user)
            if user["uid"] != self.uid:
                self.assertFalse("notes" in user)  # ...privileged attribute.

    def test_get_user_by_uid_unprivileged(self):
        self._set_position("unprivileged")
        r = requests.get("%s/users/%d" % (self.BASE_URL, EMPLOYEE_UID),
                         headers={"X-API-Token": self.token})
        self.assertEqual(r.status_code, 200)
        response = r.json()
        self.assertEqual(response["status"], r.status_code)
        self.assertFalse("error" in response)
        self.assertTrue("user" in response)
        self.assertEqual(response["user"]["uid"], EMPLOYEE_UID)
        self.assertTrue("notes" not in response["user"])

    def test_get_all_users_privileged(self):
        self._set_position(PRIVILEGED_POSITION)
        r = requests.get(self.BASE_URL + "/users",
                         headers={"X-API-Token": self.token})
        self.assertEqual(r.status_code, 200)
        response = r.json()
        self.assertEqual(response["status"], r.status_code)
        self.assertFalse("error" in response)
        self.assertTrue("users" in response)
        self.assertTrue(isinstance(response["users"], list))
        self.assertTrue(len(response["users"]) > 0)
        for user in response["users"]:
            self.assertTrue("uid" in user)
            if user["uid"] != self.uid:
                self.assertTrue("notes" in user)  # ...privileged attribute.

    def test_get_user_by_uid_privileged(self):
        self._set_position(PRIVILEGED_POSITION)
        r = requests.get("%s/users/%d" % (self.BASE_URL, EMPLOYEE_UID),
                         headers={"X-API-Token": self.token})
        self.assertEqual(r.status_code, 200)
        response = r.json()
        self.assertEqual(response["status"], r.status_code)
        self.assertFalse("error" in response)
        self.assertTrue("user" in response)
        self.assertEqual(response["user"]["uid"], EMPLOYEE_UID)
        self.assertTrue("notes" in response["user"])
# Allow running this module directly: python t_app.py
if __name__ == "__main__":
    unittest.main()
# vim: set expandtab ts=4 sw=4:
| 35.249097
| 105
| 0.60129
| 1,267
| 9,764
| 4.521705
| 0.116811
| 0.048874
| 0.076802
| 0.039274
| 0.813929
| 0.802583
| 0.80206
| 0.784779
| 0.783732
| 0.783732
| 0
| 0.052717
| 0.246211
| 9,764
| 276
| 106
| 35.376812
| 0.725679
| 0.050594
| 0
| 0.668508
| 0
| 0
| 0.156797
| 0
| 0
| 0
| 0
| 0
| 0.41989
| 0
| null | null | 0.033149
| 0.044199
| null | null | 0.005525
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
6a8014805ac1d7ec7e2d131a836ed2ec7c76423c
| 21,299
|
py
|
Python
|
generated-tests/sklearn/test_SKLEARN_GaussianNB.py
|
sherbold/replication-kit-2020-smoke-testing
|
17aa9858ab693e5c1b5b20c9b2d277802109600e
|
[
"Apache-2.0"
] | null | null | null |
generated-tests/sklearn/test_SKLEARN_GaussianNB.py
|
sherbold/replication-kit-2020-smoke-testing
|
17aa9858ab693e5c1b5b20c9b2d277802109600e
|
[
"Apache-2.0"
] | null | null | null |
generated-tests/sklearn/test_SKLEARN_GaussianNB.py
|
sherbold/replication-kit-2020-smoke-testing
|
17aa9858ab693e5c1b5b20c9b2d277802109600e
|
[
"Apache-2.0"
] | null | null | null |
import unittest
import xmlrunner
import pandas as pd
import numpy as np
import threading
import functools
import inspect
import math
import traceback
import warnings
from parameterized import parameterized
from scipy.io.arff import loadarff
from scipy.stats import chisquare, ks_2samp
from sklearn.preprocessing import LabelEncoder
from sklearn.naive_bayes import GaussianNB
class TestTimeoutException(Exception):
    """Raised when a decorated test exceeds its allotted duration."""

    def __init__(self, value):
        # Keep the offending message/payload for later display.
        self.value = value

    def __str__(self):
        # Same output as repr(self.value).
        return "{!r}".format(self.value)
# thanks to https://gist.github.com/vadimg/2902788
def timeout(duration, default=None):
    """Decorator factory: run the wrapped callable in a daemon thread,
    waiting at most *duration* seconds for it to finish.

    Raises TestTimeoutException on overrun; otherwise re-raises any
    exception from the callable, or returns its result (*default* if the
    callable never produced one).
    """

    def decorator(func):

        class _Runner(threading.Thread):
            """Executes ``func`` once, capturing its result or exception."""

            def __init__(self, args, kwargs):
                threading.Thread.__init__(self)
                self.args = args
                self.kwargs = kwargs
                self.result = default
                self.exception = None
                # Daemonize so an overrunning test cannot block interpreter exit.
                self.daemon = True

            def run(self):
                try:
                    self.result = func(*self.args, **self.kwargs)
                except Exception as e:
                    self.exception = e

        @functools.wraps(func)
        def wrap(*args, **kwargs):
            runner = _Runner(args, kwargs)
            runner.start()
            runner.join(duration)
            if runner.is_alive():
                raise TestTimeoutException(
                    'timeout after %i seconds for test %s' % (duration, func))
            if runner.exception:
                raise runner.exception
            return runner.result

        return wrap

    return decorator
class test_SKLEARN_GaussianNB(unittest.TestCase):
    """Smoke tests: train/predict GaussianNB on generated ARFF datasets.

    Each test method exercises one synthetic data shape (uniform, skewed,
    extreme values, ...) for every parameter set in ``params``.
    """

    # (display name, constructor kwargs) pairs fed to @parameterized.expand.
    params = [("{'var_smoothing':0.000000001,}", {'var_smoothing': 0.000000001}),
              ("{'var_smoothing':0.0000001,}", {'var_smoothing': 0.0000001}),
              ]

    def assert_morphtest(self, evaluation_type, testcase_name, iteration,
                         deviations_class, deviations_score,
                         pval_chisquare, pval_kstest):
        """Assert a metamorphic-test outcome according to *evaluation_type*.

        'score_exact'/'class_exact' require zero deviations; the '_stat'
        variants require the respective p-value to exceed 0.05.
        """
        if evaluation_type == 'score_exact':
            self.assertEqual(deviations_score, 0)
        elif evaluation_type == 'class_exact':
            self.assertEqual(deviations_class, 0)
        elif evaluation_type == 'score_stat':
            self.assertTrue(pval_kstest > 0.05)
        elif evaluation_type == 'class_stat':
            self.assertTrue(pval_chisquare > 0.05)
        else:
            raise ValueError('invalid evaluation_type: %s (allowed: score_exact, class_exact, score_stat, class_stat' % evaluation_type)

    def _run_smoketest(self, dataset, kwargs):
        """Shared body of every smoke test: load smokedata/<dataset>_<i>_*,
        fit GaussianNB(**kwargs) on the training split, predict on the test
        split. Passing means "no exception raised".
        """
        for iteration in range(1, 1 + 1):
            data, meta = loadarff('smokedata/%s_%i_training.arff' % (dataset, iteration))
            testdata, testmeta = loadarff('smokedata/%s_%i_test.arff' % (dataset, iteration))
            lb_make = LabelEncoder()
            data_df = pd.DataFrame(data)
            data_df["classAtt"] = lb_make.fit_transform(data_df["classAtt"])
            data_df = pd.get_dummies(data_df)
            # BUG FIX: the generated code built the test frame from the
            # TRAINING data (pd.DataFrame(data)), silently discarding the
            # test split it had just loaded.
            testdata_df = pd.DataFrame(testdata)
            testdata_df["classAtt"] = lb_make.fit_transform(testdata_df["classAtt"])
            testdata_df = pd.get_dummies(testdata_df, sparse=True)
            # Locate the (encoded) class column by name.
            classIndex = -1
            for i, s in enumerate(data_df.columns):
                if 'classAtt' in s:
                    classIndex = i
            classifier = GaussianNB(**kwargs)
            np.random.seed(42)  # deterministic runs
            classifier.fit(np.delete(data_df.values, classIndex, axis=1),
                           data_df.values[:, classIndex])
            classifier.predict(np.delete(testdata_df.values, classIndex, axis=1))

    @parameterized.expand(params)
    @timeout(21600)
    def test_Uniform(self, name, kwargs):
        self._run_smoketest('Uniform', kwargs)

    @parameterized.expand(params)
    @timeout(21600)
    def test_MinFloat(self, name, kwargs):
        self._run_smoketest('MinFloat', kwargs)

    @parameterized.expand(params)
    @timeout(21600)
    def test_VerySmall(self, name, kwargs):
        self._run_smoketest('VerySmall', kwargs)

    @parameterized.expand(params)
    @timeout(21600)
    def test_MinDouble(self, name, kwargs):
        self._run_smoketest('MinDouble', kwargs)

    @parameterized.expand(params)
    @timeout(21600)
    def test_MaxFloat(self, name, kwargs):
        self._run_smoketest('MaxFloat', kwargs)

    @parameterized.expand(params)
    @timeout(21600)
    def test_VeryLarge(self, name, kwargs):
        self._run_smoketest('VeryLarge', kwargs)

    @parameterized.expand(params)
    @timeout(21600)
    def test_MaxDouble(self, name, kwargs):
        self._run_smoketest('MaxDouble', kwargs)

    @parameterized.expand(params)
    @timeout(21600)
    def test_Split(self, name, kwargs):
        self._run_smoketest('Split', kwargs)

    @parameterized.expand(params)
    @timeout(21600)
    def test_LeftSkew(self, name, kwargs):
        self._run_smoketest('LeftSkew', kwargs)

    @parameterized.expand(params)
    @timeout(21600)
    def test_RightSkew(self, name, kwargs):
        self._run_smoketest('RightSkew', kwargs)

    @parameterized.expand(params)
    @timeout(21600)
    def test_OneClass(self, name, kwargs):
        self._run_smoketest('OneClass', kwargs)

    @parameterized.expand(params)
    @timeout(21600)
    def test_Bias(self, name, kwargs):
        self._run_smoketest('Bias', kwargs)

    @parameterized.expand(params)
    @timeout(21600)
    def test_Outlier(self, name, kwargs):
        self._run_smoketest('Outlier', kwargs)

    @parameterized.expand(params)
    @timeout(21600)
    def test_Zeroes(self, name, kwargs):
        self._run_smoketest('Zeroes', kwargs)

    @parameterized.expand(params)
    @timeout(21600)
    def test_RandomNumeric(self, name, kwargs):
        self._run_smoketest('RandomNumeric', kwargs)

    @parameterized.expand(params)
    @timeout(21600)
    def test_DisjointNumeric(self, name, kwargs):
        self._run_smoketest('DisjointNumeric', kwargs)
if __name__ == '__main__':
    unittest.main()
    # Alternative runner producing JUnit-style XML output:
    # with open('results.xml', 'wb') as output:
    #     unittest.main(
    #         testRunner=xmlrunner.XMLTestRunner(output=output),
    #         failfast=False, buffer=False, catchbreak=False)
| 42.855131
| 139
| 0.588009
| 2,412
| 21,299
| 4.995854
| 0.070066
| 0.063734
| 0.071701
| 0.045145
| 0.83195
| 0.83195
| 0.83195
| 0.823568
| 0.757178
| 0.757178
| 0
| 0.017531
| 0.303676
| 21,299
| 497
| 140
| 42.855131
| 0.794957
| 0.01108
| 0
| 0.72
| 0
| 0
| 0.093266
| 0.053471
| 0
| 0
| 0
| 0
| 0.0125
| 1
| 0.06
| false
| 0
| 0.0375
| 0.0025
| 0.1175
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6ac8329c041ef78ba881f0611699c346bde5bdbb
| 1,387
|
py
|
Python
|
api/models.py
|
dieegom/covidapi
|
57815566d5d39bf37601419eb4379494843b7294
|
[
"MIT"
] | null | null | null |
api/models.py
|
dieegom/covidapi
|
57815566d5d39bf37601419eb4379494843b7294
|
[
"MIT"
] | null | null | null |
api/models.py
|
dieegom/covidapi
|
57815566d5d39bf37601419eb4379494843b7294
|
[
"MIT"
] | null | null | null |
from django.db import models
# Create your models here.
class ConfirmedData(models.Model):
    """Per-country confirmed-case count observed at a point in time."""

    timestamp = models.DateTimeField()  # observation time from the source
    country = models.CharField(max_length=100)
    # BUG FIX: the default was the float 0.0 on an IntegerField; use an
    # int so the default matches the column type.
    count = models.IntegerField(default=0)
    created_at = models.DateTimeField(auto_now_add=True)  # row insertion time
    updated_at = models.DateTimeField(auto_now=True)      # last modification

    def __str__(self):
        return f'{self.country} - {self.count}'

    class Meta:
        indexes = [
            models.Index(fields=['created_at'])
        ]
class DeadData(models.Model):
    """Per-country death count observed at a point in time."""

    timestamp = models.DateTimeField()  # observation time from the source
    country = models.CharField(max_length=100)
    # BUG FIX: the default was the float 0.0 on an IntegerField; use an
    # int so the default matches the column type.
    count = models.IntegerField(default=0)
    created_at = models.DateTimeField(auto_now_add=True)  # row insertion time
    updated_at = models.DateTimeField(auto_now=True)      # last modification

    def __str__(self):
        return f'{self.country} - {self.count}'

    class Meta:
        indexes = [
            models.Index(fields=['created_at'])
        ]
class RecoveredData(models.Model):
    """Per-country recovered-case count observed at a point in time."""

    timestamp = models.DateTimeField()  # observation time from the source
    country = models.CharField(max_length=100)
    # BUG FIX: the default was the float 0.0 on an IntegerField; use an
    # int so the default matches the column type.
    count = models.IntegerField(default=0)
    created_at = models.DateTimeField(auto_now_add=True)  # row insertion time
    updated_at = models.DateTimeField(auto_now=True)      # last modification

    def __str__(self):
        return f'{self.country} - {self.count}'

    class Meta:
        indexes = [
            models.Index(fields=['created_at'])
        ]
| 26.673077
| 56
| 0.664023
| 162
| 1,387
| 5.481481
| 0.246914
| 0.192568
| 0.141892
| 0.168919
| 0.906532
| 0.906532
| 0.906532
| 0.906532
| 0.906532
| 0.906532
| 0
| 0.01385
| 0.219178
| 1,387
| 51
| 57
| 27.196078
| 0.806094
| 0.017304
| 0
| 0.810811
| 0
| 0
| 0.085966
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.081081
| false
| 0
| 0.027027
| 0.081081
| 0.756757
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 9
|
0a8537d1b1c536755aba7f7585ae2896288819aa
| 3,845
|
py
|
Python
|
djangoBackend/user_module/migrations/0009_auto_20211015_1717.py
|
muhanzi/Django-REST-API
|
08b8b2bbd08a74589cca7b5fd4e1d604d9a6d7eb
|
[
"Apache-2.0"
] | null | null | null |
djangoBackend/user_module/migrations/0009_auto_20211015_1717.py
|
muhanzi/Django-REST-API
|
08b8b2bbd08a74589cca7b5fd4e1d604d9a6d7eb
|
[
"Apache-2.0"
] | null | null | null |
djangoBackend/user_module/migrations/0009_auto_20211015_1717.py
|
muhanzi/Django-REST-API
|
08b8b2bbd08a74589cca7b5fd4e1d604d9a6d7eb
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 3.1.2 on 2021-10-15 14:17
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add nullable profile columns to employee, recrutingagency and supersite.

    Every new column is a nullable CharField(max_length=255) except the two
    ``dateOfBirth`` columns, which are nullable DateFields. The operations
    are generated in the same order the original auto-generated migration
    listed them.
    """

    dependencies = [
        ('user_module', '0008_auto_20211010_0123'),
    ]

    operations = [
        migrations.AddField(
            model_name=target_model,
            name=field_name,
            # dateOfBirth is a date; everything else is free-form text.
            field=(models.DateField(null=True)
                   if field_name == 'dateOfBirth'
                   else models.CharField(blank=True, max_length=255, null=True)),
        )
        for target_model, field_name in [
            ('employee', 'address'),
            ('employee', 'bioData'),
            ('employee', 'country'),
            ('employee', 'dateOfBirth'),
            ('employee', 'district'),
            ('employee', 'lc1Name'),
            ('employee', 'parentGuardian'),
            ('employee', 'profilePicture'),
            ('employee', 'state'),
            ('employee', 'village'),
            ('recrutingagency', 'address'),
            ('recrutingagency', 'bankAccountNumber'),
            ('recrutingagency', 'bankName'),
            ('recrutingagency', 'country'),
            ('recrutingagency', 'dateOfBirth'),
            ('recrutingagency', 'nin'),
            ('recrutingagency', 'profilePicture'),
            ('recrutingagency', 'state'),
            ('recrutingagency', 'village'),
            ('supersite', 'profilePicture'),
        ]
    ]
| 33.72807
| 74
| 0.560988
| 360
| 3,845
| 5.875
| 0.152778
| 0.170213
| 0.217494
| 0.255319
| 0.893144
| 0.893144
| 0.876596
| 0.876596
| 0.861939
| 0.861939
| 0
| 0.032887
| 0.319896
| 3,845
| 113
| 75
| 34.026549
| 0.775908
| 0.011704
| 0
| 0.859813
| 1
| 0
| 0.115324
| 0.006056
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.009346
| 0
| 0.037383
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
0ac99ec6caab8733f048da5a58bb51a235a50c8f
| 132,335
|
py
|
Python
|
daq-gui/mainwindow_ui.py
|
Sovichea/instrumentation-opendaq-rpi3
|
ed8b00bce131d8960ddc2a0d130c442b77a4a087
|
[
"MIT"
] | 1
|
2017-12-08T17:07:12.000Z
|
2017-12-08T17:07:12.000Z
|
daq-gui/mainwindow_ui.py
|
Sovichea/instrumentation-opendaq-rpi3
|
ed8b00bce131d8960ddc2a0d130c442b77a4a087
|
[
"MIT"
] | null | null | null |
daq-gui/mainwindow_ui.py
|
Sovichea/instrumentation-opendaq-rpi3
|
ed8b00bce131d8960ddc2a0d130c442b77a4a087
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'mainwindow.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
# PyQt4 version-compatibility shims (auto-generated by pyuic4).
# Older PyQt4 builds expose QtCore.QString.fromUtf8; newer API-2 builds
# drop QString entirely, so fall back to the identity function.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        # API-2 / Python 3: strings are already unicode; pass through.
        return s

# QApplication.UnicodeUTF8 was removed in later Qt versions; when present,
# pass it as the explicit encoding argument to translate(), otherwise use
# the three-argument overload.
try:
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(851, 636)
self.centralwidget = QtGui.QWidget(MainWindow)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.centralwidget.sizePolicy().hasHeightForWidth())
self.centralwidget.setSizePolicy(sizePolicy)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.gridLayout = QtGui.QGridLayout(self.centralwidget)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.verticalLayout = QtGui.QVBoxLayout()
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.label = QtGui.QLabel(self.centralwidget)
self.label.setObjectName(_fromUtf8("label"))
self.horizontalLayout.addWidget(self.label)
self.lineEdit_sample_rate = QtGui.QLineEdit(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Maximum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lineEdit_sample_rate.sizePolicy().hasHeightForWidth())
self.lineEdit_sample_rate.setSizePolicy(sizePolicy)
self.lineEdit_sample_rate.setMaximumSize(QtCore.QSize(80, 16777215))
self.lineEdit_sample_rate.setMaxLength(5)
self.lineEdit_sample_rate.setFrame(True)
self.lineEdit_sample_rate.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.lineEdit_sample_rate.setObjectName(_fromUtf8("lineEdit_sample_rate"))
self.horizontalLayout.addWidget(self.lineEdit_sample_rate)
self.label_2 = QtGui.QLabel(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_2.sizePolicy().hasHeightForWidth())
self.label_2.setSizePolicy(sizePolicy)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.horizontalLayout.addWidget(self.label_2)
spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem)
self.verticalLayout.addLayout(self.horizontalLayout)
self.tabWidget = QtGui.QTabWidget(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.tabWidget.sizePolicy().hasHeightForWidth())
self.tabWidget.setSizePolicy(sizePolicy)
self.tabWidget.setMinimumSize(QtCore.QSize(325, 0))
self.tabWidget.setObjectName(_fromUtf8("tabWidget"))
self.tab_single = QtGui.QWidget()
self.tab_single.setObjectName(_fromUtf8("tab_single"))
self.verticalLayout_2 = QtGui.QVBoxLayout(self.tab_single)
self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
self.groupBox_2 = QtGui.QGroupBox(self.tab_single)
self.groupBox_2.setObjectName(_fromUtf8("groupBox_2"))
self.gridLayout_2 = QtGui.QGridLayout(self.groupBox_2)
self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
self.checkBox_chan5 = QtGui.QCheckBox(self.groupBox_2)
self.checkBox_chan5.setObjectName(_fromUtf8("checkBox_chan5"))
self.gridLayout_2.addWidget(self.checkBox_chan5, 2, 2, 1, 1)
self.checkBox_chan0 = QtGui.QCheckBox(self.groupBox_2)
self.checkBox_chan0.setChecked(True)
self.checkBox_chan0.setObjectName(_fromUtf8("checkBox_chan0"))
self.gridLayout_2.addWidget(self.checkBox_chan0, 0, 0, 1, 1)
self.checkBox_chan1 = QtGui.QCheckBox(self.groupBox_2)
self.checkBox_chan1.setObjectName(_fromUtf8("checkBox_chan1"))
self.gridLayout_2.addWidget(self.checkBox_chan1, 0, 2, 1, 1)
self.checkBox_chan3 = QtGui.QCheckBox(self.groupBox_2)
self.checkBox_chan3.setObjectName(_fromUtf8("checkBox_chan3"))
self.gridLayout_2.addWidget(self.checkBox_chan3, 1, 2, 1, 1)
self.checkBox_chan4 = QtGui.QCheckBox(self.groupBox_2)
self.checkBox_chan4.setObjectName(_fromUtf8("checkBox_chan4"))
self.gridLayout_2.addWidget(self.checkBox_chan4, 2, 0, 1, 1)
self.checkBox_chan2 = QtGui.QCheckBox(self.groupBox_2)
self.checkBox_chan2.setObjectName(_fromUtf8("checkBox_chan2"))
self.gridLayout_2.addWidget(self.checkBox_chan2, 1, 0, 1, 1)
self.checkBox_chan6 = QtGui.QCheckBox(self.groupBox_2)
self.checkBox_chan6.setObjectName(_fromUtf8("checkBox_chan6"))
self.gridLayout_2.addWidget(self.checkBox_chan6, 3, 0, 1, 1)
self.checkBox_chan7 = QtGui.QCheckBox(self.groupBox_2)
self.checkBox_chan7.setObjectName(_fromUtf8("checkBox_chan7"))
self.gridLayout_2.addWidget(self.checkBox_chan7, 3, 2, 1, 1)
self.label_8 = QtGui.QLabel(self.groupBox_2)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(204, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 51, 51))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(229, 25, 25))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(102, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(136, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(204, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(229, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(204, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 51, 51))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(229, 25, 25))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(102, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(136, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(204, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(229, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(102, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(204, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 51, 51))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(229, 25, 25))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(102, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(136, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(102, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(102, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(204, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(204, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(204, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipText, brush)
self.label_8.setPalette(palette)
self.label_8.setAutoFillBackground(True)
self.label_8.setObjectName(_fromUtf8("label_8"))
self.gridLayout_2.addWidget(self.label_8, 0, 1, 1, 1)
self.label_9 = QtGui.QLabel(self.groupBox_2)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(138, 226, 52))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(198, 255, 143))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(168, 240, 97))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(69, 113, 26))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(92, 151, 34))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(138, 226, 52))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(196, 240, 153))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(138, 226, 52))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(198, 255, 143))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(168, 240, 97))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(69, 113, 26))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(92, 151, 34))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(138, 226, 52))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(196, 240, 153))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(69, 113, 26))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(138, 226, 52))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(198, 255, 143))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(168, 240, 97))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(69, 113, 26))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(92, 151, 34))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(69, 113, 26))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(69, 113, 26))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(138, 226, 52))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(138, 226, 52))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(138, 226, 52))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipText, brush)
self.label_9.setPalette(palette)
self.label_9.setAutoFillBackground(True)
self.label_9.setText(_fromUtf8(""))
self.label_9.setObjectName(_fromUtf8("label_9"))
self.gridLayout_2.addWidget(self.label_9, 0, 3, 1, 1)
self.label_10 = QtGui.QLabel(self.groupBox_2)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(52, 101, 164))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(78, 152, 246))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(65, 126, 205))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(26, 50, 82))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(34, 67, 109))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(52, 101, 164))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(153, 178, 209))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(52, 101, 164))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(78, 152, 246))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(65, 126, 205))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(26, 50, 82))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(34, 67, 109))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(52, 101, 164))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(153, 178, 209))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(26, 50, 82))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(52, 101, 164))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(78, 152, 246))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(65, 126, 205))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(26, 50, 82))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(34, 67, 109))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(26, 50, 82))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(26, 50, 82))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(52, 101, 164))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(52, 101, 164))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(52, 101, 164))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipText, brush)
self.label_10.setPalette(palette)
self.label_10.setAutoFillBackground(True)
self.label_10.setText(_fromUtf8(""))
self.label_10.setObjectName(_fromUtf8("label_10"))
self.gridLayout_2.addWidget(self.label_10, 1, 1, 1, 1)
self.label_11 = QtGui.QLabel(self.groupBox_2)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(252, 233, 79))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 250, 203))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(253, 241, 141))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(126, 116, 39))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(168, 155, 52))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(252, 233, 79))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(253, 244, 167))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(252, 233, 79))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 250, 203))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(253, 241, 141))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(126, 116, 39))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(168, 155, 52))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(252, 233, 79))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(253, 244, 167))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(126, 116, 39))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(252, 233, 79))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 250, 203))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(253, 241, 141))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(126, 116, 39))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(168, 155, 52))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(126, 116, 39))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(126, 116, 39))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(252, 233, 79))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(252, 233, 79))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(252, 233, 79))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipText, brush)
self.label_11.setPalette(palette)
self.label_11.setAutoFillBackground(True)
self.label_11.setText(_fromUtf8(""))
self.label_11.setObjectName(_fromUtf8("label_11"))
self.gridLayout_2.addWidget(self.label_11, 1, 3, 1, 1)
self.label_12 = QtGui.QLabel(self.groupBox_2)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(186, 63, 217))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(233, 144, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(209, 103, 236))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(93, 31, 108))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(124, 42, 145))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(186, 63, 217))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(220, 159, 236))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(186, 63, 217))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(233, 144, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(209, 103, 236))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(93, 31, 108))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(124, 42, 145))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(186, 63, 217))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(220, 159, 236))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(93, 31, 108))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(186, 63, 217))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(233, 144, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(209, 103, 236))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(93, 31, 108))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(124, 42, 145))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(93, 31, 108))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(93, 31, 108))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(186, 63, 217))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(186, 63, 217))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(186, 63, 217))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipText, brush)
self.label_12.setPalette(palette)
self.label_12.setAutoFillBackground(True)
self.label_12.setText(_fromUtf8(""))
self.label_12.setObjectName(_fromUtf8("label_12"))
self.gridLayout_2.addWidget(self.label_12, 2, 1, 1, 1)
self.label_13 = QtGui.QLabel(self.groupBox_2)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(55, 228, 228))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(149, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(102, 241, 241))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(27, 114, 114))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(37, 152, 152))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(55, 228, 228))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(155, 241, 241))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(55, 228, 228))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(149, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(102, 241, 241))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(27, 114, 114))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(37, 152, 152))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(55, 228, 228))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(155, 241, 241))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(27, 114, 114))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(55, 228, 228))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(149, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(102, 241, 241))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(27, 114, 114))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(37, 152, 152))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(27, 114, 114))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(27, 114, 114))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(55, 228, 228))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(55, 228, 228))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(55, 228, 228))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipText, brush)
self.label_13.setPalette(palette)
self.label_13.setAutoFillBackground(True)
self.label_13.setText(_fromUtf8(""))
self.label_13.setObjectName(_fromUtf8("label_13"))
self.gridLayout_2.addWidget(self.label_13, 3, 1, 1, 1)
self.label_14 = QtGui.QLabel(self.groupBox_2)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipText, brush)
self.label_14.setPalette(palette)
self.label_14.setAutoFillBackground(True)
self.label_14.setObjectName(_fromUtf8("label_14"))
self.gridLayout_2.addWidget(self.label_14, 2, 3, 1, 1)
self.label_15 = QtGui.QLabel(self.groupBox_2)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 170, 170))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 170, 170))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 170, 170))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipText, brush)
self.label_15.setPalette(palette)
self.label_15.setAutoFillBackground(True)
self.label_15.setText(_fromUtf8(""))
self.label_15.setObjectName(_fromUtf8("label_15"))
self.gridLayout_2.addWidget(self.label_15, 3, 3, 1, 1)
self.verticalLayout_2.addWidget(self.groupBox_2)
self.tabWidget.addTab(self.tab_single, _fromUtf8(""))
self.tab_diff = QtGui.QWidget()
self.tab_diff.setObjectName(_fromUtf8("tab_diff"))
self.verticalLayout_3 = QtGui.QVBoxLayout(self.tab_diff)
self.verticalLayout_3.setObjectName(_fromUtf8("verticalLayout_3"))
self.groupBox_3 = QtGui.QGroupBox(self.tab_diff)
self.groupBox_3.setObjectName(_fromUtf8("groupBox_3"))
self.gridLayout_3 = QtGui.QGridLayout(self.groupBox_3)
self.gridLayout_3.setObjectName(_fromUtf8("gridLayout_3"))
self.label_18 = QtGui.QLabel(self.groupBox_3)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(52, 101, 164))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(78, 152, 246))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(65, 126, 205))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(26, 50, 82))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(34, 67, 109))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(52, 101, 164))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(153, 178, 209))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(52, 101, 164))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(78, 152, 246))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(65, 126, 205))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(26, 50, 82))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(34, 67, 109))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(52, 101, 164))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(153, 178, 209))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(26, 50, 82))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(52, 101, 164))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(78, 152, 246))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(65, 126, 205))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(26, 50, 82))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(34, 67, 109))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(26, 50, 82))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(26, 50, 82))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(52, 101, 164))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(52, 101, 164))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(52, 101, 164))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipText, brush)
self.label_18.setPalette(palette)
self.label_18.setAutoFillBackground(True)
self.label_18.setText(_fromUtf8(""))
self.label_18.setObjectName(_fromUtf8("label_18"))
self.gridLayout_3.addWidget(self.label_18, 4, 1, 1, 1)
self.label_16 = QtGui.QLabel(self.groupBox_3)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(204, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 51, 51))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(229, 25, 25))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(102, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(136, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(204, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(229, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(204, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 51, 51))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(229, 25, 25))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(102, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(136, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(204, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(229, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(102, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(204, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 51, 51))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(229, 25, 25))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(102, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(136, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(102, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(102, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(204, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(204, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(204, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipText, brush)
self.label_16.setPalette(palette)
self.label_16.setAutoFillBackground(True)
self.label_16.setObjectName(_fromUtf8("label_16"))
self.gridLayout_3.addWidget(self.label_16, 0, 1, 1, 1)
self.checkBox_chan32 = QtGui.QCheckBox(self.groupBox_3)
self.checkBox_chan32.setObjectName(_fromUtf8("checkBox_chan32"))
self.gridLayout_3.addWidget(self.checkBox_chan32, 3, 0, 1, 1)
self.checkBox_chan76 = QtGui.QCheckBox(self.groupBox_3)
self.checkBox_chan76.setObjectName(_fromUtf8("checkBox_chan76"))
self.gridLayout_3.addWidget(self.checkBox_chan76, 5, 0, 1, 1)
self.checkBox_chan54 = QtGui.QCheckBox(self.groupBox_3)
self.checkBox_chan54.setObjectName(_fromUtf8("checkBox_chan54"))
self.gridLayout_3.addWidget(self.checkBox_chan54, 4, 0, 1, 1)
self.checkBox_chan10 = QtGui.QCheckBox(self.groupBox_3)
self.checkBox_chan10.setChecked(True)
self.checkBox_chan10.setObjectName(_fromUtf8("checkBox_chan10"))
self.gridLayout_3.addWidget(self.checkBox_chan10, 0, 0, 1, 1)
self.comboBox_gain_1 = QtGui.QComboBox(self.groupBox_3)
self.comboBox_gain_1.setObjectName(_fromUtf8("comboBox_gain_1"))
self.comboBox_gain_1.addItem(_fromUtf8(""))
self.comboBox_gain_1.addItem(_fromUtf8(""))
self.comboBox_gain_1.addItem(_fromUtf8(""))
self.comboBox_gain_1.addItem(_fromUtf8(""))
self.comboBox_gain_1.addItem(_fromUtf8(""))
self.comboBox_gain_1.addItem(_fromUtf8(""))
self.comboBox_gain_1.addItem(_fromUtf8(""))
self.comboBox_gain_1.addItem(_fromUtf8(""))
self.gridLayout_3.addWidget(self.comboBox_gain_1, 0, 2, 1, 1)
self.comboBox_gain_2 = QtGui.QComboBox(self.groupBox_3)
self.comboBox_gain_2.setObjectName(_fromUtf8("comboBox_gain_2"))
self.comboBox_gain_2.addItem(_fromUtf8(""))
self.comboBox_gain_2.addItem(_fromUtf8(""))
self.comboBox_gain_2.addItem(_fromUtf8(""))
self.comboBox_gain_2.addItem(_fromUtf8(""))
self.comboBox_gain_2.addItem(_fromUtf8(""))
self.comboBox_gain_2.addItem(_fromUtf8(""))
self.comboBox_gain_2.addItem(_fromUtf8(""))
self.comboBox_gain_2.addItem(_fromUtf8(""))
self.gridLayout_3.addWidget(self.comboBox_gain_2, 3, 2, 1, 1)
self.comboBox_gain_3 = QtGui.QComboBox(self.groupBox_3)
self.comboBox_gain_3.setObjectName(_fromUtf8("comboBox_gain_3"))
self.comboBox_gain_3.addItem(_fromUtf8(""))
self.comboBox_gain_3.addItem(_fromUtf8(""))
self.comboBox_gain_3.addItem(_fromUtf8(""))
self.comboBox_gain_3.addItem(_fromUtf8(""))
self.comboBox_gain_3.addItem(_fromUtf8(""))
self.comboBox_gain_3.addItem(_fromUtf8(""))
self.comboBox_gain_3.addItem(_fromUtf8(""))
self.comboBox_gain_3.addItem(_fromUtf8(""))
self.gridLayout_3.addWidget(self.comboBox_gain_3, 4, 2, 1, 1)
self.comboBox_gain_4 = QtGui.QComboBox(self.groupBox_3)
self.comboBox_gain_4.setObjectName(_fromUtf8("comboBox_gain_4"))
self.comboBox_gain_4.addItem(_fromUtf8(""))
self.comboBox_gain_4.addItem(_fromUtf8(""))
self.comboBox_gain_4.addItem(_fromUtf8(""))
self.comboBox_gain_4.addItem(_fromUtf8(""))
self.comboBox_gain_4.addItem(_fromUtf8(""))
self.comboBox_gain_4.addItem(_fromUtf8(""))
self.comboBox_gain_4.addItem(_fromUtf8(""))
self.comboBox_gain_4.addItem(_fromUtf8(""))
self.gridLayout_3.addWidget(self.comboBox_gain_4, 5, 2, 1, 1)
self.label_17 = QtGui.QLabel(self.groupBox_3)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(115, 210, 22))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 255, 87))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(142, 232, 54))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(57, 105, 11))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(76, 140, 14))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(115, 210, 22))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(185, 232, 138))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(115, 210, 22))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 255, 87))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(142, 232, 54))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(57, 105, 11))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(76, 140, 14))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(115, 210, 22))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(185, 232, 138))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(57, 105, 11))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(115, 210, 22))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 255, 87))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(142, 232, 54))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(57, 105, 11))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(76, 140, 14))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(57, 105, 11))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(57, 105, 11))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(115, 210, 22))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(115, 210, 22))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(115, 210, 22))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipText, brush)
self.label_17.setPalette(palette)
self.label_17.setAutoFillBackground(True)
self.label_17.setText(_fromUtf8(""))
self.label_17.setObjectName(_fromUtf8("label_17"))
self.gridLayout_3.addWidget(self.label_17, 3, 1, 1, 1)
self.label_19 = QtGui.QLabel(self.groupBox_3)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(252, 233, 79))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 250, 203))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(253, 241, 141))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(126, 116, 39))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(168, 155, 52))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(252, 233, 79))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(253, 244, 167))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(252, 233, 79))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 250, 203))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(253, 241, 141))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(126, 116, 39))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(168, 155, 52))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(252, 233, 79))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(253, 244, 167))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(126, 116, 39))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(252, 233, 79))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 250, 203))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(253, 241, 141))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(126, 116, 39))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(168, 155, 52))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(126, 116, 39))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(126, 116, 39))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(252, 233, 79))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(252, 233, 79))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(252, 233, 79))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipText, brush)
self.label_19.setPalette(palette)
self.label_19.setAutoFillBackground(True)
self.label_19.setText(_fromUtf8(""))
self.label_19.setObjectName(_fromUtf8("label_19"))
self.gridLayout_3.addWidget(self.label_19, 5, 1, 1, 1)
spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridLayout_3.addItem(spacerItem1, 0, 3, 1, 1)
spacerItem2 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridLayout_3.addItem(spacerItem2, 3, 3, 1, 1)
spacerItem3 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridLayout_3.addItem(spacerItem3, 4, 3, 1, 1)
spacerItem4 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridLayout_3.addItem(spacerItem4, 5, 3, 1, 1)
self.verticalLayout_3.addWidget(self.groupBox_3)
self.tabWidget.addTab(self.tab_diff, _fromUtf8(""))
self.verticalLayout.addWidget(self.tabWidget)
self.label_7 = QtGui.QLabel(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_7.sizePolicy().hasHeightForWidth())
self.label_7.setSizePolicy(sizePolicy)
self.label_7.setObjectName(_fromUtf8("label_7"))
self.verticalLayout.addWidget(self.label_7)
self.horizontalLayout_2 = QtGui.QHBoxLayout()
self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
self.label_3 = QtGui.QLabel(self.centralwidget)
self.label_3.setObjectName(_fromUtf8("label_3"))
self.horizontalLayout_2.addWidget(self.label_3)
self.lineEdit_tmin = QtGui.QLineEdit(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Maximum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lineEdit_tmin.sizePolicy().hasHeightForWidth())
self.lineEdit_tmin.setSizePolicy(sizePolicy)
self.lineEdit_tmin.setMaximumSize(QtCore.QSize(80, 16777215))
self.lineEdit_tmin.setMaxLength(5)
self.lineEdit_tmin.setAlignment(QtCore.Qt.AlignCenter)
self.lineEdit_tmin.setObjectName(_fromUtf8("lineEdit_tmin"))
self.horizontalLayout_2.addWidget(self.lineEdit_tmin)
self.label_4 = QtGui.QLabel(self.centralwidget)
self.label_4.setObjectName(_fromUtf8("label_4"))
self.horizontalLayout_2.addWidget(self.label_4)
self.lineEdit_tmax = QtGui.QLineEdit(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Maximum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lineEdit_tmax.sizePolicy().hasHeightForWidth())
self.lineEdit_tmax.setSizePolicy(sizePolicy)
self.lineEdit_tmax.setMaximumSize(QtCore.QSize(80, 16777215))
self.lineEdit_tmax.setMaxLength(5)
self.lineEdit_tmax.setAlignment(QtCore.Qt.AlignCenter)
self.lineEdit_tmax.setObjectName(_fromUtf8("lineEdit_tmax"))
self.horizontalLayout_2.addWidget(self.lineEdit_tmax)
self.comboBox_time = QtGui.QComboBox(self.centralwidget)
self.comboBox_time.setObjectName(_fromUtf8("comboBox_time"))
self.comboBox_time.addItem(_fromUtf8(""))
self.comboBox_time.addItem(_fromUtf8(""))
self.comboBox_time.addItem(_fromUtf8(""))
self.horizontalLayout_2.addWidget(self.comboBox_time)
spacerItem5 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem5)
self.verticalLayout.addLayout(self.horizontalLayout_2)
self.horizontalLayout_3 = QtGui.QHBoxLayout()
self.horizontalLayout_3.setObjectName(_fromUtf8("horizontalLayout_3"))
self.label_5 = QtGui.QLabel(self.centralwidget)
self.label_5.setObjectName(_fromUtf8("label_5"))
self.horizontalLayout_3.addWidget(self.label_5)
self.lineEdit_ymin = QtGui.QLineEdit(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Maximum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lineEdit_ymin.sizePolicy().hasHeightForWidth())
self.lineEdit_ymin.setSizePolicy(sizePolicy)
self.lineEdit_ymin.setMaximumSize(QtCore.QSize(80, 16777215))
self.lineEdit_ymin.setMaxLength(5)
self.lineEdit_ymin.setAlignment(QtCore.Qt.AlignCenter)
self.lineEdit_ymin.setObjectName(_fromUtf8("lineEdit_ymin"))
self.horizontalLayout_3.addWidget(self.lineEdit_ymin)
self.label_6 = QtGui.QLabel(self.centralwidget)
self.label_6.setObjectName(_fromUtf8("label_6"))
self.horizontalLayout_3.addWidget(self.label_6)
self.lineEdit_ymax = QtGui.QLineEdit(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Maximum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lineEdit_ymax.sizePolicy().hasHeightForWidth())
self.lineEdit_ymax.setSizePolicy(sizePolicy)
self.lineEdit_ymax.setMaximumSize(QtCore.QSize(80, 16777215))
self.lineEdit_ymax.setMaxLength(5)
self.lineEdit_ymax.setAlignment(QtCore.Qt.AlignCenter)
self.lineEdit_ymax.setObjectName(_fromUtf8("lineEdit_ymax"))
self.horizontalLayout_3.addWidget(self.lineEdit_ymax)
self.comboBox_y = QtGui.QComboBox(self.centralwidget)
self.comboBox_y.setObjectName(_fromUtf8("comboBox_y"))
self.comboBox_y.addItem(_fromUtf8(""))
self.comboBox_y.addItem(_fromUtf8(""))
self.comboBox_y.addItem(_fromUtf8(""))
self.horizontalLayout_3.addWidget(self.comboBox_y)
spacerItem6 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_3.addItem(spacerItem6)
self.verticalLayout.addLayout(self.horizontalLayout_3)
self.pushButton_fitplot = QtGui.QPushButton(self.centralwidget)
self.pushButton_fitplot.setObjectName(_fromUtf8("pushButton_fitplot"))
self.verticalLayout.addWidget(self.pushButton_fitplot)
self.horizontalLayout_5 = QtGui.QHBoxLayout()
self.horizontalLayout_5.setObjectName(_fromUtf8("horizontalLayout_5"))
self.radioButton_Fit_Plot = QtGui.QRadioButton(self.centralwidget)
self.radioButton_Fit_Plot.setObjectName(_fromUtf8("radioButton_Fit_Plot"))
self.horizontalLayout_5.addWidget(self.radioButton_Fit_Plot)
self.radioButton_Time_Interval = QtGui.QRadioButton(self.centralwidget)
self.radioButton_Time_Interval.setObjectName(_fromUtf8("radioButton_Time_Interval"))
self.horizontalLayout_5.addWidget(self.radioButton_Time_Interval)
self.lineEdit_Time_Interval = QtGui.QLineEdit(self.centralwidget)
self.lineEdit_Time_Interval.setMaximumSize(QtCore.QSize(80, 16777215))
self.lineEdit_Time_Interval.setObjectName(_fromUtf8("lineEdit_Time_Interval"))
self.horizontalLayout_5.addWidget(self.lineEdit_Time_Interval)
self.label_20 = QtGui.QLabel(self.centralwidget)
self.label_20.setObjectName(_fromUtf8("label_20"))
self.horizontalLayout_5.addWidget(self.label_20)
spacerItem7 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_5.addItem(spacerItem7)
self.verticalLayout.addLayout(self.horizontalLayout_5)
self.horizontalLayout_4 = QtGui.QHBoxLayout()
self.horizontalLayout_4.setObjectName(_fromUtf8("horizontalLayout_4"))
self.pushButton_start = QtGui.QPushButton(self.centralwidget)
self.pushButton_start.setObjectName(_fromUtf8("pushButton_start"))
self.horizontalLayout_4.addWidget(self.pushButton_start)
self.pushButton_stop = QtGui.QPushButton(self.centralwidget)
self.pushButton_stop.setObjectName(_fromUtf8("pushButton_stop"))
self.horizontalLayout_4.addWidget(self.pushButton_stop)
self.verticalLayout.addLayout(self.horizontalLayout_4)
self.gridLayout.addLayout(self.verticalLayout, 1, 0, 4, 1)
self.label_title = QtGui.QLabel(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_title.sizePolicy().hasHeightForWidth())
self.label_title.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(14)
font.setBold(True)
font.setWeight(75)
self.label_title.setFont(font)
self.label_title.setLineWidth(1)
self.label_title.setAlignment(QtCore.Qt.AlignCenter)
self.label_title.setObjectName(_fromUtf8("label_title"))
self.gridLayout.addWidget(self.label_title, 0, 1, 1, 1)
self.plotWidget = PlotWidget(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.plotWidget.sizePolicy().hasHeightForWidth())
self.plotWidget.setSizePolicy(sizePolicy)
self.plotWidget.setMinimumSize(QtCore.QSize(500, 0))
self.plotWidget.setObjectName(_fromUtf8("plotWidget"))
self.gridLayout.addWidget(self.plotWidget, 1, 1, 3, 1)
self.horizontalLayout_6 = QtGui.QHBoxLayout()
self.horizontalLayout_6.setObjectName(_fromUtf8("horizontalLayout_6"))
self.checkBox_roi = QtGui.QCheckBox(self.centralwidget)
self.checkBox_roi.setObjectName(_fromUtf8("checkBox_roi"))
self.horizontalLayout_6.addWidget(self.checkBox_roi)
spacerItem8 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_6.addItem(spacerItem8)
self.pushButton_Save_Data = QtGui.QPushButton(self.centralwidget)
self.pushButton_Save_Data.setObjectName(_fromUtf8("pushButton_Save_Data"))
self.horizontalLayout_6.addWidget(self.pushButton_Save_Data)
self.gridLayout.addLayout(self.horizontalLayout_6, 4, 1, 1, 1)
self.gridLayout.setColumnStretch(0, 1)
self.gridLayout.setColumnStretch(1, 3)
self.plotWidget.raise_()
self.label_title.raise_()
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtGui.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 851, 26))
self.menubar.setObjectName(_fromUtf8("menubar"))
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtGui.QStatusBar(MainWindow)
self.statusbar.setObjectName(_fromUtf8("statusbar"))
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
self.tabWidget.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
MainWindow.setTabOrder(self.lineEdit_sample_rate, self.tabWidget)
MainWindow.setTabOrder(self.tabWidget, self.checkBox_chan0)
MainWindow.setTabOrder(self.checkBox_chan0, self.checkBox_chan1)
MainWindow.setTabOrder(self.checkBox_chan1, self.checkBox_chan2)
MainWindow.setTabOrder(self.checkBox_chan2, self.checkBox_chan3)
MainWindow.setTabOrder(self.checkBox_chan3, self.checkBox_chan4)
MainWindow.setTabOrder(self.checkBox_chan4, self.checkBox_chan5)
MainWindow.setTabOrder(self.checkBox_chan5, self.checkBox_chan6)
MainWindow.setTabOrder(self.checkBox_chan6, self.checkBox_chan7)
MainWindow.setTabOrder(self.checkBox_chan7, self.checkBox_chan10)
MainWindow.setTabOrder(self.checkBox_chan10, self.comboBox_gain_1)
MainWindow.setTabOrder(self.comboBox_gain_1, self.checkBox_chan32)
MainWindow.setTabOrder(self.checkBox_chan32, self.comboBox_gain_2)
MainWindow.setTabOrder(self.comboBox_gain_2, self.checkBox_chan54)
MainWindow.setTabOrder(self.checkBox_chan54, self.comboBox_gain_3)
MainWindow.setTabOrder(self.comboBox_gain_3, self.checkBox_chan76)
MainWindow.setTabOrder(self.checkBox_chan76, self.comboBox_gain_4)
MainWindow.setTabOrder(self.comboBox_gain_4, self.lineEdit_tmin)
MainWindow.setTabOrder(self.lineEdit_tmin, self.lineEdit_tmax)
MainWindow.setTabOrder(self.lineEdit_tmax, self.comboBox_time)
MainWindow.setTabOrder(self.comboBox_time, self.lineEdit_ymin)
MainWindow.setTabOrder(self.lineEdit_ymin, self.lineEdit_ymax)
MainWindow.setTabOrder(self.lineEdit_ymax, self.comboBox_y)
MainWindow.setTabOrder(self.comboBox_y, self.pushButton_start)
MainWindow.setTabOrder(self.pushButton_start, self.pushButton_stop)
MainWindow.setTabOrder(self.pushButton_stop, self.plotWidget)
def retranslateUi(self, MainWindow):
    """Install every user-visible (translatable) string on the UI widgets.

    Follows the pyuic convention of routing all literals through
    ``_translate`` with the "MainWindow" context so Qt Linguist can
    localise them.
    """
    def _t(text):
        # Shorthand for the translation call with this window's context.
        return _translate("MainWindow", text, None)

    MainWindow.setWindowTitle(_t("MCC DAQ Monitor"))
    self.label.setText(_t("Sample rate :"))
    self.lineEdit_sample_rate.setText(_t("100"))
    self.label_2.setText(_t("ms"))

    # --- Single-ended tab -------------------------------------------------
    self.groupBox_2.setTitle(_t("Select ADC Channel(s): 0 - 10V"))
    single_ended_captions = (
        (self.checkBox_chan5, "A5 (Pin 8)"),
        (self.checkBox_chan0, "A0 (Pin 1)"),
        (self.checkBox_chan1, "A1 (Pin 2)"),
        (self.checkBox_chan3, "A3 (Pin 5)"),
        (self.checkBox_chan4, "A4 (Pin 7)"),
        (self.checkBox_chan2, "A2 (Pin 4)"),
        (self.checkBox_chan6, "A6 (Pin 10)"),
        (self.checkBox_chan7, "A7 (Pin 11)"),
    )
    for box, caption in single_ended_captions:
        box.setText(_t(caption))
    self.label_8.setText(_t(" ."))
    self.label_14.setText(_t(" ."))
    self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_single), _t("Single Ended"))

    # --- Differential tab -------------------------------------------------
    self.groupBox_3.setTitle(_t("Select ADC Channel-Gain Pair(s):"))
    self.label_16.setText(_t(" ."))
    differential_captions = (
        (self.checkBox_chan32, "A2 - A3"),
        (self.checkBox_chan76, "A6 - A7"),
        (self.checkBox_chan54, "A4 - A5"),
        (self.checkBox_chan10, "A0 - A1"),
    )
    for box, caption in differential_captions:
        box.setText(_t(caption))
    # The four gain selectors share an identical item list; fill them in
    # one pass instead of 32 near-duplicate statements.
    gain_captions = ("+/-20V", "+/-10V", "+/-5V", "+/-4V",
                     "+/-2.5V", "+/-2V", "+/-1.25V", "+/-1V")
    gain_combos = (self.comboBox_gain_1, self.comboBox_gain_2,
                   self.comboBox_gain_3, self.comboBox_gain_4)
    for combo in gain_combos:
        for index, caption in enumerate(gain_captions):
            combo.setItemText(index, _t(caption))
    self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_diff), _t("Differential"))

    # --- Plot-range controls ---------------------------------------------
    self.label_7.setText(_t("Plot interval:"))
    self.label_3.setText(_t("X:"))
    self.label_4.setText(_t("to"))
    for index, unit in enumerate(("s", "ms", "us")):
        self.comboBox_time.setItemText(index, _t(unit))
    self.label_5.setText(_t("Y:"))
    self.label_6.setText(_t("to"))
    for index, unit in enumerate(("V", "mV", "uV")):
        self.comboBox_y.setItemText(index, _t(unit))

    # --- Action buttons and misc controls --------------------------------
    self.pushButton_fitplot.setText(_t("Fit Plot"))
    self.radioButton_Fit_Plot.setText(_t("Fit Plot"))
    self.radioButton_Time_Interval.setText(_t("Sample Interval"))
    self.label_20.setText(_t("samples"))
    self.pushButton_start.setText(_t("Start"))
    self.pushButton_stop.setText(_t("Stop"))
    self.label_title.setText(_t("Plot Title"))
    self.checkBox_roi.setText(_t("Enable Region of Interest (ROI)"))
    self.pushButton_Save_Data.setText(_t("Save Data (CSV)"))
from pyqtgraph import PlotWidget
| 61.237853
| 122
| 0.700314
| 15,636
| 132,335
| 5.869468
| 0.022768
| 0.154683
| 0.094143
| 0.123563
| 0.880785
| 0.863492
| 0.836633
| 0.824767
| 0.821204
| 0.805089
| 0
| 0.042146
| 0.1756
| 132,335
| 2,160
| 123
| 61.266204
| 0.799078
| 0.00139
| 0
| 0.794501
| 1
| 0
| 0.016951
| 0.000356
| 0
| 0
| 0
| 0
| 0
| 1
| 0.00233
| false
| 0
| 0.000932
| 0.001398
| 0.005126
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
e400c09b4be016d15c9c6c8de43635c46400f192
| 96,723
|
py
|
Python
|
sdk/python/pulumi_aws/s3/bucket.py
|
dmelo/pulumi-aws
|
dd1a08d1fb93bab0d046aa410ca660f05ca0a58c
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/s3/bucket.py
|
dmelo/pulumi-aws
|
dd1a08d1fb93bab0d046aa410ca660f05ca0a58c
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/s3/bucket.py
|
dmelo/pulumi-aws
|
dd1a08d1fb93bab0d046aa410ca660f05ca0a58c
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['BucketArgs', 'Bucket']
@pulumi.input_type
class BucketArgs:
def __init__(__self__, *,
             acceleration_status: Optional[pulumi.Input[str]] = None,
             acl: Optional[pulumi.Input[Union[str, 'CannedAcl']]] = None,
             arn: Optional[pulumi.Input[str]] = None,
             bucket: Optional[pulumi.Input[str]] = None,
             bucket_prefix: Optional[pulumi.Input[str]] = None,
             cors_rules: Optional[pulumi.Input[Sequence[pulumi.Input['BucketCorsRuleArgs']]]] = None,
             force_destroy: Optional[pulumi.Input[bool]] = None,
             grants: Optional[pulumi.Input[Sequence[pulumi.Input['BucketGrantArgs']]]] = None,
             hosted_zone_id: Optional[pulumi.Input[str]] = None,
             lifecycle_rules: Optional[pulumi.Input[Sequence[pulumi.Input['BucketLifecycleRuleArgs']]]] = None,
             loggings: Optional[pulumi.Input[Sequence[pulumi.Input['BucketLoggingArgs']]]] = None,
             object_lock_configuration: Optional[pulumi.Input['BucketObjectLockConfigurationArgs']] = None,
             policy: Optional[pulumi.Input[str]] = None,
             replication_configuration: Optional[pulumi.Input['BucketReplicationConfigurationArgs']] = None,
             request_payer: Optional[pulumi.Input[str]] = None,
             server_side_encryption_configuration: Optional[pulumi.Input['BucketServerSideEncryptionConfigurationArgs']] = None,
             tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
             versioning: Optional[pulumi.Input['BucketVersioningArgs']] = None,
             website: Optional[pulumi.Input['BucketWebsiteArgs']] = None,
             website_domain: Optional[pulumi.Input[str]] = None,
             website_endpoint: Optional[pulumi.Input[str]] = None):
    """
    The set of arguments for constructing a Bucket resource.

    Every argument is keyword-only and optional; only the arguments
    that are explicitly supplied (i.e. not ``None``) are recorded on
    the input type via ``pulumi.set``.

    :param acceleration_status: Accelerate configuration of the bucket — `Enabled` or `Suspended`.
    :param acl: The canned ACL to apply (e.g. `private`, `public-read`); defaults to `private`, conflicts with `grant`.
    :param arn: The ARN of the bucket, of the form `arn:aws:s3:::bucketname`.
    :param bucket: The bucket name; auto-assigned when omitted. Lowercase, at most 63 characters.
    :param bucket_prefix: Prefix for a generated unique bucket name; conflicts with `bucket`. At most 37 characters.
    :param cors_rules: Cross-Origin Resource Sharing rules (documented below).
    :param force_destroy: Delete all objects (including locked ones) so the bucket can be destroyed. Objects are *not* recoverable.
    :param grants: ACL policy grants (documented below); conflicts with `acl`.
    :param hosted_zone_id: The Route 53 Hosted Zone ID for this bucket's region.
    :param lifecycle_rules: Object lifecycle management configuration (documented below).
    :param loggings: Bucket logging settings (documented below).
    :param object_lock_configuration: S3 object-locking configuration (documented below).
    :param policy: A valid bucket-policy JSON document; use a verbose/specific form to avoid perpetual diffs.
    :param request_payer: Who bears the data-transfer cost — `BucketOwner` (default) or `Requester`.
    :param server_side_encryption_configuration: Server-side encryption configuration (documented below).
    :param tags: A mapping of tags to assign to the bucket.
    :param versioning: Versioning state (documented below).
    :param website: A website object (documented below).
    :param website_domain: Website-endpoint domain (empty string when no website); used for Route 53 alias records.
    :param website_endpoint: The website endpoint (empty string when no website).
    """
    # Pair each constructor argument with its snake_case property key and
    # record only those the caller actually provided, in declaration order.
    supplied = (
        ("acceleration_status", acceleration_status),
        ("acl", acl),
        ("arn", arn),
        ("bucket", bucket),
        ("bucket_prefix", bucket_prefix),
        ("cors_rules", cors_rules),
        ("force_destroy", force_destroy),
        ("grants", grants),
        ("hosted_zone_id", hosted_zone_id),
        ("lifecycle_rules", lifecycle_rules),
        ("loggings", loggings),
        ("object_lock_configuration", object_lock_configuration),
        ("policy", policy),
        ("replication_configuration", replication_configuration),
        ("request_payer", request_payer),
        ("server_side_encryption_configuration", server_side_encryption_configuration),
        ("tags", tags),
        ("versioning", versioning),
        ("website", website),
        ("website_domain", website_domain),
        ("website_endpoint", website_endpoint),
    )
    for key, value in supplied:
        if value is not None:
            pulumi.set(__self__, key, value)
    @property
    @pulumi.getter(name="accelerationStatus")
    def acceleration_status(self) -> Optional[pulumi.Input[str]]:
        """
        Sets the accelerate configuration of an existing bucket. Can be `Enabled` or `Suspended`.
        """
        return pulumi.get(self, "acceleration_status")
    @acceleration_status.setter
    def acceleration_status(self, value: Optional[pulumi.Input[str]]) -> None:
        pulumi.set(self, "acceleration_status", value)
    @property
    @pulumi.getter
    def acl(self) -> Optional[pulumi.Input[Union[str, 'CannedAcl']]]:
        """
        The [canned ACL](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl) to apply. Valid values are `private`, `public-read`, `public-read-write`, `aws-exec-read`, `authenticated-read`, and `log-delivery-write`. Defaults to `private`. Conflicts with `grant`.
        """
        return pulumi.get(self, "acl")
    @acl.setter
    def acl(self, value: Optional[pulumi.Input[Union[str, 'CannedAcl']]]) -> None:
        pulumi.set(self, "acl", value)
    @property
    @pulumi.getter
    def arn(self) -> Optional[pulumi.Input[str]]:
        """
        The ARN of the bucket. Will be of format `arn:aws:s3:::bucketname`.
        """
        return pulumi.get(self, "arn")
    @arn.setter
    def arn(self, value: Optional[pulumi.Input[str]]) -> None:
        pulumi.set(self, "arn", value)
    @property
    @pulumi.getter
    def bucket(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the bucket. If omitted, this provider will assign a random, unique name. Must be lowercase and less than or equal to 63 characters in length. A full list of bucket naming rules [may be found here](https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html).
        """
        return pulumi.get(self, "bucket")
    @bucket.setter
    def bucket(self, value: Optional[pulumi.Input[str]]) -> None:
        pulumi.set(self, "bucket", value)
    @property
    @pulumi.getter(name="bucketPrefix")
    def bucket_prefix(self) -> Optional[pulumi.Input[str]]:
        """
        Creates a unique bucket name beginning with the specified prefix. Conflicts with `bucket`. Must be lowercase and less than or equal to 37 characters in length. A full list of bucket naming rules [may be found here](https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html).
        """
        return pulumi.get(self, "bucket_prefix")
    @bucket_prefix.setter
    def bucket_prefix(self, value: Optional[pulumi.Input[str]]) -> None:
        pulumi.set(self, "bucket_prefix", value)
    @property
    @pulumi.getter(name="corsRules")
    def cors_rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['BucketCorsRuleArgs']]]]:
        """
        A rule of [Cross-Origin Resource Sharing](https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) (documented below).
        """
        return pulumi.get(self, "cors_rules")
    @cors_rules.setter
    def cors_rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['BucketCorsRuleArgs']]]]) -> None:
        pulumi.set(self, "cors_rules", value)
    @property
    @pulumi.getter(name="forceDestroy")
    def force_destroy(self) -> Optional[pulumi.Input[bool]]:
        """
        A boolean that indicates all objects (including any [locked objects](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html)) should be deleted from the bucket so that the bucket can be destroyed without error. These objects are *not* recoverable.
        """
        return pulumi.get(self, "force_destroy")
    @force_destroy.setter
    def force_destroy(self, value: Optional[pulumi.Input[bool]]) -> None:
        pulumi.set(self, "force_destroy", value)
    @property
    @pulumi.getter
    def grants(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['BucketGrantArgs']]]]:
        """
        An [ACL policy grant](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#sample-acl) (documented below). Conflicts with `acl`.
        """
        return pulumi.get(self, "grants")
    @grants.setter
    def grants(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['BucketGrantArgs']]]]) -> None:
        pulumi.set(self, "grants", value)
    @property
    @pulumi.getter(name="hostedZoneId")
    def hosted_zone_id(self) -> Optional[pulumi.Input[str]]:
        """
        The [Route 53 Hosted Zone ID](https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_website_region_endpoints) for this bucket's region.
        """
        return pulumi.get(self, "hosted_zone_id")
    @hosted_zone_id.setter
    def hosted_zone_id(self, value: Optional[pulumi.Input[str]]) -> None:
        pulumi.set(self, "hosted_zone_id", value)
    @property
    @pulumi.getter(name="lifecycleRules")
    def lifecycle_rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['BucketLifecycleRuleArgs']]]]:
        """
        A configuration of [object lifecycle management](http://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) (documented below).
        """
        return pulumi.get(self, "lifecycle_rules")
    @lifecycle_rules.setter
    def lifecycle_rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['BucketLifecycleRuleArgs']]]]) -> None:
        pulumi.set(self, "lifecycle_rules", value)
    @property
    @pulumi.getter
    def loggings(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['BucketLoggingArgs']]]]:
        """
        Settings of [bucket logging](https://docs.aws.amazon.com/AmazonS3/latest/UG/ManagingBucketLogging.html) (documented below).
        """
        return pulumi.get(self, "loggings")
    @loggings.setter
    def loggings(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['BucketLoggingArgs']]]]) -> None:
        pulumi.set(self, "loggings", value)
    @property
    @pulumi.getter(name="objectLockConfiguration")
    def object_lock_configuration(self) -> Optional[pulumi.Input['BucketObjectLockConfigurationArgs']]:
        """
        A configuration of [S3 object locking](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) (documented below).
        """
        return pulumi.get(self, "object_lock_configuration")
    @object_lock_configuration.setter
    def object_lock_configuration(self, value: Optional[pulumi.Input['BucketObjectLockConfigurationArgs']]) -> None:
        pulumi.set(self, "object_lock_configuration", value)
    @property
    @pulumi.getter
    def policy(self) -> Optional[pulumi.Input[str]]:
        """
        A valid [bucket policy](https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html) JSON document. Note that if the policy document is not specific enough (but still valid), the provider may view the policy as constantly changing in a `pulumi up / preview / update`. In this case, please make sure you use the verbose/specific version of the policy.
        """
        return pulumi.get(self, "policy")
    @policy.setter
    def policy(self, value: Optional[pulumi.Input[str]]) -> None:
        pulumi.set(self, "policy", value)
    @property
    @pulumi.getter(name="replicationConfiguration")
    def replication_configuration(self) -> Optional[pulumi.Input['BucketReplicationConfigurationArgs']]:
        """
        A configuration of [replication configuration](http://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html) (documented below).
        """
        return pulumi.get(self, "replication_configuration")
    @replication_configuration.setter
    def replication_configuration(self, value: Optional[pulumi.Input['BucketReplicationConfigurationArgs']]) -> None:
        pulumi.set(self, "replication_configuration", value)
    @property
    @pulumi.getter(name="requestPayer")
    def request_payer(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies who should bear the cost of Amazon S3 data transfer.
        Can be either `BucketOwner` or `Requester`. By default, the owner of the S3 bucket would incur
        the costs of any data transfer. See [Requester Pays Buckets](http://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html)
        developer guide for more information.
        """
        return pulumi.get(self, "request_payer")
    @request_payer.setter
    def request_payer(self, value: Optional[pulumi.Input[str]]) -> None:
        pulumi.set(self, "request_payer", value)
    @property
    @pulumi.getter(name="serverSideEncryptionConfiguration")
    def server_side_encryption_configuration(self) -> Optional[pulumi.Input['BucketServerSideEncryptionConfigurationArgs']]:
        """
        A configuration of [server-side encryption configuration](http://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) (documented below).
        """
        return pulumi.get(self, "server_side_encryption_configuration")
    @server_side_encryption_configuration.setter
    def server_side_encryption_configuration(self, value: Optional[pulumi.Input['BucketServerSideEncryptionConfigurationArgs']]) -> None:
        pulumi.set(self, "server_side_encryption_configuration", value)
    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        A mapping of tags to assign to the bucket.
        """
        return pulumi.get(self, "tags")
    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]) -> None:
        pulumi.set(self, "tags", value)
    @property
    @pulumi.getter
    def versioning(self) -> Optional[pulumi.Input['BucketVersioningArgs']]:
        """
        A state of [versioning](https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html) (documented below).
        """
        return pulumi.get(self, "versioning")
    @versioning.setter
    def versioning(self, value: Optional[pulumi.Input['BucketVersioningArgs']]) -> None:
        pulumi.set(self, "versioning", value)
    @property
    @pulumi.getter
    def website(self) -> Optional[pulumi.Input['BucketWebsiteArgs']]:
        """
        A website object (documented below).
        """
        return pulumi.get(self, "website")
    @website.setter
    def website(self, value: Optional[pulumi.Input['BucketWebsiteArgs']]) -> None:
        pulumi.set(self, "website", value)
    @property
    @pulumi.getter(name="websiteDomain")
    def website_domain(self) -> Optional[pulumi.Input[str]]:
        """
        The domain of the website endpoint, if the bucket is configured with a website. If not, this will be an empty string. This is used to create Route 53 alias records.
        """
        return pulumi.get(self, "website_domain")
    @website_domain.setter
    def website_domain(self, value: Optional[pulumi.Input[str]]) -> None:
        pulumi.set(self, "website_domain", value)
    @property
    @pulumi.getter(name="websiteEndpoint")
    def website_endpoint(self) -> Optional[pulumi.Input[str]]:
        """
        The website endpoint, if the bucket is configured with a website. If not, this will be an empty string.
        """
        return pulumi.get(self, "website_endpoint")
    @website_endpoint.setter
    def website_endpoint(self, value: Optional[pulumi.Input[str]]) -> None:
        pulumi.set(self, "website_endpoint", value)
@pulumi.input_type
class _BucketState:
    def __init__(__self__, *,
                 acceleration_status: Optional[pulumi.Input[str]] = None,
                 acl: Optional[pulumi.Input[Union[str, 'CannedAcl']]] = None,
                 arn: Optional[pulumi.Input[str]] = None,
                 bucket: Optional[pulumi.Input[str]] = None,
                 bucket_domain_name: Optional[pulumi.Input[str]] = None,
                 bucket_prefix: Optional[pulumi.Input[str]] = None,
                 bucket_regional_domain_name: Optional[pulumi.Input[str]] = None,
                 cors_rules: Optional[pulumi.Input[Sequence[pulumi.Input['BucketCorsRuleArgs']]]] = None,
                 force_destroy: Optional[pulumi.Input[bool]] = None,
                 grants: Optional[pulumi.Input[Sequence[pulumi.Input['BucketGrantArgs']]]] = None,
                 hosted_zone_id: Optional[pulumi.Input[str]] = None,
                 lifecycle_rules: Optional[pulumi.Input[Sequence[pulumi.Input['BucketLifecycleRuleArgs']]]] = None,
                 loggings: Optional[pulumi.Input[Sequence[pulumi.Input['BucketLoggingArgs']]]] = None,
                 object_lock_configuration: Optional[pulumi.Input['BucketObjectLockConfigurationArgs']] = None,
                 policy: Optional[pulumi.Input[str]] = None,
                 region: Optional[pulumi.Input[str]] = None,
                 replication_configuration: Optional[pulumi.Input['BucketReplicationConfigurationArgs']] = None,
                 request_payer: Optional[pulumi.Input[str]] = None,
                 server_side_encryption_configuration: Optional[pulumi.Input['BucketServerSideEncryptionConfigurationArgs']] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 versioning: Optional[pulumi.Input['BucketVersioningArgs']] = None,
                 website: Optional[pulumi.Input['BucketWebsiteArgs']] = None,
                 website_domain: Optional[pulumi.Input[str]] = None,
                 website_endpoint: Optional[pulumi.Input[str]] = None) -> None:
        """
        Input properties used for looking up and filtering Bucket resources.
        :param pulumi.Input[str] acceleration_status: Sets the accelerate configuration of an existing bucket. Can be `Enabled` or `Suspended`.
        :param pulumi.Input[Union[str, 'CannedAcl']] acl: The [canned ACL](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl) to apply. Valid values are `private`, `public-read`, `public-read-write`, `aws-exec-read`, `authenticated-read`, and `log-delivery-write`. Defaults to `private`. Conflicts with `grant`.
        :param pulumi.Input[str] arn: The ARN of the bucket. Will be of format `arn:aws:s3:::bucketname`.
        :param pulumi.Input[str] bucket: The name of the bucket. If omitted, this provider will assign a random, unique name. Must be lowercase and less than or equal to 63 characters in length. A full list of bucket naming rules [may be found here](https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html).
        :param pulumi.Input[str] bucket_domain_name: The bucket domain name. Will be of format `bucketname.s3.amazonaws.com`.
        :param pulumi.Input[str] bucket_prefix: Creates a unique bucket name beginning with the specified prefix. Conflicts with `bucket`. Must be lowercase and less than or equal to 37 characters in length. A full list of bucket naming rules [may be found here](https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html).
        :param pulumi.Input[str] bucket_regional_domain_name: The bucket region-specific domain name. The bucket domain name including the region name, please refer [here](https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) for format. Note: The AWS CloudFront allows specifying S3 region-specific endpoint when creating S3 origin, it will prevent [redirect issues](https://forums.aws.amazon.com/thread.jspa?threadID=216814) from CloudFront to S3 Origin URL.
        :param pulumi.Input[Sequence[pulumi.Input['BucketCorsRuleArgs']]] cors_rules: A rule of [Cross-Origin Resource Sharing](https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) (documented below).
        :param pulumi.Input[bool] force_destroy: A boolean that indicates all objects (including any [locked objects](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html)) should be deleted from the bucket so that the bucket can be destroyed without error. These objects are *not* recoverable.
        :param pulumi.Input[Sequence[pulumi.Input['BucketGrantArgs']]] grants: An [ACL policy grant](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#sample-acl) (documented below). Conflicts with `acl`.
        :param pulumi.Input[str] hosted_zone_id: The [Route 53 Hosted Zone ID](https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_website_region_endpoints) for this bucket's region.
        :param pulumi.Input[Sequence[pulumi.Input['BucketLifecycleRuleArgs']]] lifecycle_rules: A configuration of [object lifecycle management](http://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) (documented below).
        :param pulumi.Input[Sequence[pulumi.Input['BucketLoggingArgs']]] loggings: Settings of [bucket logging](https://docs.aws.amazon.com/AmazonS3/latest/UG/ManagingBucketLogging.html) (documented below).
        :param pulumi.Input['BucketObjectLockConfigurationArgs'] object_lock_configuration: A configuration of [S3 object locking](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) (documented below)
        :param pulumi.Input[str] policy: A valid [bucket policy](https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html) JSON document. Note that if the policy document is not specific enough (but still valid), the provider may view the policy as constantly changing in a `pulumi up / preview / update`. In this case, please make sure you use the verbose/specific version of the policy.
        :param pulumi.Input[str] region: The AWS region this bucket resides in.
        :param pulumi.Input['BucketReplicationConfigurationArgs'] replication_configuration: A configuration of [replication configuration](http://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html) (documented below).
        :param pulumi.Input[str] request_payer: Specifies who should bear the cost of Amazon S3 data transfer.
               Can be either `BucketOwner` or `Requester`. By default, the owner of the S3 bucket would incur
               the costs of any data transfer. See [Requester Pays Buckets](http://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html)
               developer guide for more information.
        :param pulumi.Input['BucketServerSideEncryptionConfigurationArgs'] server_side_encryption_configuration: A configuration of [server-side encryption configuration](http://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) (documented below)
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the bucket.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: A map of tags assigned to the resource, including those inherited from the provider.
        :param pulumi.Input['BucketVersioningArgs'] versioning: A state of [versioning](https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html) (documented below)
        :param pulumi.Input['BucketWebsiteArgs'] website: A website object (documented below).
        :param pulumi.Input[str] website_domain: The domain of the website endpoint, if the bucket is configured with a website. If not, this will be an empty string. This is used to create Route 53 alias records.
        :param pulumi.Input[str] website_endpoint: The website endpoint, if the bucket is configured with a website. If not, this will be an empty string.
        """
        # Only explicitly-provided values are stored in the property bag, so
        # unset state fields remain absent rather than present-as-None.
        if acceleration_status is not None:
            pulumi.set(__self__, "acceleration_status", acceleration_status)
        if acl is not None:
            pulumi.set(__self__, "acl", acl)
        if arn is not None:
            pulumi.set(__self__, "arn", arn)
        if bucket is not None:
            pulumi.set(__self__, "bucket", bucket)
        if bucket_domain_name is not None:
            pulumi.set(__self__, "bucket_domain_name", bucket_domain_name)
        if bucket_prefix is not None:
            pulumi.set(__self__, "bucket_prefix", bucket_prefix)
        if bucket_regional_domain_name is not None:
            pulumi.set(__self__, "bucket_regional_domain_name", bucket_regional_domain_name)
        if cors_rules is not None:
            pulumi.set(__self__, "cors_rules", cors_rules)
        if force_destroy is not None:
            pulumi.set(__self__, "force_destroy", force_destroy)
        if grants is not None:
            pulumi.set(__self__, "grants", grants)
        if hosted_zone_id is not None:
            pulumi.set(__self__, "hosted_zone_id", hosted_zone_id)
        if lifecycle_rules is not None:
            pulumi.set(__self__, "lifecycle_rules", lifecycle_rules)
        if loggings is not None:
            pulumi.set(__self__, "loggings", loggings)
        if object_lock_configuration is not None:
            pulumi.set(__self__, "object_lock_configuration", object_lock_configuration)
        if policy is not None:
            pulumi.set(__self__, "policy", policy)
        if region is not None:
            pulumi.set(__self__, "region", region)
        if replication_configuration is not None:
            pulumi.set(__self__, "replication_configuration", replication_configuration)
        if request_payer is not None:
            pulumi.set(__self__, "request_payer", request_payer)
        if server_side_encryption_configuration is not None:
            pulumi.set(__self__, "server_side_encryption_configuration", server_side_encryption_configuration)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
        if tags_all is not None:
            pulumi.set(__self__, "tags_all", tags_all)
        if versioning is not None:
            pulumi.set(__self__, "versioning", versioning)
        if website is not None:
            pulumi.set(__self__, "website", website)
        if website_domain is not None:
            pulumi.set(__self__, "website_domain", website_domain)
        if website_endpoint is not None:
            pulumi.set(__self__, "website_endpoint", website_endpoint)
    @property
    @pulumi.getter(name="accelerationStatus")
    def acceleration_status(self) -> Optional[pulumi.Input[str]]:
        """
        Sets the accelerate configuration of an existing bucket. Can be `Enabled` or `Suspended`.
        """
        return pulumi.get(self, "acceleration_status")
    @acceleration_status.setter
    def acceleration_status(self, value: Optional[pulumi.Input[str]]) -> None:
        pulumi.set(self, "acceleration_status", value)
    @property
    @pulumi.getter
    def acl(self) -> Optional[pulumi.Input[Union[str, 'CannedAcl']]]:
        """
        The [canned ACL](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl) to apply. Valid values are `private`, `public-read`, `public-read-write`, `aws-exec-read`, `authenticated-read`, and `log-delivery-write`. Defaults to `private`. Conflicts with `grant`.
        """
        return pulumi.get(self, "acl")
    @acl.setter
    def acl(self, value: Optional[pulumi.Input[Union[str, 'CannedAcl']]]) -> None:
        pulumi.set(self, "acl", value)
    @property
    @pulumi.getter
    def arn(self) -> Optional[pulumi.Input[str]]:
        """
        The ARN of the bucket. Will be of format `arn:aws:s3:::bucketname`.
        """
        return pulumi.get(self, "arn")
    @arn.setter
    def arn(self, value: Optional[pulumi.Input[str]]) -> None:
        pulumi.set(self, "arn", value)
    @property
    @pulumi.getter
    def bucket(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the bucket. If omitted, this provider will assign a random, unique name. Must be lowercase and less than or equal to 63 characters in length. A full list of bucket naming rules [may be found here](https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html).
        """
        return pulumi.get(self, "bucket")
    @bucket.setter
    def bucket(self, value: Optional[pulumi.Input[str]]) -> None:
        pulumi.set(self, "bucket", value)
    @property
    @pulumi.getter(name="bucketDomainName")
    def bucket_domain_name(self) -> Optional[pulumi.Input[str]]:
        """
        The bucket domain name. Will be of format `bucketname.s3.amazonaws.com`.
        """
        return pulumi.get(self, "bucket_domain_name")
    @bucket_domain_name.setter
    def bucket_domain_name(self, value: Optional[pulumi.Input[str]]) -> None:
        pulumi.set(self, "bucket_domain_name", value)
    @property
    @pulumi.getter(name="bucketPrefix")
    def bucket_prefix(self) -> Optional[pulumi.Input[str]]:
        """
        Creates a unique bucket name beginning with the specified prefix. Conflicts with `bucket`. Must be lowercase and less than or equal to 37 characters in length. A full list of bucket naming rules [may be found here](https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html).
        """
        return pulumi.get(self, "bucket_prefix")
    @bucket_prefix.setter
    def bucket_prefix(self, value: Optional[pulumi.Input[str]]) -> None:
        pulumi.set(self, "bucket_prefix", value)
    @property
    @pulumi.getter(name="bucketRegionalDomainName")
    def bucket_regional_domain_name(self) -> Optional[pulumi.Input[str]]:
        """
        The bucket region-specific domain name. The bucket domain name including the region name, please refer [here](https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) for format. Note: The AWS CloudFront allows specifying S3 region-specific endpoint when creating S3 origin, it will prevent [redirect issues](https://forums.aws.amazon.com/thread.jspa?threadID=216814) from CloudFront to S3 Origin URL.
        """
        return pulumi.get(self, "bucket_regional_domain_name")
    @bucket_regional_domain_name.setter
    def bucket_regional_domain_name(self, value: Optional[pulumi.Input[str]]) -> None:
        pulumi.set(self, "bucket_regional_domain_name", value)
    @property
    @pulumi.getter(name="corsRules")
    def cors_rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['BucketCorsRuleArgs']]]]:
        """
        A rule of [Cross-Origin Resource Sharing](https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) (documented below).
        """
        return pulumi.get(self, "cors_rules")
    @cors_rules.setter
    def cors_rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['BucketCorsRuleArgs']]]]) -> None:
        pulumi.set(self, "cors_rules", value)
    @property
    @pulumi.getter(name="forceDestroy")
    def force_destroy(self) -> Optional[pulumi.Input[bool]]:
        """
        A boolean that indicates all objects (including any [locked objects](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html)) should be deleted from the bucket so that the bucket can be destroyed without error. These objects are *not* recoverable.
        """
        return pulumi.get(self, "force_destroy")
    @force_destroy.setter
    def force_destroy(self, value: Optional[pulumi.Input[bool]]) -> None:
        pulumi.set(self, "force_destroy", value)
    @property
    @pulumi.getter
    def grants(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['BucketGrantArgs']]]]:
        """
        An [ACL policy grant](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#sample-acl) (documented below). Conflicts with `acl`.
        """
        return pulumi.get(self, "grants")
    @grants.setter
    def grants(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['BucketGrantArgs']]]]) -> None:
        pulumi.set(self, "grants", value)
    @property
    @pulumi.getter(name="hostedZoneId")
    def hosted_zone_id(self) -> Optional[pulumi.Input[str]]:
        """
        The [Route 53 Hosted Zone ID](https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_website_region_endpoints) for this bucket's region.
        """
        return pulumi.get(self, "hosted_zone_id")
    @hosted_zone_id.setter
    def hosted_zone_id(self, value: Optional[pulumi.Input[str]]) -> None:
        pulumi.set(self, "hosted_zone_id", value)
    @property
    @pulumi.getter(name="lifecycleRules")
    def lifecycle_rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['BucketLifecycleRuleArgs']]]]:
        """
        A configuration of [object lifecycle management](http://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) (documented below).
        """
        return pulumi.get(self, "lifecycle_rules")
    @lifecycle_rules.setter
    def lifecycle_rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['BucketLifecycleRuleArgs']]]]) -> None:
        pulumi.set(self, "lifecycle_rules", value)
    @property
    @pulumi.getter
    def loggings(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['BucketLoggingArgs']]]]:
        """
        Settings of [bucket logging](https://docs.aws.amazon.com/AmazonS3/latest/UG/ManagingBucketLogging.html) (documented below).
        """
        return pulumi.get(self, "loggings")
    @loggings.setter
    def loggings(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['BucketLoggingArgs']]]]) -> None:
        pulumi.set(self, "loggings", value)
    @property
    @pulumi.getter(name="objectLockConfiguration")
    def object_lock_configuration(self) -> Optional[pulumi.Input['BucketObjectLockConfigurationArgs']]:
        """
        A configuration of [S3 object locking](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) (documented below).
        """
        return pulumi.get(self, "object_lock_configuration")
    @object_lock_configuration.setter
    def object_lock_configuration(self, value: Optional[pulumi.Input['BucketObjectLockConfigurationArgs']]) -> None:
        pulumi.set(self, "object_lock_configuration", value)
    @property
    @pulumi.getter
    def policy(self) -> Optional[pulumi.Input[str]]:
        """
        A valid [bucket policy](https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html) JSON document. Note that if the policy document is not specific enough (but still valid), the provider may view the policy as constantly changing in a `pulumi up / preview / update`. In this case, please make sure you use the verbose/specific version of the policy.
        """
        return pulumi.get(self, "policy")
    @policy.setter
    def policy(self, value: Optional[pulumi.Input[str]]) -> None:
        pulumi.set(self, "policy", value)
    @property
    @pulumi.getter
    def region(self) -> Optional[pulumi.Input[str]]:
        """
        The AWS region this bucket resides in.
        """
        return pulumi.get(self, "region")
    @region.setter
    def region(self, value: Optional[pulumi.Input[str]]) -> None:
        pulumi.set(self, "region", value)
    @property
    @pulumi.getter(name="replicationConfiguration")
    def replication_configuration(self) -> Optional[pulumi.Input['BucketReplicationConfigurationArgs']]:
        """
        A configuration of [replication configuration](http://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html) (documented below).
        """
        return pulumi.get(self, "replication_configuration")
    @replication_configuration.setter
    def replication_configuration(self, value: Optional[pulumi.Input['BucketReplicationConfigurationArgs']]) -> None:
        pulumi.set(self, "replication_configuration", value)
    @property
    @pulumi.getter(name="requestPayer")
    def request_payer(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies who should bear the cost of Amazon S3 data transfer.
        Can be either `BucketOwner` or `Requester`. By default, the owner of the S3 bucket would incur
        the costs of any data transfer. See [Requester Pays Buckets](http://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html)
        developer guide for more information.
        """
        return pulumi.get(self, "request_payer")
    @request_payer.setter
    def request_payer(self, value: Optional[pulumi.Input[str]]) -> None:
        pulumi.set(self, "request_payer", value)
    @property
    @pulumi.getter(name="serverSideEncryptionConfiguration")
    def server_side_encryption_configuration(self) -> Optional[pulumi.Input['BucketServerSideEncryptionConfigurationArgs']]:
        """
        A configuration of [server-side encryption configuration](http://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) (documented below).
        """
        return pulumi.get(self, "server_side_encryption_configuration")
    @server_side_encryption_configuration.setter
    def server_side_encryption_configuration(self, value: Optional[pulumi.Input['BucketServerSideEncryptionConfigurationArgs']]) -> None:
        pulumi.set(self, "server_side_encryption_configuration", value)
    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        A mapping of tags to assign to the bucket.
        """
        return pulumi.get(self, "tags")
    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]) -> None:
        pulumi.set(self, "tags", value)
    @property
    @pulumi.getter(name="tagsAll")
    def tags_all(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        A map of tags assigned to the resource, including those inherited from the provider.
        """
        return pulumi.get(self, "tags_all")
    @tags_all.setter
    def tags_all(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]) -> None:
        pulumi.set(self, "tags_all", value)
    @property
    @pulumi.getter
    def versioning(self) -> Optional[pulumi.Input['BucketVersioningArgs']]:
        """
        A state of [versioning](https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html) (documented below).
        """
        return pulumi.get(self, "versioning")
    @versioning.setter
    def versioning(self, value: Optional[pulumi.Input['BucketVersioningArgs']]) -> None:
        pulumi.set(self, "versioning", value)
    @property
    @pulumi.getter
    def website(self) -> Optional[pulumi.Input['BucketWebsiteArgs']]:
        """
        A website object (documented below).
        """
        return pulumi.get(self, "website")
    @website.setter
    def website(self, value: Optional[pulumi.Input['BucketWebsiteArgs']]) -> None:
        pulumi.set(self, "website", value)
    @property
    @pulumi.getter(name="websiteDomain")
    def website_domain(self) -> Optional[pulumi.Input[str]]:
        """
        The domain of the website endpoint, if the bucket is configured with a website. If not, this will be an empty string. This is used to create Route 53 alias records.
        """
        return pulumi.get(self, "website_domain")
    @website_domain.setter
    def website_domain(self, value: Optional[pulumi.Input[str]]) -> None:
        pulumi.set(self, "website_domain", value)
    @property
    @pulumi.getter(name="websiteEndpoint")
    def website_endpoint(self) -> Optional[pulumi.Input[str]]:
        """
        The website endpoint, if the bucket is configured with a website. If not, this will be an empty string.
        """
        return pulumi.get(self, "website_endpoint")
    @website_endpoint.setter
    def website_endpoint(self, value: Optional[pulumi.Input[str]]) -> None:
        pulumi.set(self, "website_endpoint", value)
class Bucket(pulumi.CustomResource):
@overload
def __init__(__self__,
             resource_name: str,
             opts: Optional[pulumi.ResourceOptions] = None,
             acceleration_status: Optional[pulumi.Input[str]] = None,
             acl: Optional[pulumi.Input[Union[str, 'CannedAcl']]] = None,
             arn: Optional[pulumi.Input[str]] = None,
             bucket: Optional[pulumi.Input[str]] = None,
             bucket_prefix: Optional[pulumi.Input[str]] = None,
             cors_rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['BucketCorsRuleArgs']]]]] = None,
             force_destroy: Optional[pulumi.Input[bool]] = None,
             grants: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['BucketGrantArgs']]]]] = None,
             hosted_zone_id: Optional[pulumi.Input[str]] = None,
             lifecycle_rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['BucketLifecycleRuleArgs']]]]] = None,
             loggings: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['BucketLoggingArgs']]]]] = None,
             object_lock_configuration: Optional[pulumi.Input[pulumi.InputType['BucketObjectLockConfigurationArgs']]] = None,
             policy: Optional[pulumi.Input[str]] = None,
             replication_configuration: Optional[pulumi.Input[pulumi.InputType['BucketReplicationConfigurationArgs']]] = None,
             request_payer: Optional[pulumi.Input[str]] = None,
             server_side_encryption_configuration: Optional[pulumi.Input[pulumi.InputType['BucketServerSideEncryptionConfigurationArgs']]] = None,
             tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
             versioning: Optional[pulumi.Input[pulumi.InputType['BucketVersioningArgs']]] = None,
             website: Optional[pulumi.Input[pulumi.InputType['BucketWebsiteArgs']]] = None,
             website_domain: Optional[pulumi.Input[str]] = None,
             website_endpoint: Optional[pulumi.Input[str]] = None,
             __props__=None):
    """
    Provides a S3 bucket resource.

    > This functionality is for managing S3 in an AWS Partition. To manage [S3 on Outposts](https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html), see the `s3control.Bucket` resource.

    ## Example Usage
    ### Private Bucket w/ Tags

    ```python
    import pulumi
    import pulumi_aws as aws

    bucket = aws.s3.Bucket("bucket",
        acl="private",
        tags={
            "Environment": "Dev",
            "Name": "My bucket",
        })
    ```
    ### Static Website Hosting

    ```python
    import pulumi
    import pulumi_aws as aws

    bucket = aws.s3.Bucket("bucket",
        acl="public-read",
        policy=(lambda path: open(path).read())("policy.json"),
        website=aws.s3.BucketWebsiteArgs(
            index_document="index.html",
            error_document="error.html",
            routing_rules=\"\"\"[{
        "Condition": {
            "KeyPrefixEquals": "docs/"
        },
        "Redirect": {
            "ReplaceKeyPrefixWith": "documents/"
        }
    }]
    \"\"\",
        ))
    ```
    ### Using CORS

    ```python
    import pulumi
    import pulumi_aws as aws

    bucket = aws.s3.Bucket("bucket",
        acl="public-read",
        cors_rules=[aws.s3.BucketCorsRuleArgs(
            allowed_headers=["*"],
            allowed_methods=[
                "PUT",
                "POST",
            ],
            allowed_origins=["https://s3-website-test.mydomain.com"],
            expose_headers=["ETag"],
            max_age_seconds=3000,
        )])
    ```
    ### Using versioning

    ```python
    import pulumi
    import pulumi_aws as aws

    bucket = aws.s3.Bucket("bucket",
        acl="private",
        versioning=aws.s3.BucketVersioningArgs(
            enabled=True,
        ))
    ```
    ### Enable Logging

    ```python
    import pulumi
    import pulumi_aws as aws

    log_bucket = aws.s3.Bucket("logBucket", acl="log-delivery-write")
    bucket = aws.s3.Bucket("bucket",
        acl="private",
        loggings=[aws.s3.BucketLoggingArgs(
            target_bucket=log_bucket.id,
            target_prefix="log/",
        )])
    ```
    ### Using object lifecycle

    ```python
    import pulumi
    import pulumi_aws as aws

    bucket = aws.s3.Bucket("bucket",
        acl="private",
        lifecycle_rules=[
            aws.s3.BucketLifecycleRuleArgs(
                enabled=True,
                expiration=aws.s3.BucketLifecycleRuleExpirationArgs(
                    days=90,
                ),
                id="log",
                prefix="log/",
                tags={
                    "autoclean": "true",
                    "rule": "log",
                },
                transitions=[
                    aws.s3.BucketLifecycleRuleTransitionArgs(
                        days=30,
                        storage_class="STANDARD_IA",
                    ),
                    aws.s3.BucketLifecycleRuleTransitionArgs(
                        days=60,
                        storage_class="GLACIER",
                    ),
                ],
            ),
            aws.s3.BucketLifecycleRuleArgs(
                enabled=True,
                expiration=aws.s3.BucketLifecycleRuleExpirationArgs(
                    date="2016-01-12",
                ),
                id="tmp",
                prefix="tmp/",
            ),
        ])
    versioning_bucket = aws.s3.Bucket("versioningBucket",
        acl="private",
        lifecycle_rules=[aws.s3.BucketLifecycleRuleArgs(
            enabled=True,
            noncurrent_version_expiration=aws.s3.BucketLifecycleRuleNoncurrentVersionExpirationArgs(
                days=90,
            ),
            noncurrent_version_transitions=[
                aws.s3.BucketLifecycleRuleNoncurrentVersionTransitionArgs(
                    days=30,
                    storage_class="STANDARD_IA",
                ),
                aws.s3.BucketLifecycleRuleNoncurrentVersionTransitionArgs(
                    days=60,
                    storage_class="GLACIER",
                ),
            ],
            prefix="config/",
        )],
        versioning=aws.s3.BucketVersioningArgs(
            enabled=True,
        ))
    ```
    ### Using replication configuration

    > **NOTE:** See the `s3.BucketReplicationConfig` resource to support bi-directional replication configuration and additional features.

    ```python
    import pulumi
    import pulumi_aws as aws
    import pulumi_pulumi as pulumi

    central = pulumi.providers.Aws("central", region="eu-central-1")
    replication_role = aws.iam.Role("replicationRole", assume_role_policy=\"\"\"{
      "Version": "2012-10-17",
      "Statement": [
        {
          "Action": "sts:AssumeRole",
          "Principal": {
            "Service": "s3.amazonaws.com"
          },
          "Effect": "Allow",
          "Sid": ""
        }
      ]
    }
    \"\"\")
    destination = aws.s3.Bucket("destination", versioning=aws.s3.BucketVersioningArgs(
        enabled=True,
    ))
    source = aws.s3.Bucket("source",
        acl="private",
        versioning=aws.s3.BucketVersioningArgs(
            enabled=True,
        ),
        replication_configuration=aws.s3.BucketReplicationConfigurationArgs(
            role=replication_role.arn,
            rules=[aws.s3.BucketReplicationConfigurationRuleArgs(
                id="foobar",
                status="Enabled",
                filter=aws.s3.BucketReplicationConfigurationRuleFilterArgs(
                    tags={},
                ),
                destination=aws.s3.BucketReplicationConfigurationRuleDestinationArgs(
                    bucket=destination.arn,
                    storage_class="STANDARD",
                    replication_time=aws.s3.BucketReplicationConfigurationRuleDestinationReplicationTimeArgs(
                        status="Enabled",
                        minutes=15,
                    ),
                    metrics=aws.s3.BucketReplicationConfigurationRuleDestinationMetricsArgs(
                        status="Enabled",
                        minutes=15,
                    ),
                ),
            )],
        ),
        opts=pulumi.ResourceOptions(provider=central))
    replication_policy = aws.iam.Policy("replicationPolicy", policy=pulumi.Output.all(source.arn, source.arn, destination.arn).apply(lambda source_arn, source_arn1, destination_arn: f\"\"\"{{
      "Version": "2012-10-17",
      "Statement": [
        {{
          "Action": [
            "s3:GetReplicationConfiguration",
            "s3:ListBucket"
          ],
          "Effect": "Allow",
          "Resource": [
            "{source_arn}"
          ]
        }},
        {{
          "Action": [
            "s3:GetObjectVersionForReplication",
            "s3:GetObjectVersionAcl",
            "s3:GetObjectVersionTagging"
          ],
          "Effect": "Allow",
          "Resource": [
            "{source_arn1}/*"
          ]
        }},
        {{
          "Action": [
            "s3:ReplicateObject",
            "s3:ReplicateDelete",
            "s3:ReplicateTags"
          ],
          "Effect": "Allow",
          "Resource": "{destination_arn}/*"
        }}
      ]
    }}
    \"\"\"))
    replication_role_policy_attachment = aws.iam.RolePolicyAttachment("replicationRolePolicyAttachment",
        role=replication_role.name,
        policy_arn=replication_policy.arn)
    ```
    ### Enable Default Server Side Encryption

    ```python
    import pulumi
    import pulumi_aws as aws

    mykey = aws.kms.Key("mykey",
        description="This key is used to encrypt bucket objects",
        deletion_window_in_days=10)
    mybucket = aws.s3.Bucket("mybucket", server_side_encryption_configuration=aws.s3.BucketServerSideEncryptionConfigurationArgs(
        rule=aws.s3.BucketServerSideEncryptionConfigurationRuleArgs(
            apply_server_side_encryption_by_default=aws.s3.BucketServerSideEncryptionConfigurationRuleApplyServerSideEncryptionByDefaultArgs(
                kms_master_key_id=mykey.arn,
                sse_algorithm="aws:kms",
            ),
        ),
    ))
    ```
    ### Using ACL policy grants

    ```python
    import pulumi
    import pulumi_aws as aws

    current_user = aws.s3.get_canonical_user_id()
    bucket = aws.s3.Bucket("bucket", grants=[
        aws.s3.BucketGrantArgs(
            id=current_user.id,
            type="CanonicalUser",
            permissions=["FULL_CONTROL"],
        ),
        aws.s3.BucketGrantArgs(
            type="Group",
            permissions=[
                "READ_ACP",
                "WRITE",
            ],
            uri="http://acs.amazonaws.com/groups/s3/LogDelivery",
        ),
    ])
    ```

    ## Import

    S3 bucket can be imported using the `bucket`, e.g.,

    ```sh
    $ pulumi import aws:s3/bucket:Bucket bucket bucket-name
    ```
    The `policy` argument is not imported and will be deprecated in a future version of the provider. Use the `aws_s3_bucket_policy` resource to manage the S3 Bucket Policy instead.

    :param str resource_name: The name of the resource.
    :param pulumi.ResourceOptions opts: Options for the resource.
    :param pulumi.Input[str] acceleration_status: Sets the accelerate configuration of an existing bucket. Can be `Enabled` or `Suspended`.
    :param pulumi.Input[Union[str, 'CannedAcl']] acl: The [canned ACL](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl) to apply. Valid values are `private`, `public-read`, `public-read-write`, `aws-exec-read`, `authenticated-read`, and `log-delivery-write`. Defaults to `private`. Conflicts with `grant`.
    :param pulumi.Input[str] arn: The ARN of the bucket. Will be of format `arn:aws:s3:::bucketname`.
    :param pulumi.Input[str] bucket: The name of the bucket. If omitted, this provider will assign a random, unique name. Must be lowercase and less than or equal to 63 characters in length. A full list of bucket naming rules [may be found here](https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html).
    :param pulumi.Input[str] bucket_prefix: Creates a unique bucket name beginning with the specified prefix. Conflicts with `bucket`. Must be lowercase and less than or equal to 37 characters in length. A full list of bucket naming rules [may be found here](https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html).
    :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['BucketCorsRuleArgs']]]] cors_rules: A rule of [Cross-Origin Resource Sharing](https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) (documented below).
    :param pulumi.Input[bool] force_destroy: A boolean that indicates all objects (including any [locked objects](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html)) should be deleted from the bucket so that the bucket can be destroyed without error. These objects are *not* recoverable.
    :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['BucketGrantArgs']]]] grants: An [ACL policy grant](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#sample-acl) (documented below). Conflicts with `acl`.
    :param pulumi.Input[str] hosted_zone_id: The [Route 53 Hosted Zone ID](https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_website_region_endpoints) for this bucket's region.
    :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['BucketLifecycleRuleArgs']]]] lifecycle_rules: A configuration of [object lifecycle management](http://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) (documented below).
    :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['BucketLoggingArgs']]]] loggings: A settings of [bucket logging](https://docs.aws.amazon.com/AmazonS3/latest/UG/ManagingBucketLogging.html) (documented below).
    :param pulumi.Input[pulumi.InputType['BucketObjectLockConfigurationArgs']] object_lock_configuration: A configuration of [S3 object locking](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) (documented below)
    :param pulumi.Input[str] policy: A valid [bucket policy](https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html) JSON document. Note that if the policy document is not specific enough (but still valid), the provider may view the policy as constantly changing in a `pulumi up / preview / update`. In this case, please make sure you use the verbose/specific version of the policy.
    :param pulumi.Input[pulumi.InputType['BucketReplicationConfigurationArgs']] replication_configuration: A configuration of [replication configuration](http://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html) (documented below).
    :param pulumi.Input[str] request_payer: Specifies who should bear the cost of Amazon S3 data transfer.
           Can be either `BucketOwner` or `Requester`. By default, the owner of the S3 bucket would incur
           the costs of any data transfer. See [Requester Pays Buckets](http://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html)
           developer guide for more information.
    :param pulumi.Input[pulumi.InputType['BucketServerSideEncryptionConfigurationArgs']] server_side_encryption_configuration: A configuration of [server-side encryption configuration](http://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) (documented below)
    :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the bucket.
    :param pulumi.Input[pulumi.InputType['BucketVersioningArgs']] versioning: A state of [versioning](https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html) (documented below)
    :param pulumi.Input[pulumi.InputType['BucketWebsiteArgs']] website: A website object (documented below).
    :param pulumi.Input[str] website_domain: The domain of the website endpoint, if the bucket is configured with a website. If not, this will be an empty string. This is used to create Route 53 alias records.
    :param pulumi.Input[str] website_endpoint: The website endpoint, if the bucket is configured with a website. If not, this will be an empty string.
    """
    # Typing-only overload: never executed at runtime. The real dispatcher is
    # the un-decorated __init__ below, which forwards to _internal_init.
    ...
@overload
def __init__(__self__,
             resource_name: str,
             args: Optional[BucketArgs] = None,
             opts: Optional[pulumi.ResourceOptions] = None):
    """
    Provides a S3 bucket resource.

    > This functionality is for managing S3 in an AWS Partition. To manage [S3 on Outposts](https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html), see the `s3control.Bucket` resource.

    ## Example Usage
    ### Private Bucket w/ Tags

    ```python
    import pulumi
    import pulumi_aws as aws

    bucket = aws.s3.Bucket("bucket",
        acl="private",
        tags={
            "Environment": "Dev",
            "Name": "My bucket",
        })
    ```
    ### Static Website Hosting

    ```python
    import pulumi
    import pulumi_aws as aws

    bucket = aws.s3.Bucket("bucket",
        acl="public-read",
        policy=(lambda path: open(path).read())("policy.json"),
        website=aws.s3.BucketWebsiteArgs(
            index_document="index.html",
            error_document="error.html",
            routing_rules=\"\"\"[{
        "Condition": {
            "KeyPrefixEquals": "docs/"
        },
        "Redirect": {
            "ReplaceKeyPrefixWith": "documents/"
        }
    }]
    \"\"\",
        ))
    ```
    ### Using CORS

    ```python
    import pulumi
    import pulumi_aws as aws

    bucket = aws.s3.Bucket("bucket",
        acl="public-read",
        cors_rules=[aws.s3.BucketCorsRuleArgs(
            allowed_headers=["*"],
            allowed_methods=[
                "PUT",
                "POST",
            ],
            allowed_origins=["https://s3-website-test.mydomain.com"],
            expose_headers=["ETag"],
            max_age_seconds=3000,
        )])
    ```
    ### Using versioning

    ```python
    import pulumi
    import pulumi_aws as aws

    bucket = aws.s3.Bucket("bucket",
        acl="private",
        versioning=aws.s3.BucketVersioningArgs(
            enabled=True,
        ))
    ```
    ### Enable Logging

    ```python
    import pulumi
    import pulumi_aws as aws

    log_bucket = aws.s3.Bucket("logBucket", acl="log-delivery-write")
    bucket = aws.s3.Bucket("bucket",
        acl="private",
        loggings=[aws.s3.BucketLoggingArgs(
            target_bucket=log_bucket.id,
            target_prefix="log/",
        )])
    ```
    ### Using object lifecycle

    ```python
    import pulumi
    import pulumi_aws as aws

    bucket = aws.s3.Bucket("bucket",
        acl="private",
        lifecycle_rules=[
            aws.s3.BucketLifecycleRuleArgs(
                enabled=True,
                expiration=aws.s3.BucketLifecycleRuleExpirationArgs(
                    days=90,
                ),
                id="log",
                prefix="log/",
                tags={
                    "autoclean": "true",
                    "rule": "log",
                },
                transitions=[
                    aws.s3.BucketLifecycleRuleTransitionArgs(
                        days=30,
                        storage_class="STANDARD_IA",
                    ),
                    aws.s3.BucketLifecycleRuleTransitionArgs(
                        days=60,
                        storage_class="GLACIER",
                    ),
                ],
            ),
            aws.s3.BucketLifecycleRuleArgs(
                enabled=True,
                expiration=aws.s3.BucketLifecycleRuleExpirationArgs(
                    date="2016-01-12",
                ),
                id="tmp",
                prefix="tmp/",
            ),
        ])
    versioning_bucket = aws.s3.Bucket("versioningBucket",
        acl="private",
        lifecycle_rules=[aws.s3.BucketLifecycleRuleArgs(
            enabled=True,
            noncurrent_version_expiration=aws.s3.BucketLifecycleRuleNoncurrentVersionExpirationArgs(
                days=90,
            ),
            noncurrent_version_transitions=[
                aws.s3.BucketLifecycleRuleNoncurrentVersionTransitionArgs(
                    days=30,
                    storage_class="STANDARD_IA",
                ),
                aws.s3.BucketLifecycleRuleNoncurrentVersionTransitionArgs(
                    days=60,
                    storage_class="GLACIER",
                ),
            ],
            prefix="config/",
        )],
        versioning=aws.s3.BucketVersioningArgs(
            enabled=True,
        ))
    ```
    ### Using replication configuration

    > **NOTE:** See the `s3.BucketReplicationConfig` resource to support bi-directional replication configuration and additional features.

    ```python
    import pulumi
    import pulumi_aws as aws
    import pulumi_pulumi as pulumi

    central = pulumi.providers.Aws("central", region="eu-central-1")
    replication_role = aws.iam.Role("replicationRole", assume_role_policy=\"\"\"{
      "Version": "2012-10-17",
      "Statement": [
        {
          "Action": "sts:AssumeRole",
          "Principal": {
            "Service": "s3.amazonaws.com"
          },
          "Effect": "Allow",
          "Sid": ""
        }
      ]
    }
    \"\"\")
    destination = aws.s3.Bucket("destination", versioning=aws.s3.BucketVersioningArgs(
        enabled=True,
    ))
    source = aws.s3.Bucket("source",
        acl="private",
        versioning=aws.s3.BucketVersioningArgs(
            enabled=True,
        ),
        replication_configuration=aws.s3.BucketReplicationConfigurationArgs(
            role=replication_role.arn,
            rules=[aws.s3.BucketReplicationConfigurationRuleArgs(
                id="foobar",
                status="Enabled",
                filter=aws.s3.BucketReplicationConfigurationRuleFilterArgs(
                    tags={},
                ),
                destination=aws.s3.BucketReplicationConfigurationRuleDestinationArgs(
                    bucket=destination.arn,
                    storage_class="STANDARD",
                    replication_time=aws.s3.BucketReplicationConfigurationRuleDestinationReplicationTimeArgs(
                        status="Enabled",
                        minutes=15,
                    ),
                    metrics=aws.s3.BucketReplicationConfigurationRuleDestinationMetricsArgs(
                        status="Enabled",
                        minutes=15,
                    ),
                ),
            )],
        ),
        opts=pulumi.ResourceOptions(provider=central))
    replication_policy = aws.iam.Policy("replicationPolicy", policy=pulumi.Output.all(source.arn, source.arn, destination.arn).apply(lambda source_arn, source_arn1, destination_arn: f\"\"\"{{
      "Version": "2012-10-17",
      "Statement": [
        {{
          "Action": [
            "s3:GetReplicationConfiguration",
            "s3:ListBucket"
          ],
          "Effect": "Allow",
          "Resource": [
            "{source_arn}"
          ]
        }},
        {{
          "Action": [
            "s3:GetObjectVersionForReplication",
            "s3:GetObjectVersionAcl",
            "s3:GetObjectVersionTagging"
          ],
          "Effect": "Allow",
          "Resource": [
            "{source_arn1}/*"
          ]
        }},
        {{
          "Action": [
            "s3:ReplicateObject",
            "s3:ReplicateDelete",
            "s3:ReplicateTags"
          ],
          "Effect": "Allow",
          "Resource": "{destination_arn}/*"
        }}
      ]
    }}
    \"\"\"))
    replication_role_policy_attachment = aws.iam.RolePolicyAttachment("replicationRolePolicyAttachment",
        role=replication_role.name,
        policy_arn=replication_policy.arn)
    ```
    ### Enable Default Server Side Encryption

    ```python
    import pulumi
    import pulumi_aws as aws

    mykey = aws.kms.Key("mykey",
        description="This key is used to encrypt bucket objects",
        deletion_window_in_days=10)
    mybucket = aws.s3.Bucket("mybucket", server_side_encryption_configuration=aws.s3.BucketServerSideEncryptionConfigurationArgs(
        rule=aws.s3.BucketServerSideEncryptionConfigurationRuleArgs(
            apply_server_side_encryption_by_default=aws.s3.BucketServerSideEncryptionConfigurationRuleApplyServerSideEncryptionByDefaultArgs(
                kms_master_key_id=mykey.arn,
                sse_algorithm="aws:kms",
            ),
        ),
    ))
    ```
    ### Using ACL policy grants

    ```python
    import pulumi
    import pulumi_aws as aws

    current_user = aws.s3.get_canonical_user_id()
    bucket = aws.s3.Bucket("bucket", grants=[
        aws.s3.BucketGrantArgs(
            id=current_user.id,
            type="CanonicalUser",
            permissions=["FULL_CONTROL"],
        ),
        aws.s3.BucketGrantArgs(
            type="Group",
            permissions=[
                "READ_ACP",
                "WRITE",
            ],
            uri="http://acs.amazonaws.com/groups/s3/LogDelivery",
        ),
    ])
    ```

    ## Import

    S3 bucket can be imported using the `bucket`, e.g.,

    ```sh
    $ pulumi import aws:s3/bucket:Bucket bucket bucket-name
    ```
    The `policy` argument is not imported and will be deprecated in a future version of the provider. Use the `aws_s3_bucket_policy` resource to manage the S3 Bucket Policy instead.

    :param str resource_name: The name of the resource.
    :param BucketArgs args: The arguments to use to populate this resource's properties.
    :param pulumi.ResourceOptions opts: Options for the resource.
    """
    # Typing-only overload: never executed at runtime. The real dispatcher is
    # the un-decorated __init__ below, which forwards to _internal_init.
    ...
def __init__(__self__, resource_name: str, *args, **kwargs):
    """Dispatch to the shared initializer, accepting either of the two
    overload shapes: a single ``BucketArgs`` object or individual
    keyword properties."""
    resource_args, opts = _utilities.get_resource_args_opts(BucketArgs, pulumi.ResourceOptions, *args, **kwargs)
    if resource_args is None:
        # Keyword-property form: forward the caller's arguments unchanged.
        __self__._internal_init(resource_name, *args, **kwargs)
    else:
        # BucketArgs form: expand the args object into keyword properties.
        __self__._internal_init(resource_name, opts, **resource_args.__dict__)
def _internal_init(__self__,
                   resource_name: str,
                   opts: Optional[pulumi.ResourceOptions] = None,
                   acceleration_status: Optional[pulumi.Input[str]] = None,
                   acl: Optional[pulumi.Input[Union[str, 'CannedAcl']]] = None,
                   arn: Optional[pulumi.Input[str]] = None,
                   bucket: Optional[pulumi.Input[str]] = None,
                   bucket_prefix: Optional[pulumi.Input[str]] = None,
                   cors_rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['BucketCorsRuleArgs']]]]] = None,
                   force_destroy: Optional[pulumi.Input[bool]] = None,
                   grants: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['BucketGrantArgs']]]]] = None,
                   hosted_zone_id: Optional[pulumi.Input[str]] = None,
                   lifecycle_rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['BucketLifecycleRuleArgs']]]]] = None,
                   loggings: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['BucketLoggingArgs']]]]] = None,
                   object_lock_configuration: Optional[pulumi.Input[pulumi.InputType['BucketObjectLockConfigurationArgs']]] = None,
                   policy: Optional[pulumi.Input[str]] = None,
                   replication_configuration: Optional[pulumi.Input[pulumi.InputType['BucketReplicationConfigurationArgs']]] = None,
                   request_payer: Optional[pulumi.Input[str]] = None,
                   server_side_encryption_configuration: Optional[pulumi.Input[pulumi.InputType['BucketServerSideEncryptionConfigurationArgs']]] = None,
                   tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                   versioning: Optional[pulumi.Input[pulumi.InputType['BucketVersioningArgs']]] = None,
                   website: Optional[pulumi.Input[pulumi.InputType['BucketWebsiteArgs']]] = None,
                   website_domain: Optional[pulumi.Input[str]] = None,
                   website_endpoint: Optional[pulumi.Input[str]] = None,
                   __props__=None):
    # Shared implementation behind both __init__ overloads: validates the
    # resource options, builds the input property bag, and registers the
    # resource with the Pulumi engine.
    if opts is None:
        opts = pulumi.ResourceOptions()
    if not isinstance(opts, pulumi.ResourceOptions):
        raise TypeError('Expected resource options to be a ResourceOptions instance')
    if opts.version is None:
        # Pin the provider plugin version when the caller did not choose one.
        opts.version = _utilities.get_version()
    if opts.id is None:
        # No opts.id means we are creating a new resource, so the property
        # bag must be built here; a caller-supplied __props__ is only valid
        # on the lookup path (see Bucket.get, which sets opts.id).
        if __props__ is not None:
            raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
        __props__ = BucketArgs.__new__(BucketArgs)

        __props__.__dict__["acceleration_status"] = acceleration_status
        __props__.__dict__["acl"] = acl
        __props__.__dict__["arn"] = arn
        __props__.__dict__["bucket"] = bucket
        __props__.__dict__["bucket_prefix"] = bucket_prefix
        __props__.__dict__["cors_rules"] = cors_rules
        __props__.__dict__["force_destroy"] = force_destroy
        __props__.__dict__["grants"] = grants
        __props__.__dict__["hosted_zone_id"] = hosted_zone_id
        __props__.__dict__["lifecycle_rules"] = lifecycle_rules
        __props__.__dict__["loggings"] = loggings
        __props__.__dict__["object_lock_configuration"] = object_lock_configuration
        __props__.__dict__["policy"] = policy
        __props__.__dict__["replication_configuration"] = replication_configuration
        __props__.__dict__["request_payer"] = request_payer
        __props__.__dict__["server_side_encryption_configuration"] = server_side_encryption_configuration
        __props__.__dict__["tags"] = tags
        __props__.__dict__["versioning"] = versioning
        __props__.__dict__["website"] = website
        __props__.__dict__["website_domain"] = website_domain
        __props__.__dict__["website_endpoint"] = website_endpoint
        # Output-only properties: seeded as None and resolved by the engine
        # once the resource has been created.
        __props__.__dict__["bucket_domain_name"] = None
        __props__.__dict__["bucket_regional_domain_name"] = None
        __props__.__dict__["region"] = None
        __props__.__dict__["tags_all"] = None
    super(Bucket, __self__).__init__(
        'aws:s3/bucket:Bucket',
        resource_name,
        __props__,
        opts)
@staticmethod
def get(resource_name: str,
        id: pulumi.Input[str],
        opts: Optional[pulumi.ResourceOptions] = None,
        acceleration_status: Optional[pulumi.Input[str]] = None,
        acl: Optional[pulumi.Input[Union[str, 'CannedAcl']]] = None,
        arn: Optional[pulumi.Input[str]] = None,
        bucket: Optional[pulumi.Input[str]] = None,
        bucket_domain_name: Optional[pulumi.Input[str]] = None,
        bucket_prefix: Optional[pulumi.Input[str]] = None,
        bucket_regional_domain_name: Optional[pulumi.Input[str]] = None,
        cors_rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['BucketCorsRuleArgs']]]]] = None,
        force_destroy: Optional[pulumi.Input[bool]] = None,
        grants: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['BucketGrantArgs']]]]] = None,
        hosted_zone_id: Optional[pulumi.Input[str]] = None,
        lifecycle_rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['BucketLifecycleRuleArgs']]]]] = None,
        loggings: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['BucketLoggingArgs']]]]] = None,
        object_lock_configuration: Optional[pulumi.Input[pulumi.InputType['BucketObjectLockConfigurationArgs']]] = None,
        policy: Optional[pulumi.Input[str]] = None,
        region: Optional[pulumi.Input[str]] = None,
        replication_configuration: Optional[pulumi.Input[pulumi.InputType['BucketReplicationConfigurationArgs']]] = None,
        request_payer: Optional[pulumi.Input[str]] = None,
        server_side_encryption_configuration: Optional[pulumi.Input[pulumi.InputType['BucketServerSideEncryptionConfigurationArgs']]] = None,
        tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
        tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
        versioning: Optional[pulumi.Input[pulumi.InputType['BucketVersioningArgs']]] = None,
        website: Optional[pulumi.Input[pulumi.InputType['BucketWebsiteArgs']]] = None,
        website_domain: Optional[pulumi.Input[str]] = None,
        website_endpoint: Optional[pulumi.Input[str]] = None) -> 'Bucket':
    """
    Get an existing Bucket resource's state with the given name, id, and optional extra
    properties used to qualify the lookup.

    :param str resource_name: The unique name of the resulting resource.
    :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
    :param pulumi.ResourceOptions opts: Options for the resource.
    :param pulumi.Input[str] acceleration_status: Sets the accelerate configuration of an existing bucket. Can be `Enabled` or `Suspended`.
    :param pulumi.Input[Union[str, 'CannedAcl']] acl: The [canned ACL](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl) to apply. Valid values are `private`, `public-read`, `public-read-write`, `aws-exec-read`, `authenticated-read`, and `log-delivery-write`. Defaults to `private`. Conflicts with `grant`.
    :param pulumi.Input[str] arn: The ARN of the bucket. Will be of format `arn:aws:s3:::bucketname`.
    :param pulumi.Input[str] bucket: The name of the bucket. If omitted, this provider will assign a random, unique name. Must be lowercase and less than or equal to 63 characters in length. A full list of bucket naming rules [may be found here](https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html).
    :param pulumi.Input[str] bucket_domain_name: The bucket domain name. Will be of format `bucketname.s3.amazonaws.com`.
    :param pulumi.Input[str] bucket_prefix: Creates a unique bucket name beginning with the specified prefix. Conflicts with `bucket`. Must be lowercase and less than or equal to 37 characters in length. A full list of bucket naming rules [may be found here](https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html).
    :param pulumi.Input[str] bucket_regional_domain_name: The bucket region-specific domain name. The bucket domain name including the region name, please refer [here](https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) for format. Note: The AWS CloudFront allows specifying S3 region-specific endpoint when creating S3 origin, it will prevent [redirect issues](https://forums.aws.amazon.com/thread.jspa?threadID=216814) from CloudFront to S3 Origin URL.
    :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['BucketCorsRuleArgs']]]] cors_rules: A rule of [Cross-Origin Resource Sharing](https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) (documented below).
    :param pulumi.Input[bool] force_destroy: A boolean that indicates all objects (including any [locked objects](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html)) should be deleted from the bucket so that the bucket can be destroyed without error. These objects are *not* recoverable.
    :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['BucketGrantArgs']]]] grants: An [ACL policy grant](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#sample-acl) (documented below). Conflicts with `acl`.
    :param pulumi.Input[str] hosted_zone_id: The [Route 53 Hosted Zone ID](https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_website_region_endpoints) for this bucket's region.
    :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['BucketLifecycleRuleArgs']]]] lifecycle_rules: A configuration of [object lifecycle management](http://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) (documented below).
    :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['BucketLoggingArgs']]]] loggings: A settings of [bucket logging](https://docs.aws.amazon.com/AmazonS3/latest/UG/ManagingBucketLogging.html) (documented below).
    :param pulumi.Input[pulumi.InputType['BucketObjectLockConfigurationArgs']] object_lock_configuration: A configuration of [S3 object locking](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) (documented below)
    :param pulumi.Input[str] policy: A valid [bucket policy](https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html) JSON document. Note that if the policy document is not specific enough (but still valid), the provider may view the policy as constantly changing in a `pulumi up / preview / update`. In this case, please make sure you use the verbose/specific version of the policy.
    :param pulumi.Input[str] region: The AWS region this bucket resides in.
    :param pulumi.Input[pulumi.InputType['BucketReplicationConfigurationArgs']] replication_configuration: A configuration of [replication configuration](http://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html) (documented below).
    :param pulumi.Input[str] request_payer: Specifies who should bear the cost of Amazon S3 data transfer.
           Can be either `BucketOwner` or `Requester`. By default, the owner of the S3 bucket would incur
           the costs of any data transfer. See [Requester Pays Buckets](http://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html)
           developer guide for more information.
    :param pulumi.Input[pulumi.InputType['BucketServerSideEncryptionConfigurationArgs']] server_side_encryption_configuration: A configuration of [server-side encryption configuration](http://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) (documented below)
    :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the bucket.
    :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: A map of tags assigned to the resource, including those inherited from the provider .
    :param pulumi.Input[pulumi.InputType['BucketVersioningArgs']] versioning: A state of [versioning](https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html) (documented below)
    :param pulumi.Input[pulumi.InputType['BucketWebsiteArgs']] website: A website object (documented below).
    :param pulumi.Input[str] website_domain: The domain of the website endpoint, if the bucket is configured with a website. If not, this will be an empty string. This is used to create Route 53 alias records.
    :param pulumi.Input[str] website_endpoint: The website endpoint, if the bucket is configured with a website. If not, this will be an empty string.
    """
    # Folding the provider ID into the options is what switches the engine
    # onto the "read existing resource" path instead of creating one.
    opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

    # Build the state bag directly, bypassing _BucketState.__init__, and
    # seed it with every caller-supplied state value in one shot.
    __props__ = _BucketState.__new__(_BucketState)
    __props__.__dict__.update(
        acceleration_status=acceleration_status,
        acl=acl,
        arn=arn,
        bucket=bucket,
        bucket_domain_name=bucket_domain_name,
        bucket_prefix=bucket_prefix,
        bucket_regional_domain_name=bucket_regional_domain_name,
        cors_rules=cors_rules,
        force_destroy=force_destroy,
        grants=grants,
        hosted_zone_id=hosted_zone_id,
        lifecycle_rules=lifecycle_rules,
        loggings=loggings,
        object_lock_configuration=object_lock_configuration,
        policy=policy,
        region=region,
        replication_configuration=replication_configuration,
        request_payer=request_payer,
        server_side_encryption_configuration=server_side_encryption_configuration,
        tags=tags,
        tags_all=tags_all,
        versioning=versioning,
        website=website,
        website_domain=website_domain,
        website_endpoint=website_endpoint,
    )
    return Bucket(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="accelerationStatus")
def acceleration_status(self) -> pulumi.Output[str]:
    """
    Sets the accelerate configuration of an existing bucket. Can be `Enabled` or `Suspended`.
    """
    # Resolved from the resource's property bag via the Pulumi SDK.
    return pulumi.get(self, "acceleration_status")
@property
@pulumi.getter
def acl(self) -> pulumi.Output[Optional[str]]:
    """
    The [canned ACL](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl) to apply. Valid values are `private`, `public-read`, `public-read-write`, `aws-exec-read`, `authenticated-read`, and `log-delivery-write`. Defaults to `private`. Conflicts with `grant`.
    """
    # Resolved from the resource's property bag via the Pulumi SDK.
    return pulumi.get(self, "acl")
@property
@pulumi.getter
def arn(self) -> pulumi.Output[str]:
    """
    The ARN of the bucket. Will be of format `arn:aws:s3:::bucketname`.
    """
    # Resolved from the resource's property bag via the Pulumi SDK.
    return pulumi.get(self, "arn")
@property
@pulumi.getter
def bucket(self) -> pulumi.Output[str]:
    """
    The name of the bucket. If omitted, this provider will assign a random, unique name. Must be lowercase and less than or equal to 63 characters in length. A full list of bucket naming rules [may be found here](https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html).
    """
    # Resolved from the resource's property bag via the Pulumi SDK.
    return pulumi.get(self, "bucket")
@property
@pulumi.getter(name="bucketDomainName")
def bucket_domain_name(self) -> pulumi.Output[str]:
    """
    The bucket domain name. Will be of format `bucketname.s3.amazonaws.com`.
    """
    # Resolved from the resource's outputs via pulumi.get.
    return pulumi.get(self, "bucket_domain_name")
@property
@pulumi.getter(name="bucketPrefix")
def bucket_prefix(self) -> pulumi.Output[Optional[str]]:
    """
    Creates a unique bucket name beginning with the specified prefix. Conflicts with `bucket`. Must be lowercase and less than or equal to 37 characters in length. A full list of bucket naming rules [may be found here](https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html).
    """
    # Resolved from the resource's outputs via pulumi.get.
    return pulumi.get(self, "bucket_prefix")
@property
@pulumi.getter(name="bucketRegionalDomainName")
def bucket_regional_domain_name(self) -> pulumi.Output[str]:
    """
    The bucket region-specific domain name. The bucket domain name including the region name, please refer [here](https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) for format. Note: The AWS CloudFront allows specifying S3 region-specific endpoint when creating S3 origin, it will prevent [redirect issues](https://forums.aws.amazon.com/thread.jspa?threadID=216814) from CloudFront to S3 Origin URL.
    """
    # Resolved from the resource's outputs via pulumi.get.
    return pulumi.get(self, "bucket_regional_domain_name")
@property
@pulumi.getter(name="corsRules")
def cors_rules(self) -> pulumi.Output[Optional[Sequence['outputs.BucketCorsRule']]]:
    """
    A rule of [Cross-Origin Resource Sharing](https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) (documented below).
    """
    # Resolved from the resource's outputs via pulumi.get.
    return pulumi.get(self, "cors_rules")
@property
@pulumi.getter(name="forceDestroy")
def force_destroy(self) -> pulumi.Output[Optional[bool]]:
    """
    A boolean that indicates all objects (including any [locked objects](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html)) should be deleted from the bucket so that the bucket can be destroyed without error. These objects are *not* recoverable.
    """
    # Resolved from the resource's outputs via pulumi.get.
    return pulumi.get(self, "force_destroy")
@property
@pulumi.getter
def grants(self) -> pulumi.Output[Optional[Sequence['outputs.BucketGrant']]]:
    """
    An [ACL policy grant](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#sample-acl) (documented below). Conflicts with `acl`.
    """
    # Resolved from the resource's outputs via pulumi.get.
    return pulumi.get(self, "grants")
@property
@pulumi.getter(name="hostedZoneId")
def hosted_zone_id(self) -> pulumi.Output[str]:
    """
    The [Route 53 Hosted Zone ID](https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_website_region_endpoints) for this bucket's region.
    """
    # Resolved from the resource's outputs via pulumi.get.
    return pulumi.get(self, "hosted_zone_id")
@property
@pulumi.getter(name="lifecycleRules")
def lifecycle_rules(self) -> pulumi.Output[Optional[Sequence['outputs.BucketLifecycleRule']]]:
    """
    A configuration of [object lifecycle management](http://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) (documented below).
    """
    # Resolved from the resource's outputs via pulumi.get.
    return pulumi.get(self, "lifecycle_rules")
@property
@pulumi.getter
def loggings(self) -> pulumi.Output[Optional[Sequence['outputs.BucketLogging']]]:
    """
    The settings of [bucket logging](https://docs.aws.amazon.com/AmazonS3/latest/UG/ManagingBucketLogging.html) (documented below).
    """
    # Resolved from the resource's outputs via pulumi.get.
    return pulumi.get(self, "loggings")
@property
@pulumi.getter(name="objectLockConfiguration")
def object_lock_configuration(self) -> pulumi.Output[Optional['outputs.BucketObjectLockConfiguration']]:
    """
    A configuration of [S3 object locking](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) (documented below)
    """
    # Resolved from the resource's outputs via pulumi.get.
    return pulumi.get(self, "object_lock_configuration")
@property
@pulumi.getter
def policy(self) -> pulumi.Output[Optional[str]]:
    """
    A valid [bucket policy](https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html) JSON document. Note that if the policy document is not specific enough (but still valid), the provider may view the policy as constantly changing in a `pulumi up / preview / update`. In this case, please make sure you use the verbose/specific version of the policy.
    """
    # Resolved from the resource's outputs via pulumi.get.
    return pulumi.get(self, "policy")
@property
@pulumi.getter
def region(self) -> pulumi.Output[str]:
    """
    The AWS region this bucket resides in.
    """
    # Resolved from the resource's outputs via pulumi.get.
    return pulumi.get(self, "region")
@property
@pulumi.getter(name="replicationConfiguration")
def replication_configuration(self) -> pulumi.Output[Optional['outputs.BucketReplicationConfiguration']]:
    """
    A configuration of [replication configuration](http://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html) (documented below).
    """
    # Resolved from the resource's outputs via pulumi.get.
    return pulumi.get(self, "replication_configuration")
@property
@pulumi.getter(name="requestPayer")
def request_payer(self) -> pulumi.Output[str]:
    """
    Specifies who should bear the cost of Amazon S3 data transfer.
    Can be either `BucketOwner` or `Requester`. By default, the owner of the S3 bucket would incur
    the costs of any data transfer. See [Requester Pays Buckets](http://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html)
    developer guide for more information.
    """
    # Resolved from the resource's outputs via pulumi.get.
    return pulumi.get(self, "request_payer")
@property
@pulumi.getter(name="serverSideEncryptionConfiguration")
def server_side_encryption_configuration(self) -> pulumi.Output[Optional['outputs.BucketServerSideEncryptionConfiguration']]:
    """
    A configuration of [server-side encryption configuration](http://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) (documented below)
    """
    # Resolved from the resource's outputs via pulumi.get.
    return pulumi.get(self, "server_side_encryption_configuration")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
    """
    A mapping of tags to assign to the bucket.
    """
    # Resolved from the resource's outputs via pulumi.get.
    return pulumi.get(self, "tags")
@property
@pulumi.getter(name="tagsAll")
def tags_all(self) -> pulumi.Output[Mapping[str, str]]:
    """
    A map of tags assigned to the resource, including those inherited from the provider.
    """
    # Resolved from the resource's outputs via pulumi.get.
    return pulumi.get(self, "tags_all")
@property
@pulumi.getter
def versioning(self) -> pulumi.Output['outputs.BucketVersioning']:
    """
    A state of [versioning](https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html) (documented below)
    """
    # Resolved from the resource's outputs via pulumi.get.
    return pulumi.get(self, "versioning")
@property
@pulumi.getter
def website(self) -> pulumi.Output[Optional['outputs.BucketWebsite']]:
    """
    A website object (documented below).
    """
    # Resolved from the resource's outputs via pulumi.get.
    return pulumi.get(self, "website")
@property
@pulumi.getter(name="websiteDomain")
def website_domain(self) -> pulumi.Output[str]:
    """
    The domain of the website endpoint, if the bucket is configured with a website. If not, this will be an empty string. This is used to create Route 53 alias records.
    """
    # Resolved from the resource's outputs via pulumi.get.
    return pulumi.get(self, "website_domain")
@property
@pulumi.getter(name="websiteEndpoint")
def website_endpoint(self) -> pulumi.Output[str]:
    """
    The website endpoint, if the bucket is configured with a website. If not, this will be an empty string.
    """
    # Resolved from the resource's outputs via pulumi.get.
    return pulumi.get(self, "website_endpoint")
| 53.379139
| 474
| 0.648967
| 10,765
| 96,723
| 5.690478
| 0.047097
| 0.066799
| 0.063584
| 0.028992
| 0.968592
| 0.962323
| 0.952267
| 0.948268
| 0.945281
| 0.93178
| 0
| 0.006234
| 0.238816
| 96,723
| 1,811
| 475
| 53.408614
| 0.825804
| 0.511946
| 0
| 0.864315
| 1
| 0
| 0.148214
| 0.066222
| 0
| 0
| 0
| 0
| 0
| 1
| 0.16825
| false
| 0.001357
| 0.010855
| 0
| 0.280868
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
e40c1bd273f63cdf88ee03cda11893990b6e6763
| 12,887
|
py
|
Python
|
Tests/ttLib/tables/_b_s_l_n_test.py
|
odidev/fonttools
|
27b5f568f562971d7fbf64eeb027ea61e4939db4
|
[
"Apache-2.0",
"MIT"
] | 2,705
|
2016-09-27T10:02:12.000Z
|
2022-03-31T09:37:46.000Z
|
Tests/ttLib/tables/_b_s_l_n_test.py
|
odidev/fonttools
|
27b5f568f562971d7fbf64eeb027ea61e4939db4
|
[
"Apache-2.0",
"MIT"
] | 1,599
|
2016-09-27T09:07:36.000Z
|
2022-03-31T23:04:51.000Z
|
Tests/ttLib/tables/_b_s_l_n_test.py
|
odidev/fonttools
|
27b5f568f562971d7fbf64eeb027ea61e4939db4
|
[
"Apache-2.0",
"MIT"
] | 352
|
2016-10-07T04:18:15.000Z
|
2022-03-30T07:35:01.000Z
|
from fontTools.misc.testTools import FakeFont, getXML, parseXML
from fontTools.misc.textTools import deHexStr, hexStr
from fontTools.ttLib import newTable
import unittest
# Apple's spec of the baseline table gives no example for 'bsln' format 0,
# but the Apple Chancery font contains the following data.
# (The trailing comments give the byte offset of each field in the table.)
BSLN_FORMAT_0_DATA = deHexStr(
    '0001 0000 0000 '      # 0: Version=1.0, Format=0
    '0000 '                # 6: DefaultBaseline=0 (Roman baseline)
    '0000 01D1 0000 0541 ' # 8: Delta[0..3]=0, 465, 0, 1345
    '01FB 0000 0000 0000 ' # 16: Delta[4..7]=507, 0, 0, 0
    '0000 0000 0000 0000 ' # 24: Delta[8..11]=0, 0, 0, 0
    '0000 0000 0000 0000 ' # 32: Delta[12..15]=0, 0, 0, 0
    '0000 0000 0000 0000 ' # 40: Delta[16..19]=0, 0, 0, 0
    '0000 0000 0000 0000 ' # 48: Delta[20..23]=0, 0, 0, 0
    '0000 0000 0000 0000 ' # 56: Delta[24..27]=0, 0, 0, 0
    '0000 0000 0000 0000 ' # 64: Delta[28..31]=0, 0, 0, 0
)                          # 72: <end>
# Guard against typos in the hex dump above.
assert len(BSLN_FORMAT_0_DATA) == 72
BSLN_FORMAT_0_XML = [
'<Version value="0x00010000"/>',
'<Baseline Format="0">',
' <DefaultBaseline value="0"/>',
' <Delta index="0" value="0"/>',
' <Delta index="1" value="465"/>',
' <Delta index="2" value="0"/>',
' <Delta index="3" value="1345"/>',
' <Delta index="4" value="507"/>',
' <Delta index="5" value="0"/>',
' <Delta index="6" value="0"/>',
' <Delta index="7" value="0"/>',
' <Delta index="8" value="0"/>',
' <Delta index="9" value="0"/>',
' <Delta index="10" value="0"/>',
' <Delta index="11" value="0"/>',
' <Delta index="12" value="0"/>',
' <Delta index="13" value="0"/>',
' <Delta index="14" value="0"/>',
' <Delta index="15" value="0"/>',
' <Delta index="16" value="0"/>',
' <Delta index="17" value="0"/>',
' <Delta index="18" value="0"/>',
' <Delta index="19" value="0"/>',
' <Delta index="20" value="0"/>',
' <Delta index="21" value="0"/>',
' <Delta index="22" value="0"/>',
' <Delta index="23" value="0"/>',
' <Delta index="24" value="0"/>',
' <Delta index="25" value="0"/>',
' <Delta index="26" value="0"/>',
' <Delta index="27" value="0"/>',
' <Delta index="28" value="0"/>',
' <Delta index="29" value="0"/>',
' <Delta index="30" value="0"/>',
' <Delta index="31" value="0"/>',
'</Baseline>',
]
# Example: Format 1 Baseline Table
# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6bsln.html
# The example in the AAT specification uses the value 270 for Seg[0].LastGlyph,
# whereas we use the value 10 for testing to shorten the XML dump.
BSLN_FORMAT_1_DATA = deHexStr(
    '0001 0000 0001 '      # 0: Version=1.0, Format=1
    '0001 '                # 6: DefaultBaseline=1 (Ideographic baseline)
    '0000 0357 0000 05F0 ' # 8: Delta[0..3]=0, 855, 0, 1520
    '0000 0000 0000 0000 ' # 16: Delta[4..7]=0, 0, 0, 0
    '0000 0000 0000 0000 ' # 24: Delta[8..11]=0, 0, 0, 0
    '0000 0000 0000 0000 ' # 32: Delta[12..15]=0, 0, 0, 0
    '0000 0000 0000 0000 ' # 40: Delta[16..19]=0, 0, 0, 0
    '0000 0000 0000 0000 ' # 48: Delta[20..23]=0, 0, 0, 0
    '0000 0000 0000 0000 ' # 56: Delta[24..27]=0, 0, 0, 0
    '0000 0000 0000 0000 ' # 64: Delta[28..31]=0, 0, 0, 0
    '0002 0006 0001 '      # 72: LookupFormat=2, UnitSize=6, NUnits=1
    '0006 0000 0000 '      # 78: SearchRange=6, EntrySelector=0, RangeShift=0
    '000A 0002 0000 '      # 84: Seg[0].LastGlyph=10 FirstGl=2 Value=0/Roman
    'FFFF FFFF 0000 '      # 90: Seg[1]=<end>
)                          # 96: <end>
# Guard against typos in the hex dump above.
assert len(BSLN_FORMAT_1_DATA) == 96
BSLN_FORMAT_1_XML = [
'<Version value="0x00010000"/>',
'<Baseline Format="1">',
' <DefaultBaseline value="1"/>',
' <Delta index="0" value="0"/>',
' <Delta index="1" value="855"/>',
' <Delta index="2" value="0"/>',
' <Delta index="3" value="1520"/>',
' <Delta index="4" value="0"/>',
' <Delta index="5" value="0"/>',
' <Delta index="6" value="0"/>',
' <Delta index="7" value="0"/>',
' <Delta index="8" value="0"/>',
' <Delta index="9" value="0"/>',
' <Delta index="10" value="0"/>',
' <Delta index="11" value="0"/>',
' <Delta index="12" value="0"/>',
' <Delta index="13" value="0"/>',
' <Delta index="14" value="0"/>',
' <Delta index="15" value="0"/>',
' <Delta index="16" value="0"/>',
' <Delta index="17" value="0"/>',
' <Delta index="18" value="0"/>',
' <Delta index="19" value="0"/>',
' <Delta index="20" value="0"/>',
' <Delta index="21" value="0"/>',
' <Delta index="22" value="0"/>',
' <Delta index="23" value="0"/>',
' <Delta index="24" value="0"/>',
' <Delta index="25" value="0"/>',
' <Delta index="26" value="0"/>',
' <Delta index="27" value="0"/>',
' <Delta index="28" value="0"/>',
' <Delta index="29" value="0"/>',
' <Delta index="30" value="0"/>',
' <Delta index="31" value="0"/>',
' <BaselineValues>',
' <Lookup glyph="B" value="0"/>',
' <Lookup glyph="C" value="0"/>',
' <Lookup glyph="D" value="0"/>',
' <Lookup glyph="E" value="0"/>',
' <Lookup glyph="F" value="0"/>',
' <Lookup glyph="G" value="0"/>',
' <Lookup glyph="H" value="0"/>',
' <Lookup glyph="I" value="0"/>',
' <Lookup glyph="J" value="0"/>',
' </BaselineValues>',
'</Baseline>',
]
# Format 2: baselines expressed as control points on a standard glyph.
# (The trailing comments give the byte offset of each field in the table.)
BSLN_FORMAT_2_DATA = deHexStr(
    '0001 0000 0002 '      # 0: Version=1.0, Format=2
    '0004 '                # 6: DefaultBaseline=4 (Math)
    '0016 '                # 8: StandardGlyph=22
    '0050 0051 FFFF 0052 ' # 10: ControlPoint[0..3]=80, 81, <none>, 82
    'FFFF FFFF FFFF FFFF ' # 18: ControlPoint[4..7]=<none>
    'FFFF FFFF FFFF FFFF ' # 26: ControlPoint[8..11]=<none>
    'FFFF FFFF FFFF FFFF ' # 34: ControlPoint[12..15]=<none>
    'FFFF FFFF FFFF FFFF ' # 42: ControlPoint[16..19]=<none>
    'FFFF FFFF FFFF FFFF ' # 50: ControlPoint[20..23]=<none>
    'FFFF FFFF FFFF FFFF ' # 58: ControlPoint[24..27]=<none>
    'FFFF FFFF FFFF FFFF ' # 66: ControlPoint[28..31]=<none>
)                          # 74: <end>
# Guard against typos in the hex dump above.
assert len(BSLN_FORMAT_2_DATA) == 74
BSLN_FORMAT_2_XML = [
'<Version value="0x00010000"/>',
'<Baseline Format="2">',
' <DefaultBaseline value="4"/>',
' <StandardGlyph value="V"/>',
' <ControlPoint index="0" value="80"/>',
' <ControlPoint index="1" value="81"/>',
' <ControlPoint index="2" value="65535"/>',
' <ControlPoint index="3" value="82"/>',
' <ControlPoint index="4" value="65535"/>',
' <ControlPoint index="5" value="65535"/>',
' <ControlPoint index="6" value="65535"/>',
' <ControlPoint index="7" value="65535"/>',
' <ControlPoint index="8" value="65535"/>',
' <ControlPoint index="9" value="65535"/>',
' <ControlPoint index="10" value="65535"/>',
' <ControlPoint index="11" value="65535"/>',
' <ControlPoint index="12" value="65535"/>',
' <ControlPoint index="13" value="65535"/>',
' <ControlPoint index="14" value="65535"/>',
' <ControlPoint index="15" value="65535"/>',
' <ControlPoint index="16" value="65535"/>',
' <ControlPoint index="17" value="65535"/>',
' <ControlPoint index="18" value="65535"/>',
' <ControlPoint index="19" value="65535"/>',
' <ControlPoint index="20" value="65535"/>',
' <ControlPoint index="21" value="65535"/>',
' <ControlPoint index="22" value="65535"/>',
' <ControlPoint index="23" value="65535"/>',
' <ControlPoint index="24" value="65535"/>',
' <ControlPoint index="25" value="65535"/>',
' <ControlPoint index="26" value="65535"/>',
' <ControlPoint index="27" value="65535"/>',
' <ControlPoint index="28" value="65535"/>',
' <ControlPoint index="29" value="65535"/>',
' <ControlPoint index="30" value="65535"/>',
' <ControlPoint index="31" value="65535"/>',
'</Baseline>',
]
# Example: Format 3 Baseline Table
# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6bsln.html
# The example in the AAT specification uses the value 270 for Seg[0].LastGlyph,
# whereas we use the value 10 for testing to shorten the XML dump.
BSLN_FORMAT_3_DATA = deHexStr(
    '0001 0000 0003 '      # 0: Version=1.0, Format=3
    '0001 '                # 6: DefaultBaseline=1 (Ideographic)
    '0016 '                # 8: StandardGlyph=22
    '0050 0051 FFFF 0052 ' # 10: ControlPoint[0..3]=80, 81, <none>, 82
    'FFFF FFFF FFFF FFFF ' # 18: ControlPoint[4..7]=<none>
    'FFFF FFFF FFFF FFFF ' # 26: ControlPoint[8..11]=<none>
    'FFFF FFFF FFFF FFFF ' # 34: ControlPoint[12..15]=<none>
    'FFFF FFFF FFFF FFFF ' # 42: ControlPoint[16..19]=<none>
    'FFFF FFFF FFFF FFFF ' # 50: ControlPoint[20..23]=<none>
    'FFFF FFFF FFFF FFFF ' # 58: ControlPoint[24..27]=<none>
    'FFFF FFFF FFFF FFFF ' # 66: ControlPoint[28..31]=<none>
    '0002 0006 0001 '      # 74: LookupFormat=2, UnitSize=6, NUnits=1
    '0006 0000 0000 '      # 80: SearchRange=6, EntrySelector=0, RangeShift=0
    '000A 0002 0000 '      # 86: Seg[0].LastGlyph=10 FirstGl=2 Value=0/Roman
    'FFFF FFFF 0000 '      # 92: Seg[1]=<end>
)                          # 98: <end>
# Guard against typos in the hex dump above.
assert len(BSLN_FORMAT_3_DATA) == 98
BSLN_FORMAT_3_XML = [
'<Version value="0x00010000"/>',
'<Baseline Format="3">',
' <DefaultBaseline value="1"/>',
' <StandardGlyph value="V"/>',
' <ControlPoint index="0" value="80"/>',
' <ControlPoint index="1" value="81"/>',
' <ControlPoint index="2" value="65535"/>',
' <ControlPoint index="3" value="82"/>',
' <ControlPoint index="4" value="65535"/>',
' <ControlPoint index="5" value="65535"/>',
' <ControlPoint index="6" value="65535"/>',
' <ControlPoint index="7" value="65535"/>',
' <ControlPoint index="8" value="65535"/>',
' <ControlPoint index="9" value="65535"/>',
' <ControlPoint index="10" value="65535"/>',
' <ControlPoint index="11" value="65535"/>',
' <ControlPoint index="12" value="65535"/>',
' <ControlPoint index="13" value="65535"/>',
' <ControlPoint index="14" value="65535"/>',
' <ControlPoint index="15" value="65535"/>',
' <ControlPoint index="16" value="65535"/>',
' <ControlPoint index="17" value="65535"/>',
' <ControlPoint index="18" value="65535"/>',
' <ControlPoint index="19" value="65535"/>',
' <ControlPoint index="20" value="65535"/>',
' <ControlPoint index="21" value="65535"/>',
' <ControlPoint index="22" value="65535"/>',
' <ControlPoint index="23" value="65535"/>',
' <ControlPoint index="24" value="65535"/>',
' <ControlPoint index="25" value="65535"/>',
' <ControlPoint index="26" value="65535"/>',
' <ControlPoint index="27" value="65535"/>',
' <ControlPoint index="28" value="65535"/>',
' <ControlPoint index="29" value="65535"/>',
' <ControlPoint index="30" value="65535"/>',
' <ControlPoint index="31" value="65535"/>',
' <BaselineValues>',
' <Lookup glyph="B" value="0"/>',
' <Lookup glyph="C" value="0"/>',
' <Lookup glyph="D" value="0"/>',
' <Lookup glyph="E" value="0"/>',
' <Lookup glyph="F" value="0"/>',
' <Lookup glyph="G" value="0"/>',
' <Lookup glyph="H" value="0"/>',
' <Lookup glyph="I" value="0"/>',
' <Lookup glyph="J" value="0"/>',
' </BaselineValues>',
'</Baseline>',
]
class BSLNTest(unittest.TestCase):
    """Round-trip tests for the AAT 'bsln' table, one per table format."""

    @classmethod
    def setUpClass(cls):
        cls.maxDiff = None
        # Glyph IDs 1..26 map to 'A'..'Z'; glyph 0 is .notdef.
        cls.font = FakeFont(['.notdef'] + list('ABCDEFGHIJKLMNOPQRSTUVWXYZ'))

    def decompileToXML(self, data, xml):
        # Binary -> object -> XML dump must match the expected listing.
        tbl = newTable('bsln')
        tbl.decompile(data, self.font)
        self.assertEqual(getXML(tbl.toXML), xml)

    def compileFromXML(self, xml, data):
        # XML -> object -> binary must reproduce the original bytes.
        tbl = newTable('bsln')
        for name, attrs, content in parseXML(xml):
            tbl.fromXML(name, attrs, content, font=self.font)
        self.assertEqual(hexStr(tbl.compile(self.font)), hexStr(data))

    def testFormat0(self):
        data, xml = BSLN_FORMAT_0_DATA, BSLN_FORMAT_0_XML
        self.decompileToXML(data, xml)
        self.compileFromXML(xml, data)

    def testFormat1(self):
        data, xml = BSLN_FORMAT_1_DATA, BSLN_FORMAT_1_XML
        self.decompileToXML(data, xml)
        self.compileFromXML(xml, data)

    def testFormat2(self):
        data, xml = BSLN_FORMAT_2_DATA, BSLN_FORMAT_2_XML
        self.decompileToXML(data, xml)
        self.compileFromXML(xml, data)

    def testFormat3(self):
        data, xml = BSLN_FORMAT_3_DATA, BSLN_FORMAT_3_XML
        self.decompileToXML(data, xml)
        self.compileFromXML(xml, data)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    import sys
    sys.exit(unittest.main())
| 41.570968
| 81
| 0.564212
| 1,643
| 12,887
| 4.369446
| 0.12112
| 0.066862
| 0.08887
| 0.129266
| 0.798579
| 0.730882
| 0.709152
| 0.703301
| 0.703162
| 0.659702
| 0
| 0.148464
| 0.226973
| 12,887
| 309
| 82
| 41.705502
| 0.572174
| 0.176612
| 0
| 0.731618
| 0
| 0
| 0.627062
| 0.002465
| 0
| 0
| 0.003793
| 0
| 0.022059
| 1
| 0.025735
| false
| 0
| 0.018382
| 0
| 0.047794
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7c1396cd459c3ff223c2ce4116413c161f89816c
| 42,404
|
py
|
Python
|
subversion/tests/cmdline/svnauthz_tests.py
|
markphip/subversion
|
b68ad49667ccd4a3fd3083c24909e6fcca4b8348
|
[
"Apache-2.0"
] | null | null | null |
subversion/tests/cmdline/svnauthz_tests.py
|
markphip/subversion
|
b68ad49667ccd4a3fd3083c24909e6fcca4b8348
|
[
"Apache-2.0"
] | 1
|
2016-09-14T18:22:43.000Z
|
2016-09-14T18:22:43.000Z
|
subversion/tests/cmdline/svnauthz_tests.py
|
markphip/subversion
|
b68ad49667ccd4a3fd3083c24909e6fcca4b8348
|
[
"Apache-2.0"
] | 1
|
2020-11-04T07:25:49.000Z
|
2020-11-04T07:25:49.000Z
|
#!/usr/bin/env python
#
# svnauthz_tests.py: testing the 'svnauthz' tool.
#
# Subversion is a tool for revision control.
# See http://subversion.apache.org for more information.
#
# ====================================================================
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
######################################################################
# General modules
import os.path
import tempfile
# Our testing module
import svntest
from svntest import wc
# (abbreviation)
# Shorthand aliases for the svntest decorators and helpers used below.
Skip = svntest.testcase.Skip_deco
SkipUnless = svntest.testcase.SkipUnless_deco
XFail = svntest.testcase.XFail_deco
Issues = svntest.testcase.Issues_deco
Issue = svntest.testcase.Issue_deco
Wimp = svntest.testcase.Wimp_deco
Item = svntest.wc.StateItem
# Run svnauthz commands on commit
# %-template for a pre-commit hook script: the first %s is the repr of the
# svnauthz binary path, the second a repr'd list of (command, options) pairs.
# The hook only logs each command's output and exit code to 'hooks.log'; it
# never blocks the commit (output_command's return value is discarded).
# NOTE(review): the indentation inside the template body appears to have been
# lost in this copy of the file — the generated hook would not parse as
# written; restore the indentation from the upstream source.
hook_template = """import sys,os,subprocess
svnauthz_bin=%s
fp = open(os.path.join(sys.argv[1], 'hooks.log'), 'wb')
def output_command(fp, cmd, opt):
command = [svnauthz_bin, cmd, '-t', sys.argv[2], sys.argv[1]] + opt
process = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=False, bufsize=-1)
(output, errors) = process.communicate()
status = process.returncode
fp.write(output)
fp.write(errors)
fp.write("Exit %%d\\n" %% status)
return status
for (svnauthz_cmd, svnauthz_opt) in %s:
output_command(fp, svnauthz_cmd, svnauthz_opt.split())
fp.close()"""
#----------------------------------------------------------------------
def verify_logfile(logfilename, expected_data, delete_log=True):
    """Compare the hook log file LOGFILENAME against EXPECTED_DATA.

    Reads the log file, deletes it when DELETE_LOG is true, and delegates
    the line comparison to svntest.verify.compare_and_display_lines.

    Raises svntest.verify.SVNUnexpectedOutput if the log file does not
    exist, and whatever compare_and_display_lines raises on a mismatch.
    """
    if not os.path.exists(logfilename):
        raise svntest.verify.SVNUnexpectedOutput("hook logfile %s not found"
                                                 % logfilename)
    # Use a context manager so the handle is closed even if readlines()
    # fails; the original code leaked the open handle on that path.
    with open(logfilename) as fp:
        actual_data = fp.readlines()
    if delete_log:
        os.unlink(logfilename)
    svntest.verify.compare_and_display_lines('wrong hook logfile content',
                                             'HOOKLOG',
                                             expected_data, actual_data)
#----------------------------------------------------------------------
# Note we don't test various different validation failures, the
# validation is actually just done when the file is loaded and
# the library tests for the config file parser and the authz
# parser already validate various failures that return errors.
def svnauthz_validate_file_test(sbox):
  "test 'svnauthz validate' on files"

  # build an authz file
  (authz_fd, authz_path) = tempfile.mkstemp()
  authz_content = "[/]\n* = rw\n"
  svntest.main.file_write(authz_path, authz_content)

  # Valid authz file: expect exit code 0.
  svntest.actions.run_and_verify_svnauthz(None, None,
                                          0, False, "validate", authz_path)

  # Invalid authz file, expect exit code 1, we found the file loaded it
  # but found an error
  svntest.main.file_write(authz_path, 'x\n')
  svntest.actions.run_and_verify_svnauthz(None, None,
                                          1, False, "validate", authz_path)

  # Non-existent authz file
  # exit code 2, operational error since we can't test the file.
  os.close(authz_fd)
  os.remove(authz_path)
  svntest.actions.run_and_verify_svnauthz(None,
                                          None, 2, False, "validate",
                                          authz_path)
@SkipUnless(svntest.main.is_ra_type_file)
def svnauthz_validate_repo_test(sbox):
  "test 'svnauthz validate' on urls"

  sbox.build()
  wc_dir = sbox.wc_dir
  repo_url = sbox.repo_url
  authz_content = "[/]\n* = rw\n"

  # build an authz file and commit it to the repo
  authz_path = os.path.join(wc_dir, 'A', 'authz')
  svntest.main.file_write(authz_path, authz_content)
  svntest.main.run_svn(None, 'add', authz_path)
  expected_output = wc.State(wc_dir, {'A/authz' : Item(verb='Adding')})
  expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
  expected_status.add({
    'A/authz' : Item(status='  ', wc_rev=2),
  })
  svntest.actions.run_and_verify_commit(wc_dir, expected_output,
                                        expected_status)

  # Valid authz url (file stored in repo): expect exit code 0.
  authz_url = repo_url + '/A/authz'
  svntest.actions.run_and_verify_svnauthz(None, None,
                                          0, False, "validate", authz_url)

  # Invalid authz url (again use the iota file in the repo)
  # expect exit code 1, we found the file loaded it but found an error
  iota_url = repo_url + '/iota'
  svntest.actions.run_and_verify_svnauthz(None, None,
                                          1, False, "validate", iota_url)

  # Non-existent authz url
  # exit code 2, operational error since we can't test the file.
  svntest.actions.run_and_verify_svnauthz(None,
                                          None, 2, False, "validate",
                                          repo_url + "/zilch")
def svnauthz_validate_txn_test(sbox):
  "test 'svnauthz validate --transaction'"

  sbox.build()
  wc_dir = sbox.wc_dir
  repo_dir = sbox.repo_dir
  logfilepath = os.path.join(repo_dir, 'hooks.log')

  # Install a pre-commit hook that runs "svnauthz validate -t TXN A/authz"
  # and logs the result.  The hook only logs; it never rejects the commit.
  pre_commit_hook = svntest.main.get_pre_commit_hook_path(repo_dir)
  hook_instance = hook_template % (repr(svntest.main.svnauthz_binary),
                                   repr([('validate', 'A/authz')]))
  svntest.main.create_python_hook_script(pre_commit_hook, hook_instance)

  # Create an authz file
  authz_content = "[/]\n* = rw\n"
  authz_path = os.path.join(wc_dir, 'A/authz')
  svntest.main.file_write(authz_path, authz_content)
  svntest.main.run_svn(None, 'add', authz_path)

  # commit a valid authz file, and check the hook's logfile
  expected_output = wc.State(wc_dir, {'A/authz' : Item(verb='Adding')})
  expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
  expected_status.add({
    'A/authz' : Item(status='  ', wc_rev=2),
  })
  svntest.actions.run_and_verify_commit(wc_dir, expected_output,
                                        expected_status)
  expected_data = ['Exit 0\n']
  verify_logfile(logfilepath, expected_data)

  # Add an invalid line to the authz file.
  svntest.main.file_append(authz_path, 'x')
  expected_output = wc.State(wc_dir, {'A/authz' : Item(verb='Sending')})
  expected_status.tweak('A/authz', status='  ', wc_rev=3)
  svntest.actions.run_and_verify_commit(wc_dir, expected_output,
                                        expected_status)
  # Keep the log around for the second check below.
  expected_data = svntest.verify.RegexOutput(".*?Error parsing authz file: '.*?'",
                                             match_all=False)
  verify_logfile(logfilepath, expected_data, delete_log=False)
  # Check the logfile that our Exit was 1 too
  expected_data = svntest.verify.ExpectedOutput("Exit 1\n", match_all=False)
  verify_logfile(logfilepath, expected_data)

  # Validate a file that doesn't exist and make sure we're exiting with 2.
  hook_instance = hook_template % (repr(svntest.main.svnauthz_binary),
                                   repr([('validate', 'zilch')]))
  svntest.main.create_python_hook_script(pre_commit_hook, hook_instance)
  svntest.main.file_append(authz_path, 'x')
  expected_status.tweak('A/authz', status='  ', wc_rev=4)
  # run_and_verify_commit returns nothing on success; a truthy value here
  # signals a failed verification.
  if svntest.actions.run_and_verify_commit(wc_dir, expected_output,
                                           expected_status):
    raise svntest.Failure
  expected_data = svntest.verify.ExpectedOutput("Exit 2\n", match_all=False)
  verify_logfile(logfilepath, expected_data)
def svnauthz_accessof_file_test(sbox):
  "test 'svnauthz accessof' on files"

  # build an authz file
  (authz_fd, authz_path) = tempfile.mkstemp()
  authz_content = "[/]\ngroucho = \ngallagher = rw\n* = r\n" + \
      "[/bios]\n* = rw\n" + \
      "[comedy:/jokes]\ngroucho = rw\n" + \
      "[slapstick:/jokes]\n* =\n"
  svntest.main.file_write(authz_path, authz_content)

  # Anonymous access with no path, and no repository should be rw
  # since it returns the highest level of access granted anywhere.
  # So /bios being rw for everyone means this will be rw.
  svntest.actions.run_and_verify_svnauthz(["rw\n"], None,
                                          0, False, "accessof", authz_path)

  # Anonymous access on /jokes should be r, no repo so won't match
  # the slapstick:/jokes section.
  svntest.actions.run_and_verify_svnauthz(["r\n"], None, 0, False, "accessof",
                                          authz_path, "--path", "/jokes")

  # Anonymous access on /jokes on slapstick repo should be no
  svntest.actions.run_and_verify_svnauthz(["no\n"], None, 0, False, "accessof",
                                          authz_path, "--path", "/jokes",
                                          "--repository", "slapstick")

  # User access with no path, and no repository should be rw
  # since it returns the highest level of access anywhere.
  # So /bios being rw for everyone means this will be rw.
  svntest.actions.run_and_verify_svnauthz(["rw\n"], None,
                                          0, False, "accessof", authz_path,
                                          "--username", "groucho")

  # User groucho specified on /jokes with no repo, will not match any of the
  # repo specific sections, so is r since everyone has read access.
  svntest.actions.run_and_verify_svnauthz(["r\n"], None,
                                          0, False, "accessof", authz_path,
                                          "--path", "/jokes", "--username",
                                          "groucho")

  # User groucho specified on /jokes with the repo comedy will be rw
  svntest.actions.run_and_verify_svnauthz(["rw\n"], None, 0, False, "accessof",
                                          authz_path, "--path", "/jokes",
                                          "--username", "groucho",
                                          "--repository", "comedy")

  # Clean up the temporary authz file.
  os.close(authz_fd)
  os.remove(authz_path)
@SkipUnless(svntest.main.is_ra_type_file)
def svnauthz_accessof_repo_test(sbox):
  "test 'svnauthz accessof' on urls"

  sbox.build()
  wc_dir = sbox.wc_dir
  repo_url = sbox.repo_url
  authz_content = "[/]\ngroucho = \ngallagher = rw\n* = r\n" + \
      "[/bios]\n* = rw\n" + \
      "[comedy:/jokes]\ngroucho = rw\n" + \
      "[slapstick:/jokes]\n* =\n"

  # build an authz file and commit it to the repo
  authz_path = os.path.join(wc_dir, 'A', 'authz')
  svntest.main.file_write(authz_path, authz_content)
  svntest.main.run_svn(None, 'add', authz_path)
  expected_output = wc.State(wc_dir, {'A/authz' : Item(verb='Adding')})
  expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
  expected_status.add({
    'A/authz' : Item(status='  ', wc_rev=2),
  })
  # run_and_verify_commit returns nothing on success; a truthy value here
  # signals a failed verification.
  if svntest.actions.run_and_verify_commit(wc_dir, expected_output,
                                           expected_status):
    raise svntest.Failure

  # Anonymous access with no path, and no repository should be rw
  # since it returns the highest level of access granted anywhere.
  # So /bios being rw for everyone means this will be rw.
  authz_url = repo_url + "/A/authz"
  svntest.actions.run_and_verify_svnauthz(["rw\n"], None,
                                          0, False, "accessof", authz_url)

  # Anonymous access on /jokes should be r, no repo so won't match
  # the slapstick:/jokes section.
  svntest.actions.run_and_verify_svnauthz(["r\n"], None, 0, False, "accessof",
                                          authz_url, "--path", "/jokes")

  # Anonymous access on /jokes on slapstick repo should be no
  svntest.actions.run_and_verify_svnauthz(["no\n"], None, 0, False, "accessof",
                                          authz_url, "--path", "/jokes",
                                          "--repository", "slapstick")

  # User access with no path, and no repository should be rw
  # since it returns the highest level of access anywhere.
  # So /bios being rw for everyone means this will be rw.
  svntest.actions.run_and_verify_svnauthz(["rw\n"], None,
                                          0, False, "accessof", authz_url,
                                          "--username", "groucho")

  # User groucho specified on /jokes with no repo, will not match any of the
  # repo specific sections, so is r since everyone has read access.
  svntest.actions.run_and_verify_svnauthz(["r\n"], None,
                                          0, False, "accessof", authz_url,
                                          "--path", "/jokes", "--username",
                                          "groucho")

  # User groucho specified on /jokes with the repo comedy will be rw
  svntest.actions.run_and_verify_svnauthz(["rw\n"], None, 0, False, "accessof",
                                          authz_url, "--path", "/jokes",
                                          "--username", "groucho",
                                          "--repository", "comedy")
def svnauthz_accessof_groups_file_test(sbox):
    "test 'svnauthz accessof --groups-file' on files"

    # Build an authz file: @musicians get rw and @comedians get nothing at
    # the root, inverted for /jokes in the "comedy" repository.
    (authz_fd, authz_path) = tempfile.mkstemp()
    authz_content = "[/]\n@musicians = rw\n@comedians = \n" + \
                    "[comedy:/jokes]\n@musicians = \n@comedians = r\n"
    svntest.main.file_write(authz_path, authz_content)

    # Build a groups file mapping the test users onto those groups.
    (groups_fd, groups_path) = tempfile.mkstemp()
    groups_content = "[groups]\nmusicians=stafford\ncomedians=groucho\n"
    svntest.main.file_write(groups_path, groups_content)

    # Clean the temp files up even when a verification below raises, so a
    # failing run does not leak files in the temp directory.
    try:
        # Anonymous access with no path, and no repository should be no
        # since it returns the highest level of access granted anywhere.
        svntest.actions.run_and_verify_svnauthz(["no\n"], None,
                                                0, False, "accessof",
                                                authz_path,
                                                "--groups-file", groups_path)
        # User stafford (@musicians) access with no path, and no repository
        # should be rw since it returns the highest level of access granted
        # anywhere.
        svntest.actions.run_and_verify_svnauthz(["rw\n"], None,
                                                0, False, "accessof",
                                                authz_path,
                                                "--groups-file", groups_path,
                                                "--username", "stafford")
        # User groucho (@comedians) access with no path, and no repository
        # should be no since it returns the highest level of access granted
        # anywhere.
        svntest.actions.run_and_verify_svnauthz(["no\n"], None,
                                                0, False, "accessof",
                                                authz_path,
                                                "--groups-file", groups_path,
                                                "--username", "groucho")
        # Anonymous access specified on /jokes with the repo comedy will be
        # no.
        svntest.actions.run_and_verify_svnauthz(["no\n"], None, 0, False,
                                                "accessof", authz_path,
                                                "--groups-file", groups_path,
                                                "--path", "jokes",
                                                "--repository", "comedy")
        # User stafford (@musicians) specified on /jokes with the repo
        # comedy will be no.
        svntest.actions.run_and_verify_svnauthz(["no\n"], None,
                                                0, False, "accessof",
                                                authz_path,
                                                "--groups-file", groups_path,
                                                "--path", "jokes",
                                                "--repository", "comedy",
                                                "--username", "stafford")
        # User groucho (@comedians) specified on /jokes with the repo
        # comedy will be r.
        svntest.actions.run_and_verify_svnauthz(["r\n"], None,
                                                0, False, "accessof",
                                                authz_path,
                                                "--groups-file", groups_path,
                                                "--path", "jokes",
                                                "--repository", "comedy",
                                                "--username", "groucho")
    finally:
        os.close(authz_fd)
        os.remove(authz_path)
        os.close(groups_fd)
        os.remove(groups_path)
@SkipUnless(svntest.main.is_ra_type_file)
def svnauthz_accessof_groups_repo_test(sbox):
    "test 'svnauthz accessof --groups-file' on urls"

    sbox.build()
    wc_dir = sbox.wc_dir
    repo_url = sbox.repo_url

    # Same rules as the file-based variant: @musicians rw / @comedians
    # nothing at the root, inverted for /jokes in the comedy repository.
    authz_content = "[/]\n@musicians = rw\n@comedians = \n" + \
                    "[comedy:/jokes]\n@musicians = \n@comedians = r\n"
    groups_content = "[groups]\nmusicians=stafford\ncomedians=groucho\n"

    # build authz and groups files and commit them to the repo
    authz_path = os.path.join(wc_dir, 'A', 'authz')
    groups_path = os.path.join(wc_dir, 'A', 'groups')
    svntest.main.file_write(authz_path, authz_content)
    svntest.main.file_write(groups_path, groups_content)
    svntest.main.run_svn(None, 'add', authz_path, groups_path)
    expected_output = wc.State(wc_dir, {
        'A/authz' : Item(verb='Adding'),
        'A/groups' : Item(verb='Adding'),
    })
    expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
    expected_status.add({
        'A/authz' : Item(status='  ', wc_rev=2),
        'A/groups' : Item(status='  ', wc_rev=2),
    })
    svntest.actions.run_and_verify_commit(wc_dir, expected_output,
                                          expected_status)

    # Both the authz and the groups file are now addressed by URL.
    authz_url = repo_url + "/A/authz"
    groups_url = repo_url + "/A/groups"

    # Anonymous access with no path, and no repository should be no
    # since it returns the highest level of access granted anywhere.
    svntest.actions.run_and_verify_svnauthz(["no\n"], None,
                                            0, False, "accessof", authz_url,
                                            "--groups-file", groups_url)
    # User stafford (@musicians) access with no path, and no repository
    # should be rw since it returns the highest level of access granted
    # anywhere.
    svntest.actions.run_and_verify_svnauthz(["rw\n"], None,
                                            0, False, "accessof", authz_url,
                                            "--groups-file", groups_url,
                                            "--username", "stafford")
    # User groucho (@comedians) access with no path, and no repository
    # should be no since it returns the highest level of access granted
    # anywhere.
    svntest.actions.run_and_verify_svnauthz(["no\n"], None,
                                            0, False, "accessof", authz_url,
                                            "--groups-file", groups_url,
                                            "--username", "groucho")
    # Anonymous access specified on /jokes with the repo comedy will be no.
    svntest.actions.run_and_verify_svnauthz(["no\n"], None, 0, False,
                                            "accessof", authz_url,
                                            "--groups-file", groups_url,
                                            "--path", "jokes",
                                            "--repository", "comedy")
    # User stafford (@musicians) specified on /jokes with the repo comedy
    # will be no.
    svntest.actions.run_and_verify_svnauthz(["no\n"], None,
                                            0, False, "accessof", authz_url,
                                            "--groups-file", groups_url,
                                            "--path", "jokes",
                                            "--repository", "comedy",
                                            "--username", "stafford")
    # User groucho (@comedians) specified on /jokes with the repo
    # comedy will be r.
    svntest.actions.run_and_verify_svnauthz(["r\n"], None,
                                            0, False, "accessof", authz_url,
                                            "--groups-file", groups_url,
                                            "--path", "jokes",
                                            "--repository", "comedy",
                                            "--username", "groucho")
def svnauthz_accessof_is_file_test(sbox):
    "test 'svnauthz accessof --is' on files"

    # build an authz file
    (authz_fd, authz_path) = tempfile.mkstemp()
    authz_content = "[/]\ngroucho = \ngallagher = rw\n* = r\n" + \
                    "[/bios]\n* = rw\n" + \
                    "[comedy:/jokes]\ngroucho = rw\n" + \
                    "[slapstick:/jokes]\n* =\n"
    svntest.main.file_write(authz_path, authz_content)

    # Remove the temp file even when a verification below raises, so a
    # failing run does not leak files in the temp directory.
    try:
        # Test an invalid --is option, should get an error message and exit
        # code of 2.
        expected_output = svntest.verify.RegexOutput(
            ".*'x' is not a valid argument for --is", match_all=False
        )
        svntest.actions.run_and_verify_svnauthz(None,
                                                expected_output, 2, False,
                                                "accessof", authz_path,
                                                "--is", "x")

        # Anonymous access with no path, and no repository should be rw
        # since it returns the highest level of access granted anywhere.
        # So /bios being rw for everyone means this will be rw.
        # --is rw returns 0; --is r and --is no return 3.
        svntest.actions.run_and_verify_svnauthz(None,
                                                None, 0, False, "accessof",
                                                authz_path, "--is", "rw")
        svntest.actions.run_and_verify_svnauthz(None,
                                                None, 3, False, "accessof",
                                                authz_path, "--is", "r")
        svntest.actions.run_and_verify_svnauthz(None,
                                                None, 3, False, "accessof",
                                                authz_path, "--is", "no")

        # Anonymous access on /jokes should be r, no repo so won't match
        # the slapstick:/jokes section.
        # --is r returns 0; --is rw and --is no return 3.
        svntest.actions.run_and_verify_svnauthz(None, None, 0, False,
                                                "accessof", authz_path,
                                                "--path", "/jokes",
                                                "--is", "r")
        svntest.actions.run_and_verify_svnauthz(None, None, 3, False,
                                                "accessof", authz_path,
                                                "--path", "/jokes",
                                                "--is", "rw")
        svntest.actions.run_and_verify_svnauthz(None, None, 3, False,
                                                "accessof", authz_path,
                                                "--path", "/jokes",
                                                "--is", "no")

        # Anonymous access on /jokes on slapstick repo should be no.
        # --is no returns 0; --is rw and --is r return 3.
        svntest.actions.run_and_verify_svnauthz(None, None, 0, False,
                                                "accessof", authz_path,
                                                "--path", "/jokes",
                                                "--repository", "slapstick",
                                                "--is", "no")
        svntest.actions.run_and_verify_svnauthz(None, None, 3, False,
                                                "accessof", authz_path,
                                                "--path", "/jokes",
                                                "--repository", "slapstick",
                                                "--is", "rw")
        svntest.actions.run_and_verify_svnauthz(None, None, 3, False,
                                                "accessof", authz_path,
                                                "--path", "/jokes",
                                                "--repository", "slapstick",
                                                "--is", "r")

        # User access with no path, and no repository should be rw
        # since it returns the highest level of access anywhere.
        # So /bios being rw for everyone means this will be rw.
        # --is rw returns 0; --is r and --is no return 3.
        svntest.actions.run_and_verify_svnauthz(None, None,
                                                0, False, "accessof",
                                                authz_path,
                                                "--username", "groucho",
                                                "--is", "rw")
        svntest.actions.run_and_verify_svnauthz(None, None,
                                                3, False, "accessof",
                                                authz_path,
                                                "--username", "groucho",
                                                "--is", "r")
        svntest.actions.run_and_verify_svnauthz(None, None,
                                                3, False, "accessof",
                                                authz_path,
                                                "--username", "groucho",
                                                "--is", "no")

        # User groucho specified on /jokes with no repo, will not match any
        # of the repo specific sections, so is r since everyone has read
        # access.
        # --is r returns 0; --is rw and --is no return 3.
        svntest.actions.run_and_verify_svnauthz(None,
                                                None, 0, False, "accessof",
                                                authz_path,
                                                "--path", "/jokes",
                                                "--username", "groucho",
                                                "--is", "r")
        svntest.actions.run_and_verify_svnauthz(None,
                                                None, 3, False, "accessof",
                                                authz_path,
                                                "--path", "/jokes",
                                                "--username", "groucho",
                                                "--is", "rw")
        svntest.actions.run_and_verify_svnauthz(None,
                                                None, 3, False, "accessof",
                                                authz_path,
                                                "--path", "/jokes",
                                                "--username", "groucho",
                                                "--is", "no")

        # User groucho specified on /jokes with the repo comedy will be rw.
        # --is rw returns 0; --is r and --is no return 3.
        svntest.actions.run_and_verify_svnauthz(None, None, 0, False,
                                                "accessof", authz_path,
                                                "--path", "/jokes",
                                                "--username", "groucho",
                                                "--repository", "comedy",
                                                "--is", "rw")
        svntest.actions.run_and_verify_svnauthz(None, None, 3, False,
                                                "accessof", authz_path,
                                                "--path", "/jokes",
                                                "--username", "groucho",
                                                "--repository", "comedy",
                                                "--is", "r")
        svntest.actions.run_and_verify_svnauthz(None, None, 3, False,
                                                "accessof", authz_path,
                                                "--path", "/jokes",
                                                "--username", "groucho",
                                                "--repository", "comedy",
                                                "--is", "no")

        # Add an invalid line to the authz file.
        svntest.main.file_append(authz_path, "x\n")
        # Check that --is returns 1 when the syntax is invalid with a file.
        expected_out = svntest.verify.RegexOutput(
            ".*Error while parsing config file:",
            match_all=False
        )
        svntest.actions.run_and_verify_svnauthz(None, expected_out, 1, False,
                                                "accessof", authz_path,
                                                "--path", "/jokes",
                                                "--username", "groucho",
                                                "--repository", "comedy",
                                                "--is", "rw")
    finally:
        os.close(authz_fd)
        os.remove(authz_path)
@SkipUnless(svntest.main.is_ra_type_file)
def svnauthz_accessof_is_repo_test(sbox):
    "test 'svnauthz accessof --is' on files and urls"

    sbox.build()
    wc_dir = sbox.wc_dir
    repo_url = sbox.repo_url

    # Same authz rules as the file-based variant, but committed to the
    # repository and accessed through a URL.
    authz_content = "[/]\ngroucho = \ngallagher = rw\n* = r\n" + \
                    "[/bios]\n* = rw\n" + \
                    "[comedy:/jokes]\ngroucho = rw\n" + \
                    "[slapstick:/jokes]\n* =\n"

    # build an authz file and commit it to the repo
    authz_path = os.path.join(wc_dir, 'A', 'authz')
    svntest.main.file_write(authz_path, authz_content)
    svntest.main.run_svn(None, 'add', authz_path)
    expected_output = wc.State(wc_dir, {'A/authz' : Item(verb='Adding')})
    expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
    expected_status.add({
        'A/authz' : Item(status='  ', wc_rev=2),
    })
    svntest.actions.run_and_verify_commit(wc_dir, expected_output,
                                          expected_status)

    # Test an invalid --is option, should get an error message and exit
    # code of 2.
    authz_url = repo_url + "/A/authz"
    expected_output = svntest.verify.RegexOutput(
        ".*'x' is not a valid argument for --is", match_all=False
    )
    svntest.actions.run_and_verify_svnauthz(None,
                                            expected_output, 2, False,
                                            "accessof", authz_url,
                                            "--is", "x")

    # Anonymous access with no path, and no repository should be rw
    # since it returns the highest level of access granted anywhere.
    # So /bios being rw for everyone means this will be rw.
    # Test --is rw returns 0.
    svntest.actions.run_and_verify_svnauthz(None,
                                            None, 0, False, "accessof",
                                            authz_url, "--is", "rw")
    # Test --is r returns 3.
    svntest.actions.run_and_verify_svnauthz(None,
                                            None, 3, False, "accessof",
                                            authz_url, "--is", "r")
    # Test --is no returns 3.
    svntest.actions.run_and_verify_svnauthz(None,
                                            None, 3, False, "accessof",
                                            authz_url, "--is", "no")

    # Anonymous access on /jokes should be r, no repo so won't match
    # the slapstick:/jokes section.
    # Test --is r returns 0.
    svntest.actions.run_and_verify_svnauthz(None, None, 0, False, "accessof",
                                            authz_url, "--path", "/jokes",
                                            "--is", "r")
    # Test --is rw returns 3.
    svntest.actions.run_and_verify_svnauthz(None, None, 3, False, "accessof",
                                            authz_url, "--path", "/jokes",
                                            "--is", "rw")
    # Test --is no returns 3.
    svntest.actions.run_and_verify_svnauthz(None, None, 3, False, "accessof",
                                            authz_url, "--path", "/jokes",
                                            "--is", "no")

    # Anonymous access on /jokes on slapstick repo should be no
    # Test --is no returns 0.
    svntest.actions.run_and_verify_svnauthz(None, None, 0, False, "accessof",
                                            authz_url, "--path", "/jokes",
                                            "--repository", "slapstick",
                                            "--is", "no")
    # Test --is rw returns 3.
    svntest.actions.run_and_verify_svnauthz(None, None, 3, False, "accessof",
                                            authz_url, "--path", "/jokes",
                                            "--repository", "slapstick",
                                            "--is", "rw")
    # Test --is r returns 3.
    svntest.actions.run_and_verify_svnauthz(None, None, 3, False, "accessof",
                                            authz_url, "--path", "/jokes",
                                            "--repository", "slapstick",
                                            "--is", "r")

    # User access with no path, and no repository should be rw
    # since it returns the highest level of access anywhere.
    # So /bios being rw for everyone means this will be rw.
    # Test --is rw returns 0.
    svntest.actions.run_and_verify_svnauthz(None, None,
                                            0, False, "accessof", authz_url,
                                            "--username", "groucho", "--is",
                                            "rw")
    # Test --is r returns 3.
    svntest.actions.run_and_verify_svnauthz(None, None,
                                            3, False, "accessof", authz_url,
                                            "--username", "groucho", "--is",
                                            "r")
    # Test --is no returns 3.
    svntest.actions.run_and_verify_svnauthz(None, None,
                                            3, False, "accessof", authz_url,
                                            "--username", "groucho", "--is",
                                            "no")

    # User groucho specified on /jokes with no repo, will not match any of
    # the repo specific sections, so is r since everyone has read access.
    # Test --is r returns 0.
    svntest.actions.run_and_verify_svnauthz(None,
                                            None, 0, False, "accessof",
                                            authz_url, "--path", "/jokes",
                                            "--username", "groucho",
                                            "--is", "r")
    # Test --is rw returns 3.
    svntest.actions.run_and_verify_svnauthz(None,
                                            None, 3, False, "accessof",
                                            authz_url, "--path", "/jokes",
                                            "--username", "groucho",
                                            "--is", "rw")
    # Test --is no returns 3.
    svntest.actions.run_and_verify_svnauthz(None,
                                            None, 3, False, "accessof",
                                            authz_url, "--path", "/jokes",
                                            "--username", "groucho",
                                            "--is", "no")

    # User groucho specified on /jokes with the repo comedy will be rw
    # Test --is rw returns 0.
    svntest.actions.run_and_verify_svnauthz(None, None, 0, False, "accessof",
                                            authz_url, "--path", "/jokes",
                                            "--username", "groucho",
                                            "--repository", "comedy", "--is",
                                            "rw")
    # Test --is r returns 3.
    svntest.actions.run_and_verify_svnauthz(None, None, 3, False, "accessof",
                                            authz_url, "--path", "/jokes",
                                            "--username", "groucho",
                                            "--repository", "comedy", "--is",
                                            "r")
    # Test --is no returns 3.
    svntest.actions.run_and_verify_svnauthz(None, None, 3, False, "accessof",
                                            authz_url, "--path", "/jokes",
                                            "--username", "groucho",
                                            "--repository", "comedy", "--is",
                                            "no")

    # Add an invalid line to the authz file and commit it so the URL now
    # points at broken syntax.
    svntest.main.file_append(authz_path, "x\n")
    expected_output = wc.State(wc_dir, {'A/authz' : Item(verb='Sending')})
    expected_status.tweak('A/authz', wc_rev=3)
    svntest.actions.run_and_verify_commit(wc_dir, expected_output,
                                          expected_status)
    # Check that --is returns 1 when the syntax is invalid with a url.
    expected_out = svntest.verify.RegexOutput(
        ".*Error while parsing config file:",
        match_all=False
    )
    svntest.actions.run_and_verify_svnauthz(None, expected_out, 1, False,
                                            "accessof", authz_url, "--path",
                                            "/jokes", "--username", "groucho",
                                            "--repository", "comedy", "--is",
                                            "rw")
def svnauthz_accessof_txn_test(sbox):
    "test 'svnauthz accessof --transaction'"

    sbox.build()
    wc_dir = sbox.wc_dir
    repo_dir = sbox.repo_dir
    logfilepath = os.path.join(repo_dir, 'hooks.log')

    # Install a pre-commit hook that runs 'svnauthz accessof --is rw' on the
    # A/authz path of the incoming transaction and logs the exit code.
    pre_commit_hook = svntest.main.get_pre_commit_hook_path(repo_dir)
    hook_instance = hook_template % (repr(svntest.main.svnauthz_binary),
                                     repr([('accessof',
                                            '--is rw A/authz')]))
    svntest.main.create_python_hook_script(pre_commit_hook, hook_instance)

    # Create an authz file
    authz_content = "[/]\n* = rw\n"
    authz_path = os.path.join(wc_dir, 'A/authz')
    svntest.main.file_write(authz_path, authz_content)
    svntest.main.run_svn(None, 'add', authz_path)

    # Only really testing the exit value code paths.

    # commit a valid authz file, and run --is rw which is true.
    # Should get an exit of 0.
    expected_output = wc.State(wc_dir, {'A/authz' : Item(verb='Adding')})
    expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
    expected_status.add({
        'A/authz' : Item(status='  ', wc_rev=2),
    })
    svntest.actions.run_and_verify_commit(wc_dir, expected_output,
                                          expected_status)
    expected_data = ['Exit 0\n']
    verify_logfile(logfilepath, expected_data)

    # commit a valid authz file, and run --is r which is false
    # Should get an exit of 3.
    hook_instance = hook_template % (repr(svntest.main.svnauthz_binary),
                                     repr([('accessof',
                                            '--is r A/authz')]))
    svntest.main.create_python_hook_script(pre_commit_hook, hook_instance)
    expected_output = wc.State(wc_dir, {'A/authz' : Item(verb='Sending')})
    expected_status.tweak('A/authz', status='  ', wc_rev=3)
    svntest.main.file_append(authz_path, "groucho = r\n")
    svntest.actions.run_and_verify_commit(wc_dir, expected_output,
                                          expected_status)
    expected_data = svntest.verify.ExpectedOutput('Exit 3\n', match_all=False)
    verify_logfile(logfilepath, expected_data)

    # break the authz file with a reference to a non-existent group and
    # check for an exit 1.
    expected_status.tweak('A/authz', status='  ', wc_rev=4)
    svntest.main.file_append(authz_path, "@friends = rw\n")
    svntest.actions.run_and_verify_commit(wc_dir, expected_output,
                                          expected_status)
    expected_data = svntest.verify.ExpectedOutput('Exit 1\n', match_all=False)
    verify_logfile(logfilepath, expected_data)

    # delete the authz file entirely and check for an exit 2 (the A/authz
    # path the hook checks no longer exists in the transaction).
    expected_output = wc.State(wc_dir, {'A/authz' : Item(verb='Deleting')})
    expected_status.remove('A/authz')
    svntest.main.run_svn(None, 'rm', authz_path)
    svntest.actions.run_and_verify_commit(wc_dir, expected_output,
                                          expected_status)
    expected_data = svntest.verify.ExpectedOutput('Exit 2\n', match_all=False)
    verify_logfile(logfilepath, expected_data)
def svnauthz_compat_mode_file_test(sbox):
    "test 'svnauthz-validate' compatibility mode file"

    # Create an authz file
    (authz_fd, authz_path) = tempfile.mkstemp()
    authz_content = "[/]\n* = rw\n"
    svntest.main.file_write(authz_path, authz_content)

    # Close the fd and remove the file even when a check raises, so failed
    # runs don't leak the temp file.  Removal is also a precondition for the
    # non-existent-file check at the end.
    try:
        # Check a valid file.
        svntest.actions.run_and_verify_svnauthz(None, None, 0, True,
                                                authz_path)
        # Check an invalid file.
        svntest.main.file_append(authz_path, "x\n")
        svntest.actions.run_and_verify_svnauthz(None, None, 1, True,
                                                authz_path)
    finally:
        # Remove the file.
        os.close(authz_fd)
        os.remove(authz_path)

    # Check a non-existent file.
    svntest.actions.run_and_verify_svnauthz(
        None, None, 2, True,
        authz_path
    )
@SkipUnless(svntest.main.is_ra_type_file)
def svnauthz_compat_mode_repo_test(sbox):
    "test 'svnauthz-validate' compatibility mode url"

    sbox.build()
    work_dir = sbox.wc_dir
    root_url = sbox.repo_url

    # Write an authz file into the working copy and note its eventual URL.
    content = "[/]\n* = rw\n"
    file_path = os.path.join(work_dir, 'A/authz')
    svntest.main.file_write(file_path, content)
    file_url = root_url + '/A/authz'

    # Commit the file, then validate it through its URL: exit 0.
    svntest.main.run_svn(None, 'add', file_path)
    exp_out = wc.State(work_dir, {'A/authz' : Item(verb='Adding')})
    exp_status = svntest.actions.get_virginal_state(work_dir, 1)
    exp_status.add({'A/authz' : Item(status='  ', wc_rev=2)})
    svntest.actions.run_and_verify_commit(work_dir, exp_out, exp_status)
    svntest.actions.run_and_verify_svnauthz(None, None, 0, True,
                                            file_url)

    # Append a bogus line and commit; validation now fails with exit 1.
    svntest.main.file_append(file_path, "x\n")
    exp_out = wc.State(work_dir, {'A/authz' : Item(verb='Sending')})
    exp_status.tweak('A/authz', status='  ', wc_rev=3)
    svntest.actions.run_and_verify_commit(work_dir, exp_out, exp_status)
    svntest.actions.run_and_verify_svnauthz(None, None, 1, True,
                                            file_path)

    # Check a non-existent url.
    # Exit code really should be 2 since this is an operational error.
    svntest.actions.run_and_verify_svnauthz(None, None, 2, True,
                                            root_url + "/zilch")
########################################################################
# Run the tests

# list all tests here, starting with None:
test_list = [ None,
              svnauthz_validate_file_test,
              svnauthz_validate_repo_test,
              svnauthz_validate_txn_test,
              svnauthz_accessof_file_test,
              svnauthz_accessof_repo_test,
              svnauthz_accessof_groups_file_test,
              svnauthz_accessof_groups_repo_test,
              svnauthz_accessof_is_file_test,
              svnauthz_accessof_is_repo_test,
              svnauthz_accessof_txn_test,
              svnauthz_compat_mode_file_test,
              svnauthz_compat_mode_repo_test,
            ]

if __name__ == '__main__':
    # run_tests() does not return; execution ends inside it.
    svntest.main.run_tests(test_list)
    # NOTREACHED


### End of file.
| 45.743258
| 133
| 0.544406
| 4,811
| 42,404
| 4.614633
| 0.068385
| 0.061168
| 0.068916
| 0.081077
| 0.857484
| 0.839377
| 0.823161
| 0.817801
| 0.796045
| 0.787937
| 0
| 0.005802
| 0.337515
| 42,404
| 926
| 134
| 45.792657
| 0.784494
| 0.211301
| 0
| 0.76771
| 0
| 0.004942
| 0.158139
| 0.014793
| 0
| 0
| 0
| 0
| 0
| 1
| 0.021417
| false
| 0
| 0.008237
| 0
| 0.031301
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7c1841f21854443c3e1f1b4b792937ef92697218
| 255
|
py
|
Python
|
apps/dbd/models.py
|
TryNeo/sistema-gestion-pedidos
|
d36d13aa6ddfb6d74b543a58531ed7afe8383e9d
|
[
"MIT"
] | null | null | null |
apps/dbd/models.py
|
TryNeo/sistema-gestion-pedidos
|
d36d13aa6ddfb6d74b543a58531ed7afe8383e9d
|
[
"MIT"
] | 5
|
2020-08-12T16:55:36.000Z
|
2021-09-22T19:34:10.000Z
|
apps/dbd/models.py
|
TryNeo/sistema-gestion-pedidos
|
d36d13aa6ddfb6d74b543a58531ed7afe8383e9d
|
[
"MIT"
] | 2
|
2020-11-04T01:27:47.000Z
|
2022-02-24T14:26:26.000Z
|
from django.db import models
from apps.dbd.modelos.estructura_model_catalogo import *
from apps.dbd.modelos.estructura_model_proveedor import *
from apps.dbd.modelos.estructura_model_producto import *
from apps.dbd.modelos.estructura_model_pedido import *
| 51
| 57
| 0.858824
| 37
| 255
| 5.702703
| 0.378378
| 0.151659
| 0.208531
| 0.341232
| 0.7109
| 0.7109
| 0.554502
| 0
| 0
| 0
| 0
| 0
| 0.07451
| 255
| 5
| 58
| 51
| 0.894068
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
7c37182813e5e9306ed02e3db7a167d743e3ad3c
| 18,414
|
py
|
Python
|
tests/garage/tf/models/test_gaussian_gru_model.py
|
fangqyi/garage
|
ddafba385ef005f46f913ab352f9638760e5b412
|
[
"MIT"
] | 1
|
2021-03-02T08:43:20.000Z
|
2021-03-02T08:43:20.000Z
|
tests/garage/tf/models/test_gaussian_gru_model.py
|
fangqyi/garage
|
ddafba385ef005f46f913ab352f9638760e5b412
|
[
"MIT"
] | null | null | null |
tests/garage/tf/models/test_gaussian_gru_model.py
|
fangqyi/garage
|
ddafba385ef005f46f913ab352f9638760e5b412
|
[
"MIT"
] | null | null | null |
import pickle
from unittest import mock
import numpy as np
import pytest
import tensorflow as tf
import tensorflow_probability as tfp
from garage.tf.models import GaussianGRUModel
from tests.fixtures import TfGraphTestCase
from tests.helpers import recurrent_step_gru
class TestGaussianGRUModel(TfGraphTestCase):
def setup_method(self):
    """Create the placeholders and constants shared by every test."""
    super().setup_method()
    self.batch_size = 1
    self.time_step = 2
    self.feature_shape = 2
    # Every weight matrix is initialized to the constant 0.1 so the tests
    # below can recompute expected outputs by hand.
    self.default_initializer = tf.constant_initializer(0.1)
    # Full-sequence observations of shape (batch, time, feature) and a
    # single-step observation of shape (batch, feature), both all-ones.
    self.obs_inputs = np.full(
        (self.batch_size, self.time_step, self.feature_shape), 1.)
    self.obs_input = np.full((self.batch_size, self.feature_shape), 1.)
    # Placeholders for the full-sequence and single-step inputs.
    self.input_var = tf.compat.v1.placeholder(tf.float32,
                                              shape=(None, None,
                                                     self.feature_shape),
                                              name='input')
    self.step_input_var = tf.compat.v1.placeholder(
        tf.float32, shape=(None, self.feature_shape), name='step_input')
def test_dist(self):
    """The built network exposes a MultivariateNormalDiag distribution."""
    gru_model = GaussianGRUModel(output_dim=1, hidden_dim=1)
    hidden_ph = tf.compat.v1.placeholder(dtype=tf.float32,
                                         shape=(self.batch_size, 1),
                                         name='step_hidden')
    gru_model.build(self.input_var, self.step_input_var, hidden_ph)
    dist = gru_model.networks['default'].dist
    assert isinstance(dist, tfp.distributions.MultivariateNormalDiag)
# yapf: disable
@pytest.mark.parametrize('output_dim, hidden_dim', [
    (1, 1),
    (2, 2),
    (3, 3)
])
# yapf: enable
@mock.patch('tensorflow.random.normal')
def test_std_share_network_output_values(self, mock_normal, output_dim,
                                         hidden_dim):
    """Step outputs match a hand-computed GRU reference (shared std net)."""
    # Pin the sampling noise so the run is deterministic.
    mock_normal.return_value = 0.5
    # No nonlinearities and constant 0.1 weights make the expected outputs
    # computable as a plain matrix product below.
    model = GaussianGRUModel(output_dim=output_dim,
                             hidden_dim=hidden_dim,
                             std_share_network=True,
                             hidden_nonlinearity=None,
                             recurrent_nonlinearity=None,
                             hidden_w_init=self.default_initializer,
                             recurrent_w_init=self.default_initializer,
                             output_w_init=self.default_initializer)
    step_hidden_var = tf.compat.v1.placeholder(shape=(self.batch_size,
                                                      hidden_dim),
                                               name='step_hidden',
                                               dtype=tf.float32)
    (_, step_mean_var, step_log_std_var, step_hidden,
     hidden_init_var) = model.build(self.input_var, self.step_input_var,
                                    step_hidden_var)
    # hidden1 tracks the model's hidden state, hidden2 the NumPy reference.
    hidden1 = hidden2 = np.full((self.batch_size, hidden_dim),
                                hidden_init_var.eval())
    for _ in range(self.time_step):
        mean1, log_std1, hidden1 = self.sess.run(
            [step_mean_var, step_log_std_var, step_hidden],
            feed_dict={
                self.step_input_var: self.obs_input,
                step_hidden_var: hidden1
            })
        # Advance the reference GRU with the same constant weights.
        hidden2 = recurrent_step_gru(input_val=self.obs_input,
                                     num_units=hidden_dim,
                                     step_hidden=hidden2,
                                     w_x_init=0.1,
                                     w_h_init=0.1,
                                     b_init=0.,
                                     nonlinearity=None,
                                     gate_nonlinearity=None)

        output_nonlinearity = np.full(
            (np.prod(hidden2.shape[1:]), output_dim), 0.1)
        output2 = np.matmul(hidden2, output_nonlinearity)
        # With std_share_network=True, both the mean and the log std are
        # expected to equal the reference output.
        assert np.allclose(mean1, output2)
        assert np.allclose(log_std1, output2)
        assert np.allclose(hidden1, hidden2)
# yapf: disable
@pytest.mark.parametrize('output_dim, hidden_dim', [
    (1, 1),
    (2, 2),
    (3, 3)
])
# yapf: enable
def test_std_share_network_shapes(self, output_dim, hidden_dim):
    """With a shared std network, the output layer is 2 * output_dim wide."""
    model = GaussianGRUModel(output_dim=output_dim,
                             hidden_dim=hidden_dim,
                             std_share_network=True,
                             hidden_nonlinearity=None,
                             recurrent_nonlinearity=None,
                             hidden_w_init=self.default_initializer,
                             recurrent_w_init=self.default_initializer,
                             output_w_init=self.default_initializer)
    step_hidden_var = tf.compat.v1.placeholder(shape=(self.batch_size,
                                                      hidden_dim),
                                               name='step_hidden',
                                               dtype=tf.float32)
    model.build(self.input_var, self.step_input_var, step_hidden_var)
    # output layer is a tf.keras.layers.Dense object,
    # which cannot be access by tf.compat.v1.variable_scope.
    # A workaround is to access in tf.compat.v1.global_variables()
    # NOTE(review): the asserts assume the loop finds exactly one matching
    # kernel/bias pair; otherwise they'd fail with a NameError.
    for var in tf.compat.v1.global_variables():
        if 'output_layer/kernel' in var.name:
            std_share_output_weights = var
        if 'output_layer/bias' in var.name:
            std_share_output_bias = var
    assert std_share_output_weights.shape[1] == output_dim * 2
    assert std_share_output_bias.shape == output_dim * 2
# yapf: disable
@pytest.mark.parametrize('output_dim, hidden_dim, init_std', [
    (1, 1, 1),
    (1, 1, 2),
    (1, 2, 1),
    (1, 2, 2),
    (3, 3, 1),
    (3, 3, 2),
])
# yapf: enable
@mock.patch('tensorflow.random.normal')
def test_without_std_share_network_output_values(self, mock_normal,
                                                 output_dim, hidden_dim,
                                                 init_std):
    """Step outputs match a hand-computed GRU; log std stays at init_std."""
    # Pin the sampling noise so the run is deterministic.
    mock_normal.return_value = 0.5
    # No nonlinearities and constant 0.1 weights make the expected outputs
    # computable as a plain matrix product below.
    model = GaussianGRUModel(output_dim=output_dim,
                             hidden_dim=hidden_dim,
                             std_share_network=False,
                             hidden_nonlinearity=None,
                             recurrent_nonlinearity=None,
                             hidden_w_init=self.default_initializer,
                             recurrent_w_init=self.default_initializer,
                             output_w_init=self.default_initializer,
                             init_std=init_std)
    step_hidden_var = tf.compat.v1.placeholder(shape=(self.batch_size,
                                                      hidden_dim),
                                               name='step_hidden',
                                               dtype=tf.float32)
    (_, step_mean_var, step_log_std_var, step_hidden,
     hidden_init_var) = model.build(self.input_var, self.step_input_var,
                                    step_hidden_var)
    # hidden1 tracks the model's hidden state, hidden2 the NumPy reference.
    hidden1 = hidden2 = np.full((self.batch_size, hidden_dim),
                                hidden_init_var.eval())
    for _ in range(self.time_step):
        mean1, log_std1, hidden1 = self.sess.run(
            [step_mean_var, step_log_std_var, step_hidden],
            feed_dict={
                self.step_input_var: self.obs_input,
                step_hidden_var: hidden1
            })
        # Advance the reference GRU with the same constant weights.
        hidden2 = recurrent_step_gru(input_val=self.obs_input,
                                     num_units=hidden_dim,
                                     step_hidden=hidden2,
                                     w_x_init=0.1,
                                     w_h_init=0.1,
                                     b_init=0.,
                                     nonlinearity=None,
                                     gate_nonlinearity=None)

        output_nonlinearity = np.full(
            (np.prod(hidden2.shape[1:]), output_dim), 0.1)
        output2 = np.matmul(hidden2, output_nonlinearity)
        assert np.allclose(mean1, output2)
        # Without a shared std network, the log std is a separate parameter
        # expected to stay at log(init_std).
        expected_log_std = np.full((self.batch_size, output_dim),
                                   np.log(init_std))
        assert np.allclose(log_std1, expected_log_std)
        assert np.allclose(hidden1, hidden2)
# yapf: disable
@pytest.mark.parametrize('output_dim, hidden_dim', [
    (1, 1),
    (2, 2),
    (3, 3)
])
# yapf: enable
def test_without_std_share_network_shapes(self, output_dim, hidden_dim):
    """Without sharing, output layer is output_dim wide plus a std param."""
    model = GaussianGRUModel(output_dim=output_dim,
                             hidden_dim=hidden_dim,
                             std_share_network=False,
                             hidden_nonlinearity=None,
                             recurrent_nonlinearity=None,
                             hidden_w_init=self.default_initializer,
                             recurrent_w_init=self.default_initializer,
                             output_w_init=self.default_initializer)
    step_hidden_var = tf.compat.v1.placeholder(shape=(self.batch_size,
                                                      hidden_dim),
                                               name='step_hidden',
                                               dtype=tf.float32)
    model.build(self.input_var, self.step_input_var, step_hidden_var)
    # output layer is a tf.keras.layers.Dense object,
    # which cannot be access by tf.compat.v1.variable_scope.
    # A workaround is to access in tf.compat.v1.global_variables()
    # NOTE(review): the asserts assume the loop finds each of the three
    # variables below; otherwise they'd fail with a NameError.
    for var in tf.compat.v1.global_variables():
        if 'output_layer/kernel' in var.name:
            std_share_output_weights = var
        if 'output_layer/bias' in var.name:
            std_share_output_bias = var
        if 'log_std_param/parameter' in var.name:
            log_std_param = var
    assert std_share_output_weights.shape[1] == output_dim
    assert std_share_output_bias.shape == output_dim
    assert log_std_param.shape == output_dim
# yapf: disable
@pytest.mark.parametrize('output_dim, hidden_dim', [
    (1, 1),
    (2, 2),
    (3, 3)
])
# yapf: enable
@mock.patch('tensorflow.random.normal')
def test_std_share_network_is_pickleable(self, mock_normal, output_dim,
                                         hidden_dim):
    """Outputs are unchanged after pickling and rebuilding the model."""
    # Pin the sampling noise so both graphs compute the same values.
    mock_normal.return_value = 0.5
    model = GaussianGRUModel(output_dim=output_dim,
                             hidden_dim=hidden_dim,
                             std_share_network=True,
                             hidden_nonlinearity=None,
                             recurrent_nonlinearity=None,
                             hidden_w_init=self.default_initializer,
                             recurrent_w_init=self.default_initializer,
                             output_w_init=self.default_initializer)
    step_hidden_var = tf.compat.v1.placeholder(shape=(self.batch_size,
                                                      hidden_dim),
                                               name='step_hidden',
                                               dtype=tf.float32)
    (dist, step_mean_var, step_log_std_var, step_hidden,
     _) = model.build(self.input_var, self.step_input_var, step_hidden_var)
    # output layer is a tf.keras.layers.Dense object,
    # which cannot be access by tf.compat.v1.variable_scope.
    # A workaround is to access in tf.compat.v1.global_variables()
    # Load a non-zero bias so the comparison is not trivially all-zeros.
    for var in tf.compat.v1.global_variables():
        if 'output_layer/bias' in var.name:
            var.load(tf.ones_like(var).eval())
    hidden = np.zeros((self.batch_size, hidden_dim))
    # Record full-sequence and single-step outputs before pickling.
    outputs1 = self.sess.run([dist.loc, dist.scale.diag],
                             feed_dict={self.input_var: self.obs_inputs})
    output1 = self.sess.run([step_mean_var, step_log_std_var, step_hidden],
                            feed_dict={
                                self.step_input_var: self.obs_input,
                                step_hidden_var: hidden
                            })  # noqa: E126
    h = pickle.dumps(model)
    # Rebuild the unpickled model in a fresh graph/session with fresh
    # placeholders, then compare both kinds of outputs.
    with tf.compat.v1.Session(graph=tf.Graph()) as sess:
        model_pickled = pickle.loads(h)
        input_var = tf.compat.v1.placeholder(tf.float32,
                                             shape=(None, None,
                                                    self.feature_shape),
                                             name='input')
        step_input_var = tf.compat.v1.placeholder(
            tf.float32,
            shape=(None, self.feature_shape),
            name='step_input')
        step_hidden_var = tf.compat.v1.placeholder(shape=(self.batch_size,
                                                          hidden_dim),
                                                   name='initial_hidden',
                                                   dtype=tf.float32)
        (dist2, step_mean_var2, step_log_std_var2, step_hidden2,
         _) = model_pickled.build(input_var, step_input_var,
                                  step_hidden_var)
        outputs2 = sess.run([dist2.loc, dist2.scale.diag],
                            feed_dict={input_var: self.obs_inputs})
        output2 = sess.run(
            [step_mean_var2, step_log_std_var2, step_hidden2],
            feed_dict={
                step_input_var: self.obs_input,
                step_hidden_var: hidden
            })
        assert np.array_equal(outputs1, outputs2)
        assert np.array_equal(output1, output2)
    # yapf: disable
    @pytest.mark.parametrize('output_dim, hidden_dim', [
        (1, 1),
        (2, 2),
        (3, 3)
    ])
    # yapf: enable
    @mock.patch('tensorflow.random.normal')
    def test_without_std_share_network_is_pickleable(self, mock_normal,
                                                     output_dim, hidden_dim):
        """Pickling a GaussianGRUModel (std_share_network=False) preserves outputs.

        Builds the model, perturbs a bias so the check is non-trivial, records
        outputs, pickles/unpickles into a fresh graph, rebuilds, and asserts the
        rebuilt model produces identical outputs.
        """
        # Fix the sampling noise so outputs are deterministic across graphs.
        mock_normal.return_value = 0.5
        model = GaussianGRUModel(output_dim=output_dim,
                                 hidden_dim=hidden_dim,
                                 std_share_network=False,
                                 hidden_nonlinearity=None,
                                 recurrent_nonlinearity=None,
                                 hidden_w_init=self.default_initializer,
                                 recurrent_w_init=self.default_initializer,
                                 output_w_init=self.default_initializer)
        step_hidden_var = tf.compat.v1.placeholder(shape=(self.batch_size,
                                                          hidden_dim),
                                                   name='step_hidden',
                                                   dtype=tf.float32)
        (dist, step_mean_var, step_log_std_var, step_hidden,
         _) = model.build(self.input_var, self.step_input_var, step_hidden_var)
        # output layer is a tf.keras.layers.Dense object,
        # which cannot be access by tf.compat.v1.variable_scope.
        # A workaround is to access in tf.compat.v1.global_variables()
        for var in tf.compat.v1.global_variables():
            if 'output_layer/bias' in var.name:
                # Set the bias away from its zero default so that pickling
                # demonstrably carries over the modified parameter values.
                var.load(tf.ones_like(var).eval())
        hidden = np.zeros((self.batch_size, hidden_dim))
        # Full-sequence outputs (distribution mean and scale diagonal).
        outputs1 = self.sess.run([dist.loc, dist.scale.diag],
                                 feed_dict={self.input_var: self.obs_inputs})
        # Single-step outputs (mean, log-std, and next hidden state).
        output1 = self.sess.run([step_mean_var, step_log_std_var, step_hidden],
                                feed_dict={
                                    self.step_input_var: self.obs_input,
                                    step_hidden_var: hidden
                                })  # noqa: E126
        h = pickle.dumps(model)
        # Rebuild the pickled model in a brand-new graph/session so no state
        # leaks over from the original graph.
        with tf.compat.v1.Session(graph=tf.Graph()) as sess:
            model_pickled = pickle.loads(h)
            input_var = tf.compat.v1.placeholder(tf.float32,
                                                 shape=(None, None,
                                                        self.feature_shape),
                                                 name='input')
            step_input_var = tf.compat.v1.placeholder(
                tf.float32,
                shape=(None, self.feature_shape),
                name='step_input')
            step_hidden_var = tf.compat.v1.placeholder(shape=(self.batch_size,
                                                              hidden_dim),
                                                       name='initial_hidden',
                                                       dtype=tf.float32)
            (dist2, step_mean_var2, step_log_std_var2, step_hidden2,
             _) = model_pickled.build(input_var, step_input_var,
                                      step_hidden_var)
            outputs2 = sess.run([dist2.loc, dist2.scale.diag],
                                feed_dict={input_var: self.obs_inputs})
            output2 = sess.run(
                [step_mean_var2, step_log_std_var2, step_hidden2],
                feed_dict={
                    step_input_var: self.obs_input,
                    step_hidden_var: hidden
                })
        assert np.array_equal(outputs1, outputs2)
        assert np.array_equal(output1, output2)
| 49.104
| 80
| 0.484414
| 1,812
| 18,414
| 4.618102
| 0.089404
| 0.048996
| 0.034656
| 0.038719
| 0.914316
| 0.902008
| 0.893284
| 0.893284
| 0.884202
| 0.878227
| 0
| 0.02113
| 0.43972
| 18,414
| 374
| 81
| 49.235294
| 0.789958
| 0.045563
| 0
| 0.821086
| 0
| 0
| 0.030511
| 0.006929
| 0
| 0
| 0
| 0
| 0.051118
| 1
| 0.025559
| false
| 0
| 0.028754
| 0
| 0.057508
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7c55492c24d31fc86a040ad8b3219275de539a1b
| 19,175
|
py
|
Python
|
test/test_clean_index_service.py
|
mmalandra-kb4/service-auto-analyzer
|
475e343d918254a33cb8fc953aa75dacab62465b
|
[
"Apache-2.0"
] | 8
|
2020-06-04T10:32:27.000Z
|
2022-02-17T08:11:00.000Z
|
test/test_clean_index_service.py
|
mmalandra-kb4/service-auto-analyzer
|
475e343d918254a33cb8fc953aa75dacab62465b
|
[
"Apache-2.0"
] | 9
|
2019-12-12T11:18:37.000Z
|
2022-02-19T16:17:28.000Z
|
test/test_clean_index_service.py
|
mmalandra-kb4/service-auto-analyzer
|
475e343d918254a33cb8fc953aa75dacab62465b
|
[
"Apache-2.0"
] | 12
|
2020-04-01T15:19:40.000Z
|
2022-03-03T14:41:55.000Z
|
"""
* Copyright 2019 EPAM Systems
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import unittest
from unittest.mock import MagicMock
import json
from http import HTTPStatus
import sure # noqa
import httpretty
import commons.launch_objects as launch_objects
from service.clean_index_service import CleanIndexService
from test.test_service import TestService
from utils import utils
class TestCleanIndexService(TestService):
    """Tests for CleanIndexService.delete_logs against a mocked Elasticsearch.

    Each test case lists the exact HTTP calls (method, URI, status, and
    optional request/response fixtures) that the service is expected to make,
    in order; httpretty replays them via the TestService server helpers.
    """

    @utils.ignore_warnings
    def test_clean_index(self):
        """Test cleaning index logs"""
        tests = [
            # Case 0: project index "2" does not exist -> nothing deleted.
            {
                "test_calls": [{"method": httpretty.GET,
                                "uri": "/2",
                                "status": HTTPStatus.NOT_FOUND,
                                },
                               {"method": httpretty.GET,
                                "uri": "/2_suggest",
                                "status": HTTPStatus.NOT_FOUND,
                                }, ],
                "rq": launch_objects.CleanIndex(ids=[1], project=2),
                "expected_count": 0
            },
            # Case 1: same as case 0 but with an index-name prefix ("rp_")
            # configured via a custom app_config.
            {
                "test_calls": [{"method": httpretty.GET,
                                "uri": "/rp_2",
                                "status": HTTPStatus.NOT_FOUND,
                                },
                               {"method": httpretty.GET,
                                "uri": "/rp_2_suggest",
                                "status": HTTPStatus.NOT_FOUND,
                                }, ],
                "rq": launch_objects.CleanIndex(ids=[1], project=2),
                "app_config": {
                    "esHost": "http://localhost:9200",
                    "esUser": "",
                    "esPassword": "",
                    "esVerifyCerts": False,
                    "esUseSsl": False,
                    "esSslShowWarn": False,
                    "turnOffSslVerification": True,
                    "esCAcert": "",
                    "esClientCert": "",
                    "esClientKey": "",
                    "appVersion": "",
                    "minioRegion": "",
                    "minioBucketPrefix": "",
                    "filesystemDefaultPath": "",
                    "esChunkNumber": 1000,
                    "binaryStoreType": "minio",
                    "minioHost": "",
                    "minioAccessKey": "",
                    "minioSecretKey": "",
                    "esProjectIndexPrefix": "rp_"
                },
                "expected_count": 0
            },
            # Case 2: index "1" exists; logs are deleted and merged logs are
            # reindexed; the suggest index does not exist so no suggest cleanup.
            {
                "test_calls": [{"method": httpretty.GET,
                                "uri": "/1",
                                "status": HTTPStatus.OK,
                                },
                               {"method": httpretty.GET,
                                "uri": "/1/_search?scroll=5m&size=1000",
                                "status": HTTPStatus.OK,
                                "content_type": "application/json",
                                "rq": utils.get_fixture(
                                    self.search_not_merged_logs_for_delete),
                                "rs": utils.get_fixture(
                                    self.one_hit_search_rs),
                                },
                               {"method": httpretty.POST,
                                "uri": "/_bulk?refresh=true",
                                "status": HTTPStatus.OK,
                                "content_type": "application/json",
                                "rs": utils.get_fixture(
                                    self.delete_logs_rs),
                                },
                               {"method": httpretty.GET,
                                "uri": "/1/_search?scroll=5m&size=1000",
                                "status": HTTPStatus.OK,
                                "content_type": "application/json",
                                "rq": utils.get_fixture(self.search_merged_logs),
                                "rs": utils.get_fixture(
                                    self.one_hit_search_rs),
                                },
                               {"method": httpretty.POST,
                                "uri": "/_bulk?refresh=true",
                                "status": HTTPStatus.OK,
                                "content_type": "application/json",
                                "rs": utils.get_fixture(self.delete_logs_rs),
                                },
                               {"method": httpretty.GET,
                                "uri": "/1/_search?scroll=5m&size=1000",
                                "status": HTTPStatus.OK,
                                "content_type": "application/json",
                                "rq": utils.get_fixture(self.search_not_merged_logs),
                                "rs": utils.get_fixture(
                                    self.one_hit_search_rs),
                                },
                               {"method": httpretty.POST,
                                "uri": "/_bulk?refresh=true",
                                "status": HTTPStatus.OK,
                                "content_type": "application/json",
                                "rq": utils.get_fixture(self.index_logs_rq),
                                "rs": utils.get_fixture(self.index_logs_rs),
                                },
                               {"method": httpretty.GET,
                                "uri": "/1_suggest",
                                "status": HTTPStatus.NOT_FOUND,
                                }],
                "rq": launch_objects.CleanIndex(ids=[1], project=1),
                "expected_count": 1
            },
            # Case 3: like case 2, but the suggest index exists, so related
            # suggest-info entries are also searched for and deleted.
            {
                "test_calls": [{"method": httpretty.GET,
                                "uri": "/1",
                                "status": HTTPStatus.OK,
                                },
                               {"method": httpretty.GET,
                                "uri": "/1/_search?scroll=5m&size=1000",
                                "status": HTTPStatus.OK,
                                "content_type": "application/json",
                                "rq": utils.get_fixture(
                                    self.search_not_merged_logs_for_delete),
                                "rs": utils.get_fixture(
                                    self.one_hit_search_rs),
                                },
                               {"method": httpretty.POST,
                                "uri": "/_bulk?refresh=true",
                                "status": HTTPStatus.OK,
                                "content_type": "application/json",
                                "rs": utils.get_fixture(
                                    self.delete_logs_rs),
                                },
                               {"method": httpretty.GET,
                                "uri": "/1/_search?scroll=5m&size=1000",
                                "status": HTTPStatus.OK,
                                "content_type": "application/json",
                                "rq": utils.get_fixture(self.search_merged_logs),
                                "rs": utils.get_fixture(
                                    self.one_hit_search_rs),
                                },
                               {"method": httpretty.POST,
                                "uri": "/_bulk?refresh=true",
                                "status": HTTPStatus.OK,
                                "content_type": "application/json",
                                "rs": utils.get_fixture(self.delete_logs_rs),
                                },
                               {"method": httpretty.GET,
                                "uri": "/1/_search?scroll=5m&size=1000",
                                "status": HTTPStatus.OK,
                                "content_type": "application/json",
                                "rq": utils.get_fixture(self.search_not_merged_logs),
                                "rs": utils.get_fixture(
                                    self.one_hit_search_rs),
                                },
                               {"method": httpretty.POST,
                                "uri": "/_bulk?refresh=true",
                                "status": HTTPStatus.OK,
                                "content_type": "application/json",
                                "rq": utils.get_fixture(self.index_logs_rq),
                                "rs": utils.get_fixture(self.index_logs_rs),
                                },
                               {"method": httpretty.GET,
                                "uri": "/1_suggest",
                                "status": HTTPStatus.OK,
                                },
                               {"method": httpretty.GET,
                                "uri": "/1_suggest/_search?scroll=5m&size=1000",
                                "status": HTTPStatus.OK,
                                "content_type": "application/json",
                                "rq": utils.get_fixture(self.search_suggest_info_ids_query),
                                "rs": utils.get_fixture(
                                    self.one_hit_search_suggest_info_rs),
                                },
                               {"method": httpretty.POST,
                                "uri": "/_bulk?refresh=true",
                                "status": HTTPStatus.OK,
                                "content_type": "application/json",
                                "rq": utils.get_fixture(self.delete_suggest_logs_rq),
                                "rs": utils.get_fixture(self.delete_logs_rs),
                                }],
                "rq": launch_objects.CleanIndex(ids=[1], project=1),
                "expected_count": 1
            },
            # Case 4: like case 3, but every index name carries the "rp_"
            # prefix from the custom app_config (and the suggest delete uses
            # the prefixed fixture).
            {
                "test_calls": [{"method": httpretty.GET,
                                "uri": "/rp_1",
                                "status": HTTPStatus.OK,
                                },
                               {"method": httpretty.GET,
                                "uri": "/rp_1/_search?scroll=5m&size=1000",
                                "status": HTTPStatus.OK,
                                "content_type": "application/json",
                                "rq": utils.get_fixture(
                                    self.search_not_merged_logs_for_delete),
                                "rs": utils.get_fixture(
                                    self.one_hit_search_rs),
                                },
                               {"method": httpretty.POST,
                                "uri": "/_bulk?refresh=true",
                                "status": HTTPStatus.OK,
                                "content_type": "application/json",
                                "rs": utils.get_fixture(
                                    self.delete_logs_rs),
                                },
                               {"method": httpretty.GET,
                                "uri": "/rp_1/_search?scroll=5m&size=1000",
                                "status": HTTPStatus.OK,
                                "content_type": "application/json",
                                "rq": utils.get_fixture(self.search_merged_logs),
                                "rs": utils.get_fixture(
                                    self.one_hit_search_rs),
                                },
                               {"method": httpretty.POST,
                                "uri": "/_bulk?refresh=true",
                                "status": HTTPStatus.OK,
                                "content_type": "application/json",
                                "rs": utils.get_fixture(self.delete_logs_rs),
                                },
                               {"method": httpretty.GET,
                                "uri": "/rp_1/_search?scroll=5m&size=1000",
                                "status": HTTPStatus.OK,
                                "content_type": "application/json",
                                "rq": utils.get_fixture(self.search_not_merged_logs),
                                "rs": utils.get_fixture(
                                    self.one_hit_search_rs),
                                },
                               {"method": httpretty.POST,
                                "uri": "/_bulk?refresh=true",
                                "status": HTTPStatus.OK,
                                "content_type": "application/json",
                                "rq": utils.get_fixture(self.index_logs_rq),
                                "rs": utils.get_fixture(self.index_logs_rs),
                                },
                               {"method": httpretty.GET,
                                "uri": "/rp_1_suggest",
                                "status": HTTPStatus.OK,
                                },
                               {"method": httpretty.GET,
                                "uri": "/rp_1_suggest/_search?scroll=5m&size=1000",
                                "status": HTTPStatus.OK,
                                "content_type": "application/json",
                                "rq": utils.get_fixture(self.search_suggest_info_ids_query),
                                "rs": utils.get_fixture(
                                    self.one_hit_search_suggest_info_rs),
                                },
                               {"method": httpretty.POST,
                                "uri": "/_bulk?refresh=true",
                                "status": HTTPStatus.OK,
                                "content_type": "application/json",
                                "rq": utils.get_fixture(
                                    self.delete_suggest_logs_rq_with_prefix),
                                "rs": utils.get_fixture(self.delete_logs_rs),
                                }],
                "rq": launch_objects.CleanIndex(ids=[1], project=1),
                "app_config": {
                    "esHost": "http://localhost:9200",
                    "esUser": "",
                    "esPassword": "",
                    "esVerifyCerts": False,
                    "esUseSsl": False,
                    "esSslShowWarn": False,
                    "turnOffSslVerification": True,
                    "esCAcert": "",
                    "esClientCert": "",
                    "esClientKey": "",
                    "appVersion": "",
                    "minioRegion": "",
                    "minioBucketPrefix": "",
                    "filesystemDefaultPath": "",
                    "esChunkNumber": 1000,
                    "binaryStoreType": "minio",
                    "minioHost": "",
                    "minioAccessKey": "",
                    "minioSecretKey": "",
                    "esProjectIndexPrefix": "rp_"
                },
                "expected_count": 1
            }
        ]

        for idx, test in enumerate(tests):
            # sure.ensure reports the failing case index on assertion errors.
            with sure.ensure('Error in the test case number: {0}', idx):
                self._start_server(test["test_calls"])
                # Fall back to the shared app_config unless the case
                # overrides it (prefix cases).
                app_config = self.app_config
                if "app_config" in test:
                    app_config = test["app_config"]
                _clean_index_service = CleanIndexService(
                    app_config=app_config,
                    search_cfg=self.get_default_search_config())
                # Stub out scroll continuation so each search returns only
                # its first page of hits.
                _clean_index_service.es_client.es_client.scroll = MagicMock(
                    return_value=json.loads(utils.get_fixture(self.no_hits_search_rs)))
                _clean_index_service.suggest_info_service.es_client.es_client.scroll = MagicMock(
                    return_value=json.loads(utils.get_fixture(self.no_hits_search_rs)))
                response = _clean_index_service.delete_logs(test["rq"])
                test["expected_count"].should.equal(response)
                TestCleanIndexService.shutdown_server(test["test_calls"])
# Allow running this test module directly (python test_clean_index_service.py).
if __name__ == '__main__':
    unittest.main()
| 57.930514
| 108
| 0.333716
| 1,180
| 19,175
| 5.173729
| 0.151695
| 0.052416
| 0.09828
| 0.124488
| 0.797052
| 0.797052
| 0.79181
| 0.791155
| 0.790663
| 0.753481
| 0
| 0.014333
| 0.57794
| 19,175
| 330
| 109
| 58.106061
| 0.740022
| 0.031395
| 0
| 0.735974
| 0
| 0
| 0.141325
| 0.023922
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0033
| false
| 0.006601
| 0.033003
| 0
| 0.039604
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7c5b1f6076b948b8a2d6e06ccc3a896a0cc921d7
| 8,153
|
py
|
Python
|
viadot/tasks/salesforce.py
|
winiar93/viadot
|
7d8f8b0a30de3d40da161615639532012de072b0
|
[
"MIT"
] | null | null | null |
viadot/tasks/salesforce.py
|
winiar93/viadot
|
7d8f8b0a30de3d40da161615639532012de072b0
|
[
"MIT"
] | null | null | null |
viadot/tasks/salesforce.py
|
winiar93/viadot
|
7d8f8b0a30de3d40da161615639532012de072b0
|
[
"MIT"
] | null | null | null |
import json
from datetime import timedelta
import pandas as pd
from prefect import Task
from prefect.tasks.secrets import PrefectSecret
from prefect.utilities.tasks import defaults_from_attrs
from ..sources import Salesforce
from .azure_key_vault import AzureKeyVaultSecret
def get_credentials(credentials_secret: str, vault_name: str = None):
    """
    Get Salesforce credentials from Azure Key Vault.

    Args:
        credentials_secret (str): The name of the Azure Key Vault secret containing a dictionary with
            the required credentials (eg. username, password, token). Defaults to None.
        vault_name (str, optional): The name of the vault from which to obtain the secret. Defaults to None.

    Returns:
        The credentials dictionary, or None when no secret name was given and
        no default secret is configured.
    """
    # Bug fix: always bind the result. Previously, when credentials_secret was
    # falsy AND the default Prefect secret lookup raised ValueError, the final
    # `return credentials` raised UnboundLocalError.
    credentials = None
    if not credentials_secret:
        # attempt to read a default for the service principal secret name
        try:
            credentials_secret = PrefectSecret("SALESFORCE_DEFAULT_SECRET").run()
        except ValueError:
            pass
    if credentials_secret:
        azure_secret_task = AzureKeyVaultSecret()
        credentials_str = azure_secret_task.run(
            secret=credentials_secret, vault_name=vault_name
        )
        credentials = json.loads(credentials_str)
    return credentials
class SalesforceUpsert(Task):
    """
    Task for upserting a pandas DataFrame to Salesforce.

    Args:
        table (str, optional): Target Salesforce table. Defaults to None.
        external_id (str, optional): External ID field used for the upsert. Defaults to None.
        domain (str, optional): Salesforce connection domain. Defaults to "test" (sandbox).
        client_id (str, optional): Client id used to track API calls. Defaults to "viadot".
        env (str, optional): Environment name for credential/connection configuration. Defaults to "DEV".
        raise_on_error (bool, optional): Whether to raise if a row upsert fails. Defaults to False.
        max_retries (int, optional): Prefect task retry count. Defaults to 3.
        retry_delay (timedelta, optional): Delay between Prefect retries. Defaults to 10 seconds.
    """

    def __init__(
        self,
        table: str = None,
        external_id: str = None,
        domain: str = "test",
        client_id: str = "viadot",
        env: str = "DEV",
        raise_on_error: bool = False,
        max_retries: int = 3,
        retry_delay: timedelta = timedelta(seconds=10),
        *args,
        **kwargs,
    ):
        self.table = table
        self.external_id = external_id
        self.domain = domain
        self.client_id = client_id
        self.env = env
        self.raise_on_error = raise_on_error
        # max_retries / retry_delay are not stored here; presumably Prefect's
        # Task base class keeps them as attributes that @defaults_from_attrs
        # reads below -- confirm against the Prefect Task API.
        super().__init__(
            name="salesforce_upsert",
            max_retries=max_retries,
            retry_delay=retry_delay,
            *args,
            **kwargs,
        )

    def __call__(self, *args, **kwargs):
        """Upserting data to Salesforce"""
        return super().__call__(*args, **kwargs)

    # Fill any run() argument left as None from the same-named task attribute.
    @defaults_from_attrs(
        "table",
        "external_id",
        "domain",
        "client_id",
        "env",
        "raise_on_error",
        "max_retries",
        "retry_delay",
    )
    def run(
        self,
        df: pd.DataFrame = None,
        table: str = None,
        external_id: str = None,
        domain: str = None,
        client_id: str = None,
        credentials_secret: str = None,
        vault_name: str = None,
        env: str = None,
        raise_on_error: bool = None,
        max_retries: int = None,
        retry_delay: timedelta = None,
    ) -> None:
        """Task run method.

        Args:
            df (pd.DataFrame, optional): The DataFrame to upsert. Defaults to None.
            table (str, optional): The table where the data should be upserted. Defaults to None.
            external_id (str, optional): The external ID to use for the upsert. Defaults to None.
            domain (str, optional): Domain of a connection; defaults to 'test' (sandbox).
                Can only be added if built-in username/password/security token is provided. Defaults to None.
            client_id (str, optional): Client id to keep the track of API calls. Defaults to None.
            credentials_secret (str, optional): The name of the Azure Key Vault secret containing a dictionary with
                the required credentials (eg. username, password, token). Defaults to None.
            vault_name (str, optional): The name of the vault from which to obtain the secret. Defaults to None.
            env (str, optional): Environment information, provides information about credential and connection configuration. Defaults to 'DEV'.
            raise_on_error (bool, optional): Whether to raise an exception if a row upsert fails.
                If False, we only display a warning. Defaults to False.
        """
        credentials = get_credentials(credentials_secret, vault_name=vault_name)
        salesforce = Salesforce(
            credentials=credentials,
            env=env,
            domain=domain,
            client_id=client_id,
        )
        self.logger.info(f"Upserting {df.shape[0]} rows to Salesforce...")
        salesforce.upsert(
            df=df, table=table, external_id=external_id, raise_on_error=raise_on_error
        )
        self.logger.info(f"Successfully upserted {df.shape[0]} rows to Salesforce.")
class SalesforceBulkUpsert(Task):
    """
    Task for upserting a pandas DataFrame to Salesforce.

    Like SalesforceUpsert, but uses the bulk upsert API and accepts a
    batch_size at run time.

    Args:
        table (str, optional): Target Salesforce table. Defaults to None.
        external_id (str, optional): External ID field used for the upsert. Defaults to None.
        domain (str, optional): Salesforce connection domain. Defaults to "test" (sandbox).
        client_id (str, optional): Client id used to track API calls. Defaults to "viadot".
        env (str, optional): Environment name for credential/connection configuration. Defaults to "DEV".
        raise_on_error (bool, optional): Whether to raise if a row upsert fails. Defaults to False.
        max_retries (int, optional): Prefect task retry count. Defaults to 3.
        retry_delay (timedelta, optional): Delay between Prefect retries. Defaults to 10 seconds.
    """

    def __init__(
        self,
        table: str = None,
        external_id: str = None,
        domain: str = "test",
        client_id: str = "viadot",
        env: str = "DEV",
        raise_on_error: bool = False,
        max_retries: int = 3,
        retry_delay: timedelta = timedelta(seconds=10),
        *args,
        **kwargs,
    ):
        self.table = table
        self.external_id = external_id
        self.domain = domain
        self.client_id = client_id
        self.env = env
        self.raise_on_error = raise_on_error
        # max_retries / retry_delay are forwarded to Prefect's Task base
        # class rather than stored here (see @defaults_from_attrs below).
        super().__init__(
            name="salesforce_bulk_upsert",
            max_retries=max_retries,
            retry_delay=retry_delay,
            *args,
            **kwargs,
        )

    def __call__(self, *args, **kwargs):
        """Upserting data to Salesforce"""
        return super().__call__(*args, **kwargs)

    # Fill any run() argument left as None from the same-named task attribute.
    # NOTE: batch_size is intentionally absent -- it has no task-level default.
    @defaults_from_attrs(
        "table",
        "external_id",
        "domain",
        "client_id",
        "env",
        "raise_on_error",
        "max_retries",
        "retry_delay",
    )
    def run(
        self,
        df: pd.DataFrame = None,
        table: str = None,
        external_id: str = None,
        batch_size: int = None,
        domain: str = None,
        client_id: str = None,
        credentials_secret: str = None,
        vault_name: str = None,
        env: str = None,
        raise_on_error: bool = None,
        max_retries: int = None,
        retry_delay: timedelta = None,
    ) -> None:
        """Task run method.

        Args:
            df (pd.DataFrame, optional): The DataFrame to upsert. Defaults to None.
            table (str, optional): The table where the data should be upserted. Defaults to None.
            external_id (str, optional): The external ID to use for the upsert. Defaults to None.
            batch_size (int, optional): Number of rows per bulk upsert batch. Defaults to None.
            domain (str, optional): Domain of a connection; defaults to 'test' (sandbox).
                Can only be added if built-in username/password/security token is provided. Defaults to None.
            client_id (str, optional): Client id to keep the track of API calls. Defaults to None.
            credentials_secret (str, optional): The name of the Azure Key Vault secret containing a dictionary with
                the required credentials (eg. username, password, token). Defaults to None.
            vault_name (str, optional): The name of the vault from which to obtain the secret. Defaults to None.
            env (str, optional): Environment information, provides information about credential and connection configuration. Defaults to 'DEV'.
            raise_on_error (bool, optional): Whether to raise an exception if a row upsert fails.
                If False, we only display a warning. Defaults to False.
        """
        credentials = get_credentials(credentials_secret, vault_name=vault_name)
        salesforce = Salesforce(
            credentials=credentials,
            env=env,
            domain=domain,
            client_id=client_id,
        )
        self.logger.info(f"Upserting {df.shape[0]} rows to Salesforce...")
        salesforce.bulk_upsert(
            df=df,
            table=table,
            external_id=external_id,
            batch_size=batch_size,
            raise_on_error=raise_on_error,
        )
        self.logger.info(f"Successfully upserted {df.shape[0]} rows to Salesforce.")
| 35.447826
| 144
| 0.609101
| 960
| 8,153
| 5
| 0.146875
| 0.045833
| 0.046667
| 0.015
| 0.843958
| 0.843958
| 0.836667
| 0.836667
| 0.836667
| 0.82
| 0
| 0.001769
| 0.306758
| 8,153
| 229
| 145
| 35.60262
| 0.847488
| 0.354839
| 0
| 0.730769
| 0
| 0
| 0.087045
| 0.009514
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044872
| false
| 0.00641
| 0.051282
| 0
| 0.128205
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7c5fa4466e9bf86b1f180a89c2b3ed8d2aaa9f4c
| 1,851
|
py
|
Python
|
Twitter_Image_Vision/googleVideo.py
|
antpas/EC500C1
|
398550baeef7aa5a3fddd5d6e4c67e948e3b5dc6
|
[
"MIT"
] | null | null | null |
Twitter_Image_Vision/googleVideo.py
|
antpas/EC500C1
|
398550baeef7aa5a3fddd5d6e4c67e948e3b5dc6
|
[
"MIT"
] | null | null | null |
Twitter_Image_Vision/googleVideo.py
|
antpas/EC500C1
|
398550baeef7aa5a3fddd5d6e4c67e948e3b5dc6
|
[
"MIT"
] | 1
|
2018-04-17T20:01:29.000Z
|
2018-04-17T20:01:29.000Z
|
from google.cloud import videointelligence
import os
import io
# Point the Google client libraries at the service-account key file.
# Bug fix: the path is now a raw string -- the original literal contained
# "\t" (tab) and "\a" (bell) escape sequences that silently corrupted it.
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = r"Path\to\json\key\apikey.json"
def get_segment_labels():
    """Run label detection on the local ``video.avi`` and return the first
    annotation result as a plain dict.

    Raises:
        ValueError: If ``video.avi`` is not present next to this module.
    """
    # Bug fix: `json` and `MessageToJson` were used below but never imported
    # anywhere in this module, so the original raised NameError at the
    # serialization step. Imported locally to keep the fix self-contained.
    import json
    from google.protobuf.json_format import MessageToJson

    path = os.path.join(os.path.dirname(__file__), 'video.avi')
    if not os.path.exists(path):
        raise ValueError('No images available')
    video_client = videointelligence.VideoIntelligenceServiceClient()
    features = [videointelligence.enums.Feature.LABEL_DETECTION]
    with io.open(path, 'rb') as movie:
        input_content = movie.read()
    operation = video_client.annotate_video(features=features, input_content=input_content)
    # Block until the long-running annotation completes (up to 90 s).
    result = operation.result(timeout=90)
    serial = MessageToJson(result)
    output = json.loads(serial)
    return output['annotationResults'][0]
def get_shot_labels():
    """Annotate the local ``video.avi`` with label detection and return the
    raw Video Intelligence API result.

    Raises:
        ValueError: If ``video.avi`` is not present next to this module.
    """
    video_path = os.path.join(os.path.dirname(__file__), 'video.avi')
    if not os.path.exists(video_path):
        raise ValueError('No images available')

    client = videointelligence.VideoIntelligenceServiceClient()
    requested_features = [videointelligence.enums.Feature.LABEL_DETECTION]

    with io.open(video_path, 'rb') as movie:
        movie_bytes = movie.read()

    annotate_op = client.annotate_video(
        features=requested_features, input_content=movie_bytes)
    # Block until the long-running annotation completes (up to 90 s).
    return annotate_op.result(timeout=90)
def get_frame_labels():
    """Annotate the local ``video.avi`` with label detection and return the
    raw Video Intelligence API result.

    NOTE(review): this body is identical to get_shot_labels(); nothing in the
    request distinguishes frame-level from shot-level labels. Presumably a
    frame-mode label detection config was intended -- confirm against the API
    and the callers.

    Raises:
        ValueError: If ``video.avi`` is not present next to this module.
    """
    path = os.path.join(os.path.dirname(__file__), 'video.avi')
    if not os.path.exists(path):
        raise ValueError('No images available')
    video_client = videointelligence.VideoIntelligenceServiceClient()
    features = [videointelligence.enums.Feature.LABEL_DETECTION]
    with io.open(path, 'rb') as movie:
        input_content = movie.read()
    operation = video_client.annotate_video(features=features, input_content=input_content)
    # Block until the long-running annotation completes (up to 90 s).
    result = operation.result(timeout=90)
    return result
| 28.921875
| 91
| 0.729876
| 221
| 1,851
| 5.927602
| 0.276018
| 0.041221
| 0.027481
| 0.036641
| 0.829008
| 0.829008
| 0.829008
| 0.829008
| 0.829008
| 0.829008
| 0
| 0.00451
| 0.161534
| 1,851
| 63
| 92
| 29.380952
| 0.839562
| 0
| 0
| 0.74359
| 0
| 0
| 0.089189
| 0.031351
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.076923
| 0
| 0.230769
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7c6e82920671ba7d9981f5c5f036c12de67a76cf
| 1,965
|
py
|
Python
|
dataclasses/resources/test/i3double_compare.py
|
hschwane/offline_production
|
e14a6493782f613b8bbe64217559765d5213dc1e
|
[
"MIT"
] | 1
|
2020-12-24T22:00:01.000Z
|
2020-12-24T22:00:01.000Z
|
dataclasses/resources/test/i3double_compare.py
|
hschwane/offline_production
|
e14a6493782f613b8bbe64217559765d5213dc1e
|
[
"MIT"
] | null | null | null |
dataclasses/resources/test/i3double_compare.py
|
hschwane/offline_production
|
e14a6493782f613b8bbe64217559765d5213dc1e
|
[
"MIT"
] | 3
|
2020-07-17T09:20:29.000Z
|
2021-03-30T16:44:18.000Z
|
#!/usr/bin/env python
from icecube import dataclasses
from icecube.icetray.I3Test import *
# Comparison-operator smoke tests for dataclasses.I3Double.

# I3Double compared against another I3Double.
ENSURE( dataclasses.I3Double(12.0) == dataclasses.I3Double(12.0) , "I3Double == I3Double failed" )
ENSURE( dataclasses.I3Double(12.0) != dataclasses.I3Double(13) , "I3Double != I3Double failed" )
ENSURE( dataclasses.I3Double(12.0) < dataclasses.I3Double(13) , "I3Double < I3Double failed" )
ENSURE( dataclasses.I3Double(12.0) <= dataclasses.I3Double(13) , "I3Double <= I3Double failed" )
ENSURE( dataclasses.I3Double(14.0) > dataclasses.I3Double(13) , "I3Double > I3Double failed" )
ENSURE( dataclasses.I3Double(14.0) >= dataclasses.I3Double(13) , "I3Double >= I3Double failed" )

# I3Double compared against plain ints.
ENSURE( dataclasses.I3Double(12.0) == 12 , "I3Double == int failed" )
ENSURE( dataclasses.I3Double(12.0) != 13 , "I3Double != int failed" )
ENSURE( dataclasses.I3Double(12.0) < 13 , "I3Double < int failed" )
ENSURE( dataclasses.I3Double(12.0) <= 13 , "I3Double <= int failed" )
ENSURE( dataclasses.I3Double(14.0) > 13 , "I3Double > int failed" )
ENSURE( dataclasses.I3Double(14.0) >= 13 , "I3Double >= int failed" )

# I3Double compared against plain floats.
ENSURE( dataclasses.I3Double(12.0) == 12. , "I3Double == float failed" )
ENSURE( dataclasses.I3Double(12.0) != 13. , "I3Double != float failed" )
ENSURE( dataclasses.I3Double(12.0) < 13. , "I3Double < float failed" )
ENSURE( dataclasses.I3Double(12.0) <= 13. , "I3Double <= float failed" )
ENSURE( dataclasses.I3Double(14.0) > 13. , "I3Double > float failed" )
ENSURE( dataclasses.I3Double(14.0) >= 13. , "I3Double >= float failed" )

# I3Double compared against booleans, plus explicit bool() conversion.
ENSURE( dataclasses.I3Double(0.0) == False , "I3Double == False failed" )
ENSURE( dataclasses.I3Double(0.0) != True , "I3Double != True failed" )
ENSURE( dataclasses.I3Double(1.0) == True , "I3Double == True failed" )
ENSURE( dataclasses.I3Double(1.0) != False , "I3Double != False failed" )
ENSURE(bool(dataclasses.I3Double(0.0))==False, "bool(0) should be false")
# Bug fix: the message below previously read "bool(0) should be false",
# copy-pasted from the line above, which would mislabel a failure here.
ENSURE(bool(dataclasses.I3Double(1.0))==True, "bool(1) should be true")
| 59.545455
| 98
| 0.703817
| 248
| 1,965
| 5.576613
| 0.100806
| 0.412148
| 0.397686
| 0.470716
| 0.934924
| 0.872017
| 0.812726
| 0.778742
| 0.77585
| 0.77585
| 0
| 0.095349
| 0.124682
| 1,965
| 32
| 99
| 61.40625
| 0.708721
| 0.010178
| 0
| 0
| 0
| 0
| 0.294239
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.076923
| 0
| 0.076923
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
7cb001f5e9b345ee9fe0f7ad36ce509fefdff6cd
| 106
|
py
|
Python
|
src/transactiondata.py
|
little-quokka/py-quokka-block
|
d6593087c1f027af80c8968ac113c1ccb2cf7f55
|
[
"MIT"
] | null | null | null |
src/transactiondata.py
|
little-quokka/py-quokka-block
|
d6593087c1f027af80c8968ac113c1ccb2cf7f55
|
[
"MIT"
] | 8
|
2018-01-03T01:27:06.000Z
|
2018-01-03T01:32:33.000Z
|
src/transactiondata.py
|
little-quokka/py-quokka-block
|
d6593087c1f027af80c8968ac113c1ccb2cf7f55
|
[
"MIT"
] | null | null | null |
from abstractdatapackage import AbstractDataPackage
class TransactionData(AbstractDataPackage):
    """Data package for transaction payloads.

    Adds no behavior of its own; it exists to give transaction data a
    distinct type (presumably for type-based dispatch -- confirm with
    callers of AbstractDataPackage subclasses).
    """
    pass
| 21.2
| 51
| 0.858491
| 8
| 106
| 11.375
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.113208
| 106
| 5
| 52
| 21.2
| 0.968085
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 7
|
7cb0bc94bb47e52130b2524ad5bf8807cbee1d8b
| 53,564
|
py
|
Python
|
onlinelinguisticdatabase/tests/functional/test_users.py
|
dativebase/old
|
bcfb856e8a8cccddcab4eaedc1aa107697ceee2f
|
[
"Apache-2.0"
] | 2
|
2019-11-19T22:39:04.000Z
|
2019-11-20T16:13:43.000Z
|
onlinelinguisticdatabase/tests/functional/test_users.py
|
jrwdunham/old
|
bcfb856e8a8cccddcab4eaedc1aa107697ceee2f
|
[
"Apache-2.0"
] | 51
|
2015-02-11T21:42:48.000Z
|
2016-08-16T18:52:44.000Z
|
onlinelinguisticdatabase/tests/functional/test_users.py
|
jrwdunham/old
|
bcfb856e8a8cccddcab4eaedc1aa107697ceee2f
|
[
"Apache-2.0"
] | 1
|
2019-11-19T22:39:07.000Z
|
2019-11-19T22:39:07.000Z
|
# Copyright 2016 Joel Dunham
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import logging
import os
import simplejson as json
from time import sleep
from nose.tools import nottest
from onlinelinguisticdatabase.tests import TestController, url
import onlinelinguisticdatabase.model as model
from onlinelinguisticdatabase.model.meta import Session
import onlinelinguisticdatabase.lib.helpers as h
from onlinelinguisticdatabase.model import User
log = logging.getLogger(__name__)
class TestUsersController(TestController):
# Clear all models in the database except Language; recreate the users.
    def tearDown(self):
        """Reset database state after each test; also destroy the per-user
        directories created under the users path during the test."""
        TestController.tearDown(self, dirs_to_destroy=['user'])
    @nottest
    def test_index(self):
        """Tests that GET /users returns an array of all users and that order_by and pagination parameters work correctly."""
        # Add 100 users.
        def create_user_from_index(index):
            # Build one throwaway viewer user; only username/password/
            # first_name vary with the index.
            user = model.User()
            user.username = u'user_%d' % index
            user.password = u'Aaaaaa_%d' % index
            user.first_name = u'John%d' % index
            user.last_name = u'Doe'
            user.email = u'john.doe@gmail.com'
            user.role = u'viewer'
            return user
        users = [create_user_from_index(i) for i in range(1, 101)]
        Session.add_all(users)
        Session.commit()
        # Re-fetch so `users` includes any pre-existing fixture users too.
        users = h.get_users(True)
        users_count = len(users)
        # Test that GET /users gives us all of the users.
        response = self.app.get(url('users'), headers=self.json_headers,
                                extra_environ=self.extra_environ_view)
        resp = json.loads(response.body)
        assert len(resp) == users_count
        assert resp[3]['first_name'] == u'John1'
        assert resp[0]['id'] == users[0].id
        # Sensitive fields must never be serialized.
        assert 'password' not in resp[3]
        assert 'username' not in resp[3]
        assert response.content_type == 'application/json'
        # Test the paginator GET params.
        paginator = {'items_per_page': 23, 'page': 3}
        response = self.app.get(url('users'), paginator, headers=self.json_headers,
                                extra_environ=self.extra_environ_view)
        resp = json.loads(response.body)
        assert len(resp['items']) == 23
        # Page 3 with 23 per page starts at offset 2 * 23 = 46.
        assert resp['items'][0]['first_name'] == users[46].first_name
        assert response.content_type == 'application/json'
        # Test the order_by GET params.
        order_by_params = {'order_by_model': 'User', 'order_by_attribute': 'username',
                           'order_by_direction': 'desc'}
        response = self.app.get(url('users'), order_by_params,
                                headers=self.json_headers, extra_environ=self.extra_environ_view)
        resp = json.loads(response.body)
        result_set = sorted(users, key=lambda u: u.username, reverse=True)
        assert [u.id for u in result_set] == [u['id'] for u in resp]
        assert response.content_type == 'application/json'
        # Test the order_by *with* paginator.
        params = {'order_by_model': 'User', 'order_by_attribute': 'username',
                  'order_by_direction': 'desc', 'items_per_page': 23, 'page': 3}
        response = self.app.get(url('users'), params,
                                headers=self.json_headers, extra_environ=self.extra_environ_view)
        resp = json.loads(response.body)
        assert result_set[46].first_name == resp['items'][0]['first_name']
        # Expect a 400 error when the order_by_direction param is invalid
        order_by_params = {'order_by_model': 'User', 'order_by_attribute': 'username',
                           'order_by_direction': 'descending'}
        response = self.app.get(url('users'), order_by_params, status=400,
                                headers=self.json_headers, extra_environ=self.extra_environ_view)
        resp = json.loads(response.body)
        assert resp['errors']['order_by_direction'] == u"Value must be one of: asc; desc (not u'descending')"
        assert response.content_type == 'application/json'
        # Expect the default BY id ASCENDING ordering when the order_by_model/Attribute
        # param is invalid.
        order_by_params = {'order_by_model': 'Userist', 'order_by_attribute': 'nominal',
                           'order_by_direction': 'desc'}
        response = self.app.get(url('users'), order_by_params,
                                headers=self.json_headers, extra_environ=self.extra_environ_view)
        resp = json.loads(response.body)
        assert resp[0]['id'] == users[0].id
        # Expect a 400 error when the paginator GET params are empty
        # or are integers less than 1
        paginator = {'items_per_page': u'a', 'page': u''}
        response = self.app.get(url('users'), paginator, headers=self.json_headers,
                                extra_environ=self.extra_environ_view, status=400)
        resp = json.loads(response.body)
        assert resp['errors']['items_per_page'] == u'Please enter an integer value'
        assert resp['errors']['page'] == u'Please enter a value'
        assert response.content_type == 'application/json'
        paginator = {'items_per_page': 0, 'page': -1}
        response = self.app.get(url('users'), paginator, headers=self.json_headers,
                                extra_environ=self.extra_environ_view, status=400)
        resp = json.loads(response.body)
        assert resp['errors']['items_per_page'] == u'Please enter a number that is 1 or greater'
        assert resp['errors']['page'] == u'Please enter a number that is 1 or greater'
        assert response.content_type == 'application/json'
@nottest
def test_create(self):
    """Tests that POST /users creates a new user
    or returns an appropriate error if the input is invalid.

    Exercises, in order: authorization (contributors may not create users),
    successful creation (including creation of the user's directory on disk),
    and a long series of validation failures: duplicate/illicit/empty/overlong
    usernames, mismatched/empty/short/weak passwords, and invalid values for
    first_name, email, affiliation, role, markup_language and orthographies.
    NOTE: steps are order-dependent -- user counts and directory listings are
    carried forward from one request to the next.
    """
    # Attempt to create a user as a contributor and expect to fail
    params = self.user_create_params.copy()
    params.update({
        'username': u'johndoe',
        'password': u'Aaaaaa_1',
        'password_confirm': u'Aaaaaa_1',
        'first_name': u'John',
        'last_name': u'Doe',
        'email': u'john.doe@gmail.com',
        'role': u'viewer'
    })
    params = json.dumps(params)
    response = self.app.post(url('users'), params, self.json_headers,
                             self.extra_environ_contrib, status=403)
    resp = json.loads(response.body)
    assert resp['error'] == u'You are not authorized to access this resource.'
    assert response.content_type == 'application/json'

    # Create a valid one
    original_researchers_directory = os.listdir(self.users_path)
    original_user_count = Session.query(User).count()
    params = self.user_create_params.copy()
    params.update({
        'username': u'johndoe',
        'password': u'Aaaaaa_1',
        'password_confirm': u'Aaaaaa_1',
        'first_name': u'John',
        'last_name': u'Doe',
        'email': u'john.doe@gmail.com',
        'role': u'viewer'
    })
    params = json.dumps(params)
    response = self.app.post(url('users'), params, self.json_headers, self.extra_environ_admin)
    resp = json.loads(response.body)
    new_user_count = Session.query(User).count()
    new_researchers_directory = os.listdir(self.users_path)
    # mtime of the users directory; later compared to prove failed creations
    # do not touch the filesystem.
    researchers_directory_m_time = os.stat(self.users_path).st_mtime
    assert new_user_count == original_user_count + 1
    assert resp['username'] == u'johndoe'
    assert resp['email'] == u'john.doe@gmail.com'
    # Passwords must never be echoed back in responses.
    assert 'password' not in resp
    assert new_researchers_directory != original_researchers_directory
    assert u'johndoe' in new_researchers_directory
    assert response.content_type == 'application/json'

    # Invalid because username is not unique
    params = self.user_create_params.copy()
    params.update({
        'username': u'johndoe',
        'password': u'Zzzzzz_1',
        'password_confirm': u'Zzzzzz_1',
        'first_name': u'Johannes',
        'last_name': u'Dough',
        'email': u'johannes.dough@gmail.com',
        'role': u'viewer'
    })
    params = json.dumps(params)
    response = self.app.post(url('users'), params, self.json_headers,
                             self.extra_environ_admin, status=400)
    resp = json.loads(response.body)
    user_count = new_user_count
    new_user_count = Session.query(User).count()
    researchers_directory = new_researchers_directory
    new_researchers_directory = os.listdir(self.users_path)
    new_researchers_directory_m_time = os.stat(self.users_path).st_mtime
    # A failed creation must leave both the db and the filesystem untouched.
    assert researchers_directory == new_researchers_directory
    assert researchers_directory_m_time == new_researchers_directory_m_time
    assert new_user_count == user_count
    assert resp['errors'] == u'The username johndoe is already taken.'
    assert response.content_type == 'application/json'

    # Invalid because username contains illicit characters
    params = self.user_create_params.copy()
    params.update({
        'username': u'johannes dough',
        'password': u'Zzzzzz_1',
        'password_confirm': u'Zzzzzz_1',
        'first_name': u'Johannes',
        'last_name': u'Dough',
        'email': u'johannes.dough@gmail.com',
        'role': u'viewer'
    })
    params = json.dumps(params)
    response = self.app.post(url('users'), params, self.json_headers,
                             self.extra_environ_admin, status=400)
    resp = json.loads(response.body)
    user_count = new_user_count
    new_user_count = Session.query(User).count()
    assert new_user_count == user_count
    assert resp['errors'] == u'The username johannes dough is invalid; only letters of the English alphabet, numbers and the underscore are permitted.'
    assert response.content_type == 'application/json'

    # Invalid because username must be a non-empty string
    params = self.user_create_params.copy()
    params.update({
        'username': u'',
        'password': u'Zzzzzz_1',
        'password_confirm': u'Zzzzzz_1',
        'first_name': u'Johannes',
        'last_name': u'Dough',
        'email': u'johannes.dough@gmail.com',
        'role': u'viewer'
    })
    params = json.dumps(params)
    response = self.app.post(url('users'), params, self.json_headers,
                             self.extra_environ_admin, status=400)
    resp = json.loads(response.body)
    user_count = new_user_count
    new_user_count = Session.query(User).count()
    assert new_user_count == user_count
    assert resp['errors'] == u'A username is required when creating a new user.'
    assert response.content_type == 'application/json'

    # Same as above but with username explicitly None instead of empty.
    params = self.user_create_params.copy()
    params.update({
        'username': None,
        'password': u'Zzzzzz_1',
        'password_confirm': u'Zzzzzz_1',
        'first_name': u'Johannes',
        'last_name': u'Dough',
        'email': u'johannes.dough@gmail.com',
        'role': u'viewer'
    })
    params = json.dumps(params)
    response = self.app.post(url('users'), params, self.json_headers,
                             self.extra_environ_admin, status=400)
    resp = json.loads(response.body)
    user_count = new_user_count
    new_user_count = Session.query(User).count()
    assert new_user_count == user_count
    assert resp['errors'] == u'A username is required when creating a new user.'
    assert response.content_type == 'application/json'

    # Invalid because username and password are both too long. Notice how the space in the
    # username does not raise an error because the chained validators are not
    # called
    params = self.user_create_params.copy()
    params.update({
        'username': u'johannes dough' * 200,
        'password': u'Zzzzzz_1' * 200,
        'password_confirm': u'Zzzzzz_1' * 200,
        'first_name': u'Johannes',
        'last_name': u'Dough',
        'email': u'johannes.dough@gmail.com',
        'role': u'viewer'
    })
    params = json.dumps(params)
    response = self.app.post(url('users'), params, self.json_headers,
                             self.extra_environ_admin, status=400)
    resp = json.loads(response.body)
    user_count = new_user_count
    new_user_count = Session.query(User).count()
    assert new_user_count == user_count
    assert resp['errors']['username'] == u'Enter a value not more than 255 characters long'
    assert resp['errors']['password'] == u'Enter a value not more than 255 characters long'
    assert response.content_type == 'application/json'

    # Invalid because password and password_confirm do not match.
    params = self.user_create_params.copy()
    params.update({
        'username': u'johndoe',
        'password': u'Zzzzzz_1',
        'password_confirm': u'Zzzzzzx_1',
        'first_name': u'Johannes',
        'last_name': u'Dough',
        'email': u'johannes.dough@gmail.com',
        'role': u'viewer'
    })
    params = json.dumps(params)
    response = self.app.post(url('users'), params, self.json_headers,
                             self.extra_environ_admin, status=400)
    resp = json.loads(response.body)
    user_count = new_user_count
    new_user_count = Session.query(User).count()
    assert new_user_count == user_count
    assert resp['errors'] == u'The password and password_confirm values do not match.'
    assert response.content_type == 'application/json'

    # Invalid because no password was provided.
    params = self.user_create_params.copy()
    params.update({
        'username': u'johndoe',
        'password': u'',
        'password_confirm': u'',
        'first_name': u'Johannes',
        'last_name': u'Dough',
        'email': u'johannes.dough@gmail.com',
        'role': u'viewer'
    })
    params = json.dumps(params)
    response = self.app.post(url('users'), params, self.json_headers,
                             self.extra_environ_admin, status=400)
    resp = json.loads(response.body)
    user_count = new_user_count
    new_user_count = Session.query(User).count()
    assert new_user_count == user_count
    assert resp['errors'] == u'A password is required when creating a new user.'
    assert response.content_type == 'application/json'

    # Invalid because no password was provided (non-string JSON values).
    params = self.user_create_params.copy()
    params.update({
        'username': u'johndoe',
        'password': [],
        'password_confirm': [],
        'first_name': u'Johannes',
        'last_name': u'Dough',
        'email': u'johannes.dough@gmail.com',
        'role': u'viewer'
    })
    params = json.dumps(params)
    response = self.app.post(url('users'), params, self.json_headers,
                             self.extra_environ_admin, status=400)
    resp = json.loads(response.body)
    user_count = new_user_count
    new_user_count = Session.query(User).count()
    assert new_user_count == user_count
    assert resp['errors'] == u'A password is required when creating a new user.'
    assert response.content_type == 'application/json'

    # Invalid because the password is too short
    params = self.user_create_params.copy()
    params.update({
        'username': u'johndoe',
        'password': u'aA_9',
        'password_confirm': u'aA_9',
        'first_name': u'Johannes',
        'last_name': u'Dough',
        'email': u'johannes.dough@gmail.com',
        'role': u'viewer'
    })
    params = json.dumps(params)
    response = self.app.post(url('users'), params, self.json_headers,
                             self.extra_environ_admin, status=400)
    resp = json.loads(response.body)
    user_count = new_user_count
    new_user_count = Session.query(User).count()
    assert new_user_count == user_count
    assert resp['errors'] == u' '.join([
        u'The submitted password is invalid; valid passwords contain at least 8 characters',
        u'and either contain at least one character that is not in the printable ASCII range',
        u'or else contain at least one symbol, one digit, one uppercase letter and one lowercase letter.'])
    assert response.content_type == 'application/json'

    # Invalid because the password does not contain an uppercase printable ASCII character
    params = self.user_create_params.copy()
    params.update({
        'username': u'johndoe',
        'password': u'abcdefg_9',
        'password_confirm': u'abcdefg_9',
        'first_name': u'Johannes',
        'last_name': u'Dough',
        'email': u'johannes.dough@gmail.com',
        'role': u'viewer'
    })
    params = json.dumps(params)
    response = self.app.post(url('users'), params, self.json_headers,
                             self.extra_environ_admin, status=400)
    resp = json.loads(response.body)
    user_count = new_user_count
    new_user_count = Session.query(User).count()
    assert new_user_count == user_count
    assert resp['errors'] == u' '.join([
        u'The submitted password is invalid; valid passwords contain at least 8 characters',
        u'and either contain at least one character that is not in the printable ASCII range',
        u'or else contain at least one symbol, one digit, one uppercase letter and one lowercase letter.'])

    # Invalid because the password does not contain a lowercase printable ASCII character
    params = self.user_create_params.copy()
    params.update({
        'username': u'johndoe',
        'password': u'ABCDEFG_9',
        'password_confirm': u'ABCDEFG_9',
        'first_name': u'Johannes',
        'last_name': u'Dough',
        'email': u'johannes.dough@gmail.com',
        'role': u'viewer'
    })
    params = json.dumps(params)
    response = self.app.post(url('users'), params, self.json_headers,
                             self.extra_environ_admin, status=400)
    resp = json.loads(response.body)
    user_count = new_user_count
    new_user_count = Session.query(User).count()
    assert new_user_count == user_count
    assert resp['errors'] == u' '.join([
        u'The submitted password is invalid; valid passwords contain at least 8 characters',
        u'and either contain at least one character that is not in the printable ASCII range',
        u'or else contain at least one symbol, one digit, one uppercase letter and one lowercase letter.'])

    # Invalid because the password does not contain a symbol from the printable ASCII character range
    params = self.user_create_params.copy()
    params.update({
        'username': u'johndoe',
        'password': u'abcdefgH9',
        'password_confirm': u'abcdefgH9',
        'first_name': u'Johannes',
        'last_name': u'Dough',
        'email': u'johannes.dough@gmail.com',
        'role': u'viewer'
    })
    params = json.dumps(params)
    response = self.app.post(url('users'), params, self.json_headers,
                             self.extra_environ_admin, status=400)
    resp = json.loads(response.body)
    user_count = new_user_count
    new_user_count = Session.query(User).count()
    assert new_user_count == user_count
    assert resp['errors'] == u' '.join([
        u'The submitted password is invalid; valid passwords contain at least 8 characters',
        u'and either contain at least one character that is not in the printable ASCII range',
        u'or else contain at least one symbol, one digit, one uppercase letter and one lowercase letter.'])
    assert response.content_type == 'application/json'

    # Invalid because the password does not contain a digit
    params = self.user_create_params.copy()
    params.update({
        'username': u'johndoe',
        'password': u'abcdefgH.',
        'password_confirm': u'abcdefgH.',
        'first_name': u'Johannes',
        'last_name': u'Dough',
        'email': u'johannes.dough@gmail.com',
        'role': u'viewer'
    })
    params = json.dumps(params)
    response = self.app.post(url('users'), params, self.json_headers,
                             self.extra_environ_admin, status=400)
    resp = json.loads(response.body)
    user_count = new_user_count
    new_user_count = Session.query(User).count()
    assert new_user_count == user_count
    assert resp['errors'] == u' '.join([
        u'The submitted password is invalid; valid passwords contain at least 8 characters',
        u'and either contain at least one character that is not in the printable ASCII range',
        u'or else contain at least one symbol, one digit, one uppercase letter and one lowercase letter.'])
    assert response.content_type == 'application/json'

    # Valid user: the password contains a unicode character (so the
    # symbol/digit/case requirements are waived).
    researchers_directory = os.listdir(self.users_path)
    researchers_directory_m_time = os.stat(self.users_path).st_mtime
    # Sleep so the directory mtime comparison below can detect the change.
    sleep(1)
    params = self.user_create_params.copy()
    params.update({
        'username': u'aadams',
        'password': u'abcde\u0301fgh',
        'password_confirm': u'abcde\u0301fgh',
        'first_name': u'Alexander',
        'last_name': u'Adams',
        'email': u'aadams@gmail.com',
        'role': u'viewer'
    })
    params = json.dumps(params)
    response = self.app.post(url('users'), params, self.json_headers,
                             self.extra_environ_admin)
    resp = json.loads(response.body)
    user_count = new_user_count
    new_user_count = Session.query(User).count()
    new_researchers_directory = os.listdir(self.users_path)
    new_researchers_directory_m_time = os.stat(self.users_path).st_mtime
    assert u'aadams' not in researchers_directory
    assert u'aadams' in new_researchers_directory
    assert researchers_directory_m_time != new_researchers_directory_m_time
    assert new_user_count == user_count + 1
    assert resp['first_name'] == u'Alexander'
    assert u'password' not in resp
    assert response.content_type == 'application/json'

    # Invalid user: first_name is empty, email is invalid, affiliation is too
    # long, role is unrecognized, input_orthography is nonexistent, markup_language is unrecognized.
    params = self.user_create_params.copy()
    params.update({
        'username': u'xyh',
        'password': u'abcde\u0301fgh',
        'password_confirm': u'abcde\u0301fgh',
        'first_name': u'',
        'last_name': u'Yetzer-Hara',
        'affiliation': u'here, there, everywhere, ' * 200,
        'email': u'paradoxofevil@gmail',
        'role': u'master',
        'markup_language': u'markdownandupanddown',
        'page_content': u'My OLD Page\n===============\n\nWhat a great linguistic fieldwork application!\n\n',
        'input_orthography': 1234
    })
    params = json.dumps(params)
    response = self.app.post(url('users'), params, self.json_headers,
                             self.extra_environ_admin, status=400)
    resp = json.loads(response.body)
    user_count = new_user_count
    new_user_count = Session.query(User).count()
    assert new_user_count == user_count
    assert resp['errors']['first_name'] == u'Please enter a value'
    assert resp['errors']['email'] == u'The domain portion of the email address is invalid (the portion after the @: gmail)'
    assert resp['errors']['affiliation'] == u'Enter a value not more than 255 characters long'
    assert resp['errors']['role'] == u"Value must be one of: viewer; contributor; administrator (not u'master')"
    assert resp['errors']['input_orthography'] == u'There is no orthography with id 1234.'
    assert resp['errors']['markup_language'] == u"Value must be one of: Markdown; reStructuredText (not u'markdownandupanddown')"
    assert response.content_type == 'application/json'

    # Valid user: all fields have valid values
    orthography1 = h.generate_default_orthography1()
    orthography2 = h.generate_default_orthography2()
    Session.add_all([orthography1, orthography2])
    Session.commit()
    orthography1_id = orthography1.id
    orthography2_id = orthography2.id
    params = self.user_create_params.copy()
    params.update({
        'username': u'alyoshas',
        'password': u'xY9.Bfx_J Jre\u0301',
        'password_confirm': u'xY9.Bfx_J Jre\u0301',
        'first_name': u'Alexander',
        'last_name': u'Solzhenitsyn',
        'email': u'amanaplanacanalpanama@gmail.com',
        'affiliation': u'Moscow State University',
        'role': u'contributor',
        'markup_language': u'Markdown',
        'page_content': u'My OLD Page\n===============\n\nWhat a great linguistic fieldwork application!\n\n',
        'input_orthography': orthography1_id,
        'output_orthography': orthography2_id
    })
    params = json.dumps(params)
    response = self.app.post(url('users'), params, self.json_headers,
                             self.extra_environ_admin)
    resp = json.loads(response.body)
    user_count = new_user_count
    new_user_count = Session.query(User).count()
    assert new_user_count == user_count + 1
    assert resp['username'] == u'alyoshas'
    assert resp['first_name'] == u'Alexander'
    assert resp['last_name'] == u'Solzhenitsyn'
    assert resp['email'] == u'amanaplanacanalpanama@gmail.com'
    assert resp['affiliation'] == u'Moscow State University'
    assert resp['role'] == u'contributor'
    assert resp['markup_language'] == u'Markdown'
    assert resp['page_content'] == u'My OLD Page\n===============\n\nWhat a great linguistic fieldwork application!\n\n'
    # The server generates the HTML from page_content using the markup language.
    assert resp['html'] == h.get_HTML_from_contents(resp['page_content'], 'Markdown')
    assert resp['input_orthography']['id'] == orthography1_id
    assert resp['output_orthography']['id'] == orthography2_id
    assert response.content_type == 'application/json'
@nottest
def test_new(self):
    """Tests that GET /users/new returns the data necessary to create a new user.

    The properties of the JSON object are 'roles', 'orthographies' and
    'markup_languages' and their values are arrays/lists.  Also verifies the
    datetime-based GET-param protocol that lets clients skip re-fetching
    stores that have not changed since a given timestamp.
    """
    # A contributor (or a viewer) should return a 403 status code on the
    # new action, which requires an administrator.
    response = self.app.get(url('new_user'), extra_environ=self.extra_environ_contrib,
                            status=403)
    resp = json.loads(response.body)
    assert resp['error'] == u'You are not authorized to access this resource.'
    assert response.content_type == 'application/json'

    # Add some test data to the database.
    application_settings = h.generate_default_application_settings()
    orthography1 = h.generate_default_orthography1()
    orthography2 = h.generate_default_orthography2()
    Session.add_all([application_settings, orthography1, orthography2])
    Session.commit()

    # Get the data currently in the db (see websetup.py for the test data).
    data = {
        'orthographies': h.get_mini_dicts_getter('Orthography')(),
        'roles': h.user_roles,
        'markup_languages': h.markup_languages
    }
    # JSON.stringify and then re-Python-ify the data. This is what the data
    # should look like in the response to a simulated GET request.
    data = json.loads(json.dumps(data, cls=h.JSONOLDEncoder))

    # GET /users/new without params. Without any GET params, /files/new
    # should return a JSON array for every store.
    response = self.app.get(url('new_user'),
                            extra_environ=self.extra_environ_admin)
    resp = json.loads(response.body)
    assert resp['orthographies'] == data['orthographies']
    assert resp['roles'] == data['roles']
    assert resp['markup_languages'] == data['markup_languages']
    assert response.content_type == 'application/json'

    # GET /new_file with params. Param values are treated as strings, not
    # JSON. If any params are specified, the default is to return a JSON
    # array corresponding to store for the param. There are three cases
    # that will result in an empty JSON array being returned:
    # 1. the param is not specified
    # 2. the value of the specified param is an empty string
    # 3. the value of the specified param is an ISO 8601 UTC datetime
    #    string that matches the most recent datetime_modified value of the
    #    store in question.
    params = {
        # Value is any string: 'orthographies' will be in response.
        'orthographies': 'anything can go here!'
    }
    response = self.app.get(url('new_user'), params,
                            extra_environ=self.extra_environ_admin)
    resp = json.loads(response.body)
    assert resp['orthographies'] == data['orthographies']
    assert resp['roles'] == data['roles']
    assert resp['markup_languages'] == data['markup_languages']
    assert response.content_type == 'application/json'

    params = {
        # Value is ISO 8601 UTC datetime string that does not match the most
        # recent Orthography.datetime_modified value: 'orthographies' *will* be in
        # response.
        'orthographies': datetime.datetime.utcnow().isoformat()
    }
    response = self.app.get(url('new_user'), params,
                            extra_environ=self.extra_environ_admin)
    resp = json.loads(response.body)
    assert resp['orthographies'] == data['orthographies']
    assert resp['roles'] == data['roles']
    assert resp['markup_languages'] == data['markup_languages']
    assert response.content_type == 'application/json'

    params = {
        # Value is ISO 8601 UTC datetime string that does match the most
        # recent Orthography.datetime_modified value: 'orthographies' will *not* be in response.
        'orthographies': h.get_most_recent_modification_datetime('Orthography').isoformat()
    }
    response = self.app.get(url('new_user'), params,
                            extra_environ=self.extra_environ_admin)
    resp = json.loads(response.body)
    assert resp['orthographies'] == []
    assert resp['roles'] == data['roles']
    assert resp['markup_languages'] == data['markup_languages']
    assert response.content_type == 'application/json'
@nottest
def test_update(self):
    """Tests that PUT /users/id updates the user with id=id.

    Covers: admin updates (including username and role changes, which only
    admins may make), contributor updates of other users (forbidden),
    self-updates (allowed, including password changes), forbidden
    self-changes of username/role, empty username/password retaining prior
    values, and the no-new-data failure case.  Steps are order-dependent.
    """
    default_contributor_id = Session.query(User).filter(User.role==u'contributor').first().id
    # Environ simulating authentication as the default contributor.
    def_contrib_environ = {'test.authentication.id': default_contributor_id}

    # Create a user to update.
    original_researchers_directory = os.listdir(self.users_path)
    original_user_count = Session.query(User).count()
    params = self.user_create_params.copy()
    params.update({
        'username': u'johndoe',
        'password': u'Aaaaaa_1',
        'password_confirm': u'Aaaaaa_1',
        'first_name': u'John',
        'last_name': u'Doe',
        'email': u'john.doe@gmail.com',
        'role': u'viewer'
    })
    params = json.dumps(params)
    response = self.app.post(url('users'), params, self.json_headers, self.extra_environ_admin)
    resp = json.loads(response.body)
    user_id = resp['id']
    datetime_modified = resp['datetime_modified']
    new_user_count = Session.query(User).count()
    new_researchers_directory = os.listdir(self.users_path)
    assert new_user_count == original_user_count + 1
    assert resp['username'] == u'johndoe'
    assert resp['email'] == u'john.doe@gmail.com'
    assert 'password' not in resp
    assert new_researchers_directory != original_researchers_directory
    assert u'johndoe' in new_researchers_directory
    assert response.content_type == 'application/json'

    # Update the user
    sleep(1)  # sleep for a second to ensure that MySQL registers a different datetime_modified for the update
    params = self.user_create_params.copy()
    params.update({
        'username': u'johnbuck',  # Admins CAN change usernames
        'password': u'Aaaaaa_1',
        'password_confirm': u'Aaaaaa_1',
        'first_name': u'John',
        'last_name': u'Doe',
        'email': u'john.doe@gmail.com',
        'role': u'contributor'  # Admins CAN change roles
    })
    params = json.dumps(params)
    response = self.app.put(url('user', id=user_id), params, self.json_headers,
                            self.extra_environ_admin)
    resp = json.loads(response.body)
    new_datetime_modified = resp['datetime_modified']
    user_count = new_user_count
    new_user_count = Session.query(User).count()
    researchers_directory = new_researchers_directory
    new_researchers_directory = os.listdir(self.users_path)
    assert user_count == new_user_count
    assert new_datetime_modified != datetime_modified
    assert resp['username'] == u'johnbuck'
    assert resp['role'] == u'contributor'
    assert resp['last_name'] == u'Doe'
    # A username change renames the user's directory on disk.
    assert researchers_directory != new_researchers_directory
    assert u'johndoe' in researchers_directory and u'johndoe' not in new_researchers_directory
    assert u'johnbuck' in new_researchers_directory and u'johnbuck' not in researchers_directory
    assert response.content_type == 'application/json'

    # Attempt to update the user as a contributor and expect to fail
    params = self.user_create_params.copy()
    params.update({
        'username': u'johnbuck',
        'password': u'Aaaaaa_1',
        'password_confirm': u'Aaaaaa_1',
        'first_name': u'John',
        'last_name': u'Buck',  # here is the attempted change
        'email': u'john.doe@gmail.com',
        'role': u'contributor'
    })
    params = json.dumps(params)
    response = self.app.put(url('user', id=user_id), params, self.json_headers,
                            def_contrib_environ, status=403)
    resp = json.loads(response.body)
    assert resp['error'] == u'You are not authorized to access this resource.'
    assert response.content_type == 'application/json'

    # Attempt to update the user as the user and expect to succeed
    user_environ = {'test.authentication.id': user_id}
    params = self.user_create_params.copy()
    params.update({
        'username': u'johnbuck',
        'password': u'Zzzzzz.9',  # Change the password too
        'password_confirm': u'Zzzzzz.9',
        'first_name': u'John',
        'last_name': u'Buck',  # Now this change will succeed
        'email': u'john.doe@gmail.com',
        'role': u'contributor'
    })
    params = json.dumps(params)
    response = self.app.put(url('user', id=user_id), params, self.json_headers,
                            user_environ)
    resp = json.loads(response.body)
    user_just_updated = Session.query(User).get(user_id)
    assert resp['username'] == u'johnbuck'
    assert resp['last_name'] == u'Buck'
    # The stored password must be the salted encryption of the new password.
    assert h.encrypt_password(u'Zzzzzz.9', str(user_just_updated.salt)) == user_just_updated.password
    assert response.content_type == 'application/json'

    # Simulate a user attempting to update his username. Expect to fail.
    params = self.user_create_params.copy()
    params.update({
        'username': u'iroc_z',  # Not permitted
        'password': u'Zzzzzz.9',
        'password_confirm': u'Zzzzzz.9',
        'first_name': u'John',
        'last_name': u'Buck',
        'email': u'john.doe@gmail.com',
        'role': u'contributor'
    })
    params = json.dumps(params)
    response = self.app.put(url('user', id=user_id), params, self.json_headers,
                            user_environ, status=400)
    resp = json.loads(response.body)
    assert resp['errors'] == u'Only administrators can update usernames.'
    assert response.content_type == 'application/json'

    # Simulate a user attempting to update his role. Expect to fail.
    params = self.user_create_params.copy()
    params.update({
        'username': u'johnbuck',
        'password': u'Zzzzzz.9',
        'password_confirm': u'Zzzzzz.9',
        'first_name': u'John',
        'last_name': u'Buck',
        'email': u'john.doe@gmail.com',
        'role': u'administrator'  # Not permitted
    })
    params = json.dumps(params)
    response = self.app.put(url('user', id=user_id), params, self.json_headers,
                            user_environ, status=400)
    resp = json.loads(response.body)
    assert resp['errors'] == u'Only administrators can update roles.'
    assert response.content_type == 'application/json'

    # Update the user with empty values for username and password and expect
    # these fields to retain their original values.
    md_contents = u'\n'.join([
        'My Page',
        '=======',
        '',
        'Research Interests',
        '---------------------',
        '',
        '* Item 1',
        '* Item 2',
        ''
    ])
    params = self.user_create_params.copy()
    params.update({
        'first_name': u'John',
        'last_name': u'Buckley',  # Here is a change
        'email': u'john.doe@gmail.com',
        'role': u'contributor',
        'markup_language': u'Markdown',  # Another change
        'page_content': md_contents  # And another
    })
    params = json.dumps(params)
    response = self.app.put(url('user', id=user_id), params, self.json_headers, user_environ)
    resp = json.loads(response.body)
    user_just_updated = Session.query(User).get(user_id)
    # Username and password are unchanged; only the new fields differ.
    assert resp['username'] == u'johnbuck'
    assert resp['last_name'] == u'Buckley'
    assert h.encrypt_password(u'Zzzzzz.9', str(user_just_updated.salt)) == user_just_updated.password
    assert resp['html'] == h.get_HTML_from_contents(md_contents, u'Markdown')
    assert response.content_type == 'application/json'

    # Attempt an update with no new input and expect to fail
    params = self.user_create_params.copy()
    params.update({
        'first_name': u'John',
        'last_name': u'Buckley',
        'email': u'john.doe@gmail.com',
        'role': u'contributor',
        'markup_language': u'Markdown',
        'page_content': md_contents
    })
    params = json.dumps(params)
    response = self.app.put(url('user', id=user_id), params, self.json_headers, user_environ, status=400)
    resp = json.loads(response.body)
    assert resp['error'] == u'The update request failed because the submitted data were not new.'
    assert response.content_type == 'application/json'
@nottest
def test_delete(self):
    """Tests that DELETE /users/id deletes the user with id=id.

    Covers: successful deletion by an admin (including removal of the
    user's non-empty directory on disk), the prohibition on users deleting
    their own accounts, and 404 responses for invalid and missing ids.
    """
    # Create a user to delete.
    original_researchers_directory = os.listdir(self.users_path)
    original_user_count = Session.query(User).count()
    params = self.user_create_params.copy()
    params.update({
        'username': u'johndoe',
        'password': u'Aaaaaa_1',
        'password_confirm': u'Aaaaaa_1',
        'first_name': u'John',
        'last_name': u'Doe',
        'email': u'john.doe@gmail.com',
        'role': u'viewer'
    })
    params = json.dumps(params)
    response = self.app.post(url('users'), params, self.json_headers, self.extra_environ_admin)
    resp = json.loads(response.body)
    user_id = resp['id']
    new_user_count = Session.query(User).count()
    new_researchers_directory = os.listdir(self.users_path)
    researchers_directory_m_time = os.stat(self.users_path).st_mtime
    assert new_user_count == original_user_count + 1
    assert resp['username'] == u'johndoe'
    assert resp['email'] == u'john.doe@gmail.com'
    assert 'password' not in resp
    assert new_researchers_directory != original_researchers_directory
    assert u'johndoe' in new_researchers_directory

    # Write a file to the user's directory just to make sure that the deletion
    # works on a non-empty directory.  (Context manager guarantees the file
    # handle is closed even if a subsequent assertion fails.)
    with open(os.path.join(self.users_path, 'johndoe', 'test_file.txt'), 'w') as f:
        f.write('Some content here.')
    assert u'test_file.txt' in os.listdir(os.path.join(self.users_path, 'johndoe'))

    # Now delete the user
    response = self.app.delete(url('user', id=user_id), headers=self.json_headers,
                               extra_environ=self.extra_environ_admin)
    resp = json.loads(response.body)
    user_count = new_user_count
    new_user_count = Session.query(User).count()
    researchers_directory = new_researchers_directory
    new_researchers_directory = os.listdir(self.users_path)
    deleted_user = Session.query(User).get(user_id)
    assert deleted_user is None
    assert new_user_count == user_count - 1
    assert resp['id'] == user_id
    assert 'password' not in resp
    assert resp['username'] == u'johndoe'
    # The user's (non-empty) directory must have been removed from disk.
    assert researchers_directory != new_researchers_directory
    assert u'johndoe' not in new_researchers_directory and u'johndoe' in researchers_directory
    assert response.content_type == 'application/json'

    # Again create a user to (attempt to) delete.
    original_researchers_directory = os.listdir(self.users_path)
    original_user_count = Session.query(User).count()
    params = self.user_create_params.copy()
    params.update({
        'username': u'johndoe',
        'password': u'Aaaaaa_1',
        'password_confirm': u'Aaaaaa_1',
        'first_name': u'John',
        'last_name': u'Doe',
        'email': u'john.doe@gmail.com',
        'role': u'viewer'
    })
    params = json.dumps(params)
    response = self.app.post(url('users'), params, self.json_headers, self.extra_environ_admin)
    resp = json.loads(response.body)
    user_id = resp['id']
    new_user_count = Session.query(User).count()
    new_researchers_directory = os.listdir(self.users_path)
    assert new_user_count == original_user_count + 1
    assert resp['username'] == u'johndoe'
    assert resp['email'] == u'john.doe@gmail.com'
    assert 'password' not in resp
    assert new_researchers_directory != original_researchers_directory
    assert u'johndoe' in new_researchers_directory
    assert response.content_type == 'application/json'

    # Show that a user cannot delete his own user object
    user_environ = {'test.authentication.id': user_id}
    response = self.app.delete(url('user', id=user_id), headers=self.json_headers,
                               extra_environ=user_environ, status=403)
    resp = json.loads(response.body)
    user_count = new_user_count
    new_user_count = Session.query(User).count()
    # The forbidden request must not have removed the user.
    assert new_user_count == user_count
    assert resp['error'] == u'You are not authorized to access this resource.'
    assert response.content_type == 'application/json'

    # Delete with an invalid id.  (Renamed from ``id`` to avoid shadowing
    # the builtin.)
    missing_id = 9999999999999
    response = self.app.delete(url('user', id=missing_id), headers=self.json_headers,
                               extra_environ=self.extra_environ_admin, status=404)
    assert u'There is no user with id %s' % missing_id in json.loads(response.body)['error']
    assert response.content_type == 'application/json'

    # Delete without an id
    response = self.app.delete(url('user', id=''), status=404,
                               headers=self.json_headers, extra_environ=self.extra_environ_admin)
    assert json.loads(response.body)['error'] == 'The resource could not be found.'
    assert response.content_type == 'application/json'
@nottest
def test_show(self):
    """Tests that GET /users/id returns the user with id=id or an appropriate error.

    Covers: successful creation of a fixture user, 404s for invalid and
    missing ids, and a viewer-authenticated GET that must omit the
    sensitive ``username`` and ``password`` attributes.
    """
    # Create a user to show.
    original_researchers_directory = os.listdir(self.users_path)
    original_user_count = Session.query(User).count()
    params = self.user_create_params.copy()
    params.update({
        'username': u'johndoe',
        'password': u'Aaaaaa_1',
        'password_confirm': u'Aaaaaa_1',
        'first_name': u'John',
        'last_name': u'Doe',
        'email': u'john.doe@gmail.com',
        'role': u'viewer'
    })
    params = json.dumps(params)
    response = self.app.post(url('users'), params, self.json_headers, self.extra_environ_admin)
    resp = json.loads(response.body)
    user_id = resp['id']
    new_user_count = Session.query(User).count()
    new_researchers_directory = os.listdir(self.users_path)
    assert new_user_count == original_user_count + 1
    assert resp['username'] == u'johndoe'
    assert resp['email'] == u'john.doe@gmail.com'
    assert 'password' not in resp
    assert new_researchers_directory != original_researchers_directory
    assert u'johndoe' in new_researchers_directory

    # Try to get a user using an invalid id.  (Renamed from ``id`` to avoid
    # shadowing the builtin; the body is parsed once instead of twice.)
    missing_id = 100000000000
    response = self.app.get(url('user', id=missing_id), headers=self.json_headers,
                            extra_environ=self.extra_environ_admin, status=404)
    resp = json.loads(response.body)
    assert u'There is no user with id %s' % missing_id in resp['error']
    assert response.content_type == 'application/json'

    # No id
    response = self.app.get(url('user', id=''), status=404,
                            headers=self.json_headers, extra_environ=self.extra_environ_admin)
    assert json.loads(response.body)['error'] == 'The resource could not be found.'
    assert response.content_type == 'application/json'

    # Valid id (show that a viewer can GET a user too)
    response = self.app.get(url('user', id=user_id), headers=self.json_headers,
                            extra_environ=self.extra_environ_view)
    resp = json.loads(response.body)
    # Sensitive attributes are stripped from the public representation.
    assert 'username' not in resp
    assert 'password' not in resp
    assert resp['email'] == u'john.doe@gmail.com'
    assert response.content_type == 'application/json'
@nottest
def test_edit(self):
    """Tests that GET /users/id/edit returns a JSON object of data necessary to edit the user with id=id.
    The JSON object is of the form {'user': {...}, 'data': {...}} or
    {'error': '...'} (with a 404 status code) depending on whether the id is
    valid or invalid/unspecified, respectively.
    """
    # Seed the database with two orthographies.
    Session.add_all([h.generate_default_orthography1(),
                     h.generate_default_orthography2()])
    Session.commit()
    # Snapshot the editable data currently in the db (see websetup.py for the
    # test data), round-tripped through JSON so it matches what the data will
    # look like in the response to a simulated GET request.
    data = json.loads(json.dumps({
        'orthographies': h.get_mini_dicts_getter('Orthography')(),
        'roles': h.user_roles,
        'markup_languages': h.markup_languages
    }, cls=h.JSONOLDEncoder))
    # Create a user to edit.
    researchers_dir_before = os.listdir(self.users_path)
    user_count_before = Session.query(User).count()
    create_params = self.user_create_params.copy()
    create_params.update({
        'username': u'johndoe',
        'password': u'Aaaaaa_1',
        'password_confirm': u'Aaaaaa_1',
        'first_name': u'John',
        'last_name': u'Doe',
        'email': u'john.doe@gmail.com',
        'role': u'viewer'
    })
    response = self.app.post(url('users'), json.dumps(create_params),
        self.json_headers, self.extra_environ_admin)
    resp = json.loads(response.body)
    user_id = resp['id']
    researchers_dir_after = os.listdir(self.users_path)
    assert Session.query(User).count() == user_count_before + 1
    assert resp['username'] == u'johndoe'
    assert resp['email'] == u'john.doe@gmail.com'
    assert 'password' not in resp
    assert researchers_dir_after != researchers_dir_before
    assert u'johndoe' in researchers_dir_after
    assert response.content_type == 'application/json'
    # Not logged in: expect 401 Unauthorized
    response = self.app.get(url('edit_user', id=user_id), status=401)
    resp = json.loads(response.body)
    assert resp['error'] == u'Authentication is required to access this resource.'
    assert response.content_type == 'application/json'
    # Invalid (nonexistent) id: expect 404 with an explanatory message.
    bad_id = 9876544
    response = self.app.get(url('edit_user', id=bad_id),
        headers=self.json_headers, extra_environ=self.extra_environ_admin, status=404)
    assert u'There is no user with id %s' % bad_id in json.loads(response.body)['error']
    assert response.content_type == 'application/json'
    # No id: expect 404 with the generic routing error.
    response = self.app.get(url('edit_user', id=''), status=404,
        headers=self.json_headers, extra_environ=self.extra_environ_admin)
    assert json.loads(response.body)['error'] == u'The resource could not be found.'
    assert response.content_type == 'application/json'
    # Valid id, admin: the full editable data set is returned.
    response = self.app.get(url('edit_user', id=user_id),
        headers=self.json_headers, extra_environ=self.extra_environ_admin)
    resp = json.loads(response.body)
    assert resp['user']['username'] == u'johndoe'
    assert resp['data']['orthographies'] == data['orthographies']
    assert resp['data']['roles'] == data['roles']
    assert resp['data']['markup_languages'] == data['markup_languages']
    assert response.content_type == 'application/json'
    # Valid id, user self-editing, GET params.  Passing an ISO 8601 UTC
    # datetime string matching the most recent Orthography.datetime_modified
    # means 'orthographies' will *not* be in the response.
    own_environ = {'test.authentication.id': user_id}
    get_params = {
        'orthographies': h.get_most_recent_modification_datetime('Orthography').isoformat()
    }
    response = self.app.get(url('edit_user', id=user_id), get_params,
        headers=self.json_headers, extra_environ=own_environ)
    resp = json.loads(response.body)
    assert resp['user']['username'] == u'johndoe'
    assert resp['data']['orthographies'] == []
    assert resp['data']['roles'] == data['roles']
    assert resp['data']['markup_languages'] == data['markup_languages']
    assert response.content_type == 'application/json'
    # Valid id but contributor -- expect to fail
    response = self.app.get(url('edit_user', id=user_id),
        headers=self.json_headers, extra_environ=self.extra_environ_contrib, status=403)
    resp = json.loads(response.body)
    assert resp['error'] == u'You are not authorized to access this resource.'
    assert response.content_type == 'application/json'
| 47.825
| 155
| 0.616328
| 6,513
| 53,564
| 4.908798
| 0.074774
| 0.041663
| 0.025523
| 0.03744
| 0.815176
| 0.798098
| 0.781677
| 0.759657
| 0.741484
| 0.709737
| 0
| 0.008949
| 0.271899
| 53,564
| 1,119
| 156
| 47.867739
| 0.810821
| 0.114499
| 0
| 0.781857
| 0
| 0.00108
| 0.22644
| 0.012582
| 0
| 0
| 0
| 0
| 0.233261
| 1
| 0.009719
| false
| 0.084233
| 0.011879
| 0
| 0.023758
| 0.0054
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
6b1c713940ee0f51621d7d587fedfbf664d9f1ed
| 37,525
|
py
|
Python
|
api_gateway_sdk/model/tool/tool_task_pb2.py
|
easyopsapis/easyops-api-python
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
[
"Apache-2.0"
] | 5
|
2019-07-31T04:11:05.000Z
|
2021-01-07T03:23:20.000Z
|
api_gateway_sdk/model/tool/tool_task_pb2.py
|
easyopsapis/easyops-api-python
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
[
"Apache-2.0"
] | null | null | null |
api_gateway_sdk/model/tool/tool_task_pb2.py
|
easyopsapis/easyops-api-python
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: tool_task.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from api_gateway_sdk.model.tool import callback_pb2 as api__gateway__sdk_dot_model_dot_tool_dot_callback__pb2
from api_gateway_sdk.model.tool import tool_pb2 as api__gateway__sdk_dot_model_dot_tool_dot_tool__pb2
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='tool_task.proto',
package='tool',
syntax='proto3',
serialized_options=_b('Z>go.easyops.local/contracts/protorepo-models/easyops/model/tool'),
serialized_pb=_b('\n\x0ftool_task.proto\x12\x04tool\x1a)api_gateway_sdk/model/tool/callback.proto\x1a%api_gateway_sdk/model/tool/tool.proto\x1a\x1cgoogle/protobuf/struct.proto\"\xd8\x0e\n\x08ToolTask\x12\x10\n\x08username\x18\x01 \x01(\t\x12\'\n\x06inputs\x18\x02 \x01(\x0b\x32\x17.google.protobuf.Struct\x12/\n\x0b\x65xtraDetail\x18\x03 \x01(\x0b\x32\x1a.tool.ToolTask.ExtraDetail\x12\x13\n\x0btotalStatus\x18\x04 \x01(\t\x12\r\n\x05\x65rror\x18\x05 \x01(\t\x12\x0e\n\x06\x65xecId\x18\x06 \x01(\t\x12(\n\x07toolEnv\x18\x07 \x01(\x0b\x32\x17.google.protobuf.Struct\x12(\n\x07outputs\x18\x08 \x01(\x0b\x32\x17.google.protobuf.Struct\x12,\n\x0boutViewData\x18\t \x01(\x0b\x32\x17.google.protobuf.Struct\x12*\n\tagentData\x18\n \x01(\x0b\x32\x17.google.protobuf.Struct\x12\x0e\n\x06\x61gents\x18\x0b \x03(\t\x12*\n\tstartTime\x18\x0c \x01(\x0b\x32\x17.google.protobuf.Struct\x12\'\n\x06status\x18\r \x01(\x0b\x32\x17.google.protobuf.Struct\x12$\n\x03msg\x18\x0e \x01(\x0b\x32\x17.google.protobuf.Struct\x12(\n\x07\x65ndTime\x18\x0f \x01(\x0b\x32\x17.google.protobuf.Struct\x12+\n\nexitStatus\x18\x10 \x01(\x0b\x32\x17.google.protobuf.Struct\x12*\n\tsysStatus\x18\x11 \x01(\x0b\x32\x17.google.protobuf.Struct\x12\x30\n\x0ftoolOutputsData\x18\x12 \x03(\x0b\x32\x17.google.protobuf.Struct\x12*\n\ttableData\x18\x13 \x01(\x0b\x32\x17.google.protobuf.Struct\x12\x1c\n\x08toolData\x18\x14 \x01(\x0b\x32\n.tool.Tool\x12\x33\n\rbatchStrategy\x18\x15 \x01(\x0b\x32\x1c.tool.ToolTask.BatchStrategy\x12\x12\n\nneedNotify\x18\x16 \x01(\t\x12\x10\n\x08\x65xecUser\x18\x17 \x01(\t\x12\x0b\n\x03vId\x18\x18 \x01(\t\x12\x0e\n\x06toolId\x18\x19 \x01(\t\x12-\n\noutputDefs\x18\x1a \x03(\x0b\x32\x19.tool.ToolTask.OutputDefs\x12+\n\ttableDefs\x18\x1b \x03(\x0b\x32\x18.tool.ToolTask.TableDefs\x12+\n\x0btoolOutputs\x18\x1c \x01(\x0b\x32\x16.google.protobuf.Value\x1a\x9b\x04\n\x0b\x45xtraDetail\x12 \n\x08\x63\x61llback\x18\x01 \x01(\x0b\x32\x0e.tool.Callback\x12(\n\x07toolEnv\x18\x02 
\x01(\x0b\x32\x17.google.protobuf.Struct\x12+\n\x0btoolOutputs\x18\x03 \x01(\x0b\x32\x16.google.protobuf.Value\x12\x37\n\ttableDefs\x18\x04 \x03(\x0b\x32$.tool.ToolTask.ExtraDetail.TableDefs\x12\x39\n\noutputDefs\x18\x05 \x03(\x0b\x32%.tool.ToolTask.ExtraDetail.OutputDefs\x1a\xf6\x01\n\tTableDefs\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x43\n\ndimensions\x18\x03 \x03(\x0b\x32/.tool.ToolTask.ExtraDetail.TableDefs.Dimensions\x12=\n\x07\x63olumns\x18\x04 \x03(\x0b\x32,.tool.ToolTask.ExtraDetail.TableDefs.Columns\x1a&\n\nDimensions\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x1a#\n\x07\x43olumns\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x1a&\n\nOutputDefs\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x1aL\n\rBatchStrategy\x12\x10\n\x08\x62\x61tchNum\x18\x01 \x01(\x05\x12\x15\n\rbatchInterval\x18\x02 \x01(\x05\x12\x12\n\nfailedStop\x18\x03 \x01(\x08\x1a&\n\nOutputDefs\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x1a\xde\x01\n\tTableDefs\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x37\n\ndimensions\x18\x03 \x03(\x0b\x32#.tool.ToolTask.TableDefs.Dimensions\x12\x31\n\x07\x63olumns\x18\x04 \x03(\x0b\x32 .tool.ToolTask.TableDefs.Columns\x1a&\n\nDimensions\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x1a#\n\x07\x43olumns\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\tB@Z>go.easyops.local/contracts/protorepo-models/easyops/model/toolb\x06proto3')
,
dependencies=[api__gateway__sdk_dot_model_dot_tool_dot_callback__pb2.DESCRIPTOR,api__gateway__sdk_dot_model_dot_tool_dot_tool__pb2.DESCRIPTOR,google_dot_protobuf_dot_struct__pb2.DESCRIPTOR,])
# Descriptor for the nested message tool.ToolTask.ExtraDetail.TableDefs.Dimensions:
# two singular string fields, 'id' (field 1) and 'name' (field 2).
# Machine-generated by protoc from tool_task.proto -- do not edit by hand.
_TOOLTASK_EXTRADETAIL_TABLEDEFS_DIMENSIONS = _descriptor.Descriptor(
name='Dimensions',
full_name='tool.ToolTask.ExtraDetail.TableDefs.Dimensions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='tool.ToolTask.ExtraDetail.TableDefs.Dimensions.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='tool.ToolTask.ExtraDetail.TableDefs.Dimensions.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
# Byte offsets of this message's schema within DESCRIPTOR's serialized_pb.
serialized_start=1560,
serialized_end=1598,
)
# Descriptor for the nested message tool.ToolTask.ExtraDetail.TableDefs.Columns:
# two singular string fields, 'id' (field 1) and 'name' (field 2).
# Machine-generated by protoc from tool_task.proto -- do not edit by hand.
_TOOLTASK_EXTRADETAIL_TABLEDEFS_COLUMNS = _descriptor.Descriptor(
name='Columns',
full_name='tool.ToolTask.ExtraDetail.TableDefs.Columns',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='tool.ToolTask.ExtraDetail.TableDefs.Columns.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='tool.ToolTask.ExtraDetail.TableDefs.Columns.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1600,
serialized_end=1635,
)
# Descriptor for the nested message tool.ToolTask.ExtraDetail.TableDefs:
# string fields 'id' and 'name' plus repeated message fields 'dimensions'
# and 'columns' (the nested Dimensions/Columns messages declared above).
# Machine-generated by protoc from tool_task.proto -- do not edit by hand.
_TOOLTASK_EXTRADETAIL_TABLEDEFS = _descriptor.Descriptor(
name='TableDefs',
full_name='tool.ToolTask.ExtraDetail.TableDefs',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='tool.ToolTask.ExtraDetail.TableDefs.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='tool.ToolTask.ExtraDetail.TableDefs.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dimensions', full_name='tool.ToolTask.ExtraDetail.TableDefs.dimensions', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='columns', full_name='tool.ToolTask.ExtraDetail.TableDefs.columns', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_TOOLTASK_EXTRADETAIL_TABLEDEFS_DIMENSIONS, _TOOLTASK_EXTRADETAIL_TABLEDEFS_COLUMNS, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1389,
serialized_end=1635,
)
# Descriptor for the nested message tool.ToolTask.ExtraDetail.OutputDefs:
# two singular string fields, 'id' (field 1) and 'name' (field 2).
# Machine-generated by protoc from tool_task.proto -- do not edit by hand.
_TOOLTASK_EXTRADETAIL_OUTPUTDEFS = _descriptor.Descriptor(
name='OutputDefs',
full_name='tool.ToolTask.ExtraDetail.OutputDefs',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='tool.ToolTask.ExtraDetail.OutputDefs.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='tool.ToolTask.ExtraDetail.OutputDefs.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1637,
serialized_end=1675,
)
# Descriptor for the nested message tool.ToolTask.ExtraDetail: singular
# message fields 'callback', 'toolEnv' and 'toolOutputs', plus repeated
# message fields 'tableDefs' and 'outputDefs' (its own nested messages).
# Machine-generated by protoc from tool_task.proto -- do not edit by hand.
_TOOLTASK_EXTRADETAIL = _descriptor.Descriptor(
name='ExtraDetail',
full_name='tool.ToolTask.ExtraDetail',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='callback', full_name='tool.ToolTask.ExtraDetail.callback', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='toolEnv', full_name='tool.ToolTask.ExtraDetail.toolEnv', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='toolOutputs', full_name='tool.ToolTask.ExtraDetail.toolOutputs', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tableDefs', full_name='tool.ToolTask.ExtraDetail.tableDefs', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='outputDefs', full_name='tool.ToolTask.ExtraDetail.outputDefs', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_TOOLTASK_EXTRADETAIL_TABLEDEFS, _TOOLTASK_EXTRADETAIL_OUTPUTDEFS, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1136,
serialized_end=1675,
)
# Descriptor for the nested message tool.ToolTask.BatchStrategy:
# int32 fields 'batchNum' and 'batchInterval' and bool field 'failedStop'.
# Machine-generated by protoc from tool_task.proto -- do not edit by hand.
_TOOLTASK_BATCHSTRATEGY = _descriptor.Descriptor(
name='BatchStrategy',
full_name='tool.ToolTask.BatchStrategy',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='batchNum', full_name='tool.ToolTask.BatchStrategy.batchNum', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='batchInterval', full_name='tool.ToolTask.BatchStrategy.batchInterval', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='failedStop', full_name='tool.ToolTask.BatchStrategy.failedStop', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1677,
serialized_end=1753,
)
# Descriptor for the nested message tool.ToolTask.OutputDefs: two singular
# string fields, 'id' (field 1) and 'name' (field 2).
# NOTE(review): serialized_start/end duplicate those of
# tool.ToolTask.ExtraDetail.OutputDefs (1637-1675) -- confirm against a
# fresh protoc run of tool_task.proto.
# Machine-generated by protoc -- do not edit by hand.
_TOOLTASK_OUTPUTDEFS = _descriptor.Descriptor(
name='OutputDefs',
full_name='tool.ToolTask.OutputDefs',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='tool.ToolTask.OutputDefs.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='tool.ToolTask.OutputDefs.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1637,
serialized_end=1675,
)
# Descriptor for the nested message tool.ToolTask.TableDefs.Dimensions:
# two singular string fields, 'id' (field 1) and 'name' (field 2).
# Machine-generated by protoc from tool_task.proto -- do not edit by hand.
_TOOLTASK_TABLEDEFS_DIMENSIONS = _descriptor.Descriptor(
name='Dimensions',
full_name='tool.ToolTask.TableDefs.Dimensions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='tool.ToolTask.TableDefs.Dimensions.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='tool.ToolTask.TableDefs.Dimensions.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1560,
serialized_end=1598,
)
# Descriptor for the nested message tool.ToolTask.TableDefs.Columns:
# two singular string fields, 'id' (field 1) and 'name' (field 2).
# Machine-generated by protoc from tool_task.proto -- do not edit by hand.
_TOOLTASK_TABLEDEFS_COLUMNS = _descriptor.Descriptor(
name='Columns',
full_name='tool.ToolTask.TableDefs.Columns',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='tool.ToolTask.TableDefs.Columns.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='tool.ToolTask.TableDefs.Columns.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1600,
serialized_end=1635,
)
# Descriptor for the nested message tool.ToolTask.TableDefs: string fields
# 'id' and 'name' plus repeated message fields 'dimensions' and 'columns'
# (the TableDefs-level Dimensions/Columns messages declared above).
# Machine-generated by protoc from tool_task.proto -- do not edit by hand.
_TOOLTASK_TABLEDEFS = _descriptor.Descriptor(
name='TableDefs',
full_name='tool.ToolTask.TableDefs',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='tool.ToolTask.TableDefs.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='tool.ToolTask.TableDefs.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dimensions', full_name='tool.ToolTask.TableDefs.dimensions', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='columns', full_name='tool.ToolTask.TableDefs.columns', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_TOOLTASK_TABLEDEFS_DIMENSIONS, _TOOLTASK_TABLEDEFS_COLUMNS, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1796,
serialized_end=2018,
)
_TOOLTASK = _descriptor.Descriptor(
name='ToolTask',
full_name='tool.ToolTask',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='username', full_name='tool.ToolTask.username', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='inputs', full_name='tool.ToolTask.inputs', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='extraDetail', full_name='tool.ToolTask.extraDetail', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='totalStatus', full_name='tool.ToolTask.totalStatus', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='error', full_name='tool.ToolTask.error', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='execId', full_name='tool.ToolTask.execId', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='toolEnv', full_name='tool.ToolTask.toolEnv', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='outputs', full_name='tool.ToolTask.outputs', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='outViewData', full_name='tool.ToolTask.outViewData', index=8,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='agentData', full_name='tool.ToolTask.agentData', index=9,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='agents', full_name='tool.ToolTask.agents', index=10,
number=11, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='startTime', full_name='tool.ToolTask.startTime', index=11,
number=12, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='status', full_name='tool.ToolTask.status', index=12,
number=13, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='msg', full_name='tool.ToolTask.msg', index=13,
number=14, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='endTime', full_name='tool.ToolTask.endTime', index=14,
number=15, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='exitStatus', full_name='tool.ToolTask.exitStatus', index=15,
number=16, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sysStatus', full_name='tool.ToolTask.sysStatus', index=16,
number=17, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='toolOutputsData', full_name='tool.ToolTask.toolOutputsData', index=17,
number=18, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tableData', full_name='tool.ToolTask.tableData', index=18,
number=19, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='toolData', full_name='tool.ToolTask.toolData', index=19,
number=20, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='batchStrategy', full_name='tool.ToolTask.batchStrategy', index=20,
number=21, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='needNotify', full_name='tool.ToolTask.needNotify', index=21,
number=22, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='execUser', full_name='tool.ToolTask.execUser', index=22,
number=23, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='vId', full_name='tool.ToolTask.vId', index=23,
number=24, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='toolId', full_name='tool.ToolTask.toolId', index=24,
number=25, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='outputDefs', full_name='tool.ToolTask.outputDefs', index=25,
number=26, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tableDefs', full_name='tool.ToolTask.tableDefs', index=26,
number=27, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='toolOutputs', full_name='tool.ToolTask.toolOutputs', index=27,
number=28, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_TOOLTASK_EXTRADETAIL, _TOOLTASK_BATCHSTRATEGY, _TOOLTASK_OUTPUTDEFS, _TOOLTASK_TABLEDEFS, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=138,
serialized_end=2018,
)
_TOOLTASK_EXTRADETAIL_TABLEDEFS_DIMENSIONS.containing_type = _TOOLTASK_EXTRADETAIL_TABLEDEFS
_TOOLTASK_EXTRADETAIL_TABLEDEFS_COLUMNS.containing_type = _TOOLTASK_EXTRADETAIL_TABLEDEFS
_TOOLTASK_EXTRADETAIL_TABLEDEFS.fields_by_name['dimensions'].message_type = _TOOLTASK_EXTRADETAIL_TABLEDEFS_DIMENSIONS
_TOOLTASK_EXTRADETAIL_TABLEDEFS.fields_by_name['columns'].message_type = _TOOLTASK_EXTRADETAIL_TABLEDEFS_COLUMNS
_TOOLTASK_EXTRADETAIL_TABLEDEFS.containing_type = _TOOLTASK_EXTRADETAIL
_TOOLTASK_EXTRADETAIL_OUTPUTDEFS.containing_type = _TOOLTASK_EXTRADETAIL
_TOOLTASK_EXTRADETAIL.fields_by_name['callback'].message_type = api__gateway__sdk_dot_model_dot_tool_dot_callback__pb2._CALLBACK
_TOOLTASK_EXTRADETAIL.fields_by_name['toolEnv'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_TOOLTASK_EXTRADETAIL.fields_by_name['toolOutputs'].message_type = google_dot_protobuf_dot_struct__pb2._VALUE
_TOOLTASK_EXTRADETAIL.fields_by_name['tableDefs'].message_type = _TOOLTASK_EXTRADETAIL_TABLEDEFS
_TOOLTASK_EXTRADETAIL.fields_by_name['outputDefs'].message_type = _TOOLTASK_EXTRADETAIL_OUTPUTDEFS
_TOOLTASK_EXTRADETAIL.containing_type = _TOOLTASK
_TOOLTASK_BATCHSTRATEGY.containing_type = _TOOLTASK
_TOOLTASK_OUTPUTDEFS.containing_type = _TOOLTASK
_TOOLTASK_TABLEDEFS_DIMENSIONS.containing_type = _TOOLTASK_TABLEDEFS
_TOOLTASK_TABLEDEFS_COLUMNS.containing_type = _TOOLTASK_TABLEDEFS
_TOOLTASK_TABLEDEFS.fields_by_name['dimensions'].message_type = _TOOLTASK_TABLEDEFS_DIMENSIONS
_TOOLTASK_TABLEDEFS.fields_by_name['columns'].message_type = _TOOLTASK_TABLEDEFS_COLUMNS
_TOOLTASK_TABLEDEFS.containing_type = _TOOLTASK
_TOOLTASK.fields_by_name['inputs'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_TOOLTASK.fields_by_name['extraDetail'].message_type = _TOOLTASK_EXTRADETAIL
_TOOLTASK.fields_by_name['toolEnv'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_TOOLTASK.fields_by_name['outputs'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_TOOLTASK.fields_by_name['outViewData'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_TOOLTASK.fields_by_name['agentData'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_TOOLTASK.fields_by_name['startTime'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_TOOLTASK.fields_by_name['status'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_TOOLTASK.fields_by_name['msg'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_TOOLTASK.fields_by_name['endTime'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_TOOLTASK.fields_by_name['exitStatus'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_TOOLTASK.fields_by_name['sysStatus'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_TOOLTASK.fields_by_name['toolOutputsData'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_TOOLTASK.fields_by_name['tableData'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_TOOLTASK.fields_by_name['toolData'].message_type = api__gateway__sdk_dot_model_dot_tool_dot_tool__pb2._TOOL
_TOOLTASK.fields_by_name['batchStrategy'].message_type = _TOOLTASK_BATCHSTRATEGY
_TOOLTASK.fields_by_name['outputDefs'].message_type = _TOOLTASK_OUTPUTDEFS
_TOOLTASK.fields_by_name['tableDefs'].message_type = _TOOLTASK_TABLEDEFS
_TOOLTASK.fields_by_name['toolOutputs'].message_type = google_dot_protobuf_dot_struct__pb2._VALUE
DESCRIPTOR.message_types_by_name['ToolTask'] = _TOOLTASK
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ToolTask = _reflection.GeneratedProtocolMessageType('ToolTask', (_message.Message,), {
'ExtraDetail' : _reflection.GeneratedProtocolMessageType('ExtraDetail', (_message.Message,), {
'TableDefs' : _reflection.GeneratedProtocolMessageType('TableDefs', (_message.Message,), {
'Dimensions' : _reflection.GeneratedProtocolMessageType('Dimensions', (_message.Message,), {
'DESCRIPTOR' : _TOOLTASK_EXTRADETAIL_TABLEDEFS_DIMENSIONS,
'__module__' : 'tool_task_pb2'
# @@protoc_insertion_point(class_scope:tool.ToolTask.ExtraDetail.TableDefs.Dimensions)
})
,
'Columns' : _reflection.GeneratedProtocolMessageType('Columns', (_message.Message,), {
'DESCRIPTOR' : _TOOLTASK_EXTRADETAIL_TABLEDEFS_COLUMNS,
'__module__' : 'tool_task_pb2'
# @@protoc_insertion_point(class_scope:tool.ToolTask.ExtraDetail.TableDefs.Columns)
})
,
'DESCRIPTOR' : _TOOLTASK_EXTRADETAIL_TABLEDEFS,
'__module__' : 'tool_task_pb2'
# @@protoc_insertion_point(class_scope:tool.ToolTask.ExtraDetail.TableDefs)
})
,
'OutputDefs' : _reflection.GeneratedProtocolMessageType('OutputDefs', (_message.Message,), {
'DESCRIPTOR' : _TOOLTASK_EXTRADETAIL_OUTPUTDEFS,
'__module__' : 'tool_task_pb2'
# @@protoc_insertion_point(class_scope:tool.ToolTask.ExtraDetail.OutputDefs)
})
,
'DESCRIPTOR' : _TOOLTASK_EXTRADETAIL,
'__module__' : 'tool_task_pb2'
# @@protoc_insertion_point(class_scope:tool.ToolTask.ExtraDetail)
})
,
'BatchStrategy' : _reflection.GeneratedProtocolMessageType('BatchStrategy', (_message.Message,), {
'DESCRIPTOR' : _TOOLTASK_BATCHSTRATEGY,
'__module__' : 'tool_task_pb2'
# @@protoc_insertion_point(class_scope:tool.ToolTask.BatchStrategy)
})
,
'OutputDefs' : _reflection.GeneratedProtocolMessageType('OutputDefs', (_message.Message,), {
'DESCRIPTOR' : _TOOLTASK_OUTPUTDEFS,
'__module__' : 'tool_task_pb2'
# @@protoc_insertion_point(class_scope:tool.ToolTask.OutputDefs)
})
,
'TableDefs' : _reflection.GeneratedProtocolMessageType('TableDefs', (_message.Message,), {
'Dimensions' : _reflection.GeneratedProtocolMessageType('Dimensions', (_message.Message,), {
'DESCRIPTOR' : _TOOLTASK_TABLEDEFS_DIMENSIONS,
'__module__' : 'tool_task_pb2'
# @@protoc_insertion_point(class_scope:tool.ToolTask.TableDefs.Dimensions)
})
,
'Columns' : _reflection.GeneratedProtocolMessageType('Columns', (_message.Message,), {
'DESCRIPTOR' : _TOOLTASK_TABLEDEFS_COLUMNS,
'__module__' : 'tool_task_pb2'
# @@protoc_insertion_point(class_scope:tool.ToolTask.TableDefs.Columns)
})
,
'DESCRIPTOR' : _TOOLTASK_TABLEDEFS,
'__module__' : 'tool_task_pb2'
# @@protoc_insertion_point(class_scope:tool.ToolTask.TableDefs)
})
,
'DESCRIPTOR' : _TOOLTASK,
'__module__' : 'tool_task_pb2'
# @@protoc_insertion_point(class_scope:tool.ToolTask)
})
_sym_db.RegisterMessage(ToolTask)
_sym_db.RegisterMessage(ToolTask.ExtraDetail)
_sym_db.RegisterMessage(ToolTask.ExtraDetail.TableDefs)
_sym_db.RegisterMessage(ToolTask.ExtraDetail.TableDefs.Dimensions)
_sym_db.RegisterMessage(ToolTask.ExtraDetail.TableDefs.Columns)
_sym_db.RegisterMessage(ToolTask.ExtraDetail.OutputDefs)
_sym_db.RegisterMessage(ToolTask.BatchStrategy)
_sym_db.RegisterMessage(ToolTask.OutputDefs)
_sym_db.RegisterMessage(ToolTask.TableDefs)
_sym_db.RegisterMessage(ToolTask.TableDefs.Dimensions)
_sym_db.RegisterMessage(ToolTask.TableDefs.Columns)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 46.384425
| 3,487
| 0.748035
| 4,838
| 37,525
| 5.495453
| 0.054361
| 0.053861
| 0.03024
| 0.050401
| 0.83601
| 0.803964
| 0.783014
| 0.757927
| 0.729943
| 0.699026
| 0
| 0.039298
| 0.125223
| 37,525
| 808
| 3,488
| 46.441832
| 0.770639
| 0.025077
| 0
| 0.701847
| 1
| 0.002639
| 0.176115
| 0.127219
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.010554
| 0
| 0.010554
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
8614dcae2d83d0308a72e29e9a8114d4a0a6d23c
| 96,051
|
py
|
Python
|
icon.py
|
victordefoe/DataLabel
|
73d1504ac6292d33ba1a8206d3e42abdb8d28bc5
|
[
"BSD-3-Clause"
] | 1
|
2019-09-08T04:21:59.000Z
|
2019-09-08T04:21:59.000Z
|
icon.py
|
victordefoe/DataLabel
|
73d1504ac6292d33ba1a8206d3e42abdb8d28bc5
|
[
"BSD-3-Clause"
] | null | null | null |
icon.py
|
victordefoe/DataLabel
|
73d1504ac6292d33ba1a8206d3e42abdb8d28bc5
|
[
"BSD-3-Clause"
] | null | null | null |
img = 'AAABAAEAMDAAAAEAIACoJQAAFgAAACgAAAAwAAAAYAAAAAEAIAAAAAAAACQAAAAAAAAAAAAAAAAAAAAAAADa6vL/2ery/9rp8f/X6vH/r73C/9rb3P/V1dX/4ODg/9/f3//Z2dn/ra2t/9ra2v/h4eH/6Ojo//X19f/6+vr/+/v7//v7+//6+vr/8/Pz/+Xl5f/d3d3/k5OT/9TU1P/+/v7//v7+//7+/v/39/f/9fX1//f39//5+fn/9PT0/+Pj4//a2tr/39/f/+bm5v/R2tj/2PHu/+f+/P/t/v3/7fz8/97p6P/z+Pf/+vn5//n5+f/5+fn/+fn5//n5+f/a6vH/2erx/9jp8f/W6PD/o7G2/9TV1v/f39//4ODg/9/f3/+4uLj/x8fH/+Dg4P/o6Oj/7e3t//z8/P/+/v7//v7+//z8/P/6+vr/9fX1/+jo6P/U1NT/jIyM//n5+f/+/v7//v7+//v7+//29vb/8vLy/+fn5//S0tL/zs7O/9jY2P/T09P/4ODg/9fX1//f5OP/wNXT/+b+/f/m/f3/6f78/+z9/P/a6+r/6PTz//b39//s7Oz/8vLy//Pz8//Z6vH/2Onw/9fp8P/T5+7/o7S5/9zd3f/h4eH/4eHh/97e3v/Nzc3/zs7O/+Pj4//s7Oz/9fX1//7+/v/+/v7//v7+//v7+//5+fn/7u7u/+fn5/+lpaX/tbW1//j4+P/4+Pj/+/v7/+3t7f/w8PD/2NjY/9bW1v/j4+P/5+fn/+Pj4//R0dH/29vb/+fn5//n5+f/zt7d/+X9/f/l/f7/5v39/+r+/f/q/f3/2u7t/+Dw7v/r7e3/5OTk/+Li4v/X6fD/1ujv/9bo7//Q5ez/rL3D/9vc3P/i4uL/4uLi/9XV1f/d3d3/4eHh/+vr6//39/f/+vr6//7+/v/+/v7//v7+//n5+f/4+Pj/7u7u/+Hh4f+np6f/0dHR/+fn5//p6en/+fn5/97e3v/Z2dn/4uLi//Hx8f/x8fH/6+vr/+Li4v/m5ub/6+vr/+3t7f/u7u7/ztfW/+L6+v/m/f7/5v3+/+b9/f/o/v3/6f78/9jt6//k8vD/9/n4//j49//W6O//1ujv/9Pn7v/K5Or/tMrQ/9PV1f/i4uL/4+Pj/83Nzf/l5eX/3Nzc//j4+P/9/f3//f39//7+/v/+/v7//v7+//f39//19fX/7Ozs/9zc3P+0tLT/3d3d/+Hh4f/l5eX/9vb2/9ra2v/p6en/8vLy//Pz8//w8PD/4eHh/+Pj4//k5OT/6urq//Hx8f/w8PD/0dXU/+D29v/m/f7/5v3+/+f9/v/q/f3/6/79/+n+/P/e8fH/5PTy/+/5+P/W6O//1efu/9Dm7f/I4un/vtTc/8fKyv/i4uL/5eXl/8fHx//o6Oj/2NjY//7+/v/+/v7//v7+//7+/v/+/v7/+fn5//T09P/z8/P/6urq/87Ozv/c3Nz/29vb/9bW1v/c3Nz/5OTk/+zs7P/09PT/9PT0//X19f/q6ur/6enp/+3t7f/09PT/8/Pz//Ly8v/p6en/xcjI/93y8v/m/f7/6P3+/+j9/v/m/f7/6P79/+r+/v/p/f3/3PHw/9Lm5f/W6O//1Obu/83k7P/I4un/xNzj/7G2t//j5OT/5ubm/8nJyf/q6ur/6+vr//7+/v/+/v7//v7+//7+/v/8/Pz/9fX1//T09P/w8PD/5ubm/9DV1f/f4OD/4uLi/9vb2//t7e3/9vb2//Pz8//z8/P/9/f3//X19f/o6Oj/6+vr//b29v/09PT/8vLy/+vr6//o6Oj/1Nzc/9v4+//p/f3/5/3+/+X9/v/m/f7/5v3+/+f+/f/n/vz/2e/t/9vv7f/V6O7/0+bu/8vj6v/I4un/yOHo/6mztv/j5OP/6Ojo/+Li4v/r6+v/9fX1//7+/v/+/v7//v7+//7+/v/5+fn/9/f3//X19f/y8vL/srOy/5ygn//a29v/2
9vb/+Xl5f/x8fH/+vr6//T09P/29vb/7+/v//Ly8v/x8fH/+Pj4//b29v/29vb/9/f3//b29v/z9PT/0+Li/9v4+v/O19b/1Ojn/+L+/v/l/f7/5f38/+X+/P/M3dz/zuLh/8PZ2v/U5+7/0uXt/8ni6f/H4ej/x+Ho/63Axf/T1NT/5+fn/+np6f/t7e3/8/Pz//7+/v/+/v7//v7+//7+/v/8/Pz//Pz8//n5+f/t7e3/lJSU/5ycnP/V1tb/2dnZ/9zc3P/r6+v/+/v7//X19f/7+/v/6Ojo//T09P/6+vr/9/f3//T09P/19fX/+Pj4//j4+P/29vb/1eXl/9jr6v/o6un/yc7O/9fw7//l/Pv/wM/O/8/h3//h+vr/vszL/8rk7P/T5u3/0OXs/8jh6f/H4ej/x+Ho/7/X3/+ws7X/5OTk/+np6f/v7+//8PDw//z8/P/+/v7//v7+//7+/v/+/v7//f39//39/f+7u7v/yMjI/5eXl//O0dD/7O3t/+vr6//p6en/8PDw/+zs7P/l5eX/5ubm/+fn5//z8/P/8fHx//b29v/39/f/9fX1//Pz8//v7+//3uPj/9jd3f/j4+P/5OXl/8rU1P/l+vn/3Ojn/+Pn5f/L3N7/5Pj5/9nf3//S5e3/zeTs/8ji6f/H4ej/x+Ho/8bh6P+drK//39/f/+Tk5P/k5OT/7u7u//Ly8v/8/Pz//v7+//7+/v/+/v7//v7+/9/f3//IyMj/zc3N/35/f//M1Nb/19ja/9LS0v/r6+v/7+/v/+3t7f/n5+f/6enp/+Pj4//d3d3/9PT0//j4+P/7+/v/+fn5//j4+P/6+vr/4+Pj/+Xl5f/t7e3/8/Pz/+7v7//I1Nb/6vf2/8/T0v/i4+T/3uLj/+jo6P/S5e3/z+Ts/87k6//O5ev/zuTs/9Dk7P+8ztT/ur2//9/f3//d3d3/2tra/+rq6v/w8PD//Pz8//7+/v/39/f/5OTk/729vf/g4OD/x8fH/4uOjf+7w8X/sLGw/+/v7//w8PD/+/v7//Hx8f/5+fn/+vr6//T09P/9/f3//f39//j4+P/5+fn//f39//z8/P/8/Pz/5OTk/+Xl5f/4+Pj/8/Pz/+/v7//o6un/193c/+3v7//j4+P/09PT/+rq6v/W6O//1ujv/9bo7//W6O//1efu/9Xn7v/V6O7/qLW3/93e3f/b29v/0NDQ/8TExP/FxcX/xsbG/83Nzf/Nzc3/u7u7/+Tk5P/U1NT/wcHB/66urv+0tbX/2tzc/8zS1P/z+Pf/7vDw//v7+//+/v7//v7+//7+/v/+/v7//v7+//7+/v/8/Pz/9vb2//X19f/z8/P/9vb2//n5+f/5+fn/9fX1//Dw8P/w8PD/6Ojo//Hx8f/q6ur/3d3d/+3t7f/X6vH/1+rx/9fp8P/X6fD/1+nw/9bp7//V6O//wNLW/8DDxP/d3d3/2tra/9nZ2f++vr7/nZ2d/5ycnP/Ozs7/5OTk/+Li4v+2urn/u729/7q6uv/Iycj/sLW4/9zl6v/f6Oz/7/b4/+Xl5f/x8fH//v7+//7+/v/+/v7//v7+//7+/v/+/v7//v7+//7+/v/7+/v/8/Pz/+7u7v/s7Oz/6+vr/+7u7v/6+vr/9vb2/9XV1f/m5ub/6enp/9DQ0P/a6vH/2urx/9nq8f/Z6/H/2evx/9nr8f/Z6vH/1+jv/56lqv/a29r/3Nzc/9nZ2f/BwcH/pKSk/8rKyv/i4uL/8fLy/77AwP/T6+n/x93f/7W2t/+yubv/2OLm/+bv9P/h6u7/1eDh//r6+v/s7Oz/9vb2//7+/v/+/v7//v7+//7+/v/+/v7//v7+//39/f/8/Pz/+fn5//r6+v/7+/v/+/v7//X19f/p6en/4+Pj/9PT0//h4eH/zs7O//T09P/b6/L/2+vy/9vr8v/c7PL/3O3w/9zt8P/c7fD/2+3x/9Pj5/+1urv/3Nzc/7CwsP+wsLD/y
MjI/+7u7v/9/f3/2Nra/8XZ2P/g/v3/3vz9/7nV2f/O2t7/6fT3/9nk5f/T5uX/+P79//7+/v/+/v7/7e3t//j4+P/+/v7//v7+//7+/v/8/Pz/+/v7//j4+P/29vb/9fX1//j4+P/6+vr/+Pj4//b29v/y8vL/8PDw/+/v7//g4OD/7u7u/+fn5//c7PL/3O3y/9zt8f/d7fH/3u7x/97v8v/f7/L/3+/y/97v8v/V4+f/v8TE/9PT0//f39//+/v7//7+/v/z+Pj/q768/+D9/f/f/f7/3/3+/9L0/P+y0uf/uMjP/9Pq6v/p/vz//v7+//7+/v/6+vr/9fX1/+zs7P/39/f//v7+//39/f/7+/v/9/f3//b29v/09PT/9PT0//X19f/29vb/9/f3//f39//29vb/9PT0//X19f/19fX/9fX1//X19f/d7fH/3e3y/9/u8//j7/P/5fD0/+Xx9P/m8fX/5/H1/+jx9f/l8fX/1uLm/9/l5v/9/v3//v7+//39/f+1wsH/1vLy/9/9/v/f/f7/3/3+/9n4/P+v1/f/v97n/9Hu7//s+vn/+/z8//j4+P/x8fH//v7+//7+/v/q6ur/+Pj4//7+/v/6+vr/9/f3//X19f/x8fH/8fHx//Pz8//y8vL/8/Pz//n5+f/4+Pj/9vb2//X19f/09PT/9PT0//X19f/g7vP/4O7z/+Lw9P/l8fX/5/L2/+jy9v/p8vb/6fL2/+rz9//q8/f/6vP2/+Lq7v/x8vL//v7+/+jo6P+jtLP/y+bn/975+v/d+/z/3/3+/9f2+/+v0+j/1/j7/7vY3P/b8PD/3+3u/+/29v/v9PP//f7+//7+/v/+/v7/6Ojo/+np6f/6+vr/9/f3//T09P/w8PD/7e3t/+/v7//y8vL/7+/v//Dw8P/39/f/9vb2//T09P/z8/P/9PT0//b29v/i7/P/5fD1/+bx9f/n8vb/6fP2/+rz9//q8/j/6vT2/+v09v/r9ff/7fX3/+319//j7Ov/9vf3/+np6f/d4N3/lamr/7jP0f/Y9PT/3vz9/9b3+v/G6O3/xOLv/6vM0//M6+7/tNDc/8je5f/Y6u3/+/7+//7+/v/+/v7//v7+/9/f3//w8PD/9/f3//Pz8//19fX/8PDw/+7u7v/y8vL/8fHx/+vr6//v7+//9PT0//T09P/z8/P/9fX1//f39//l8PT/5vH1/+fy9v/o8vb/6vP3/+rz+P/r8/j/7Pb2/+329v/t9vb/7vb2/+/29v/v9/j/6fHz/+fv7v/O09T/X3N6/3CJkP+zzMz/3/39/9Ty8f+83ev/p8bb/6DE0/+vzuP/rMra/6fL2f+71d//8v79//v+/v/9/v7/9PX1/8XNzv/b3N3/5ebl//X09P/19fX/9fX1//T09P/z8/P/8/Pz//Dw8P/v7+//8vLy//X19f/19fX/9vb2//f39//m8fX/5/L2/+jy9v/q8/f/6vP3/+v09//s9fj/7fb2/+729v/v9/f/7/f3/+/39//v9/f/7PT0/9/n5v/V3d//ZHqG/2iCj/+ZtLn/ze/w/7bV4v+kxN//mbnN/5y+z/+Kq8X/rc7d/6zO4P+kws//4/r6/+36+v/m9PX/4vLz/6q+xf+wwMX/09rZ//f5+f/19fX/9fX1//X19f/19fX/8/Pz//Pz8//y8vL/8/Pz//X19f/29vb/9vb2//b29v/m8fX/6PL2/+rz9//q8/f/6/T4/+z19//t9vb/7/f3/+/39//v9/f/7/f3/+/39//v9/f/8Pj4/9nh4P+yu7v/fpen/3aQn/+HpbH/n8HK/6HE4f+jxt3/nb3O/56/z/9+nbT/qMrb/6vN3v+fv8//tcvZ/6fA0v+eucr/wdrj/6K/yP+gvMn/t8rO/+319f/39vb/9fX1//X19f/19fX/9fX1//T09P/x8fH/9PT0//X19f/39/f/+vr6//v7+//o8vb/6fL3/+rz9//r9Pf/6
/X2/+729v/u9/b/7/f3/+/39//v9/f/7vf3/+/39//r8/P/8Pj4/+Ps7P+fra//e5ak/32Zp/+RsL3/p8vk/4OlyP+fw9r/psnZ/6TG1v+Nq7r/o8XV/6jK2v+gwdD/pb7P/6/J3/+yzOP/stLi/6fH1v+mxtX/pcHI/9fr7P/4+Pj/9fX1/+7u7v/19fX/9fX1//X19f/09PT/9fX1//b29v/39/f//Pz8//39/f/q8/f/6vP3/+v09//r9fb/7fb2/+/39//v9/f/7/f3/+/39//v+Pj/7/j3//D4+P/r8vT/0d7h/+Ht8P97lJ//f5qp/3iUov+kw83/oszl/3+myv+lyNv/tNnp/6jJ2f+Ssb7/qcvb/6TH1/+lx9f/osPT/6bB1P+XtMv/pMTX/6vN3f+sz9//qcjV/8He3//r7u7/6erp//Ly8v/29vb/9fX1//X19f/19fX/9fX1//f39//39/f/9/f3//39/f/q8/j/6/T4/+v09//s9vb/7/f3/+/39//v9/f/8Pf3/+/39//v9/f/8Pj3//D4+P/x+Pj/3+jq/7LJ0f+Foq3/ka27/3mUo/+iv8v/n8Xi/5i82P+pytv/uuDx/6jK2v+YuMf/rc/f/7LW5/+32uv/qMrY/6vJ2P+fvc3/jau7/6bG1f+v0OH/psjW/7/c4v/W4eD/3N3d//b39v/29vb/9vb2//X19f/19fX/9vb2//j4+P/4+Pj/9/f3//f39//u9vj/7PX3/+z19//s9vb/7/f3/+/39//v9/f/7/f4//D4+P/w+Pj/8Pj4//H4+P/y+vn/8fj3/5+6yP+En63/mLbD/4Kfr/+Nqrn/qcvd/7PY6f+y1+f/vOP1/6rN3P+t0d3/rdPj/7ne8f+/4vX/t9Xd/7nb6P+evMv/oMHQ/5Sywv+x1Ob/qMnZ/7bX3v+/0tX/3+Xk//X19f/39/f/9/f3//X19f/19fX/9vb2//n5+f/4+Pj/+Pj4//b29v/2+/r/9Pr5//H4+P/w9/j/7/f3/+739//u9/j/8Pj4/+/49//v9/f/8fn4//T6+f/1+vn/8/n4/5y2w/+Trrz/ob3M/4ypuP+KqLb/osbV/7PW6v+x1ef/wOb7/7LU4v/I7PD/tNnp/8Xs+P/C6PL/v9/p/8rn6/+32+b/nr7M/67R4f+u0eH/rdHh/7PU4f+z0df/3OPj//T09P/29vb/9/f3//b29v/19fX/9fX1//f39//7+/v/+/v7//n5+f/3/Pv/9/z7//b7+v/2+/r/8vr5//H5+P/v+Pf/8Pj4/+/39//x+fn/8fn4/+ft7P/t9PT/7fb5/5m2wv+lwtH/oL3M/5+9zf+Qrb3/k7LC/6nL2/+rz97/xOn8/8Ln7//J6ur/y/H3/9L6/P/V+/z/yO33/9Dn5v/W+vz/vuTq/7TY4/+53+r/s9bo/8Dk9v+qxs3/4uPk/+/v7//19fX/9vb2//j4+P/39/f/9fX1//b29v/6+vr/+vr6//r6+v/6/Pz/+fz8//n8/P/5/Pz/9fz7//f7+//1+/r/8vn5//L5+f/y+vn/9Pv5//X6+f/a4uL/vNXg/5y7x/+z0d7/r83b/569zP+qzt//m7vL/6rN3P+jxdX/xOr5/8/2+v/H6ur/1PDw/9r9/f/Y/v7/wuj0/9Lq7f/d+vn/1fv9/8Lm6P/K7e//xu32/8bq+/+kvsb/4ODg/+zs7P/y8vL/9vb2//j4+P/5+fn/9vb2//X19f/4+Pj/+vr6//r6+v/6/Pz/+vz8//r8/P/6/Pz/+vz8//n8/P/5/fz/+Pz8//f8/P/2+/v/9fv6//f8+//2+/v/y9/l/7TQ1f/A3ub/vd7s/6rN3f+py9z/qs3d/7Xc6/+y1ub/ueDu/9P4+//N8fD/ze7v/9/z8//c/v7/0/n8/83u8//h8vL/3v3+/9r8+//g9/b/y+7w/8bt8/+1ztb/usPV/+vt7v/19fX/9vb2//j4+P/5+fn/+
Pj4//j4+P/7+/v/+fn5//r6+v/7/f3/+vz8//r8/P/6/Pz/+vz8//r8/P/5/Pz/+vz8//j9/P/6/Pz/+Pz8//f8+//2/Pv/8fn7/8LX2//P7fH/wePv/8Dk9f+x1OT/vuLy/7Xa6f/G7vn/xerw/9P19//g+Pn/y+zt/9zx8//j8vL/3/39/8/z9v/f8/L/6/z9/+X+/P/w/f3/4/v8/9L4+v+/0NX/sb7V/7nG5//19fX/8/Tz//f39//4+Pj/+fn5//r6+v/6+vr/+vr6//r6+v/6/Pz/+/39//r8/P/6/Pz/+/39//r8/P/5/fz/+P38//r8/P/6/Pz/+f38//j9/P/4/fz/9/37/8na4f/W7vD/xefx/8To+//B5Pj/yvD6/8zx9f/P9fz/zvX7/8/4+v/b8/T/5fb2/87t8P/k9PT/5u3s/+H7+//Y8vL/7/f2//H+/f/1/v7/8P39/9To5//J0ND/19vc/7vG3v/H0uj/8fLy//X29f/5+fn/+vr6//r6+v/5+fn/+fn5//r6+v/6/Pz/+/39//r8/P/6/Pz/+vz8//r8/P/6/Pz/+fz8//j9/P/6/Pz/+vz8//T29v/n7ez/3+fo/87i6P/E2dz/3Pn6/871/P/A6Pj/yvH5/9z9/f/f/f7/2Pj5/8/29v/U+vv/3fHw/+vx8f/l9vT/7vX0/+Xr6v/u+vn/7PLy//T29v/5/v7/8vr5/77KzP+7vr7/zc/P//Hz8v/U3ev/ytTo//X19P/4+Pj/+vr6//n5+f/5+fn/+fn5//n5+f/6/Pz/+/39//r8/P/6/Pz/+vz8//r8/P/6/Pz/+vz8//f9/P/6/Pz/+vz8//r8/P/5/Pz/6/P1/7/W3P+10tb/2e7u/939/P/S+f3/ye75/935+v/k/f7/4v39/+L4+f/P7+//2vj5/97w7//l6ur/8Pf2//X5+P/t7+//7vPy//Hy8v/6+/v/0NPT/7/Nz//c4+X/1trZ/+rq6v/s7/P/mq3d//T09P/39/f/+fn5//n5+f/5+fn/+fn5//n5+f/6/Pz/+/39//r8/P/6/Pz/+vz8//r8/P/6/Pz/+vz8//j8/P/6/Pz/+vz8//r8/P/6/Pz/+fv7//f5+f/N297/w9na/+r+/f/e/f3/2/z9/9v5+//p/Pz/5fv7/+X9/f/n/f3/3/Hy/9ru7f/e8O//2+Tj/+Xs6//r7+//5urq//P29v/t7u3/3uTm/9fm6//Z6fD/4+nr/+Dg4P/p7fL/nrHf//T09P/4+Pj/+fn5//n5+f/5+fn/+fn5//n5+f/6/Pz/+vz8//r8/P/6/Pz/+/39//r8/P/6/Pz/+vz8//r8/P/6/Pz/+vz8//r8/P/6/Pz/+vz8//b4+P/s8O//y8/Q/+Hv7v/t/f3/5/3+/+b9/v/x/f3/8/79/+79/P/v/fz/7fn4/+r09P/g7u3/4vb0/+b19P/d6uj/6fX0//P49//h5ub/3e3y/9rq8f/a6vH/3+3z/9/m6P/l5uj/w8/n//Pz8//4+Pj/+fn5//n5+f/5+fn/+fn5//n5+f/7/f3/+vz8//r8/P/7/f3/+vz8//r8/P/6/Pz/+vz8//r8/P/6/Pz/+vz8//r8/P/6/Pz/+vz8//r8/P/6/Pz/9vr5/9DX1v/W4+P/7/z7//L+/f/4/v3//f7+//7+/v/+/v7/+v7+//P8/P/l8/H/4e/u/+Lu7v/o8/L/5u/v/9ng3//W4+T/2uvy/9vr8v/a6vH/3Ozz/9zq7//W2dj/3N/q/8PN5//p7/T/+fn5//n5+f/5+fn/+fn5//n5+f/7/f3/+vz8//r8/P/7/f3/+vz8//r8/P/6/Pz/+fz8//j9/P/6/Pz/+vz8//r8/P/6/Pz/+vz8//r8/P/6/Pz/+vz8//j8/P/i6en/2OLi//D49//+/v7//v7+//7+/v/+/v7//v7+//z+/v/6/v3/+v79//f7+//k6ej/1uPl/8nW2f/Y6e3/2
+vw/9vq8v/a6vL/3Ozy/9zt8v/b4uL/5eXl//Hy8//N1+v/1t/v//j5+f/5+fn/+fn5//n5+f/6/f3/+vz8//r8/P/6/Pz/+vz8//r8/P/6/Pz/+vz8//r8/P/6/Pz/+vz8//r8/P/6/Pz/+fz8//r8/P/6/Pz/+vz8//j8/P/z9vb/6Ovr/+Hn5v/n6en/6evr//Dy8v/v8fH/6uvr//Hy8v/29/f/5eXl/9Tb3P/Y5+v/z+Dj/87e4//b7PH/2+vy/9vs8P/a6vH/2+vy/9vr8v/b6e3/3t/e//Pz8//39/b/3+bx/8zW6//y9Pf/+fn5//n5+f/7/f3/+/39//r8/P/6/Pz/+/z8//r8/P/6/Pz/+vz8//r8/P/6/Pz/+vz8//r8/P/5/fz/+P38//r8/P/6/Pz/+vz8//n8/P/2+vr/+fv7//f7+//2+vn/9fn5/+vw8P/g5eT/4+jn/97l5f/W4eL/3uvu/+Dw8//d7/T/0uPo/9zs8v/b6/H/2+vy/9vr8P/b6/L/2+vy/9vr8v/c6/H/4ujo/+zs7P/4+Pj/+Pn4//X3+f/X3+//7O71//n5+f/7/f3/+/39//v9/f/7/f3/+vz8//v8/P/6/Pz/+/z8//v8/P/6/Pz/+vz8//r8/P/5/Pz/+f38//r8/P/6/Pz/+vz8//j9/P/5/fz/+vz8//f8+//3/fz/7vDw/+3z8v/2/Pv/9/z7//L5+f/r9ff/6vT4/+fy9//j8PX/3e3y/9zs8v/b7PH/2+zv/9vs8P/c7PH/3Ozy/97v8//g8PT/5fD0/+rr6//19/j/8PP3//v6+v/6+/n/2eDv//Hz9//7/f3/+/39//v9/f/7/f3/+/39//v9/f/6/Pz/+vz8//r8/P/6/Pz/+vz8//r8/P/5/Pz/+P38//r8/P/6/Pz/+fz8//f9/P/5/fz/+vz8//f8/P/2/fz/9/v6//b8+//2/Pv/9/z7//b6+v/w+ff/7PX3/+r09//p8/b/5fH1/97u8f/d7fL/3u7y/9/u8//i7vL/4u7y/+Tw9P/l8fX/5fH1/+bs6//X4O//j6Tc//r6+v/6+/v/9/j3/9Td7v/8/f3/+/39//v9/f/7/f3/+/39//v9/f/7/f3/+/39//v9/f/6/Pz/+vz8//r8/P/5/Pz/+f38//r8/P/6/Pz/+P38//n9/P/6/Pz/+fz8//f8+//2/fz/+Pz7//f8+//1/Pv/9/z7//b7+v/1+vn/7vj3/+z29v/s9ff/6/T3/+nz+P/k8PT/4+/z/+Pv8//j7/P/5O/z/+Xw9P/n8vb/5/L2/+rx8v/Z4e7/4Obz//r6+v/7+/v/+fn5/8bQ6f/8/f3/+/39//v9/f/7/f3/+/39//v9/f/7/f3/+/39//v9/f/7/Pz/+vz8//r8/P/5/Pz/+f38//r8/P/4/Pz/+f38//r8/P/6+/v/+fv7//j9/P/5/Pz/+Pz7//f8+//3/Pv/9/z7//b7+v/0+/r/8fr5/+/39//v9/f/7ff3/+729//s9fb/6PH2/+bx9f/m8fX/5vH1/+bx9f/n8vb/5/L3/+z0+P/L1ef/7PH3//r7+v/7+/v//Pz8/6u65P/8/f3/+/39//v9/f/7/f3/+/39//v9/f/7/f3/+/39//v9/f/7/f3/+/39//r8/P/6/Pz/+vz8//r8/P/4/fz/+fz8//n8/P/4+/v/+Pz7//f9/P/5+/v/9/z7//f8+//3/Pv/9vv7//b7+v/z+vn/8vr6//D4+P/w+Pj/7/j3/+/39//u9vf/7fX3/+jy9v/m8fX/5vH1/+by9f/l8vb/5vL2/+rz9//d5uf/y9Xq/83X7f/j6fP/0tvw/4qh2//8/f3/+/39//v9/f/7/f3/+/39//v9/f/7/f3/+/39//v9/f/7/f3/+/39//v9/f/6/Pz/+vz8//r8/P/4/fz/+fz8//f8/P/3/fz/+P38//j8/P/5/Pv/9/z7//f8+//2/Pr/9
vv7//b7+v/0+/r/8/v6//H5+P/w+Pj/8Pj3//D49//v9/f/7vf3/+z2+P/p8/b/6PL2/+ny9v/p8/f/6vP3/+rz9//f6On/5uzr/9bf8f+itOP/nLDh/9ri7v/7/f3/+/39//v9/f/7/f3/+/39//v9/f/7/f3/+/39//v9/f/7/f3/+/39//v9/f/6/f3/+v39//r8/P/4/fz/+Pz7//f9/P/3/fz/+Pz8//j7+//4/fz/9/z7//f8+//3/Pr/9vv6//b7+v/0+/r/8/r5//H5+P/w+Pj/8fn4//D49//w9/f/7/f3/+739//t9/f/6vT3/+rz9//q8/f/6vP3/+v0+P/g6uv/4ero/83X6//19/j/5+zz//b29v8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA='
flash = 'data:image/jpeg;base64,/9j/4AAQSkZJRgABAQAAAQABAAD/2wBDAAYEBQYFBAYGBQYHBwYIChAKCgkJChQODwwQFxQYGBcUFhYaHSUfGhsjHBYWICwgIyYnKSopGR8tMC0oMCUoKSj/2wBDAQcHBwoIChMKChMoGhYaKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCj/wgARCAKAAoADASIAAhEBAxEB/8QAHAAAAQUBAQEAAAAAAAAAAAAAAAECAwQFBgcI/8QAGgEAAwEBAQEAAAAAAAAAAAAAAAECAwQFBv/aAAwDAQACEAMQAAAB9PAjMAAaqAR8VzePp+urDNt5qgCAAAAAAAABFBFEAQVtiNrBbMLNS6/zbRz42yO64TcnbF7SBdfOsYdnIx9mfORef6FFVFbkETVAKAEKItIBUi5TkcWdLn5Hl6vLwXd9fzDhC+UUQBY3pqCNKAAAAAAAAAAAAAAAAAAggY+b00k7o9FrBBQAAAAEVFAABFEBKF+oPBzdfPCk+llOOrv4G64e9siXPc56Jx+Xb1NfErm1Vit5PqlVqqlQAAUYI4EBBKOaIBBqijaKjErvdeeW9OH1BMu/l83ZBfQxrWEdjSoqbwoAAAAAAK2iOahQBAAAAAAJVtUBeb+neU+nTyaI0vtUQQ4AQrVARBtRoDhBCxPzAXloJB0LWbQc6l7Su1hFaY8m4+5YXRm2LTVtjUunSdvMn+hSZd3kzO6wsfTwy3VjrQQVKIA9gAAKlAqSjcpvG4sFhadP2nJ9lt8yiPOrgYh57Nd/J5n6SN40rNysAeMAcrRDkaMVzHCUBAAAIAJFza23GmbXPpLnLed8oNDRTOQNIzQNMzVFomcgaJnMS0ly0damJSZNRbsiSDhLhHNcGtcyLbu8VlWlhInpuATiWQKocv27Y6PJYvRON5vfzCzWz7QexMUQaiAipajrCvoRpv43peXBxevD6D03mnotYweW9t59NaXovD9O40zNfryX1oOHeKV1Us2PweXb6xJ596CkihfOAAAAgAYXnHsVfH0cZ+zkb+Wg5unO1qI2oijcqBLkBIREGiIolpXcBaM6SOSLUcEoqAxFRSKg3LbojL8mY6nrTYtwrQdRmLsNHTbI5xVg8f6TDl3eXRegcHz+3GgufaioomjS+Tt8HpHd/wAbxNDVbz+n0e9l8t0+c7Mh2Ob0dnXjl7PIV7WkSDRGxNlvXTTz7uzLztlqzoDXEKAAACABTh5LMXL6Zi7OLpoIF5MUcNHKIUQGNEcq1UkY9WDzLefqxpK9jhuFCWqA2o5rBY68q2IDlUWkgqAtymrNR9Ge6uOrTxqqPJuLE6CBa+YUvTeO5PoMJ8keffHZpR3yejFK53fHcZHHo4ej0PHtjak7CLV15mvSnpyScKujz+h0tpydPmpTuRE8H6b556Xgrjmq+8VEE4QBQAEVA56t1QZw4e5h3KAl5ucigoiJgg5RrkTRRwLj7HKTWzXfyKvtL8b5Uo0E4aMEVGR5etiybDqgq0CMqXjHA9EGK6Mbs3siZm0/IvrWyjHqo0mji+Z5H0nM8r1fMdTYu7bX83l4OzyJ4LOvHRgybeXec87h50Iuk0ajm+oebcbrDdkMFcGpn2dXrZepfK9zVjZVRUgAAABBA5atlz83tdzibfKdfz2g7E27yCvzGff1zYHacciUrpL0HAgDcfH7nPTpPK2/L3QVSpGrHjVBVYoJj7NeSjTjgd9arZHDmrWFMsD2SCI24Y4RoZoPdflaFVbpHAcx2cPGr4nZ2FjmdrPbNfJp/QcOLn9BkbYZ8GTbDu5MXWcOEc0ClGhZyGRrh1OrTPtk1MfYvjeoue6gCAAAAAAaKoVvN/SsaubgeztDyq8Z3RHp0HXEvi887TP2J5bTg16WoNFzmK/XWuhHU2lMsjBSgjlSOYoPVBjhJJnn8np+bqum0Ob6AEp6mQKK
xn6rJY4I5LL6L6LTX0k7d2DPa6HDh2EuFr6+Tyc0WnXQOvnqQdfRsYnRYbWHVubD0w+u4jsKidRzkVwnS4rtuJy5+5nhs6bx7TpI6mOY6bUBIAAAAABFaDixtnEvFURt5KrVSVFql2l5LpM+u00TXiaitRxfYQ3Cuf1KNpO/A2BTda1g5ZYJgbPUzlWlsYTnNnB34kcd1PM6DfW5kTRQQ2M7n6dltO3vhRtYOiVrWsu486enBUEk8aULh9Hy3PluWeX63TTQyrMGhZjgxVVuWhIzabdKmVzXicgjRBMiAAdvUw9bPd7munUUEgAAAAAEa8HBhbuFpiiCvJVRWnZ+gTrw/WWkz6xHN089FVW2yIqXM3syQ0ty599BPn3aJ4n11MvOb+Hz9W3BSu655u/x+2LTx9NKzTO0spmjTmq8nWXdHDmr2Psw6xX0KtS46LFuP15q83OdAVYyNXIK5n0LgO3CWSGnWWhka8S05WeqgvRlhnaVyI5VBolaoxXI5N2pl6kavc2WOhoIpUAAAAAEVAIMPdw9MUUKyFUQ3H1+GnPqNHlupGoDsVRtBGI5BYq5poa+VfCtqYe0KWCSUjMzmO5u3Wz+hzeToyY5XdOEXR8vqbZaMKs1wxLFJmO3cT81DxdsNzPO3nKez2WGvCaV/mN+a1HZwt8Osz3RBl9Dzu7GmniaeJNb7ar9c6ePr6SU+vy/UOFahUoqjEeKhUcjHamTrZ7yPYueyooAAIAAABEVAiw9zE0xQFrJQYmmVZszaSDqgVUEqI1sY5ifH17AaWrLVFndDzHREz2c64lwGtkyYd3oU/HN4d0ruz+vn7jkN/Tw6MlnNb/dwMxd7Ea2Olnt+f3RU87nh1Ov84p9C9wzvI7aXQQ58/TzXrORo3lp39DJ8zvua2Vb5tual0+e9XzHdp5p0u0WtlHOHqiVmPa8aioAiIm/YxdidpVaZ7PGuEAAAAAA0GtsxdrFrFqo28loXUnTgOon0ef1nqi9XiqwRitEQAI5SSWWdHZ3QcpaZv8AP6imebMnDnZtStw+n1NuPMw06XOzddrh/Q8m01Q4j1bHefJ0NGh3c3okC0eDt82yuyz+3LlWzu1zr7lO4p6TlfXfPM9FtbnPrPuobkPndhJG/De95x6X5V6nnMr6tPddfueY91WGtJgXnOktFijSblXnM7UKH7WLrxvagcsbo9jxKAIAAAAY5G2Q20ZBDdcGBB0uM8qii1kIDlrXMTcVbU2o5azxH3li5OE7rkNHRu0a6d+5jbdRWzcypxelPn9159o7M2OVHceleB+k89d+5ruWqXn/AKdn754e5jdWmnC99H05eVU/QL/Tz8o/oKk10fEd/nc+7+E9H5nM1KG1g8/TBbg6FTlcVtU+7nwYImFPEWpY5ytq1UUx7mQ2jrOp8n0Kj0bz6OjFWfQ3vV9DNBO4UAQAAAAgNitkKRFzU40zLePZZpTVejzXsdk3hq49h+HTk28m/wAPs3UzIka2jkbff4rc/RTfDzvO6znY6H7mDo3lyva851kb7njf0JwBPmbvVKOkcP7BznZY1ZHrw9bVcrUSyAo5mrSqc91WTrFXoIbSbEkSWxsqIZyfS+aUei8tT3t8E88mptrIiLWSN/ek8Ad1xI2I1wnMFbEVE00stgu33+T2k+xt5OrWSgAAAAAiKxtz4GsTA08zn7jWZVHjplW1pvtcdngJXs4vP1VJXS8XrU4daFaLtYO73eMORenlg5Pscfn05/M9A89nsv7fPbDjt7NexFc6dDS0zz9yRMdEcOytFVaSI4Bo5ENR40yRFARYxyMSvS5rh9Oboy25W5WuHG26ehl2Q+gYbVHf1eT26Oq8m7bKT4R1veRit9fRrxRd7JWMaulXIno3mPY16XYaufoEKA0AAAAiKNw42zjVjalzkV0MXoYs+7mtLU0M6agdnjrHI1OnYckUihcsnFEIoDY5GN48mjn5a83V6LmsOr0m9y3V4WxXuqVHAIqowURCiAAA
KIAogJWOjBOD1eG2iBuFV1v0arxMpG36F557DL5DA9So3PiN7srzjotypcmo693z9Q/Hfl5cDaUudSk1sR66tCeG/T9ZsRT30gikgACKA1VBwYu1wzNw4jplroVbPFi3rvEdbl26yidfhCKIYqg2uVQUVAERAGSJSaPAZz/SLFcf6Bz1zl69lWrlo5UAEcDajkE1YZgeig0FGmI6OReYuZ06cSVq3bhhqjnusjX1z63bcH6XD7FySVOJHtVUcrZ8xx1t9G53m3pyy8wp79fPzMW62Y0p7FKjWdm3DLJ6tb8/7vb0pkECQAQACKIEPM9NhXn5/enyfP8Art7Ac9Vo9fBP3/Ko2zX04xAEqo5sFRAwBKCIUCgcigiJGJ2Nspnc+hya4dXXmLq5ayK1E5GCtYtjQUHIABFyI+ry6Nzn3ainPpz/AJp7T5x6GfKypa6+USdlZ73tPiPtEaWkFeaCqjmuM7d5pJdtPU5HmHs/m88Wfn3qccditWgvpcxHPuk7XT6R26VquXgCEUBEBuLB3sSsc/i9nI4vpk6jl+wFqW6U/ofLbsSy59ePS2sa+ZHNV5uYkabxEB6sekqOaCojGPgfTmq9vXwK11FVVhTobjFeVNcZFy6XPY8a98vlLo19N5PzjJZuWuX6c39IkZL4vSNfg4PH4gl+j84lHbYjXxKm+h8DLJ7pc8M7NnoZQuDyW7IkkkbmOzNFyXEdRoIp8o5z1/yE3L2X1QegaUc7hqq4QAIEUGgNxcd1Hl+XdE5vRcvuZ9vra3R4yLap9XivuUnUpIwSGqiTUUppBNRnSJnW4kaUU6ugLHsW4WLXzox6tvC6BiohpyORBU1FzFWkzkucnV+JAi0uMJiadiRq07S1wr8K9E53mDK43OXs5xb9EGJq7Q8GxIpMdW49FHrMCgz2XQ8G7gv0VcesnvryUoupbmoOPx/0Ty9VJLo6q4dnvvPvQXqqoPVQAAARAb5TlfT8TPr43sFNOS0lSSsb5BZms8sNvKug1w9BUMBW61LT5DPbpop5Y2lpSQKtqzyRRq7PObTnEhv71LEfmJWWmjudqGcG1M9WjhEcVppVK0kCLaJNUqvUc7OdB96tVwvtNHrZk001d0MYJRSmOSQmIcjksoUd6gXr+peO9jGnUcpr6yOYczix0oa/VqK+qZ8+RN6xg9BfdIY2w93ADRQBGvRuHE28K8Wo5aybM3Tmqd3Kqxe3W1+aV6mdqZ2uTVVTNBw0mDu5sa70lh+PRUZca3TjvMTxcjrq7eNt5OeTq26+vS4fivSuDZlJpuJyDVAy003CzG6wGT0FVuenpnMVczzLqw3qXs8CTSrn0l2qrLlF9cUdqCwCqoxRQQitoy+05izNeuUsVFc3m3snnWfbj+hcP1ePZi2u3y+j5jIhzkWOf1uG4XqYF+mAAABXwt/D0ze24qVLXrSDGsrS1dLn3m6WG1UwKrUhIMvPTSXmelx6OlKM2V2yB+svikNFViuVJbalxQ5K5ucsPpvNuz4C80VoIYZ4apFMxBwTErlajt1LuvCyrcqLSRXGPYNkKTKN+jLtuRzHADciiSNetpMfXzR2/YPCPRo17TjLuHOnO3uV2uf19Ls+G9IJ4C/6Fa6PmfH+53bLbwH0gAIKBBkbGLedmzljipsUp5dOPXenYx9bHsVSZ5wIsAsqzRZx96dPynoeWlVt5DLPbpNydGymKr34ePs7rpEwTRbvnmzxlzJE92uTUeOY8q1RmtK1QtinEc01VaDLlK/0eeylfz5uyrXZdwiBMUbbc0OVWNFBqqKSK1GGVp49Gf3PAdtnttaTNlLyjo61wvtdlHNKqBmoAAAAACGYK7jbONWTEVumaq10pqybxpzy2azzfsYrVbYpM0eWmnR8z1NPfydvLON6x5qxJQbZaz+Ym6CCHUtl1L0GLU9dy1Pu6nytev4/fEgnyNM4myOHW0K1lLQfGo5GKBBpZmlvwlC/VcDkXD0VaueKW3FKCyMeJqKipBEa
crGCiypKY621kWVr6xrYT5rHpUfQuXfopsS1vlpLhbdw4RXAIoIN4ontvMus4WOfoNrgrzz65I5ui0UcBepk06Kes0IAkxdrFx6M3Q1KfL29hPDN18yseiGQ2UmuY4/o28nRz17q5pfJ7eph3OvkP32n8R3FXSPHKmlm9GD+6zu0vHhOe6rkDTXWN00+WFyIr1K5fK9j3dHJQkil5/QdlaWKtNazmaIniKga5BjCKhM1aI31Hd+nU6bvVVcPn+j0R8TN3cWV5zkwlp1NDmurK01Q141ACHzH1MWfiWlrwxx5iqiV/c5O/ddc7A397Gq2napObIArac51GHlrJ1jp46WuV1wgqIRHMDL5KynPthx9vPjtx/Y0JJV+yi3Corg5jhPYOA2x6WbyPu+nCvwfo3AjszNWK1Ox5DrvD7uSyL+D7HDvNxjv825VrR491ueHRi4LSFJw0Q5hAxlBsLZURYs918P9cDuABmPs1h1rXOc0jvOTt08O3I6h/Q5bWQOzyVABEc0PLb/oQsPLXemYzz4+50w546/0DaTo7di9s5upVRXHI4TJ1quenUrEyemwRKKQjcC1H1lVfC6UyvkW9lzuOs+t5J6Lm9t0TpTlatjqtitB5QnofD6Ru2+V0EdT5f1fFJ6q5c3o8t+u59wsd6LTkzmzTZ9ll8JBKMGPaqMdBLWHn1X1htesycl6sgvY+h8CWd/Zc7zD0JPzmPQoKT0Pz3RD2p9S1UOAJUAEAAQBxY+xjaYqDayRzAWlpZ08dMuZcpkwNQvFmdfyM9L0GiuXXmO0Uki1MrOZ1Bzu41Og8IvPe98Yc5XW8hYjX22/xnS8WugQyNyU7McrN46KtecbhfR5WVbpLxYd5jMKSe/ZFO9DNo5DRrZhRWWZg3PozBLluqDrtSZ0+ZsgkBBKoFJ6R5p6FNWud6+rL87dDqp9912dpVKihKKiggZoaTcDNI67F2sLSFFdecao4knibLt1Urq5kz1JvI2QMTVs5Ge+szM1I0VjydIMnZFUl7B0KnnvLdjIuBLlLSbHV8bp4bd9a86yMt/aK/AXsyGZj+7hUQJUVoMqzxjfZaNDXOoiHIgFJpjVS02hotkwq+tiDltQ2mwVCUQBPEHTe94Hs877iLNys+zkO02uam+p6XM1NOVwi1gAAmPscyLoJHNqo8jawVnYx9C64gfT0E62Ni41TpZo5VEkwU3Wx2j7jV8vsOPQcvK3EQz3aqqJtDziNPUuR5WQcVxX1DKViOg9Ro9beXkfP9NzOfcadC843pMfSOeUbIgRQKtmpbbegohkjW2oClAG0aqja1yDXD240Y0jXMFQYjpbinMcK6Oi5m/zel7V5ZMzD1dLQ1dDbztWSKTby3ACAAMLdQKdCnsEUWXKtSkteZzaw6m9FefUgqla5BKAhrXtZFj6eEr07vOXGus6nzy049L42fqWvErPbcJNztZCNnep3JDSVl5eX8j7JxUdPL22RNX31JU9a3haBN2vYqoW1HKxFEbVFakMe1A1yMa1zKYNEAKinn7WONZ4NBlh74xY41zJ9rndPn7+o6qt5ytaLUL5PRe58C9lrPaVo5kASQFBoBUWPsY989qKs2p5vBWPPVFRwIDRPRoCtEKhydyEeBHp5wrWjhTh0V7ndFz6HT5Hsbjktfp5saVzU151awx3aPZarYfRr5Pf57j+tRetw+Ru73lHTUzpFpuuZJUtVFENdGmgBKoqAjXI3GLEm8w9AL9K4Bg2Gxt7SULIZ6tWpraOdJnp6b526rF2lgnGnofnu017DJDNUOQEgABFG6+ZeRrLrFe+XiBq5bq5ijQEBURAURBqiDTs7RQfMxbmYP1WB1b530uWvYWl7/mekv5hng+h1tQsfQeQxVPG9Eilk9zzY5FXDUR0fRzuz7eGX5lfzNeerTkjeQKgxrQSURAcNUSNEHDkXsMp2/gbiLioOK2Xu4paT5lpuRHI1XjWbO7Gr0+/5fZwuR12TrhiWqOv3R7HPE9w
9AE2sue6uq0CvTu3KjAz+vjU+OroUpaCoAx0YIrRj0FQIKAigNztSqGRBdidR62JZHuOzdAl3T8xmNeuS8D6M84XaysyDXUePDvMFicP6R4crzt7D24rQVo5eNGKg1CgDAARqtcw4fRYtaU9rB2pNAAha1gbwIdTJTuJWGN7Xie24+juQPnvVTj+t4ju48nrOO9X9rh6VzJKSKiipVNCs6ryvYDNShpCajlHzvmXs/HKeKGPaRj2pMAbeCKVQUYAIaqN1aOlnBDXvUy5L2RaFuR1dBTjXYqpXp/afPek795OM3EtZnJ5qM7zqWNuTdwtwi8rRw4YA5oAAjThBNGpltpmOCp9WncauooSKKOvl7OcjJc+F2+9n3s77CbmNrm3zKHc9L05ePe8891Ll7kUzAASCdR0W342RWMrTRIx6CiinYPwSzHli31z7onI5rQCAqtchUUEiKjcGfo5wEb1Kz0s1yrVzKuEyNR1CVrTEU7UMSYqMG6etoua+zUuzNlY3NOBolVoNQYDmNz21zrNRtByS7k1Oy1qS519zIitTKN2iKjR0qBodLzPrefRzPqrNGN4mzGvBFMKIEAUAAAAAEUAQAGtcg8Tzv19gfOdr1PywNGbn9Ii8jHgArBUVCMextmdp5aBUGJWtRN1nIiqzLTlJsI1aIIpIxkcrU3a1DUUSq4EoKhUVGgGoe0Y3WqPhpsrSRFOGvQWqsiVu7Qc52mQzNJQv5oQ0blRWz3LxL32b1lQIBQAAAAP/8QAMBAAAQQBAgUEAgEFAQEBAQAAAgABAwQRBRIQEyAhMSIwMkEUIwYVJDM0QEJDJTX/2gAIAQEAAQUC6xMS/wCTPDKytUbFoZHjOSWtNWJ/w56k4ShNEEgjNJVVi0UvvMi9I5Ew0+y8EkZMY/8Abrs58+KQ4zidyj/4pLEUaLUYUWpTIjtTKSnM6kDC01opIzpRu0FdoFasd5j3e+yM3LhuWl23jNnz059X/PfohbVXSBjk9+a0ELHrELMWrSkudzEzxIAgTCzNwu1cqMygniLcFqXaBPl+Df8ADlmLTrTTBlB44Mz7v+fcLP8A8BRgSm0uE3fRQU+mQVl+O8j1KRAmbHRqdfaemz4jm5hyO21/+LdwrTlBJWm50YvhNwZv+ezMMER25CtV5Wmh9/wrFqOAZtSOZMEYk9uIGfUiN4msGohwyji3NJUGRo6UMa5AOUlCIyLTgdWqhQFHRkkiIXB+tn6n8IPC0iZA3Q7syY2L2c+5di59aGA5pqVcasHv2Gmdj0meWT+kSq7VaAaWmlOUFSOJM3GH49G1SxMaEWYblAZJJtOkBSV5AWMe6D9mbL6VX5c3Rrlo3m02wYzN/wAscEUJ84c89lz2XPZc9l+Qy/IFfkCvyBXPFc9lzmXPZc9l+QK/IFNOLJrAq9qLA1ao5GLY4Z4xF2z1kywrUTSxzUiwcZC/tOyZVmffXbYFrVWjLT9S/IkypJWFXTaW3T/2GnbH5DLnsueyaZlzmXOZNMnkFmF8+1KYxgWsRNIEoTw/+vf+rVjltUrd8dsJljjGaY2Zb8rctyZ+vUKjSRw1DkKaIoiWOluADvOxXkhVW96rNpjpl2bQo91knWpS8uMWytNDJrCxxFnJyiZoh82Jjmk0SY/yvZ1iM5Ka0qEoaTcc9LdbKzM0UdOs7k3ZvYEsJpGXMZb0xJiWeOETIQ2rUarTAVKRo3Z8dH0/iu7DKIAUdvT2JGbPGa0cRggksRi12fnTYwqEXLjTdEQbWdss3ym0kJCo0Y6vtOuTHkvj1Mm9jOGFvzJxTefcYnZAeVvZM6Z+h+6eMXGatiuQO3SXiqO+ceydXsc+lCM0xwAbajHsUfZUoeaYthNw3NxacmTWVLMAOHj2rUw14adkbUB/Hpwsezcl9EMYxxs3fpfh9svvqAs8GdM/DCdeWvwgQyQuzcXbKpOIzt3bwrT/AL6jkz84eXKfPmEdxVYeUPCxMMMctmSSSpu/H4WpeRFNKUx6
cfMpe1r8xPPpcxxWi+PQ3ssyL0qv+2Ye7sTOzP3444Z7n2GA9wpupnTSdhLKF0z9BgxKeqLxWaBxI4yBfREhzmoTvF5ad/XTmjjrSSOSEVRgYY8Lwp7AwjPYOwVGtkm4zRtJFVqHPJViGGD2tYq8+vo1Pkxl8X91lqMmyGAdkVyw0QUhLkssrKys8Xb01u0z+WTLKz0sSA3QGmfofOSHK1KHe9iA4k/dURZzj2srVsISPDljgEjxl/UpGQ6jISsW5ZSwZtWpPuAWFlynYOEbMyDx7h/F+Le19WHcr5HywMyt2g9It7DPsvHJhV5N8fsieE0vcTW7ib4EcOrIMQ09PYpbenjEBSSu+FDE8pBVETOuLqOLdL/Ty2jRkzFRBkMIim4QjuN1qNt4pal6TmD5Dx7Wp6k8Mmm6jJJOXxfhj2yLDVi5k12VzfT4WC4/sOrodidji04vU3hvJdllN1wP2TOmdWZRAAlF1IbOoWT91JSjkk/CjdctoSvk7DWsMYH6ZYS3j01nZm5orU6jnNVoyc4fIePZdarWkC1pNOQrB/GzaCBVLwyOp5OVC1+bdBK0schbAhsxS9WpPy6tYmGGnHzG0tsxIW6/Klb9VfLNVfl3BWMMfcSd9gOzsz9/tNxEnQTd2dSHtjszuZRXHZvznxUuMQRzNIsuErKyykHcIgVc9+6PTJMwOm6SIQbgHyDx7kjZC3TkAoK8kz1oniisR82F4ZBKjG8UF3/VVDL1W6NYk9AjvOXbBWoDy6w+fA8W4j3Tspg5ck/psRFlvkHmMX71uyd8KP1MfZM6ZGm7vKyjnZ2YxJrNHcjr8l5B2RM3qqS4lmwSrHuEhyH3JiOxageEtMLbJ9fXG1O1eKxYOcq5b4EHkG7e4fhM2G6HbLWdPVUXGDiS1M/7rSodo6oW2FxwEalfu3h+glF5kb1ThuGT/HSkzFXJf+39M1ouXZ7SBE+CkLBuoywvkLlyDZsiXpdiUMuVc3tIUbuQA2Y43ZAL/jRFscXyEg4K8O4QxYgICq2Ii5oJuNuPmVq0BznCHLjjxuFhb3j8e39o4Hn1CMdo3h3HuUfZ5H9QeCf1Z7imR4JhfBuzPGr0WFVl2SxOmdnawLMV5t1WjK8bn2eYd8cM29s5UZMnAZBrzFDNZZsMyiPYV997tnnUGA5BjFpctsdRSYCQ2kad94aW/wCmzEM0dQXji6QAQZuEUjss590/j1WZmgi/Pm3VpedFxZDFsfHplwWoM3qiLJv5F8KT/IHqMX9TPhtPn3G6mmdoYyawJu0rWK/LOpNuGQnaHmc6KT1RY3xwTtKEb9rYFDPXmaQRbtCeFdgaeOpPzIybCbuo2YlqLlBLpljE8jeuMu5eRRE8Noibmacf9zjuLJm7N0twHyHj79s/j0NwuwvNC1abfWi5MPRheFY//pRF6ax5lUpYMvlX+bliwoMiTm0iL46fbeKxN+qWccp4thRZxH+mwfxo/wCKeJ45YZWNrI8yPlvtgnYkzYUB4V8eRJHLzWBD2fWBUT8q7u3CRbWfwy1WPdDEbkNaTZZFN7I/IfHuH8ehvbuvi/u2w0SyvuZ/2/cHaSftcN+1Tu7fqJ/UM4Ylin5sVc+ZGmV/soi3RUnwUQ5VmF6skJNIMH657tNxVayzpiZGzSRg7wziTFw1PLtbDLVu9OVsjBJlMjb07H2N/khLMfU3EfkHhvcP49WoWuWNKbnQ9X1qb4uWC/VpXeBn7zP/AHDIPlqPojY90NPucsPMaMnBWx9RtkKs+2c+6+phYwpyYev/ALsCmAZYsvVnsPkX1GYm/DsSPFNh4iwWpR5GjLkd3q1HsDNuhpvthIm2gW2UUfcazbnmB4D00t1bOepuI+Q8N7h/H74NxuC42NLCTmdeo/71t8tRblgD95X/ALtuGoDmtBLkKX+aB/XqEGH7SQ0XViF4yo2NwzdmH1NO3Lnjm5c8VwzU0huI1ymFmkgbTiAZG7NeojOzE4EJMQN+mwBZWovmOMsR
VqpSR/0+F1aglrlAe+KPxSfGoanW5sOinkcYbi3SPkPHuF8eq1V55iLCLdeof7uN7h/th5kf+7Qqz/qV3wdPPPhUjtiywQTbts3LaaGwBV5q07ThEXKkvBkZMOFOpNK0dKKNA4srgRSiRPFLUu84d45u1o7AeqM7T7mryZG4/wDb6LU3BdlKSSCPlJ2GYJA/EsC/7s7LvZwjrvBa7t7I+Q90/j0vZhYm7sm69Qb+9pDvtSty7YOpH/ujfCAtzTF/Ytjc1nlTRzSmBHI7zRCUbcwQ06fc1yrz42IqdiV94M7Sxx+mSt4PxIcjvYtwRvZvb0NuUE1+Zni1G0KHUOc0ZMQj2I33w1MR0K7O8rKH0lrEXMhA8jfN2m026xInygT9k3frHyPxb3C8dF7P4q0vdyU3UKvt/daY379TbDRl+2T/ADk/6qp5aQ8QDW5iqxhJPXpQhGBQg/lW6wzBVF69yP1DqVAbIVjOrL8Sl7WavYbkxAF63NNIcZBxZaWYtPe0keXWd45TyBN4DtptcfQh8yixV4M7beUD4WnWmkBvDZWHXhN0h5j8e4Xxfofu34MLkIsLN7GqNtWkNuiujujifKn7KIt1OoWDsdnp+azbNR/IhwUkDP8AnGoJnkG7UaQamWjfuN6gFoZojrvKO5aaXMh1oiCDS9Oc31xsGzLCjid00DgqkZSQavBytRmq87T6+Uzf2rdmbwBMzy/69Y8S43Szx8qSGR45I78JRSalCChuBIH5MLIrcSO9DkHyn4ChMAEZAJn9vGWaAGXKjRVxdSREHtN5fxqYb6+lemA/gf65Zu8NIs1YSxPd7tTLE1yQYrE1vevyJGX5cyG/YFadqZb6xDJHwtQNMFmI6x6IW2CaHnKKEQH+Q1N5jpUnLHTTc69BlehYIqwbINVhezqUUTNFZjYNR24rqeXlqrA7rUpOVXKpkjJ8TyuTN3bCIGQOTLLosuofSf8AVShVPU3tEpZmgG7bK1NBCc56MRgCb28cHU8ex26uaO3LMybxI7GoQ5bEtUj2yn/h040T7Z5G3wSTlGcMcluWDSgihmfJ8A86Pzwdu/G3XGWPT6xV7P2DqaEZhO2MZgA4lnirqhGVmyvx2561CtvtOPpslylQruZdmYz/ACrAWAYpWdzdu7Nxx0OOUPoUN2eJWrp2RgjeUtPhjgjmbZfb1M3jrbpmsOxtK7tNEwBwzwIpDMstJNJvGSx3jsOqe0n78LcXMiw7NWLlzn84S3VtQHbL/H4v0WmflSxOMjghj3LkO8ekx/2PQ6xhm8Mpa0UrFpobq+nRC4swtwJYZP4mF57byBBFYsHbeMWihsPiyL7kywn4Z9XQ3B1WmGtHpDyzFqHerA/6vcnshGUovLFEHp1eTl0hfIK/E8oUAjGvcleGGnOQz2xxOnfaVLaM3DGVqNfbIA81F6oqhbh1KLmSfxmb9XLbFrT83rmlxPHFpuFYqsEVFtlRvY+jpsZxRtH1TG0UZTy83TY3sHJHtCxYk53/AKxhM45ESM6OlR8i5o47CyBeHzwbj2xkd0epjBHHKdilSf8AX7PnhNK0bTRtKcQbGrAtWl59uhK8sfEmjnF6Rwy3fVNjLd81G/bwZSi0jVa3JuXqpV5Kx4KR2FR1lXmaQZB3D++I/wAncoKsskj9k3seODdWsT7G3GK03tFqBu1cRff9huKXT9KjxNWDk0JRYXxjVsfkth3UVWY2kryRrKZCOUUbC1GBpp5GZqtL/E3sl2aORjVyPLwws1iOHc+pXWrAYSRx1jJrCbgcJAnMyU0OyE4OwQE7gzRXuPZlaAo3Ao7kGo1iqWf8sWhz4IRF0xKWQQGGwEr+Vjt7rvhZU5tHFakec7cP9jp+fxtZldogd8QA8xDPBp4vr0jKH+RDmOaveZ4Lhq/pOYwqTPJV04orEQAIOAu2qxRfkRvlPJiRiY2jN68g3msVKrbYm9kvj4cJXyVivE1nVtzi2yTby1pFQ5jdsO3AhYmKJnkdmfgy
2Nv6H7qSJ4jfl3Y5qx1Sg9CqStLGywhZs/8AA74bV7e5PN+iPVD5cOshsv2wnZmWjUmGKSkNnU9XrcsyjcVT5sb6ZO88ArYKnhGUZrhVznuzzsTsKM23OSpVpbLFEzNppZnFsD7J+OD4dNGDNHDGCatCyjkeMetk3Q6fsm7qWtlylk2gTRWdLLYaFM3/AA6nd2q16YWkwDIUL+tvlp5sVcIMWL8Ue0qleQtNpickETRs3gvGsyTtP+IX4imfCd3J9uCULtt0scaoDd/ZPw/RYmGGMdQiUVmKR03S3ssiATa1pgkTDJWsD3ZP7DdbvhtQu4GvLmfUP8TcPpvEBbh0a+MDRkJAQsQlTrivyYoWk1toih1uEmhsRzBqtf8AIqnNiPeZog2tLDENZwfZbutfAQEVAfKmrSNKDcPvqLxqPNjf8iZU5ubC/YbUzzSLT4OXH/wsrUe6PTZubB7HMZibz0ubCrUhSNFAzDqoPFbksc2BMmZYVPJrTtIFmjFhZ047m/GjFtXjKO7laLz3VGfmx6rTaG5DA8gbfU3B/wBUizHydFsgLCW72T+NiPmxP2elNyptRsZ4afX50v10N7rrPbYdeWrbGZZZN0/+rcJO9OUjj4O/ApGZjPLrytUrc+Im2EhQovH8dFnvs3ZuGFhalp0VpVNBGWShRjqgMAjJqdb8mqrVU6zKs8Qyz7DKMnZiy7xMS0mvbFC3brP4rUYtk3CMHkOCNoYvL8k07Oz8G937futrOpqveKzYhUN+M3EmJM6zwZfTCzLysIzEGt6uDFX3EOODMiWsjHzWZAyZkTZbQpGivRSZFk3HVLYQxaX3i46zW5NnkSFXZndFJ3y7l9w7OZpj1jAUzewfxV0WKtw0yvtFVGyaKMSU8TA3Q/s+U/pEZZMwSNMOE7M6KsEjfhct2nuAm1PYo78ciA2LjLKEbXdcjjVzUprD6RFzJB8D5dMtT1DBE7m8Y9EWQm07VlXsBMLcWpBvq1eRJ98NQr/k1qemzvHFViij1mh+PKK+qNV7U1SAKsfn2T8LULHMLhp83MgUJ7CExfhY/wAXWPR2bhl2Un7prG2OvQDcD+eOMrYKlgGRp4yqAOtzi8uvS4s3JZk6ZaNKzStx1iy9ar8nEUzcXQsmYlBZlrlR1wVDZjlFn4Nwz06nGx1MbC3PnRIjZx7s3sMj8ahY5IJu6oVvyp3hl0+zwZDYLBk5v0NwOQQX5iK/tT3F+ahtiTDMDpywOms00mqHvKNtodOcJnUm1x1SvHFJGnHK2YTgqp8mWLVxX9RGU79+OoNu3Feoiy8cXfDRjucQQssb08SjllhejrZM9e3FMzeeLcHdXZP0zP8AuyoHkYtNmtlM3j2LcwwQym8prTIO2nVhhR6hWltFW5Y+yfijD+UhARG9/kwKukwxhSjKItLgUtewz153qDUlGexnPF/DJ1PbCJT6yzKbUrEjSE7uzoXX0gWE3pIicjbjHUlNFhnogMxnpZ1hxxZ1tyxxKOSSA9N1hyd7gCxXiJFbnjUOpby3unN1qczDWcsoPUNSMVpEjy6i3s6rWsWCKpOKr1jOZmw0c/LaCrUOa7bjpxs0dmLDi/V5Vk+XFUnGvAN9iXNB5fyoXeWYDmjmZcztpz7jkijkU1IBl3TQOEjGvteFqeoNGzmUhOsJxRCs4cXzwDg6LzwZsqG9JGDRtuqlHC9m090E7LegymWMqWLvja+ju8tgdoBJYAVqew20+7vgtWijjs2TnKIOY4Crn6mqy8ixEYyR+wXx6In5T2a8F+N+VRqVLUF8JY3DrljYwmdgVaqDB+IDoqcS/DB0VWbc8liNoLQAuZ2pvzbZgxtbr8qWGxg8stSutEDk8huy+uJAvig8QgUkjaOeyxEcEkRRCAg7po12UY8WTp3cnAGFeeOFIC02bkT/AJhTKtSPJ1oSG5R5T27xOh9SaocNSl+oQEpZaelwxCIsDT344rXWXx6BAiZojZ9vOjq1YaYVtRrWZpocddsB
lsR9hbysLblbFJUA0cU0T1LQMuY4DV/bZuUhmGac6cczlO+x1sdbHW11tdbXWx1yXdNCS0MGG4tZEXcwbbE+B7k4gzJlVIY57hxnI6f1OIsyxjoZO2VKOF/HBFocqawMQtO0qvVCK9XhGE7Lha0q32WlgUV7UdQMLpazYcCJzLTtU2D1H8eiQ/xamk6i915oLr6sTbhp6RHWtWj6nVIebf2Latq2LHEhy9qoLpjkiVEgaJ5PScTXlLG8MrLHDCx0tkHh1SQhm3ymw+lg9fhM3FvJlhoh7NxbjlSDubTLfIksXWCOrRORDDGKvf7a0Ud8cEcMLXp5DD8eEV/ZiufXFDZ3F1H8UMREO12TdkLjNHBXgqhJnHONfkGnfL8stnRKXo0Qf0JuhxTthMsMpY2dSAcJNL+VFVxHFqMvOttwfsgLpwvqvwbx/wCuj6LvIm6WWF9TDtfSv22oy3CRsy1SDc5TgyjnN64yGDw149RqWK0kE7aXadpoZIS0Wo0cHUfxYdxXLIUa1C2F2AirPLyXF78D26ek1DqV54ccea/L4SSbG5s8yn/MdaXL/btKmkTGyZ+L90/biUe5Wm/HMrInSLubJ/Jd2bIvG+eqvxH/ADJuL+I/Uab2LHyileCxTu8wDs7VfPdD4VR8wrR4uVSKACmViAJwFsN1H8RLYRmE0cHIjCbSeZf1EpQpaHJZOLWNRkpyQHza5fLo1R8VYbANBLIVmSpV5AcpctbXXdkxumNMTI8O+Oyf0Nqlh5J3lJxZmbi/ZS/OJvSzdNfj/wDfokLAwt2b2Zu5Sdz0+sPKKsBEEO4dRh5dn+MRDKw6bVF29s/HFndk0xshtI+ROpTZom8J4y2oi7cg7qk0qsDVo2q2x8NwwtqKPKv74YY5LLpvyyTDeQyzxvY1D9L5dfTD2XhSmiUcrISyzcXVbh9f/b64OpUHYfYLwfk376PJlomyQjtbXI8L+KSCm9w/j044MmTeeYDi7qVt0Ve+MTlqG59PBlvdMbppMJpGTEyknjjU0teYHfkOOpiJBqUbqO7ETu0c4XNM2pu7/F8Iywz93wseqLwPD6dRdj+kfY28cHfDY3yfbeeuQkT9mWjnicHwZktaMeTpVaWmcRbw9r8yL8o/j0g2T2ticdp8SVyRBp0DBZjgZtMqvDBylynXLdbXXdXR/uAggkEKsOf6fVJPpdN1a0Z2VSeaqcRtKOpafuX274GR8vhMDmpIyiKFNwZMvEzcLDdm8LKmPvH2HryiLDGWXN0KqPtmF+004hHFm/f2emkbqy07qK67E3sPI5ahpmpE8eWfi3BkM7sxlufi7ZV9+UivzE2kMMk3jownBlqHKET/ACGcfzsgVtk1m0KpXhlU8Ec4VIpYJPK1eq0bySZWUIu5aZRaIdZZt8XyHwmQo/IvlkTZGPsm7JlO+Sgf09P0ifCKTLk/bytP/j1icW/jNdlNp88EZaNNKOm0JIQhjvagqNEaYWJCCaSMJRiilryt0P2YNdbeOq1ZI4nf8B/16ZWsPVqRm0g8G4OniJg4vjAjzrVjlRx6bDvsD36tSik58ViN1FMOGlFeg1bod6Vp+H1PE0kV6u8MzeNGp5TeNdZmjiQ+EyZH3aBsw8CbbInfAvnfE/dfXHKI2RllO6fuv4rpwzFxOuxz5ypD2CU8mdaL99e/LCqtuOym6DZ3CbSLYKSGSJSjjTdQ72bQ77YzbDqWGniZuOU877Oi85wTV6x2lFGwjjqtWIoWnmqm7QzLdeiVPUXkKGTKeId3jgzLWaPPgoVeZPGwxh2zrY5i8PE+RZafC00+wMajXaGWF9p5ZOTKaRtoTdjly/LyLR4dujKMkRZTkndMv46DDpHGOvPsrVY68kgbwMZIh1Ut11aLF3bq/ksmbDh/+jS/bf0//YlyGmRi7WtOvPPwfq8q13swDtj6ndaqLtYjGI1C7M26MlLSilUNZ4mDxxL46tAddxvzbtOt8yPUwcq0EEk7vBLAmVI/xpebHs1O
200jyPuY3dZdP4F/TBEm6pH7ZdG/bgAr+K2WOpxmmjhVrW6sSra9MMz/AMiq4grldkDSmUEIwx9Wo5n1YmPnwQShpwQShpt2rKc3JmfUqtSeOtQa3EmZ34Oz44/VUefebrMkYCYy6e4Ll3BZp5InhmZ0BZ6jjEw1Wg9aeInB3tNPT0uMY6M0YyxMW0mlcm8rDY27pOSzLlupBdmgjWMr6454SfHKJ+DeR8VLElSfTtXr2g3jizqVaF75OcMj/sWO/wDHbGR4N7BeOmB9shw5lsIoiEU/hvjpWGkyzLeKYm6HNuj61gxjradfcJK0zIX7dEhbVNGMwzQcmVmDZp9x4IrmqDy8rKA3TSM6F/2u7J5URZcSW9MS7Om4spvBPwZBxaLLcqTEQOEhbnofiyGzM7EtOs/j24i3g3sn46BfBNMJcH7KRzfh9OtPkdpD/IJ+WS2zshmtgo9SbI2BNvPRh1/JJ++7vpNrBQlnpmZO7M12VppWTJ2yngYkUBMvUyZ8PuLIASZu2OHlbFt2pjTLCfs0pJ/P2zJm6RfBQlu0nT2zFq9flSqCHm2dGkkEfZP49DLfHGLHgJXdhKUiBZRZVqPlPFIM0SzwMGNPWeNRXSF4zE2QqU9g6lLzrLKItpafbaUAmymkFM/A/FuZ5psdsLHDCZEzOijZzEGEe6ZlhM3QQ5WXBCbbSPLyP2ZMyZulk60716XRDbFq8LFV7sqVczVZmJvYz3P49LdnkPcglxH0OzKQCrnBKErLDPwwjjGVnCSq9WdphytctbICfLs2E3ZQzHE9bVOw3YXYLgIJndX7Tx1x7dTKR1FFtbCx7DszqaLCzhnTIW6m4aH6oIh2tOHMCRsWNEkhr1x1MCkbx0x3YJLNbUuffk0uY9UP4phJ1yzWHZNwfwyOURT3YWQXIHQmJpljtLVw8dt2QOJMywm8kyCJ455pto6nZ504Dlybs/lafVeefVBGKQDcVQ1Aoys22nmZu3BuMvYYRTex98PKsxdvsGTexoc4xDJqUTI9RldPJRiraYMUlyCnDDN1V6tYLgxgJJ2y1azWksjqEBXNbkuRzWb0dSEeXNDuw1i+EalvSSIiN32M62MyA5BUOpGCivRSIHZ2ljCVFWMU1iaNNdhx+VCntwp9RrrUdS5jCyBsJ0bd4geQ9LqcmLW2xa4VvShdiHgyfhJ3P3fqeLBCm7dDCm4C+16+yQzhgav5I9PsRw6NU3m3XU014NQ4O2Rp6ZFUn5cAWmnJahUhtx6c8D09Qtm82H4eOjymwyjsSxqHU8KC1FMi9S/HjdahQ5sMm6I87kLZQ9uBuhZzLSdPYBxha7/nZCh8Rk7IHzwbh4Ufch94myxNh/PEBy7Dhvvhp7u4lNIQRly5C1WtJFDYhwBbuvWIbE8FQZBrVbvOu6vWltVxYo67C7pgJEMTQwP/AE+5cditMvPR9Mjf08zDxzMhfcoLckL1Lccydavp+5tuC+KZGhZzLR6G1hHavvXx/a3dMgfgJOyjPKbhJ4hb0e/OGWZ+AjlRjhF4+1FC5qjHEU0tcYLmqxwBp5k5rcedFtxuDPls9VqjPJqV6KSarSCSCmtSlnr0KNk/6faolfs6jbGtX9iXw/lkBoDTM7KjfwhxIOp6XvRgcJ7ty7yFpOnYQhtbheox2GsaTIDyQSRu2WUZLygdxUZ5X1O/aPsHu/TJ27SNtdny0Dtwk8faeQmDRe1m3DzoLkx4Qv3jbZNp2Xg+m6z8IZsDOQzQveOsMkhSF7BDlShhYXhA6ilWWIal04FDKMoTUgsKbRnzp+ljC7MzcX4YRxAan0qI1Lo0gooZoVl3TPhRydj9Ug+8+MC7cJwyw9nF8PGWWkfs3lH50w8XdWuNFGz5LK+mxn+P2nkEfYPwnTqx3spvZMcoo0Q4TOhJRyYQvuUUx1yrWxnBnbAvx3LPD6TKXfy+Xvjs6TGasUpYUz7VVycjJvcd2ZpZHzX4yjgm7qMsI39LeVIqx8ua5KdiXPYU
ydaPJy7bdx6ydGO1lIWAN8ysm9rCljRDjhQqBWi1KuLwA+5RRkEqHyopuayZF3GGPljtTNw7cLW3lWfVPQbaw9D+wZYU8mUHyg4Mpxy3xTOs9uBofLdlDXIQkhB43jJiVRv3x/D2Yn7XXxWDuzJujKz0NxlBEKpX4Z4tUvRR14pO7F3j1Jtr6piWvZCcVji3Vqp7IFV+A+0/CZF5DzBx8tOPcfLPxJVYimlp6bHCvq1ByHlben7FpQ77Yt2++BPhOTvwZ3TyPvlOQl4a9l6oekfHuu3YhwnZOgdAeGF2dMmM4Sp6kMqznhjhjh9Juza9PgW7qp8G9yRstMOHDzA/FlIGVIOEyYsrKyv40DcXZiYo3GSf/J/GocydBeQ3b+Aemxy2XLFHEBBfhevP4b3PqROy2rwhJASEsp2Z2kZxLTdU5KqTwzjtFbBWxlsFbBXLFWSCKLULH5M7eKnw4Z9p+zWCTeavFuEjIvLLPD+Nn34Sk0IS2ojsSycyTQYeVT++JeYBFlCA74duzzb46pVaeJxcS96QccCHgBKMsIe6lbLOq1mSAtP1sDUcjG3HOG/keobl8kyq9g9t3w00q7k4Cq/FlhG2WMV9vwozyQTR6wYo9YyrViSwTNueNnOWsO2HoNkzYdmwTu0QVQfPRrFPsz+9L44Oy8ICyozw7/E2X2XijqM1YqOsxytPdihgh1ytIWrauHKM3ND3Tea/x9p3wpZMp0PkVC/q6Z2w8o8MqrXKZmrko9HsGodByotNghjj0mEWFsD0lGnjkTQMLvJhM/QbbgmLZbCRM6f25E/B2Tsm9KB8oZOzlxJM75ecuWzry793jZY7w/D2SLDGeU790PlkL4cXym6JkXdvPCkztWFnJ6oPFA628G9g2y0u52AdocCT/HVIDgtgeFHMyZ8rHtH0EydkHZN44uyduPhMoxRgo29DewT9idF44i6ZASFfXCbyn7cP49ABU4qdeI/+JlcoxWh1DRTiRAUZRyYQHn2j8cXTsnQuhfoLi6AVAydkyb2JX4F0MhdeFCXRL8k6+9BHbp//ACYWMrUtLjsDaryVyjNxdpUL59gvD9BNxZ0xcS4shZReODdP2ifCN88Hfg3EXTeB7KMssnUvyfhC37tNbbU9j//EAC8RAAICAQQCAQQCAQIHAAAAAAABAhEDBBASISAxQQUTIjAyURQjQhUzQFJhkbH/2gAIAQMBAT8B3xfT3kx87H1+hbWaeaUuzULqz2aLQN1ORDHHGqWy8ZK2ZMCnHgzVad4J0/3x1ORR4RfX6mWWYM3P8JGk0XPJ+XpEUq68LEeytrNZpfvw69ksTh09nK/FeUrro0rk27/TQ4j2hFLs5Mjqskfk/wCKT4r+zDr8U12+yM1JdPdLfIQfRr5ReV8doQ5OjJj4P9WPDPJ/FWS/Do5nM5nM5nM5imcy9qIyOaE09rMGpni9Gn1sckLl0RkpK0J755cY2S+qRXSRhh9xucjPUX0aNNts1eT8qPuHMTbNFpnqW++kajC8M+PjpNd/jxcasz5Hkm5P58K3oijj4VupCmJlmn1s8K4ml1SzWIZqmuDscu7INY8VjbyyIpYYE25y5FbKcao02aWKT4MnOU3yl4vUVk4/BPzQi/0pkZWWYsnCXI02vhk6kck/RqVzTRKPGXEy39ujT4OC5SNTqOf4oim+kY8awx5TG7dj9GlTtvy+0uXIn5peS8kxTExGl1bh+Bn1EZJxRHSd3Inlhj6Z/k459MrTn38cP4mXNLL7McLVi+m5ni+4YvL7WnWmv5Mjr2Rafo0+iy6hOUCScG4yF3vFdF/qrdMln4ENZi9sebHL8kyWaTfTHK/ZXgslKhfU8yxfbMXnmgpEMfE0evnpU4pWjLN5ZOT+TFFxe8fQxPwsT3aL8GlL2fJZF9bIe1mS7oiiEOPnPeEHOSjH2Z9Dl08eU909q3SKEtkYockxx/IlHweNt2j7aSEVu9mtsUh+Ut8WR4pKa+DVa+epXF7rw+TA
lJ0ZIOMhRvfTW5UanHxf4mKm/wAieJrbiRGfIvPF5y3ySaj0Y3a8FuvZpv8AmGph30Yl7JY62wZOE0ZZ82ONM5KXRPDXZyvraMLPtHrzxech7TxSS/JeCFt8EDF+M7ZJ8mVJS6J1L0TxyRhScuxx7satijQ+yWKu0V2YIIlj6MsaYov2S8cXnPbFLhNS/o1/1HHnx8IIXgtl/FkPRjjyViiUJUTVnDixS6OQ8osgnZx7MSoszKyC6JwPtnApnZi8+KOKHif+0cXF09kOEo+0LdL8WJdGGdRHkOYp2USV7M4iiY0fJBkpj7F1tQkcEOBiiS8VtGLk6Q8cYxozQdfc/sRpXU+KVtmbF+ajMegxrH6NUqyMsglQ00QfQzkzG2J7tWcTjtRyoc+yN734R6H35JmD8I8kSm8r4RNRgvTuK+NtNBSfumPK01959i1sHD2azufJfIiEuySTiYfQ4nEUa/QxsS/Iiuh+M9RGKHnyXfwJ2rXlIjm7/IhrcONVFGX6nGUHCK97LrtDySlVv1uyP9kMl9CpfrsjE4pMsYmIyZPtxszZMj9mOPVkpc/5Gmye4PyxYfvz4XRqfp09NDm3Z9P0f+TPv0a7Q6fTw5L3+mLohksq/NIjE+1+RfFF296FFGSFqj+EqXbNP/qz4N0X24WS4xyf6bsT68eThLkiDjq8HfyYMMNLjo12peoyX8fBT/TC0RyFoteEeyMBIlSJysjvYpdF2arH/vRKLT9GLTzl76IYow7JeMz6Xhniw/l8n1aU1g/H18kOmdMyKn40URXZJcULs5tCykJ3suyKRBImklyJZOW/IW6HG1TIY4xE/PQaT70+cvSNTqI6fHzZm+o8nT/iyUeLE2vW6Io4HA4EY9mQXWyRE+CLPuUffJamUuvjxsTF4vVtukjDPnG/HH9SyYFxS6NXrZ6ppyIyjVSPb79Dj1a3RARRQyuRLGRQonovoiSRPFOHckR9eSExbIzzcIWj/wCs0+OWNdikn4SGQjYqStdjtUo+idJ9bSZhe6fk98c6dms1cJQ4owu9k/BbRGaDAs06kfUdHD7NwQ8HB8mOc79mOXF2n4SOC9X2RTifPFLofKIq+doY+RHFxOJRW9DF4IzGmfYxeS97YM0sMuUTTap58lZD6rjg8av2LSx+THp1CV+EmLIxSXZ/tpMyMSF2YI9GR0KQsiFTHjOOzE/D0ZjS+xoXjYveyR2u0SnKf8nfjaXsnvCHIaojPiY0Y+ok2mR4jghQPW1DW17VtlRgdMTtC3W1ERbY4J/yJY6lxJQcd5TjH2Zpp1Qpv0xbJtehp/O2BEuoid7JkJD73aJDZHZGSDYnTMXrzSFtzlVGHFzycWZcGTCu+1vPGp+zhxbS2T7E72bb2wscmz1vjXjKJOJBMar2aTiod+zUqPJ0OPZiXQ90IR870aNqMnOfwavVxnDjDw+x82PHRxOIo2iUa9bYvCiPQpHvdQ5OjPhcezTuMJdmqnGT6ETRx7ILrxW/2sn9DxuK7EfHlPb0Rf8AZMox9C7KK3tkZC7KItxdmozuSoWyOhoj68F4JtCdi8nP/tRMSeyb9F9bdkJ0hTIsra9ozojLkT6Q3f7mY4OXox6LLP0jDpeUZSn6RmeNv/TW8vjrZp2qG7VMpt0xRFEpFI4i6FKiMxvfkYfRmn8CX7/puWm8b+TTYJadSt3Zm1uXi8clT8GrVCv/ANEv7FFXQu//ABRfgvQ0OIui7F6HMvsjlr0fy7H6814WMW0M8MUKxLv+yeZ5IKfLtEpObtleCJnJ+hetqK2W1FbOM0rroseCcVyaMUXOSRqNNHHDkhPqxMS8V4LwX6HFMoX6Jfb+2JIy58bx0iNRdmbWQlj4oUnH0TzzmqbGQZF7UUVshbxHtEjprXZKLg68m2KJX6FtRwHjPtHA+2RgV5oe8dtP/NbamnQ15L9USRYvF+Ni8kRu+iby4/5HL+xu/wBFedC6Ls47WWey9q8KEtq3oiafL9qamazVrUUk
vX/QIWy8UPzXkvL/xAAqEQACAgEEAQQCAwEAAwAAAAAAAQIRAxASICExBBMwQTJRBSJAFCNhcf/aAAgBAgEBPwHV5qdfJNWiGk8n0vhUq7IS3LVST8fJsTd/HRRKNdolOl8cJ7WP1N5V+j8kYcXtp/JH4kLWjYj2VY8bRXKOmLAr3vRuiLv4nJLyIoo2lFFFFaos6ZtK1lBSJY3F8YntE5V0jH2Zf0Yo9FFaZZ7CEtyvjPHudkOlQtL5Xxss6Zt1ljUicNuiIFD/ALSPxR+bIqlWtE4qS7Eq8cb7EL/Ano1o1aJ4miiPRfRD8ic78GLHXY3Q25ukJDI3yoQv8daSkoq2ZvUNVP6I54+T3P0Ri2e3JFzPbk/JCG0bo9+N0PlunvLoTtE8sYPsT6svhXJLVaeBPTN42k/S5n0Qx5E9lGLFtgkxLStdtnsRux86sjGjJiUxKlSIJp/Kh9EezwWOKl51j40XCd3o3zQtLpWQyxn0vly5NrQu42Q0Wksbb6FBVquT5x1atUY8Sh3yWmZuK3GKanG0OSFp6mqs9LmcumZd1/1Izs2l/WqWq4vnHWTpEfHNnqe8VHpcjitpk67ITvTLj3pmGGxkZps/B2Qz30bfvRzSFlQnei4vnHW1LmhmVOWPoUaXZCSlDsx3F9kJpmS1EUiKndji5EcW0hL6ZLwZptEcjswS3LRcXzWku1RhwuLt8lpLwSybXQ7YrJMxZKN+6JX9heBRNp+I59GZ2RXR6Xok+xMs3F6PktLLFT8axnF+HonfjV+B47lZGCNiMmMqiEizGxMcib0yRsxwILbotUbhi5tpeTc2Ra8aZob15oa2L+r8nu5PyswPdC9N1CluJqmREkZNEtIycT3GObLNzFHcRxJarS+C55O3TIR29sWSppfvTNklDwrRJ+5+C8Dx5Pxo9MqjX60mY3Rkl2KZvHK+FcIogqPoYuNl80bR45Mx+n2vc9KvojjivGiSXjTbZKFDvRC+CC0+tFqyORSutGuuhcnLYrMeZTdGXJsRiyzm65rRxslHWhLhdDykZ3H4HUSX7L6LFxStDvHMlJzZihtXNaWONjxm1lPhVE5nbPTvquC4NCkpeNVxRnkpS6PTpbuz60XG9UzybEe2SxmwUDIv6nqN1UkenyZJNY4mLFtQxFHjiopePgyz2qiEHN0LFtX/ALEzrgyyy9ELS9OjocbH6ezF/HwxK/v5LFxeFS7MWNQGjtCd8GWWXqpa+SihP9mP1WLL1BinubQxaXoua64x0uhyO/oWlIlR9jRTXC+OXH7kHFfZ6H+PyYsm+RL4sknFdGObbp6rhE3Hk+xauiWeET/rg+iOWDLvwd/el8kRJr4pRUlTJw2LoweTaJcEbTaU6ojoybjZ7WOfk/5Mf0S9I1+InmxmPO6/sKdiEVwRFE/gvgkl45LW9KJvonBt2PFP6P8Ayw7F6vJ9kfU35H/YVIUiL5Y2ZFovhz5nB1Ex5N8bE71uhkZV5FwRmfRGXYikPHFk8dEZaUQZEWrZGaiMYtVxo9mO7cZJbYWiGWM9WrK0jJoi9xWslYoJcMgkvssTLISIsbS7YpKStH8j7jyWvB6Pf7S3+dJC1Wl8LPUK0oxMOCSlb4bRQPbRGCRZej4WSVjiOO0sY8qxx3M9J6yOd7T10J5ce2B/G4smOL3iS8CLH8Fou/gjwWjQ3RZetInG0S6ZZKEcsXBnovQrC97fwLlQ9Vw/+EdL4uNm0qi9IxNpPCmewRw0xeOC5Li3RLJFEp06RG/vj2bXQrvjem02FCLEhijfJaL4Msb7JyUyOKN2uUV9m/6PPKxMvVCZQuv8Di2+yMabQlXKJWl80xHu43Lan2Lox+rw5JbIvsz5Pag5fo9H/ITzZNki+65ritX8C+KC9R/0dktz8Mw+izLMmycHNUmel/jcuPNul4RKEZfkjF6TFhluiu/lWrddmb1yuoGHNa6FxQ38lm4ssTL42X8HrL9l1p6SMu+iHj4VyXCvnWsqa7F6
bE/CIxUVS/wXpWtFC6+RazjuVGKGz/EvnXx//8QAOhAAAQMCAwcDAwMDAgYDAAAAAQACEQMhEBIxEyAiMDJBUUBhcQQjQlKBkTNioRQkQ1ByscHRgpLw/9oACAEBAAY/At85SD6h1oQhBtR7dFwvzMXCjnai3PPj0AX9yynpUt9cKYJDYlBzCZTSdT6PidC4XZiuCgu7FmL85RB1WVwl60XDog1p9IGOPCd6PUC+V40KzVX5o0HoOMrhMlWpiFJrFq4n5lwgK2JexTog4aI+lBCjuPW5ZufQw5oK8I/dcpfXcv8AbuJUveZWu5mGhRpnXsjIKg+lzNQd6tz36Bbebg2TXt0I9DLyiKOizVHlx91aAophSVfXCVDtFLWCVOUSi6LqIhHws4UHX0WzO7dWPpalPyFs2DiWzbfyfQfahZnOViuN0u9lmqk5VYbg3riVELM3UqwlcTefAUu3dm0wAmNJsfTPexsOfrhpjpzslLrK2v1F3eggq11cc0KSVlpXWRwvhdPI0TOTLnAfKtyi55gBQGOLf1LPTMt9FkZqs77z6OycANFDuTClt1lqNXBhm8YPKkrN43oCPnAueVs5JaeUcna5wO01cZj0N1tqndR6TRS0cQWZab4JUQs1OxQblyka4Zn2LldwRY10hQr9973RGGam7JPZSLu88ucjZ+EfQFcf9MKPT5U8C5VxvNxMBQ7QKHaDRTmUq+g3+yu1Oe8wEOW6o7sg9v7hH0GRvUo9TdaIndGLii9oWtgi53SoG5mKJlNz64l+WVmeVRd/byxS0YBPym5AXZrEegzI1joFJ5JPobogBSLriEY2QlFOTsyyjoUBBxHFjJV9EC7cew91l0A1KbTZoOXLetmi2tQcbtPYI86Vw9SEd18qXcgos78uN+4QAFlcYcRsgoF1IxloXSEOFQLBcV1LlbDNjbnHnt/SpOiAGgKHJc4pyHoTmRTjU0U0bOCu84QEWvN1ZPp9wpwl2q03TSonTUoNqGQebsqMZu58IU60Ge6POlPnyhSaraRymub5Tj5TmePQXduGdEctlC4Qto3Ve6bUHlZt4rVOq0bzqE11UQBfXmvfEtd3CbUe0tY1FXu7wstThd2wc/wptHhBwTnHsuF1/B3nkInun1Tqi/vKtySEWdwn++ErMrK2/ZQcDCMuusriAUfK8FQCipUhFEL3QaTpvy4gD0LkXDjC4B+6yucXJzFlLTKAdqqnxg0uM7uz8ptNiso9+Xn8pjxogcCFCLXdW8PC4VIw1U07ko5xxK+pUt7BAt07oOCyohR3Uu/KylvSUffezRKl5t4THeR6A4W3YOimj/CY0jQbrUXu1QHum/GEckhFp1avjcFbtoswwvodyDoe6tjCdZNz3JTwNUCMZRTD4OAP4oP3XtGqhuncprAbBcWitzjzo7YBqAwO9Ix4lmb+6jyrIBT2w2btMJ/Tor6jC6yu0Rp1NOyzBXUprm6KfCee4RRGELMNE5o1QCI7oNPbehggYwdOcd/Mf2U2jwg/dLu+AHtgRg3GE5PY5Sm5exW0CLB21RLdFlXDqvhZVLdQoOoV9Eajel2BXEszOoI0X9QV8MrkQ3pTR+pFXx2Z6EY7pzMDPpzvw3UXWXZuQbvj4RKccKeMJyc4eVLdEZRpu6UCPyWliszVdZDoUT2R8ys9LDN2CD6X8LKepXV1tqanCUw+U0nsg7ypxDm9SynUL59SfQD4RU4U/lHBpRTmlZe2E90WP62otOrVfDOv2RCIKnsVHZOY7R2iz01lfqpUFZTorYU0HDsqXxgRg4lbQeUw+/qTv5GHjP8AhX6hryGr9sW/OOdSj5VtVDkHptVn7oeDqp7YQU4FHxGEPWWbLaN1asuWy2tMLJV1UFZ2Ygo+6g9lqMSE+ii09kCfUHffKzCzO/vyAgsuA+dxzUcNozVGUWKexWyd+25nOiOyt8r7x/8Aqvtgz7rJVRBAvhmb1LJVWQr2QdgAFNfhYvtvM/K49FPdFPnws41T
mH1B32GYjVQ3TkBNHunN9sGfOLk/5xhzgrOGVBzdCsp7qfGi90Q7AZNVx6KVYK7gi4aNQXWFwRmWV6kIFShWqfshTp9PdWKh9yoPSVHbCOxTnDR3qDvQagUjTkhQnO9kVS+UcHouwljwvuAu+Fai5Q4Qg3uEY6lB0Qe1R3QZ74FHK/Zt91x8bvZQwQ1cJVnLgKivqoUIBNjsE9x0OMjVAoELZv1wv6c7r4wdPTNuTKLk0+6cEw+6JRTgnu91lcFOWFAcJUonuuLRZkSLFGlW0WZuiacHQFkLrLiEbmWoOFbSksrlfD9kMXSnD3wDhqoPUr8mMTyzu3UwfhQ2w5OZSj7KUSpUK6cAr2WU1ApYySrUldsLMNVlPbD38rK7p8oHwgV9vutpVTQNNwO90J0hWFkHjrCAOqa1Ri6PCc0+VfuiCg9BziJViCpJhdQXWFDagndGdzR8lS1zSPY82910qxV9ObCcD3RpoqEMIRdmkrpylWcV/UK/quX3qizsviQQnNPT5WWUFlAVMtCzxbDismtYJdKbKDW9rrIvacAobdx7LO/+FZEt11VtUGOFx33IlarVSV0yujAuPZf2qGNRpVdfQSNN/N7wpJtjs4viKg7pxThhKOXVeU57uIwjaMbpmpYdyCEZ6ceJbGpbwpXEtu/p7BQjU7nCi5vYqEUalT9kUSLsCeybwpnlcDsq7uUGw7oMZcoAjiTan48/KxZX3lSN0xP7LIQZ1TB27rg0XEn1LbQ63xI/hGmdQnhZinYTF04eyION+yZbtyTIv5V/qXj91eoX/KhotvNA6BqrmwWSjIZ3KITnBcVtyFG+Vm/Mo1KikeUz45uU6ohry09nBMl0uRPeRCE4U+HO1r8zm+RBQ2ObI64B7LMNdEPyL3AFEMsA7TAcWtoTgNSNzaN76qo5v4o+VCsjSdqijI4ENmhlCc06lU29wOSVJcVAO85zlVc3pKms4/CIpgAItc7+MfdZYKl/dF1NFjhcKN4psqKacX+UBzLouOqIWYptH8GcTk6f1bhYYKa+nxAGfdOLTN+xXum+E4xoNdwtPdVGdii8dKI8qU2v9L1K+qtqjtbtX22mfhCpX7aKByr7+znrWU6KSnGmi464Q0ShUeLohrRK2RsWKeyOXCy0XSVfGVJ0UDme6a7snPzuh2o7KT0rIy9U6DwteJzvuO9/Cj8XzfydyZsgCSp7oFivZWYYPedwytoOyyO1KzfjgaZNsIKktlQ2nHoi49k538IO7ppI7LI3DK1XbNRSLBRUBWamYqBZS8bNZmdSyBpQ2/SuGEZAhHYkLi6go7IwraKybyjhx3apL2hbP6RsuPdS9813flqGrQbQDMZOi2xdDR/lRjDhKDp/ZXvjm77l1xaLa0Leyy1NVH4KGdUypGNh6G6LGmwXBaEGvuEG5VbVWK2jhxJzXDhCyMborraU+yGcXwkhQVFOrm9goe85fAVys7DdCflHJDG93lEanyh9P2KbHjlHG6cGtAnWAoaP8r+m3+Flbp6DNTs5GlWE/wByN9Vs598LeiNNlyUSdU4edxqBTqvlZn08yz5mj2VQgcC4cSxzzszoAjXc4AdhhcAhd1GEA6JqHKO7mcrghQ039DDgpp2TXm/ZTz7YSU5lG7k7a9W9K2VYx7qWGygolzBCy0Ggohwuoc4BAsdKMdbLhBrnnKNAuAQPKk8TkzZkuq6uQI6xdMzUmNI1Vgmv8JrgjySg9j3ZV/UchPULFElT27YZj1O9HPhcXVycp5ENXF1IuFgoOu7kag+txHwoAjCCjAuqmbzhLHGPCObVOfl4X3CqOaRDBKzYz+JwMznnXsodUsrckpzUQv7Tqtkw/OEuHA30kI1GX9l/ctd8vaVxiOR7okahEHdg+N6XWTpfYFZW3WZtkQ3qFxg3PHF4w++0uZ4CfAhhNh4WXUq6y0+pTVJ5JwzDR2Ia3UoNGGiv6K6zNMfCh7Rl84WPJ4islG5WZ+p3Lrg6t2TvFvcqR33M46X3
Rr/iFYSoC1wmqbL7TZPKODs1sdq7qOmBwuFLeeSiXiy4cLrx8KadRxPytGwvutwsRjLiFFK5RkwFmO66mzVX3cwUVFwHce53Fm8o5TY7jmd9R8rLXfkpG5aEWU2ABbWnodcPZZfxQaByjhkZ0j/OMHqbbGxwPPFMao5gNFnO9oF4W0pSVxLgXE7HLuHLq60+FJ3+FcKiupa7lVEQrLMXtQ5RWVvU7CyyaDuUM12Hvu8XI/puKH23L+k5SaTleys4IlOr+LIU2+UBv6ItdoiWkbocuIJrGdyo6qn6UQRkqt4o3r7ssKy1lLTyCnnCWSqbXv15Re8oud3w2rh/0qpU/WUfpHtJkxMWlfbnLyjCL6t4UKkG+V2WVou5N2olfbEFZadQQix1N3ytp/hX3rlcLSiA6yl2qvuHAEWIRJMnzuNLRwG+fsjlMhbKqLO6XfpKLvqSGN/klWwtuA03QgyqVrK4ablJY4qHgsPvjUupQKNWqPtM/wA+yLn3JHK4cuRugldH8LK4EDuoC4rtX+ppNGfzKD6ncwAEKlI6qDyCpJhWusxKvUCZFQYaKoXeVDmysn05DHLLVBd/cuF8q+F1lZqiahV9N48gUQP9vEGn5VtPdZy3O4dI7fKbtP6rf4Ix4VfGQpQ2t2r2w2gs4IF6LmhS51vC/t8oNam/Ts/Hq93JlTwUHsMg8k7vAg2pMjSNQtIp0wnbOZGoOoXtvwVs6o7rRdK/prhbCtVhRlL1xcJU5rI1v2UEWX+3Meyy1uEqURN0XO5ORgklXqjN4hFlQQU/aMmdMYV92ytrumy4jZZaV1mqvn2UFkrNRsB2RZ0rKBdMeRZO+pP4WaPdBrbucV90bR/vooaICZROp1PjkHdsFYFOp122KdshHklGkw37T3WZum+BF0BvXC4ZeFkdwHwpF06rOZp0U6ORa8fB8oudyjm1y2wpebrRXwvg1z7tGoQ2DctONMLcgqXecLoxdVDoJQcOIjypp+RA90z6elfZ9Ufq7qntWETpIX2H8LLfKgBgPlFzjJPdCl9R+zt87rnkTlEqo11PIW+6FSm47K3e0Ig90KoqOdGgKy75d25Nhfyi2p0eUNlopfZVWuFvxKdTI+OVmbYhQWtzIue6SrnC3PyHpUgzKz/UOPwuFoVX5wqZjwhwKJZHknuVlpNDB+t9l9z6ps/2tlf8Z/8AAXB9KP8A5OlBrPpaEntl3zhICuMCHC3cJ2zaGDus9J0t9lqtVJWbtulFztZ5N1pK2lP+FAQaeyeB2PLOJG9HK91FV1ggoW2br3XlZRZpupa4ghNqVP6vchbJwl3aO6nIP5WWq0tKFV7eN2nsN8qFnIJ7QtoBHYgrZZ2ip4lSxyfSBykosquBJM27LM3THLjJX2AP3X4wrcw1BYLONYRd55bsTvTy8y1WoTsqvqvjBs6uum1SONthhlqNkKN8qUWVqeZqyUoa3wh9Q2rw5pIVQ/T9fZP/ANVMfiTqmMpsBkTJTHkRmEwjG64poDhotnQUcuVlboFl7K3Ldid48wOBUyZUFOjRVW1GhwlTs/8APMO5YrWVxNQ2gBj9QVsZjApwcftqOL+UG0NN8uYuoKzwutqO0ui0U3BSSo3o3nYu3QOblxzBVB35p5euBGiczwtI91nzBx3uJwCLXPEIubVGTwuILrAV3gqDCzUlBsvZTyo5AUcsq+JHdUq2rTrCB5Z+nLoqBHeAwtuhrdSs1US5ZWsk+FfvvDa3YoaFxNstArMus1B2VZPqAT7q1ws9IXUEabkNXGI3hiPRNwlxWX8AZXwizsED9O8Ajse6yfVM2TvPY8n6ut+hrv8A0nt+p6WDrVt66JO5qm1RctVqJK+8fudgo3sr7yvstIC6XLoK4qJWV3UjKyzLcNozTvjAWapqp7L23Z3MqOHxzc1Zwot9xJVq1Wf2X23bQf5Rf9T9SKTPEKoaQ2lI9JIyk/sjJ/09GY04iiGve7/qVFrT1G6yvEhRTOegfxOrd4irSMeW
lOiplMaOsvq6n63NahbirP8A8BU3Ol20cYH9oQc3Q72bc1XF0hGIC28RCvvB4ZmCyzfwtcIN1no8J9lkrCHK2DmnunN7Fe62tTkzOObzieY76ms2WtsyfO4HvJc0aM7T5R9lJVbZU87mgcM90weAoPGPdQ2zvB3XAawiQ0PH9pX3WOb8hfSsGtR5d/4TaDL7MBgVP6dujIp/+19bXZozKxo//fCDukntu5d1j6Q+UKlW3soG/wDdK+zZyJpuJUuZZQ8ZSrrNlE7hdT6gsr+2qDRgFbHi0F1GUR8IFvS7sow1xgYTvW3aEd7n+dyntvqXFzHTLRGYeCqz6czVdmdJRBWajTFSoSM14snewAwfU/Yb9Kn4Er6Wl2pUwT/3T6ztGzUKqVz+DS9UWnWq8vK+ipR0MzH/ALpzKgu0Tm9uTTZ5QHIa93QgWgLQKFw8PwoBJV9whGr9OPlQSsruvDLSaXFfeYW4B7xwuss2duXzKGz6W9/O4VZZnc11AniYf8bn3DCIpk1H+AE41256ZOg7K1OtPwFUeCG3lcdQ/ssjNN9wgxmDV9fWDHSBkZZV3Cm/PUIZEdlUAY7aVXgR7BUWMYSxjA2U+oWnJlIB/aF9UDT43gNF/wCUGVWyz50Vt/N+g8kh11mom/hTAUVApB3yHLM3pQe3VP8AMKnlF3CSnMdoU4eFc7tkUJVlHMFWkbj/ACgHOFOr+krqCyio19T9IRcU7EsdryzvXVtEGNCk4lVj7q66gteSXd1ldopad6yLal05idIlbKqC5o0IRbRY7Me5wtv6c/hcCo/8ppcQES4zZFwRDldNcgfPKO7Ku3CQLri3Kwm8oy6y1XAV9xwIUOY5cJ3tmMMjt6VcwnFumgx0wtjZS70kqfZBSO6goNWyrco73DqpeszSo3BWZ21CB3NFmomCstUfupaZxJRwBCHbDXElEzYduRDeTZX5oZ4CATj+QULat1Ca6LxydUeRGu5dQdCs1O7O4VjB8bsFZqZt+lTo7wpKIabncsgHLWFw1AtZRjVZvO/A15luYR7YFqe33Ttu4CShT+npue4/tvuoNf8AcHZVPp9nGWbrb5xs82b3RwsCukq43buXWF1haqMM9E5SstcZfcrgV8Z1QcNDqpOiMaDdHhZWqxUVDIQA6VO9PdSdebLVB5TtoYC4A5y4AGratZTLndu8pra0ZSi+m2Cd+pVpRtPyE6JzmsaHHUga4FGjTOZ4vPZf6ZuYv9hZUz9MX7Mj8R3VL/USXOGgCFah0nDyVwjKrvlXCsrPKh4V3K11xNU06hHsofTn3XE6CpzKc66wixmiO4A1e6PKbz55MqnJ4CbotLWinHhQEajmCBrdbaoOEae/If8AUGpIM2xIPdGsHk/PZOr06f3D3w2tVxp5BchNb9O/OxtpT6TOxXFfeurLhcSvuhcDlopLBKzU+H4TmvcVZaKIxhuqzO15jvU1B4WQvJb4TXj8TKyPMFwuE1tOMo0Vt9rfpnQZveJVMVusC6rUNmRs/wAkG0XQZn5VOiTOUXWhWhWxrvaNpaCdUz6SnScadS+c+VWcO7uQVqrrhMfC8ha8S9ltGBQdyAto8XVsJ5J9DwqFfenQLZN/dMz/ANIlPLg1obosyEPcsrnnNyGV2VIYIT2UnZXnumsrOzPHfBrvp9ZvbQIVvrOH39k36n6eq003RfwiM33SLDm3UsN1lr6qdQi+lqocMMrVmeNzi1X2xZdJV96PPop3y0FBOB17IUi4lre04gtcgTyThEJ1OoyWlGj9OBTY1F9QkuPoIPQplHMF9tS+5XDvXaFZTTXEFfFvo432fKLG6qTjJWzJ5J3Knz6C+Et0VteRbE7PVDaAT3Ut1V9EZRPOurbk423Gv8Iv7KBujkQpwcU8+gbbj7uTqrRDm3spVMg9RGNkYERiQo3nZ07LonerhZzTN+8KY+CSowbHlDlvKn0ALnta+OIFOYx7XPcIthK+42/shDIapaY5ZRPqwxgkoOqcb/8A
GG1pC3v2RIJcfycoQAQ5BaxskarL0hQE8KO6v6S9wszDA8LLUOUqQbckNm/rKz++mJBEhPpTDGmSVIRq7xmdcSP1hXWiIhOkWOik+kstbrRZagkKWQtFotFotFonOd2Rf6ysz98c1QwE+pE+AV8r33SntjiHdPYRJ1lHK2IMKkPAJ3LdSId29RLCstWxUtO5K2VP0Fubnp6rjpNK4aICmo74Csm5dJTR7bsokd0XDUo+E6q/V3+Bu7RiPpI3dcw91DzBW1LrKJRZRPEiXetc6YUFC1l9xxUar95QG9YwrFqzVDnKk6bpDlV/6vSTuXVl4WzzE/K8Y20wHqh4UDVU2O1aIwnlljGmSgN15IsSr6K3/OfuNDgVnZTAd6TiCJZdquI/5xCpfHpy5ohyId/zdqZ8cn//xAApEAEAAgICAgICAgIDAQEAAAABABEhMRBBUWEgcYGhMJGxwdHh8UDw/9oACAEBAAE/Ifiy4xgm6dQ3/wDDUuehcGi2A6ngwAEECm+4ooul6Zc7zqA7C+obBZpj2Nf6EoB0Te+euL5PjvmXVaeJiddlYCQtuDGE/wDtYE+bgxbmCVFvDDHoBf574uKeobgiq1+CJUvPMsM/TM8vdGQ8IgCHmJyqQFvUb1+ZZzl/xwSq/hNcHuUuEBeCEbHBSgFkuDfFxg9//MwBKwR/3PUtBR+YH814Y0a4s+smJ9xG20eBjMflR2bXEotwIqzvJDBXLMFjaIE16ku/4B8Bx8iXaup13JoMd8E8YlhmGv4gjv8AjYgobgvLPMrP8zpJZfaSJkD6k6yPuBL/AOZ+wuIAPYuHjAF74rGYilJ7gyrFx6jqGSHBwfA1yGL+HUN1HEBLn9yJfmLjPyTI4oX3K/hOH+PQcQ+9oOg8TtmXHXAw+Fy+Ll8dTEJi34mNd9rPVKFiXO4eJkEZrGoFieCCsEq/cJU3lERUI8J1YPlO/OkMDuMTocVycaJgRPgwi1mmIc98UrYvUw3KJVcG2gPc1U/Uvj8/G+Fy+D+Dypg+4UblT6hhHsnbLJZMcXLly/kVwutT3Fd/1cS0ETiJoUpm+PcNDEauVuNSsw3NLk9xIAYPRRXIqKAdEb1IOqBFbH8Lw6Y7gEMWxjbBrcNcrPpsTuMpsrLNN8WcWS5fNyuDXzvHFjwW4kie1Dws9LPenpZ6mepmzDLumelnqnoZ9koNM9TBncFdDUrKuD6lsb9RlZQeIqspUqG5eGFvmqiRDFswYNPERCGZr+Dqoyrvhk47nV6Vkx2zCoUaS5h+2zAODTAixnpZ6Wehngs9LPSyxgoQ+VQMlZxXzYLTKWLPGb/jj1AtJAh3xcvHGJ9QxLmp+JdReMNH+4e7eMR750FKHU8rzA9wjvg9OJlLgDjgecBl/Cohm5YsUzjuLwhTLua7+RlHrc6Tbl5pV6iA7HmKxi33EZIgjmsS47or2xFQUICEEbJUhfnDVsy1hnB0Q5oNwuofwMA+6jyQOqzBoIj1H/4hyxhO0ZfPaVKncWXuXBGT3wfcX5XQZqOuTh5FjL1FMbqJUJeXwU5SyzEK9bRZ/sUu0x2Q+S+JG6DDZNrLNsNRrhD/AGSZlS1o7FxCiesx7CPEyQyu2UPiPPtuXd2TGjsZbnsaWfiWKr7cJj+AFSn88j9SG34XxlBDFy5cv4Nk+IK28L1cqektg8Y+NSvhbW+I/JgmJpl/G+NhGahmPcks7Svi6ilJZcpUrERv9TrrzLmdkJwwq7F0Q/ZxMo0aOBiINLBl02Qbt+J/7kzde8z9aEI7hr5MWnHTyzBE68DwO2LfIQgBCVx1OuExjcM478R8PJ/UNV9zcY5b5qHCs8Flhm07gYlSoWOINSlmDiKTBxSCtQLKQW4J8BDOxqWEleYjCzR7lqs5i8kGmblKLWX5w1GIVvUA6W2BAjP8MDjL6IlS05pRVaqJLb/ibk0H7Mf64NSs/NifEL7PMpbMD3N8e51OoEEr4MOSyx5jZHWOiFbQTGpZSVKlcEmU
Kgndw7m/yog08yrZwEnwRoYhdszrQ6gIraNTUYhVt52IqP4RGfthMdwNdI6i5lxuBhax6lvUqC2xDId3ABRqVx1EInBepupqQ+DUuXDXxZZHeb2PEdHmo8kdcEMQly5fJxakbi9nwgE0yfuG81aow7Gb38QYHmXHLdSmKixcVnINy+LihRAcC+DfN1KxBxuIv4OQDNUWUuoekNawKlgC+KI26GZvrxwaOvuGEQogJdhLxCqunUrQogoHGVf1xUUaBecTR9QnUP4v0Pg05eXkmlLCyTMFZNSEYaRqprcslksly+GdWEqVW9ENxDkJ1L4uDXC+kS4s4jZxnobyzDShJmxvQSymxcfEx3cs1GlrStz3yEo0GJfQl+JtqCZUWKqhBNSpHW2DFJdwrHo69TM96t2cej6h3GHzZkyBl/SU/GtcjP1OAcAx8OocmZogItE8SnDBSQYFEMNdS5cuXzrG8JZgsdInCzuTaOEV/JmisuUywuYJEC3MIQco1BYZRbDbMBFdQpKIM9XhB+WZQUGnXwISz0H2xHr+Jbxv7LhbD+hvnDuH8K9NdgfqZIsW4tn6UKReEzE1HTTwbGfCZAv0Tua69wzShuiakPoPAQIEq4u1VOzfMYXhcJPsI6jA8LPzDmqS6lmpYMKw96iwkBJieEQu8rCz3Cxn6R2xeYNxu76lxtKu9LCKlZrEwHe8qhHfmOaLHiWJwbIxWuFg1c3DqZZgVLsqATP1FNDWAiW61MiBCBAjLywvKjRCH8PUNSoWlmyX7K3fZ9yv4fLAQE18sC23qfUT1MNbbTxFUQ02YjcSe3g6hqaltDpH15MwexKiOu7TzRc3mDeonGnCzJCtuYt9pfYzKc7gXV1KYeX6y53rZ9SzD3y73B7ivUVP9k2I07zc9w7iQpKzQrTEp74y8af4puNFjXcz38jKCdksIeBmwnHmILS8Uz9uO47gVAQJUuVu6AmrBoaJ5bB50pwa/j/SjmBQKPBHv4EwCtkBt6fOUonIc3nEWHzO7wGYojNx9RjuzAAapBdT8DxrBO4cYHuIxn4E9VTLSEGjsQ9eZQesYj1cYcqpFWGyKx1N98HuNJ+FBEuOH+DELvDmFUMSvuWfpDRJVxcAlY1KRYEYuD2eYIruZEZg2cS7VPOYrMI5xGdm0oLXAgcNuwx9zS8bdENwRoWYchMAODcP4/0p3Liw1L5O5WOFl0Zi1gKm+4Bh1iWf5GN/qhIGyeMwCFKQbiy1Ox59xL33AR6MVVwC9Hn1Frx8y/meECeu5YrWYFahv47HuFn+YhXjAQzliNqVamAtOXqO6vmUbxBXUAFlHp4FRyZKJycRAKEe4y6XhA1gRTskxijSMag2YgZ4vitzwHJ3ZIBiG3+HqdQn6kNseCECJCvoeWeI+iAIV0nuMsi3cG4RZtid0SGLLT0sR2oKDxYkC3ALusCxVa8y9WbGWl3BgLgxv29RHTVamccYm4c4P3H6BL9lzQ6VUX0N08NZYkuZ61c0D1T+8BFUKzBzXO6ViMq5eZU0sgc6lmzzHT6XccmoqU9MIAST81KOA1wQ+C1fUNv4nUrjPcXkcWqLqXFigvaY/uZDtNvvkMsqFMwwrdx00mtiJHUHNzAe2IVO2VPnHB+ce8rzPH/eGjWKmBlYCPprzADtJ/0Ma871EovbLUukaltKNtXZL8utkPtaSJbDsRCz2JRbbi46MQdGoedMLlxi182arCxD8tTXIrCd8F1m7hrBARdLqGxghxVckJXFqhuZh/F+pHvgh8BPMOSdR6lkjLSi+UZwHjgYED0U0cff1Yv7a49Eqxi+J0aOPilaZn7wNe5MewneMHe2SirkJ7JXaibLEniW744gNomzkfSgXJYR+lYm57x3O+DtY6ZnLdZUB86kGO4isSd8rLnUECHFomzxVQ/h/Unmah8A96vSVlumOuQgcLGFeJSyHlfMujBQ9x0GL2yN9Kfk0u+qK909RG4+37pLR2RVBytRS3mIctETRbajWiGS
+o1ct1G42bhlAngmKj9I48x6qjM6YgeIlOXOmfUpmE1qUCPw/ugIXuKyKm7goeMTHJHqpCidfA4GufRN5cuHzO464Hbg5MI2ubF8QD1NWjr4rLxB/XKkd4nsFS8PDO8DMwPqUw6j2QrWYhIb5fUEB9YlA+jzHI/F1Bu86cvSbJqk9QRk6FRLDh6yhWvtFB7KXR2Md8vi4S0CvqNAr1j0kTTEO2BQ6al9XMKQgLxiN1s1WGBKb0gHRelTeSkWR3LzA+hZnagMytZDU75VnkjnR9cGv4SdT9SO2HcIa4YVK0e5CRUPHInTLxFlz9Ke8kXOtO6fmIcXMMx2ngii8xqTDMQwR8zLo3FMXtrB7oQMNzb24bjaQsO83LsHOBLBJ+Cp5moaiWi/cIRtxHPYAdwcP7YiV7LmbrO4PitstSGfXAAY2JjO0jdtfuLKkjtKp8glbrq4UtobjzE0DDXBuG4cXz6n1NrD+LrgVl4JeJdGZnExlgK7JnlcvEvjqZj1PUjMFXTSdCfkorek0MN85KbQZ1KuhySmF6n25oq1vqZq01ZDeZEqh0ZSRLZ9wm146jB9IhTcYmYsn4jgQf3jQt5FGP1iG1VLJSw5tlkS+ce3uZx0QFcy8iDzZbgpajMG4IaDdygescDdbgHc+xUar/WLwnogTUIuIN8YFiofU0uoPqD/ABfrTt4vhGvms/XNATt8fM2ZvNVPxyV9uPWqmPjQb6MocuPuNU6jOtUw4XDBaUzMfKEtWNpUOkxlOmCil8LBe0R+4SGi/EQG1EyXQJT3B0sXMuC5kSUQqzF0RZi6IzAyxYVmXgoX1mFWzvJ60R/YP8yynuKMKNIYbbs6hG24J3RBPZneyHdQV3z+BBgep1M4eIENfPqM/U4PFyiAseo4GejiAQA0HwOuWaJWn8pl8+0tPniL6EovSZn2zusQXoWCxINKepWL6pWU9S6PFAZqN8FQmZAEjOmk2S7Xw65BnBqUDs3mVIw1H8TOIamD4YdmkU8FipSvBb7n7qIHjVDNsuBSUZCebGZwMYSrGOoCsA/3CNqalZnJTiJQz90HjFAm8Rs7uKoZJaypey+ghpa7BJtZO4fwOpdCZi0YGBrYgt5eRKhCX8SH8ILU1pmGymYB6hU8GqP2ahZeah+sxUvUqh4iJ9SZ4ewQ11P/AHZrv7Jh0r3MELQZiWVpeArUVqsze67IFDUxBVuU4TTxLhTjqpUEYS/4GVTwICWWL6g2mAzAAFLAng5CJhQh6pFYmY91WojLSzKXS9S0lu84yllIws3CKYxUyoAgY66nrs6rINhNBcLW3PBLYkIyDngZWaZpD+BxK7IdnfAxUPscHw6YtC9RIi0fwRZIeTNEWcssm0Gk1MqjAt4hJ2REt1KI8s+sEYnhiY3flEzo0SEsGXUIzEKoZdL9lFtKlYiICwduvTPCMKzABNQJj2jsRTphrYXoiwcSuiNRSPMAoIg7teIEL+p28dvEF7xEeI6/MWONRyGZitFt8yklVAvhXuVCGcly2wvUQuwPLDWi0i1qahrMczquswnzngP4Bi/uGc5nUuib1ptj4KYQAzupeJcQba6l4hBXTVRZ4jbuDldf2j3KezBNGz1M7LoWqDbMd+oFVkI+yNLh7ZenRPYpQmhhjqyiDlZam5S4oXWIyouXFKIAvF0gYhZCBCeIXS5ch4ziVtfRHo9NFv8A7tARj0ltwKZ6S6l3N7N9BF8IiyX8kmOJjfAd7OaawjHHmGICeWVDEZTBRNuZUKETewRXULxN0yCXZOv8JHbW746i53H+owPZ2Ypu4P8AuU0/5y7jIFKCnFYVKxqwe81iJmacTl1XUU5W0hVVsHbmVp29uv8A9TFzDW+wf2TSuwnLdGcsK4FhzDa2sxvrcQfGGhAF3BQoRO4sSrFNNfma+rFQ1OhEZrk18CbAjof6YfYffO5Uz1FI1uHZy2R63WrY4HAVtXtFXLUQrjML9I/RnmosMqJkTYYg6sUPsMwwQvcp+eLu
7jfI3DNVCFyL+oXFXYjLYa+XUYaYUHzPUs6y6PMsGF4iKNl4n+mlD+ViD0VfU65YwX+41lfQ9pemiV7D0ymDvrzEWLfP+oa3/Mc4s0j4Qu26IAOWWDeZUNtm2Z14VZ1DRYEvEELRvpJUIf2ip+soyf0Q1uECBwcEzkS7i941z54t6l1m0Zfd3TCNoiVU6zHsWp7SiYGWwS4+puooXQz3EWkha2pku4243lyvU/4emGqMslsc5KlBZqHtRlD6Co6u7mn8C1EqkMpx4RLnDFRe1+V28kuLC2B2zCnB/wBiVEsHuTpANmwoMH/kNQblYYvgppuoWuHXuFme8akzRZMMUigBhgQJqHtGBfaIg1lMCgx1KLDxG307mV07h6NRxQfUvAp5I4VENIFED51KIc/3I0KdxpYRzicLLIahFQwibAuYm2ZXPMDD+WIUK+pYIQ5uQz3GPw6djgzKxz5qLiLQoBErqdriLlnKamaAmmhVpZuoaY/Xiy6BsQmRGwZp8+ubmwczGV7MRt633LhOIP8A0T/8awfb/iALkuH+J2zMYUzlUsl1yxcJpKKgmiD3yusIrnH2lcBfhNYBl8piMNKe4/GVqoLewMNbjE7XBK9kGsUCHBD+PGbhuwmUboa3MZYKiHQgCUXdHxRUZ5/uYh7YQsKtLF28Q5JeVS/wJm5SnGCWWS+BrAlMdBGl09Q/cExEKA0wnmDW6s/bCh2f+rLxZM+UekbrlS6Ahr+An6Ud8AKAnuHVzWFcEEgWhZB8zIXe20a0nTGotquWHDr+EHuKmcytmqJdt4mXJB7agYF0uWXZADBlBiULDjqE6lw9fC5cMweLMqw/EjHUs1yN8SNp3jVWIwmkguOoAaYVA5z5hSs3NIga9zH1Tf1mAYr6IcQ/7gVUF+TgaevLPHmqValrHmp2mF0+Zm8+0sqbMSj6NTz/AAan6UW5fBifgQB3EH6KZH/GvmECHyDEDEN4m9Y8RUWyQgfSfUwhr4dQyYnamprbvmpUbi4V7RFI4OuooVfkzdeTqGMBhdp9GlJY6UJWFFB2MUEQ/UJIdU+0HIb8wW30lzP7LzE2ILcT9mEDd+rlg934B8EC5S0TzEczyDK/cdWq5WO4eg0R224DL5/rRlE8IOmbv88uyuAhhQZiN64HrjH/APoIfAlQMyocnGIFQZouJGX+7lK4wEBMQzdw+HUKBO5bpMrdcnH1Cm9xhN9fLbB2y49zxQ4NxpjuDyazRmbQXSHU09y5OxmEYjczVpwmOpVatFcwOKh8y/UPcPhsBdxuU60Snce5+zfUuyAUOtbAjLcYIAywwxuF3f8ABSZ719wKGzEYLekf7ljQNzv1x9hD2+Jqktj3xUEqGOOpfHfJKA4VtkySjZm39IXlDsQse0V8DMwZ5MRpjNQ7mcD51Ow1LKuOKYlk36mE9iVBsLi4Kxl4dw5wEQmUxMykoWbblFzDmipupaeYmIiM9qn9bBKDf53xMlkfW6YtwJe9JUGU4W6dS1a6InCzNcYYw/tmBbD5M/WncxF7vfDuFdkqdb23ywugL4aAFM/PG0Ik6lzHBDgixaIYsMNrqpk2wx7pf7Qu2P5SiWjBLH+4TdSsNTK7lVRLuac1HpqYY3E9ARd4uK61kqAqVKFe5WnpPujNSh8y0zK74AQnUolmZibcol+qVRr2huocXQ/4z3B8bxZcy9u+hKKt+XxNimaT3MF5JSgjaS+qqZNwM7+f6/FuimR98hU4a9COoCHohNXM6BeODhi5KYHxC0YBuYQD6luTXuAcN4hOOowGT9PRjFsuhk9EPxZ9z/dqCLT1BrMphH3D3Ey7H0MPfV5gx9cA8S4rX9xn5TCkXrWYMwqEvNRXiAB9cvgf3wJW5lilfaoj2WvMPLk+sxWqo7Ji8kUvb9s3hbfpMi/MpaQe4kMVpLqBn49QMT9KVlhW2fL5cMyR2OM+mGE4OGULmdcruG51KxuHs/cunMau44nZ3L8Tdq43B9osoMI1JWYuBiOo
pTJlvxEgH4RIqe2a3i6AiaoehivtuaS9u4wrG4VwtWvqGmC2XLwUQJVzFxOydXiWpv5ZcLrgQ2fct1B88KLnhAhCPcBkykfKSZUU2C/OZQG8QRCHw65fpTIf+o4CgCrFtqJfijh0VdJBEshFTZiBogy2L65uXwuoI3hlBz7AS5ap1iL/ANMaGBPUvAQ+Z/shBwCE8rDCJ7cmUwVCHK1ieTMsjAixG5r8M3RhdyBiq6j0OoOhqlBUSIopeYHX3LeyonH9wTcvQg8XFRoCgBYYlSgVK18ksJxDFqQYO3q412xMXqFS3qLzLgA5hHjqfcGNRsxKjn2sxhjgTrzxH8FFo6PL4i3WrgLZWrf5ihbweibICVvRK8uHS5OCHfF8pDxAnoQSnJiV0FTOnozJgIqk1DUzYkzM9/OR56qTvmF6RFeurQSymTqXjxFUZFyhlmr/AKgiE9xkhLdFhq8oFQYoq4cF7iTqNUOUJ1GBJtSx9SiVFbE/QDdvqVJLsNVcfUMPZ0/h7gQddU2fAQAuVdXxcIlMQlSpuICUWrfmWusDVnd1GqB4CBKhebnkQeQNYi+BlFgtIxsMqeXULDb1fXomkNfJ4HuK8hfM7C/2gIazY6gHgCOuYeNku3bUVX5rzL6U7AYI6jY+YsDTOvgSoYS1duI+BZmqU8zCb+4Yw/mXzAfMqVdnUqLpG2DWAYiMaRvEIYReB+oGRmyrWXn0id85dzZZaygHiCs8MFIzUNRswqIuDeGoZixfsu8Y7V8xibZ47VKF9p0fLz9TBgtzr/SYQKW8RGs3jCV2imwQweEh+0epQajcAwAMzLlsLWILfMfKt/Sa/AigAvQRCLBFn5P61N5Vm+oaodicX8OvhNXw8WyAvZ5lDpbSqCWi0BuVoGkVC278uLhxXcoCjbEMgTJcILrFIzNtvF12SAyjGVF5nRW6lBC1LiNaqE1qGt2dYn0JfcEKQIiEp3qfnlVlmBjkEmVHbhBCqAmd/pz+5TTP2eYJXdkMlGWYYblPtAlVKRAMrWsHvKBcK4rOIY7uWZUhgy7K38TKIeUp2Esn2qwMW/cHfLvzHxjwyhW9h2/+JrQD8wGgu0YfRC5x6CVPPbfjDOvlU/UiRhA3HCiS7WiOSo0+yY57lFsUrvgx9ZXfsHBuBuYgTuZhyjBiOoIZUiekepDbC/MTr8GaYiUcCfUweImPofEe8+aMgVwqO4+Geln3T1MDdMs6j0ILzMR8hbzZ/q51LVDD/SEwgPI8RaxqZTaCDfa0XZ4idRCvH/mDEcwNEAsQKYEMTCAcwc2CON1sxq51LUwZT54HedP1ZBKyU/2CqgsJA0b7o7H2t2Tqs2Ni7nqZc/1GrKtUOy0Y9Pv5bn607eAuBcBeDsO4KCk4sIwwNuWjsVA0IUwEhetp+5oP28VAqVuGo6uWloQFynH85gRsI53NBMhVDN7CABWIAmocCUrTA17Q7uUlEAkwgEA8cZisSqxiT2bNf1E50/oh2hl04mNEs3AITKkuJUlm0ONI8GUNyUKwIsMSN7jNofQPxAhvT4oVCn8QeLuTn2XGqjt/0Ior1jTWxXwU/wC5Uyk0v+kdhpQG3+/n+tHc1XRTTEdrOoCRWHlify42/siE54T+IjbGUWEVzYQrR6ieUo4GawZcQgcQXN1oGtPtM5676kd2pTM/tkhHzVcVFPU3GBl0aYExxtuVlNsJkMTZcNSa6lXKlTTe46I0UTbghPMs7hlKqkK0YuCHMDAESx2hKp4ZYgq8EMMssDuGdWMy2gyPLvxEOof8CY6vSbn2AjuDm32P4DsJ7alAaUB2y4Or4Jjj0TlD+vTMMDU9WMweUtBMwfY8QhPpeGEuiBHGO4rUgG5bTpGcMz5ioPmDLgrUz1AvuYCAVQ+p7klRC9ZJ1bK8QUswiyxUrlC2XcqVHcrcH7cOngvfI1NkNnix3BwbhwQjqUMoBGpRP+UTBoj5e0hYhT+1cNSoy5Y7CzxCU7fsJQgoMfLvjNTBfcOE
mzYxhE2gVAyg8ufTCeammX3KvMpTpeZlJwvr6JrEq8JR614hxcYSzTAlftCjC9sYOzuHjuC2x6pcjbhvcubg2IUgPLFZaXYyZmb014lnuVbECo9CPlO7molXB+3HTD9PBrm4lfuh3ybhwahqMW/qKodRKG5fFCHX4gB7J0eCYaF08pho9Tv+VVOgJvfyTq/oiIX6zoie5koqbIZg08JWMdQxtVLOCYSVZlFqCtzI34gC5uUlGVMRJ1JBxGudDvuC2xKYP0I6qk2kOEKzcKMIHliZTxRW3BFpoJlKlbiwxYt5gRMp/ig7wccaYhtXuGBmVKhysGKlFaZ0Zep0TYS0IeP9wh2WKF1/H+hEzyR4U+GacMQwZaVUsuoW4eZrNaoW8uoLaQ7OQWMxUdCP7hlza9x0OOkzAfcyZ6Fy4eii8KMIuMMyilbubI7jTL0ivjhlnjZeNZcMogwxSm06YbHzyViAsLJ0TxfJvjqYWZUzh2O0IIID5j9tRh0MTsQQdMuD8+4yusAae/rgLXJqEsjTDCoqC5qw7llSxIyioqY+r3KMUyz1IMKBVa4LzPoiEp6Zh3FVt2MY4T7mH/tRF1QTCQnGNhtWllVnUC1YZQ7jdq3cl9euovpgCM0WzKD7RWTElzVmjGLTjJHmMbdQR1HaIV8wZPUN3DfzAktZVNGp92ZcEZgBqV6d49VN2MYBFFF9h1DNhA4m1fpX6mKzEPkwHu4P6RexB+1Ayjas8/FtcqwuduOGdQhWDMkbVQ6gMsh22dEAganXDwCpIusP4S4mMhBN8Vsp5iNteErSL0+I9s6JVPvcqHhmsf6ymmEs6yghBrCNiQri8RYmNzGFCx2yyJke5BbwgG4VMXseD4qqGZeQRMW0HuWix/HUok40/wDiYIFtCv6TelDkQ9+IqT/AR+Hu/wAQbDlaC6K7YJYH9KIxv5gtHYP6PU0+DsfEFR1rI/THSt0LQSrxj5yr/qYXBLfr/wCWbnxHWz9xaFAc74NQXAmktThnWIF0MIh29I98AL2K8T3PAqGp0ysyokPTHvxM6mA7EzNQSU+kETU5rtOpLzN+kOhuVZptESUjTMHeYUUfVwjRgC93LB6jk4W3NSDgb8Q7gYirLZLKZl7ldIahjkypDG5nQSKAKNo0ezDioTcaT/2MAgIsE5L1R2zDqrY7+0tvZv8A7lElJVKX1oblp8HYpIGd0OvL/TGDK6uIjLW/6EDhgd77/bOlx+Tt/bDAXYTjdf4h9GQCd1uv74KjqWiMBqrnnjRNF+ZcyvI8zMEf1h4QOJqByxmBnqaCTsIp9DRKtT5ZVfD3UXuHcMIRbYxDxVG0IqiuECFsNw+1mfgQMnACZHc8wJn1o1brXgy0j2ccul6lgnl5hQXcMrRXhjVQZccMwQYS1wQ9xFi6lXiy0dtvhWVbUDwE8tmBmEyq9J0yhzjs+Uqf/gf98Ar67pp8WUxp3+X/AKgq/wDUipkDfH1r91Got31+ev8AMZhXtYKD/MwmY6u21/gjFEyrWXf9kHGGac1hhjgofUN8XJ1lY+VSomQUblEL/MQDV5jmAY9dF5inM8suQcsIamXkTEk6e5n5/cMmhR5I17YrR+YPhPOz+4xMMXAVZ8Sqz+oQjN2hDp+oRljQzuQdGZX5o851N/Fh1LFiWqGWJcW7mwIg9vgIOV6WGhfsAfawUVlrfrN1n1R/uAvHceYPe+tQsFD9w+LM4A0Oo5coxteMfgic5QOzda8S9nSBsu2fcosouvf+Y7uV/VxMPDmmr9pTn9O4dQXO4AKNMO4SsMqnGuczzxZ54WpZDBylwx8ziZhRP8KMCLETWE6uEJj8wh7uMCbNx0cIUKBUdQov2rNSJUt1uyQQOhqWR4E0KlBmd6QSZdaOrQimGDWIcECMxUN5s4NwKis51dDwxVZwvt9M3sPm4IYGhb/vxEazUsTyzxDZLTZMPMPkZUrcP9U0sIx1w/UHEpEZ5Y7LRn6XSpsrcTmhggbKpmgrrLXU
rzLomvHN1ArfFCiKYOksRk9xewMstfBy+GzCYoxnLocL2RxMnUE4c7oeJjalJ1UA3cw2gNzBXmYKaqDdwAcQdTDO0B4geZ7My1yybviw3lFkXNQYgzKxwpPqrnSYOqSuLOr3EGU9OoEv3MMCQwvSMq4mVbBc0h/Cu/hXUuo2Gf1DUNETN1WidR2iaxG+BYTFrwh3r+46KeLmb8MILLPNS1j9XPKFOCGCUseiGhvcwwj2Ev7xDfBOo7Q6jpIG1ZkqOTzHQw94hbR6qJvSVsIXsikUEA0gfUa9QDpM+5btOhmXcPaITMRYrvEscFEdQle5am2IPhKLynKzFQOizhj2QmXNWRdZdtY6hD+EPFzMHnUBKWouqvqdU244YS42GKnqbimQdwzvWZrWoWxUx3DqYSfJpczXlKgvpL354Z2qpcbgtZ49ytmx5lTX5jNCGuGLmKvEIR1dWXA4woSmZbiY5STfL3KzWY2M6ng47IHAZzDHSeyjkrUVTqZE8GO3KpcYcuwRUm4/UkqdYYmCKXOtRiVIGB8ssqUwpfjgO+LhxiKXMPoOoqMhulqW+JmpVFrEwguS2x8jMDoG9V+Z0GGCojcZhZ4ir6VLJKNwI4Awv1csUZizl5aEpycEKfuyjlzOFUd9mILPbKGTg52fMXMW6ysy4VzXN0wHMQW44EXmG34DDbO+CYeAho8Szu5hrBSXNtgVbEApRbSJ2h8bgd40et5mYZ7HYy5LAtbSup+rHbNyvomK/wBccNh8JBKxDCww9rIDmWFlgw51LdLCAEOmWHpB3xBiawwox2mNtSxtl4mw0sQU1CW3aqLfxK7zNtaiZWLmIQMTpqXse2db+AQMmJeI75WB9Jgcily7g2zvhK1DlIamAlWzqWvZLSLBDy1DvjqazEnDQR/whFP9tFpHtwXTXqCCgaF29SieK+vqfUNfFJekL5eupnxKAP2jdSoR7x3D+jNpUl4CQeChs37/AFGI9mz7l0rK6lEsl2Y8ExzDyRdd9+IVVFRcTV1GrKeWF4R8S2S1kxqk+ENsDymm/DC1RhsyVK15IjWn2S3a7lGUzNgzMsGYBqUjM/KYTbUWFzCJZUqbTSBZFjaNwrqbgVw7+BxWZUqpVsmOEpVcG+AzGS5tmGpT1uuo1KhWvReYRoZenmA0Nq0EOEKxSwiD4le3mCHxqZcIFM588Ooehipi3SBoIAP5P/E7YMPPE9KUBl7riNoIWS9sflP8SozOoviZDTEFl6n9Vlx8KCd8ywasTKT+ktMo6i8RXmNGUQ1NDaMTbmEs0Vz+viFGNQ5vuaQg2zwTGynKYIlxaTOe4NwhK4qHB8je+D0hwkL3E24EWxjM04FHNJjjLQpAKq5uvMeiAoQclD49xQZ2BifcQuyj9xE+q6OagtguFoEu1zGdsKUQ+ojX9UQONEH6QWfLJv8A6VDVqyXPMYCoTNcBu5VimTlBaD9x1KDC/dMB9kZm3KG+ZuadTKBuG4qJV22U+Y6YcF2iCp3MUFb6hTqMyhmUqEdN4mQ+Z0QObjzri5ZPcWb3Asip8pkh8yHGLneXiZN9svBVOu4Yd/fg8Qfp2wrMa9teoExq9wg97MPAws/I95cFbPJKDwotUpw5u19x3BDWq5bZmWLFSqK9MTeSovSZsqZf5nm8/O4m8NKJuKOGWFKDDgTD9S5tgsBFhMjECt0xxS4RMj3CpDE8yr1Nkw2Qh2P9EHYGeWUyp6humFZQAQNB87+QYhAuZkvorlmhDceUG3OoucJe52wKoDKemJSxnMEDNxMKxHRDPTGuWz0l6z8HjqfpTuMVLWIKwdJcfNGO1/ubrKL8rxyhjrUMGCqeZiF2wTWNWXAfNfUGgtb49sBDAqhUzm4zSNYj5QJP6ku2w/cuqa9stLX8T0Ezql4GMMdQUVO/h5l/F4qIZsUYY5lmQfQmegWSx8TMYcvmHIzisu9yiwirsTrugT6+TDU/S4wqPLHmdQ6+uO44+QzJubqiXnnhrUWRHEuL
hshW1Tc7Hg/MuMFo5ijZWep1DdzFUmpL0g5KoFdutQ2NiLohO/xPfzYwlVXiHGYYjkllC7TW6hPFOZkz24uWAylmBtVNZ4SvfMzBK+RibodBkNxRpJdHU9mM7c738rh7lKlg0R2wsYPBa2xm5TpWQ35uUlSpTQx1fKkFdIdTaD3BUGmrm74UEMTKa0wWMsTGwujFd/BfJlzImkMzylKxwJpJtmA6YTNzudobAxFiSuidvSVzQTTL9EwBO0Z+PJj9cNckZc64dN6lnrbX9yovEyI7TCXzSUlx42jqDhuC2kpm1ySqvMwm1VdD3PIqWAXZphgK+cNYvbtlAC8cUlYhyJ1MR3iZftKi50LO7zNZZL9sv3Hc7eb4UO42mY5ceEIYiCRoaZS/jzN5ZyWf2ANIgoQR6j0t1oX9yAN3kwS6LqKYus8EPKhOw4EbiHmaroICgG23bAxGCEJ4mwjN9oazyu/jr4WReWZ5gTNmYyXCzZrC0vwIYA9kAMryhk3CDxPtKmYy35gUXc6xdxObK2il83xfNztly2ipMYs52ZmS21Gm5QzGl1Noi46CCTqESTCMW49sDqAtgl+/UGHDqO36m+pti8BLLq8wR03PQrPyTwrmDWMU5ilmIfsZvPyPi44CyNbG242pcStzKPaMP6JgVI4Pv0thbxBjEGTCHhT0J60LLrD8MLJ4G4TlBqXwuXwfC6ZuISFmpaPbgmzFsm9lyEhLQMyjzKg1w5rfmPBh/wDRGRdqUFmTcCDh1P2ow1sr3bhMS7/eC8Fz7TQdhKxAozEjhFgxO/dULrOuE+Br5XiYg2Zvh1MlMVbxEZd8yJJx1e40Vzo8Rg4l4l0Sq2wTJXBtIZeoOoPyw187g4zLiw7lj5IXIeAU/jjrhhxI5ipSYT6mBBpk2SgufTiOB/JTT90aIi0WI4wIwyvD5Y9+3g6QUxJWrW/cqVWIIHHTA3cvpa37mo7uEeT+HZCOR4rtjjbRKMULGBsY9w6D9qFgRdChanBZkiy1WdxL2uaZbL4uXycnaZgMPniNELgQ3GVwNGSLKGYFaDtmGg+5mkTyP+Y5IYVdSpH3z1QVD42ZufWJ0F+GBWjq9H0QTR7Sws4dx8QQllRz9WlxLPguXCHw7mjwGzMxTMx3ud1Fm+Vc68IQuTbP0xGtg9rljX6TstXiBDbMYDzfA8XL9y8zELmNC3NEeI90rcOuTdzNjACJMYbQSapoItFhUwFgLIp3NP4NBL8WC0wTId0VxUwMTf7I4i4xXcEx3GYSvga+BNWPFY4LoLRjATXAVqFnErMTgrjOpkJRSYkIfDrjIjruYw7nUJkmkzTM+JrfB1MSDMJFfl7MqVxR/DUrljkmrHW+USPARAJXzwBO5dnFcGuWamyM7hBcoeChwjfHVzvAlQ2z1zdL3UwOBD4dcV44c7hCPMsxPKXuYpnh1MuBuFpE9spDPNfzOo6xAZpGhHJDV7LlwW5lDCAzfw58Em+bPHfG6dwlHENkvEW+OpkxphedfAcjAyky9HTLLjy+oxMos8cERYjSVTGXX6WGAjz/AAf/2gAMAwEAAgADAAAAEPff0tfffffa98TkEd54ifznPrI5tivdSR/ffed/fffb0Z8/ff8AHdb4W3NLbovvkpdZdB4F+FZf330VbX33upPx7OvzqYdhSfM0/jVYpfozYk8ArS6WGgyNgT30vR6zZjwe6KnBKGotr7+6JjTa58uUDGtY/jsAofX289Lr/o9q+9aFOXfxWorzQ5shpU7zrZ6nI4CYV7X3vmmKiMK1+jNTG3keixT1Da7SKGo7YIBw4Z/+Yf8A9vtBGFOh6zaqfHNs7qk6Z+biz6DoXbYaY1TgWVL99rbrDgV9ZdaGn4YorMLR811sSXkEcsu3L1O8Yzf9997hNKvBgiBIYYkBS477xKRU3/LQFXhHNSol1M9997uyktcYZTX9OtVYHR15aqKbB2frFgddfwiMe93999yAXVIQnIioKVY6SQFEaLykWBBk
1h3IGTN6D6X999/1FnlRuAGQi2F29AsGkyQnKyyj6Kobbhjw0Pe999/R9GWQ3ou7woAsJ+9sMdbcA9JStXaCn772AzT99r20mOmMcNmEhJdiUAw2sOrwPWyrq6R8AjPmt2X999DUOP8AvBSUeKTsYTT44Buhk5l9flzkug5w3fiTlffWPq9jgiTP8RND9RAu04bpi4FeKcW1UM7uMlZhffbVANnlUawtbKSqDuqTlvHPo37ign4FwrknuY7E3ffSCwOD1PffMWuxfXo8wMHvvt31ZI4jJdhlZ4Kku/famfVa9K5Fg3o1QG6f3v8ArJMOdXvsWS9krrgXfmb32eNxZj4sVt0OSAtdGnZXTtdX+CZXLdsHqbeTNab331J1pyTRneEdv1tQ+/ImCdrZ3B0SNICcx6SKuZr0uhAPRdztJDyvyt2bsPw68Mvhhs86PeFqcdtitO/3sExlicP094I1KAFBRoYQFqGiofZOxMEVjMFwfEf38CECTI5JkCZurcRckyFV0zQxc66/oNTk2A3PsMT30/SjqGNiHWnrI1d/YlYPSex7nbhV2Zd0qUm5kpX33/INrW6KIt4e+iHo1Eass2b3+F4K76kbS9/j2/33l8+atBcei7amek7GD+5UkSKXPLL2Gq2MnQOh6J/vjfYlgo9t4GTjnEdltzoLjSYfDudyv67wpclxuGe3rAgk+QswZRgTP8UK6AvfHJHAZJdE5nkqL8rMGt/39GWPHlBG0aesboo2Pj5t2DbY0j9/lBsXfq55u5/39qyYc7tgojPRGqSQ3hwby7pkN5T6ubwH2dSBEzeseFW+/wDr5YeV8aWGOUZk8PVQHZyyFiWbDNObMWbLrO55hh7jBac68zve4JxsE73RTMN8bcpzsTm0AY3X9GRWDBOtStDc84mhTDXTUt5CmVeSEv8Axtksw3fiUPaxv6NK93TCjakRahzoXRd/6KAvxxTiULEFeT/2/f8A/BWVVnu6AFaKnIr04Wx42bODsoinTf1KzAX353b/AEzJm0bjvbbKzALuprbYVtf30vC69Es2OVPdSPfSrvdyYTwq+HbDISH5CW9Du3ChP7NayJMcE8sU/k+HvesXX6LTdH7T9dPNTgPK2p+Ink2stnvRCqiMyqDf99t//qqK3CRrwkFZiLW+GuTS2DYtdZjuyjCaP/d9/8QAKBEBAQEAAgIBAwQCAwEAAAAAAQARITEQQVEgYXGBkaGxMNHB4fHw/9oACAEDAQE/EPBb3neQz+4az/ACXw5WwbYGWMH4fVkXCGG2eHjm3ZSz9Z90v8S7mPT5x/yP8P8AiJCfDh/YttWJ+8QHSyAupi9wnAvsuuZGdwr6OrS7yHPqok+lBvaSOj/fjjzxcXFhczuAO4ZFwDuPWxXD4sCD7mA8C+bSE3qyM1LfHEMgNXoS2EmawhuPGQ5xMO/Rqr4tNZierdqH9F3slqWzz4Y8Pg6SL4XCrifNw7tAaQJxbvEmQuvVyg5g/aULET+Lq/UQpinUXGOx/M7bv3+gtE+S7glsEWTi5jqIt02yzL35CnUxB7geSGGRnsb/ABB4MyGXKJo4yBUIO4D7ti9E4XuFEfCWMpmmrD5LqXw8tjc8kPBXjY8Z5GQ68AimPUZtj6jCrSPdzSdV6Ygk1ckTs/qZes/mX+SW9kLAzbWstd6mDySuvfgQeNjzBNxcWngeDxwWHMScwvUleLHRoShOs3MhNObJrkocJIMOyG+nq1lPiGd5vOfNkJ+gh9po73nftENUTqhYMPl7/EcGJ3MGjB4TlI68cWWR8R1ZE8Q82yHLHkuChrC4Hfx/UBl5/eePBK9XNpLMtsZcgXUmZm+8noz9QLsW3ttQB6fm7t4t2C2Xi0ci6t48NJi4IZxZ4HLhBDy2Rtpzg5lh4HhuB1IgNm5Z+sPg8NXARADH4ervxx5dkYfBzaE+A+H4dY4sCJtyf+yAh8CbxBtwjmCOpkxu30nEe7rx21rSGkAc8e2L1b2Fsg4hXFrZ
MT1NrL8xcReHu61lxXO0Et5y0Gsg4lsboulm3XU8xxxHM8XWzebPpXL4C+8kgr4D5g5hNvFzh4n2jf0ouD8TjbMfAA5JdyQzoJa+2PtNFHOT6R1NsXXjrblv0977I6ha4PyJt9oPmzbBdL7T2fiKuxB+K331JSOOIk97RUiqurgjqLgh5xBkcDqZBGrLsphguHcMPFlnHip9PrwLezdDn4bTjVF31nxcDY5gyOGXF7uX5JYvFgJgDiw5cNgnF8Gllzw5LINt4s2W7HG2dWXFg4jZsmH0HV7mX9SHBs8HE8DSOODeIcxp3HPhFHcF5ODCB7t0Pq2La2YsHnwwL1EtXFk4tnIcmBxs4Y/WzzrBkfr97HXSz9SIuMuewBvqUA5DRP2ePtEn93v8zm5zjv4+0Y6ns+bhPV3Zp4NXNwWwc7FAQLM6sd2xOoPEdc2Q8yCLDmcP0jq9LA0+fd3FXuAAONH8cr+sTV4uZ+ZZF5Bh/c1H/eQacATnf/L53UtBDnvzabfHxty222zwSwmmVsZFzbGMw5MM5ftCL/RH85P0EsdteLj7WpJvfy/r8fadh6K/HsjNkrTGyG3pers5lhYCOtT4RIOJDbbfq9SHjbWWGMl02W1HpNsY4ibn2j5YcY0P/iVfQ5PxZ9OCQnrf6jghucepPh+/9TFEXAb7/wBEcQ234i7uYt0YxkcvGzHlZUdnWpGM6G4yO4O82KBX7jczD5uEDXa9Gcwzpt7PePqelgdL28dWgfn6chsR0/SD+g5/P/TYH0Gr/bKw8ePw/wC7DsiLotfJYr4BZjfe8HEMsHwALuXULh3PVvNrZThfJcB+BgGI3kjB4vn3LYOfmW/QFzG1X3g+P/b5NOfh/qQK3D8w8MMdQWLah2mINpvPfEa6Vpm2V3fMg8rNlP0IUeYfdk4uXMh3HWRzYm9lg5yntgZPbP0erjX/AJn4mX6J8sD4o5j9/j7zoXeJd7jw3s5tjwx1HTLSg2cizsTYVMprbPs9R4OuPHAt7DZngIENmmRmRv0BExI+zARgdB/cbBqPFyjjhAa9IPHBlLTImbhHsiObVgnnc4hZnghzAQzRDHNvKR1PXjhcjYPiHMmHW1z5/olg+GRcevPqMINr0TLqHiDAPlaJDmw4sreYfBuxZvcgRC9RxEb9TnnX+LdzMk9xJx5P1cJN0BsV4E/neLkPk9Z8Sk8d+8nef15+g7atHEOT3I9BjuSYrabNh52K1nmDIiKUvjLWOT5mqnt7iTfGTyJxe6FxewND1vE1A5cfzalfHqd709fRlyQHc2KAM2F9983EEmcQVkoJ+BOGsjhISryNolAy4+AN8bHBYayNl7LlbnENvjhctQ6cSDammM/zY+XfpewvaPChdm4MwckWyOy1pw3pt8ELxZ5njcUGNx4jnwzjxcn4o85PbHgvgJxzFuu+rsx+fUlyf68um82T13Nf2IxrvGyu7vVsmcWjahEw42Mecz1pfZtzi4IZ1ZNzJjsw54h7QyXGIhINvRDIXq2B6nL3C/xLAf0fn7PnN+EBZvR+7dv5f4JTh9v5Yzp47S3iQeG4xY+Hg4ge5A6Jx4bJLSxZXAcycJoH5NnMeIKRFz42HEImRx4ELsn64H9ssbV/ryRnfY71D1ejIPL9z+IJmLxcody6OWXqHM2QRF4eg4ssuIRJ2tw+yFd09ybQtXKLzHg5hkOcS4xlvL8Wg4vx7lK6B4bfJZ4hHLbPtPdNy1k9cxBpJ8OLfiwYzuxyJXuxWJEwk+7i4TqH1YMT3ZEOfA8W3DbN28Xq3yoJr3cA6c5PF8B4szFuGF8HItLR8UyYceYGVofBzdeSzwQ+Tvw4or+ILcPzc0z0hy97mwgQD57X+vBa3Guf2+8HxFYGe7ffOeyEsMzj5vXAPMeKeSkp2rXw9uycThyftazA2zzll15Ijwdz1L1jq/DLWrofv8/MvdD64wnyCL3LPAh/Lix14Xu5OvW/aK9D4HvSPa3bYgcILg8RlzYJn1Gt
MwyOPTYcRY23MbMsLWG6jUsh7t2AYGcrv8E48cWfPwyF9Wx9HS4jDnKPSDjwIFwu5U8Ntt3X3Sm6+rqbrbveLbRtrSD1baeRZHUruPiPMn6CE5lClxI8EF3ZCWrDOoDySBOzMmBGoj+027H7dRHO5viSdSXBzHfhyuNlmGz1D4DeSIq4SbvGYLhl4nwGeCcHhMeOrPJvjrPNlZCWsQzcrC4XNzG2+Du4eBxHBLjm9wDS67kSO9/iw8Eclk9+KWXv6SHLlBHiHOMs9QZAZK0tJtiCOp58HJDjHYvPf1NmUfvLXfoQchWc88pMPAjPguU4nwbnBCiHDbhPPgFkG+QGE8PDdpZC4bk5hHy71l3/ABZsmR9Ac+FkdWWRhHvwJjvwcR1dLZ6vVzHgfT//xAAmEQEBAQADAAICAQUBAQEAAAABABEQITEgQVFhcTCBkaHB8OHR/9oACAECAQE/EOCLrw6b/QJsk6J45wJsKr3xuWtscHUyxDse28bJ9Ov6ZOoO/wCkRqeBvN9be3Ydts2AOdLYLsnyHpIOpD9DfuC/E+X1aWyNss4znyUrNkPLGeSvpdz8T3mlo9j3hYI/MJO4H3vIsljbLLODufh6fLKaRvjuGcf1iokg5M7ITLOML1LIHciOMkGQ7HXIb3OStHuYdLuscWZ1AT9x/HdcYBiV5GuO7eF6nTlnwGeRxH8JE9snNk9cCPcYIOMJauMdFttNnBsAz4C6cAjn6iXjLLGPONttbC6bHzjcLsSET6Ma1Z0ttyYNew9nhBkYw6ch2X4kDd4eZg42CbPgedzzsdwZD9M78sSY+RdAGv33bMdhnclMi6lp0gfqO2ofNuwkOM/ETEAax9FgKDh5ATqy3qzbVmXV1ZMjY2XSHZ4GCRFr7ukBkkS29gudxHRfxCYG2x2tvntsnGLtd+uMf6aUbbw8ndkdGWRxt47l9OHNRiJniluOyULx6i2y3AR0d2k/HOI6g2Qy+pR9Is5Isn2C2HjPfmNHjk+yvCYsIBI49RHfHvvB+5+I5w86g62Nn4yqGLbvgITdrdcDH5nDCB0Ozyc26sOzI+sP2K7geAOF1DexZ/Rk2OjJ+iaheAy75OB0H5LVLeR+YId2SfqRbJNinDs5iOjvhHObANt74BwO/Dv9B2gdszyCyLSzfLp1dDWde3Y/qM2H33/mdAPTayHs2o+xujqGxkWzrD+khF2+XgTg48Ce/IQt+Qg3iQWmvOMw4fL74HbDY7GC7ZSFokMRR+lqbOye22k2MqLpt9cshrhcZ8OlsIghrINgW4a2r2ffBp1EexnfrJ5fosvISsRG9bCkOQ251G5wNbFPuzm7lkRfIdcG6shFsT9W6dT6rB259wbdHrf12f5v4X8fVmTwlK8jBkQDgZ7ye26LLxONUN9voL7kHs/i6zhvD3j26R8A2C/SoB9JfnSZED+UkL0HZ2J9/wC/qP8APWNhF+pV3e2w6jrZoLLCxecFpYrSLu7WTFlgtPcMfAbuZdTp/wA3Ybr/AKm1deEBR1aOPfeNXF5BPcfmK9uxk5H82EfHGQPI8tYGwmJZdibnUdy63jP6Y+G2rnZYTLwvW3LM++N+ojg7lw3ayu+CYWcbvUiAdF3EezmXjHnHcd9Nj16LXQ3r/cKDAUl8QQfuHS9JWsf7/uP1A3kWxwIHnEYHhw6gbILY2DFbs0l7IPu9OPHOX3TTvc6/vA53AQ+ATyVfhI/J9SfSFJ6dzkWW5EHZ6b9Np9LQktidWr8kwskXo2zq767/APfV9heMh1l2jpDtkSbbOPeAj49Y9bIogVzVod2KOuBwlwITbMDOSn3bwZurOh6xwLnvf/n4jr4hJDDbF+JPfgE9pdgHPbVhTuEaXtt9TmEIdw30T5LkHDuMFXRMJVJETM/3dJS8CPGxxmd8F9RnI8HqcNtDo2QITzrZH2rsPrYdnyB8iGu9ww2vzbHGyLsRkFUA/wBxwiDg+Ccbv1b03bD21lj8OndnctIOYDqIzv1b
Mut6EKniWjQHXA7fAurOw6sCCDqfYOTog6ngAet0o+oFi/Hhuo5ZYRYe8hhKYs//AGJc1d/zdgr8XK6e4Z2ht5vIYNujbF1IIkd2Bt4GBk8YPTeY+Y8v3OIdLs2Mdj7jQGz9RvD/AIn8mY9IeFm5aw6SQR1PV1WhZnIn2CI6myDBudsQkg8co9T3y80XXHUfrjhHcEVJ8GH8g7hYHcdx9+FdXaTjA1jDfvqOl14p3eXZsskurF2mav8AiFvyf9kuunkPV0cLNnc+oBpYszyPIvd55bkg+x15NbJvqcvVuQjyf7YMhVMLYOn6kbVz1n5+/PvbL8N/3r/UPXce5ZZZDJlQ6c48kMdrAj8+B99hfuyfbFRbo9xojvhv5tLZ4BfqddxiXUw8SaDpPzNAek/v/FjvBzD/ALCMEQs5doh4OMgt/uz0IjN1jk5OwScN6kfLQjwN9XcG+xj3DHibDMRmyPQ+HWWzGwyucZDrkIj2wm8EyPi3re0LRiORhbDaFnE+hEGB9spdy2MkvqSCyOHl+7Yvu9cnseRem+wsLBWB3fBP8R1LoC1WyGG2LS2Olmzoy63WLD8tDuIPV7LDbvjI8lecnqPgeWT9ZQwwsJNP+/DNMtfr6u6e+l2t4zjBuiF9cPe7bU6hJmXeSMs7vLuHjchWO44zO4fgqX1+ICTpgOELHByQ3nRE22IiODEOlx9b3fYyEL/55+Y/XRuWVTHcz6vx/r36ssiDbNui+0Q/B9z5yIjklkPVu7ZBxkddRlpmRQ63e/czbB5f2/xFXgOr/D/2T5BEevyZ/qEMCb0+y/p1n9ppnX8/+LNkzgbJg/Nl/Fk7HD74IKeiHi0+3/8ALvFo/AHGPu04PiR8CAOfGdzGXVv4tWoLrLO7PxF9xf8ApfmC3gWf9ggH4ZbHfB58hP3heDkawJxlkGcdQdQwRw+76gg8/cWgZYgwgz4DwchHwIxHcwM9jOQeLzjW223uyHqDvqH88dLLZXDkule4+ZxnBw+cfUfAMPDe8/XB73yOTg+X/8QAKRABAAICAgICAgIDAQEBAQAAAQARITFBUWFxEIGRoSDBMLHR4fDxQP/aAAgBAQABPxD+Iu4g268wMVxFls7/ABEGHP8AlPjhjrU0iByxZBxKMXd7iGwOpkcG6jz6NLN8wkhaFCjcMAKAtgjAqOUD4DLLa1W0x1S6TTu3MWy5r5gVogSvaeJhdEtq4NWxBbWNy7INmJWWtku7uIqaeIMzjflMrWztJeFddL7imUbGa/i5dwKuoQ//AJFluAWWi2nbsKPu4AtQUy9MrQCK0pf+UbuXKU5mmIp7gLVH3EGM8yvkJ68jeWjO5Wo0oH3JXoekiiprDNR8asU1OOwpDUqYAbfcJGqmMQFGDMyaDLGykp+OIaZyy4Zg7l4hypl0tEpmkYSuvG4h2jQMaMahT3LKeDLphlrHTAQRZqXRKVBwPGYJBk/wkKR/xi9QvwIF4eByc/nzBNShsx7tdyhA4h/k4Y0gVCvi4UFwoilBfnxERcy4UFVo97nu1IFxxQTiYhKJWq8xSFLdMBcafF5lNjk8zIILirnHBbitgshMCzdxVVcsy+oXmtzVwHMtaShhjiGRlCJtWLjU0XW5q2oZYRc4/cAKFVNZgdiHJFRojP7n0x0yfImBZDuaBywMf4Bt1EW+IKFLX+PK6auC9grwDdEKqDbc7mPqWOn+dzE+5diOIBZ+YbcHmi8V8UeHDUVSojseC8EwrjMuXwzegQ2ra7Zlc3qA9wLSM+TsrhlLccU/UZXJRRqNGEqmWLvuDWSLFa+FfoibziHwRWc8RvfEHZKLqY0LL6+G9mG9GZssoJbS6YKnzDaY0OGPUFwJYQs2RZ9e4HTiWvtcMjxDz/gCr0SseJxfPxZ3/hZqrVcrwfcX3QDoP9IyxSw4eSDvxFLQSpQNRXd/OiaN4SZHEGtRtxNOIOHBHKDUwtNvmdYIDmVTC0imvEdNrbcEdGReASxVMXTKGo87ljQU
V8PkpqAwEliQMBHC2NOFQ0aj1FaziAmAWjdwiRvLBxMEEWDiyXXBSMrDNIe4uwjsblWUSxCswQhTHnMNQczygrLkjWEV24GYZVSqfEK5XNSwJXcbhVFJoxLeo/ONqqHWe9rgkhrcrcDjzBNE4lhcFx+Jau4hPMRFO8zQ/lTe/gsFctXDJ/qIXOg6rtXiomku427Q4MamBzMDn9Q2phRcBTK5ntK1uKPMKOYJTYy+aYU2KkQGm4uMMNbArKXdi6OtwRa/pqXSSLBuXEFsCkGxeILlgABwRriYhsBEXcIVADUrDCq0qC6wxA2txoE8kIkEoDU3wgzg8w7czVjAo7lMTZ5zkqd8sNbg04l3NbI6ZxNnUok73BdkIPe5kUzAIBK3OzTBZCGGoc/Bi5zEDh2tCo84VcGYhLljcsmzMEDcMlMCcyibhlxKws2qWVhhZq9fx4l1OSINtQBuVtrnjo5qIC1JcYhQZVaxfEVXfxVskVQhGojE3VaEQytQIXJUTfCCqeYlhnJDKZohf2jW7LA4v1MPZ0i9bC4gq3A3dsdW6gMlOqhU7l/xOaj+4vJDBTnmHlRuwnGEgDAy7lbopmbMupzm45PEMYIeoO7niXdUwGElcuolsLzGY0HKeZeFoW3xKx08upjOmEYAt1D0GqvEJ40WEKkFx4hkKioWy93CmSh/Z8EUKQOkuamTIh/MBqJyN3CJVbhqH8du2B/dTwQuFQsC+8sywhrBpGzYjqHgl9xC8wJlLTFyhY+5RWJjohwSF5oROhMimIWhRqChz9Sy7JsaiqJL+kGLbsZJ5mkqsvNMY6ukGtvBCKx2Sk8JZLteBiJcvUEtMyx/2MGBUE6lLqJYkCitzeI2M5lcUzE+LqAQOFarSoDeTk5hXpDO0qrlPUxUzTROI6bKvzHVrzDYSylmhQwouCnpjmt0xBKxOjOc+oQr8UuQaNQSWvwjZS27huBgybhSXLGJTmIOWApYSAr9HbH1CLdL/wDJkAgzgzGX22aA0B6ioayhxzZmZFzF/B/EfmG1aNVsb15umJdVlVcwsLhbQAvrl9ykPMCXzMbzEe0SmZSWOiCZlEhx3Cxdxu1yniUXL4AikbUsZWvCL+4eweYAMRF+4PUTDOp0jBDX5ghpgdteRANtELuDzKpyjqoBhxCQiUETDh+GbXMQlMVzGgQ5NEIsZCiGwaewgtEGFqDUvEHcdQRYQzHhO1Ru4Td1kCh8o5ill0F4czA5XCeXJjiKjhtD0HUmjuOi1iFFlLQk1/ESllYcwFaBt1DzkVr+oVgpD8lQq2FBOmZB1b3nPCv9R3FaEsOjqaxbZu4I/wAm7iqOb3Ctne8V33NY7QVm5jVNxaupe42+MjU2XMlJ6TSOybm7thmVXMCpUWlgg18wv/ScTQoD9QBHmAVqVRSDWuIaYqmXEqybeZs5g7JZ3DmLUVRxBaHEM0tAyQDLN1uKYOCAc4Zj3BLY1KzcraxxMaw5WuYbxhrII2LJTZLxLKl25g7qA93CRMpCGixEup4x5Qg7tD7S4oGwG4EikYOk1BNiiC5rvpgMlut8sAajAEAlOoAR2EfLBSxKjUSkyNyir53b8RDFpyn/AJgYH8i3wbzHaGqRUt8zSboSkP40rO4KYDC+oPcM/PZber/jNrxB2yxzFM3O4HMC0ZqYDVyiblAscoGGFGqxMwy4EIKZrFUoEIb7Ss4sEee4RSvpgVTaw+PCChIGsRW2qlfvAv2RWcINrudjgh5TlmXSqXmNld8xxcKxOM1OYuC5bg4xMi1uUlFsrm7iGU2VrFwy4LUK4mxnENBXMr1bkwqMuUqeCtzF4sGISyQ9h1apdy5sLh4aluIqC+JYHFUOJbAEWzmAwy8JH1zWBtYQeVCR1Qru+YeYEvaQFqC+V6iyNQaB0EHYLJ3sP22hi51SiswKP5beYcASJhb+wGPdwNThNwefrcq4eIgbdxzgxqC1NkKD86GK7zEUyxElB5ly3JqGEYNQLrVr5iD0GIcv
dPEsxBxUHy/AHLxHY7j8UFjPdswVlHmw18CNwbvv4U1eIMM/SBlLMMGmskqF5+TmKBrMw55FDWvMNUXAK5hxrSyMBzcxjR5mRFM4jHfDdyqW8jrxEcMr/wBwZVqqNrLHqsHbEaUvEGgZrdkN819SjCKhx+Ac7idzkHRHfIWCQ2ADASmYFtSxinCu+P3Hm0ZYY68stU0LbtW1/LDn4naW7jtX/E5gsoGr7P8AiEAKtrDxwu/X3D9Izl7g6S+ILtqAFSxm4hzzGnf1HCKVLvDNmHNQioRx3HSrEPtD+KCY6CgB2ssHsvPUJYN8fDliaahgy6VBFcIiO0QjZdqfctaxCwQbL41K9QPmWsXLUwWtzlcTI0yuGc2sJ1s7glviELPhMZlAV3SBpBzmGeDaUzcC5zI1FyGR3FAul+WGD2SiK0ORdRVlVUY36U3iq1K0J3lKawCnEaPrteYkWmB7gATM29zFdZruDCAxgg3oj9rtM13KMvMy0ahMarUq12w4oWWXQhq/xMfydQCshTADTiYzcvcxWPjf7h7l4qKjM2zFTAgbbgVcN71KwuoKoV2vNzAADBDWVAemBp4n3UY0a+QKNQDonKObhxTGwSs93C7bRMrWPBDVO4lOSYFqF7cTPUv4WrLiGkv1Hrh8kDcEByMO4+LAQcR4DEtoMnuMJAE5EUWZVOZEGpmvX2uYV+9xMAADGiFvVQzKBbyYmL86aihc3IwUCvxA7CtY1BHGoFnMphZ//CAsgFMtVrm4ul8ncAMJJZFob5hA+fiN40Jy9w/kq5j8CDmWboHdU/ceO3opQsMYqZrxabM8lwFZlCrErGYtXWY2jmLSpuOjOY1WZRSkN+plncfmg7icoKfFzxcrXUpMO13moIVFLhNefxChNWAINlwaihOwsDqZQFMFcuupZvuYjqEI8kAqQQm5XUJ6+cIZUrxApQ3GIXiBvEzIroFzzKEjsR3GlsQvLtHMN5YcxJVFgQ0VeRzHhqc8sbFyv4I7VsbjPKQ+tw6ccaG4rVEFcXBRlgbbbiZbsoajTbflKtymmBizvRj3DRHUsMcGFr4tfr4snMo/mLYFF++Au1Wn3v8AMUEFcriAc151xKNKbS5fi/cy9EoUcFbnx4fcETCMPfAUOVaP3A0S5VP+wn6DlcDCR+iXsaIDnO/9Rz9Qp5JdzKRtIordRq3mJFR/6gBaSiyikYFOMzOhmPApFeTPNwKnYXBTDA4YJCBwwafhszdxKokqhzcsPVEGE4JYI25JbiBURKf8TKoduI5WKIouk8oEqSjUUW1yphmYtNyoUcgwAVtDROYTwCfaE9E44jEBhxsQEw1k4TuFm0kdxMQiM7yUJ0TMPAlZwgPqBeu1TKoaKNGq4hjvso3mQyvUI5FgtaZSOZcUpVQaSxSC5xUTDbFFnE/Wg3fM2P8AD18wFiVTYSzJ5I2TxHO7I/uYlxlvyu4HE5eLweI6KJy6Rs/dRrnlAtfqtwcLDE24hve5zziJYKNJL4BIlqXiZXU3xiy4tG3EH1Ki42BRoSUEgAq5UhE0v+TL2dKxKh5Yq3lKgg2bxFkTiYLZdC5SGQmPeqdQYCCsPOg3CAJpkcuJU4cI+5ZYs2PZj+SDYC+oqRbupUyqHa/3Fc6yrG0whfhMHmARQBky+IGMwwyhNBCV01XRAfBAQ4ZxMxAoqHZoOTCUEuhPNs5iGYIwtqXioGGXMubjpMFA1EDWxAhRgFqYiIxvgpfhiC/LwRm2x4xMqKsvd1mLhtlnEZlpVHM5fH+OA4l/vQCbyN4ZVCGgolKirMQGBQ3DlpSZElDm1bD6eIjAwSlH6gAYmKbgCm0xQ30hXtqA6m6VG9pbOtivcI+rD+IVQ2tShhxOipinbBuKutQbGtkqgtU1Ly3uO2MYTHmQ17goXU5uGU2QTqKrY38QaAp8wJO7/cfV+2YuV4mLUjzXzle0pTeXiXWyouTcHNaf3Env6L0wmYvLuOplsjVt1fEfNrh3Llm2xxEo2iNA
QqRQHkYtum8WfgNMbPLcwEzHSIBR3c70S+4MMLQ8QCkDaaIKEuWeYIzmmC4YYDI379Mn+oIK5gYf++JydkBKa1pp/wDYKxrg4mA8TZUCH+EcZgjqfsoJapTuYGvipncvDiYqZ2rMKKLQsBMMyDqW38S5DLfCbwiCbIpR9MYrwCvqWi8kQLzMVy4nnN1PIFlRR4jorTmGzFAGu4V15NPqDDVNe4mEpwxO3a15IB2/WSKy8RzBHRmWEuD9sBO6APuJqyuLpWFLyWvZCS37JtFZXu3MEHsfMUBV1cGMW7aKitKF2i2AguhRWEz9HEtqrb1FEVIvmZZpynLBjKN+5eUpsHW5l9NmIcPMSJAz9R8OnDsxCkBL/uIFu24XRVxiJs5j1CsjgagVdRyxBtbYus+6KuBRuOma3WPJCqxEmt5/wu3wVWxZfKIyTT3Fx5jxbucggNxFzULetx40sVYLnh/O4uNuRwN1FY/uIQADcvLNQ+V7KWWdu5gGLsuQ2ES+5Iju2c7jFVjVwU7vMMxznEFPV1+5eiTL8LU4OSlfcVHwEePMG6S6OJRPGnPuWEU2zUXqGK7jRaoybh8QYfhuHwsh7JbNkK80cR3TvDuOztarhJc4JdNVGmg2eZcLghIgn4EQhQFG6I7EETtiPxacQAm6zMkV8TWY9kZzIUh5lEbhKHpVw8MCosQVPC3MccNR6llYiymCR9FGkoIKuXNItXFnE5TlKb1BTuYRfsfBz/hFN1BR+GNGkRICjmBYsSmGLlTgIppoSv3L10C5Xy6SzQGw0pzFsQlAsW5cSyIMoWLDiCx4D8RC2gWptUB/uVOCwEquM3Kis1svc0z2EdRLjOcLQbs/tDYoA+ryxSm1CzUYwlQ8xFrN55uAeog+iW6NBeYONE0dMpWlVuJSYRq6tgQ1WV+IyCgI9XAIXcdVKFB0OIVoCvgZte54kwiDHklwrdIu5+zqABSyDUFkuMvmNTeoxveoS+WtPzHZaEw8BQwmGAcYg5ii3xAjQ7zLBM1G0IIBkLy1AOgVu4qykrc7eJZWnEHZ3LCcWlVLcyyuYa3uDVbmjqfqzd7iAvqLGv8ABUMT9tBn2hzBhqZDFR7hWckcEWMzKUmmIFsL5zC2zmTNXRMjbrFOKLbAFbVUDY8y29A3d/8AY8lws1CqxhXlGoYQ/smXjXOq5WAwhYzLo+WWBD7axGthVXtjh3NENKxVxVYJM6Xdvuoz8I0dZlKjCFlOrefBAi2VSvAiLdQUOxajHQplMmnEBfBZfcveCtP3OBbPdEEfLCDeUQSM1rFV6IKi2guPJuGHEtLlOaGXq4LagFfuCsuyFmUABl4xMG5htgioAMa7ipqUpF+GGwXmXMi8TJf8OVbAZiqYjsYacXDmCCxGV2D83thqtxf+n7m4MEoZQYnIygsZQzrEVCzQmc62/Uc8yL/cAHtnjZ/aAshZUjHYGHDGlxX+kPs7Z7lSvsvnE7yNO5Zo0nUXB4uke7omcXLZRqogu8ot4BnMxdgh4uEt933LLOFkGTCkpK1qIG8Qzii6cwfiVNJhmg2mSHKkpaouBt3N3ENaoVOpbwExncbIv1KYSs5hSwYhn4QCxPas1SVdZXLLPFxUWlQQxyR0sGd2iy9lu0Jk4gmiRcYYKTCTBSoS0fiMtQ55h/BN/uINsVwYlrb/AJ/7Pir9H/ZX54BbPKYDEsZXmL0W8WqyZl4ypBtwHp5ggiDeSGWFdSzMqWmOclpvSO82MJZa5K9y5OGE5twqzlwznxAYAodysLJIwNNs5JPEtYjQ4+kxpDAOZgVVVQerbl4JX4MEcnuBXCETfW1TBZHCXWC46peitxhANLH9pYa2v/KcSaCb/LGcBq6uE27kBlcN6AUxSRZNJgzXS1XGYrlowszTEuM5LSyAXNJmvuKkZgMrHxhG7BEYVyuXE7YkVx5l6qCguOO2rgYQPKUBy6bYuJ1BLFM1NHqEAXWIMtjDJiGCVuIFtjX8vzA6f4eXuO3v4rKQ
IFj4hbR0zSobbDRnbjz/ANhOSgAmjcCjUOG8RIU2m6WRjzcktkLA/uLRRhDWbBC18MoUQFsMy/NUxEtWVGlbYypzLcQsB0iBCXGhQF5lgATPiVEpRd5+of6PS8R0Cuiu2bHUn3qPMybvxC2VReyFVaMGGoL3E2iuAOQhAVNJGLV0EDSSj1Q6QzWhSVlS9NBxG2qXGo/fMD51iHIS0WOCXkLaalkAF+Ctwoah1tGc2qVJZRVYj1YlG8ZYbX2SJldoZZYiCp5gmON4SmsN/Bomr38G0rMyWXYzT7+MDwJhiff+AaH3Nr3Nes1GyEOSBLxCqKIlWgHcbUHpU/JiABJYlie4GVkDmie5otajeHhEwsC1U3KUrFBt4nAItdw15gupfijcgWWJuFagXSgESghlbLAuBCk0XN4pi1sq9/oHMSDg3HcWKvWNUsLCDnbKOGZJkwAvi17ITfFjJjjUaWvUBlcl8bjb1JRqVfEC225u5RavpCp5OIsuQ5lzHFGyyiinLLsZtaWD9MnmHEJ5vFS55OuY9yVUFAqpZYoCU0IsIF7bthCND0EG5Kx/WJgjbKgK6ezQsCqVvKjuETlKVWfUTksxOSqqD03BtEXqIJTPUoLUHY4JZwseZewvwiWpHc2pFo2w1/JviPbDI18UlAaWOFviauMQjoygI4sX+pWWpYF6Z31mvF/3NIKO3EVBZbtilYm2W2ILJ6mPoTaLTSKDbC/UYY9ULaYuuoDm7tH6dAhK88bgWQJivcpSYqnxNmxdsRcAocw2wGVEFg8su4pzxAVSxhh4g9MtkQYRvEVo13+YNDQT7l4y2lkZyOJbU8Fu1gNxFVacRWbGOrRSAaBqnVsdE3AZhgKWp5biAjArGAwPJAvtMs43DdwteCMBuml+oITgr/KXaFLcx7LYG5ZugzblhtLkxa7syb7Uw32RrZqjKVmjEtMxmhVywLauNSsB3CrZOUDSF23aiMY5mFGH8XTHHlNCp+4mC8Z+F1Z8ABMJRhIsoLuEG86BQEFE0JEyzZ8jCy88DyirmMmeUGqwX8I1xlQR5spSm2SWFVCh9zdNFxFRAi2xMFXKLrU3CYfbiHFiaZRgUvBm47UoDMTuAYNDZSMyiSChFEfJUL+CVS4qo95NThuNkkgRGqR2nZmVLaDAqgzMIbZqoMbWmK8yw0AWjAl0Tubrm9ZpxOS2a6ZQ2wlcN1iDLpFAyxrar8TDG0vyylhYlxGmkGggKXHKBY5I4lBoZYlO2Kv5CJULDmOs1FDKDYzcFJuC16lh3uDSoO5+2ZOrDR9jKjobmc/Ed3/N0wFuI6OEq5eC229fqDtUzyv/AGK1brkiwjH1GeZhDVzAZg4l7Y6e47xO6hszLUsYfxFXMUhO0tAEu3LzDzh2fmME1svcWMOH9xCvGaKC6S9VWpxpsfXUZhhyZY4/WMpySUGlCQjfSmEKBxLK4epSXGIII1hTmGiBl88R3Ac0wMHbwJBpAaqVpi2EtBwYsoBf/WIAWhVVAZoqDNQWlZq81Mx1k5DKFCoOGGmKA5iBQRXly8wqNYQ3L4BbRD4QqkQoYJzzEKkqk2I1L7iybggIhLMcTcARek6ylKPLGtJajBaKxeJctYnLDpW7hj4VLxGjKLRYMCNgcJGaFprhh8lnUJbIhw8G5y9/ydk3jtnLbRiglAbrTmA1+BcrFQFoEcNwXiWqrTORiYZxOGP4IC10LeZWyxGlZc/iUIld1BAEVlxdsYBbjwFb/wBfmKcMOtOG5jHVvqOpnBFUqqRgGs0KhnAx5xaKaleznLrMsGLwsDC9BSvaG3MxWoLq0GOsAEvCvUrB53GypDIZqMjBhrTGXAzcsKKMBmJbJM0CGGsx7S7Hwh6oNpyS8+GQilxDPLcoHkGuoYNCri1tu5rdChxCXTHMULjUGnhWoAdANr6jFjIdUYdIACAqFoD5lsXajsdQyZWdiDLphUbBBbJzEsleqdS9gHrLn5tJkUKWYmAkkEjoacxy2qrWlZgj
oB7hoHU5e/5VcWQlt98ILVaPHEdpbbFplC3PepiWGwa/ETDaJfNwFoimHiQUFtWuiFlWZpzJcV7Ymx0RQMtrLI5KTZgP6uVzoKqyx4OrCKbgQL+lbC7iK6QX5plWU+TDGbGY9mEA8wI/GUo/YCPeuQ+7izZYtcS3aaKeGAC5Gq8sDtC8RgH0gopOHljKwVLuFxybqHZOWIFVslIAq88kIBNmF3L8NwmrfSMVlrYQpA6Cx+4cZGv/AGh4lihUboG9stKtLhSucwHY8alaDYGYyYsGuo0I1FOWXhThst1DJftYFZyxyAcw8PSEDe9zE2wCssG5ZVgDK/iN7Lxyx6HqIt7qNNriEjVp3Kyqjol9oW8RvgnT1LEN1tqrgLfRCe4VAVl/BCuOYfyXG8x03Bs8ifDteorzuAxYxcrQGlg3f2dkr8VHC1tEKpEB7H/BfqeJ44WXMU1llHij0pGDjJkwxDABVrrpTAeID1BaYFvP6gxuV7B0F0t7lXpCDLq4vwtGmq0yhluCIQzF+KHvD4fEuaZLOc7z7iUfUzEG9E4ONOpolTggK8NJ5hG1accxQNhyniaFQSmofUAsZzFwBpSGbuA+Yc9x8yXeEOExjFlQFCRVME8QO24G4Mjidw5meI8MxYbZlz+kCGVZybn+4QqxcPKZDXMM9RvlMhxoOi4FydI7fUQJltgDcDkOA2/iYvVcs5gJUU+i2XMTWQPzMlgqtsQjlhcNAJRTMYjXZHPQyzKM3dvSKDZuCsVB1DfgOpSQsKi6wKp5wkdPBWLjqtgBNH8naafUJQ9wAPTMaqA5rqvzCQ7WLm5anEUMxwe/Msul/AKLV6ox7YJ9VDelSH9QDDiOFIFXWPM31yBwezyRkmOwAWxeOI9S1c8oH17LOZcWvAdFceq/cOAIS16OHOf2EqQoBU2cl/UA8THUWzmBlY/SPQv3wiP1PBMWFoJTgoWucXBToIu05xDzEzZVpCQjMKR4MdH/ACjxHhQiw26TB9x0ADqhgjhQW7gmBqcjKfEDzKacTIo4lEqe4guf1B8gbowqosts+YDlmg6hXIb6PcvB8jbKA2bOJdJDatxs0URI0KaFib0A06/E0bpAuUVldxEe4JIhu9RDZCtnuDYV5hZC9MFynPpsKW2t0mLfpKQqCq0RqkXllR1ZtnEZ4wkWHDMGCUIdJ/oIc/ycudRE1NowXL8B7Tv1GYkbjRnf/wB4gPhC8oCg4cV9+o2xJolyyYNLPgj/AFGdaKIhlRc7Vrn1HKikjGtK0HDyz6ZYWiUOyojIpyx19yumANGXjcodoaXGR1+IjEBM81uBsXtd/UNGm8mTq+wIdwG4IqNxoQ01cFYTU3KyaA6bjeHiGj3C3VKKfUpiXeF3qWoOJUvWkVCR3VXGUcIeQkqKUPEELRMUQk3ArB/LlUKtEFFGPmheeBCOxX6gOQF7y7lxC+TxLMZUiJYpgSXAByriXKAHMXsa2taIPIVdbJxCgMVtlVdTXsi1HyIF4QmDUFbcou7YuY2r4yv9BvRvUoLzQEoZoqAAdsxeNE2H3zCAxET+yVgHuS6t/EuQi1JUfhkV4gEtbfd1EAS/lC5n9D+fK5VzHwEFCANicRRdBVrMLzGdBb43FFNBuX8n3FLMoMU1dYNKFHIjvfqMFeK6DeYeqv5IDfotj163TUyGcsdgFunvuY0NIYxquopeOBZLy2SltEcpLFPVX/2EhjEavzKdtkalF/pKPEsUsql9SVmpUDVuWFgcRrIsQdXbNDLb2R5To4iLZGsBG5h1cOMyocM5fHH+C425E1Kyw7eIXUFx5MxNy0i0oASGPELKJoBWeIhFgVXmY9mijYxRSBncvp5QYhOavtLUHI5xmCVxthUAAshsfuPAwZHplhtWG6O3/kAvqFP+h9y2AuBY8p0OkWNRYTlXD41AwmzQy7p6lnBKzN+5vLH13C2j9JGgvrP83TBZbMvam6XeMJ6ilO4FyxnABjqzNReZoOBSBausXuMUUt5R
fbc6JeP1REKjaruaMs5iF0yyKqwVW4twb+CWD7liuUBXD1FUeJBJRzRkgMv4LCZruA4GyBSFGEUzk6mgEYFzuCiU2woU58QYCqdRiNoOU+aRBtiLVAVCLVxFEozL27UefKO9KlfM5hrmGrTtzBq2BuaUiGE6oJRLg60QwhcoNeY8QnIl+oD1OdYF8RBQXfmVajJSupWLhgRFWHI1tffiVw62ErX0+INTYPjbH9Z/EN50gILeWKht2CF2imJkU2Mi1+llSp2pSZj2gJfxFZdCICqGoXUN/wAb3Cwz7MVNt3DwxEwwBWbM28soIHneD/cD3wsSupisE0YuE4hVMqvihaly1UGPlM4gbiZxDbEMt5h4Hguug4T8aWG6girImb6SwNZisevgcNwW/cvJlFRm8ykgCq2vkUrSLDTKCDOWORdjD+bzuM5l8GM9lzYYphKMTbFS4pwSq+xmXnLAZbZXJ86g5D3ZzDD4TcPkMq6lToFW6hTg1TpEIS9v6wtembXTCQ0Gnhw/EuxJQkLtCN4N8U5+pbVfQSiyxMxD/wASz7mb13ZLXiUMFHNarwEN48yxV3yv+2YGW7riKAKy/UXgOIH5i2L/ACoqfvorJbIHB9P+5j53n/8AEZB5LleH7lI52XgJhM9Oyz7ZXU4DY5PQf3+IO7g0MXNRcMAmSbS6jCBROGLzLXtlbgLqAMRUSXfS49bS4xBZT7MwHFiq8Sw5EFgmiYBDN1qVKgChjAbSrU99wWFekIQmSWA3Etm0BTmN0lmEi73Rtid6jTUURgOS5omSxVlOYorrUQAepuEhhIOoWYVA1K74Ux6ym4lsOASsltd5JodgwxuQ+moVRXfljEe40BG5oX5z90RPiqUinR9Qr5rSGB7iFsZPSFUb/smIjxxB/Cmgej3GIlorc3HobIglrhStj+RzMPUzAmm10MkFmkV7IGWV/wCp9P7iCszM2/p39R1iXF7JsHF/Zl4HGCbMxLHEaC3Mlw6rBl8KC7l63FzUusoXzmAEFsGb/MzabIKZENQO6pVMCkBS9DuM8bPDNyeBqUO4qEV1L+IVZsVqXB+I81GDhAglboiiq9pQiXyZc7USlJTDFQ08QQXVolAuxc24kol32molcQhRKJc0ldzzHmjYeZkCgAUQZa/MQBmNi0unKMVYWLWw4qt3EKgO1RmCQrJVcNEcWcj+3/Je0O6xMsuBbUPqHcI3CXhsR/cfboheRpd7Co2UDnd4iRXgdS/Ypso0wIHe6TUYC1JWkvP8azuabn7qJnKmiNvCuz+/zDAJd7uJUqq5tY4RoP8AsprwvR2MFhJwB3MC6V3HTBwyqjLzc2TaYLY7zAxDCpXPMpzuClnE2JdiPg3FdaXuKSHpE/QDzOQPAqhjyQ5RB147KltUfCBdUVatuNEjCALPEARBhkmGIMqCINtOSDCNuWc78bSOXlw4u4bdgA4lhGygQiMwHiHTuWFg0RiLYEElIRBLw+4VDcL9y5RSGoRlYmD5IZXEOSJ7RoBdThVXbkj0dwNNxMYxAFy4AY4X3uAQbnbQQwdZi4SC2xo7YaXZhH/4zUBuuIvFhQgoDOznMD+8nsmLCNYgHggDeXH8LzUUpubnE7TS5gSIbnA1/wA+4bY6ZwOBfy+3/UY0uDcLi/7AQC1DTzBFl2lcfF6mOZpsYFNMvG5ZWyaAj6hStwNwrxO8wEEl8mwXBpbWRaJRYDgwYpb0XqV8U1OGEhrvBlztQekvWhpS2oeR7s74belQO2eAXE+Zihl+YwEi9Lim6GZfk3TS7K3lgSirFfUC7YqBl2grzJiLPrXMHpj2zdsAsblSgSwp4jl5VAGUSkQCrVL6anSNzdfUVlSlDuEvwUwenUvpfCwZ0RgOcspYIlFXNQivOl0enUzFeIX8tY1MfGSvyplhvdpXGTiJcBU3TK3XmYJ0W1c4hgLAV8zJpUA0q25dfj+N6SxPU5erQHZAe4i/+aP7+NJU2qbOTh/EspzAawdPiD307z8H
BCSvOYhJgal3FCEt7/cco1csI/0hi4WClS7eJRQb4jGpdsTkWgUaZg5FE3vA+NNyvQEMc1GmZwDqI0DEUquYy1wXaoocJ5jfVISB+CBXuMjUfDbdIEbmWVqH2naCIVUXcQrxAXJgGRBEDQEEbOIbeQihFExnT1LvVrVdsrUCyN4QRvcLkHUdxk+I9qLEjrAOfcyjxzZUUTcUJoYzDS6RcAqLOGK3aytSttFMuz33KFxW3cvbKN3uKimoKRXnk3Bs7Wo2lK1E7O2numVhAt7xAApuoluszTGvniO1blVVymb+OtTVuf3e3j7g22t/cZsVAGWWILIZGj7tP3APU3DO76TEJrY5s6lKSKSqNMaozlnDJoYCJiGmoC3qKtAGA3URbEUm9K7i3htYE3m9RXQQu4mRkkFMV1MFb6gyoF2RkIJNvTUxuVR1cfCg1FQ2ZgFVimalkro5gjhSXUKEGiWt0RFXxsmFL9wJgGqXH8jMJCb5qBK6VEC5p7ZRFYfvnwZmzLSEhSCM2LjsJdURgU8m2MjfwiN9SzCDMmCOqA/cTHRzBJTssleQWlYCIcYKm6TbZS/EbD2isqfcbG8alWZTOSKVGahArlwazx2J3KzBB0bEBA6Z+gJml58QIL3RMr9w/hwxxuGBg2QK5s1D8R71pTwcBNXBX5tHA5/4+4ri3sKp4/K/qah8cv2XvmqmCD0Xg8PMLrJAVDRA0SzNmNRKqoNKsBYsCbiIrRN/UDfcAPVzBr4BCLEYqA1cLRBfiO0AoaQmDINQ+lcLpELqZzfUq31EETmUmuqpm85iSmyNCqi0Z9IANi9TorgJVfMXVKHLMTbVRwiAxxEQPlYwKvtGI2wHIgzZdQqO1bjgmWo+OBeldkexdtVX3EiCKmTmVpuPmTwNdb4Ow5gLBQ9g7riLexGpop71CukpJWh4yC1azeMtUWyjuXBWh5YVfAS4v9Yhpf8AcOBS8x0TfqCmLarHzNbwoxJinmIBzQ4Rfufky4V4DFWCKUIkdzkiOyqXmAlkMkVFDWjmpuvB667y78QRdhrXgOCsUcS1bbx/OpBckyNI+2lir4//AGYB98Umd6+xto8sPsEAHAQmoN4X6IZZl2w3lo8qlTGKhbN0dAXEfqDKB0keCGKZeVfCo4gVpm2JmrxHbRklBZCO3zcZUugy+p7EHSU5g1HRDT2l0yXB4izKUeYC+KE9WxKNcNzy0CGpjb8YUE/oRUQwdzHdagcmjzAbJYR1HioqWwOXp6h9YRMyk7krSTSlzXrmWudy7LcZetEpUVt3UsmiAVdeYZMLRq1UjbY6HirJSbsrTZi65qDWHb0Dkp0GqwvlrDoZUDyFVdB+73gI85wQCaGMXqcMFlvdckCr7jaJuI9BHqXnRsrZTi4bJICg6h1V5ioiIcXNRKg2ncrpAoLm6Am/7R6jotsRl1wAtX1CkLmWKWr6wHruMYIyBy6P6gYWrbIKP3LZv+DtOPM0MN42wZ/thRakTTiXZTqOK7sij3hYVy6OQURHnHXNTstdGf2qzDJVNYunCmadPDEbDsDLnKLGZ9wyU4PiMq3WrL7lk0eXVysgAxNKXzBX7KVCaFS8cRAhpCxSFhcy08wPHBt+ITrRD8yzPcdxPjch34iBEcB5UY5IH4zuICbW3qA5W3ccBPTxCdoIWYgbNjGTRcBFZivTMFSwDbCoGmA1vWV/qKMVh4saHZiMnNQgphxlx3Z/yWCnOpRRYlm1glHBi9oKInS+pkpHEfIkuf1lwjpJnVGFAmyCtaUchNQMgu5RUO7jmYwz1DhwzRDFAU3zOC0Jus0lcy0IxXlCCUueFLPzKafEhGn6W/iFs1y3adsMBm68Cf7c/wCpqIZWD6InSGShbl5YqDon8UtfUcH3CN3aFxc0wsu2blhBpy1YQANkE4gKLppj6tMLNv6gdrwS82UxQN25+4bXHY69f8lbr4rMuIAERuclI9wBa5SL8XRWcMvRYBGGtxWNwtUOYZamJSCHMeDH
pmCgWM8BDqVNU7mHOy26O4PLHaygCBF05UaWhSM+UuYCuuoEkcJlZVEbIAFVBU3BSqELVKpZN4e4BWI2ZnOtivP6LgWonsI3zWn9/uLUlIhVvD0U8JzodQK07jAVEiq0aOMll8XcoKmz26za7XzOI1WY9GhhlFaUyRwlF5ly3ARgyDcGxFlbl4+hC2OY9waSNcMtvWKuL0Qmo8xyTwt0Q3FQ1ufRDttAW0wn+YVM5LNz9lavxDOmEgsYcwYAfkJXfB9MQPbl7R4Sl+W/UVSxC1e5aWrPWOAfHf57/jWX1KpMB1l8AXpIFoiA2tTbUUehdQCJjngnWEqErYTiNrcrn/cP1v8AciUw7h0kuEtG6F4IOfzh/UDMKupmXMoNUvEovFBc2SaXXOZ2tEztXcq3liK3F0C8S4otmKLzQdxmvdoZjDm6LcsKZmF4h4jXYwQMGKOPEW1ZHI7l4BQNEYLGdR9RKpxKTiEKKQovEGm2ofGIbkZXzsFRyzl/pmlrRoOg4IIlfqFeieYAUV9xLLqEMRU13EcwqUmual/rXOfhxuVkmz4HNdRRaY5BhEQn9wQdI7vLqFq/ZWVEeG1ZDErSPXE5hUIedKFPw0weUVo5Ctm9w3HoPQ5WbN7r0R8Lu/kbjMBKswADhtbPc8V0u+xQ8QSzEInj/n+JqYn7qPM8zIYnAOcRJQ92Rw3Icwfm0FicnkZYtfFur2vBE4tly15IUc3jIf8AJdbpcJioW1sSK/vCKobgmSWG9QeUWD3TNNkA1xbEUCcyZgl3AJrMUYirrMoql9S8riWKV6jM8QRE9riwB4mMhh1XqEeClXMsEoWB2xKIXowwlzQr6gDuADZDDUxnBLKYzPb5lK6YYqAkyrlzOolaUYJDdHUrUcMEBK9PU5jHKSkw1EYVDmDMxuOqbmZTiJLY2xUss5YmJnEryaiBlgH1BSDyg7dM7V1pP/uooAeD+4ytW3NZfqVWAsPBE5UIqorLo4pjCkcBaOn/AObuceGwdfV4+50cwNHkPMUEzWbQB1e/SfzFpfDMQjmQg41oKXWXWpkLnK10oPJkbiMFqyGfiACiaGr8KSqK02aBBrY1GqODLUChQu6vXMdqciPylraYawZgtoXKt3UNPUdFRLQG1g+lqbD8RPW2ZjUZyLWfc7UGCME6z8xRgQ+EjBpCEgCsJl5JgzN0xEVg5H/QQk1jLEDbJsMCi9seiH0kerZgq0AtwMpVQOGoUq5xD1c/9ynNwXAUa8TcQCZlGagFQ1IlHiitURDLzA3XxaMOWaTd9R1FTTZF7rHmCUDRRSP3C85l+2rEDz2HMcqZS5zm/wC53mNAqDulo/IDL6lo0HcDncbKWmsnY8QqiADo/lbjxP0YcELK0lUBdOR06mVKFKL/ALlTkFbMZKnlOdeY74dooLyDlq4u7JqIvsGKfKcRdq2dM1gTOM+yW9pAbyLqBkhdQ1Vw1mLQsN4mI5AxT7mBvrwG4oMO3FcywohftMis5Qa7IK7MBNLRM1cO2H0xbkJ02goDZBLzDol5WxljIN/2ipgmA1iA7q8spNojfdRCty/g4hAtSoVzxAxzDCwC7mAviY0TQlyjzR1MhTiCOEvMMcwirvEdHeyU5FfHHwaM4m7EYIpuZjZiGqtJ38hMRJznED2Nq8SkxJlmNwOo+IWpllBWVn4WAQAAwHENsw/wnHxX/YmUubuUrLqHW2V4UjViOjP9zX/of+ynPx7HZc4nR8b/ANYmPomKsxeYS3uVUujY7hpujbK5xewniCkTOF/2GuLaW3zALeUXEMsyhVdwoT+ouQlxevqDhBt9wsjkVA7JD31wmBpNSYjY14APL7jw6NOIgrEDcPOzmFZdnErw4Ju1ibFiHc6ioU3KjllKTscTJPEZJU3qAjmCUS6b6RKclqJa2Xbd4g5czkyLhgVkMR2T4otzBUK4mG25goiPMvC/CTpNRuBbpYFzGHaQAtJLZFqsFFrHGNWW2ZF5+oNr/gHeIJdT91Ll
7jQO7ltOZmMCkoN3UqMgOmoEpdnU0YhosEaqIDmBiqmhb/3LVUG3MWyLed+YtFjCOAh6jXsoeImBbUyctR0zbh1VLZH9RLasOfCnSMYnCWscAw8qJ94ZlEaZAyIkcVaLhfbIOYKefPuZsN84sEdE78JlkE8iUX0Rh33HTWFogru5y3iE/bMxQl2TL5xarnDDJRiIKkQFzHnh10QpT4TMfEGrYb4nAwaUqO5Qt8QrTeCyKI1QqVP2QrRiWFD9xBUymlpLPv6mkNuAN6gOvg/j+kTTDhcAC5dK9517jteKZkLqKMeo6UVwq8JlY4JgVSSuAC66Y0LRTZigZgFwr9wq0SA2syo0bLrMrlQonL5j1FRHYdSnpERzA+cxZdBHPowkrDY4CBk7CF5Q7vaYyb4uVRxyMYkRkC2XG2DAQOAGnEczVEt2mfAwbdxOQZDxFwcoRYwonpCMcpognF5gyXdzDcQZ9wWNQWRwGWUarlySs0xKReScRsG6Qy7JCRHEDOw0hhvcHLlMk8w0x3HcvqajbAuCXi5qIC8TOBsgO4kRw0iP5hXZrMQBp5ZeNkstFuKxIRG5DOPfLeD9TdJwYrLUT9i2wdWZJxGtVv8Ao7p9bgmsRBEisz/FwSgLNVhPhK7c5vnUayXfYIINqu/DMQFyB3Koib3A04hqXCGq5jkLNLiJtXrRFcj8AtUxFgDDLtVadoduMo6lsBr24UwTGoWh4jsYuNOKgpTUfszFZKo5x668HImFNM5j/NOLjg67R8ADL4mOF25t3AcW5cVBNBCfNS25iqVRx6gSha8wEleCvMCNMsviJN5RFkPAiwdHMJRzEAObg+7KjboitOI6rjiIZ8kIzww/E6GYJjrBcwalExLTEvGWOHuJlW0irRe3TUqA41CA5iWGV4g73tTym4D234iOVtLF8WQVCMwV52x6YFrn5qqgvzB+gNMy8s5ZSILfCVGWwTSo0UabLa+8ywq7UKv0R7AiwZJ5+oc/aTkTB3ByXtO9scTYw+Folr20WFpIplhrg88wr6iOspQFwuOH+oGoaF2o/wAZezMQrmKMpA/d4x9RH6MqwgIbwqMPAwgQUCASyziGqgyqJYiBXEDetwgZTs5qLiLNkLULMViB21OgMZCVgBWKPcoryOtO4rLa4gvCH2Q3LiMWxDGarU0gzVpphUhJmrA83CzhjYEQK8CVbhARWsoqnxYhX3pTgTRiXVXFVwhBkomrscP0iA6QojmLwDGJXbUVvOd7lAWJ4WBzWCZAyxo8lPr/AHEkU5vkl1sYxOX1LIuZdolEYu5alxFywznuUSgL+JcbcRRgR8bZWIw25iWHTNk84cgKzQn3OZij4pa1llW6rlLbFei8HGY0KKQdPmDhxAJaNAWm15lWwPa65LYwN/Vbih1aguy3/krhEGseHcvKdYAU7KWyUVXfzwwkdEGlEGMli0C2+h44vZ5hRp2S71Z4hLLOdhkvvOdR60N27remLUv14hHmlVbdr/IfiVsP48K08J+WWN/1FC7Aj8xAbqYEVbZlFi6jZZ7LMECEsruFslXEAoX4RwJVHUEo28soDdlUcwxMwVXn4CszFPcwMMwFnCzcR0wFsYvmWy2JywG4ISKcYKlhvdO48DbqZlKgo6gw1zLAojxLgZUNvM5UFPYxKyj7jWUwiUUjOCIZCIrUmoMsVIU8IhX5SXkfYFMqMFOiSw8ZK+4uxH9SwsgTSR6jYmzeIp+O3mNjmFeKpCBwmgQKpxKtsWGyZzSiMyFhdgURiDpGTGTtGYqcNgZ80B9fL4aqW27dUBkcfuOHYMg3wDoy/wCuI0YK9oGxPTTFVjYOdTdEsM1zn1KW0Rm+0MItuvsFv9QgqOf4qUG5Yd1V9f7y3K+SkaS47N5/UQPNPMbP7IryGgvMT7XbjUZWjEyH6bZPv3LAuBTUh+v/AElx1GgYA8Kr7M6gN7B0nMR8OGEAYArJCXj0hZkMfuDIIpArEBdwMQrMoLmRD6papmFyRYSc
4Ag0ENFM1LM6YqtxcauCmGjp25jp5sIjcoGI8qlyQekfg7hq1xOQXk9oHE73shhsFXxCF90V5FYPuVZrpAn0s/cVwTTZJlCiP6/FwrPwtD9rlTTocmrfWCoAoBh/YPuPoVlrOsPXwcQ0s8i46kEhl1DEJ3FaZyRqNsWuYTwo1M5qFo+5mDNkra85Mem4fNJfo/1BltTAU3GBj1cSLapSxhrkK0/mWoDGZL6u/wDqKPAhlpKH0VGqRswI/c2Awrtdr3K013/GvMFapM0og8d3BlocOq5DIcPDEXpbEL9LtYX3H56BrlewHDO8Q5Mcaxba8ZX4uHvpMEoDuy1OJVUT723ayhiufqMWHoV/3ZCtecdR8bMtR2HfMFM0KblcoB3KW8yyTNkKHZSSuhJpwwuAzAsbRb5ghbqVUHEsO9bzU555yHqLhp6uDaF1bF4A4w3UBBDFTXEcUNxeYmnOIJbg1IDhzGrhaatlAioTwTbSIavExmEXlMvRdHiCGNpjTwnmM2gReadzA+AMMJzMhBfmBGrXUMgRidW+y1BgLriOg4HMssBBlGsbggTSFRWNa7mheJmzAGm4hdmmcnqHWYy0So5VMAwxCcr2A6ibMwo7Gwjwb/3MI3SymkhoRX+N02LKeZaaCaNHqFfZSaauEn12SKYX/dE2s4iv1NH3/HbU+0BUYpOWZWF5mZnUoKBccA6IaA5K4njLNSyjEtF0SkA9jiArG4EvQAq6lLm2zLwKc5Ykig6uHNJfTKnWHNXE2tzIYw3t/MPyRqqWOEFe4EiSmI+sXb4ZgdMBzzCvZOpaHc8pcKtviH0eVNSkvqt2eSbnWJs0wD9ohwY9wMI0Q9wKWeYJoWQBTgFt/wDszNiLu4UQ28SV7wQCoKIrUXWoED+pmEgwAqW2kRToeYFqFjSdTzYmAtMoXEAZMHLLZQltmBfEEyNwJQzMU3MbBFwI9RUpuIwt+4hQOs6r8S/0QRR4eZfdxSZURSMu3mxCJiwW9x6cDh5ICttYlaab/ii+pVLdSsbIPXUXLMMC3HT4l7hMjdTGXN8eUdlMFajryNMKsPwGIAKsxgjxC3ND3NrzWBLJVTGIpVD4iz9oQekM0kPCLCmEKIrwyJZZbgBQv3DgZmVqBHmQhtIKmAvwTkhayYpWMFZDEsK24GJW6iNqlD1pSQAnWAB7YzQEBwLz9q/qUZf3GpDlCkuewjjW3bUNlY1Uzxg9Swv7QEh4LJkJVcyyAYhVWKbwg6llSIu9H7YrUCeGXuri5S1DxMTUPtLrubVgl1bFy9HcNM1HeeJZXFTERgrsjCLHVwyO1xEW+WOAEVfMADSqrUYHIcoI8SsahYM2fI1+pr43Fh/i3x8GcE/Vg4IXTFU+ZgNkCyrRLrqVBZlG/thWtPB4IUUlEdkHEvacwqJKBW4g1pGJPYyCDiCl69wLLR7TJVGYjYfEYCDwQQdl2dkJFjCVHu4SHJyupVpZ4dxKIlPBL/FDll83pS78sHGtIztFikraz224ADhz2iKflxHQ+RlqOJmSiz8EuYJXyFLqA7wmgZJiSdOpu4TJODiAiWmWtQ7QtocEM5ijcsrYTbTEaG5SpMxhcERRxMoFka1sS41Q6qHZXSwkfaDQZUjcg2wjg1Gpcyago8x3fiUwKtQqMym5w4gx2zNBZr4BdRooVNxIqLQOLlhuUlZCYvECivlusRXjiCmMEeWICjT7Zn3KZW0W7b1MjFfmHDmA5qaLh08x+qUWlo9kF6gqIwsKm19wWyY0X3DioPCFDuUkHu2bgowljg8xXAWWMLrKXBqaM4n1LbuTqHQbpu464w0QSw6n3Ft7ZHaMsoK94g62NRbroNsZrpBGSPAwMtHTEQXU8QikWt4uKj8yguC7gMZUDkYlfLXUC9mWswVKA9yxw1EsgcQBAq5hW3MUkV4W8zKodEdFh8x82ZRIXMTjiBiAEGR+LwmzCKOZbhQQUjSoZiwMZxXIWVE9IX0ZiqhUnNnOa5/9xGBwaGKz
dw+OIlV/uXzqiQo0pTXvhlEiQOy0mziPX3BI6USvq71cacBw+o62QTF+RmRhHnccdjChmzMU03KcoRch7gkicXEoBPMVRl8wq3OzcYXjgwkuyNWu2jmMlAYs3H68MJF5pHNEAwbQqDoUwFVBEpDOuI1VnhuXyNQfpCmaXLFDDmA5InDEzkSsTLI/EJLHK1EZuiwcXLkStVKWaTZSXETbE8y8Xctka6dxKJVYMRC/qNQ4OYm8U1cKypp9/Ngeoko7lY2WJqXlBEjyqWmBMS4vUqHI+olbT4ybZW6xc+YOdRsEGIABYq60bl+Le0uz5z+ozQL6VX3K8dSIMlbdLzWpjGMEcKE5qBs7+Fx8LHfxHw6zBuJkvErcnyMTJNsC9oLfuLDczJWHLG9DMqpBOSr/APYBRabgCot3inRUKwF4bZjBSzD7gwrWREC9KACxJmqUR3krhINCK5mf5D668zMjXyoCUF8kBxXiAEtCzzFqnWx5S0508wZvzVmo5IXC1KpFh6oUdg84oaOBxKIhk0y7FTi5rqGzcZWybYlBLi1ypDtklydqVCjhAqonRv8AF0ISXOBBFGdwxA11NFsmSCLhuIsQadEEFsE6mNm2GmpR3LKhVEBReCOoMQ5lbPMQjKaMCwFJE5s9RioBiyuLtDmM5EaAYcmKKGKeC7tz4giS60tGTwhcW6NQMUvhuGk0CLVugJVmQW7lGP3CKjtX6Xgr8+mUZ53CBncANfLpjaOxeDRLVtyEcmJl+hFgtC4sbIgLx3YS8m9VAALLRoUpTgvLDqjdVUH/AGUms2kfz9sIXkoSjdiCObiAVNg4WLbn3FtprpKaYgrljdrhA5LA+4VQC2uyMYMdoDQFtlxndeeoAB9VxcDssmiR3ogcMqBSktGjBLbCnKZgpgV3UQW0QQBBh3tUSlVYKiAQ1fuIec/v4BEK5fhELXXULBuNnMrPM2FahrcKwYMLiBcxMRqZtKwFu36ibxFVnc0Y63OXxc7iZ1LYYniI5wMLVMwcQhYDEG9wY4RqKCJmMGIMIUF7mwVYoQ8QgtKNyk1KMWOXMDGADgOCb4HuLOWX/C6DxLfR0OlKp5pqCtvEuwZb5YBvso2AMsYttPA9MO6GnENC1w0y9C6lB3lhExtEkpy29sPzCcSi2BVFcvqCKYwVDNNVQyN8wDIp1xlP6ltVpBVEqO0yzEai21mLlgv1HtWqAZJ9TDFN8MYSCILeKqN3wDe4C0FMJRzMAcQizxBuCZRtVOYW9MTPl7loGyWyI1RKcniNSwVQ8S9duJeBSv7haTcqB7ILEFskEU0Q7t4km9xDdR+HOYyMdWVW66zA5gUYheaWbNwWOMwEczHUKy5jyyhG6NUhicQaVbzFQXyxmz4ynoMaAxBbTPqHKxiUCG4ZLhjauuTniYFV62N6vv8A1Aqa1g1Zlev9SjwmSMqDu+phWqGLpDAlLBk1aekgjcUVTAUbxFW/lPxCwzAdyaJWgMUkE2fbUznJksuZb/NbJC3LRRb1GrXKu4JUtpKlHVgX5lU+TuzJ7PRvEGMqVDooK8aazcdJoAb6XWjnzxzBbSUtq8wYS8PymGYFrmOi3tQjSFSub7PmNqC8O4g6YeUd8CujMyAmU8xk1XJiB7o8GIRMSgoeZlTM1AY4NBMGSEbtGKr+AYxYdK5qNWL0mXQAdlRSxRVbf1MPv2jpcyqhsiJ9yeNia+5dGJnQSmobZZV9TOyOpxEw5ibWJsJQy4AecwgwQ9WRqWWQ2Sx+EwqOoxV1EsdsrSJQLYjk8DFYqpxEjSvqP+DMzowlVgDWIwFrj1LGrhdRfEi8HMfvUZu+Icq3Eny/gLI2ELU+Zl70f7RJjOV7JQKEGc3vuIFngLlVV7RoL2u1g2HLExFohTNTZFvMSxlY4j9MI5YRxTm45miOiwCy5gQrrccOLd5ahVCNgblR4PIUx5KX3Em4I9w2QHiW2zMG7irwhtQYyiBIh5fNblFEdUljkMmSVYQcpFlaLpxH
VjRz3LObY2mHnPmWkU2DqcVDGOICLZbHcNlOJVPxx4ht5hoZlXEaFqZmNEIVL7mHPUEyIz8Ig8LlawqHUZdlHEalQI5AIeMOmuJa03TFjF1qLNAuuptGmT3GZslDxcRR9P4rQzVbjzzMvYjzmJVWUKOCZQwhUNi2KKyqbh7TuX8LmcsYuoBoj0Q1GUXKBvgjAYeInSs6Je6ahS61pmVQlsXLD4ENGN+YVYLZlW7eoYuSdRZ3CtUHLiUgppVjADDRCSTzLCFjV9cyl1iANxB9PeJXAbA2hZprzFA2kWYGH/cD4vCbPgpzcPPwF2EcRyQYSX0jPexkt7SOncAmnErra3KdZR1fFoqtbUvlDfwQKMr/ABXCqSi1HkgwyhXBMlLLSAE0AH8znMSCox/FCnEH/EOizeJzRahk8wVFnFTigGVNnLf3L+k3YtEttLxCLzDMsBuIzTHdmUVQkVFkx3KqrvojsOADjyDwGcQACjpV3g3y3LMYNHcE+e9WB/cbrVkaCucJEtRQTSbBtufoSqngxBbUGT7IvOqS/wDvEyvNEKn9wVpuo/b28zAYrGANLiMctiLtjVSgmCUVqGPjA05hrdyyY+UU0S2epQS4IhcqAtn8JEL3MzfqFgXiAKKmWSFxMFzMIz6RDB6ysUwWobd12a8vURUYRvTcpxcyipyOipVbg6li7/2Q1np8yNYhTcVQw8y7WY7blaKYZvVR1vUvS1QOKYRZQtirbiOz6ji53DUsBuYWmWYuVyXFhlFTsiqxuAlqxgFwF63UAbij3GxfGF8G4lAwrhIpMalAKCVVwsuq6gv7hYB813FwhtVp/ULKm05i2tD9cwtaaYUUo6ww0s1qUTdSrXMW6JTNgpStmD8xDfUNO9oz7iysplDtlIU7TBvECglFNNPuDltYbZgylQis21D9bzLJtcWHcxFdQWeYreIQW3UsEMRsOiWquGBuCIritBH/APFaDt6CYpUDQtfBz7YPYqUWJGNGWq30P2+IdEnNDgCFXRwl9xSni5WXQRcAx8jc76iDaro+FwNOnNQVaCBY0eWY0pHZOjqCNYgCDaq8GKrOteJQWWdeJcptKhLiLohmVbcb5tg27l2uJaRtMxXvqHmKZlttsvcYSKi03KYQXC1dQPEPiqeSDAdFrLPUFZxvtD3eYmukUrCjFxAkczAlZ3BtQi7NhfuLMCrpQOPial+JbMvLeZ9pd9yyuYDzG2jiYrcCiXUUq5xAErmEKOCDiGbJzOpkjcRAjGUKlYgEUjdkUEQuQwNrX4lC+DDLqxmHWGisSMCYcp7XeHB3cOKhLL3VtX9VEEbG84ncwx8bI/QwImzgtfHo5uGoeF5lrVTpuE6zz638ifiWKjDar8crBxVVE4W6Jo8TDa6YGJQvFxFo3FtZ3Dc0VhKhhYbgpMju4rUahMDKmBEEDBHC2FYsTKjDi4WMCZuDY9QJR0W4TsRkcS7nAZQfcjzEAGS0Ynh23d1qFTg2gPuVMO5d65QhhqGEvLy9zIzCi6+CKSJfEvxPuPRW2NGrgty4hdLMrJhZ1BQjCmpiS6jhviCXcC17ljuhnpr+5TDcLtx9w7hrXb6PMrEAaygAjkxqNXQ8VVW6qOepFvDEErG1fxsgr2JcQI3bZ/E2SmVts4OjEVnQo5pyx7L4YNB+4uxzMRtLBrJNXUtQfngPEaThwJvE3UxxdfwNQhtnDLa3LWLiJSiFgEw0hZSG/ZBRomNL7Spv7RkXcuqHq4mHbqIWaiGIMEVjcygcy2mWIy7l50zNy2LbSNkTtrhKRN4jGXpAlHMzXMEqXDMttlpzBHMyVShcsWJMiOcg9yo4GZblgeUpqmWiqWbvmYFWGxVMG2rGoj4XAvLhihezMBiKLhOGCrVbQ/DP+4tLMDjz+4csjOF9XPtmOI21FkqRXIwm6Lq+oED4TGomh7hWpXfusEv+CNew1/uW0dku11F0Fj+HR7btnpUdPUUJPW/q5ZnK2Q2zCtWFGUzzPbEotL+B
DUuWfwScx6OphcxSZjF4hsKpjGrmY/aLKq+4Kj9SpS2zqMJFnHcOFGqRipgBVxnA1gbhRDBPIcYozGBltbZxAHVQCaxvomZlupb1+5br4mVhV5m0rFRKICljXCvERVVyiRgaLxCIeopmtQFOZugwzJblQGCanvdSpyxJgXAlDuytxhocLQGkLI3xA7OOhRis5FmG2R0MO7hgWhC/BCVb8o1iIpWIjbRw2/CLE8st+pxBrw9ZgfO47WDYzR3BPIJY9yo8qjVpDMWqE9w8tECuMssslM5dPqDzmtSiGBeZbuWjy31NH4DuMcKo7kwepcH2Ze0YpsqKi/BLnJ7jKrrERpiGddxq5+iJ5LlNvdv+kcVm8o/cfWobFR2DTa3HnoOGuolTHhfcsX4P9w7UuNwTsgPc9JZ1A4ghAQo5g1bEVYKsxBSzDxMW2TWIYGVxK22G1LNSqRzBa5hzXEzDsiyOCBA5JWTJqINrbVi6hO1AMqw6yONKFf1K7jxUDZW+HUbLhfVQWWs1Aox/NENjIRDC7pBdqziA7d1OJStRXogunomcITXllxEhwE2buAbBXpjRc3Vli0R/jOmdnMHFxCK3Aw1qIWAXGB1MgKYmd/cdOpbevcLVMQrIRDofiAcFyl8F91PydzPLMu1DQEXAUzXpoO4nMWG8zh8Q2/CbeIqQjJTFJpFYHMorEAVghpaxGaseLEqLFZlYGTxUNYlZjbh1KUK4ZQNG4Nh1DQpzeGNTHdV6Fo+oYbiUdMAMISzZioFa/wAFJSHwsQXXmBbMhxCkHUalcgcHnmaYwTBjUpaO5S2AYGUZhCUpFlD4FDFtDfphMwuKaGDY5YSXjzFhDjmOy2M/E9RLcCmkBXUQm4uBgYdu4I2IAQVzROU0Zq/HcG1hleJ3BvFjbnEsvwRM4sJzBhuIbyoRdKQzH1GlEh4gBJErLpTxLFOiAY21KAlC78QFmHxTPmBX+Yt5IA9yo0hgCjTBWjCnuCzhS68wBpWVFkihl4jkvmZ3OZxFxLk8QYvEwKCw2gGYraJVPMwWLiRM4zGtdwyktaQcwaUOLFwlU4uAZGWXdiXuasKNR1cc38ZJNhLtzLNqOmjKGlsa3cCrl1Cka9x2FmNQ8ES2WxBRlqWT4iFUJV8wwjcUhw8/4P/Z'
| 24,012.75
| 83,153
| 0.933181
| 5,170
| 96,051
| 17.337137
| 0.591876
| 0.001807
| 0.001964
| 0.00241
| 0.013711
| 0.011491
| 0.010398
| 0.009427
| 0.007665
| 0.006995
| 0
| 0.156235
| 0.000104
| 96,051
| 4
| 83,153
| 24,012.75
| 0.777043
| 0
| 0
| 0
| 0
| 1
| 0.999771
| 0.999771
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
8645f8e9d939292413e317a167032e7f2e017c31
| 3,799
|
py
|
Python
|
tests/test_required.py
|
nobody-65534/click-constrained-option
|
db9d3cbcf551b888cf4f38717d864a9c1e4568a9
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_required.py
|
nobody-65534/click-constrained-option
|
db9d3cbcf551b888cf4f38717d864a9c1e4568a9
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_required.py
|
nobody-65534/click-constrained-option
|
db9d3cbcf551b888cf4f38717d864a9c1e4568a9
|
[
"BSD-3-Clause"
] | null | null | null |
import unittest
import click
from click.testing import CliRunner
from click_constrained_option import ConstrainedOption
class TestRequired(unittest.TestCase):
    """Exercise the ``required_*`` constraint keywords of ``ConstrainedOption``.

    Each test builds a tiny click command whose constrained option should
    become required under the supplied CLI arguments, invokes it with
    ``CliRunner``, and checks that click fails with a "Missing option" error.
    The invoke/assert boilerplate shared by every test lives in
    ``_assert_option_missing``.
    """

    def _assert_option_missing(self, cli, args, option):
        """Invoke *cli* with *args*; assert failure because *option* is missing."""
        result = CliRunner().invoke(cli, args)
        self.assertNotEqual(result.exit_code, 0)
        self.assertIn("Missing option '{}'".format(option), result.output)

    def test_required_func(self):
        # --b becomes required when the callable evaluates truthy for a's value.
        @click.command()
        @click.option("--a")
        @click.option("--b", cls=ConstrainedOption, required_func=lambda a: a == '0')
        def cli(**kwargs):
            click.echo(kwargs)

        self._assert_option_missing(cli, ["--a=0"], "--b")

    def test_required_if(self):
        # --b becomes required when --a is provided.
        @click.command()
        @click.option("--a")
        @click.option("--b", cls=ConstrainedOption, required_if="a")
        def cli(**kwargs):
            click.echo(kwargs)

        self._assert_option_missing(cli, ["--a=0"], "--b")

    def test_required_if_not(self):
        # --b becomes required when --a is absent.
        @click.command()
        @click.option("--a")
        @click.option("--b", cls=ConstrainedOption, required_if_not="a")
        def cli(**kwargs):
            click.echo(kwargs)

        self._assert_option_missing(cli, [], "--b")

    def test_required_if_all_of(self):
        # --c becomes required when both --a and --b are provided.
        @click.command()
        @click.option("--a")
        @click.option("--b")
        @click.option("--c", cls=ConstrainedOption, required_if_all_of=["a", "b"])
        def cli(**kwargs):
            click.echo(kwargs)

        self._assert_option_missing(cli, ["--a=0", "--b=1"], "--c")

    def test_required_if_none_of(self):
        # --c becomes required when neither --a nor --b is provided.
        @click.command()
        @click.option("--a")
        @click.option("--b")
        @click.option("--c", cls=ConstrainedOption, required_if_none_of=["a", "b"])
        def cli(**kwargs):
            click.echo(kwargs)

        self._assert_option_missing(cli, [], "--c")

    def test_required_if_any_of(self):
        # --c becomes required when at least one of --a/--b is provided.
        @click.command()
        @click.option("--a")
        @click.option("--b")
        @click.option("--c", cls=ConstrainedOption, required_if_any_of=["a", "b"])
        def cli(**kwargs):
            click.echo(kwargs)

        self._assert_option_missing(cli, ["--a=0"], "--c")

    def test_required_if_one_of(self):
        # --c becomes required when exactly one of --a/--b is provided.
        @click.command()
        @click.option("--a")
        @click.option("--b")
        @click.option("--c", cls=ConstrainedOption, required_if_one_of=["a", "b"])
        def cli(**kwargs):
            click.echo(kwargs)

        self._assert_option_missing(cli, ["--a=0"], "--c")

    def test_composition(self):
        # Several constraints may be combined on one option; any satisfied
        # constraint makes --e required.
        @click.command()
        @click.option("--a")
        @click.option("--b")
        @click.option("--c")
        @click.option("--d")
        @click.option("--e", cls=ConstrainedOption, required_if="a",
                      required_if_not="b", required_if_one_of=["c", "d"])
        def cli(**kwargs):
            click.echo(kwargs)

        self._assert_option_missing(cli, ["--a=0", "--c=1"], "--e")
# Allow running this test module directly (``python test_required.py``).
if __name__ == '__main__':
    unittest.main()
| 30.637097
| 120
| 0.578047
| 439
| 3,799
| 4.856492
| 0.111617
| 0.118668
| 0.060038
| 0.078799
| 0.856942
| 0.842402
| 0.842402
| 0.842402
| 0.842402
| 0.842402
| 0
| 0.005971
| 0.250592
| 3,799
| 123
| 121
| 30.886179
| 0.742887
| 0
| 0
| 0.702128
| 0
| 0
| 0.076862
| 0
| 0
| 0
| 0
| 0
| 0.170213
| 1
| 0.170213
| false
| 0
| 0.042553
| 0
| 0.223404
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
865b9e6740ed2523dc088d75e39ff2d180d83ced
| 2,704
|
py
|
Python
|
application/migrations/0044_auto_20200924_0915.py
|
City-of-Helsinki/events-helsinki-cms
|
64e4c1ce6cc058fb3783e417560dc244bd753d05
|
[
"MIT"
] | 2
|
2020-04-20T05:37:28.000Z
|
2021-02-19T10:33:45.000Z
|
application/migrations/0044_auto_20200924_0915.py
|
City-of-Helsinki/events-helsinki-cms
|
64e4c1ce6cc058fb3783e417560dc244bd753d05
|
[
"MIT"
] | 6
|
2020-02-12T12:55:37.000Z
|
2021-03-30T12:56:28.000Z
|
application/migrations/0044_auto_20200924_0915.py
|
City-of-Helsinki/events-helsinki-cms
|
64e4c1ce6cc058fb3783e417560dc244bd753d05
|
[
"MIT"
] | 1
|
2021-02-18T12:11:18.000Z
|
2021-02-18T12:11:18.000Z
|
# Generated by Django 2.2.9 on 2020-09-24 09:15
from django.db import migrations
import wagtail.core.fields
class Migration(migrations.Migration):
    """Convert the content/heading section fields of the about and
    accessibility pages to ``RichTextField`` in all three languages."""

    dependencies = [
        ('application', '0043_auto_20200924_0901'),
    ]

    # Each page model receives the same AlterField for its content and
    # heading sections in every language, so the twelve operations are
    # generated in nested loops instead of written out by hand.  Iteration
    # order (model -> section -> language) matches the generated original.
    operations = [
        migrations.AlterField(
            model_name=model,
            name=section + '_' + lang,
            field=wagtail.core.fields.RichTextField(
                verbose_name=label + ' ' + lang.upper()),
        )
        for model in ('aboutpage', 'accessibilitypage')
        for section, label in (('content_section', 'Sisältöäalue'),
                               ('heading_section', 'Ingressi'))
        for lang in ('en', 'fi', 'sv')
    ]
| 36.053333
| 84
| 0.614645
| 248
| 2,704
| 6.495968
| 0.169355
| 0.088765
| 0.137182
| 0.216015
| 0.893855
| 0.893855
| 0.893855
| 0.893855
| 0.761018
| 0.761018
| 0
| 0.015873
| 0.277737
| 2,704
| 74
| 85
| 36.540541
| 0.809012
| 0.016642
| 0
| 0.882353
| 1
| 0
| 0.211517
| 0.008656
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.029412
| 0
| 0.073529
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
865ef83a55daabdb553d3f0e1e136642f5f823de
| 1,834
|
py
|
Python
|
python_modules/dagster-airflow/dagster_airflow_tests/test_format_config.py
|
jake-billings/dagster
|
7a1548a1f246c48189f3d8109e831b744bceb7d4
|
[
"Apache-2.0"
] | 1
|
2019-07-15T17:34:04.000Z
|
2019-07-15T17:34:04.000Z
|
python_modules/dagster-airflow/dagster_airflow_tests/test_format_config.py
|
jake-billings/dagster
|
7a1548a1f246c48189f3d8109e831b744bceb7d4
|
[
"Apache-2.0"
] | null | null | null |
python_modules/dagster-airflow/dagster_airflow_tests/test_format_config.py
|
jake-billings/dagster
|
7a1548a1f246c48189f3d8109e831b744bceb7d4
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from dagster import check
from dagster_airflow.format import format_dict_for_graphql
def test_format_dict():
    """Exercise ``format_dict_for_graphql``: input validation and rendering.

    Verifies that non-dict inputs raise ``check.CheckError``, and that dicts
    are rendered as GraphQL-style text (keys emitted in sorted order, nested
    dicts and lists indented one level per depth).
    """
    # Non-dict inputs -- str, None, list, int -- are rejected up front by
    # dagster's check machinery.
    with pytest.raises(check.CheckError):
        format_dict_for_graphql('')
    with pytest.raises(check.CheckError):
        format_dict_for_graphql(None)
    with pytest.raises(check.CheckError):
        format_dict_for_graphql([])
    with pytest.raises(check.CheckError):
        format_dict_for_graphql(3)
    # Flat dictionaries; note 'baz' is emitted before 'foo' -- keys are sorted.
    assert format_dict_for_graphql({}) == '{\n}\n'
    assert format_dict_for_graphql({'foo': 'bar'}) == '{\n foo: "bar"\n}\n'
    assert (
        format_dict_for_graphql({'foo': 'bar', 'baz': 'quux'})
        == '{\n baz: "quux",\n foo: "bar"\n}\n'
    )
    # Nested dictionaries: string and int leaf values.
    assert format_dict_for_graphql({'foo': {'bar': 'baz', 'quux': 'bip'}}) == (
        '{\n' ' foo: {\n' ' bar: "baz",\n' ' quux: "bip"\n' ' }\n' '}\n'
    )
    assert format_dict_for_graphql({'foo': {'bar': 3, 'quux': 'bip'}}) == (
        '{\n' ' foo: {\n' ' bar: 3,\n' ' quux: "bip"\n' ' }\n' '}\n'
    )
    # Deep nesting: each level adds one indentation step.
    assert format_dict_for_graphql({'foo': {'bar': {'baz': {'quux': 'bip', 'bop': 'boop'}}}}) == (
        '{\n'
        ' foo: {\n'
        ' bar: {\n'
        ' baz: {\n'
        ' bop: "boop",\n'
        ' quux: "bip"\n'
        ' }\n'
        ' }\n'
        ' }\n'
        '}\n'
    )
    # Lists render one element per line; list elements may themselves be dicts.
    assert format_dict_for_graphql({'foo': {'bar': ['baz', 'quux']}}) == (
        '{\n' ' foo: {\n' ' bar: [\n' ' "baz",\n' ' "quux"\n' ' ]\n' ' }\n' '}\n'
    )
    assert format_dict_for_graphql({'foo': {'bar': ['baz', {'quux': 'ruux'}]}}) == (
        '{\n'
        ' foo: {\n'
        ' bar: [\n'
        ' "baz",\n'
        ' {\n'
        ' quux: "ruux"\n'
        ' }\n'
        ' ]\n'
        ' }\n'
        '}\n'
    )
| 27.373134
| 99
| 0.443293
| 210
| 1,834
| 3.671429
| 0.128571
| 0.049287
| 0.219196
| 0.337224
| 0.806744
| 0.766537
| 0.725032
| 0.675746
| 0.675746
| 0.631647
| 0
| 0.002427
| 0.326063
| 1,834
| 66
| 100
| 27.787879
| 0.621359
| 0
| 0
| 0.313725
| 0
| 0
| 0.292803
| 0
| 0
| 0
| 0
| 0
| 0.156863
| 1
| 0.019608
| true
| 0
| 0.058824
| 0
| 0.078431
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
86609a292d4a48f6aaaa10786ce0ec3b49c9f338
| 8,359
|
py
|
Python
|
Pilot Project ML.py
|
satvikdasarirajumml/MindfulML
|
b45c6c21dea7f8e4eda482bc99194a2b2fbb1ca5
|
[
"Apache-2.0"
] | null | null | null |
Pilot Project ML.py
|
satvikdasarirajumml/MindfulML
|
b45c6c21dea7f8e4eda482bc99194a2b2fbb1ca5
|
[
"Apache-2.0"
] | null | null | null |
Pilot Project ML.py
|
satvikdasarirajumml/MindfulML
|
b45c6c21dea7f8e4eda482bc99194a2b2fbb1ca5
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
"""Pilot ML project: random-forest classifiers on the Mental Health in Tech survey.

Three classification tasks share one feature set and one model configuration:
  1. predict willingness to raise mental health issues with an employer,
  2. predict whether a respondent sought treatment,
  3. predict a constructed "willing to discuss mh at the office" label.

The triplicated notebook-export boilerplate (feature list, model config,
train/evaluate code) is factored into shared helpers; behaviour of each task
is unchanged.
"""

import os
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import scipy.stats as ss

from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, balanced_accuracy_score
from sklearn.model_selection import train_test_split

# Setting RSEED allows for reproducible runs; otherwise each run would be
# random and have slightly different results.
RSEED = 1

# Hard-coded path from the original notebook -- adjust for your machine.
DATA_PATH = "C:\\Users\\satvi\\Downloads\\CleanedMentalHealthinTech.csv"

# Survey columns used as predictors in every task (individual tasks drop a
# few of these to avoid leaking the target).
X_COLS = [
    'num_employees', 'tech_company_or_role', 'comf_ff', 'mh_fam_hist',
    'mh_hist', 'mh_cur', 'age', 'gender', 'work_country', 'work_remote',
    'cep_benefits', 'cep_know_options', 'cep_discuss', 'cep_learn',
    'cep_anon', 'cep_mh_leave', 'cep_mh_ncsq', 'cep_ph_ncsq', 'cep_comf_cw',
    'cep_comf_sup', 'cep_serious', 'cep_others_ncsq', 'pep_have',
    'pep_benefits', 'pep_know_options', 'pep_discuss', 'pep_learn',
    'pep_anon', 'pep_mh_ncsq', 'pep_ph_ncsq', 'pep_comf_cw', 'pep_comf_sup',
    'pep_serious', 'pep_others_ncsq', 'hurt_career', 'cw_view_neg',
    'neg_response', 'work_affect_effect', 'work_affect_ineffect',
]


def _make_model():
    """Return the shared RandomForestClassifier configuration.

    The original notebook passed ``min_impurity_split=None`` (the default,
    removed in modern scikit-learn) and ``max_features='auto'`` (an alias for
    ``'sqrt'`` on classifiers, also removed); both are written here in their
    forward-compatible form without changing behaviour.
    """
    return RandomForestClassifier(
        bootstrap=True, class_weight='balanced', criterion='gini',
        max_depth=None, max_features='sqrt', max_leaf_nodes=None,
        min_impurity_decrease=0.0,
        min_samples_leaf=1, min_samples_split=2,
        min_weight_fraction_leaf=0.0, n_estimators=200, n_jobs=None,
        oob_score=False, random_state=1, verbose=0, warm_start=False)


def _run_classification(X, y):
    """Train and evaluate a random forest on (X, y).

    Prints the train/test accuracy scores and returns the fitted model plus
    a feature-importance DataFrame sorted in descending order.
    """
    # 25% of examples held out as test data (fixed split for reproducibility).
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.25, random_state=90)

    model = _make_model()
    model.fit(X_train, y_train)

    # Accuracy on data the model has already seen vs. new data.
    print('Random Forest Training Accuracy Score: '
          + str(accuracy_score(y_train, model.predict(X_train))))
    print('Random Forest Testing Accuracy Score: '
          + str(accuracy_score(y_test, model.predict(X_test))))

    importances = pd.DataFrame(
        {'feature': list(X_train.columns),
         'importance': model.feature_importances_}
    ).sort_values('importance', ascending=False)
    return model, importances


def _office_discussion_level(row):
    """Constructed label: is this respondent willing to discuss mental health
    disorders in the office place? (1 = yes, 2 = maybe, 3 = no)"""
    answers = (row['fep_mh_willing'], row['pep_comf_cw'], row['pep_comf_sup'])
    if 1 in answers:
        return 1
    # NOTE(review): pep_comf_sup == 3 is grouped with level 2 here, mirroring
    # the original notebook's branching -- confirm this is intentional.
    if 2 in answers or row['pep_comf_sup'] == 3:
        return 2
    return 3


df = pd.read_csv(DATA_PATH)

# Classification task 1: predict if one is willing to raise mental health
# issues to their employer.
_run_classification(df[X_COLS], df['fep_mh_willing'])

# Classification task 2: predict if one will seek treatment.  Drop the
# columns that leak the answer (own mental-health history/current state and
# the work-affect columns).
X2 = df[X_COLS].drop(
    ['mh_hist', 'mh_cur', 'work_affect_effect', 'work_affect_ineffect'],
    axis=1)
_run_classification(X2, df['sought_treat'])

# Classification task 3: predict the constructed mh_discuss_office feature.
# Drop two of the columns used to build it.
# NOTE(review): fep_mh_willing also feeds the constructed label but was not
# dropped in the original notebook -- kept as-is to preserve behaviour.
data = df.copy()
data['mh_discuss_office'] = data.apply(_office_discussion_level, axis=1)
X3 = data[X_COLS].drop(['pep_comf_cw', 'pep_comf_sup'], axis=1)
_run_classification(X3, data['mh_discuss_office'])
| 23.882857
| 185
| 0.686446
| 1,200
| 8,359
| 4.454167
| 0.186667
| 0.016838
| 0.022451
| 0.028064
| 0.84565
| 0.842657
| 0.832741
| 0.805613
| 0.794761
| 0.773433
| 0
| 0.01662
| 0.186625
| 8,359
| 349
| 186
| 23.951289
| 0.769525
| 0.130518
| 0
| 0.870192
| 0
| 0
| 0.279823
| 0.016053
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.076923
| 0
| 0.076923
| 0.028846
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
86a0f6c65b8cf21a71b18416c822948e8b6a3642
| 62,240
|
py
|
Python
|
msgraph/cli/command_modules/identitydirmgt/azext_identitydirmgt/generated/_help.py
|
microsoftgraph/msgraph-cli-archived
|
489f70bf4ede1ce67b84bfb31e66da3e4db76062
|
[
"MIT"
] | null | null | null |
msgraph/cli/command_modules/identitydirmgt/azext_identitydirmgt/generated/_help.py
|
microsoftgraph/msgraph-cli-archived
|
489f70bf4ede1ce67b84bfb31e66da3e4db76062
|
[
"MIT"
] | 22
|
2022-03-29T22:54:37.000Z
|
2022-03-29T22:55:27.000Z
|
msgraph/cli/command_modules/identitydirmgt/azext_identitydirmgt/generated/_help.py
|
microsoftgraph/msgraph-cli-archived
|
489f70bf4ede1ce67b84bfb31e66da3e4db76062
|
[
"MIT"
] | null | null | null |
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# pylint: disable=too-many-lines
from knack.help_files import helps
# Root help entry: the top-level 'identitydirmgt' command group.
helps['identitydirmgt'] = '''
type: group
short-summary: Manage Identity Directory Management
'''
# --- 'identitydirmgt contact-org-contact' group: CRUD for organizational contacts.
# create/update document the complex array arguments (key=value action syntax).
helps['identitydirmgt contact-org-contact'] = """
type: group
short-summary: Manage contact org contact with identitydirmgt
"""
helps['identitydirmgt contact-org-contact create-org-contact'] = """
type: command
short-summary: "Add new entity to contacts."
parameters:
- name: --addresses
long-summary: |
Usage: --addresses city=XX country-or-region=XX office-location=XX postal-code=XX state=XX street=XX
city: The city.
country-or-region: The country or region. It's a free-format string value, for example, 'United States'.
office-location: Office location such as building and office number for an organizational contact.
postal-code: The postal code.
state: The state.
street: The street.
Multiple actions can be specified by using more than one --addresses argument.
- name: --on-premises-provisioning-errors
long-summary: |
Usage: --on-premises-provisioning-errors category=XX occurred-date-time=XX property-causing-error=XX \
value=XX
category: Category of the provisioning error. Note: Currently, there is only one possible value. Possible \
value: PropertyConflict - indicates a property value is not unique. Other objects contain the same value for the \
property.
occurred-date-time: The date and time at which the error occurred.
property-causing-error: Name of the directory property causing the error. Current possible values: \
UserPrincipalName or ProxyAddress
value: Value of the property causing the error.
Multiple actions can be specified by using more than one --on-premises-provisioning-errors argument.
- name: --phones
long-summary: |
Usage: --phones language=XX number=XX region=XX type=XX
number: The phone number.
Multiple actions can be specified by using more than one --phones argument.
- name: --direct-reports
long-summary: |
Usage: --direct-reports deleted-date-time=XX id=XX
id: Read-only.
Multiple actions can be specified by using more than one --direct-reports argument.
- name: --manager
short-summary: "Represents an Azure Active Directory object. The directoryObject type is the base type for \
many other directory entity types."
long-summary: |
Usage: --manager deleted-date-time=XX id=XX
id: Read-only.
- name: --member-of
long-summary: |
Usage: --member-of deleted-date-time=XX id=XX
id: Read-only.
Multiple actions can be specified by using more than one --member-of argument.
- name: --transitive-member-of
long-summary: |
Usage: --transitive-member-of deleted-date-time=XX id=XX
id: Read-only.
Multiple actions can be specified by using more than one --transitive-member-of argument.
"""
helps['identitydirmgt contact-org-contact delete-org-contact'] = """
type: command
short-summary: "Delete entity from contacts."
"""
helps['identitydirmgt contact-org-contact list-org-contact'] = """
type: command
short-summary: "Get entities from contacts."
"""
helps['identitydirmgt contact-org-contact show-org-contact'] = """
type: command
short-summary: "Get entity from contacts by key."
"""
# update-org-contact repeats the same complex-argument help as create-org-contact.
helps['identitydirmgt contact-org-contact update-org-contact'] = """
type: command
short-summary: "Update entity in contacts."
parameters:
- name: --addresses
long-summary: |
Usage: --addresses city=XX country-or-region=XX office-location=XX postal-code=XX state=XX street=XX
city: The city.
country-or-region: The country or region. It's a free-format string value, for example, 'United States'.
office-location: Office location such as building and office number for an organizational contact.
postal-code: The postal code.
state: The state.
street: The street.
Multiple actions can be specified by using more than one --addresses argument.
- name: --on-premises-provisioning-errors
long-summary: |
Usage: --on-premises-provisioning-errors category=XX occurred-date-time=XX property-causing-error=XX \
value=XX
category: Category of the provisioning error. Note: Currently, there is only one possible value. Possible \
value: PropertyConflict - indicates a property value is not unique. Other objects contain the same value for the \
property.
occurred-date-time: The date and time at which the error occurred.
property-causing-error: Name of the directory property causing the error. Current possible values: \
UserPrincipalName or ProxyAddress
value: Value of the property causing the error.
Multiple actions can be specified by using more than one --on-premises-provisioning-errors argument.
- name: --phones
long-summary: |
Usage: --phones language=XX number=XX region=XX type=XX
number: The phone number.
Multiple actions can be specified by using more than one --phones argument.
- name: --direct-reports
long-summary: |
Usage: --direct-reports deleted-date-time=XX id=XX
id: Read-only.
Multiple actions can be specified by using more than one --direct-reports argument.
- name: --manager
short-summary: "Represents an Azure Active Directory object. The directoryObject type is the base type for \
many other directory entity types."
long-summary: |
Usage: --manager deleted-date-time=XX id=XX
id: Read-only.
- name: --member-of
long-summary: |
Usage: --member-of deleted-date-time=XX id=XX
id: Read-only.
Multiple actions can be specified by using more than one --member-of argument.
- name: --transitive-member-of
long-summary: |
Usage: --transitive-member-of deleted-date-time=XX id=XX
id: Read-only.
Multiple actions can be specified by using more than one --transitive-member-of argument.
"""
# --- 'identitydirmgt contact' group: directoryObject actions and navigation
# properties (direct reports, member-of, manager) on contacts.
helps['identitydirmgt contact'] = """
type: group
short-summary: Manage contact with identitydirmgt
"""
helps['identitydirmgt contact check-member-group'] = """
type: command
short-summary: "Invoke action checkMemberGroups."
"""
helps['identitydirmgt contact check-member-object'] = """
type: command
short-summary: "Invoke action checkMemberObjects."
"""
helps['identitydirmgt contact create-ref-direct-report'] = """
type: command
short-summary: "Create new navigation property ref to directReports for contacts."
"""
helps['identitydirmgt contact create-ref-member-of'] = """
type: command
short-summary: "Create new navigation property ref to memberOf for contacts."
"""
helps['identitydirmgt contact create-ref-transitive-member-of'] = """
type: command
short-summary: "Create new navigation property ref to transitiveMemberOf for contacts."
"""
helps['identitydirmgt contact delete-ref-manager'] = """
type: command
short-summary: "Delete ref of navigation property manager for contacts."
"""
helps['identitydirmgt contact delta'] = """
type: command
short-summary: "Invoke function delta."
"""
helps['identitydirmgt contact get-available-extension-property'] = """
type: command
short-summary: "Invoke action getAvailableExtensionProperties."
"""
helps['identitydirmgt contact get-by-id'] = """
type: command
short-summary: "Invoke action getByIds."
"""
helps['identitydirmgt contact get-member-group'] = """
type: command
short-summary: "Invoke action getMemberGroups."
"""
helps['identitydirmgt contact get-member-object'] = """
type: command
short-summary: "Invoke action getMemberObjects."
"""
helps['identitydirmgt contact list-direct-report'] = """
type: command
short-summary: "Get directReports from contacts."
"""
helps['identitydirmgt contact list-member-of'] = """
type: command
short-summary: "Get memberOf from contacts."
"""
helps['identitydirmgt contact list-ref-direct-report'] = """
type: command
short-summary: "Get ref of directReports from contacts."
"""
helps['identitydirmgt contact list-ref-member-of'] = """
type: command
short-summary: "Get ref of memberOf from contacts."
"""
helps['identitydirmgt contact list-ref-transitive-member-of'] = """
type: command
short-summary: "Get ref of transitiveMemberOf from contacts."
"""
helps['identitydirmgt contact list-transitive-member-of'] = """
type: command
short-summary: "Get transitiveMemberOf from contacts."
"""
helps['identitydirmgt contact restore'] = """
type: command
short-summary: "Invoke action restore."
"""
helps['identitydirmgt contact set-ref-manager'] = """
type: command
short-summary: "Update the ref of navigation property manager in contacts."
"""
helps['identitydirmgt contact show-manager'] = """
type: command
short-summary: "Get manager from contacts."
"""
helps['identitydirmgt contact show-ref-manager'] = """
type: command
short-summary: "Get ref of manager from contacts."
"""
helps['identitydirmgt contact validate-property'] = """
type: command
short-summary: "Invoke action validateProperties."
"""
# --- 'identitydirmgt contract-contract' group: CRUD on the contracts collection.
helps['identitydirmgt contract-contract'] = """
type: group
short-summary: Manage contract contract with identitydirmgt
"""
helps['identitydirmgt contract-contract create-contract'] = """
type: command
short-summary: "Add new entity to contracts."
"""
helps['identitydirmgt contract-contract delete-contract'] = """
type: command
short-summary: "Delete entity from contracts."
"""
helps['identitydirmgt contract-contract list-contract'] = """
type: command
short-summary: "Get entities from contracts."
"""
helps['identitydirmgt contract-contract show-contract'] = """
type: command
short-summary: "Get entity from contracts by key."
"""
helps['identitydirmgt contract-contract update-contract'] = """
type: command
short-summary: "Update entity in contracts."
"""
# --- 'identitydirmgt contract' group: directoryObject actions on contracts.
helps['identitydirmgt contract'] = """
type: group
short-summary: Manage contract with identitydirmgt
"""
helps['identitydirmgt contract check-member-group'] = """
type: command
short-summary: "Invoke action checkMemberGroups."
"""
helps['identitydirmgt contract check-member-object'] = """
type: command
short-summary: "Invoke action checkMemberObjects."
"""
helps['identitydirmgt contract get-available-extension-property'] = """
type: command
short-summary: "Invoke action getAvailableExtensionProperties."
"""
helps['identitydirmgt contract get-by-id'] = """
type: command
short-summary: "Invoke action getByIds."
"""
helps['identitydirmgt contract get-member-group'] = """
type: command
short-summary: "Invoke action getMemberGroups."
"""
helps['identitydirmgt contract get-member-object'] = """
type: command
short-summary: "Invoke action getMemberObjects."
"""
helps['identitydirmgt contract restore'] = """
type: command
short-summary: "Invoke action restore."
"""
helps['identitydirmgt contract validate-property'] = """
type: command
short-summary: "Invoke action validateProperties."
"""
# --- 'identitydirmgt device-device' group: CRUD on the devices collection.
# create/update document the complex array arguments (key=value action syntax).
helps['identitydirmgt device-device'] = """
type: group
short-summary: Manage device device with identitydirmgt
"""
helps['identitydirmgt device-device create-device'] = """
type: command
short-summary: "Add new entity to devices."
parameters:
- name: --alternative-security-ids
short-summary: "For internal use only. Not nullable."
long-summary: |
Usage: --alternative-security-ids identity-provider=XX key=XX type=XX
identity-provider: For internal use only
key: For internal use only
type: For internal use only
Multiple actions can be specified by using more than one --alternative-security-ids argument.
- name: --member-of
short-summary: "Groups that this group is a member of. HTTP Methods: GET (supported for all groups). \
Read-only. Nullable."
long-summary: |
Usage: --member-of deleted-date-time=XX id=XX
id: Read-only.
Multiple actions can be specified by using more than one --member-of argument.
- name: --registered-owners
short-summary: "The user that cloud joined the device or registered their personal device. The registered \
owner is set at the time of registration. Currently, there can be only one owner. Read-only. Nullable."
long-summary: |
Usage: --registered-owners deleted-date-time=XX id=XX
id: Read-only.
Multiple actions can be specified by using more than one --registered-owners argument.
- name: --registered-users
short-summary: "Collection of registered users of the device. For cloud joined devices and registered personal \
devices, registered users are set to the same value as registered owners at the time of registration. Read-only. \
Nullable."
long-summary: |
Usage: --registered-users deleted-date-time=XX id=XX
id: Read-only.
Multiple actions can be specified by using more than one --registered-users argument.
- name: --transitive-member-of
long-summary: |
Usage: --transitive-member-of deleted-date-time=XX id=XX
id: Read-only.
Multiple actions can be specified by using more than one --transitive-member-of argument.
- name: --extensions
short-summary: "The collection of open extensions defined for the device. Read-only. Nullable."
long-summary: |
Usage: --extensions id=XX
id: Read-only.
Multiple actions can be specified by using more than one --extensions argument.
"""
helps['identitydirmgt device-device delete-device'] = """
type: command
short-summary: "Delete entity from devices."
"""
helps['identitydirmgt device-device list-device'] = """
type: command
short-summary: "Get entities from devices."
"""
helps['identitydirmgt device-device show-device'] = """
type: command
short-summary: "Get entity from devices by key."
"""
# update-device repeats the same complex-argument help as create-device.
helps['identitydirmgt device-device update-device'] = """
type: command
short-summary: "Update entity in devices."
parameters:
- name: --alternative-security-ids
short-summary: "For internal use only. Not nullable."
long-summary: |
Usage: --alternative-security-ids identity-provider=XX key=XX type=XX
identity-provider: For internal use only
key: For internal use only
type: For internal use only
Multiple actions can be specified by using more than one --alternative-security-ids argument.
- name: --member-of
short-summary: "Groups that this group is a member of. HTTP Methods: GET (supported for all groups). \
Read-only. Nullable."
long-summary: |
Usage: --member-of deleted-date-time=XX id=XX
id: Read-only.
Multiple actions can be specified by using more than one --member-of argument.
- name: --registered-owners
short-summary: "The user that cloud joined the device or registered their personal device. The registered \
owner is set at the time of registration. Currently, there can be only one owner. Read-only. Nullable."
long-summary: |
Usage: --registered-owners deleted-date-time=XX id=XX
id: Read-only.
Multiple actions can be specified by using more than one --registered-owners argument.
- name: --registered-users
short-summary: "Collection of registered users of the device. For cloud joined devices and registered personal \
devices, registered users are set to the same value as registered owners at the time of registration. Read-only. \
Nullable."
long-summary: |
Usage: --registered-users deleted-date-time=XX id=XX
id: Read-only.
Multiple actions can be specified by using more than one --registered-users argument.
- name: --transitive-member-of
long-summary: |
Usage: --transitive-member-of deleted-date-time=XX id=XX
id: Read-only.
Multiple actions can be specified by using more than one --transitive-member-of argument.
- name: --extensions
short-summary: "The collection of open extensions defined for the device. Read-only. Nullable."
long-summary: |
Usage: --extensions id=XX
id: Read-only.
Multiple actions can be specified by using more than one --extensions argument.
"""
# --- 'identitydirmgt device' group: directoryObject actions and navigation
# properties (extensions, member-of, registered owners/users) on devices.
helps['identitydirmgt device'] = """
type: group
short-summary: Manage device with identitydirmgt
"""
helps['identitydirmgt device check-member-group'] = """
type: command
short-summary: "Invoke action checkMemberGroups."
"""
helps['identitydirmgt device check-member-object'] = """
type: command
short-summary: "Invoke action checkMemberObjects."
"""
helps['identitydirmgt device create-extension'] = """
type: command
short-summary: "Create new navigation property to extensions for devices."
"""
helps['identitydirmgt device create-ref-member-of'] = """
type: command
short-summary: "Create new navigation property ref to memberOf for devices."
"""
helps['identitydirmgt device create-ref-registered-owner'] = """
type: command
short-summary: "Create new navigation property ref to registeredOwners for devices."
"""
helps['identitydirmgt device create-ref-registered-user'] = """
type: command
short-summary: "Create new navigation property ref to registeredUsers for devices."
"""
helps['identitydirmgt device create-ref-transitive-member-of'] = """
type: command
short-summary: "Create new navigation property ref to transitiveMemberOf for devices."
"""
helps['identitydirmgt device delete-extension'] = """
type: command
short-summary: "Delete navigation property extensions for devices."
"""
helps['identitydirmgt device get-available-extension-property'] = """
type: command
short-summary: "Invoke action getAvailableExtensionProperties."
"""
helps['identitydirmgt device get-by-id'] = """
type: command
short-summary: "Invoke action getByIds."
"""
helps['identitydirmgt device get-member-group'] = """
type: command
short-summary: "Invoke action getMemberGroups."
"""
helps['identitydirmgt device get-member-object'] = """
type: command
short-summary: "Invoke action getMemberObjects."
"""
helps['identitydirmgt device list-extension'] = """
type: command
short-summary: "Get extensions from devices."
"""
helps['identitydirmgt device list-member-of'] = """
type: command
short-summary: "Get memberOf from devices."
"""
helps['identitydirmgt device list-ref-member-of'] = """
type: command
short-summary: "Get ref of memberOf from devices."
"""
helps['identitydirmgt device list-ref-registered-owner'] = """
type: command
short-summary: "Get ref of registeredOwners from devices."
"""
helps['identitydirmgt device list-ref-registered-user'] = """
type: command
short-summary: "Get ref of registeredUsers from devices."
"""
helps['identitydirmgt device list-ref-transitive-member-of'] = """
type: command
short-summary: "Get ref of transitiveMemberOf from devices."
"""
helps['identitydirmgt device list-registered-owner'] = """
type: command
short-summary: "Get registeredOwners from devices."
"""
helps['identitydirmgt device list-registered-user'] = """
type: command
short-summary: "Get registeredUsers from devices."
"""
helps['identitydirmgt device list-transitive-member-of'] = """
type: command
short-summary: "Get transitiveMemberOf from devices."
"""
helps['identitydirmgt device restore'] = """
type: command
short-summary: "Invoke action restore."
"""
helps['identitydirmgt device show-extension'] = """
type: command
short-summary: "Get extensions from devices."
"""
helps['identitydirmgt device update-extension'] = """
type: command
short-summary: "Update the navigation property extensions in devices."
"""
helps['identitydirmgt device validate-property'] = """
type: command
short-summary: "Invoke action validateProperties."
"""
# --- 'identitydirmgt directory-directory' group: the directory singleton.
helps['identitydirmgt directory-directory'] = """
type: group
short-summary: Manage directory directory with identitydirmgt
"""
helps['identitydirmgt directory-directory show-directory'] = """
type: command
short-summary: "Get directory."
"""
helps['identitydirmgt directory-directory update-directory'] = """
type: command
short-summary: "Update directory."
parameters:
- name: --deleted-items
short-summary: "Recently deleted items. Read-only. Nullable."
long-summary: |
Usage: --deleted-items deleted-date-time=XX id=XX
id: Read-only.
Multiple actions can be specified by using more than one --deleted-items argument.
"""
# --- 'identitydirmgt directory' group: administrativeUnits and deletedItems
# navigation properties of the directory singleton.
helps['identitydirmgt directory'] = """
type: group
short-summary: Manage directory with identitydirmgt
"""
helps['identitydirmgt directory create-administrative-unit'] = """
type: command
short-summary: "Create new navigation property to administrativeUnits for directory."
parameters:
- name: --members
short-summary: "Users and groups that are members of this Adminsitrative Unit. HTTP Methods: GET (list \
members), POST (add members), DELETE (remove members)."
long-summary: |
Usage: --members deleted-date-time=XX id=XX
id: Read-only.
Multiple actions can be specified by using more than one --members argument.
- name: --extensions
long-summary: |
Usage: --extensions id=XX
id: Read-only.
Multiple actions can be specified by using more than one --extensions argument.
"""
helps['identitydirmgt directory create-deleted-item'] = """
type: command
short-summary: "Create new navigation property to deletedItems for directory."
"""
helps['identitydirmgt directory delete-administrative-unit'] = """
type: command
short-summary: "Delete navigation property administrativeUnits for directory."
"""
helps['identitydirmgt directory delete-deleted-item'] = """
type: command
short-summary: "Delete navigation property deletedItems for directory."
"""
helps['identitydirmgt directory list-administrative-unit'] = """
type: command
short-summary: "Get administrativeUnits from directory."
"""
helps['identitydirmgt directory list-deleted-item'] = """
type: command
short-summary: "Get deletedItems from directory."
"""
helps['identitydirmgt directory show-administrative-unit'] = """
type: command
short-summary: "Get administrativeUnits from directory."
"""
helps['identitydirmgt directory show-deleted-item'] = """
type: command
short-summary: "Get deletedItems from directory."
"""
# update-administrative-unit repeats the same complex-argument help as the create command.
helps['identitydirmgt directory update-administrative-unit'] = """
type: command
short-summary: "Update the navigation property administrativeUnits in directory."
parameters:
- name: --members
short-summary: "Users and groups that are members of this Adminsitrative Unit. HTTP Methods: GET (list \
members), POST (add members), DELETE (remove members)."
long-summary: |
Usage: --members deleted-date-time=XX id=XX
id: Read-only.
Multiple actions can be specified by using more than one --members argument.
- name: --extensions
long-summary: |
Usage: --extensions id=XX
id: Read-only.
Multiple actions can be specified by using more than one --extensions argument.
"""
helps['identitydirmgt directory update-deleted-item'] = """
type: command
short-summary: "Update the navigation property deletedItems in directory."
"""
# --- 'identitydirmgt directory-administrative-unit' group: extensions, members
# and scoped role members of an administrative unit.
helps['identitydirmgt directory-administrative-unit'] = """
type: group
short-summary: Manage directory administrative unit with identitydirmgt
"""
helps['identitydirmgt directory-administrative-unit create-extension'] = """
type: command
short-summary: "Create new navigation property to extensions for directory."
"""
helps['identitydirmgt directory-administrative-unit create-ref-member'] = """
type: command
short-summary: "Create new navigation property ref to members for directory."
"""
helps['identitydirmgt directory-administrative-unit create-scoped-role-member'] = """
type: command
short-summary: "Create new navigation property to scopedRoleMembers for directory."
parameters:
- name: --role-member-info
short-summary: "identity"
long-summary: |
Usage: --role-member-info display-name=XX id=XX
display-name: The identity's display name. Note that this may not always be available or up to date. For \
example, if a user changes their display name, the API may show the new value in a future response, but the items \
associated with the user won't show up as having changed when using delta.
id: Unique identifier for the identity.
"""
helps['identitydirmgt directory-administrative-unit delete-extension'] = """
type: command
short-summary: "Delete navigation property extensions for directory."
"""
helps['identitydirmgt directory-administrative-unit delete-scoped-role-member'] = """
type: command
short-summary: "Delete navigation property scopedRoleMembers for directory."
"""
helps['identitydirmgt directory-administrative-unit delta'] = """
type: command
short-summary: "Invoke function delta."
"""
helps['identitydirmgt directory-administrative-unit list-extension'] = """
type: command
short-summary: "Get extensions from directory."
"""
helps['identitydirmgt directory-administrative-unit list-member'] = """
type: command
short-summary: "Get members from directory."
"""
helps['identitydirmgt directory-administrative-unit list-ref-member'] = """
type: command
short-summary: "Get ref of members from directory."
"""
helps['identitydirmgt directory-administrative-unit list-scoped-role-member'] = """
type: command
short-summary: "Get scopedRoleMembers from directory."
"""
helps['identitydirmgt directory-administrative-unit show-extension'] = """
type: command
short-summary: "Get extensions from directory."
"""
helps['identitydirmgt directory-administrative-unit show-scoped-role-member'] = """
type: command
short-summary: "Get scopedRoleMembers from directory."
"""
helps['identitydirmgt directory-administrative-unit update-extension'] = """
type: command
short-summary: "Update the navigation property extensions in directory."
"""
# update-scoped-role-member repeats the same --role-member-info help as the create command.
helps['identitydirmgt directory-administrative-unit update-scoped-role-member'] = """
type: command
short-summary: "Update the navigation property scopedRoleMembers in directory."
parameters:
- name: --role-member-info
short-summary: "identity"
long-summary: |
Usage: --role-member-info display-name=XX id=XX
display-name: The identity's display name. Note that this may not always be available or up to date. For \
example, if a user changes their display name, the API may show the new value in a future response, but the items \
associated with the user won't show up as having changed when using delta.
id: Unique identifier for the identity.
"""
# --- 'identitydirmgt directory-role-directory-role' group: CRUD on directoryRoles.
helps['identitydirmgt directory-role-directory-role'] = """
type: group
short-summary: Manage directory role directory role with identitydirmgt
"""
helps['identitydirmgt directory-role-directory-role create-directory-role'] = """
type: command
short-summary: "Add new entity to directoryRoles."
parameters:
- name: --members
short-summary: "Users that are members of this directory role. HTTP Methods: GET, POST, DELETE. Read-only. \
Nullable."
long-summary: |
Usage: --members deleted-date-time=XX id=XX
id: Read-only.
Multiple actions can be specified by using more than one --members argument.
"""
helps['identitydirmgt directory-role-directory-role delete-directory-role'] = """
type: command
short-summary: "Delete entity from directoryRoles."
"""
helps['identitydirmgt directory-role-directory-role list-directory-role'] = """
type: command
short-summary: "Get entities from directoryRoles."
"""
helps['identitydirmgt directory-role-directory-role show-directory-role'] = """
type: command
short-summary: "Get entity from directoryRoles by key."
"""
# update-directory-role repeats the same --members help as the create command.
helps['identitydirmgt directory-role-directory-role update-directory-role'] = """
type: command
short-summary: "Update entity in directoryRoles."
parameters:
- name: --members
short-summary: "Users that are members of this directory role. HTTP Methods: GET, POST, DELETE. Read-only. \
Nullable."
long-summary: |
Usage: --members deleted-date-time=XX id=XX
id: Read-only.
Multiple actions can be specified by using more than one --members argument.
"""
# --- 'identitydirmgt directory-role' group: directoryObject actions, members
# and scoped members of a directory role.
helps['identitydirmgt directory-role'] = """
type: group
short-summary: Manage directory role with identitydirmgt
"""
helps['identitydirmgt directory-role check-member-group'] = """
type: command
short-summary: "Invoke action checkMemberGroups."
"""
helps['identitydirmgt directory-role check-member-object'] = """
type: command
short-summary: "Invoke action checkMemberObjects."
"""
helps['identitydirmgt directory-role create-ref-member'] = """
type: command
short-summary: "Create new navigation property ref to members for directoryRoles."
"""
helps['identitydirmgt directory-role create-scoped-member'] = """
type: command
short-summary: "Create new navigation property to scopedMembers for directoryRoles."
parameters:
- name: --role-member-info
short-summary: "identity"
long-summary: |
Usage: --role-member-info display-name=XX id=XX
display-name: The identity's display name. Note that this may not always be available or up to date. For \
example, if a user changes their display name, the API may show the new value in a future response, but the items \
associated with the user won't show up as having changed when using delta.
id: Unique identifier for the identity.
"""
helps['identitydirmgt directory-role delete-scoped-member'] = """
type: command
short-summary: "Delete navigation property scopedMembers for directoryRoles."
"""
helps['identitydirmgt directory-role delta'] = """
type: command
short-summary: "Invoke function delta."
"""
helps['identitydirmgt directory-role get-available-extension-property'] = """
type: command
short-summary: "Invoke action getAvailableExtensionProperties."
"""
helps['identitydirmgt directory-role get-by-id'] = """
type: command
short-summary: "Invoke action getByIds."
"""
helps['identitydirmgt directory-role get-member-group'] = """
type: command
short-summary: "Invoke action getMemberGroups."
"""
helps['identitydirmgt directory-role get-member-object'] = """
type: command
short-summary: "Invoke action getMemberObjects."
"""
helps['identitydirmgt directory-role list-member'] = """
type: command
short-summary: "Get members from directoryRoles."
"""
helps['identitydirmgt directory-role list-ref-member'] = """
type: command
short-summary: "Get ref of members from directoryRoles."
"""
helps['identitydirmgt directory-role list-scoped-member'] = """
type: command
short-summary: "Get scopedMembers from directoryRoles."
"""
helps['identitydirmgt directory-role restore'] = """
type: command
short-summary: "Invoke action restore."
"""
helps['identitydirmgt directory-role show-scoped-member'] = """
type: command
short-summary: "Get scopedMembers from directoryRoles."
"""
# update-scoped-member repeats the same --role-member-info help as the create command.
helps['identitydirmgt directory-role update-scoped-member'] = """
type: command
short-summary: "Update the navigation property scopedMembers in directoryRoles."
parameters:
- name: --role-member-info
short-summary: "identity"
long-summary: |
Usage: --role-member-info display-name=XX id=XX
display-name: The identity's display name. Note that this may not always be available or up to date. For \
example, if a user changes their display name, the API may show the new value in a future response, but the items \
associated with the user won't show up as having changed when using delta.
id: Unique identifier for the identity.
"""
helps['identitydirmgt directory-role validate-property'] = """
type: command
short-summary: "Invoke action validateProperties."
"""
# --- 'identitydirmgt directory-role-template-directory-role-template' group:
# CRUD on directoryRoleTemplates.
helps['identitydirmgt directory-role-template-directory-role-template'] = """
type: group
short-summary: Manage directory role template directory role template with identitydirmgt
"""
helps['identitydirmgt directory-role-template-directory-role-template create-directory-role-template'] = """
type: command
short-summary: "Add new entity to directoryRoleTemplates."
"""
helps['identitydirmgt directory-role-template-directory-role-template delete-directory-role-template'] = """
type: command
short-summary: "Delete entity from directoryRoleTemplates."
"""
helps['identitydirmgt directory-role-template-directory-role-template list-directory-role-template'] = """
type: command
short-summary: "Get entities from directoryRoleTemplates."
"""
helps['identitydirmgt directory-role-template-directory-role-template show-directory-role-template'] = """
type: command
short-summary: "Get entity from directoryRoleTemplates by key."
"""
helps['identitydirmgt directory-role-template-directory-role-template update-directory-role-template'] = """
type: command
short-summary: "Update entity in directoryRoleTemplates."
"""
# --- 'identitydirmgt directory-role-template' group: directoryObject actions
# on directory role templates.
helps['identitydirmgt directory-role-template'] = """
type: group
short-summary: Manage directory role template with identitydirmgt
"""
helps['identitydirmgt directory-role-template check-member-group'] = """
type: command
short-summary: "Invoke action checkMemberGroups."
"""
helps['identitydirmgt directory-role-template check-member-object'] = """
type: command
short-summary: "Invoke action checkMemberObjects."
"""
helps['identitydirmgt directory-role-template get-available-extension-property'] = """
type: command
short-summary: "Invoke action getAvailableExtensionProperties."
"""
helps['identitydirmgt directory-role-template get-by-id'] = """
type: command
short-summary: "Invoke action getByIds."
"""
helps['identitydirmgt directory-role-template get-member-group'] = """
type: command
short-summary: "Invoke action getMemberGroups."
"""
helps['identitydirmgt directory-role-template get-member-object'] = """
type: command
short-summary: "Invoke action getMemberObjects."
"""
helps['identitydirmgt directory-role-template restore'] = """
type: command
short-summary: "Invoke action restore."
"""
helps['identitydirmgt directory-role-template validate-property'] = """
type: command
short-summary: "Invoke action validateProperties."
"""
# --- 'identitydirmgt domain-domain' group: CRUD on the domains collection.
helps['identitydirmgt domain-domain'] = """
type: group
short-summary: Manage domain domain with identitydirmgt
"""
helps['identitydirmgt domain-domain create-domain'] = """
type: command
short-summary: "Add new entity to domains."
parameters:
- name: --state
short-summary: "domainState"
long-summary: |
Usage: --state last-action-date-time=XX operation=XX status=XX
last-action-date-time: Timestamp for when the last activity occurred. The value is updated when an \
operation is scheduled, the asynchronous task starts, and when the operation completes.
operation: Type of asynchronous operation. The values can be ForceDelete or Verification
status: Current status of the operation. Scheduled - Operation has been scheduled but has not started. \
InProgress - Task has started and is in progress. Failed - Operation has failed.
- name: --domain-name-references
short-summary: "Read-only, Nullable"
long-summary: |
Usage: --domain-name-references deleted-date-time=XX id=XX
id: Read-only.
Multiple actions can be specified by using more than one --domain-name-references argument.
- name: --service-configuration-records
short-summary: "DNS records the customer adds to the DNS zone file of the domain before the domain can be used \
by Microsoft Online services.Read-only, Nullable"
long-summary: |
Usage: --service-configuration-records is-optional=XX label=XX record-type=XX supported-service=XX ttl=XX \
id=XX
is-optional: If false, this record must be configured by the customer at the DNS host for Microsoft Online \
Services to operate correctly with the domain.
label: Value used when configuring the name of the DNS record at the DNS host.
record-type: Indicates what type of DNS record this entity represents.The value can be one of the \
following: CName, Mx, Srv, TxtKey
supported-service: Microsoft Online Service or feature that has a dependency on this DNS record.Can be one \
of the following values: null, Email, Sharepoint, EmailInternalRelayOnly, OfficeCommunicationsOnline, \
SharePointDefaultDomain, FullRedelegation, SharePointPublic, OrgIdAuthentication, Yammer, Intune
ttl: Value to use when configuring the time-to-live (ttl) property of the DNS record at the DNS host. Not \
nullable
id: Read-only.
Multiple actions can be specified by using more than one --service-configuration-records argument.
- name: --verification-dns-records
short-summary: "DNS records that the customer adds to the DNS zone file of the domain before the customer can \
complete domain ownership verification with Azure AD.Read-only, Nullable"
long-summary: |
Usage: --verification-dns-records is-optional=XX label=XX record-type=XX supported-service=XX ttl=XX id=XX
is-optional: If false, this record must be configured by the customer at the DNS host for Microsoft Online \
Services to operate correctly with the domain.
label: Value used when configuring the name of the DNS record at the DNS host.
record-type: Indicates what type of DNS record this entity represents.The value can be one of the \
following: CName, Mx, Srv, TxtKey
supported-service: Microsoft Online Service or feature that has a dependency on this DNS record.Can be one \
of the following values: null, Email, Sharepoint, EmailInternalRelayOnly, OfficeCommunicationsOnline, \
SharePointDefaultDomain, FullRedelegation, SharePointPublic, OrgIdAuthentication, Yammer, Intune
ttl: Value to use when configuring the time-to-live (ttl) property of the DNS record at the DNS host. Not \
nullable
id: Read-only.
Multiple actions can be specified by using more than one --verification-dns-records argument.
"""
helps['identitydirmgt domain-domain delete-domain'] = """
type: command
short-summary: "Delete entity from domains."
"""
helps['identitydirmgt domain-domain list-domain'] = """
type: command
short-summary: "Get entities from domains."
"""
helps['identitydirmgt domain-domain show-domain'] = """
type: command
short-summary: "Get entity from domains by key."
"""
helps['identitydirmgt domain-domain update-domain'] = """
type: command
short-summary: "Update entity in domains."
parameters:
- name: --state
short-summary: "domainState"
long-summary: |
Usage: --state last-action-date-time=XX operation=XX status=XX
last-action-date-time: Timestamp for when the last activity occurred. The value is updated when an \
operation is scheduled, the asynchronous task starts, and when the operation completes.
operation: Type of asynchronous operation. The values can be ForceDelete or Verification
status: Current status of the operation. Scheduled - Operation has been scheduled but has not started. \
InProgress - Task has started and is in progress. Failed - Operation has failed.
- name: --domain-name-references
short-summary: "Read-only, Nullable"
long-summary: |
Usage: --domain-name-references deleted-date-time=XX id=XX
id: Read-only.
Multiple actions can be specified by using more than one --domain-name-references argument.
- name: --service-configuration-records
short-summary: "DNS records the customer adds to the DNS zone file of the domain before the domain can be used \
by Microsoft Online services.Read-only, Nullable"
long-summary: |
Usage: --service-configuration-records is-optional=XX label=XX record-type=XX supported-service=XX ttl=XX \
id=XX
is-optional: If false, this record must be configured by the customer at the DNS host for Microsoft Online \
Services to operate correctly with the domain.
label: Value used when configuring the name of the DNS record at the DNS host.
record-type: Indicates what type of DNS record this entity represents.The value can be one of the \
following: CName, Mx, Srv, TxtKey
supported-service: Microsoft Online Service or feature that has a dependency on this DNS record.Can be one \
of the following values: null, Email, Sharepoint, EmailInternalRelayOnly, OfficeCommunicationsOnline, \
SharePointDefaultDomain, FullRedelegation, SharePointPublic, OrgIdAuthentication, Yammer, Intune
ttl: Value to use when configuring the time-to-live (ttl) property of the DNS record at the DNS host. Not \
nullable
id: Read-only.
Multiple actions can be specified by using more than one --service-configuration-records argument.
- name: --verification-dns-records
short-summary: "DNS records that the customer adds to the DNS zone file of the domain before the customer can \
complete domain ownership verification with Azure AD.Read-only, Nullable"
long-summary: |
Usage: --verification-dns-records is-optional=XX label=XX record-type=XX supported-service=XX ttl=XX id=XX
is-optional: If false, this record must be configured by the customer at the DNS host for Microsoft Online \
Services to operate correctly with the domain.
label: Value used when configuring the name of the DNS record at the DNS host.
record-type: Indicates what type of DNS record this entity represents.The value can be one of the \
following: CName, Mx, Srv, TxtKey
supported-service: Microsoft Online Service or feature that has a dependency on this DNS record.Can be one \
of the following values: null, Email, Sharepoint, EmailInternalRelayOnly, OfficeCommunicationsOnline, \
SharePointDefaultDomain, FullRedelegation, SharePointPublic, OrgIdAuthentication, Yammer, Intune
ttl: Value to use when configuring the time-to-live (ttl) property of the DNS record at the DNS host. Not \
nullable
id: Read-only.
Multiple actions can be specified by using more than one --verification-dns-records argument.
"""
helps['identitydirmgt domain'] = """
type: group
short-summary: Manage domain with identitydirmgt
"""
helps['identitydirmgt domain create-ref-domain-name-reference'] = """
type: command
short-summary: "Create new navigation property ref to domainNameReferences for domains."
"""
helps['identitydirmgt domain create-service-configuration-record'] = """
type: command
short-summary: "Create new navigation property to serviceConfigurationRecords for domains."
"""
helps['identitydirmgt domain create-verification-dns-record'] = """
type: command
short-summary: "Create new navigation property to verificationDnsRecords for domains."
"""
helps['identitydirmgt domain delete-service-configuration-record'] = """
type: command
short-summary: "Delete navigation property serviceConfigurationRecords for domains."
"""
helps['identitydirmgt domain delete-verification-dns-record'] = """
type: command
short-summary: "Delete navigation property verificationDnsRecords for domains."
"""
helps['identitydirmgt domain force-delete'] = """
type: command
short-summary: "Invoke action forceDelete."
"""
helps['identitydirmgt domain list-domain-name-reference'] = """
type: command
short-summary: "Get domainNameReferences from domains."
"""
helps['identitydirmgt domain list-ref-domain-name-reference'] = """
type: command
short-summary: "Get ref of domainNameReferences from domains."
"""
helps['identitydirmgt domain list-service-configuration-record'] = """
type: command
short-summary: "Get serviceConfigurationRecords from domains."
"""
helps['identitydirmgt domain list-verification-dns-record'] = """
type: command
short-summary: "Get verificationDnsRecords from domains."
"""
helps['identitydirmgt domain show-service-configuration-record'] = """
type: command
short-summary: "Get serviceConfigurationRecords from domains."
"""
helps['identitydirmgt domain show-verification-dns-record'] = """
type: command
short-summary: "Get verificationDnsRecords from domains."
"""
helps['identitydirmgt domain update-service-configuration-record'] = """
type: command
short-summary: "Update the navigation property serviceConfigurationRecords in domains."
"""
helps['identitydirmgt domain update-verification-dns-record'] = """
type: command
short-summary: "Update the navigation property verificationDnsRecords in domains."
"""
helps['identitydirmgt domain verify'] = """
type: command
short-summary: "Invoke action verify."
"""
helps['identitydirmgt organization-organization'] = """
type: group
short-summary: Manage organization organization with identitydirmgt
"""
helps['identitydirmgt organization-organization create-organization'] = """
type: command
short-summary: "Add new entity to organization."
parameters:
- name: --assigned-plans
short-summary: "The collection of service plans associated with the tenant. Not nullable."
long-summary: |
Usage: --assigned-plans assigned-date-time=XX capability-status=XX service=XX service-plan-id=XX
assigned-date-time: The date and time at which the plan was assigned; for example: 2013-01-02T19:32:30Z. \
The Timestamp type represents date and time information using ISO 8601 format and is always in UTC time. For example, \
midnight UTC on Jan 1, 2014 would look like this: '2014-01-01T00:00:00Z'
capability-status: For example, 'Enabled'.
service: The name of the service; for example, 'Exchange'.
service-plan-id: A GUID that identifies the service plan.
Multiple actions can be specified by using more than one --assigned-plans argument.
- name: --privacy-profile
short-summary: "privacyProfile"
long-summary: |
Usage: --privacy-profile contact-email=XX statement-url=XX
contact-email: A valid smtp email address for the privacy statement contact. Not required.
statement-url: A valid URL format that begins with http:// or https://. Maximum length is 255 characters. \
The URL that directs to the company's privacy statement. Not required.
- name: --provisioned-plans
short-summary: "Not nullable."
long-summary: |
Usage: --provisioned-plans capability-status=XX provisioning-status=XX service=XX
capability-status: For example, 'Enabled'.
provisioning-status: For example, 'Success'.
service: The name of the service; for example, 'AccessControlS2S'
Multiple actions can be specified by using more than one --provisioned-plans argument.
- name: --verified-domains
short-summary: "The collection of domains associated with this tenant. Not nullable."
long-summary: |
Usage: --verified-domains capabilities=XX is-default=XX is-initial=XX name=XX type=XX
capabilities: For example, 'Email', 'OfficeCommunicationsOnline'.
is-default: true if this is the default domain associated with the tenant; otherwise, false.
is-initial: true if this is the initial domain associated with the tenant; otherwise, false
name: The domain name; for example, 'contoso.onmicrosoft.com'
type: For example, 'Managed'.
Multiple actions can be specified by using more than one --verified-domains argument.
- name: --extensions
short-summary: "The collection of open extensions defined for the organization. Read-only. Nullable."
long-summary: |
Usage: --extensions id=XX
id: Read-only.
Multiple actions can be specified by using more than one --extensions argument.
"""
helps['identitydirmgt organization-organization delete-organization'] = """
type: command
short-summary: "Delete entity from organization."
"""
helps['identitydirmgt organization-organization list-organization'] = """
type: command
short-summary: "Get entities from organization."
"""
helps['identitydirmgt organization-organization show-organization'] = """
type: command
short-summary: "Get entity from organization by key."
"""
helps['identitydirmgt organization-organization update-organization'] = """
type: command
short-summary: "Update entity in organization."
parameters:
- name: --assigned-plans
short-summary: "The collection of service plans associated with the tenant. Not nullable."
long-summary: |
Usage: --assigned-plans assigned-date-time=XX capability-status=XX service=XX service-plan-id=XX
assigned-date-time: The date and time at which the plan was assigned; for example: 2013-01-02T19:32:30Z. \
The Timestamp type represents date and time information using ISO 8601 format and is always in UTC time. For example, \
midnight UTC on Jan 1, 2014 would look like this: '2014-01-01T00:00:00Z'
capability-status: For example, 'Enabled'.
service: The name of the service; for example, 'Exchange'.
service-plan-id: A GUID that identifies the service plan.
Multiple actions can be specified by using more than one --assigned-plans argument.
- name: --privacy-profile
short-summary: "privacyProfile"
long-summary: |
Usage: --privacy-profile contact-email=XX statement-url=XX
contact-email: A valid smtp email address for the privacy statement contact. Not required.
statement-url: A valid URL format that begins with http:// or https://. Maximum length is 255 characters. \
The URL that directs to the company's privacy statement. Not required.
- name: --provisioned-plans
short-summary: "Not nullable."
long-summary: |
Usage: --provisioned-plans capability-status=XX provisioning-status=XX service=XX
capability-status: For example, 'Enabled'.
provisioning-status: For example, 'Success'.
service: The name of the service; for example, 'AccessControlS2S'
Multiple actions can be specified by using more than one --provisioned-plans argument.
- name: --verified-domains
short-summary: "The collection of domains associated with this tenant. Not nullable."
long-summary: |
Usage: --verified-domains capabilities=XX is-default=XX is-initial=XX name=XX type=XX
capabilities: For example, 'Email', 'OfficeCommunicationsOnline'.
is-default: true if this is the default domain associated with the tenant; otherwise, false.
is-initial: true if this is the initial domain associated with the tenant; otherwise, false
name: The domain name; for example, 'contoso.onmicrosoft.com'
type: For example, 'Managed'.
Multiple actions can be specified by using more than one --verified-domains argument.
- name: --extensions
short-summary: "The collection of open extensions defined for the organization. Read-only. Nullable."
long-summary: |
Usage: --extensions id=XX
id: Read-only.
Multiple actions can be specified by using more than one --extensions argument.
"""
helps['identitydirmgt organization'] = """
type: group
short-summary: Manage organization with identitydirmgt
"""
helps['identitydirmgt organization check-member-group'] = """
type: command
short-summary: "Invoke action checkMemberGroups."
"""
helps['identitydirmgt organization check-member-object'] = """
type: command
short-summary: "Invoke action checkMemberObjects."
"""
helps['identitydirmgt organization create-extension'] = """
type: command
short-summary: "Create new navigation property to extensions for organization."
"""
helps['identitydirmgt organization delete-extension'] = """
type: command
short-summary: "Delete navigation property extensions for organization."
"""
helps['identitydirmgt organization get-available-extension-property'] = """
type: command
short-summary: "Invoke action getAvailableExtensionProperties."
"""
helps['identitydirmgt organization get-by-id'] = """
type: command
short-summary: "Invoke action getByIds."
"""
helps['identitydirmgt organization get-member-group'] = """
type: command
short-summary: "Invoke action getMemberGroups."
"""
helps['identitydirmgt organization get-member-object'] = """
type: command
short-summary: "Invoke action getMemberObjects."
"""
helps['identitydirmgt organization list-extension'] = """
type: command
short-summary: "Get extensions from organization."
"""
helps['identitydirmgt organization restore'] = """
type: command
short-summary: "Invoke action restore."
"""
helps['identitydirmgt organization set-mobile-device-management-authority'] = """
type: command
short-summary: "Invoke action setMobileDeviceManagementAuthority."
"""
helps['identitydirmgt organization show-extension'] = """
type: command
short-summary: "Get extensions from organization."
"""
helps['identitydirmgt organization update-extension'] = """
type: command
short-summary: "Update the navigation property extensions in organization."
"""
helps['identitydirmgt organization validate-property'] = """
type: command
short-summary: "Invoke action validateProperties."
"""
helps['identitydirmgt subscribed-sku-subscribed-sku'] = """
type: group
short-summary: Manage subscribed sku subscribed sku with identitydirmgt
"""
helps['identitydirmgt subscribed-sku-subscribed-sku create-subscribed-sku'] = """
type: command
short-summary: "Add new entity to subscribedSkus."
parameters:
- name: --prepaid-units
short-summary: "licenseUnitsDetail"
long-summary: |
Usage: --prepaid-units enabled=XX suspended=XX warning=XX
enabled: The number of units that are enabled.
suspended: The number of units that are suspended.
warning: The number of units that are in warning status.
- name: --service-plans
short-summary: "Information about the service plans that are available with the SKU. Not nullable"
long-summary: |
Usage: --service-plans applies-to=XX provisioning-status=XX service-plan-id=XX service-plan-name=XX
applies-to: The object the service plan can be assigned to. Possible values:'User' - service plan can be \
assigned to individual users.'Company' - service plan can be assigned to the entire tenant.
provisioning-status: The provisioning status of the service plan. Possible values:'Success' - Service is \
fully provisioned.'Disabled' - Service has been disabled.'PendingInput' - Service is not yet provisioned; awaiting \
service confirmation.'PendingActivation' - Service is provisioned but requires explicit activation by administrator \
(for example, Intune_O365 service plan)'PendingProvisioning' - Microsoft has added a new service to the product SKU \
and it has not been activated in the tenant, yet.
service-plan-id: The unique identifier of the service plan.
service-plan-name: The name of the service plan.
Multiple actions can be specified by using more than one --service-plans argument.
"""
helps['identitydirmgt subscribed-sku-subscribed-sku delete-subscribed-sku'] = """
type: command
short-summary: "Delete entity from subscribedSkus."
"""
helps['identitydirmgt subscribed-sku-subscribed-sku list-subscribed-sku'] = """
type: command
short-summary: "Get entities from subscribedSkus."
"""
helps['identitydirmgt subscribed-sku-subscribed-sku show-subscribed-sku'] = """
type: command
short-summary: "Get entity from subscribedSkus by key."
"""
helps['identitydirmgt subscribed-sku-subscribed-sku update-subscribed-sku'] = """
type: command
short-summary: "Update entity in subscribedSkus."
parameters:
- name: --prepaid-units
short-summary: "licenseUnitsDetail"
long-summary: |
Usage: --prepaid-units enabled=XX suspended=XX warning=XX
enabled: The number of units that are enabled.
suspended: The number of units that are suspended.
warning: The number of units that are in warning status.
- name: --service-plans
short-summary: "Information about the service plans that are available with the SKU. Not nullable"
long-summary: |
Usage: --service-plans applies-to=XX provisioning-status=XX service-plan-id=XX service-plan-name=XX
applies-to: The object the service plan can be assigned to. Possible values:'User' - service plan can be \
assigned to individual users.'Company' - service plan can be assigned to the entire tenant.
provisioning-status: The provisioning status of the service plan. Possible values:'Success' - Service is \
fully provisioned.'Disabled' - Service has been disabled.'PendingInput' - Service is not yet provisioned; awaiting \
service confirmation.'PendingActivation' - Service is provisioned but requires explicit activation by administrator \
(for example, Intune_O365 service plan)'PendingProvisioning' - Microsoft has added a new service to the product SKU \
and it has not been activated in the tenant, yet.
service-plan-id: The unique identifier of the service plan.
service-plan-name: The name of the service plan.
Multiple actions can be specified by using more than one --service-plans argument.
"""
helps['identitydirmgt user'] = """
type: group
short-summary: Manage user with identitydirmgt
"""
helps['identitydirmgt user create-scoped-role-member-of'] = """
type: command
short-summary: "Create new navigation property to scopedRoleMemberOf for users."
parameters:
- name: --role-member-info
short-summary: "identity"
long-summary: |
Usage: --role-member-info display-name=XX id=XX
display-name: The identity's display name. Note that this may not always be available or up to date. For \
example, if a user changes their display name, the API may show the new value in a future response, but the items \
associated with the user won't show up as having changed when using delta.
id: Unique identifier for the identity.
"""
helps['identitydirmgt user delete-scoped-role-member-of'] = """
type: command
short-summary: "Delete navigation property scopedRoleMemberOf for users."
"""
helps['identitydirmgt user list-scoped-role-member-of'] = """
type: command
short-summary: "Get scopedRoleMemberOf from users."
"""
helps['identitydirmgt user show-scoped-role-member-of'] = """
type: command
short-summary: "Get scopedRoleMemberOf from users."
"""
helps['identitydirmgt user update-scoped-role-member-of'] = """
type: command
short-summary: "Update the navigation property scopedRoleMemberOf in users."
parameters:
- name: --role-member-info
short-summary: "identity"
long-summary: |
Usage: --role-member-info display-name=XX id=XX
display-name: The identity's display name. Note that this may not always be available or up to date. For \
example, if a user changes their display name, the API may show the new value in a future response, but the items \
associated with the user won't show up as having changed when using delta.
id: Unique identifier for the identity.
"""
| 37.974375
| 120
| 0.69285
| 7,368
| 62,240
| 5.852334
| 0.056868
| 0.068182
| 0.06679
| 0.096011
| 0.951855
| 0.932514
| 0.868738
| 0.795733
| 0.749814
| 0.732699
| 0
| 0.001776
| 0.203952
| 62,240
| 1,638
| 121
| 37.997558
| 0.868526
| 0.007551
| 0
| 0.745827
| 0
| 0.068285
| 0.934471
| 0.094043
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.000759
| 0
| 0.000759
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
86b3ed42ce37598aa9bfc1fab9d370d5577dc512
| 25,097
|
py
|
Python
|
tests/components/homekit_controller/test_diagnostics.py
|
muehlen/core
|
0cd3302ebc5951c9ecd00ab1e6cd9ae28173fab5
|
[
"Apache-2.0"
] | null | null | null |
tests/components/homekit_controller/test_diagnostics.py
|
muehlen/core
|
0cd3302ebc5951c9ecd00ab1e6cd9ae28173fab5
|
[
"Apache-2.0"
] | null | null | null |
tests/components/homekit_controller/test_diagnostics.py
|
muehlen/core
|
0cd3302ebc5951c9ecd00ab1e6cd9ae28173fab5
|
[
"Apache-2.0"
] | null | null | null |
"""Test homekit_controller diagnostics."""
from aiohttp import ClientSession
from homeassistant.components.homekit_controller.const import KNOWN_DEVICES
from homeassistant.core import HomeAssistant
from homeassistant.helpers import device_registry as dr
from tests.components.diagnostics import (
get_diagnostics_for_config_entry,
get_diagnostics_for_device,
)
from tests.components.homekit_controller.common import (
setup_accessories_from_file,
setup_test_accessories,
)
async def test_config_entry(hass: HomeAssistant, hass_client: ClientSession, utcnow):
    """Test generating diagnostics for a config entry.

    Pairs a fake Koogeek LS1 light strip from a canned accessory database and
    asserts the config-entry diagnostics payload exactly: config-entry data,
    the full HomeKit entity map, and registry/state info for every entity.
    (``utcnow`` is a fixture that freezes time so state timestamps are stable.)
    """
    accessories = await setup_accessories_from_file(hass, "koogeek_ls1.json")
    config_entry, _ = await setup_test_accessories(hass, accessories)
    diag = await get_diagnostics_for_config_entry(hass, hass_client, config_entry)
    assert diag == {
        "config-entry": {
            "title": "test",
            "version": 1,
            "data": {"AccessoryPairingID": "00:00:00:00:00:00"},
        },
        "entity-map": [
            {
                "aid": 1,
                "services": [
                    # iid 1: accessory information (Name/Manufacturer/Model/
                    # Serial Number/Identify/Firmware Revision).
                    {
                        "iid": 1,
                        "type": "0000003E-0000-1000-8000-0026BB765291",
                        "characteristics": [
                            {
                                "type": "00000023-0000-1000-8000-0026BB765291",
                                "iid": 2,
                                "perms": ["pr"],
                                "format": "string",
                                "value": "Koogeek-LS1-20833F",
                                "description": "Name",
                                "maxLen": 64,
                            },
                            {
                                "type": "00000020-0000-1000-8000-0026BB765291",
                                "iid": 3,
                                "perms": ["pr"],
                                "format": "string",
                                "value": "Koogeek",
                                "description": "Manufacturer",
                                "maxLen": 64,
                            },
                            {
                                "type": "00000021-0000-1000-8000-0026BB765291",
                                "iid": 4,
                                "perms": ["pr"],
                                "format": "string",
                                "value": "LS1",
                                "description": "Model",
                                "maxLen": 64,
                            },
                            {
                                "type": "00000030-0000-1000-8000-0026BB765291",
                                "iid": 5,
                                "perms": ["pr"],
                                "format": "string",
                                # Serial number is scrubbed by the diagnostics
                                # redaction, hence the placeholder value.
                                "value": "**REDACTED**",
                                "description": "Serial Number",
                                "maxLen": 64,
                            },
                            {
                                "type": "00000014-0000-1000-8000-0026BB765291",
                                "iid": 6,
                                "perms": ["pw"],
                                "format": "bool",
                                "description": "Identify",
                            },
                            {
                                "type": "00000052-0000-1000-8000-0026BB765291",
                                "iid": 23,
                                "perms": ["pr"],
                                "format": "string",
                                "value": "2.2.15",
                                "description": "Firmware Revision",
                                "maxLen": 64,
                            },
                        ],
                    },
                    # iid 7: the light itself (On/Hue/Saturation/Brightness/Name).
                    {
                        "iid": 7,
                        "type": "00000043-0000-1000-8000-0026BB765291",
                        "characteristics": [
                            {
                                "type": "00000025-0000-1000-8000-0026BB765291",
                                "iid": 8,
                                "perms": ["pr", "pw", "ev"],
                                "format": "bool",
                                "value": False,
                                "description": "On",
                            },
                            {
                                "type": "00000013-0000-1000-8000-0026BB765291",
                                "iid": 9,
                                "perms": ["pr", "pw", "ev"],
                                "format": "float",
                                "value": 44,
                                "description": "Hue",
                                "unit": "arcdegrees",
                                "minValue": 0,
                                "maxValue": 359,
                                "minStep": 1,
                            },
                            {
                                "type": "0000002F-0000-1000-8000-0026BB765291",
                                "iid": 10,
                                "perms": ["pr", "pw", "ev"],
                                "format": "float",
                                "value": 0,
                                "description": "Saturation",
                                "unit": "percentage",
                                "minValue": 0,
                                "maxValue": 100,
                                "minStep": 1,
                            },
                            {
                                "type": "00000008-0000-1000-8000-0026BB765291",
                                "iid": 11,
                                "perms": ["pr", "pw", "ev"],
                                "format": "int",
                                "value": 100,
                                "description": "Brightness",
                                "unit": "percentage",
                                "minValue": 0,
                                "maxValue": 100,
                                "minStep": 1,
                            },
                            {
                                "type": "00000023-0000-1000-8000-0026BB765291",
                                "iid": 12,
                                "perms": ["pr"],
                                "format": "string",
                                "value": "Light Strip",
                                "description": "Name",
                                "maxLen": 64,
                            },
                        ],
                    },
                    # iid 13: vendor-specific TIMER_SETTINGS blob.
                    {
                        "iid": 13,
                        "type": "4aaaf940-0dec-11e5-b939-0800200c9a66",
                        "characteristics": [
                            {
                                "type": "4AAAF942-0DEC-11E5-B939-0800200C9A66",
                                "iid": 14,
                                "perms": ["pr", "pw"],
                                "format": "tlv8",
                                "value": "AHAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA",
                                "description": "TIMER_SETTINGS",
                            }
                        ],
                    },
                    # iid 15: vendor firmware-upgrade characteristics (hidden perms).
                    {
                        "iid": 15,
                        "type": "151909D0-3802-11E4-916C-0800200C9A66",
                        "characteristics": [
                            {
                                "type": "151909D2-3802-11E4-916C-0800200C9A66",
                                "iid": 16,
                                "perms": ["pr", "hd"],
                                "format": "string",
                                "value": "url,data",
                                "description": "FW Upgrade supported types",
                                "maxLen": 64,
                            },
                            {
                                "type": "151909D1-3802-11E4-916C-0800200C9A66",
                                "iid": 17,
                                "perms": ["pw", "hd"],
                                "format": "string",
                                "description": "FW Upgrade URL",
                                "maxLen": 64,
                            },
                            {
                                "type": "151909D6-3802-11E4-916C-0800200C9A66",
                                "iid": 18,
                                "perms": ["pr", "ev", "hd"],
                                "format": "int",
                                "value": 0,
                                "description": "FW Upgrade Status",
                            },
                            {
                                "type": "151909D7-3802-11E4-916C-0800200C9A66",
                                "iid": 19,
                                "perms": ["pw", "hd"],
                                "format": "data",
                                "description": "FW Upgrade Data",
                            },
                        ],
                    },
                    # iid 20: vendor timezone/epoch-time characteristics.
                    {
                        "iid": 20,
                        "type": "151909D3-3802-11E4-916C-0800200C9A66",
                        "characteristics": [
                            {
                                "type": "151909D5-3802-11E4-916C-0800200C9A66",
                                "iid": 21,
                                "perms": ["pr", "pw"],
                                "format": "int",
                                "value": 0,
                                "description": "Timezone",
                            },
                            {
                                "type": "151909D4-3802-11E4-916C-0800200C9A66",
                                "iid": 22,
                                "perms": ["pr", "pw"],
                                "format": "int",
                                "value": 1550348623,
                                "description": "Time value since Epoch",
                            },
                        ],
                    },
                ],
            }
        ],
        "devices": [
            {
                "name": "Koogeek-LS1-20833F",
                "model": "LS1",
                # NOTE(review): "manfacturer" (sic) presumably mirrors the exact
                # key emitted by the component's diagnostics code — confirm there
                # before renaming it here, or the test will diverge from reality.
                "manfacturer": "Koogeek",
                "sw_version": "2.2.15",
                "hw_version": "",
                # Registry entry + frozen state snapshot for each entity.
                "entities": [
                    {
                        "device_class": None,
                        "disabled": False,
                        "disabled_by": None,
                        "entity_category": "diagnostic",
                        "icon": None,
                        "original_device_class": None,
                        "original_icon": None,
                        "original_name": "Koogeek-LS1-20833F Identify",
                        "state": {
                            "attributes": {
                                "friendly_name": "Koogeek-LS1-20833F Identify"
                            },
                            "entity_id": "button.koogeek_ls1_20833f_identify",
                            "last_changed": "2023-01-01T00:00:00+00:00",
                            "last_updated": "2023-01-01T00:00:00+00:00",
                            "state": "unknown",
                        },
                        "unit_of_measurement": None,
                    },
                    {
                        "original_name": "Koogeek-LS1-20833F",
                        "disabled": False,
                        "disabled_by": None,
                        "entity_category": None,
                        "device_class": None,
                        "original_device_class": None,
                        "icon": None,
                        "original_icon": None,
                        "unit_of_measurement": None,
                        "state": {
                            "entity_id": "light.koogeek_ls1_20833f",
                            "state": "off",
                            "attributes": {
                                "supported_color_modes": ["hs"],
                                "friendly_name": "Koogeek-LS1-20833F",
                                "supported_features": 17,
                            },
                            "last_changed": "2023-01-01T00:00:00+00:00",
                            "last_updated": "2023-01-01T00:00:00+00:00",
                        },
                    },
                ],
            }
        ],
    }
async def test_device(hass: HomeAssistant, hass_client: ClientSession, utcnow):
    """Test generating diagnostics for a device entry.

    Pairs an emulated Koogeek LS1 light strip from a recorded accessory
    fixture, then snapshot-asserts the exact diagnostics payload produced
    for that device entry.
    """
    accessories = await setup_accessories_from_file(hass, "koogeek_ls1.json")
    config_entry, _ = await setup_test_accessories(hass, accessories)
    connection = hass.data[KNOWN_DEVICES]["00:00:00:00:00:00"]
    device_registry = dr.async_get(hass)
    # NOTE(review): devices[1] presumably selects the accessory's device
    # entry rather than index 0 -- confirm against the connection object.
    device = device_registry.async_get(connection.devices[1])
    diag = await get_diagnostics_for_device(hass, hass_client, config_entry, device)
    # Full snapshot of the expected payload. The serial number is redacted
    # by the diagnostics layer ("**REDACTED**"), and "manfacturer" is the
    # key name actually emitted -- the typo is intentional and must be
    # kept verbatim.
    assert diag == {
        "config-entry": {
            "title": "test",
            "version": 1,
            "data": {"AccessoryPairingID": "00:00:00:00:00:00"},
        },
        "entity-map": [
            {
                "aid": 1,
                "services": [
                    {
                        "iid": 1,
                        "type": "0000003E-0000-1000-8000-0026BB765291",
                        "characteristics": [
                            {
                                "type": "00000023-0000-1000-8000-0026BB765291",
                                "iid": 2,
                                "perms": ["pr"],
                                "format": "string",
                                "value": "Koogeek-LS1-20833F",
                                "description": "Name",
                                "maxLen": 64,
                            },
                            {
                                "type": "00000020-0000-1000-8000-0026BB765291",
                                "iid": 3,
                                "perms": ["pr"],
                                "format": "string",
                                "value": "Koogeek",
                                "description": "Manufacturer",
                                "maxLen": 64,
                            },
                            {
                                "type": "00000021-0000-1000-8000-0026BB765291",
                                "iid": 4,
                                "perms": ["pr"],
                                "format": "string",
                                "value": "LS1",
                                "description": "Model",
                                "maxLen": 64,
                            },
                            {
                                "type": "00000030-0000-1000-8000-0026BB765291",
                                "iid": 5,
                                "perms": ["pr"],
                                "format": "string",
                                "value": "**REDACTED**",
                                "description": "Serial Number",
                                "maxLen": 64,
                            },
                            {
                                "type": "00000014-0000-1000-8000-0026BB765291",
                                "iid": 6,
                                "perms": ["pw"],
                                "format": "bool",
                                "description": "Identify",
                            },
                            {
                                "type": "00000052-0000-1000-8000-0026BB765291",
                                "iid": 23,
                                "perms": ["pr"],
                                "format": "string",
                                "value": "2.2.15",
                                "description": "Firmware Revision",
                                "maxLen": 64,
                            },
                        ],
                    },
                    {
                        "iid": 7,
                        "type": "00000043-0000-1000-8000-0026BB765291",
                        "characteristics": [
                            {
                                "type": "00000025-0000-1000-8000-0026BB765291",
                                "iid": 8,
                                "perms": ["pr", "pw", "ev"],
                                "format": "bool",
                                "value": False,
                                "description": "On",
                            },
                            {
                                "type": "00000013-0000-1000-8000-0026BB765291",
                                "iid": 9,
                                "perms": ["pr", "pw", "ev"],
                                "format": "float",
                                "value": 44,
                                "description": "Hue",
                                "unit": "arcdegrees",
                                "minValue": 0,
                                "maxValue": 359,
                                "minStep": 1,
                            },
                            {
                                "type": "0000002F-0000-1000-8000-0026BB765291",
                                "iid": 10,
                                "perms": ["pr", "pw", "ev"],
                                "format": "float",
                                "value": 0,
                                "description": "Saturation",
                                "unit": "percentage",
                                "minValue": 0,
                                "maxValue": 100,
                                "minStep": 1,
                            },
                            {
                                "type": "00000008-0000-1000-8000-0026BB765291",
                                "iid": 11,
                                "perms": ["pr", "pw", "ev"],
                                "format": "int",
                                "value": 100,
                                "description": "Brightness",
                                "unit": "percentage",
                                "minValue": 0,
                                "maxValue": 100,
                                "minStep": 1,
                            },
                            {
                                "type": "00000023-0000-1000-8000-0026BB765291",
                                "iid": 12,
                                "perms": ["pr"],
                                "format": "string",
                                "value": "Light Strip",
                                "description": "Name",
                                "maxLen": 64,
                            },
                        ],
                    },
                    {
                        "iid": 13,
                        "type": "4aaaf940-0dec-11e5-b939-0800200c9a66",
                        "characteristics": [
                            {
                                "type": "4AAAF942-0DEC-11E5-B939-0800200C9A66",
                                "iid": 14,
                                "perms": ["pr", "pw"],
                                "format": "tlv8",
                                "value": "AHAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA",
                                "description": "TIMER_SETTINGS",
                            }
                        ],
                    },
                    {
                        "iid": 15,
                        "type": "151909D0-3802-11E4-916C-0800200C9A66",
                        "characteristics": [
                            {
                                "type": "151909D2-3802-11E4-916C-0800200C9A66",
                                "iid": 16,
                                "perms": ["pr", "hd"],
                                "format": "string",
                                "value": "url,data",
                                "description": "FW Upgrade supported types",
                                "maxLen": 64,
                            },
                            {
                                "type": "151909D1-3802-11E4-916C-0800200C9A66",
                                "iid": 17,
                                "perms": ["pw", "hd"],
                                "format": "string",
                                "description": "FW Upgrade URL",
                                "maxLen": 64,
                            },
                            {
                                "type": "151909D6-3802-11E4-916C-0800200C9A66",
                                "iid": 18,
                                "perms": ["pr", "ev", "hd"],
                                "format": "int",
                                "value": 0,
                                "description": "FW Upgrade Status",
                            },
                            {
                                "type": "151909D7-3802-11E4-916C-0800200C9A66",
                                "iid": 19,
                                "perms": ["pw", "hd"],
                                "format": "data",
                                "description": "FW Upgrade Data",
                            },
                        ],
                    },
                    {
                        "iid": 20,
                        "type": "151909D3-3802-11E4-916C-0800200C9A66",
                        "characteristics": [
                            {
                                "type": "151909D5-3802-11E4-916C-0800200C9A66",
                                "iid": 21,
                                "perms": ["pr", "pw"],
                                "format": "int",
                                "value": 0,
                                "description": "Timezone",
                            },
                            {
                                "type": "151909D4-3802-11E4-916C-0800200C9A66",
                                "iid": 22,
                                "perms": ["pr", "pw"],
                                "format": "int",
                                "value": 1550348623,
                                "description": "Time value since Epoch",
                            },
                        ],
                    },
                ],
            }
        ],
        "device": {
            "name": "Koogeek-LS1-20833F",
            "model": "LS1",
            "manfacturer": "Koogeek",
            "sw_version": "2.2.15",
            "hw_version": "",
            "entities": [
                {
                    "device_class": None,
                    "disabled": False,
                    "disabled_by": None,
                    "entity_category": "diagnostic",
                    "icon": None,
                    "original_device_class": None,
                    "original_icon": None,
                    "original_name": "Koogeek-LS1-20833F Identify",
                    "state": {
                        "attributes": {"friendly_name": "Koogeek-LS1-20833F Identify"},
                        "entity_id": "button.koogeek_ls1_20833f_identify",
                        "last_changed": "2023-01-01T00:00:00+00:00",
                        "last_updated": "2023-01-01T00:00:00+00:00",
                        "state": "unknown",
                    },
                    "unit_of_measurement": None,
                },
                {
                    "original_name": "Koogeek-LS1-20833F",
                    "disabled": False,
                    "disabled_by": None,
                    "entity_category": None,
                    "device_class": None,
                    "original_device_class": None,
                    "icon": None,
                    "original_icon": None,
                    "unit_of_measurement": None,
                    "state": {
                        "entity_id": "light.koogeek_ls1_20833f",
                        "state": "off",
                        "attributes": {
                            "supported_color_modes": ["hs"],
                            "friendly_name": "Koogeek-LS1-20833F",
                            "supported_features": 17,
                        },
                        "last_changed": "2023-01-01T00:00:00+00:00",
                        "last_updated": "2023-01-01T00:00:00+00:00",
                    },
                },
            ],
        },
    }
| 45.630909
| 196
| 0.293541
| 1,330
| 25,097
| 5.442857
| 0.14812
| 0.02155
| 0.023208
| 0.0862
| 0.924023
| 0.904545
| 0.902887
| 0.902887
| 0.902887
| 0.902887
| 0
| 0.175388
| 0.597203
| 25,097
| 549
| 197
| 45.714026
| 0.540706
| 0.001434
| 0
| 0.736449
| 0
| 0
| 0.251954
| 0.096276
| 0
| 0
| 0
| 0
| 0.003738
| 1
| 0
| false
| 0
| 0.011215
| 0
| 0.011215
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
813bd04f310720d3ef12d9dfb61a39bfcc0789f1
| 177
|
py
|
Python
|
search/models/__init__.py
|
invinst/CPDB
|
c2d8ae8888b13d956cc1068742f18d45736d4121
|
[
"Apache-2.0"
] | 16
|
2016-05-20T09:03:32.000Z
|
2020-09-13T14:23:06.000Z
|
search/models/__init__.py
|
invinst/CPDB
|
c2d8ae8888b13d956cc1068742f18d45736d4121
|
[
"Apache-2.0"
] | 2
|
2016-05-24T01:44:14.000Z
|
2016-06-17T22:19:45.000Z
|
search/models/__init__.py
|
invinst/CPDB
|
c2d8ae8888b13d956cc1068742f18d45736d4121
|
[
"Apache-2.0"
] | 2
|
2016-10-10T16:14:19.000Z
|
2020-10-26T00:17:02.000Z
|
from search.models.suggestion import SuggestionLog, FilterLog # NOQA
from search.models.alias import Alias # NOQA
from search.models.session_alias import SessionAlias # NOQA
| 44.25
| 69
| 0.819209
| 23
| 177
| 6.26087
| 0.478261
| 0.208333
| 0.333333
| 0.277778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.124294
| 177
| 3
| 70
| 59
| 0.929032
| 0.079096
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
d4dfa1db2a5857f9bb6728ea202e23abc72786b6
| 8,230
|
py
|
Python
|
utils/throttle.py
|
yuntechcloud/GreaterWMS
|
4a1c54fb962a0a6a3790e30340424c81175fe07b
|
[
"Apache-2.0"
] | 3
|
2020-10-19T05:55:28.000Z
|
2020-11-12T03:55:06.000Z
|
utils/throttle.py
|
coolflywms/GreaterWMS
|
0509b8842bbd8cae9540b6f13f7b3cb6fa5a911c
|
[
"Apache-2.0"
] | 1
|
2020-07-24T07:34:36.000Z
|
2020-07-24T07:34:36.000Z
|
utils/throttle.py
|
Singosgu/Elvis_WMS
|
e6911b7daae76be640ece8946104af24b6cf0fa6
|
[
"MIT"
] | 4
|
2020-09-04T13:35:15.000Z
|
2020-10-16T15:10:38.000Z
|
from rest_framework.throttling import BaseThrottle
from throttle.models import ListModel
from utils.md5 import Md5
from django.utils import timezone
# Module-level scratch state shared between allow_request() and wait().
# NOTE(review): this is global, so concurrent requests overwrite each
# other's "visit_check" -- preserved from the original implementation.
data = {}


class VisitThrottle(BaseThrottle):
    """Per-client rate limiter backed by ``throttle.models.ListModel`` rows.

    Every request is recorded as a DB row keyed by (openid, appid, ip,
    method); requests exceeding the per-verb limit inside a one-second
    window are rejected.  This replaces five nearly identical per-verb
    branches with one shared helper -- behavior is unchanged.
    """

    # Maximum requests allowed inside the 1-second window, per HTTP verb.
    # Values are taken verbatim from the original per-branch checks.
    LIMITS = {'get': 5, 'post': 4, 'put': 4, 'patch': 4, 'delete': 3}

    def allow_request(self, request, view):
        """Return True to allow the request, False (or a falsy-looking
        tuple for the doc endpoints) to throttle it.

        NOTE(review): the ``(False, None)`` returns for ``/docs/`` and
        ``/swagger/`` are truthy tuples, so DRF treats them as "allowed";
        this quirk is preserved from the original -- confirm intent.
        """
        if request.path == '/docs/':
            return (False, None)
        elif request.path == '/swagger/':
            return (False, None)
        # Prefer the proxy-forwarded address when present.
        ip = request.META.get('HTTP_X_FORWARDED_FOR') if request.META.get(
            'HTTP_X_FORWARDED_FOR') else request.META.get('REMOTE_ADDR')
        openid = request.auth.openid
        appid = request.auth.appid
        method = request.method.lower()
        if method not in self.LIMITS:
            # Any other verb (e.g. OPTIONS/HEAD) is rejected outright,
            # matching the original trailing ``else: return False``.
            return False
        return self._check_rate(openid, appid, ip, method, self.LIMITS[method])

    def _check_rate(self, openid, appid, ip, method, limit):
        """Record this request and decide whether it fits in the window.

        Shared implementation of the five duplicated per-verb branches:
        purge stale rows, count this client's recent requests for ``method``,
        record the current one, and allow at most ``limit`` per second.
        """
        ntime = timezone.now()
        ctime = ntime - timezone.timedelta(seconds=1)
        # Drop bookkeeping rows older than the 1-second window (all clients,
        # this verb only -- same scope as the original).
        for stale in ListModel.objects.filter(method=method,
                                              create_time__lte=ctime):
            stale.delete()
        t_code = Md5.md5(ip)
        previous = ListModel.objects.filter(
            openid=openid, appid=appid, ip=ip, method=method).order_by('id')
        throttle_count = previous.count()
        if throttle_count == 0:
            # First request in the window: record it and allow.
            ListModel.objects.create(openid=openid, appid=appid, ip=ip,
                                     method=method, t_code=t_code)
            return True
        # Oldest surviving request inside the window (read BEFORE inserting
        # the new row, exactly as the original did).
        throttle_last_create_time = previous.first().create_time
        ListModel.objects.create(openid=openid, appid=appid, ip=ip,
                                 method=method, t_code=t_code)
        allocation_seconds_balance = (ntime - throttle_last_create_time).seconds
        data["visit_check"] = throttle_last_create_time
        if allocation_seconds_balance >= 1:
            return True
        # Window not yet elapsed: allow only while under the per-verb limit.
        return throttle_count < limit

    def wait(self):
        """Seconds the client should wait before retrying.

        NOTE(review): raises KeyError if ``allow_request`` never reached the
        branch that stores ``data["visit_check"]`` -- preserved behavior.
        """
        ctime = timezone.now()
        wait_time = (ctime - data["visit_check"]).seconds
        balance_time = 1 - wait_time
        return balance_time
| 54.144737
| 111
| 0.519806
| 785
| 8,230
| 5.228025
| 0.103185
| 0.060916
| 0.062135
| 0.080409
| 0.873782
| 0.86769
| 0.86769
| 0.852583
| 0.847466
| 0.836014
| 0
| 0.006673
| 0.399149
| 8,230
| 151
| 112
| 54.503311
| 0.823256
| 0
| 0
| 0.763514
| 0
| 0
| 0.030012
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.013514
| false
| 0
| 0.027027
| 0
| 0.209459
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
be01bcb9c5ce900a1f45d568455eb44245277fa3
| 21,801
|
py
|
Python
|
apteco_api/api/files_api.py
|
Apteco/apteco-api
|
7440c98ab10ea6d8a5997187f6fc739ce1c75d2b
|
[
"Apache-2.0"
] | 2
|
2020-05-21T14:24:16.000Z
|
2020-12-03T19:56:34.000Z
|
apteco_api/api/files_api.py
|
Apteco/apteco-api
|
7440c98ab10ea6d8a5997187f6fc739ce1c75d2b
|
[
"Apache-2.0"
] | null | null | null |
apteco_api/api/files_api.py
|
Apteco/apteco-api
|
7440c98ab10ea6d8a5997187f6fc739ce1c75d2b
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Apteco API
An API to allow access to Apteco Marketing Suite resources # noqa: E501
The version of the OpenAPI document: v2
Contact: support@apteco.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from apteco_api.api_client import ApiClient
from apteco_api.exceptions import (
ApiTypeError,
ApiValueError
)
class FilesApi(object):
    """NOTE: This class is auto generated by OpenAPI Generator
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    def __init__(self, api_client=None):
        # Fall back to a default ApiClient when none is injected.
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client

    def files_delete_file(self, data_view_name, system_name, file_path, **kwargs):  # noqa: E501
        """Deletes file at location  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.files_delete_file(data_view_name, system_name, file_path, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str data_view_name: The name of the DataView to act on (required)
        :param str system_name: The name of the FastStats system to act on (required)
        :param str file_path: The path to the file to be deleted (required)
        :param int timeout_in_seconds: The number of seconds before the request will time out. Leave unspecified to use the default value given in the file service's configuration
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Thin wrapper: delegate to the *_with_http_info variant but return
        # only the deserialized body, not (data, status, headers).
        kwargs['_return_http_data_only'] = True
        return self.files_delete_file_with_http_info(data_view_name, system_name, file_path, **kwargs)  # noqa: E501

    def files_delete_file_with_http_info(self, data_view_name, system_name, file_path, **kwargs):  # noqa: E501
        """Deletes file at location  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.files_delete_file_with_http_info(data_view_name, system_name, file_path, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str data_view_name: The name of the DataView to act on (required)
        :param str system_name: The name of the FastStats system to act on (required)
        :param str file_path: The path to the file to be deleted (required)
        :param int timeout_in_seconds: The number of seconds before the request will time out. Leave unspecified to use the default value given in the file service's configuration
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Capture named args (plus 'kwargs') so parameters can be looked up
        # uniformly by string name below.
        local_var_params = locals()

        all_params = ['data_view_name', 'system_name', 'file_path', 'timeout_in_seconds']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Reject any keyword argument this endpoint does not understand.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method files_delete_file" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'data_view_name' is set
        if ('data_view_name' not in local_var_params or
                local_var_params['data_view_name'] is None):
            raise ApiValueError("Missing the required parameter `data_view_name` when calling `files_delete_file`")  # noqa: E501
        # verify the required parameter 'system_name' is set
        if ('system_name' not in local_var_params or
                local_var_params['system_name'] is None):
            raise ApiValueError("Missing the required parameter `system_name` when calling `files_delete_file`")  # noqa: E501
        # verify the required parameter 'file_path' is set
        if ('file_path' not in local_var_params or
                local_var_params['file_path'] is None):
            raise ApiValueError("Missing the required parameter `file_path` when calling `files_delete_file`")  # noqa: E501

        collection_formats = {}

        # Map snake_case params onto the camelCase path template names.
        path_params = {}
        if 'data_view_name' in local_var_params:
            path_params['dataViewName'] = local_var_params['data_view_name']  # noqa: E501
        if 'system_name' in local_var_params:
            path_params['systemName'] = local_var_params['system_name']  # noqa: E501
        if 'file_path' in local_var_params:
            path_params['filePath'] = local_var_params['file_path']  # noqa: E501

        query_params = []
        if 'timeout_in_seconds' in local_var_params:
            query_params.append(('timeoutInSeconds', local_var_params['timeout_in_seconds']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # Authentication setting
        auth_settings = ['faststats_auth']  # noqa: E501

        return self.api_client.call_api(
            '/{dataViewName}/Files/{systemName}/{filePath}', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type=None,  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)

    def files_get_file(self, data_view_name, system_name, file_path, **kwargs):  # noqa: E501
        """Returns the contents for a file  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.files_get_file(data_view_name, system_name, file_path, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str data_view_name: The name of the DataView to act on (required)
        :param str system_name: The name of the FastStats system to act on (required)
        :param str file_path: The path of the file to return the contents for (required)
        :param int timeout_in_seconds: The number of seconds before the request will time out. Leave unspecified to use the default value given in the file service's configuration
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: file
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Thin wrapper: delegate to the *_with_http_info variant but return
        # only the deserialized body, not (data, status, headers).
        kwargs['_return_http_data_only'] = True
        return self.files_get_file_with_http_info(data_view_name, system_name, file_path, **kwargs)  # noqa: E501

    def files_get_file_with_http_info(self, data_view_name, system_name, file_path, **kwargs):  # noqa: E501
        """Returns the contents for a file  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.files_get_file_with_http_info(data_view_name, system_name, file_path, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str data_view_name: The name of the DataView to act on (required)
        :param str system_name: The name of the FastStats system to act on (required)
        :param str file_path: The path of the file to return the contents for (required)
        :param int timeout_in_seconds: The number of seconds before the request will time out. Leave unspecified to use the default value given in the file service's configuration
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(file, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Capture named args (plus 'kwargs') so parameters can be looked up
        # uniformly by string name below.
        local_var_params = locals()

        all_params = ['data_view_name', 'system_name', 'file_path', 'timeout_in_seconds']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Reject any keyword argument this endpoint does not understand.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method files_get_file" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'data_view_name' is set
        if ('data_view_name' not in local_var_params or
                local_var_params['data_view_name'] is None):
            raise ApiValueError("Missing the required parameter `data_view_name` when calling `files_get_file`")  # noqa: E501
        # verify the required parameter 'system_name' is set
        if ('system_name' not in local_var_params or
                local_var_params['system_name'] is None):
            raise ApiValueError("Missing the required parameter `system_name` when calling `files_get_file`")  # noqa: E501
        # verify the required parameter 'file_path' is set
        if ('file_path' not in local_var_params or
                local_var_params['file_path'] is None):
            raise ApiValueError("Missing the required parameter `file_path` when calling `files_get_file`")  # noqa: E501

        collection_formats = {}

        # Map snake_case params onto the camelCase path template names.
        path_params = {}
        if 'data_view_name' in local_var_params:
            path_params['dataViewName'] = local_var_params['data_view_name']  # noqa: E501
        if 'system_name' in local_var_params:
            path_params['systemName'] = local_var_params['system_name']  # noqa: E501
        if 'file_path' in local_var_params:
            path_params['filePath'] = local_var_params['file_path']  # noqa: E501

        query_params = []
        if 'timeout_in_seconds' in local_var_params:
            query_params.append(('timeoutInSeconds', local_var_params['timeout_in_seconds']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/octet-stream'])  # noqa: E501

        # Authentication setting
        auth_settings = ['faststats_auth']  # noqa: E501

        return self.api_client.call_api(
            '/{dataViewName}/Files/{systemName}/{filePath}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='file',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)

    def files_upsert_file(self, data_view_name, system_name, file_path, file, **kwargs):  # noqa: E501
        """Creates or updates a file at a location  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.files_upsert_file(data_view_name, system_name, file_path, file, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str data_view_name: The name of the DataView to act on (required)
        :param str system_name: The name of the FastStats system to act on (required)
        :param str file_path: The path in the system where the file will be put (required)
        :param file file: The file to upload. (required)
        :param int timeout_in_seconds: The number of seconds before the request will time out. Leave unspecified to use the default value given in the file service's configuration
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: FileEntry
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Thin wrapper: delegate to the *_with_http_info variant but return
        # only the deserialized body, not (data, status, headers).
        kwargs['_return_http_data_only'] = True
        return self.files_upsert_file_with_http_info(data_view_name, system_name, file_path, file, **kwargs)  # noqa: E501

    def files_upsert_file_with_http_info(self, data_view_name, system_name, file_path, file, **kwargs):  # noqa: E501
        """Creates or updates a file at a location  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.files_upsert_file_with_http_info(data_view_name, system_name, file_path, file, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str data_view_name: The name of the DataView to act on (required)
        :param str system_name: The name of the FastStats system to act on (required)
        :param str file_path: The path in the system where the file will be put (required)
        :param file file: The file to upload. (required)
        :param int timeout_in_seconds: The number of seconds before the request will time out. Leave unspecified to use the default value given in the file service's configuration
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(FileEntry, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Capture named args (plus 'kwargs') so parameters can be looked up
        # uniformly by string name below.
        local_var_params = locals()

        all_params = ['data_view_name', 'system_name', 'file_path', 'file', 'timeout_in_seconds']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Reject any keyword argument this endpoint does not understand.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method files_upsert_file" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'data_view_name' is set
        if ('data_view_name' not in local_var_params or
                local_var_params['data_view_name'] is None):
            raise ApiValueError("Missing the required parameter `data_view_name` when calling `files_upsert_file`")  # noqa: E501
        # verify the required parameter 'system_name' is set
        if ('system_name' not in local_var_params or
                local_var_params['system_name'] is None):
            raise ApiValueError("Missing the required parameter `system_name` when calling `files_upsert_file`")  # noqa: E501
        # verify the required parameter 'file_path' is set
        if ('file_path' not in local_var_params or
                local_var_params['file_path'] is None):
            raise ApiValueError("Missing the required parameter `file_path` when calling `files_upsert_file`")  # noqa: E501
        # verify the required parameter 'file' is set
        if ('file' not in local_var_params or
                local_var_params['file'] is None):
            raise ApiValueError("Missing the required parameter `file` when calling `files_upsert_file`")  # noqa: E501

        collection_formats = {}

        # Map snake_case params onto the camelCase path template names.
        path_params = {}
        if 'data_view_name' in local_var_params:
            path_params['dataViewName'] = local_var_params['data_view_name']  # noqa: E501
        if 'system_name' in local_var_params:
            path_params['systemName'] = local_var_params['system_name']  # noqa: E501
        if 'file_path' in local_var_params:
            path_params['filePath'] = local_var_params['file_path']  # noqa: E501

        query_params = []
        if 'timeout_in_seconds' in local_var_params:
            query_params.append(('timeoutInSeconds', local_var_params['timeout_in_seconds']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}
        # The upload itself goes as a multipart file part, not the body.
        if 'file' in local_var_params:
            local_var_files['file'] = local_var_params['file']  # noqa: E501

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'text/json', 'application/xml', 'text/xml'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['multipart/form-data'])  # noqa: E501

        # Authentication setting
        auth_settings = ['faststats_auth']  # noqa: E501

        return self.api_client.call_api(
            '/{dataViewName}/Files/{systemName}/{filePath}', 'PUT',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='FileEntry',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
| 51.296471
| 180
| 0.63809
| 2,721
| 21,801
| 4.852628
| 0.077178
| 0.046653
| 0.07422
| 0.02787
| 0.935626
| 0.933505
| 0.93199
| 0.929643
| 0.92934
| 0.924871
| 0
| 0.011482
| 0.288886
| 21,801
| 424
| 181
| 51.417453
| 0.840224
| 0.453603
| 0
| 0.732323
| 0
| 0
| 0.240782
| 0.03324
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035354
| false
| 0
| 0.025253
| 0
| 0.09596
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
077571fcd2091b3b7216c57627a11989f3db1fdf
| 9,250
|
py
|
Python
|
tensorflow/contrib/data/python/kernel_tests/assert_element_shape_test.py
|
PaulWang1905/tensorflow
|
ebf12d22b4801fb8dab5034cc94562bf7cc33fa0
|
[
"Apache-2.0"
] | 848
|
2019-12-03T00:16:17.000Z
|
2022-03-31T22:53:17.000Z
|
tensorflow/contrib/data/python/kernel_tests/assert_element_shape_test.py
|
PaulWang1905/tensorflow
|
ebf12d22b4801fb8dab5034cc94562bf7cc33fa0
|
[
"Apache-2.0"
] | 656
|
2019-12-03T00:48:46.000Z
|
2022-03-31T18:41:54.000Z
|
tensorflow/contrib/data/python/kernel_tests/assert_element_shape_test.py
|
PaulWang1905/tensorflow
|
ebf12d22b4801fb8dab5034cc94562bf7cc33fa0
|
[
"Apache-2.0"
] | 506
|
2019-12-03T00:46:26.000Z
|
2022-03-30T10:34:56.000Z
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the experimental input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.data.python.ops import batching
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.platform import test
@test_util.run_v1_only("deprecated API, no eager or V2 test coverage")
class AssertElementShapeTest(test_base.DatasetTestBase):
  """Tests for `batching.assert_element_shape`.

  Every test works on datasets whose elements are a pair of
  (float32 tensor of shape [2], int32 tensor of shape [3, 4]), built either
  with statically known shapes (`_create_dataset`) or with unknown shapes
  via `py_func` (`_create_unknown_shape_dataset`).
  """

  def _create_dataset(self, num_elements):
    """Returns a `num_elements`-element dataset with known static shapes."""

    def _map_fn(_):
      return (array_ops.ones(2, dtype=dtypes.float32),
              array_ops.zeros((3, 4), dtype=dtypes.int32))

    return dataset_ops.Dataset.range(num_elements).map(_map_fn)

  def _create_unknown_shape_dataset(self, num_elements):
    """Returns the same elements, but with statically unknown shapes.

    Routing the element construction through `script_ops.py_func` erases all
    static shape information, so the dataset reports `TensorShape(None)` for
    both components.
    """

    def _map_fn(x):
      return script_ops.py_func(
          lambda _: (  # pylint: disable=g-long-lambda
              np.ones(2, dtype=np.float32),
              np.zeros((3, 4), dtype=np.int32)),
          [x],
          [dtypes.float32, dtypes.int32])

    return dataset_ops.Dataset.range(num_elements).map(_map_fn)

  def _assert_yields_num_elements(self, dataset, num_elements):
    """Drains `dataset`, checking it yields exactly `num_elements` elements."""
    iterator = dataset_ops.make_initializable_iterator(dataset)
    init_op = iterator.initializer
    get_next = iterator.get_next()
    with self.cached_session() as sess:
      sess.run(init_op)
      for _ in range(num_elements):
        sess.run(get_next)
      # One more fetch must signal exhaustion.
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

  def test_assert_element_shape(self):
    """Asserting the exact static shapes is a no-op that still iterates."""
    dataset = self._create_dataset(5)
    expected_shapes = (tensor_shape.TensorShape(2),
                       tensor_shape.TensorShape((3, 4)))
    self.assertEqual(expected_shapes,
                     dataset_ops.get_legacy_output_shapes(dataset))
    result = dataset.apply(batching.assert_element_shape(expected_shapes))
    self.assertEqual(expected_shapes,
                     dataset_ops.get_legacy_output_shapes(result))
    self._assert_yields_num_elements(result, 5)

  def test_assert_wrong_element_shape(self):
    """Statically incompatible shapes fail at graph-build time."""
    dataset = self._create_dataset(3)
    wrong_shapes = (tensor_shape.TensorShape(2),
                    tensor_shape.TensorShape((3, 10)))
    with self.assertRaises(ValueError):
      dataset.apply(batching.assert_element_shape(wrong_shapes))

  def test_assert_element_shape_on_unknown_shape_dataset(self):
    """Asserting shapes on an unknown-shape dataset sets the static shapes."""
    dataset = self._create_unknown_shape_dataset(5)
    unknown_shapes = (tensor_shape.TensorShape(None),
                      tensor_shape.TensorShape(None))
    self.assertEqual(unknown_shapes,
                     dataset_ops.get_legacy_output_shapes(dataset))
    expected_shapes = (tensor_shape.TensorShape(2),
                       tensor_shape.TensorShape((3, 4)))
    result = dataset.apply(batching.assert_element_shape(expected_shapes))
    self.assertEqual(expected_shapes,
                     dataset_ops.get_legacy_output_shapes(result))
    self._assert_yields_num_elements(result, 5)

  def test_assert_wrong_element_shape_on_unknown_shape_dataset(self):
    """A mismatch only detectable at runtime raises InvalidArgumentError."""
    dataset = self._create_unknown_shape_dataset(3)
    unknown_shapes = (tensor_shape.TensorShape(None),
                      tensor_shape.TensorShape(None))
    self.assertEqual(unknown_shapes,
                     dataset_ops.get_legacy_output_shapes(dataset))
    wrong_shapes = (tensor_shape.TensorShape(2),
                    tensor_shape.TensorShape((3, 10)))
    iterator = dataset_ops.make_initializable_iterator(
        dataset.apply(batching.assert_element_shape(wrong_shapes)))
    init_op = iterator.initializer
    get_next = iterator.get_next()
    with self.cached_session() as sess:
      sess.run(init_op)
      # The shape check can only fire once an element is produced.
      with self.assertRaises(errors.InvalidArgumentError):
        sess.run(get_next)

  def test_assert_partial_element_shape(self):
    """Partial expected shapes are merged with the actual static shapes."""
    dataset = self._create_dataset(5)
    partial_expected_shape = (
        tensor_shape.TensorShape(None),  # Unknown shape
        tensor_shape.TensorShape((None, 4)))  # Partial shape
    result = dataset.apply(
        batching.assert_element_shape(partial_expected_shape))
    # Partial shapes are merged with actual shapes:
    actual_shapes = (tensor_shape.TensorShape(2),
                     tensor_shape.TensorShape((3, 4)))
    self.assertEqual(actual_shapes,
                     dataset_ops.get_legacy_output_shapes(result))
    self._assert_yields_num_elements(result, 5)

  def test_assert_wrong_partial_element_shape(self):
    """A partial shape conflicting with the static shape raises ValueError."""
    dataset = self._create_dataset(3)
    wrong_shapes = (tensor_shape.TensorShape(2),
                    tensor_shape.TensorShape((None, 10)))
    with self.assertRaises(ValueError):
      dataset.apply(batching.assert_element_shape(wrong_shapes))

  def test_assert_partial_element_shape_on_unknown_shape_dataset(self):
    """Partial shapes asserted on an unknown-shape dataset are kept as-is."""
    dataset = self._create_unknown_shape_dataset(5)
    unknown_shapes = (tensor_shape.TensorShape(None),
                      tensor_shape.TensorShape(None))
    self.assertEqual(unknown_shapes,
                     dataset_ops.get_legacy_output_shapes(dataset))
    expected_shapes = (tensor_shape.TensorShape(2),
                       tensor_shape.TensorShape((None, 4)))
    result = dataset.apply(batching.assert_element_shape(expected_shapes))
    self.assertEqual(expected_shapes,
                     dataset_ops.get_legacy_output_shapes(result))
    self._assert_yields_num_elements(result, 5)

  def test_assert_wrong_partial_element_shape_on_unknown_shape_dataset(self):
    """A wrong partial shape on unknown-shape elements raises at runtime."""
    dataset = self._create_unknown_shape_dataset(3)
    unknown_shapes = (tensor_shape.TensorShape(None),
                      tensor_shape.TensorShape(None))
    self.assertEqual(unknown_shapes,
                     dataset_ops.get_legacy_output_shapes(dataset))
    wrong_shapes = (tensor_shape.TensorShape(2),
                    tensor_shape.TensorShape((None, 10)))
    iterator = dataset_ops.make_initializable_iterator(
        dataset.apply(batching.assert_element_shape(wrong_shapes)))
    init_op = iterator.initializer
    get_next = iterator.get_next()
    with self.cached_session() as sess:
      sess.run(init_op)
      with self.assertRaises(errors.InvalidArgumentError):
        sess.run(get_next)
# Standard TensorFlow test entry point: discovers and runs the test cases above.
if __name__ == "__main__":
  test.main()
| 39.194915
| 80
| 0.689946
| 1,148
| 9,250
| 5.283101
| 0.14547
| 0.04897
| 0.094312
| 0.05573
| 0.826216
| 0.781039
| 0.781039
| 0.771641
| 0.770486
| 0.769827
| 0
| 0.016358
| 0.206919
| 9,250
| 235
| 81
| 39.361702
| 0.810387
| 0.097514
| 0
| 0.822222
| 0
| 0
| 0.006249
| 0
| 0
| 0
| 0
| 0
| 0.188889
| 1
| 0.088889
| false
| 0
| 0.077778
| 0.044444
| 0.216667
| 0.005556
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0778a5b17d224eb012b626f43e26296a7e3fe0c7
| 47,993
|
py
|
Python
|
src/harness/testcases/WINNF_FT_S_FPR_testcase.py
|
NSF-Swift/Spectrum-Access-System
|
02cf3490c9fd0cec38074d3bdb3bca63bb7d03bf
|
[
"Apache-2.0"
] | 58
|
2015-07-22T14:16:52.000Z
|
2022-03-10T09:09:33.000Z
|
src/harness/testcases/WINNF_FT_S_FPR_testcase.py
|
NSF-Swift/Spectrum-Access-System
|
02cf3490c9fd0cec38074d3bdb3bca63bb7d03bf
|
[
"Apache-2.0"
] | 537
|
2015-07-30T16:28:20.000Z
|
2021-09-30T17:12:15.000Z
|
src/harness/testcases/WINNF_FT_S_FPR_testcase.py
|
NSF-Swift/Spectrum-Access-System
|
02cf3490c9fd0cec38074d3bdb3bca63bb7d03bf
|
[
"Apache-2.0"
] | 51
|
2015-06-30T00:25:15.000Z
|
2022-01-21T00:09:22.000Z
|
# Copyright 2018 SAS Project Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of FPR test cases."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import sas
import sas_testcase
from sas_test_harness import generateCbsdRecords
from util import winnforum_testcase, writeConfig, loadConfig, configurable_testcase,\
addCbsdIdsToRequests, getFqdnLocalhost, getUnusedPort, getCertFilename, json_load
from testcases.WINNF_FT_S_MCP_testcase import McpXprCommonTestcase
class FSSProtectionTestcase(McpXprCommonTestcase):
  def setUp(self):
    """Obtains handles to the SAS under test and resets it before each test."""
    self._sas, self._sas_admin = sas.GetTestingSas()
    # Reset the SAS UUT to a known clean state so tests don't interfere.
    self._sas_admin.Reset()
  def tearDown(self):
    """Shuts down any SAS test-harness servers started during the test."""
    self.ShutdownServers()
  def generate_FPR_1_default_config(self, filename):
    """Generates the WinnForum configuration for FPR.1.

    FPR.1: multiple CBSDs from multiple SASs inside and outside the
    neighborhood of an FSS station, TT&C flag OFF.

    Builds one iteration of registration/grant data spread across two domain
    proxies plus one directly-registered CBSD (device_6), two SAS test-harness
    dumps, and the FSS protected entity, then writes the JSON config to
    `filename` via `writeConfig`.
    """
    # Load FSS record
    fss_record_1 = json_load(
        os.path.join('testcases', 'testdata', 'fss_record_0.json'))
    fss_record_1['ttc'] = False
    # Load devices for SAS UUT for multiple iterations through multiple domain proxy's
    device_1 = json_load(
        os.path.join('testcases', 'testdata', 'device_a.json'))
    # Moving device_1 to a location within 40 KMs of FSS zone (9.596km)
    # NOTE(review): distances in these comments are precomputed from the
    # lat/long offsets below — TODO confirm against fss_record_0.json.
    device_1['installationParam']['latitude'] = \
        fss_record_1['record']['deploymentParam'][0]['installationParam']['latitude'] + 0.08
    device_1['installationParam']['longitude'] = \
        fss_record_1['record']['deploymentParam'][0]['installationParam']['longitude'] + 0.08
    device_2 = json_load(
        os.path.join('testcases', 'testdata', 'device_b.json'))
    # Moving device_2 to a location within 150 KMs of FSS zone (115.243km)
    device_2['installationParam']['latitude'] = \
        fss_record_1['record']['deploymentParam'][0]['installationParam']['latitude'] + 1
    device_2['installationParam']['longitude'] = \
        fss_record_1['record']['deploymentParam'][0]['installationParam']['longitude'] + 1
    device_3 = json_load(
        os.path.join('testcases', 'testdata', 'device_c.json'))
    # Moving device_3 to a location outside 40 KMs of FSS zone(60.861km)
    device_3['installationParam']['latitude'] = \
        fss_record_1['record']['deploymentParam'][0]['installationParam']['latitude'] + 0.5
    device_3['installationParam']['longitude'] = \
        fss_record_1['record']['deploymentParam'][0]['installationParam']['longitude'] + 0.5
    device_4 = json_load(
        os.path.join('testcases', 'testdata', 'device_d.json'))
    # Moving device_4 to a location outside 150 KMs of FSS zone (182.158km)
    device_4['installationParam']['latitude'] = \
        fss_record_1['record']['deploymentParam'][0]['installationParam']['latitude'] + 1.2
    device_4['installationParam']['longitude'] = \
        fss_record_1['record']['deploymentParam'][0]['installationParam']['longitude'] + 1.2
    device_5 = json_load(
        os.path.join('testcases', 'testdata', 'device_e.json'))
    # Moving device_5 to a location within 40 KMs of FSS zone (5.425km)
    device_5['installationParam']['latitude'] = \
        fss_record_1['record']['deploymentParam'][0]['installationParam']['latitude'] + 0.02
    device_5['installationParam']['longitude'] = \
        fss_record_1['record']['deploymentParam'][0]['installationParam']['longitude'] + 0.02
    device_6 = json_load(
        os.path.join('testcases', 'testdata', 'device_f.json'))
    # Moving device_6 to a location outside 40 KMs of FSS zone (44.504km)
    device_6['installationParam']['latitude'] = \
        fss_record_1['record']['deploymentParam'][0]['installationParam']['latitude'] +0.3
    device_6['installationParam']['longitude'] = \
        fss_record_1['record']['deploymentParam'][0]['installationParam']['longitude'] + 0.3
    # Load Grant requests (10 MHz channels at varying offsets in/around the
    # 3550-3700 MHz CBRS band).
    grant_request_1 = json_load(
        os.path.join('testcases', 'testdata', 'grant_0.json'))
    grant_request_1['operationParam']['operationFrequencyRange']['lowFrequency'] = 3575000000
    grant_request_1['operationParam']['operationFrequencyRange']['highFrequency'] = 3585000000
    grant_request_2 = json_load(
        os.path.join('testcases', 'testdata', 'grant_0.json'))
    grant_request_2['operationParam']['operationFrequencyRange']['lowFrequency'] = 3580000000
    grant_request_2['operationParam']['operationFrequencyRange']['highFrequency'] = 3590000000
    grant_request_3 = json_load(
        os.path.join('testcases', 'testdata', 'grant_0.json'))
    grant_request_3['operationParam']['operationFrequencyRange']['lowFrequency'] = 3610000000
    grant_request_3['operationParam']['operationFrequencyRange']['highFrequency'] = 3620000000
    grant_request_4 = json_load(
        os.path.join('testcases', 'testdata', 'grant_0.json'))
    grant_request_4['operationParam']['operationFrequencyRange']['lowFrequency'] = 3645000000
    grant_request_4['operationParam']['operationFrequencyRange']['highFrequency'] = 3655000000
    grant_request_5 = json_load(
        os.path.join('testcases', 'testdata', 'grant_0.json'))
    grant_request_5['operationParam']['operationFrequencyRange']['lowFrequency'] = 3675000000
    grant_request_5['operationParam']['operationFrequencyRange']['highFrequency'] = 3685000000
    grant_request_6 = json_load(
        os.path.join('testcases', 'testdata', 'grant_0.json'))
    grant_request_6['operationParam']['operationFrequencyRange']['lowFrequency'] = 3690000000
    grant_request_6['operationParam']['operationFrequencyRange']['highFrequency'] = 3700000000
    # device_2 and device_4 are of Category B
    # Load Conditional Data
    self.assertEqual(device_2['cbsdCategory'], 'B')
    conditionals_device_2 = {
        'cbsdCategory': device_2['cbsdCategory'],
        'fccId': device_2['fccId'],
        'cbsdSerialNumber': device_2['cbsdSerialNumber'],
        'airInterface': device_2['airInterface'],
        'installationParam': device_2['installationParam'],
        'measCapability': device_2['measCapability']
    }
    self.assertEqual(device_4['cbsdCategory'], 'B')
    conditionals_device_4 = {
        'cbsdCategory': device_4['cbsdCategory'],
        'fccId': device_4['fccId'],
        'cbsdSerialNumber': device_4['cbsdSerialNumber'],
        'airInterface': device_4['airInterface'],
        'installationParam': device_4['installationParam'],
        'measCapability': device_4['measCapability']
    }
    # Remove conditionals from registration: Cat B devices register without
    # these fields, which are supplied out-of-band as conditional data.
    del device_2['cbsdCategory']
    del device_2['airInterface']
    del device_2['installationParam']
    del device_2['measCapability']
    del device_4['cbsdCategory']
    del device_4['airInterface']
    del device_4['installationParam']
    del device_4['measCapability']
    # Registration and grant records for multiple iterations
    cbsd_records_iteration_0_domain_proxy_0 = {
        'registrationRequests': [device_1, device_3,device_4],
        'grantRequests': [grant_request_1, grant_request_3,grant_request_4],
        'conditionalRegistrationData': [conditionals_device_4]
    }
    cbsd_records_iteration_0_domain_proxy_1 = {
        'registrationRequests': [device_5,device_2],
        'grantRequests': [grant_request_5,grant_request_2],
        'conditionalRegistrationData': [conditionals_device_2]
    }
    # Protected entities records for multiple iterations
    protected_entities_iteration_0 = {
        'fssRecords': [fss_record_1]
    }
    # SAS Test Harnesses configurations,
    # Following configurations are for two SAS test harnesses for two iterations
    sas_test_harness_device_1 = json_load(
        os.path.join('testcases', 'testdata', 'device_a.json'))
    sas_test_harness_device_1['fccId'] = "test_fcc_id_g"
    sas_test_harness_device_1['userId'] = "test_user_id_g"
    sas_test_harness_device_1['installationParam']['latitude'] = \
        fss_record_1['record']['deploymentParam'][0]['installationParam']['latitude'] - 1.2
    sas_test_harness_device_1['installationParam']['longitude'] = \
        fss_record_1['record']['deploymentParam'][0]['installationParam']['longitude'] -1.2
    sas_test_harness_device_2 = json_load(
        os.path.join('testcases', 'testdata', 'device_b.json'))
    sas_test_harness_device_2['fccId'] = "test_fcc_id_h"
    sas_test_harness_device_2['userId'] = "test_user_id_h"
    sas_test_harness_device_2['installationParam']['latitude'] = \
        fss_record_1['record']['deploymentParam'][0]['installationParam']['latitude'] - 1.5
    sas_test_harness_device_2['installationParam']['longitude'] = \
        fss_record_1['record']['deploymentParam'][0]['installationParam']['longitude'] - 1.5
    # Generate Cbsd FAD Records for SAS Test Harness 0, iteration 0
    cbsd_fad_records_iteration_0_sas_test_harness_0 = generateCbsdRecords([sas_test_harness_device_1],[[grant_request_1]])
    # Generate Cbsd FAD Records for SAS Test Harness 1, iteration 0
    cbsd_fad_records_iteration_0_sas_test_harness_1 = generateCbsdRecords([sas_test_harness_device_2],[[grant_request_2]])
    # Generate SAS Test Harnesses dump records for multiple iterations
    dump_records_iteration_0_sas_test_harness_0 = {
        'cbsdRecords': cbsd_fad_records_iteration_0_sas_test_harness_0
    }
    dump_records_iteration_0_sas_test_harness_1 = {
        'cbsdRecords': cbsd_fad_records_iteration_0_sas_test_harness_1
    }
    # SAS Test Harnesses configuration
    sas_test_harness_0_config = {
        'sasTestHarnessName': 'SAS-TH-1',
        'hostName': getFqdnLocalhost(),
        'port': getUnusedPort(),
        'serverCert': getCertFilename('sas.cert'),
        'serverKey': getCertFilename('sas.key'),
        'caCert': getCertFilename('ca.cert')
    }
    sas_test_harness_1_config = {
        'sasTestHarnessName': 'SAS-TH-2',
        'hostName': getFqdnLocalhost(),
        'port': getUnusedPort() ,
        'serverCert': getCertFilename('sas_1.cert'),
        'serverKey': getCertFilename('sas_1.key'),
        'caCert': getCertFilename('ca.cert')
    }
    # Create the actual config.
    # device_6 registers directly with the SAS UUT (own client cert/key)
    # rather than through a domain proxy.
    iteration0_config = {
        'cbsdRequestsWithDomainProxies': [cbsd_records_iteration_0_domain_proxy_0, cbsd_records_iteration_0_domain_proxy_1],
        'cbsdRecords': [{
            'registrationRequest': device_6,
            'grantRequest': grant_request_6,
            'clientCert': getCertFilename('device_f.cert'),
            'clientKey': getCertFilename('device_f.key')
        }],
        'protectedEntities': protected_entities_iteration_0,
        'dpaActivationList': [],
        'dpaDeactivationList': [],
        'sasTestHarnessData': [dump_records_iteration_0_sas_test_harness_0, dump_records_iteration_0_sas_test_harness_1]
    }
    config = {
        'initialCbsdRequestsWithDomainProxies': self.getEmptyCbsdRequestsWithDomainProxies(2),
        'initialCbsdRecords': [],
        'iterationData': [iteration0_config],
        'sasTestHarnessConfigs': [sas_test_harness_0_config, sas_test_harness_1_config],
        'domainProxyConfigs': [{'cert': getCertFilename('domain_proxy.cert'),
                                'key': getCertFilename('domain_proxy.key')},
                               {'cert': getCertFilename('domain_proxy_1.cert'),
                                'key': getCertFilename('domain_proxy_1.key')}]
    }
    writeConfig(filename, config)
  @configurable_testcase(generate_FPR_1_default_config)
  def test_WINNF_FT_S_FPR_1(self, config_filename):
    """Multiple CBSDs from Multiple SASs Inside and Outside the Neighborhood of an FSS Station for FSS Scenario 1 with TT&C Flag = OFF"""
    config = loadConfig(config_filename)
    # Invoke MCP test steps 1 through 22.
    # 'xPR2' presumably selects the xPR-variant of the shared MCP step
    # sequence in McpXprCommonTestcase — TODO confirm semantics there.
    self.executeMcpTestSteps(config, 'xPR2')
  def generate_FPR_2_default_config(self, filename):
    """Generates the WinnForum configuration for FPR.2.

    FPR.2: multiple CBSDs from multiple SASs inside and outside the
    neighborhood of an FSS station, TT&C flag ON, with the FSS operating in
    3625-4200 MHz. Devices 1-7 are placed at increasing distances from the
    FSS with grants below, overlapping, and inside the FSS band. Writes the
    JSON config to `filename` via `writeConfig`.
    """
    # Load FSS station operating at range below 3700 to 4200 MHz, TT&C flag ON.
    fss_data = json_load(
        os.path.join('testcases', 'testdata', 'fss_record_0.json'))
    fss_record = fss_data['record']
    fss_record['deploymentParam'][0]['operationParam'][
        'operationFrequencyRange']['lowFrequency'] = 3625000000
    fss_record['deploymentParam'][0]['operationParam'][
        'operationFrequencyRange']['highFrequency'] = 4200000000
    fss_data['ttc'] = True
    # Load devices and grant info for Scenario 1.
    # Loading device_1 (Cat A) to a location within 40 km of FSS,
    # frequency range outside of FSS band.
    device_1 = json_load(
        os.path.join('testcases', 'testdata', 'device_a.json'))
    device_1['installationParam']['latitude'] = fss_record[
        'deploymentParam'][0]['installationParam']['latitude'] + 0.20
    device_1['installationParam']['longitude'] = fss_record[
        'deploymentParam'][0]['installationParam']['longitude']
    grant_request_1 = json_load(
        os.path.join('testcases', 'testdata', 'grant_0.json'))
    grant_request_1['operationParam'][
        'operationFrequencyRange']['lowFrequency'] = 3560000000
    grant_request_1['operationParam'][
        'operationFrequencyRange']['highFrequency'] = 3570000000
    # Loading device_2 (Cat B) to a location within 40 km of FSS,
    # frequency range outside of FSS band, overlapping device 1 band.
    device_2 = json_load(
        os.path.join('testcases', 'testdata', 'device_b.json'))
    device_2['installationParam']['latitude'] = fss_record[
        'deploymentParam'][0]['installationParam']['latitude']
    device_2['installationParam']['longitude'] = fss_record[
        'deploymentParam'][0]['installationParam']['longitude'] + 0.20
    grant_request_2 = json_load(
        os.path.join('testcases', 'testdata', 'grant_0.json'))
    grant_request_2['operationParam'][
        'operationFrequencyRange']['lowFrequency'] = 3565000000
    grant_request_2['operationParam'][
        'operationFrequencyRange']['highFrequency'] = 3575000000
    # Loading device_3 (Cat A) to a location within 40 km of FSS,
    # frequency range outside of FSS band.
    device_3 = json_load(
        os.path.join('testcases', 'testdata', 'device_c.json'))
    device_3['installationParam']['latitude'] = fss_record[
        'deploymentParam'][0]['installationParam']['latitude'] + 0.05
    device_3['installationParam']['longitude'] = fss_record[
        'deploymentParam'][0]['installationParam']['longitude'] + 0.05
    grant_request_3 = json_load(
        os.path.join('testcases', 'testdata', 'grant_0.json'))
    grant_request_3['operationParam'][
        'operationFrequencyRange']['lowFrequency'] = 3590000000
    grant_request_3['operationParam'][
        'operationFrequencyRange']['highFrequency'] = 3600000000
    # Loading device_4 (Cat B) to a location within 40 km of FSS,
    # frequency range overlapping FSS band.
    device_4 = json_load(
        os.path.join('testcases', 'testdata', 'device_d.json'))
    device_4['installationParam']['latitude'] = fss_record[
        'deploymentParam'][0]['installationParam']['latitude'] - 0.05
    device_4['installationParam']['longitude'] = fss_record[
        'deploymentParam'][0]['installationParam']['longitude'] - 0.05
    grant_request_4 = json_load(
        os.path.join('testcases', 'testdata', 'grant_0.json'))
    grant_request_4['operationParam'][
        'operationFrequencyRange']['lowFrequency'] = 3620000000
    grant_request_4['operationParam'][
        'operationFrequencyRange']['highFrequency'] = 3630000000
    # Loading device_5 (Cat A) to a location within 40 km of FSS,
    # frequency range inside of FSS band.
    device_5 = json_load(
        os.path.join('testcases', 'testdata', 'device_e.json'))
    device_5['installationParam']['latitude'] = fss_record[
        'deploymentParam'][0]['installationParam']['latitude'] + 0.05
    device_5['installationParam']['longitude'] = fss_record[
        'deploymentParam'][0]['installationParam']['longitude'] - 0.05
    grant_request_5 = json_load(
        os.path.join('testcases', 'testdata', 'grant_0.json'))
    grant_request_5['operationParam'][
        'operationFrequencyRange']['lowFrequency'] = 3650000000
    grant_request_5['operationParam'][
        'operationFrequencyRange']['highFrequency'] = 3660000000
    # Loading device_6 (Cat A) to a location between 40 to 150 km of FSS,
    # frequency range inside of FSS band.
    device_6 = json_load(
        os.path.join('testcases', 'testdata', 'device_f.json'))
    device_6['installationParam']['latitude'] = fss_record[
        'deploymentParam'][0]['installationParam']['latitude'] + 1.00
    device_6['installationParam']['longitude'] = fss_record[
        'deploymentParam'][0]['installationParam']['longitude']
    grant_request_6 = json_load(
        os.path.join('testcases', 'testdata', 'grant_0.json'))
    grant_request_6['operationParam'][
        'operationFrequencyRange']['lowFrequency'] = 3690000000
    grant_request_6['operationParam'][
        'operationFrequencyRange']['highFrequency'] = 3700000000
    # Loading device_7 (Cat A) to a location outside 150 km of FSS,
    # frequency range inside of FSS band.
    device_7 = json_load(
        os.path.join('testcases', 'testdata', 'device_g.json'))
    device_7['installationParam']['latitude'] = fss_record[
        'deploymentParam'][0]['installationParam']['latitude'] + 1.50
    device_7['installationParam']['longitude'] = fss_record[
        'deploymentParam'][0]['installationParam']['longitude'] + 0.50
    grant_request_7 = json_load(
        os.path.join('testcases', 'testdata', 'grant_0.json'))
    grant_request_7['operationParam'][
        'operationFrequencyRange']['lowFrequency'] = 3670000000
    grant_request_7['operationParam'][
        'operationFrequencyRange']['highFrequency'] = 3680000000
    # Load Conditional Data for Cat B devices (devices 2 and 4).
    self.assertEqual(device_2['cbsdCategory'], 'B')
    conditionals_device_2 = {
        'cbsdCategory': device_2['cbsdCategory'],
        'fccId': device_2['fccId'],
        'cbsdSerialNumber': device_2['cbsdSerialNumber'],
        'airInterface': device_2['airInterface'],
        'installationParam': device_2['installationParam'],
        'measCapability': device_2['measCapability']
    }
    self.assertEqual(device_4['cbsdCategory'], 'B')
    conditionals_device_4 = {
        'cbsdCategory': device_4['cbsdCategory'],
        'fccId': device_4['fccId'],
        'cbsdSerialNumber': device_4['cbsdSerialNumber'],
        'airInterface': device_4['airInterface'],
        'installationParam': device_4['installationParam'],
        'measCapability': device_4['measCapability']
    }
    # Remove conditionals from registration: Cat B devices register without
    # these fields, which are supplied out-of-band as conditional data.
    del device_2['cbsdCategory']
    del device_2['airInterface']
    del device_2['installationParam']
    del device_2['measCapability']
    del device_4['cbsdCategory']
    del device_4['airInterface']
    del device_4['installationParam']
    del device_4['measCapability']
    # DP Registration and grant records
    cbsd_records_domain_proxy_0 = {
        'registrationRequests': [device_1, device_2, device_6],
        'grantRequests': [grant_request_1, grant_request_2, grant_request_6],
        'conditionalRegistrationData': [conditionals_device_2]
    }
    cbsd_records_domain_proxy_1 = {
        'registrationRequests': [device_3, device_4, device_7],
        'grantRequests': [grant_request_3, grant_request_4, grant_request_7],
        'conditionalRegistrationData': [conditionals_device_4]
    }
    # Protected entity record
    protected_entities = {
        'fssRecords': [fss_data]
    }
    # SAS Test Harnesses configurations,
    # Following configurations are for two SAS test harnesses
    sas_test_harness_device_1 = json_load(
        os.path.join('testcases', 'testdata', 'device_a.json'))
    sas_test_harness_device_1['fccId'] = 'test_fcc_id_1'
    sas_test_harness_device_1['userId'] = 'test_user_id_1'
    sas_test_harness_device_2 = json_load(
        os.path.join('testcases', 'testdata', 'device_b.json'))
    sas_test_harness_device_2['fccId'] = 'test_fcc_id_2'
    sas_test_harness_device_2['userId'] = 'test_user_id_2'
    sas_test_harness_device_3 = json_load(
        os.path.join('testcases', 'testdata', 'device_c.json'))
    sas_test_harness_device_3['fccId'] = 'test_fcc_id_3'
    sas_test_harness_device_3['userId'] = 'test_user_id_3'
    # Generate Cbsd FAD Records for SAS Test Harness 0
    cbsd_fad_records_sas_test_harness_0 = generateCbsdRecords(
        [sas_test_harness_device_1], [[grant_request_1]])
    # Generate Cbsd FAD Records for SAS Test Harness 1
    cbsd_fad_records_sas_test_harness_1 = generateCbsdRecords(
        [sas_test_harness_device_2, sas_test_harness_device_3],
        [[grant_request_4], [grant_request_6]])
    # Generate SAS Test Harnesses dump records
    dump_records_sas_test_harness_0 = {
        'cbsdRecords': cbsd_fad_records_sas_test_harness_0
    }
    dump_records_sas_test_harness_1 = {
        'cbsdRecords': cbsd_fad_records_sas_test_harness_1
    }
    # SAS Test Harnesses configuration
    sas_test_harness_0_config = {
        'sasTestHarnessName': 'SAS-TH-1',
        'hostName': getFqdnLocalhost(),
        'port': getUnusedPort(),
        'serverCert': getCertFilename('sas.cert'),
        'serverKey': getCertFilename('sas.key'),
        'caCert': getCertFilename('ca.cert')
    }
    sas_test_harness_1_config = {
        'sasTestHarnessName': 'SAS-TH-2',
        'hostName': getFqdnLocalhost(),
        'port': getUnusedPort(),
        'serverCert': getCertFilename('sas_1.cert'),
        'serverKey': getCertFilename('sas_1.key'),
        'caCert': getCertFilename('ca.cert')
    }
    # device_5 registers directly with the SAS UUT (own client cert/key)
    # rather than through a domain proxy.
    iteration_config = {
        'cbsdRequestsWithDomainProxies': [cbsd_records_domain_proxy_0,
                                          cbsd_records_domain_proxy_1],
        'cbsdRecords': [{
            'registrationRequest': device_5,
            'grantRequest': grant_request_5,
            'clientCert': getCertFilename('device_e.cert'),
            'clientKey': getCertFilename('device_e.key')
        }],
        'protectedEntities': protected_entities,
        'dpaActivationList': [],
        'dpaDeactivationList': [],
        'sasTestHarnessData': [dump_records_sas_test_harness_0,
                               dump_records_sas_test_harness_1]
    }
    # Create the actual config.
    config = {
        'initialCbsdRequestsWithDomainProxies': self.getEmptyCbsdRequestsWithDomainProxies(2),
        'initialCbsdRecords': [],
        'iterationData': [iteration_config],
        'sasTestHarnessConfigs': [sas_test_harness_0_config,
                                  sas_test_harness_1_config],
        'domainProxyConfigs': [
            {'cert': getCertFilename('domain_proxy.cert'),
             'key': getCertFilename('domain_proxy.key')},
            {'cert': getCertFilename('domain_proxy_1.cert'),
             'key': getCertFilename('domain_proxy_1.key')}
        ]
    }
    writeConfig(filename, config)
  @configurable_testcase(generate_FPR_2_default_config)
  def test_WINNF_FT_S_FPR_2(self, config_filename):
    """Multiple CBSDs from Multiple SASs Inside and Outside the Neighborhood
    of an FSS Station for Scenario 1 with TT&C Flag = ON.
    """
    config = loadConfig(config_filename)
    # Invoke MCP test steps 1 through 22.
    # 'xPR2' presumably selects the xPR-variant of the shared MCP step
    # sequence in McpXprCommonTestcase — TODO confirm semantics there.
    self.executeMcpTestSteps(config, 'xPR2')
def generate_FPR_3_default_config(self, filename):
""" Generates the WinnForum configuration for FPR.3. """
# Load FSS record
fss_record_1 = json_load(
os.path.join('testcases', 'testdata', 'fss_record_0.json'))
fss_record_1['ttc'] = False
fss_record_1['record']['deploymentParam'][0]['operationParam']['operationFrequencyRange']['lowFrequency'] = 3700000000
# Load devices for SAS UUT for multiple iterations through multiple domain proxy's
device_1 = json_load(
os.path.join('testcases', 'testdata', 'device_a.json'))
# Moving device_1 to a location within 40 KMs of FSS zone (9.596km)
device_1['installationParam']['latitude'] = \
fss_record_1['record']['deploymentParam'][0]['installationParam']['latitude'] - 0.02
device_1['installationParam']['longitude'] = \
fss_record_1['record']['deploymentParam'][0]['installationParam']['longitude'] +0.02
device_2 = json_load(
os.path.join('testcases', 'testdata', 'device_b.json'))
# Moving device_2 to a location within 150 KMs of FSS zone (115.243km)
device_2['installationParam']['latitude'] = \
fss_record_1['record']['deploymentParam'][0]['installationParam']['latitude'] + 1
device_2['installationParam']['longitude'] = \
fss_record_1['record']['deploymentParam'][0]['installationParam']['longitude'] +1
device_3 = json_load(
os.path.join('testcases', 'testdata', 'device_c.json'))
# Moving device_3 to a location outside 40 KMs of FSS zone(60.861km)
device_3['installationParam']['latitude'] = fss_record_1['record']['deploymentParam'][0]['installationParam']['latitude'] + 0.5
device_3['installationParam']['longitude'] = fss_record_1['record']['deploymentParam'][0]['installationParam']['longitude'] + 0.5
device_4 = json_load(
os.path.join('testcases', 'testdata', 'device_d.json'))
# Moving device_4 to a location outside 150 KMs of FSS zone (182.158km)
device_4['installationParam']['latitude'] = \
fss_record_1['record']['deploymentParam'][0]['installationParam']['latitude'] + 1.2
device_4['installationParam']['longitude'] = \
fss_record_1['record']['deploymentParam'][0]['installationParam']['longitude'] + 1.2
# Load Grant requests
grant_request_1 = json_load(
os.path.join('testcases', 'testdata', 'grant_0.json'))
grant_request_1['operationParam']['operationFrequencyRange']['lowFrequency'] = 3575000000
grant_request_1['operationParam']['operationFrequencyRange']['highFrequency'] = 3585000000
grant_request_2 = json_load(
os.path.join('testcases', 'testdata', 'grant_0.json'))
grant_request_2['operationParam']['operationFrequencyRange']['lowFrequency'] = 3580000000
grant_request_2['operationParam']['operationFrequencyRange']['highFrequency'] = 3590000000
grant_request_3 = json_load(
os.path.join('testcases', 'testdata', 'grant_0.json'))
grant_request_3['operationParam']['operationFrequencyRange']['lowFrequency'] = 3610000000
grant_request_3['operationParam']['operationFrequencyRange']['highFrequency'] = 3620000000
grant_request_4 = json_load(
os.path.join('testcases', 'testdata', 'grant_0.json'))
grant_request_4['operationParam']['operationFrequencyRange']['lowFrequency'] = 3645000000
grant_request_4['operationParam']['operationFrequencyRange']['highFrequency'] = 3655000000
# device_2 and device_4 are of Category B
# Load Conditional Data
self.assertEqual(device_2['cbsdCategory'], 'B')
conditionals_device_2 = {
'cbsdCategory': device_2['cbsdCategory'],
'fccId': device_2['fccId'],
'cbsdSerialNumber': device_2['cbsdSerialNumber'],
'airInterface': device_2['airInterface'],
'installationParam': device_2['installationParam'],
'measCapability': device_2['measCapability']
}
self.assertEqual(device_4['cbsdCategory'], 'B')
conditionals_device_4 = {
'cbsdCategory': device_4['cbsdCategory'],
'fccId': device_4['fccId'],
'cbsdSerialNumber': device_4['cbsdSerialNumber'],
'airInterface': device_4['airInterface'],
'installationParam': device_4['installationParam'],
'measCapability': device_4['measCapability']
}
conditionals = [conditionals_device_2, conditionals_device_4]
# Remove conditionals from registration
del device_2['cbsdCategory']
del device_2['airInterface']
del device_2['installationParam']
del device_2['measCapability']
del device_4['cbsdCategory']
del device_4['airInterface']
del device_4['installationParam']
del device_4['measCapability']
# Registration and grant records for multiple iterations
cbsd_records_iteration_0_domain_proxy_0 = {
'registrationRequests': [device_1, device_2],
'grantRequests': [grant_request_1, grant_request_2],
'conditionalRegistrationData': [conditionals_device_2]
}
cbsd_records_iteration_0_domain_proxy_1 = {
'registrationRequests': [device_4],
'grantRequests': [grant_request_4],
'conditionalRegistrationData': [conditionals_device_4]
}
# Protected entities records for multiple iterations
protected_entities_iteration_0 = {
'fssRecords': [fss_record_1]
}
# SAS Test Harnesses configurations,
# Following configurations are for two SAS test harnesses for two iterations
sas_test_harness_device_1 = json_load(
os.path.join('testcases', 'testdata', 'device_a.json'))
sas_test_harness_device_1['fccId'] = "test_fcc_id_e"
sas_test_harness_device_1['userId'] = "test_user_id_e"
sas_test_harness_device_1['installationParam']['latitude'] = \
fss_record_1['record']['deploymentParam'][0]['installationParam']['latitude'] - 1.2
sas_test_harness_device_1['installationParam']['longitude'] = \
fss_record_1['record']['deploymentParam'][0]['installationParam']['longitude'] -1.2
sas_test_harness_device_2 = json_load(
os.path.join('testcases', 'testdata', 'device_b.json'))
sas_test_harness_device_2['fccId'] = "test_fcc_id_f"
sas_test_harness_device_2['userId'] = "test_user_id_f"
sas_test_harness_device_2['installationParam']['latitude'] = \
fss_record_1['record']['deploymentParam'][0]['installationParam']['latitude'] - 1.5
sas_test_harness_device_2['installationParam']['longitude'] = \
fss_record_1['record']['deploymentParam'][0]['installationParam']['longitude'] -1.5
# Generate Cbsd FAD Records for SAS Test Harness 0, iteration 0
cbsd_fad_records_iteration_0_sas_test_harness_0 = generateCbsdRecords([sas_test_harness_device_1],[[grant_request_1]])
# Generate Cbsd FAD Records for SAS Test Harness 1, iteration 0
cbsd_fad_records_iteration_0_sas_test_harness_1 = generateCbsdRecords([sas_test_harness_device_2],[[grant_request_2]])
# Generate SAS Test Harnesses dump records for multiple iterations
dump_records_iteration_0_sas_test_harness_0 = {
'cbsdRecords': cbsd_fad_records_iteration_0_sas_test_harness_0
}
dump_records_iteration_0_sas_test_harness_1 = {
'cbsdRecords': cbsd_fad_records_iteration_0_sas_test_harness_1
}
# SAS Test Harnesses configuration
sas_test_harness_0_config = {
'sasTestHarnessName': 'SAS-TH-1',
'hostName': getFqdnLocalhost(),
'port': getUnusedPort(),
'serverCert': getCertFilename('sas.cert'),
'serverKey': getCertFilename('sas.key'),
'caCert': getCertFilename('ca.cert')
}
sas_test_harness_1_config = {
'sasTestHarnessName': 'SAS-TH-2',
'hostName': getFqdnLocalhost(),
'port': getUnusedPort(),
'serverCert': getCertFilename('sas_1.cert'),
'serverKey': getCertFilename('sas_1.key'),
'caCert': getCertFilename('ca.cert')
}
# Create the actual config.
iteration0_config = {
'cbsdRequestsWithDomainProxies': [cbsd_records_iteration_0_domain_proxy_0, cbsd_records_iteration_0_domain_proxy_1],
'cbsdRecords': [{
'registrationRequest': device_3,
'grantRequest': grant_request_3,
'clientCert': getCertFilename('device_c.cert'),
'clientKey': getCertFilename('device_c.key')
}],
'protectedEntities': protected_entities_iteration_0,
'dpaActivationList': [],
'dpaDeactivationList': [],
'sasTestHarnessData': [dump_records_iteration_0_sas_test_harness_0, dump_records_iteration_0_sas_test_harness_1]
}
config = {
'initialCbsdRequestsWithDomainProxies': self.getEmptyCbsdRequestsWithDomainProxies(2),
'initialCbsdRecords': [],
'iterationData': [iteration0_config],
'sasTestHarnessConfigs': [sas_test_harness_0_config, sas_test_harness_1_config],
'domainProxyConfigs': [{'cert': getCertFilename('domain_proxy.cert'),
'key': getCertFilename('domain_proxy.key')},
{'cert': getCertFilename('domain_proxy_1.cert'),
'key': getCertFilename('domain_proxy_1.key')}]
}
writeConfig(filename, config)
@configurable_testcase(generate_FPR_3_default_config)
def test_WINNF_FT_S_FPR_3(self, config_filename):
"""Multiple CBSDs from Multiple SASs Inside and Outside the Neighborhood of an FSS Station for FSS Scenario 2 with TT&C Flag = OFF"""
config = loadConfig(config_filename)
# Invoke MCP test steps 1 through 22.
self.executeMcpTestSteps(config, 'xPR2')
  def generate_FPR_4_default_config(self, filename):
    """Generates the WinnForum configuration for FPR.4.

    FPR.4: multiple CBSDs from multiple SASs inside and outside the
    neighborhood of an FSS station, FSS scenario 2, TT&C flag = ON.
    Builds one iteration of registration/grant data routed through two
    domain proxies plus two SAS test harnesses and writes the resulting
    default configuration to |filename|.
    """
    # Load FSS station operating at range 3700 to 4200 MHz, TT&C flag ON.
    fss_data = json_load(
        os.path.join('testcases', 'testdata', 'fss_record_0.json'))
    fss_record = fss_data['record']
    fss_record['deploymentParam'][0]['operationParam'][
        'operationFrequencyRange']['lowFrequency'] = 3700000000
    fss_record['deploymentParam'][0]['operationParam'][
        'operationFrequencyRange']['highFrequency'] = 4200000000
    fss_data['ttc'] = True
    # Load devices and grant info for Scenario 1.
    # Loading device_1 (Cat A) to a location within 40 km of FSS,
    # frequency range outside of FSS band.
    device_1 = json_load(
        os.path.join('testcases', 'testdata', 'device_a.json'))
    device_1['installationParam']['latitude'] = fss_record[
        'deploymentParam'][0]['installationParam']['latitude'] + 0.20
    device_1['installationParam']['longitude'] = fss_record[
        'deploymentParam'][0]['installationParam']['longitude']
    grant_request_1 = json_load(
        os.path.join('testcases', 'testdata', 'grant_0.json'))
    grant_request_1['operationParam'][
        'operationFrequencyRange']['lowFrequency'] = 3560000000
    grant_request_1['operationParam'][
        'operationFrequencyRange']['highFrequency'] = 3570000000
    # Loading device_2 (Cat B) to a location within 40 km of FSS,
    # frequency range outside of FSS band, overlapping device 1 band.
    device_2 = json_load(
        os.path.join('testcases', 'testdata', 'device_b.json'))
    device_2['installationParam']['latitude'] = fss_record[
        'deploymentParam'][0]['installationParam']['latitude']
    device_2['installationParam']['longitude'] = fss_record[
        'deploymentParam'][0]['installationParam']['longitude'] + 0.20
    grant_request_2 = json_load(
        os.path.join('testcases', 'testdata', 'grant_0.json'))
    grant_request_2['operationParam'][
        'operationFrequencyRange']['lowFrequency'] = 3565000000
    grant_request_2['operationParam'][
        'operationFrequencyRange']['highFrequency'] = 3575000000
    # Loading device_3 (Cat A) to a location within 40 km of FSS,
    # frequency range outside of FSS band.
    device_3 = json_load(
        os.path.join('testcases', 'testdata', 'device_c.json'))
    device_3['installationParam']['latitude'] = fss_record[
        'deploymentParam'][0]['installationParam']['latitude'] + 0.05
    device_3['installationParam']['longitude'] = fss_record[
        'deploymentParam'][0]['installationParam']['longitude'] + 0.05
    grant_request_3 = json_load(
        os.path.join('testcases', 'testdata', 'grant_0.json'))
    grant_request_3['operationParam'][
        'operationFrequencyRange']['lowFrequency'] = 3590000000
    grant_request_3['operationParam'][
        'operationFrequencyRange']['highFrequency'] = 3600000000
    # Loading device_4 (Cat B) to a location within 40 km of FSS,
    # frequency range overlapping FSS band.
    device_4 = json_load(
        os.path.join('testcases', 'testdata', 'device_d.json'))
    device_4['installationParam']['latitude'] = fss_record[
        'deploymentParam'][0]['installationParam']['latitude'] - 0.05
    device_4['installationParam']['longitude'] = fss_record[
        'deploymentParam'][0]['installationParam']['longitude'] - 0.05
    grant_request_4 = json_load(
        os.path.join('testcases', 'testdata', 'grant_0.json'))
    grant_request_4['operationParam'][
        'operationFrequencyRange']['lowFrequency'] = 3620000000
    grant_request_4['operationParam'][
        'operationFrequencyRange']['highFrequency'] = 3630000000
    # Load Conditional Data for Cat B devices (devices 2 and 4).
    # Conditional data is supplied separately; the corresponding fields are
    # deleted from the registration requests below.
    self.assertEqual(device_2['cbsdCategory'], 'B')
    conditionals_device_2 = {
        'cbsdCategory': device_2['cbsdCategory'],
        'fccId': device_2['fccId'],
        'cbsdSerialNumber': device_2['cbsdSerialNumber'],
        'airInterface': device_2['airInterface'],
        'installationParam': device_2['installationParam'],
        'measCapability': device_2['measCapability']
    }
    self.assertEqual(device_4['cbsdCategory'], 'B')
    conditionals_device_4 = {
        'cbsdCategory': device_4['cbsdCategory'],
        'fccId': device_4['fccId'],
        'cbsdSerialNumber': device_4['cbsdSerialNumber'],
        'airInterface': device_4['airInterface'],
        'installationParam': device_4['installationParam'],
        'measCapability': device_4['measCapability']
    }
    # Remove conditionals from registration
    del device_2['cbsdCategory']
    del device_2['airInterface']
    del device_2['installationParam']
    del device_2['measCapability']
    del device_4['cbsdCategory']
    del device_4['airInterface']
    del device_4['installationParam']
    del device_4['measCapability']
    # DP Registration and grant records
    cbsd_records_domain_proxy_0 = {
        'registrationRequests': [device_1, device_2],
        'grantRequests': [grant_request_1, grant_request_2],
        'conditionalRegistrationData': [conditionals_device_2]
    }
    cbsd_records_domain_proxy_1 = {
        'registrationRequests': [device_3],
        'grantRequests': [grant_request_3],
        'conditionalRegistrationData': []
    }
    # Protected entity record
    protected_entities = {
        'fssRecords': [fss_data]
    }
    # SAS Test Harnesses configurations,
    # Following configurations are for two SAS test harnesses
    sas_test_harness_device_1 = json_load(
        os.path.join('testcases', 'testdata', 'device_a.json'))
    sas_test_harness_device_1['fccId'] = 'test_fcc_id_1'
    sas_test_harness_device_1['userId'] = 'test_user_id_1'
    sas_test_harness_device_2 = json_load(
        os.path.join('testcases', 'testdata', 'device_b.json'))
    sas_test_harness_device_2['fccId'] = 'test_fcc_id_2'
    sas_test_harness_device_2['userId'] = 'test_user_id_2'
    sas_test_harness_device_3 = json_load(
        os.path.join('testcases', 'testdata', 'device_c.json'))
    sas_test_harness_device_3['fccId'] = 'test_fcc_id_3'
    sas_test_harness_device_3['userId'] = 'test_user_id_3'
    # Generate Cbsd FAD Records for SAS Test Harness 0
    cbsd_fad_records_sas_test_harness_0 = generateCbsdRecords(
        [sas_test_harness_device_1], [[grant_request_1]])
    # Generate Cbsd FAD Records for SAS Test Harness 1
    cbsd_fad_records_sas_test_harness_1 = generateCbsdRecords(
        [sas_test_harness_device_2, sas_test_harness_device_3],
        [[grant_request_2], [grant_request_3]])
    # Generate SAS Test Harnesses dump records
    dump_records_sas_test_harness_0 = {
        'cbsdRecords': cbsd_fad_records_sas_test_harness_0
    }
    dump_records_sas_test_harness_1 = {
        'cbsdRecords': cbsd_fad_records_sas_test_harness_1
    }
    # SAS Test Harnesses configuration
    sas_test_harness_0_config = {
        'sasTestHarnessName': 'SAS-TH-1',
        'hostName': getFqdnLocalhost(),
        'port': getUnusedPort(),
        'serverCert': getCertFilename('sas.cert'),
        'serverKey': getCertFilename('sas.key'),
        'caCert': getCertFilename('ca.cert')
    }
    sas_test_harness_1_config = {
        'sasTestHarnessName': 'SAS-TH-2',
        'hostName': getFqdnLocalhost(),
        'port': getUnusedPort(),
        'serverCert': getCertFilename('sas_1.cert'),
        'serverKey': getCertFilename('sas_1.key'),
        'caCert': getCertFilename('ca.cert')
    }
    # Single-iteration data: device_4 (FSS-band-overlapping grant) is
    # registered directly (not via a domain proxy), with its conditional
    # registration data attached to its record.
    iteration_config = {
        'cbsdRequestsWithDomainProxies': [cbsd_records_domain_proxy_0,
                                          cbsd_records_domain_proxy_1],
        'cbsdRecords': [{
            'registrationRequest': device_4,
            'grantRequest': grant_request_4,
            'conditionalRegistrationData': conditionals_device_4,
            'clientCert': getCertFilename('device_d.cert'),
            'clientKey': getCertFilename('device_d.key')
        }],
        'protectedEntities': protected_entities,
        'dpaActivationList': [],
        'dpaDeactivationList': [],
        'sasTestHarnessData': [dump_records_sas_test_harness_0,
                               dump_records_sas_test_harness_1]
    }
    # Create the actual config.
    config = {
        'initialCbsdRequestsWithDomainProxies': self.getEmptyCbsdRequestsWithDomainProxies(2),
        'initialCbsdRecords': [],
        'iterationData': [iteration_config],
        'sasTestHarnessConfigs': [sas_test_harness_0_config,
                                  sas_test_harness_1_config],
        'domainProxyConfigs': [
            {'cert': getCertFilename('domain_proxy.cert'),
             'key': getCertFilename('domain_proxy.key')},
            {'cert': getCertFilename('domain_proxy_1.cert'),
             'key': getCertFilename('domain_proxy_1.key')}
        ]
    }
    writeConfig(filename, config)
@configurable_testcase(generate_FPR_4_default_config)
def test_WINNF_FT_S_FPR_4(self, config_filename):
"""Multiple CBSDs from Multiple SASs Inside and Outside the Neighborhood
of an FSS Station for Scenario 2 with TT&C Flag = ON.
"""
config = loadConfig(config_filename)
# Invoke MCP test steps 1 through 22.
self.executeMcpTestSteps(config, 'xPR2')
def generate_FPR_5_default_config(self, filename):
"""Generates the WinnForum configuration for FPR.5."""
# Load FSS station operating at range 3600 - 3700 MHz.
fss_data = json_load(os.path.join('testcases', 'testdata', 'fss_record_0.json'))
fss = fss_data['record']
fss['deploymentParam'][0]['operationParam']['operationFrequencyRange']\
['lowFrequency'] = 3600000000
fss['deploymentParam'][0]['operationParam']['operationFrequencyRange']\
['highFrequency'] = 3700000000
# Load GWBL operating at range 3650 - 3700 MHz.
gwbl = json_load(
os.path.join('testcases', 'testdata', 'gwbl_record_0.json'))
gwbl['record']['deploymentParam'][0]['operationParam']['operationFrequencyRange']\
['lowFrequency'] = 3650000000
gwbl['record']['deploymentParam'][0]['operationParam']['operationFrequencyRange']\
['highFrequency'] = 3700000000
# Load device info
device_1 = json_load(
os.path.join('testcases', 'testdata', 'device_b.json'))
device_2 = json_load(
os.path.join('testcases', 'testdata', 'device_c.json'))
device_3 = json_load(
os.path.join('testcases', 'testdata', 'device_e.json'))
# Move devices within 150km of FSS
device_1['installationParam']['latitude'] = \
fss['deploymentParam'][0]['installationParam']['latitude'] + 0.05
device_1['installationParam']['longitude'] = \
fss['deploymentParam'][0]['installationParam']['longitude'] + 0.05
device_2['installationParam']['latitude'] = \
fss['deploymentParam'][0]['installationParam']['latitude'] + 0.12
device_2['installationParam']['longitude'] = \
fss['deploymentParam'][0]['installationParam']['longitude'] + 0.05
device_3['installationParam']['latitude'] = \
fss['deploymentParam'][0]['installationParam']['latitude'] + 0.15
device_3['installationParam']['longitude'] = \
fss['deploymentParam'][0]['installationParam']['longitude'] + 0.05
# Move GWBL within 150km of FSS
gwbl['record']['deploymentParam'][0]['installationParam']['latitude'] = \
fss['deploymentParam'][0]['installationParam']['latitude'] + 0.1
gwbl['record']['deploymentParam'][0]['installationParam']['longitude'] = \
fss['deploymentParam'][0]['installationParam']['longitude']
# Create info for grants
grant_1 = json_load(os.path.join('testcases', 'testdata', 'grant_0.json'))
grant_1['operationParam']['operationFrequencyRange']['lowFrequency'] = 3650000000
grant_1['operationParam']['operationFrequencyRange']['highFrequency'] = 3660000000
grant_2 = json_load(os.path.join('testcases', 'testdata', 'grant_0.json'))
grant_2['operationParam']['operationFrequencyRange']['lowFrequency'] = 3650000000
grant_2['operationParam']['operationFrequencyRange']['highFrequency'] = 3660000000
grant_3 = json_load(os.path.join('testcases', 'testdata', 'grant_0.json'))
grant_3['operationParam']['operationFrequencyRange']['lowFrequency'] = 3650000000
grant_3['operationParam']['operationFrequencyRange']['highFrequency'] = 3660000000
# Creating conditionals for Cat B device
self.assertEqual(device_1['cbsdCategory'], 'B')
conditionalParameters = {
'cbsdCategory': device_1['cbsdCategory'],
'fccId': device_1['fccId'],
'cbsdSerialNumber': device_1['cbsdSerialNumber'],
'airInterface': device_1['airInterface'],
'installationParam': device_1['installationParam'],
'measCapability': device_1['measCapability']
}
del device_1['cbsdCategory']
del device_1['airInterface']
del device_1['installationParam']
del device_1['measCapability']
# Create the actual config.
conditionals = [conditionalParameters]
config = {
'registrationRequests': [device_1, device_2, device_3],
'conditionalRegistrationData': conditionals,
'grantRequests': [grant_1, grant_2, grant_3],
'gwblRecord': gwbl,
'fssRecord': fss_data
}
writeConfig(filename, config)
@configurable_testcase(generate_FPR_5_default_config)
def test_WINNF_FT_S_FPR_5(self, config_filename):
"""[Configurable] Grant Requests from one or more CBSDs Inside an FSS-GWBL Exclusion Zone."""
config = loadConfig(config_filename)
# Light checking of the config file
self.assertEqual(len(config['registrationRequests']), len(config['grantRequests']))
# Load the FSS
self._sas_admin.InjectFss(config['fssRecord'])
# Load the GWBL
self._sas_admin.InjectWisp(config['gwblRecord'])
# Trigger CPAS activity
self.TriggerDailyActivitiesImmediatelyAndWaitUntilComplete()
# Register N > 0 CBSDs
cbsd_ids = self.assertRegistered(config['registrationRequests'],config['conditionalRegistrationData'])
# Add cbsdIds to grants
grant_request = config['grantRequests']
addCbsdIdsToRequests(cbsd_ids, grant_request)
# Send grant request and get response
request = {'grantRequest': grant_request}
response = self._sas.Grant(request)['grantResponse']
# Check grant response
self.assertEqual(len(response), len(cbsd_ids))
for response_num in response:
self.assertEqual(response_num['response']['responseCode'], 400)
| 46.058541
| 137
| 0.688225
| 5,173
| 47,993
| 6.097236
| 0.064373
| 0.026854
| 0.046606
| 0.028407
| 0.89436
| 0.870613
| 0.854919
| 0.832599
| 0.817444
| 0.797724
| 0
| 0.04006
| 0.18236
| 47,993
| 1,041
| 138
| 46.102786
| 0.763717
| 0.132603
| 0
| 0.732661
| 0
| 0
| 0.34779
| 0.047112
| 0
| 0
| 0
| 0
| 0.016393
| 1
| 0.015132
| false
| 0
| 0.01261
| 0
| 0.029004
| 0.001261
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
077a2cd2224cff48b01b6d3fafd2516b381dbcc7
| 3,581
|
py
|
Python
|
FreeTranslationExperiment/variables.py
|
tifat58/lsv-c4-django-webexperiment
|
6aa706ad36b9766fbbf4323fdcd6e5d7420f1e16
|
[
"Apache-2.0"
] | 1
|
2022-03-16T11:17:06.000Z
|
2022-03-16T11:17:06.000Z
|
FreeTranslationExperiment/variables.py
|
tifat58/lsv-c4-django-webexperiment
|
6aa706ad36b9766fbbf4323fdcd6e5d7420f1e16
|
[
"Apache-2.0"
] | null | null | null |
FreeTranslationExperiment/variables.py
|
tifat58/lsv-c4-django-webexperiment
|
6aa706ad36b9766fbbf4323fdcd6e5d7420f1e16
|
[
"Apache-2.0"
] | null | null | null |
from django.utils.translation import ugettext_lazy as _
class FreeTranslationPageConstants:
    """
    Free translation experiment page constants.

    Each attribute holds a lazily-evaluated translated string: the value
    passed to ``_`` (``ugettext_lazy``) is the message key that is resolved
    against the project's translation catalogs when the string is rendered.
    """
    FREE_TRANSLATION_TRANSLATE_WORD_TEXT = _("FREE_TRANSLATION_TRANSLATE_WORD_TEXT")
    # FREE_TRANSLATION_TRANSLATED_WORD_FIELD_TEXT = _("Type in")
    FREE_TRANSLATION_CONTINUE_BUTTON_TEXT = _("FREE_TRANSLATION_CONTINUE_BUTTON_TEXT")
    FREE_TRANSLATION_RESUME_AFTER_TEXT = _("FREE_TRANSLATION_RESUME_AFTER_TEXT")
    FREE_TRANSLATION_RESUME_NOW_BUTTON_TEXT = _("FREE_TRANSLATION_RESUME_NOW_BUTTON_TEXT")
    FREE_TRANSLATION_BREAK_TEXT = _("FREE_TRANSLATION_BREAK_TEXT")
class FreeTranslationWelcomePageConstants:
    """
    Free translation experiment welcome page constants.

    Each attribute holds a lazily-evaluated translated string keyed by its
    own name; resolution against the translation catalogs happens when the
    string is rendered. Keys are grouped as main-page texts followed by the
    language-selection popup texts.
    """
    FREE_TRANSLATION_EXPERIMENT_WELCOME_HEADER = _("FREE_TRANSLATION_EXPERIMENT_WELCOME_HEADER")
    FREE_TRANSLATION_EXPERIMENT_WELCOME_PAGE_TOTAL_WORDS_TEXT = _(
        "FREE_TRANSLATION_EXPERIMENT_WELCOME_PAGE_TOTAL_WORDS_TEXT")
    FREE_TRANSLATION_EXPERIMENT_WELCOME_PAGE_TIME_ESTIMATE_TEXT = _(
        "FREE_TRANSLATION_EXPERIMENT_WELCOME_PAGE_TIME_ESTIMATE_TEXT")
    FREE_TRANSLATION_EXPERIMENT_WELCOME_PAGE_HOW_FAST_ARE_YOU_TEXT = _(
        "FREE_TRANSLATION_EXPERIMENT_WELCOME_PAGE_HOW_FAST_ARE_YOU_TEXT")
    FREE_TRANSLATION_EXPERIMENT_WELCOME_PAGE_SET_KEYBOARD_LANGUAGE_TEXT = _(
        "FREE_TRANSLATION_EXPERIMENT_WELCOME_PAGE_SET_KEYBOARD_LANGUAGE_TEXT")
    FREE_TRANSLATION_EXPERIMENT_WELCOME_PAGE_CLOSE_BROWSER_WINDOWS_TEXT = _(
        "FREE_TRANSLATION_EXPERIMENT_WELCOME_PAGE_CLOSE_BROWSER_WINDOWS_TEXT")
    FREE_TRANSLATION_EXPERIMENT_WELCOME_PAGE_START_WHEN_READY_TEXT = _(
        "FREE_TRANSLATION_EXPERIMENT_WELCOME_PAGE_START_WHEN_READY_TEXT")
    FREE_TRANSLATION_EXPERIMENT_WELCOME_PAGE_CLICK_HERE_TO_SELECT_LANGUAGE_TEXT = _(
        "FREE_TRANSLATION_EXPERIMENT_WELCOME_PAGE_CLICK_HERE_TO_SELECT_LANGUAGE_TEXT")
    FREE_TRANSLATION_EXPERIMENT_WELCOME_PAGE_CONTINUE_BUTTON_TEXT = _(
        "FREE_TRANSLATION_EXPERIMENT_WELCOME_PAGE_CONTINUE_BUTTON_TEXT")
    # Popup (translation-language selection dialog)
    FREE_TRANSLATION_EXPERIMENT_WELCOME_PAGE_SELECT_TRANSLATION_LANGUAGE_POPUP_HEADER = _(
        "FREE_TRANSLATION_EXPERIMENT_WELCOME_PAGE_SELECT_TRANSLATION_LANGUAGE_POPUP_HEADER")
    FREE_TRANSLATION_EXPERIMENT_WELCOME_PAGE_POPUP_SELECT_TRANSLATION_LANGUAGE_TEXT = _(
        "FREE_TRANSLATION_EXPERIMENT_WELCOME_PAGE_POPUP_SELECT_TRANSLATION_LANGUAGE_TEXT")
    FREE_TRANSLATION_EXPERIMENT_WELCOME_PAGE_CHOOSE_MOST_CONFIDENT_IN_LANGUAGE_TEXT = _(
        "FREE_TRANSLATION_EXPERIMENT_WELCOME_PAGE_CHOOSE_MOST_CONFIDENT_IN_LANGUAGE_TEXT")
    FREE_TRANSLATION_EXPERIMENT_WELCOME_PAGE_POPUP_CONTINUE_BUTTON_TEXT = _(
        "FREE_TRANSLATION_EXPERIMENT_WELCOME_PAGE_POPUP_CONTINUE_BUTTON_TEXT")
class FreeTranslationCompletionPageConstants:
    """
    Free translation completion page constants.

    Each attribute holds a lazily-evaluated translated string keyed by its
    own name. The EXPERIMENT_FINAL_* keys are generic end-of-experiment
    texts; the FREE_TRANSLATION_FINAL_* keys carry the per-experiment
    statistics labels.
    """
    EXPERIMENT_FINAL_THANK_YOU_TEXT = _("EXPERIMENT_FINAL_THANK_YOU_TEXT")
    EXPERIMENT_FINAL_APPRECIATION_MESSAGE = _("EXPERIMENT_FINAL_APPRECIATION_MESSAGE")
    EXPERIMENT_FINAL_YOUR_RESULTS_HEADER = _("EXPERIMENT_FINAL_YOUR_RESULTS_HEADER")
    EXPERIMENT_FINAL_STATISTICS_HEADER = _("EXPERIMENT_FINAL_STATISTICS_HEADER")
    FREE_TRANSLATION_FINAL_TOTAL_WORDS_TEXT = _("FREE_TRANSLATION_FINAL_TOTAL_WORDS_TEXT")
    FREE_TRANSLATION_FINAL_CORRECT_TRANSLATIONS_TEXT = _("FREE_TRANSLATION_FINAL_CORRECT_TRANSLATIONS_TEXT")
    FREE_TRANSLATION_FINAL_TOTAL_TIME_TEXT = _("FREE_TRANSLATION_FINAL_TOTAL_TIME_TEXT")
    FREE_TRANSLATION_FINAL_TIME_PER_WORD_TEXT = _("FREE_TRANSLATION_FINAL_TIME_PER_WORD_TEXT")
| 55.953125
| 108
| 0.847808
| 409
| 3,581
| 6.562347
| 0.163814
| 0.268256
| 0.254844
| 0.321908
| 0.895306
| 0.852086
| 0.83234
| 0.724665
| 0.706036
| 0.57228
| 0
| 0
| 0.107512
| 3,581
| 63
| 109
| 56.84127
| 0.8398
| 0.056409
| 0
| 0
| 0
| 0
| 0.400901
| 0.400901
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.02381
| 0
| 0.714286
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 10
|
07c06cd83af17db12d664af23d9c3de008c6adbe
| 121,922
|
py
|
Python
|
Tensorflow_OpenCV_Nightly/source/tensorflow/python/training/gen_training_ops.py
|
Con-Mi/lambda-packs
|
b23a8464abdd88050b83310e1d0e99c54dac28ab
|
[
"MIT"
] | 1
|
2017-12-05T15:35:47.000Z
|
2017-12-05T15:35:47.000Z
|
Tensorflow_OpenCV_Nightly/source/tensorflow/python/training/gen_training_ops.py
|
Con-Mi/lambda-packs
|
b23a8464abdd88050b83310e1d0e99c54dac28ab
|
[
"MIT"
] | 10
|
2017-07-13T00:24:03.000Z
|
2017-07-17T07:39:03.000Z
|
Tensorflow_OpenCV_Nightly/source/tensorflow/python/training/gen_training_ops.py
|
Con-Mi/lambda-packs
|
b23a8464abdd88050b83310e1d0e99c54dac28ab
|
[
"MIT"
] | 7
|
2017-08-01T04:02:07.000Z
|
2018-10-06T21:07:20.000Z
|
"""Python wrappers around TensorFlow ops.
This file is MACHINE GENERATED! Do not edit.
"""
import collections as _collections
from google.protobuf import text_format as _text_format
from tensorflow.core.framework import op_def_pb2 as _op_def_pb2
# Needed to trigger the call to _set_call_cpp_shape_fn.
from tensorflow.python.framework import common_shapes as _common_shapes
from tensorflow.python.framework import op_def_registry as _op_def_registry
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework import op_def_library as _op_def_library
def apply_adadelta(var, accum, accum_update, lr, rho, epsilon, grad,
                   use_locking=None, name=None):
  r"""Update '*var' according to the adadelta scheme.

  accum = rho() * accum + (1 - rho()) * grad.square();
  update = (update_accum + epsilon).sqrt() * (accum + epsilon()).rsqrt() * grad;
  update_accum = rho() * update_accum + (1 - rho()) * update.square();
  var -= update;

  Args:
    var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`.
      Should be from a Variable().
    accum: A mutable `Tensor`. Must have the same type as `var`.
      Should be from a Variable().
    accum_update: A mutable `Tensor`. Must have the same type as `var`.
      Should be from a Variable().
    lr: A `Tensor`. Must have the same type as `var`.
      Scaling factor. Must be a scalar.
    rho: A `Tensor`. Must have the same type as `var`.
      Decay factor. Must be a scalar.
    epsilon: A `Tensor`. Must have the same type as `var`.
      Constant factor. Must be a scalar.
    grad: A `Tensor`. Must have the same type as `var`. The gradient.
    use_locking: An optional `bool`. Defaults to `False`.
      If True, updating of the var, accum and update_accum tensors will be protected by
      a lock; otherwise the behavior is undefined, but may exhibit less contention.
    name: A name for the operation (optional).

  Returns:
    A mutable `Tensor`. Has the same type as `var`. Same as "var".
  """
  # Forward all arguments by keyword to the generated op-def library.
  op_arguments = dict(var=var, accum=accum, accum_update=accum_update, lr=lr,
                      rho=rho, epsilon=epsilon, grad=grad,
                      use_locking=use_locking, name=name)
  return _op_def_lib.apply_op("ApplyAdadelta", **op_arguments)
def apply_adagrad(var, accum, lr, grad, use_locking=None, name=None):
  r"""Update '*var' according to the adagrad scheme.

  accum += grad * grad
  var -= lr * grad * (1 / sqrt(accum))

  Args:
    var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`.
      Should be from a Variable().
    accum: A mutable `Tensor`. Must have the same type as `var`.
      Should be from a Variable().
    lr: A `Tensor`. Must have the same type as `var`.
      Scaling factor. Must be a scalar.
    grad: A `Tensor`. Must have the same type as `var`. The gradient.
    use_locking: An optional `bool`. Defaults to `False`.
      If `True`, updating of the var and accum tensors will be protected
      by a lock; otherwise the behavior is undefined, but may exhibit less
      contention.
    name: A name for the operation (optional).

  Returns:
    A mutable `Tensor`. Has the same type as `var`. Same as "var".
  """
  # Forward all arguments by keyword to the generated op-def library.
  op_arguments = dict(var=var, accum=accum, lr=lr, grad=grad,
                      use_locking=use_locking, name=name)
  return _op_def_lib.apply_op("ApplyAdagrad", **op_arguments)
def apply_adagrad_da(var, gradient_accumulator, gradient_squared_accumulator,
                     grad, lr, l1, l2, global_step, use_locking=None,
                     name=None):
  r"""Update '*var' according to the proximal adagrad scheme.

  Args:
    var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`.
      Should be from a Variable().
    gradient_accumulator: A mutable `Tensor`. Must have the same type as `var`.
      Should be from a Variable().
    gradient_squared_accumulator: A mutable `Tensor`. Must have the same type as `var`.
      Should be from a Variable().
    grad: A `Tensor`. Must have the same type as `var`. The gradient.
    lr: A `Tensor`. Must have the same type as `var`.
      Scaling factor. Must be a scalar.
    l1: A `Tensor`. Must have the same type as `var`.
      L1 regularization. Must be a scalar.
    l2: A `Tensor`. Must have the same type as `var`.
      L2 regularization. Must be a scalar.
    global_step: A `Tensor` of type `int64`.
      Training step number. Must be a scalar.
    use_locking: An optional `bool`. Defaults to `False`.
      If True, updating of the var and accum tensors will be protected by
      a lock; otherwise the behavior is undefined, but may exhibit less contention.
    name: A name for the operation (optional).

  Returns:
    A mutable `Tensor`. Has the same type as `var`. Same as "var".
  """
  # Forward all arguments by keyword to the generated op-def library.
  op_arguments = dict(var=var, gradient_accumulator=gradient_accumulator,
                      gradient_squared_accumulator=gradient_squared_accumulator,
                      grad=grad, lr=lr, l1=l1, l2=l2, global_step=global_step,
                      use_locking=use_locking, name=name)
  return _op_def_lib.apply_op("ApplyAdagradDA", **op_arguments)
def apply_adam(var, m, v, beta1_power, beta2_power, lr, beta1, beta2, epsilon,
               grad, use_locking=None, use_nesterov=None, name=None):
  r"""Updates '*var' with the Adam algorithm.

  lr_t <- learning_rate * sqrt(1 - beta2^t) / (1 - beta1^t)
  m_t <- beta1 * m_{t-1} + (1 - beta1) * g_t
  v_t <- beta2 * v_{t-1} + (1 - beta2) * g_t * g_t
  variable <- variable - lr_t * m_t / (sqrt(v_t) + epsilon)

  Args:
    var: A mutable `Tensor` of one of: `float32`, `float64`, `int64`,
      `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
      `complex128`, `qint8`, `quint8`, `qint32`, `half`. Typically backed
      by a Variable().
    m: A mutable `Tensor` with the same type as `var`; typically backed by
      a Variable().
    v: A mutable `Tensor` with the same type as `var`; typically backed by
      a Variable().
    beta1_power: A scalar `Tensor` with the same type as `var`.
    beta2_power: A scalar `Tensor` with the same type as `var`.
    lr: A scalar `Tensor` with the same type as `var`; the scaling factor.
    beta1: A scalar `Tensor` with the same type as `var`; momentum factor.
    beta2: A scalar `Tensor` with the same type as `var`; momentum factor.
    epsilon: A scalar `Tensor` with the same type as `var`; ridge term.
    grad: A `Tensor` with the same type as `var` holding the gradient.
    use_locking: An optional `bool`, defaulting to `False`. When `True` the
      updates of the var, m and v tensors are guarded by a lock; otherwise
      behavior is undefined but may show less contention.
    use_nesterov: An optional `bool`, defaulting to `False`. When `True`,
      applies the nesterov update.
    name: An optional name for the operation.

  Returns:
    A mutable `Tensor` of the same type as `var` (same as "var").
  """
  # Thin wrapper: forward every argument to the generated op unchanged.
  return _op_def_lib.apply_op("ApplyAdam",
                              var=var, m=m, v=v,
                              beta1_power=beta1_power,
                              beta2_power=beta2_power,
                              lr=lr, beta1=beta1, beta2=beta2,
                              epsilon=epsilon, grad=grad,
                              use_locking=use_locking,
                              use_nesterov=use_nesterov,
                              name=name)
def apply_centered_rms_prop(var, mg, ms, mom, lr, rho, momentum, epsilon,
                            grad, use_locking=None, name=None):
  r"""Updates '*var' with the centered RMSProp algorithm.

  Centered RMSProp normalizes with an estimate of the centered second
  moment (i.e. the variance) instead of the plain (uncentered) second
  moment used by regular RMSProp. That frequently helps training at the
  cost of a little extra computation and memory.

  Note that the dense implementation of this algorithm updates mg, ms and
  mom even when the grad is zero, whereas the sparse implementation leaves
  mg, ms and mom untouched in iterations where the grad is zero.

  mean_square = decay * mean_square + (1-decay) * gradient ** 2
  mean_grad = decay * mean_grad + (1-decay) * gradient
  Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2)
  mg <- rho * mg_{t-1} + (1-rho) * grad
  ms <- rho * ms_{t-1} + (1-rho) * grad * grad
  mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon)
  var <- var - mom

  Args:
    var: A mutable `Tensor` of one of: `float32`, `float64`, `int64`,
      `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
      `complex128`, `qint8`, `quint8`, `qint32`, `half`. Typically backed
      by a Variable().
    mg: A mutable `Tensor` with the same type as `var`; typically backed by
      a Variable().
    ms: A mutable `Tensor` with the same type as `var`; typically backed by
      a Variable().
    mom: A mutable `Tensor` with the same type as `var`; typically backed
      by a Variable().
    lr: A scalar `Tensor` with the same type as `var`; the scaling factor.
    rho: A scalar `Tensor` with the same type as `var`; the decay rate.
    momentum: A `Tensor` with the same type as `var`.
    epsilon: A scalar `Tensor` with the same type as `var`; ridge term.
    grad: A `Tensor` with the same type as `var` holding the gradient.
    use_locking: An optional `bool`, defaulting to `False`. When `True` the
      updates of the var, mg, ms and mom tensors are guarded by a lock;
      otherwise behavior is undefined but may show less contention.
    name: An optional name for the operation.

  Returns:
    A mutable `Tensor` of the same type as `var` (same as "var").
  """
  # Thin wrapper: forward every argument to the generated op unchanged.
  return _op_def_lib.apply_op("ApplyCenteredRMSProp",
                              var=var, mg=mg, ms=ms, mom=mom,
                              lr=lr, rho=rho, momentum=momentum,
                              epsilon=epsilon, grad=grad,
                              use_locking=use_locking,
                              name=name)
def apply_ftrl(var, accum, linear, grad, lr, l1, l2, lr_power,
               use_locking=None, name=None):
  r"""Updates '*var' following the Ftrl-proximal scheme.

  accum_new = accum + grad * grad
  linear += grad + (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
  quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
  var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
  accum = accum_new

  Args:
    var: A mutable `Tensor` of one of: `float32`, `float64`, `int64`,
      `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
      `complex128`, `qint8`, `quint8`, `qint32`, `half`. Typically backed
      by a Variable().
    accum: A mutable `Tensor` with the same type as `var`; typically backed
      by a Variable().
    linear: A mutable `Tensor` with the same type as `var`; typically
      backed by a Variable().
    grad: A `Tensor` with the same type as `var` holding the gradient.
    lr: A scalar `Tensor` with the same type as `var`; the scaling factor.
    l1: A scalar `Tensor` with the same type as `var`; L1 regularization.
    l2: A scalar `Tensor` with the same type as `var`; L2 regularization.
    lr_power: A scalar `Tensor` with the same type as `var`; scaling factor.
    use_locking: An optional `bool`, defaulting to `False`. When `True` the
      updates of the var and accum tensors are guarded by a lock; otherwise
      behavior is undefined but may show less contention.
    name: An optional name for the operation.

  Returns:
    A mutable `Tensor` of the same type as `var` (same as "var").
  """
  # Thin wrapper: forward every argument to the generated op unchanged.
  return _op_def_lib.apply_op("ApplyFtrl",
                              var=var, accum=accum, linear=linear,
                              grad=grad, lr=lr, l1=l1, l2=l2,
                              lr_power=lr_power,
                              use_locking=use_locking,
                              name=name)
def apply_gradient_descent(var, alpha, delta, use_locking=None, name=None):
  r"""Updates '*var' by subtracting 'alpha' * 'delta' from it.

  Args:
    var: A mutable `Tensor` of one of: `float32`, `float64`, `int64`,
      `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
      `complex128`, `qint8`, `quint8`, `qint32`, `half`. Typically backed
      by a Variable().
    alpha: A scalar `Tensor` with the same type as `var`; scaling factor.
    delta: A `Tensor` with the same type as `var` holding the change.
    use_locking: An optional `bool`, defaulting to `False`. When `True` the
      subtraction is guarded by a lock; otherwise behavior is undefined but
      may show less contention.
    name: An optional name for the operation.

  Returns:
    A mutable `Tensor` of the same type as `var` (same as "var").
  """
  # Thin wrapper: forward every argument to the generated op unchanged.
  return _op_def_lib.apply_op("ApplyGradientDescent",
                              var=var, alpha=alpha, delta=delta,
                              use_locking=use_locking,
                              name=name)
def apply_momentum(var, accum, lr, grad, momentum, use_locking=None,
                   use_nesterov=None, name=None):
  r"""Updates '*var' with the momentum scheme.

  Set use_nesterov = True to get Nesterov momentum.

  accum = accum * momentum + grad
  var -= lr * accum

  Args:
    var: A mutable `Tensor` of one of: `float32`, `float64`, `int64`,
      `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
      `complex128`, `qint8`, `quint8`, `qint32`, `half`. Typically backed
      by a Variable().
    accum: A mutable `Tensor` with the same type as `var`; typically backed
      by a Variable().
    lr: A scalar `Tensor` with the same type as `var`; the scaling factor.
    grad: A `Tensor` with the same type as `var` holding the gradient.
    momentum: A scalar `Tensor` with the same type as `var`; the momentum.
    use_locking: An optional `bool`, defaulting to `False`. When `True` the
      updates of the var and accum tensors are guarded by a lock; otherwise
      behavior is undefined but may show less contention.
    use_nesterov: An optional `bool`, defaulting to `False`. When `True`,
      the tensor handed to the gradient computation is
      var - lr * momentum * accum, so the var you end up with is
      effectively var - lr * momentum * accum.
    name: An optional name for the operation.

  Returns:
    A mutable `Tensor` of the same type as `var` (same as "var").
  """
  # Thin wrapper: forward every argument to the generated op unchanged.
  return _op_def_lib.apply_op("ApplyMomentum",
                              var=var, accum=accum,
                              lr=lr, grad=grad, momentum=momentum,
                              use_locking=use_locking,
                              use_nesterov=use_nesterov,
                              name=name)
def apply_proximal_adagrad(var, accum, lr, l1, l2, grad, use_locking=None,
                           name=None):
  r"""Updates '*var' and '*accum' via FOBOS with an Adagrad learning rate.

  accum += grad * grad
  prox_v = var - lr * grad * (1 / sqrt(accum))
  var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}

  Args:
    var: A mutable `Tensor` of one of: `float32`, `float64`, `int64`,
      `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
      `complex128`, `qint8`, `quint8`, `qint32`, `half`. Typically backed
      by a Variable().
    accum: A mutable `Tensor` with the same type as `var`; typically backed
      by a Variable().
    lr: A scalar `Tensor` with the same type as `var`; the scaling factor.
    l1: A scalar `Tensor` with the same type as `var`; L1 regularization.
    l2: A scalar `Tensor` with the same type as `var`; L2 regularization.
    grad: A `Tensor` with the same type as `var` holding the gradient.
    use_locking: An optional `bool`, defaulting to `False`. When True the
      updates of the var and accum tensors are guarded by a lock; otherwise
      behavior is undefined but may show less contention.
    name: An optional name for the operation.

  Returns:
    A mutable `Tensor` of the same type as `var` (same as "var").
  """
  # Thin wrapper: forward every argument to the generated op unchanged.
  return _op_def_lib.apply_op("ApplyProximalAdagrad",
                              var=var, accum=accum,
                              lr=lr, l1=l1, l2=l2, grad=grad,
                              use_locking=use_locking,
                              name=name)
def apply_proximal_gradient_descent(var, alpha, l1, l2, delta,
                                    use_locking=None, name=None):
  r"""Updates '*var' as the FOBOS algorithm with a fixed learning rate.

  prox_v = var - alpha * delta
  var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}

  Args:
    var: A mutable `Tensor` of one of: `float32`, `float64`, `int64`,
      `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
      `complex128`, `qint8`, `quint8`, `qint32`, `half`. Typically backed
      by a Variable().
    alpha: A scalar `Tensor` with the same type as `var`; scaling factor.
    l1: A scalar `Tensor` with the same type as `var`; L1 regularization.
    l2: A scalar `Tensor` with the same type as `var`; L2 regularization.
    delta: A `Tensor` with the same type as `var` holding the change.
    use_locking: An optional `bool`, defaulting to `False`. When True the
      subtraction is guarded by a lock; otherwise behavior is undefined but
      may show less contention.
    name: An optional name for the operation.

  Returns:
    A mutable `Tensor` of the same type as `var` (same as "var").
  """
  # Thin wrapper: forward every argument to the generated op unchanged.
  return _op_def_lib.apply_op("ApplyProximalGradientDescent",
                              var=var, alpha=alpha,
                              l1=l1, l2=l2, delta=delta,
                              use_locking=use_locking,
                              name=name)
def apply_rms_prop(var, ms, mom, lr, rho, momentum, epsilon, grad,
                   use_locking=None, name=None):
  r"""Updates '*var' with the RMSProp algorithm.

  Note that the dense implementation of this algorithm updates ms and mom
  even when the grad is zero, whereas the sparse implementation leaves ms
  and mom untouched in iterations where the grad is zero.

  mean_square = decay * mean_square + (1-decay) * gradient ** 2
  Delta = learning_rate * gradient / sqrt(mean_square + epsilon)
  ms <- rho * ms_{t-1} + (1-rho) * grad * grad
  mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
  var <- var - mom

  Args:
    var: A mutable `Tensor` of one of: `float32`, `float64`, `int64`,
      `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
      `complex128`, `qint8`, `quint8`, `qint32`, `half`. Typically backed
      by a Variable().
    ms: A mutable `Tensor` with the same type as `var`; typically backed by
      a Variable().
    mom: A mutable `Tensor` with the same type as `var`; typically backed
      by a Variable().
    lr: A scalar `Tensor` with the same type as `var`; the scaling factor.
    rho: A scalar `Tensor` with the same type as `var`; the decay rate.
    momentum: A `Tensor` with the same type as `var`.
    epsilon: A scalar `Tensor` with the same type as `var`; ridge term.
    grad: A `Tensor` with the same type as `var` holding the gradient.
    use_locking: An optional `bool`, defaulting to `False`. When `True` the
      updates of the var, ms and mom tensors are guarded by a lock;
      otherwise behavior is undefined but may show less contention.
    name: An optional name for the operation.

  Returns:
    A mutable `Tensor` of the same type as `var` (same as "var").
  """
  # Thin wrapper: forward every argument to the generated op unchanged.
  return _op_def_lib.apply_op("ApplyRMSProp",
                              var=var, ms=ms, mom=mom,
                              lr=lr, rho=rho, momentum=momentum,
                              epsilon=epsilon, grad=grad,
                              use_locking=use_locking,
                              name=name)
def resource_apply_adadelta(var, accum, accum_update, lr, rho, epsilon, grad,
                            use_locking=None, name=None):
  r"""Updates '*var' following the adadelta scheme.

  accum = rho() * accum + (1 - rho()) * grad.square();
  update = (update_accum + epsilon).sqrt() * (accum + epsilon()).rsqrt() * grad;
  update_accum = rho() * update_accum + (1 - rho()) * update.square();
  var -= update;

  Args:
    var: A `Tensor` of type `resource`; typically backed by a Variable().
    accum: A `Tensor` of type `resource`; typically backed by a Variable().
    accum_update: A `Tensor` of type `resource`; typically backed by a
      Variable().
    lr: A scalar `Tensor` of one of: `float32`, `float64`, `int64`,
      `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
      `complex128`, `qint8`, `quint8`, `qint32`, `half`; scaling factor.
    rho: A scalar `Tensor` with the same type as `lr`; the decay factor.
    epsilon: A scalar `Tensor` with the same type as `lr`; constant factor.
    grad: A `Tensor` with the same type as `lr` holding the gradient.
    use_locking: An optional `bool`, defaulting to `False`. When True the
      updates of the var, accum and update_accum tensors are guarded by a
      lock; otherwise behavior is undefined but may show less contention.
    name: An optional name for the operation.

  Returns:
    The created Operation.
  """
  # Thin wrapper: forward every argument to the generated op unchanged.
  return _op_def_lib.apply_op("ResourceApplyAdadelta",
                              var=var, accum=accum,
                              accum_update=accum_update,
                              lr=lr, rho=rho, epsilon=epsilon, grad=grad,
                              use_locking=use_locking,
                              name=name)
def resource_apply_adagrad(var, accum, lr, grad, use_locking=None, name=None):
  r"""Updates '*var' following the adagrad scheme.

  accum += grad * grad
  var -= lr * grad * (1 / sqrt(accum))

  Args:
    var: A `Tensor` of type `resource`; typically backed by a Variable().
    accum: A `Tensor` of type `resource`; typically backed by a Variable().
    lr: A scalar `Tensor` of one of: `float32`, `float64`, `int64`,
      `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
      `complex128`, `qint8`, `quint8`, `qint32`, `half`; scaling factor.
    grad: A `Tensor` with the same type as `lr` holding the gradient.
    use_locking: An optional `bool`, defaulting to `False`. When `True` the
      updates of the var and accum tensors are guarded by a lock; otherwise
      behavior is undefined but may show less contention.
    name: An optional name for the operation.

  Returns:
    The created Operation.
  """
  # Thin wrapper: forward every argument to the generated op unchanged.
  return _op_def_lib.apply_op("ResourceApplyAdagrad",
                              var=var, accum=accum,
                              lr=lr, grad=grad,
                              use_locking=use_locking,
                              name=name)
def resource_apply_adagrad_da(var, gradient_accumulator,
                              gradient_squared_accumulator, grad, lr, l1, l2,
                              global_step, use_locking=None, name=None):
  r"""Updates '*var' following the proximal adagrad (AdagradDA) scheme.

  Args:
    var: A `Tensor` of type `resource`; typically backed by a Variable().
    gradient_accumulator: A `Tensor` of type `resource`; typically backed
      by a Variable().
    gradient_squared_accumulator: A `Tensor` of type `resource`; typically
      backed by a Variable().
    grad: A `Tensor` of one of: `float32`, `float64`, `int64`, `int32`,
      `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`,
      `qint8`, `quint8`, `qint32`, `half`, holding the gradient.
    lr: A scalar `Tensor` with the same type as `grad`; scaling factor.
    l1: A scalar `Tensor` with the same type as `grad`; L1 regularization.
    l2: A scalar `Tensor` with the same type as `grad`; L2 regularization.
    global_step: A scalar `Tensor` of type `int64`; the training step number.
    use_locking: An optional `bool`, defaulting to `False`. When True the
      updates of the var and accum tensors are guarded by a lock; otherwise
      behavior is undefined but may show less contention.
    name: An optional name for the operation.

  Returns:
    The created Operation.
  """
  # Thin wrapper: forward every argument to the generated op unchanged.
  return _op_def_lib.apply_op("ResourceApplyAdagradDA",
                              var=var,
                              gradient_accumulator=gradient_accumulator,
                              gradient_squared_accumulator=gradient_squared_accumulator,
                              grad=grad, lr=lr, l1=l1, l2=l2,
                              global_step=global_step,
                              use_locking=use_locking,
                              name=name)
def resource_apply_adam(var, m, v, beta1_power, beta2_power, lr, beta1, beta2,
                        epsilon, grad, use_locking=None, use_nesterov=None,
                        name=None):
  r"""Updates '*var' with the Adam algorithm.

  lr_t <- learning_rate * sqrt(1 - beta2^t) / (1 - beta1^t)
  m_t <- beta1 * m_{t-1} + (1 - beta1) * g_t
  v_t <- beta2 * v_{t-1} + (1 - beta2) * g_t * g_t
  variable <- variable - lr_t * m_t / (sqrt(v_t) + epsilon)

  Args:
    var: A `Tensor` of type `resource`; typically backed by a Variable().
    m: A `Tensor` of type `resource`; typically backed by a Variable().
    v: A `Tensor` of type `resource`; typically backed by a Variable().
    beta1_power: A scalar `Tensor` of one of: `float32`, `float64`,
      `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
      `complex128`, `qint8`, `quint8`, `qint32`, `half`.
    beta2_power: A scalar `Tensor` with the same type as `beta1_power`.
    lr: A scalar `Tensor` with the same type as `beta1_power`; scaling
      factor.
    beta1: A scalar `Tensor` with the same type as `beta1_power`; momentum
      factor.
    beta2: A scalar `Tensor` with the same type as `beta1_power`; momentum
      factor.
    epsilon: A scalar `Tensor` with the same type as `beta1_power`; ridge
      term.
    grad: A `Tensor` with the same type as `beta1_power` holding the
      gradient.
    use_locking: An optional `bool`, defaulting to `False`. When `True` the
      updates of the var, m and v tensors are guarded by a lock; otherwise
      behavior is undefined but may show less contention.
    use_nesterov: An optional `bool`, defaulting to `False`. When `True`,
      applies the nesterov update.
    name: An optional name for the operation.

  Returns:
    The created Operation.
  """
  # Thin wrapper: forward every argument to the generated op unchanged.
  return _op_def_lib.apply_op("ResourceApplyAdam",
                              var=var, m=m, v=v,
                              beta1_power=beta1_power,
                              beta2_power=beta2_power,
                              lr=lr, beta1=beta1, beta2=beta2,
                              epsilon=epsilon, grad=grad,
                              use_locking=use_locking,
                              use_nesterov=use_nesterov,
                              name=name)
def resource_apply_centered_rms_prop(var, mg, ms, mom, lr, rho, momentum,
                                     epsilon, grad, use_locking=None,
                                     name=None):
  r"""Updates '*var' with the centered RMSProp algorithm.

  Centered RMSProp normalizes with an estimate of the centered second
  moment (i.e. the variance) instead of the plain (uncentered) second
  moment used by regular RMSProp. That frequently helps training at the
  cost of a little extra computation and memory.

  Note that the dense implementation of this algorithm updates mg, ms and
  mom even when the grad is zero, whereas the sparse implementation leaves
  mg, ms and mom untouched in iterations where the grad is zero.

  mean_square = decay * mean_square + (1-decay) * gradient ** 2
  mean_grad = decay * mean_grad + (1-decay) * gradient
  Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2)
  mg <- rho * mg_{t-1} + (1-rho) * grad
  ms <- rho * ms_{t-1} + (1-rho) * grad * grad
  mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon)
  var <- var - mom

  Args:
    var: A `Tensor` of type `resource`; typically backed by a Variable().
    mg: A `Tensor` of type `resource`; typically backed by a Variable().
    ms: A `Tensor` of type `resource`; typically backed by a Variable().
    mom: A `Tensor` of type `resource`; typically backed by a Variable().
    lr: A scalar `Tensor` of one of: `float32`, `float64`, `int64`,
      `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
      `complex128`, `qint8`, `quint8`, `qint32`, `half`; scaling factor.
    rho: A scalar `Tensor` with the same type as `lr`; the decay rate.
    momentum: A `Tensor` with the same type as `lr`.
    epsilon: A scalar `Tensor` with the same type as `lr`; ridge term.
    grad: A `Tensor` with the same type as `lr` holding the gradient.
    use_locking: An optional `bool`, defaulting to `False`. When `True` the
      updates of the var, mg, ms and mom tensors are guarded by a lock;
      otherwise behavior is undefined but may show less contention.
    name: An optional name for the operation.

  Returns:
    The created Operation.
  """
  # Thin wrapper: forward every argument to the generated op unchanged.
  return _op_def_lib.apply_op("ResourceApplyCenteredRMSProp",
                              var=var, mg=mg, ms=ms, mom=mom,
                              lr=lr, rho=rho, momentum=momentum,
                              epsilon=epsilon, grad=grad,
                              use_locking=use_locking,
                              name=name)
def resource_apply_ftrl(var, accum, linear, grad, lr, l1, l2, lr_power,
                        use_locking=None, name=None):
  r"""Updates '*var' following the Ftrl-proximal scheme.

  accum_new = accum + grad * grad
  linear += grad + (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
  quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
  var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
  accum = accum_new

  Args:
    var: A `Tensor` of type `resource`; typically backed by a Variable().
    accum: A `Tensor` of type `resource`; typically backed by a Variable().
    linear: A `Tensor` of type `resource`; typically backed by a Variable().
    grad: A `Tensor` of one of: `float32`, `float64`, `int64`, `int32`,
      `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`,
      `qint8`, `quint8`, `qint32`, `half`, holding the gradient.
    lr: A scalar `Tensor` with the same type as `grad`; scaling factor.
    l1: A scalar `Tensor` with the same type as `grad`; L1 regularization.
    l2: A scalar `Tensor` with the same type as `grad`; L2 regularization.
    lr_power: A scalar `Tensor` with the same type as `grad`; scaling
      factor.
    use_locking: An optional `bool`, defaulting to `False`. When `True` the
      updates of the var and accum tensors are guarded by a lock; otherwise
      behavior is undefined but may show less contention.
    name: An optional name for the operation.

  Returns:
    The created Operation.
  """
  # Thin wrapper: forward every argument to the generated op unchanged.
  return _op_def_lib.apply_op("ResourceApplyFtrl",
                              var=var, accum=accum, linear=linear,
                              grad=grad, lr=lr, l1=l1, l2=l2,
                              lr_power=lr_power,
                              use_locking=use_locking,
                              name=name)
def resource_apply_gradient_descent(var, alpha, delta, use_locking=None,
                                    name=None):
  r"""Updates '*var' by subtracting 'alpha' * 'delta' from it.

  Args:
    var: A `Tensor` of type `resource`; typically backed by a Variable().
    alpha: A scalar `Tensor` of one of: `float32`, `float64`, `int64`,
      `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
      `complex128`, `qint8`, `quint8`, `qint32`, `half`; scaling factor.
    delta: A `Tensor` with the same type as `alpha` holding the change.
    use_locking: An optional `bool`, defaulting to `False`. When `True` the
      subtraction is guarded by a lock; otherwise behavior is undefined but
      may show less contention.
    name: An optional name for the operation.

  Returns:
    The created Operation.
  """
  # Thin wrapper: forward every argument to the generated op unchanged.
  return _op_def_lib.apply_op("ResourceApplyGradientDescent",
                              var=var, alpha=alpha, delta=delta,
                              use_locking=use_locking,
                              name=name)
def resource_apply_momentum(var, accum, lr, grad, momentum, use_locking=None,
                            use_nesterov=None, name=None):
  r"""Updates '*var' with the momentum scheme.

  Set use_nesterov = True to get Nesterov momentum.

  accum = accum * momentum + grad
  var -= lr * accum

  Args:
    var: A `Tensor` of type `resource`; typically backed by a Variable().
    accum: A `Tensor` of type `resource`; typically backed by a Variable().
    lr: A scalar `Tensor` of one of: `float32`, `float64`, `int64`,
      `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
      `complex128`, `qint8`, `quint8`, `qint32`, `half`; scaling factor.
    grad: A `Tensor` with the same type as `lr` holding the gradient.
    momentum: A scalar `Tensor` with the same type as `lr`; the momentum.
    use_locking: An optional `bool`, defaulting to `False`. When `True` the
      updates of the var and accum tensors are guarded by a lock; otherwise
      behavior is undefined but may show less contention.
    use_nesterov: An optional `bool`, defaulting to `False`. When `True`,
      the tensor handed to the gradient computation is
      var - lr * momentum * accum, so the var you end up with is
      effectively var - lr * momentum * accum.
    name: An optional name for the operation.

  Returns:
    The created Operation.
  """
  # Thin wrapper: forward every argument to the generated op unchanged.
  return _op_def_lib.apply_op("ResourceApplyMomentum",
                              var=var, accum=accum,
                              lr=lr, grad=grad, momentum=momentum,
                              use_locking=use_locking,
                              use_nesterov=use_nesterov,
                              name=name)
def resource_apply_proximal_adagrad(var, accum, lr, l1, l2, grad,
                                    use_locking=None, name=None):
  r"""Updates '*var' and '*accum' via FOBOS with an Adagrad learning rate.

  accum += grad * grad
  prox_v = var - lr * grad * (1 / sqrt(accum))
  var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}

  Args:
    var: A `Tensor` of type `resource`; typically backed by a Variable().
    accum: A `Tensor` of type `resource`; typically backed by a Variable().
    lr: A scalar `Tensor` of one of: `float32`, `float64`, `int64`,
      `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
      `complex128`, `qint8`, `quint8`, `qint32`, `half`; scaling factor.
    l1: A scalar `Tensor` with the same type as `lr`; L1 regularization.
    l2: A scalar `Tensor` with the same type as `lr`; L2 regularization.
    grad: A `Tensor` with the same type as `lr` holding the gradient.
    use_locking: An optional `bool`, defaulting to `False`. When True the
      updates of the var and accum tensors are guarded by a lock; otherwise
      behavior is undefined but may show less contention.
    name: An optional name for the operation.

  Returns:
    The created Operation.
  """
  # Thin wrapper: forward every argument to the generated op unchanged.
  return _op_def_lib.apply_op("ResourceApplyProximalAdagrad",
                              var=var, accum=accum,
                              lr=lr, l1=l1, l2=l2, grad=grad,
                              use_locking=use_locking,
                              name=name)
def resource_apply_proximal_gradient_descent(var, alpha, l1, l2, delta,
                                             use_locking=None, name=None):
  r"""Updates '*var' as the FOBOS algorithm with a fixed learning rate.

  prox_v = var - alpha * delta
  var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}

  Args:
    var: A `Tensor` of type `resource`; typically backed by a Variable().
    alpha: A scalar `Tensor` of one of: `float32`, `float64`, `int64`,
      `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
      `complex128`, `qint8`, `quint8`, `qint32`, `half`; scaling factor.
    l1: A scalar `Tensor` with the same type as `alpha`; L1 regularization.
    l2: A scalar `Tensor` with the same type as `alpha`; L2 regularization.
    delta: A `Tensor` with the same type as `alpha` holding the change.
    use_locking: An optional `bool`, defaulting to `False`. When True the
      subtraction is guarded by a lock; otherwise behavior is undefined but
      may show less contention.
    name: An optional name for the operation.

  Returns:
    The created Operation.
  """
  # Thin wrapper: forward every argument to the generated op unchanged.
  return _op_def_lib.apply_op("ResourceApplyProximalGradientDescent",
                              var=var, alpha=alpha,
                              l1=l1, l2=l2, delta=delta,
                              use_locking=use_locking,
                              name=name)
def resource_apply_rms_prop(var, ms, mom, lr, rho, momentum, epsilon, grad,
                            use_locking=None, name=None):
  r"""Updates '*var' with the RMSProp algorithm.

  Note that the dense implementation of this algorithm updates ms and mom
  even when the grad is zero, whereas the sparse implementation leaves ms
  and mom untouched in iterations where the grad is zero.

  mean_square = decay * mean_square + (1-decay) * gradient ** 2
  Delta = learning_rate * gradient / sqrt(mean_square + epsilon)
  ms <- rho * ms_{t-1} + (1-rho) * grad * grad
  mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
  var <- var - mom

  Args:
    var: A `Tensor` of type `resource`; typically backed by a Variable().
    ms: A `Tensor` of type `resource`; typically backed by a Variable().
    mom: A `Tensor` of type `resource`; typically backed by a Variable().
    lr: A scalar `Tensor` of one of: `float32`, `float64`, `int64`,
      `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
      `complex128`, `qint8`, `quint8`, `qint32`, `half`; scaling factor.
    rho: A scalar `Tensor` with the same type as `lr`; the decay rate.
    momentum: A `Tensor` with the same type as `lr`.
    epsilon: A scalar `Tensor` with the same type as `lr`; ridge term.
    grad: A `Tensor` with the same type as `lr` holding the gradient.
    use_locking: An optional `bool`, defaulting to `False`. When `True` the
      updates of the var, ms and mom tensors are guarded by a lock;
      otherwise behavior is undefined but may show less contention.
    name: An optional name for the operation.

  Returns:
    The created Operation.
  """
  # Thin wrapper: forward every argument to the generated op unchanged.
  return _op_def_lib.apply_op("ResourceApplyRMSProp",
                              var=var, ms=ms, mom=mom,
                              lr=lr, rho=rho, momentum=momentum,
                              epsilon=epsilon, grad=grad,
                              use_locking=use_locking,
                              name=name)
def resource_sparse_apply_adadelta(var, accum, accum_update, lr, rho, epsilon,
                                   grad, indices, use_locking=None,
                                   name=None):
  r"""Sparse update of '*var' and accumulators per the adadelta scheme.

  Args:
    var: A `Tensor` of type `resource`; typically backed by a Variable().
    accum: A `Tensor` of type `resource`; typically backed by a Variable().
    accum_update: A `Tensor` of type `resource`; typically backed by a
      Variable().
    lr: A scalar `Tensor` of one of: `float32`, `float64`, `int64`,
      `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
      `complex128`, `qint8`, `quint8`, `qint32`, `half`; the learning rate.
    rho: A scalar `Tensor` with the same type as `lr`; the decay factor.
    epsilon: A scalar `Tensor` with the same type as `lr`; constant factor.
    grad: A `Tensor` with the same type as `lr` holding the gradient.
    indices: An `int32` or `int64` `Tensor`; a vector of indices into the
      first dimension of var and accum.
    use_locking: An optional `bool`, defaulting to `False`. When True the
      updates of the var and accum tensors are guarded by a lock; otherwise
      behavior is undefined but may show less contention.
    name: An optional name for the operation.

  Returns:
    The created Operation.
  """
  # Thin wrapper: forward every argument to the generated op unchanged.
  return _op_def_lib.apply_op("ResourceSparseApplyAdadelta",
                              var=var, accum=accum,
                              accum_update=accum_update,
                              lr=lr, rho=rho, epsilon=epsilon,
                              grad=grad, indices=indices,
                              use_locking=use_locking,
                              name=name)
def resource_sparse_apply_adagrad(var, accum, lr, grad, indices,
                                  use_locking=None, name=None):
  r"""Updates the relevant entries of '*var' and '*accum' per adagrad.

  For the rows we have grad for, var and accum are updated as follows:

  accum += grad * grad
  var -= lr * grad * (1 / sqrt(accum))

  Args:
    var: A `Tensor` of type `resource`; typically backed by a Variable().
    accum: A `Tensor` of type `resource`; typically backed by a Variable().
    lr: A scalar `Tensor` of one of: `float32`, `float64`, `int64`,
      `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
      `complex128`, `qint8`, `quint8`, `qint32`, `half`; the learning rate.
    grad: A `Tensor` with the same type as `lr` holding the gradient.
    indices: An `int32` or `int64` `Tensor`; a vector of indices into the
      first dimension of var and accum.
    use_locking: An optional `bool`, defaulting to `False`. When `True` the
      updates of the var and accum tensors are guarded by a lock; otherwise
      behavior is undefined but may show less contention.
    name: An optional name for the operation.

  Returns:
    The created Operation.
  """
  # Thin wrapper: forward every argument to the generated op unchanged.
  return _op_def_lib.apply_op("ResourceSparseApplyAdagrad",
                              var=var, accum=accum,
                              lr=lr, grad=grad, indices=indices,
                              use_locking=use_locking,
                              name=name)
def resource_sparse_apply_adagrad_da(var, gradient_accumulator,
                                     gradient_squared_accumulator, grad,
                                     indices, lr, l1, l2, global_step,
                                     use_locking=None, name=None):
  r"""Update entries in '*var' and '*accum' according to the proximal adagrad scheme.

  Args:
    var: A `Tensor` of type `resource`. Should be from a Variable().
    gradient_accumulator: A `Tensor` of type `resource`.
      Should be from a Variable().
    gradient_squared_accumulator: A `Tensor` of type `resource`.
      Should be from a Variable().
    grad: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`.
      The gradient.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A vector of indices into the first dimension of var and accum.
    lr: A `Tensor`. Must have the same type as `grad`.
      Learning rate. Must be a scalar.
    l1: A `Tensor`. Must have the same type as `grad`.
      L1 regularization. Must be a scalar.
    l2: A `Tensor`. Must have the same type as `grad`.
      L2 regularization. Must be a scalar.
    global_step: A `Tensor` of type `int64`.
      Training step number. Must be a scalar.
    use_locking: An optional `bool`. Defaults to `False`.
      If True, updating of the var and accum tensors is protected by a lock;
      otherwise the behavior is undefined, but may exhibit less contention.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  # Thin wrapper: forwards all arguments to the registered op definition.
  return _op_def_lib.apply_op(
      "ResourceSparseApplyAdagradDA", var=var,
      gradient_accumulator=gradient_accumulator,
      gradient_squared_accumulator=gradient_squared_accumulator,
      grad=grad, indices=indices, lr=lr, l1=l1, l2=l2,
      global_step=global_step, use_locking=use_locking, name=name)
def resource_sparse_apply_centered_rms_prop(var, mg, ms, mom, lr, rho,
                                            momentum, epsilon, grad, indices,
                                            use_locking=None, name=None):
  r"""Update '*var' according to the centered RMSProp algorithm.

  The centered RMSProp algorithm uses an estimate of the centered second
  moment (i.e., the variance) for normalization, as opposed to regular
  RMSProp, which uses the (uncentered) second moment. This often helps with
  training, but is slightly more expensive in terms of computation and memory.

  Note that in the dense implementation of this algorithm, mg, ms, and mom
  update even if the grad is zero; in this sparse implementation, they do not
  update in iterations during which the grad is zero.

    mean_square = decay * mean_square + (1-decay) * gradient ** 2
    mean_grad = decay * mean_grad + (1-decay) * gradient
    Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2)
    ms <- rho * ms_{t-1} + (1-rho) * grad * grad
    mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
    var <- var - mom

  Args:
    var: A `Tensor` of type `resource`. Should be from a Variable().
    mg: A `Tensor` of type `resource`. Should be from a Variable().
    ms: A `Tensor` of type `resource`. Should be from a Variable().
    mom: A `Tensor` of type `resource`. Should be from a Variable().
    lr: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`.
      Scaling factor. Must be a scalar.
    rho: A `Tensor`. Must have the same type as `lr`.
      Decay rate. Must be a scalar.
    momentum: A `Tensor`. Must have the same type as `lr`.
    epsilon: A `Tensor`. Must have the same type as `lr`.
      Ridge term. Must be a scalar.
    grad: A `Tensor`. Must have the same type as `lr`. The gradient.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A vector of indices into the first dimension of var, ms and mom.
    use_locking: An optional `bool`. Defaults to `False`.
      If `True`, updating of the var, mg, ms, and mom tensors is protected by
      a lock; otherwise the behavior is undefined, but may exhibit less
      contention.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  # Thin wrapper: forwards all arguments to the registered op definition.
  return _op_def_lib.apply_op(
      "ResourceSparseApplyCenteredRMSProp", var=var, mg=mg, ms=ms, mom=mom,
      lr=lr, rho=rho, momentum=momentum, epsilon=epsilon, grad=grad,
      indices=indices, use_locking=use_locking, name=name)
def resource_sparse_apply_ftrl(var, accum, linear, grad, indices, lr, l1, l2,
                               lr_power, use_locking=None, name=None):
  r"""Update relevant entries in '*var' according to the Ftrl-proximal scheme.

  For the rows that have a gradient, var, accum and linear are updated as:
    accum_new = accum + grad * grad
    linear += grad + (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
    quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
    var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
    accum = accum_new

  Args:
    var: A `Tensor` of type `resource`. Should be from a Variable().
    accum: A `Tensor` of type `resource`. Should be from a Variable().
    linear: A `Tensor` of type `resource`. Should be from a Variable().
    grad: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`.
      The gradient.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A vector of indices into the first dimension of var and accum.
    lr: A `Tensor`. Must have the same type as `grad`.
      Scaling factor. Must be a scalar.
    l1: A `Tensor`. Must have the same type as `grad`.
      L1 regularization. Must be a scalar.
    l2: A `Tensor`. Must have the same type as `grad`.
      L2 regularization. Must be a scalar.
    lr_power: A `Tensor`. Must have the same type as `grad`.
      Scaling factor. Must be a scalar.
    use_locking: An optional `bool`. Defaults to `False`.
      If `True`, updating of the var and accum tensors is protected by a lock;
      otherwise the behavior is undefined, but may exhibit less contention.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  # Thin wrapper: forwards all arguments to the registered op definition.
  return _op_def_lib.apply_op(
      "ResourceSparseApplyFtrl", var=var, accum=accum, linear=linear,
      grad=grad, indices=indices, lr=lr, l1=l1, l2=l2, lr_power=lr_power,
      use_locking=use_locking, name=name)
def resource_sparse_apply_momentum(var, accum, lr, grad, indices, momentum,
                                   use_locking=None, use_nesterov=None,
                                   name=None):
  r"""Update relevant entries in '*var' and '*accum' according to the momentum scheme.

  Set use_nesterov = True if you want to use Nesterov momentum.

  For the rows that have a gradient, var and accum are updated as:
    accum = accum * momentum + grad
    var -= lr * accum

  Args:
    var: A `Tensor` of type `resource`. Should be from a Variable().
    accum: A `Tensor` of type `resource`. Should be from a Variable().
    lr: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`.
      Learning rate. Must be a scalar.
    grad: A `Tensor`. Must have the same type as `lr`. The gradient.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A vector of indices into the first dimension of var and accum.
    momentum: A `Tensor`. Must have the same type as `lr`.
      Momentum. Must be a scalar.
    use_locking: An optional `bool`. Defaults to `False`.
      If `True`, updating of the var and accum tensors is protected by a lock;
      otherwise the behavior is undefined, but may exhibit less contention.
    use_nesterov: An optional `bool`. Defaults to `False`.
      If `True`, the tensor passed to compute grad will be
      var - lr * momentum * accum, so in the end, the var you get is actually
      var - lr * momentum * accum.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  # Thin wrapper: forwards all arguments to the registered op definition.
  return _op_def_lib.apply_op(
      "ResourceSparseApplyMomentum", var=var, accum=accum, lr=lr, grad=grad,
      indices=indices, momentum=momentum, use_locking=use_locking,
      use_nesterov=use_nesterov, name=name)
def resource_sparse_apply_proximal_adagrad(var, accum, lr, l1, l2, grad,
                                           indices, use_locking=None,
                                           name=None):
  r"""Sparse update entries in '*var' and '*accum' according to FOBOS algorithm.

  For the rows that have a gradient, var and accum are updated as:
    accum += grad * grad
    prox_v = var
    prox_v -= lr * grad * (1 / sqrt(accum))
    var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}

  Args:
    var: A `Tensor` of type `resource`. Should be from a Variable().
    accum: A `Tensor` of type `resource`. Should be from a Variable().
    lr: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`.
      Learning rate. Must be a scalar.
    l1: A `Tensor`. Must have the same type as `lr`.
      L1 regularization. Must be a scalar.
    l2: A `Tensor`. Must have the same type as `lr`.
      L2 regularization. Must be a scalar.
    grad: A `Tensor`. Must have the same type as `lr`. The gradient.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A vector of indices into the first dimension of var and accum.
    use_locking: An optional `bool`. Defaults to `False`.
      If True, updating of the var and accum tensors is protected by a lock;
      otherwise the behavior is undefined, but may exhibit less contention.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  # Thin wrapper: forwards all arguments to the registered op definition.
  return _op_def_lib.apply_op(
      "ResourceSparseApplyProximalAdagrad", var=var, accum=accum, lr=lr,
      l1=l1, l2=l2, grad=grad, indices=indices, use_locking=use_locking,
      name=name)
def resource_sparse_apply_proximal_gradient_descent(var, alpha, l1, l2, grad,
                                                    indices, use_locking=None,
                                                    name=None):
  r"""Sparse update '*var' as FOBOS algorithm with fixed learning rate.

  For the rows that have a gradient, var is updated as:
    prox_v = var - alpha * grad
    var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}

  Args:
    var: A `Tensor` of type `resource`. Should be from a Variable().
    alpha: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`.
      Scaling factor. Must be a scalar.
    l1: A `Tensor`. Must have the same type as `alpha`.
      L1 regularization. Must be a scalar.
    l2: A `Tensor`. Must have the same type as `alpha`.
      L2 regularization. Must be a scalar.
    grad: A `Tensor`. Must have the same type as `alpha`. The gradient.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A vector of indices into the first dimension of var and accum.
    use_locking: An optional `bool`. Defaults to `False`.
      If True, the subtraction is protected by a lock; otherwise the behavior
      is undefined, but may exhibit less contention.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  # Thin wrapper: forwards all arguments to the registered op definition.
  return _op_def_lib.apply_op(
      "ResourceSparseApplyProximalGradientDescent", var=var, alpha=alpha,
      l1=l1, l2=l2, grad=grad, indices=indices, use_locking=use_locking,
      name=name)
def resource_sparse_apply_rms_prop(var, ms, mom, lr, rho, momentum, epsilon,
                                   grad, indices, use_locking=None,
                                   name=None):
  r"""Update '*var' according to the RMSProp algorithm.

  Note that in the dense implementation of this algorithm, ms and mom update
  even if the grad is zero; in this sparse implementation, they do not update
  in iterations during which the grad is zero.

    mean_square = decay * mean_square + (1-decay) * gradient ** 2
    Delta = learning_rate * gradient / sqrt(mean_square + epsilon)
    ms <- rho * ms_{t-1} + (1-rho) * grad * grad
    mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
    var <- var - mom

  Args:
    var: A `Tensor` of type `resource`. Should be from a Variable().
    ms: A `Tensor` of type `resource`. Should be from a Variable().
    mom: A `Tensor` of type `resource`. Should be from a Variable().
    lr: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`.
      Scaling factor. Must be a scalar.
    rho: A `Tensor`. Must have the same type as `lr`.
      Decay rate. Must be a scalar.
    momentum: A `Tensor`. Must have the same type as `lr`.
    epsilon: A `Tensor`. Must have the same type as `lr`.
      Ridge term. Must be a scalar.
    grad: A `Tensor`. Must have the same type as `lr`. The gradient.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A vector of indices into the first dimension of var, ms and mom.
    use_locking: An optional `bool`. Defaults to `False`.
      If `True`, updating of the var, ms, and mom tensors is protected by a
      lock; otherwise the behavior is undefined, but may exhibit less
      contention.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  # Thin wrapper: forwards all arguments to the registered op definition.
  return _op_def_lib.apply_op(
      "ResourceSparseApplyRMSProp", var=var, ms=ms, mom=mom, lr=lr, rho=rho,
      momentum=momentum, epsilon=epsilon, grad=grad, indices=indices,
      use_locking=use_locking, name=name)
def sparse_apply_adadelta(var, accum, accum_update, lr, rho, epsilon, grad,
                          indices, use_locking=None, name=None):
  r"""Sparse update of '*var' and accumulators according to the adadelta scheme.

  Args:
    var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`.
      Should be from a Variable().
    accum: A mutable `Tensor`. Must have the same type as `var`.
      Should be from a Variable().
    accum_update: A mutable `Tensor`. Must have the same type as `var`.
      Should be from a Variable().
    lr: A `Tensor`. Must have the same type as `var`.
      Learning rate. Must be a scalar.
    rho: A `Tensor`. Must have the same type as `var`.
      Decay factor. Must be a scalar.
    epsilon: A `Tensor`. Must have the same type as `var`.
      Constant factor. Must be a scalar.
    grad: A `Tensor`. Must have the same type as `var`. The gradient.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A vector of indices into the first dimension of var and accum.
    use_locking: An optional `bool`. Defaults to `False`.
      If True, updating of the var and accum tensors is protected by a lock;
      otherwise the behavior is undefined, but may exhibit less contention.
    name: A name for the operation (optional).

  Returns:
    A mutable `Tensor`. Has the same type as `var`. Same as "var".
  """
  # Thin wrapper: forwards all arguments to the registered op definition.
  return _op_def_lib.apply_op(
      "SparseApplyAdadelta", var=var, accum=accum,
      accum_update=accum_update, lr=lr, rho=rho, epsilon=epsilon, grad=grad,
      indices=indices, use_locking=use_locking, name=name)
def sparse_apply_adagrad(var, accum, lr, grad, indices, use_locking=None,
                         name=None):
  r"""Update relevant entries in '*var' and '*accum' according to the adagrad scheme.

  For the rows that have a gradient, var and accum are updated as:
    accum += grad * grad
    var -= lr * grad * (1 / sqrt(accum))

  Args:
    var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`.
      Should be from a Variable().
    accum: A mutable `Tensor`. Must have the same type as `var`.
      Should be from a Variable().
    lr: A `Tensor`. Must have the same type as `var`.
      Learning rate. Must be a scalar.
    grad: A `Tensor`. Must have the same type as `var`. The gradient.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A vector of indices into the first dimension of var and accum.
    use_locking: An optional `bool`. Defaults to `False`.
      If `True`, updating of the var and accum tensors is protected by a lock;
      otherwise the behavior is undefined, but may exhibit less contention.
    name: A name for the operation (optional).

  Returns:
    A mutable `Tensor`. Has the same type as `var`. Same as "var".
  """
  # Thin wrapper: forwards all arguments to the registered op definition.
  return _op_def_lib.apply_op(
      "SparseApplyAdagrad", var=var, accum=accum, lr=lr, grad=grad,
      indices=indices, use_locking=use_locking, name=name)
def sparse_apply_adagrad_da(var, gradient_accumulator,
                            gradient_squared_accumulator, grad, indices, lr,
                            l1, l2, global_step, use_locking=None, name=None):
  r"""Update entries in '*var' and '*accum' according to the proximal adagrad scheme.

  Args:
    var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`.
      Should be from a Variable().
    gradient_accumulator: A mutable `Tensor`. Must have the same type as `var`.
      Should be from a Variable().
    gradient_squared_accumulator: A mutable `Tensor`. Must have the same type as `var`.
      Should be from a Variable().
    grad: A `Tensor`. Must have the same type as `var`. The gradient.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A vector of indices into the first dimension of var and accum.
    lr: A `Tensor`. Must have the same type as `var`.
      Learning rate. Must be a scalar.
    l1: A `Tensor`. Must have the same type as `var`.
      L1 regularization. Must be a scalar.
    l2: A `Tensor`. Must have the same type as `var`.
      L2 regularization. Must be a scalar.
    global_step: A `Tensor` of type `int64`.
      Training step number. Must be a scalar.
    use_locking: An optional `bool`. Defaults to `False`.
      If True, updating of the var and accum tensors is protected by a lock;
      otherwise the behavior is undefined, but may exhibit less contention.
    name: A name for the operation (optional).

  Returns:
    A mutable `Tensor`. Has the same type as `var`. Same as "var".
  """
  # Thin wrapper: forwards all arguments to the registered op definition.
  return _op_def_lib.apply_op(
      "SparseApplyAdagradDA", var=var,
      gradient_accumulator=gradient_accumulator,
      gradient_squared_accumulator=gradient_squared_accumulator,
      grad=grad, indices=indices, lr=lr, l1=l1, l2=l2,
      global_step=global_step, use_locking=use_locking, name=name)
def sparse_apply_centered_rms_prop(var, mg, ms, mom, lr, rho, momentum,
                                   epsilon, grad, indices, use_locking=None,
                                   name=None):
  r"""Update '*var' according to the centered RMSProp algorithm.

  The centered RMSProp algorithm uses an estimate of the centered second
  moment (i.e., the variance) for normalization, as opposed to regular
  RMSProp, which uses the (uncentered) second moment. This often helps with
  training, but is slightly more expensive in terms of computation and memory.

  Note that in the dense implementation of this algorithm, mg, ms, and mom
  update even if the grad is zero; in this sparse implementation, they do not
  update in iterations during which the grad is zero.

    mean_square = decay * mean_square + (1-decay) * gradient ** 2
    mean_grad = decay * mean_grad + (1-decay) * gradient
    Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2)
    ms <- rho * ms_{t-1} + (1-rho) * grad * grad
    mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
    var <- var - mom

  Args:
    var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`.
      Should be from a Variable().
    mg: A mutable `Tensor`. Must have the same type as `var`.
      Should be from a Variable().
    ms: A mutable `Tensor`. Must have the same type as `var`.
      Should be from a Variable().
    mom: A mutable `Tensor`. Must have the same type as `var`.
      Should be from a Variable().
    lr: A `Tensor`. Must have the same type as `var`.
      Scaling factor. Must be a scalar.
    rho: A `Tensor`. Must have the same type as `var`.
      Decay rate. Must be a scalar.
    momentum: A `Tensor`. Must have the same type as `var`.
    epsilon: A `Tensor`. Must have the same type as `var`.
      Ridge term. Must be a scalar.
    grad: A `Tensor`. Must have the same type as `var`. The gradient.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A vector of indices into the first dimension of var, ms and mom.
    use_locking: An optional `bool`. Defaults to `False`.
      If `True`, updating of the var, mg, ms, and mom tensors is protected by
      a lock; otherwise the behavior is undefined, but may exhibit less
      contention.
    name: A name for the operation (optional).

  Returns:
    A mutable `Tensor`. Has the same type as `var`. Same as "var".
  """
  # Thin wrapper: forwards all arguments to the registered op definition.
  return _op_def_lib.apply_op(
      "SparseApplyCenteredRMSProp", var=var, mg=mg, ms=ms, mom=mom, lr=lr,
      rho=rho, momentum=momentum, epsilon=epsilon, grad=grad,
      indices=indices, use_locking=use_locking, name=name)
def sparse_apply_ftrl(var, accum, linear, grad, indices, lr, l1, l2, lr_power,
                      use_locking=None, name=None):
  r"""Update relevant entries in '*var' according to the Ftrl-proximal scheme.

  For the rows that have a gradient, var, accum and linear are updated as:
    accum_new = accum + grad * grad
    linear += grad + (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
    quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
    var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
    accum = accum_new

  Args:
    var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`.
      Should be from a Variable().
    accum: A mutable `Tensor`. Must have the same type as `var`.
      Should be from a Variable().
    linear: A mutable `Tensor`. Must have the same type as `var`.
      Should be from a Variable().
    grad: A `Tensor`. Must have the same type as `var`. The gradient.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A vector of indices into the first dimension of var and accum.
    lr: A `Tensor`. Must have the same type as `var`.
      Scaling factor. Must be a scalar.
    l1: A `Tensor`. Must have the same type as `var`.
      L1 regularization. Must be a scalar.
    l2: A `Tensor`. Must have the same type as `var`.
      L2 regularization. Must be a scalar.
    lr_power: A `Tensor`. Must have the same type as `var`.
      Scaling factor. Must be a scalar.
    use_locking: An optional `bool`. Defaults to `False`.
      If `True`, updating of the var and accum tensors is protected by a lock;
      otherwise the behavior is undefined, but may exhibit less contention.
    name: A name for the operation (optional).

  Returns:
    A mutable `Tensor`. Has the same type as `var`. Same as "var".
  """
  # Thin wrapper: forwards all arguments to the registered op definition.
  return _op_def_lib.apply_op(
      "SparseApplyFtrl", var=var, accum=accum, linear=linear, grad=grad,
      indices=indices, lr=lr, l1=l1, l2=l2, lr_power=lr_power,
      use_locking=use_locking, name=name)
def sparse_apply_momentum(var, accum, lr, grad, indices, momentum,
                          use_locking=None, use_nesterov=None, name=None):
  r"""Update relevant entries in '*var' and '*accum' according to the momentum scheme.

  Set use_nesterov = True if you want to use Nesterov momentum.

  For the rows that have a gradient, var and accum are updated as:
    accum = accum * momentum + grad
    var -= lr * accum

  Args:
    var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`.
      Should be from a Variable().
    accum: A mutable `Tensor`. Must have the same type as `var`.
      Should be from a Variable().
    lr: A `Tensor`. Must have the same type as `var`.
      Learning rate. Must be a scalar.
    grad: A `Tensor`. Must have the same type as `var`. The gradient.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A vector of indices into the first dimension of var and accum.
    momentum: A `Tensor`. Must have the same type as `var`.
      Momentum. Must be a scalar.
    use_locking: An optional `bool`. Defaults to `False`.
      If `True`, updating of the var and accum tensors is protected by a lock;
      otherwise the behavior is undefined, but may exhibit less contention.
    use_nesterov: An optional `bool`. Defaults to `False`.
      If `True`, the tensor passed to compute grad will be
      var - lr * momentum * accum, so in the end, the var you get is actually
      var - lr * momentum * accum.
    name: A name for the operation (optional).

  Returns:
    A mutable `Tensor`. Has the same type as `var`. Same as "var".
  """
  # Thin wrapper: forwards all arguments to the registered op definition.
  return _op_def_lib.apply_op(
      "SparseApplyMomentum", var=var, accum=accum, lr=lr, grad=grad,
      indices=indices, momentum=momentum, use_locking=use_locking,
      use_nesterov=use_nesterov, name=name)
def sparse_apply_proximal_adagrad(var, accum, lr, l1, l2, grad, indices,
                                  use_locking=None, name=None):
  r"""Sparse update entries in '*var' and '*accum' according to FOBOS algorithm.

  For the rows that have a gradient, var and accum are updated as:
    accum += grad * grad
    prox_v = var
    prox_v -= lr * grad * (1 / sqrt(accum))
    var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}

  Args:
    var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`.
      Should be from a Variable().
    accum: A mutable `Tensor`. Must have the same type as `var`.
      Should be from a Variable().
    lr: A `Tensor`. Must have the same type as `var`.
      Learning rate. Must be a scalar.
    l1: A `Tensor`. Must have the same type as `var`.
      L1 regularization. Must be a scalar.
    l2: A `Tensor`. Must have the same type as `var`.
      L2 regularization. Must be a scalar.
    grad: A `Tensor`. Must have the same type as `var`. The gradient.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A vector of indices into the first dimension of var and accum.
    use_locking: An optional `bool`. Defaults to `False`.
      If True, updating of the var and accum tensors is protected by a lock;
      otherwise the behavior is undefined, but may exhibit less contention.
    name: A name for the operation (optional).

  Returns:
    A mutable `Tensor`. Has the same type as `var`. Same as "var".
  """
  # Thin wrapper: forwards all arguments to the registered op definition.
  return _op_def_lib.apply_op(
      "SparseApplyProximalAdagrad", var=var, accum=accum, lr=lr, l1=l1,
      l2=l2, grad=grad, indices=indices, use_locking=use_locking, name=name)
def sparse_apply_proximal_gradient_descent(var, alpha, l1, l2, grad, indices,
                                           use_locking=None, name=None):
  r"""Sparse update '*var' as FOBOS algorithm with fixed learning rate.

  For the rows that have a gradient, var is updated as:
    prox_v = var - alpha * grad
    var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}

  Args:
    var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`.
      Should be from a Variable().
    alpha: A `Tensor`. Must have the same type as `var`.
      Scaling factor. Must be a scalar.
    l1: A `Tensor`. Must have the same type as `var`.
      L1 regularization. Must be a scalar.
    l2: A `Tensor`. Must have the same type as `var`.
      L2 regularization. Must be a scalar.
    grad: A `Tensor`. Must have the same type as `var`. The gradient.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A vector of indices into the first dimension of var and accum.
    use_locking: An optional `bool`. Defaults to `False`.
      If True, the subtraction is protected by a lock; otherwise the behavior
      is undefined, but may exhibit less contention.
    name: A name for the operation (optional).

  Returns:
    A mutable `Tensor`. Has the same type as `var`. Same as "var".
  """
  # Thin wrapper: forwards all arguments to the registered op definition.
  return _op_def_lib.apply_op(
      "SparseApplyProximalGradientDescent", var=var, alpha=alpha, l1=l1,
      l2=l2, grad=grad, indices=indices, use_locking=use_locking, name=name)
def sparse_apply_rms_prop(var, ms, mom, lr, rho, momentum, epsilon, grad,
                          indices, use_locking=None, name=None):
  r"""Update '*var' according to the RMSProp algorithm.

  Note that in the dense implementation of this algorithm, ms and mom update
  even if the grad is zero; in this sparse implementation, they do not update
  in iterations during which the grad is zero.

    mean_square = decay * mean_square + (1-decay) * gradient ** 2
    Delta = learning_rate * gradient / sqrt(mean_square + epsilon)
    ms <- rho * ms_{t-1} + (1-rho) * grad * grad
    mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
    var <- var - mom

  Args:
    var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`.
      Should be from a Variable().
    ms: A mutable `Tensor`. Must have the same type as `var`.
      Should be from a Variable().
    mom: A mutable `Tensor`. Must have the same type as `var`.
      Should be from a Variable().
    lr: A `Tensor`. Must have the same type as `var`.
      Scaling factor. Must be a scalar.
    rho: A `Tensor`. Must have the same type as `var`.
      Decay rate. Must be a scalar.
    momentum: A `Tensor`. Must have the same type as `var`.
    epsilon: A `Tensor`. Must have the same type as `var`.
      Ridge term. Must be a scalar.
    grad: A `Tensor`. Must have the same type as `var`. The gradient.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A vector of indices into the first dimension of var, ms and mom.
    use_locking: An optional `bool`. Defaults to `False`.
      If `True`, updating of the var, ms, and mom tensors is protected by a
      lock; otherwise the behavior is undefined, but may exhibit less
      contention.
    name: A name for the operation (optional).

  Returns:
    A mutable `Tensor`. Has the same type as `var`. Same as "var".
  """
  # Thin wrapper: forwards all arguments to the registered op definition.
  return _op_def_lib.apply_op(
      "SparseApplyRMSProp", var=var, ms=ms, mom=mom, lr=lr, rho=rho,
      momentum=momentum, epsilon=epsilon, grad=grad, indices=indices,
      use_locking=use_locking, name=name)
def _InitOpDefLibrary():
  """Parse the module's op-list text proto, register it, and return an OpDefLibrary."""
  parsed_ops = _op_def_pb2.OpList()
  # The op definitions live in the `op_list_ascii` attribute attached to this
  # function (a generated text-format protobuf).
  _text_format.Merge(_InitOpDefLibrary.op_list_ascii, parsed_ops)
  _op_def_registry.register_op_list(parsed_ops)
  library = _op_def_library.OpDefLibrary()
  library.add_op_list(parsed_ops)
  return library
_InitOpDefLibrary.op_list_ascii = """op {
name: "ApplyAdadelta"
input_arg {
name: "var"
type_attr: "T"
is_ref: true
}
input_arg {
name: "accum"
type_attr: "T"
is_ref: true
}
input_arg {
name: "accum_update"
type_attr: "T"
is_ref: true
}
input_arg {
name: "lr"
type_attr: "T"
}
input_arg {
name: "rho"
type_attr: "T"
}
input_arg {
name: "epsilon"
type_attr: "T"
}
input_arg {
name: "grad"
type_attr: "T"
}
output_arg {
name: "out"
type_attr: "T"
is_ref: true
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
}
op {
name: "ApplyAdagrad"
input_arg {
name: "var"
type_attr: "T"
is_ref: true
}
input_arg {
name: "accum"
type_attr: "T"
is_ref: true
}
input_arg {
name: "lr"
type_attr: "T"
}
input_arg {
name: "grad"
type_attr: "T"
}
output_arg {
name: "out"
type_attr: "T"
is_ref: true
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
}
op {
name: "ApplyAdagradDA"
input_arg {
name: "var"
type_attr: "T"
is_ref: true
}
input_arg {
name: "gradient_accumulator"
type_attr: "T"
is_ref: true
}
input_arg {
name: "gradient_squared_accumulator"
type_attr: "T"
is_ref: true
}
input_arg {
name: "grad"
type_attr: "T"
}
input_arg {
name: "lr"
type_attr: "T"
}
input_arg {
name: "l1"
type_attr: "T"
}
input_arg {
name: "l2"
type_attr: "T"
}
input_arg {
name: "global_step"
type: DT_INT64
}
output_arg {
name: "out"
type_attr: "T"
is_ref: true
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
}
op {
name: "ApplyAdam"
input_arg {
name: "var"
type_attr: "T"
is_ref: true
}
input_arg {
name: "m"
type_attr: "T"
is_ref: true
}
input_arg {
name: "v"
type_attr: "T"
is_ref: true
}
input_arg {
name: "beta1_power"
type_attr: "T"
}
input_arg {
name: "beta2_power"
type_attr: "T"
}
input_arg {
name: "lr"
type_attr: "T"
}
input_arg {
name: "beta1"
type_attr: "T"
}
input_arg {
name: "beta2"
type_attr: "T"
}
input_arg {
name: "epsilon"
type_attr: "T"
}
input_arg {
name: "grad"
type_attr: "T"
}
output_arg {
name: "out"
type_attr: "T"
is_ref: true
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
attr {
name: "use_nesterov"
type: "bool"
default_value {
b: false
}
}
}
op {
name: "ApplyCenteredRMSProp"
input_arg {
name: "var"
type_attr: "T"
is_ref: true
}
input_arg {
name: "mg"
type_attr: "T"
is_ref: true
}
input_arg {
name: "ms"
type_attr: "T"
is_ref: true
}
input_arg {
name: "mom"
type_attr: "T"
is_ref: true
}
input_arg {
name: "lr"
type_attr: "T"
}
input_arg {
name: "rho"
type_attr: "T"
}
input_arg {
name: "momentum"
type_attr: "T"
}
input_arg {
name: "epsilon"
type_attr: "T"
}
input_arg {
name: "grad"
type_attr: "T"
}
output_arg {
name: "out"
type_attr: "T"
is_ref: true
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
}
op {
name: "ApplyFtrl"
input_arg {
name: "var"
type_attr: "T"
is_ref: true
}
input_arg {
name: "accum"
type_attr: "T"
is_ref: true
}
input_arg {
name: "linear"
type_attr: "T"
is_ref: true
}
input_arg {
name: "grad"
type_attr: "T"
}
input_arg {
name: "lr"
type_attr: "T"
}
input_arg {
name: "l1"
type_attr: "T"
}
input_arg {
name: "l2"
type_attr: "T"
}
input_arg {
name: "lr_power"
type_attr: "T"
}
output_arg {
name: "out"
type_attr: "T"
is_ref: true
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
}
op {
name: "ApplyGradientDescent"
input_arg {
name: "var"
type_attr: "T"
is_ref: true
}
input_arg {
name: "alpha"
type_attr: "T"
}
input_arg {
name: "delta"
type_attr: "T"
}
output_arg {
name: "out"
type_attr: "T"
is_ref: true
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
}
op {
name: "ApplyMomentum"
input_arg {
name: "var"
type_attr: "T"
is_ref: true
}
input_arg {
name: "accum"
type_attr: "T"
is_ref: true
}
input_arg {
name: "lr"
type_attr: "T"
}
input_arg {
name: "grad"
type_attr: "T"
}
input_arg {
name: "momentum"
type_attr: "T"
}
output_arg {
name: "out"
type_attr: "T"
is_ref: true
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
attr {
name: "use_nesterov"
type: "bool"
default_value {
b: false
}
}
}
op {
name: "ApplyProximalAdagrad"
input_arg {
name: "var"
type_attr: "T"
is_ref: true
}
input_arg {
name: "accum"
type_attr: "T"
is_ref: true
}
input_arg {
name: "lr"
type_attr: "T"
}
input_arg {
name: "l1"
type_attr: "T"
}
input_arg {
name: "l2"
type_attr: "T"
}
input_arg {
name: "grad"
type_attr: "T"
}
output_arg {
name: "out"
type_attr: "T"
is_ref: true
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
}
op {
name: "ApplyProximalGradientDescent"
input_arg {
name: "var"
type_attr: "T"
is_ref: true
}
input_arg {
name: "alpha"
type_attr: "T"
}
input_arg {
name: "l1"
type_attr: "T"
}
input_arg {
name: "l2"
type_attr: "T"
}
input_arg {
name: "delta"
type_attr: "T"
}
output_arg {
name: "out"
type_attr: "T"
is_ref: true
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
}
op {
name: "ApplyRMSProp"
input_arg {
name: "var"
type_attr: "T"
is_ref: true
}
input_arg {
name: "ms"
type_attr: "T"
is_ref: true
}
input_arg {
name: "mom"
type_attr: "T"
is_ref: true
}
input_arg {
name: "lr"
type_attr: "T"
}
input_arg {
name: "rho"
type_attr: "T"
}
input_arg {
name: "momentum"
type_attr: "T"
}
input_arg {
name: "epsilon"
type_attr: "T"
}
input_arg {
name: "grad"
type_attr: "T"
}
output_arg {
name: "out"
type_attr: "T"
is_ref: true
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
}
op {
name: "ResourceApplyAdadelta"
input_arg {
name: "var"
type: DT_RESOURCE
}
input_arg {
name: "accum"
type: DT_RESOURCE
}
input_arg {
name: "accum_update"
type: DT_RESOURCE
}
input_arg {
name: "lr"
type_attr: "T"
}
input_arg {
name: "rho"
type_attr: "T"
}
input_arg {
name: "epsilon"
type_attr: "T"
}
input_arg {
name: "grad"
type_attr: "T"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
is_stateful: true
}
op {
name: "ResourceApplyAdagrad"
input_arg {
name: "var"
type: DT_RESOURCE
}
input_arg {
name: "accum"
type: DT_RESOURCE
}
input_arg {
name: "lr"
type_attr: "T"
}
input_arg {
name: "grad"
type_attr: "T"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
is_stateful: true
}
op {
name: "ResourceApplyAdagradDA"
input_arg {
name: "var"
type: DT_RESOURCE
}
input_arg {
name: "gradient_accumulator"
type: DT_RESOURCE
}
input_arg {
name: "gradient_squared_accumulator"
type: DT_RESOURCE
}
input_arg {
name: "grad"
type_attr: "T"
}
input_arg {
name: "lr"
type_attr: "T"
}
input_arg {
name: "l1"
type_attr: "T"
}
input_arg {
name: "l2"
type_attr: "T"
}
input_arg {
name: "global_step"
type: DT_INT64
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
is_stateful: true
}
op {
name: "ResourceApplyAdam"
input_arg {
name: "var"
type: DT_RESOURCE
}
input_arg {
name: "m"
type: DT_RESOURCE
}
input_arg {
name: "v"
type: DT_RESOURCE
}
input_arg {
name: "beta1_power"
type_attr: "T"
}
input_arg {
name: "beta2_power"
type_attr: "T"
}
input_arg {
name: "lr"
type_attr: "T"
}
input_arg {
name: "beta1"
type_attr: "T"
}
input_arg {
name: "beta2"
type_attr: "T"
}
input_arg {
name: "epsilon"
type_attr: "T"
}
input_arg {
name: "grad"
type_attr: "T"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
attr {
name: "use_nesterov"
type: "bool"
default_value {
b: false
}
}
is_stateful: true
}
op {
name: "ResourceApplyCenteredRMSProp"
input_arg {
name: "var"
type: DT_RESOURCE
}
input_arg {
name: "mg"
type: DT_RESOURCE
}
input_arg {
name: "ms"
type: DT_RESOURCE
}
input_arg {
name: "mom"
type: DT_RESOURCE
}
input_arg {
name: "lr"
type_attr: "T"
}
input_arg {
name: "rho"
type_attr: "T"
}
input_arg {
name: "momentum"
type_attr: "T"
}
input_arg {
name: "epsilon"
type_attr: "T"
}
input_arg {
name: "grad"
type_attr: "T"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
is_stateful: true
}
op {
name: "ResourceApplyFtrl"
input_arg {
name: "var"
type: DT_RESOURCE
}
input_arg {
name: "accum"
type: DT_RESOURCE
}
input_arg {
name: "linear"
type: DT_RESOURCE
}
input_arg {
name: "grad"
type_attr: "T"
}
input_arg {
name: "lr"
type_attr: "T"
}
input_arg {
name: "l1"
type_attr: "T"
}
input_arg {
name: "l2"
type_attr: "T"
}
input_arg {
name: "lr_power"
type_attr: "T"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
is_stateful: true
}
op {
name: "ResourceApplyGradientDescent"
input_arg {
name: "var"
type: DT_RESOURCE
}
input_arg {
name: "alpha"
type_attr: "T"
}
input_arg {
name: "delta"
type_attr: "T"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
is_stateful: true
}
op {
name: "ResourceApplyMomentum"
input_arg {
name: "var"
type: DT_RESOURCE
}
input_arg {
name: "accum"
type: DT_RESOURCE
}
input_arg {
name: "lr"
type_attr: "T"
}
input_arg {
name: "grad"
type_attr: "T"
}
input_arg {
name: "momentum"
type_attr: "T"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
attr {
name: "use_nesterov"
type: "bool"
default_value {
b: false
}
}
is_stateful: true
}
op {
name: "ResourceApplyProximalAdagrad"
input_arg {
name: "var"
type: DT_RESOURCE
}
input_arg {
name: "accum"
type: DT_RESOURCE
}
input_arg {
name: "lr"
type_attr: "T"
}
input_arg {
name: "l1"
type_attr: "T"
}
input_arg {
name: "l2"
type_attr: "T"
}
input_arg {
name: "grad"
type_attr: "T"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
is_stateful: true
}
op {
name: "ResourceApplyProximalGradientDescent"
input_arg {
name: "var"
type: DT_RESOURCE
}
input_arg {
name: "alpha"
type_attr: "T"
}
input_arg {
name: "l1"
type_attr: "T"
}
input_arg {
name: "l2"
type_attr: "T"
}
input_arg {
name: "delta"
type_attr: "T"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
is_stateful: true
}
op {
name: "ResourceApplyRMSProp"
input_arg {
name: "var"
type: DT_RESOURCE
}
input_arg {
name: "ms"
type: DT_RESOURCE
}
input_arg {
name: "mom"
type: DT_RESOURCE
}
input_arg {
name: "lr"
type_attr: "T"
}
input_arg {
name: "rho"
type_attr: "T"
}
input_arg {
name: "momentum"
type_attr: "T"
}
input_arg {
name: "epsilon"
type_attr: "T"
}
input_arg {
name: "grad"
type_attr: "T"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
is_stateful: true
}
op {
name: "ResourceSparseApplyAdadelta"
input_arg {
name: "var"
type: DT_RESOURCE
}
input_arg {
name: "accum"
type: DT_RESOURCE
}
input_arg {
name: "accum_update"
type: DT_RESOURCE
}
input_arg {
name: "lr"
type_attr: "T"
}
input_arg {
name: "rho"
type_attr: "T"
}
input_arg {
name: "epsilon"
type_attr: "T"
}
input_arg {
name: "grad"
type_attr: "T"
}
input_arg {
name: "indices"
type_attr: "Tindices"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "Tindices"
type: "type"
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
is_stateful: true
}
op {
name: "ResourceSparseApplyAdagrad"
input_arg {
name: "var"
type: DT_RESOURCE
}
input_arg {
name: "accum"
type: DT_RESOURCE
}
input_arg {
name: "lr"
type_attr: "T"
}
input_arg {
name: "grad"
type_attr: "T"
}
input_arg {
name: "indices"
type_attr: "Tindices"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "Tindices"
type: "type"
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
is_stateful: true
}
op {
name: "ResourceSparseApplyAdagradDA"
input_arg {
name: "var"
type: DT_RESOURCE
}
input_arg {
name: "gradient_accumulator"
type: DT_RESOURCE
}
input_arg {
name: "gradient_squared_accumulator"
type: DT_RESOURCE
}
input_arg {
name: "grad"
type_attr: "T"
}
input_arg {
name: "indices"
type_attr: "Tindices"
}
input_arg {
name: "lr"
type_attr: "T"
}
input_arg {
name: "l1"
type_attr: "T"
}
input_arg {
name: "l2"
type_attr: "T"
}
input_arg {
name: "global_step"
type: DT_INT64
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "Tindices"
type: "type"
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
is_stateful: true
}
op {
name: "ResourceSparseApplyCenteredRMSProp"
input_arg {
name: "var"
type: DT_RESOURCE
}
input_arg {
name: "mg"
type: DT_RESOURCE
}
input_arg {
name: "ms"
type: DT_RESOURCE
}
input_arg {
name: "mom"
type: DT_RESOURCE
}
input_arg {
name: "lr"
type_attr: "T"
}
input_arg {
name: "rho"
type_attr: "T"
}
input_arg {
name: "momentum"
type_attr: "T"
}
input_arg {
name: "epsilon"
type_attr: "T"
}
input_arg {
name: "grad"
type_attr: "T"
}
input_arg {
name: "indices"
type_attr: "Tindices"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "Tindices"
type: "type"
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
is_stateful: true
}
op {
name: "ResourceSparseApplyFtrl"
input_arg {
name: "var"
type: DT_RESOURCE
}
input_arg {
name: "accum"
type: DT_RESOURCE
}
input_arg {
name: "linear"
type: DT_RESOURCE
}
input_arg {
name: "grad"
type_attr: "T"
}
input_arg {
name: "indices"
type_attr: "Tindices"
}
input_arg {
name: "lr"
type_attr: "T"
}
input_arg {
name: "l1"
type_attr: "T"
}
input_arg {
name: "l2"
type_attr: "T"
}
input_arg {
name: "lr_power"
type_attr: "T"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "Tindices"
type: "type"
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
is_stateful: true
}
op {
name: "ResourceSparseApplyMomentum"
input_arg {
name: "var"
type: DT_RESOURCE
}
input_arg {
name: "accum"
type: DT_RESOURCE
}
input_arg {
name: "lr"
type_attr: "T"
}
input_arg {
name: "grad"
type_attr: "T"
}
input_arg {
name: "indices"
type_attr: "Tindices"
}
input_arg {
name: "momentum"
type_attr: "T"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "Tindices"
type: "type"
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
attr {
name: "use_nesterov"
type: "bool"
default_value {
b: false
}
}
is_stateful: true
}
op {
name: "ResourceSparseApplyProximalAdagrad"
input_arg {
name: "var"
type: DT_RESOURCE
}
input_arg {
name: "accum"
type: DT_RESOURCE
}
input_arg {
name: "lr"
type_attr: "T"
}
input_arg {
name: "l1"
type_attr: "T"
}
input_arg {
name: "l2"
type_attr: "T"
}
input_arg {
name: "grad"
type_attr: "T"
}
input_arg {
name: "indices"
type_attr: "Tindices"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "Tindices"
type: "type"
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
is_stateful: true
}
op {
name: "ResourceSparseApplyProximalGradientDescent"
input_arg {
name: "var"
type: DT_RESOURCE
}
input_arg {
name: "alpha"
type_attr: "T"
}
input_arg {
name: "l1"
type_attr: "T"
}
input_arg {
name: "l2"
type_attr: "T"
}
input_arg {
name: "grad"
type_attr: "T"
}
input_arg {
name: "indices"
type_attr: "Tindices"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "Tindices"
type: "type"
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
is_stateful: true
}
op {
name: "ResourceSparseApplyRMSProp"
input_arg {
name: "var"
type: DT_RESOURCE
}
input_arg {
name: "ms"
type: DT_RESOURCE
}
input_arg {
name: "mom"
type: DT_RESOURCE
}
input_arg {
name: "lr"
type_attr: "T"
}
input_arg {
name: "rho"
type_attr: "T"
}
input_arg {
name: "momentum"
type_attr: "T"
}
input_arg {
name: "epsilon"
type_attr: "T"
}
input_arg {
name: "grad"
type_attr: "T"
}
input_arg {
name: "indices"
type_attr: "Tindices"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "Tindices"
type: "type"
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
is_stateful: true
}
op {
name: "SparseApplyAdadelta"
input_arg {
name: "var"
type_attr: "T"
is_ref: true
}
input_arg {
name: "accum"
type_attr: "T"
is_ref: true
}
input_arg {
name: "accum_update"
type_attr: "T"
is_ref: true
}
input_arg {
name: "lr"
type_attr: "T"
}
input_arg {
name: "rho"
type_attr: "T"
}
input_arg {
name: "epsilon"
type_attr: "T"
}
input_arg {
name: "grad"
type_attr: "T"
}
input_arg {
name: "indices"
type_attr: "Tindices"
}
output_arg {
name: "out"
type_attr: "T"
is_ref: true
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "Tindices"
type: "type"
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
}
op {
name: "SparseApplyAdagrad"
input_arg {
name: "var"
type_attr: "T"
is_ref: true
}
input_arg {
name: "accum"
type_attr: "T"
is_ref: true
}
input_arg {
name: "lr"
type_attr: "T"
}
input_arg {
name: "grad"
type_attr: "T"
}
input_arg {
name: "indices"
type_attr: "Tindices"
}
output_arg {
name: "out"
type_attr: "T"
is_ref: true
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "Tindices"
type: "type"
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
}
op {
name: "SparseApplyAdagradDA"
input_arg {
name: "var"
type_attr: "T"
is_ref: true
}
input_arg {
name: "gradient_accumulator"
type_attr: "T"
is_ref: true
}
input_arg {
name: "gradient_squared_accumulator"
type_attr: "T"
is_ref: true
}
input_arg {
name: "grad"
type_attr: "T"
}
input_arg {
name: "indices"
type_attr: "Tindices"
}
input_arg {
name: "lr"
type_attr: "T"
}
input_arg {
name: "l1"
type_attr: "T"
}
input_arg {
name: "l2"
type_attr: "T"
}
input_arg {
name: "global_step"
type: DT_INT64
}
output_arg {
name: "out"
type_attr: "T"
is_ref: true
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "Tindices"
type: "type"
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
}
op {
name: "SparseApplyCenteredRMSProp"
input_arg {
name: "var"
type_attr: "T"
is_ref: true
}
input_arg {
name: "mg"
type_attr: "T"
is_ref: true
}
input_arg {
name: "ms"
type_attr: "T"
is_ref: true
}
input_arg {
name: "mom"
type_attr: "T"
is_ref: true
}
input_arg {
name: "lr"
type_attr: "T"
}
input_arg {
name: "rho"
type_attr: "T"
}
input_arg {
name: "momentum"
type_attr: "T"
}
input_arg {
name: "epsilon"
type_attr: "T"
}
input_arg {
name: "grad"
type_attr: "T"
}
input_arg {
name: "indices"
type_attr: "Tindices"
}
output_arg {
name: "out"
type_attr: "T"
is_ref: true
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "Tindices"
type: "type"
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
}
op {
name: "SparseApplyFtrl"
input_arg {
name: "var"
type_attr: "T"
is_ref: true
}
input_arg {
name: "accum"
type_attr: "T"
is_ref: true
}
input_arg {
name: "linear"
type_attr: "T"
is_ref: true
}
input_arg {
name: "grad"
type_attr: "T"
}
input_arg {
name: "indices"
type_attr: "Tindices"
}
input_arg {
name: "lr"
type_attr: "T"
}
input_arg {
name: "l1"
type_attr: "T"
}
input_arg {
name: "l2"
type_attr: "T"
}
input_arg {
name: "lr_power"
type_attr: "T"
}
output_arg {
name: "out"
type_attr: "T"
is_ref: true
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "Tindices"
type: "type"
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
}
op {
name: "SparseApplyMomentum"
input_arg {
name: "var"
type_attr: "T"
is_ref: true
}
input_arg {
name: "accum"
type_attr: "T"
is_ref: true
}
input_arg {
name: "lr"
type_attr: "T"
}
input_arg {
name: "grad"
type_attr: "T"
}
input_arg {
name: "indices"
type_attr: "Tindices"
}
input_arg {
name: "momentum"
type_attr: "T"
}
output_arg {
name: "out"
type_attr: "T"
is_ref: true
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "Tindices"
type: "type"
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
attr {
name: "use_nesterov"
type: "bool"
default_value {
b: false
}
}
}
op {
name: "SparseApplyProximalAdagrad"
input_arg {
name: "var"
type_attr: "T"
is_ref: true
}
input_arg {
name: "accum"
type_attr: "T"
is_ref: true
}
input_arg {
name: "lr"
type_attr: "T"
}
input_arg {
name: "l1"
type_attr: "T"
}
input_arg {
name: "l2"
type_attr: "T"
}
input_arg {
name: "grad"
type_attr: "T"
}
input_arg {
name: "indices"
type_attr: "Tindices"
}
output_arg {
name: "out"
type_attr: "T"
is_ref: true
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "Tindices"
type: "type"
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
}
op {
name: "SparseApplyProximalGradientDescent"
input_arg {
name: "var"
type_attr: "T"
is_ref: true
}
input_arg {
name: "alpha"
type_attr: "T"
}
input_arg {
name: "l1"
type_attr: "T"
}
input_arg {
name: "l2"
type_attr: "T"
}
input_arg {
name: "grad"
type_attr: "T"
}
input_arg {
name: "indices"
type_attr: "Tindices"
}
output_arg {
name: "out"
type_attr: "T"
is_ref: true
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "Tindices"
type: "type"
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
}
op {
name: "SparseApplyRMSProp"
input_arg {
name: "var"
type_attr: "T"
is_ref: true
}
input_arg {
name: "ms"
type_attr: "T"
is_ref: true
}
input_arg {
name: "mom"
type_attr: "T"
is_ref: true
}
input_arg {
name: "lr"
type_attr: "T"
}
input_arg {
name: "rho"
type_attr: "T"
}
input_arg {
name: "momentum"
type_attr: "T"
}
input_arg {
name: "epsilon"
type_attr: "T"
}
input_arg {
name: "grad"
type_attr: "T"
}
input_arg {
name: "indices"
type_attr: "Tindices"
}
output_arg {
name: "out"
type_attr: "T"
is_ref: true
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "Tindices"
type: "type"
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
attr {
name: "use_locking"
type: "bool"
default_value {
b: false
}
}
}
"""
# Module-level OpDefLibrary shared by every op wrapper defined in this file.
_op_def_lib = _InitOpDefLibrary()
| 27.385894
| 204
| 0.589164
| 16,285
| 121,922
| 4.272275
| 0.017992
| 0.056055
| 0.048984
| 0.035875
| 0.967229
| 0.965332
| 0.964757
| 0.963262
| 0.963262
| 0.962572
| 0
| 0.026376
| 0.305613
| 121,922
| 4,451
| 205
| 27.392047
| 0.795419
| 0.460893
| 0
| 0.74012
| 1
| 0
| 0.709018
| 0.024459
| 0
| 0
| 0
| 0
| 0
| 1
| 0.012962
| false
| 0
| 0.002213
| 0
| 0.028138
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
07c6d722fe94af3980b1cef95f2760f640cf4eca
| 6,377
|
py
|
Python
|
test/pyaz/dls/fs/__init__.py
|
bigdatamoore/py-az-cli
|
54383a4ee7cc77556f6183e74e992eec95b28e01
|
[
"MIT"
] | null | null | null |
test/pyaz/dls/fs/__init__.py
|
bigdatamoore/py-az-cli
|
54383a4ee7cc77556f6183e74e992eec95b28e01
|
[
"MIT"
] | 9
|
2021-09-24T16:37:24.000Z
|
2021-12-24T00:39:19.000Z
|
test/pyaz/dls/fs/__init__.py
|
bigdatamoore/py-az-cli
|
54383a4ee7cc77556f6183e74e992eec95b28e01
|
[
"MIT"
] | null | null | null |
import json, subprocess
from ... pyaz_utils import get_cli_name, get_params
def show(account, path):
    """Run ``az dls fs show`` for *path* on *account* and return the parsed JSON.

    Raises:
        Exception: carrying the CLI's stderr text when the command produced no stdout.
    """
    params = get_params(locals())
    # NOTE(security): the command string is built from caller-supplied values and
    # executed with shell=True; relies on get_params() to quote/escape its output.
    command = "az dls fs show " + params
    print(command)
    output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout = output.stdout.decode("utf-8")
    stderr = output.stderr.decode("utf-8")
    if stdout:
        return json.loads(stdout)
    else:
        # Dead print() calls that followed return/raise were removed.
        raise Exception(stderr)
def list(account, path):
    """Run ``az dls fs list`` for *path* on *account* and return the parsed JSON.

    Name intentionally shadows the builtin to mirror the CLI verb (public API).

    Raises:
        Exception: carrying the CLI's stderr text when the command produced no stdout.
    """
    params = get_params(locals())
    # NOTE(security): command string executed with shell=True; relies on
    # get_params() to quote/escape caller-supplied values.
    command = "az dls fs list " + params
    print(command)
    output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout = output.stdout.decode("utf-8")
    stderr = output.stderr.decode("utf-8")
    if stdout:
        return json.loads(stdout)
    else:
        # Dead print() calls that followed return/raise were removed.
        raise Exception(stderr)
def create(account, path, content=None, folder=None, force=None):
    """Run ``az dls fs create`` and return the parsed JSON result.

    Raises:
        Exception: carrying the CLI's stderr text when the command produced no stdout.
    """
    params = get_params(locals())
    # NOTE(security): command string executed with shell=True; relies on
    # get_params() to quote/escape caller-supplied values.
    command = "az dls fs create " + params
    print(command)
    output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout = output.stdout.decode("utf-8")
    stderr = output.stderr.decode("utf-8")
    if stdout:
        return json.loads(stdout)
    else:
        # Dead print() calls that followed return/raise were removed.
        raise Exception(stderr)
def append(account, path, content):
    """Run ``az dls fs append`` and return the parsed JSON result.

    Raises:
        Exception: carrying the CLI's stderr text when the command produced no stdout.
    """
    params = get_params(locals())
    # NOTE(security): command string executed with shell=True; relies on
    # get_params() to quote/escape caller-supplied values.
    command = "az dls fs append " + params
    print(command)
    output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout = output.stdout.decode("utf-8")
    stderr = output.stderr.decode("utf-8")
    if stdout:
        return json.loads(stdout)
    else:
        # Dead print() calls that followed return/raise were removed.
        raise Exception(stderr)
def delete(account, path, recurse=None):
    """Run ``az dls fs delete`` and return the parsed JSON result.

    Raises:
        Exception: carrying the CLI's stderr text when the command produced no stdout.
    """
    params = get_params(locals())
    # NOTE(security): command string executed with shell=True; relies on
    # get_params() to quote/escape caller-supplied values.
    command = "az dls fs delete " + params
    print(command)
    output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout = output.stdout.decode("utf-8")
    stderr = output.stderr.decode("utf-8")
    if stdout:
        return json.loads(stdout)
    else:
        # Dead print() calls that followed return/raise were removed.
        raise Exception(stderr)
def upload(account, source_path, destination_path, chunk_size=None, buffer_size=None, block_size=None, thread_count=None, overwrite=None, __PROGRESS_CALLBACK=None):
    """Run ``az dls fs upload`` and return the parsed JSON result.

    Raises:
        Exception: carrying the CLI's stderr text when the command produced no stdout.
    """
    params = get_params(locals())
    # NOTE(security): command string executed with shell=True; relies on
    # get_params() to quote/escape caller-supplied values.
    command = "az dls fs upload " + params
    print(command)
    output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout = output.stdout.decode("utf-8")
    stderr = output.stderr.decode("utf-8")
    if stdout:
        return json.loads(stdout)
    else:
        # Dead print() calls that followed return/raise were removed.
        raise Exception(stderr)
def download(account, source_path, destination_path, chunk_size=None, buffer_size=None, block_size=None, thread_count=None, overwrite=None, __PROGRESS_CALLBACK=None):
    """Run ``az dls fs download`` and return the parsed JSON result.

    Raises:
        Exception: carrying the CLI's stderr text when the command produced no stdout.
    """
    params = get_params(locals())
    # NOTE(security): command string executed with shell=True; relies on
    # get_params() to quote/escape caller-supplied values.
    command = "az dls fs download " + params
    print(command)
    output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout = output.stdout.decode("utf-8")
    stderr = output.stderr.decode("utf-8")
    if stdout:
        return json.loads(stdout)
    else:
        # Dead print() calls that followed return/raise were removed.
        raise Exception(stderr)
def test(account, path):
    """Run ``az dls fs test`` for *path* on *account* and return the parsed JSON.

    Raises:
        Exception: carrying the CLI's stderr text when the command produced no stdout.
    """
    params = get_params(locals())
    # NOTE(security): command string executed with shell=True; relies on
    # get_params() to quote/escape caller-supplied values.
    command = "az dls fs test " + params
    print(command)
    output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout = output.stdout.decode("utf-8")
    stderr = output.stderr.decode("utf-8")
    if stdout:
        return json.loads(stdout)
    else:
        # Dead print() calls that followed return/raise were removed.
        raise Exception(stderr)
def preview(account, path, length=None, offset=None, force=None):
    """Run ``az dls fs preview`` and return the parsed JSON result.

    Raises:
        Exception: carrying the CLI's stderr text when the command produced no stdout.
    """
    params = get_params(locals())
    # NOTE(security): command string executed with shell=True; relies on
    # get_params() to quote/escape caller-supplied values.
    command = "az dls fs preview " + params
    print(command)
    output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout = output.stdout.decode("utf-8")
    stderr = output.stderr.decode("utf-8")
    if stdout:
        return json.loads(stdout)
    else:
        # Dead print() calls that followed return/raise were removed.
        raise Exception(stderr)
def join(account, source_paths, destination_path, force=None):
    """Run ``az dls fs join`` and return the parsed JSON result.

    Raises:
        Exception: carrying the CLI's stderr text when the command produced no stdout.
    """
    params = get_params(locals())
    # NOTE(security): command string executed with shell=True; relies on
    # get_params() to quote/escape caller-supplied values.
    command = "az dls fs join " + params
    print(command)
    output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout = output.stdout.decode("utf-8")
    stderr = output.stderr.decode("utf-8")
    if stdout:
        return json.loads(stdout)
    else:
        # Dead print() calls that followed return/raise were removed.
        raise Exception(stderr)
def move(account, source_path, destination_path, force=None):
    """Run ``az dls fs move`` and return the parsed JSON result.

    Raises:
        Exception: carrying the CLI's stderr text when the command produced no stdout.
    """
    params = get_params(locals())
    # NOTE(security): command string executed with shell=True; relies on
    # get_params() to quote/escape caller-supplied values.
    command = "az dls fs move " + params
    print(command)
    output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout = output.stdout.decode("utf-8")
    stderr = output.stderr.decode("utf-8")
    if stdout:
        return json.loads(stdout)
    else:
        # Dead print() calls that followed return/raise were removed.
        raise Exception(stderr)
def set_expiry(account, path, expiration_time):
    """Run ``az dls fs set-expiry`` and return the parsed JSON result.

    Raises:
        Exception: carrying the CLI's stderr text when the command produced no stdout.
    """
    params = get_params(locals())
    # NOTE(security): command string executed with shell=True; relies on
    # get_params() to quote/escape caller-supplied values.
    command = "az dls fs set-expiry " + params
    print(command)
    output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout = output.stdout.decode("utf-8")
    stderr = output.stderr.decode("utf-8")
    if stdout:
        return json.loads(stdout)
    else:
        # Dead print() calls that followed return/raise were removed.
        raise Exception(stderr)
def remove_expiry(account, path):
    """Run ``az dls fs remove-expiry`` and return the parsed JSON result.

    Raises:
        Exception: carrying the CLI's stderr text when the command produced no stdout.
    """
    params = get_params(locals())
    # NOTE(security): command string executed with shell=True; relies on
    # get_params() to quote/escape caller-supplied values.
    command = "az dls fs remove-expiry " + params
    print(command)
    output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout = output.stdout.decode("utf-8")
    stderr = output.stderr.decode("utf-8")
    if stdout:
        return json.loads(stdout)
    else:
        # Dead print() calls that followed return/raise were removed.
        raise Exception(stderr)
| 34.284946
| 166
| 0.655324
| 785
| 6,377
| 5.270064
| 0.089172
| 0.087986
| 0.062847
| 0.06599
| 0.91709
| 0.91298
| 0.91298
| 0.91298
| 0.89606
| 0.886633
| 0
| 0.005267
| 0.225968
| 6,377
| 185
| 167
| 34.47027
| 0.832861
| 0
| 0
| 0.836257
| 0
| 0
| 0.055669
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076023
| false
| 0
| 0.011696
| 0
| 0.163743
| 0.22807
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
07cb9dee20e48de59575729856ec6e306b4d49d5
| 7,932
|
py
|
Python
|
dataset.py
|
vnigade/clownfish-3D-models
|
16c8c2e101af890e9bbf40842c164c912cd9ae91
|
[
"MIT"
] | null | null | null |
dataset.py
|
vnigade/clownfish-3D-models
|
16c8c2e101af890e9bbf40842c164c912cd9ae91
|
[
"MIT"
] | null | null | null |
dataset.py
|
vnigade/clownfish-3D-models
|
16c8c2e101af890e9bbf40842c164c912cd9ae91
|
[
"MIT"
] | null | null | null |
from datasets.kinetics import Kinetics
from datasets.activitynet import ActivityNet
from datasets.ucf101 import UCF101
from datasets.hmdb51 import HMDB51
from datasets.pkummd import PKUMMD
from datasets.pkummd_sim import PKUMMD_SIM
from datasets.pkummd_ed import PKUMMD_ED
def get_training_set(opt, spatial_transform, temporal_transform,
                     target_transform):
    """Construct the training-split dataset selected by ``opt.dataset``.

    Same contract as the original if/elif chain: each dataset name maps to
    its constructor, invoked with the same arguments.
    """
    assert opt.dataset in ['kinetics', 'activitynet', 'ucf101', 'hmdb51',
                           'pkummd', 'pkummd_sim', 'pkummd_ed']
    # (constructor, whether it takes an extra positional False flag
    #  between the subset name and the transforms)
    registry = {
        'kinetics': (Kinetics, False),
        'activitynet': (ActivityNet, True),
        'ucf101': (UCF101, False),
        'hmdb51': (HMDB51, False),
        'pkummd': (PKUMMD, True),
        'pkummd_sim': (PKUMMD_SIM, True),
        'pkummd_ed': (PKUMMD_ED, True),
    }
    dataset_cls, needs_flag = registry[opt.dataset]
    positional = [opt.video_path, opt.annotation_path, 'training']
    if needs_flag:
        positional.append(False)
    return dataset_cls(
        *positional,
        spatial_transform=spatial_transform,
        temporal_transform=temporal_transform,
        target_transform=target_transform)
def get_validation_set(opt, spatial_transform, temporal_transform,
                       target_transform):
    """Construct the validation-split dataset selected by ``opt.dataset``.

    Same contract as the original if/elif chain: each dataset name maps to
    its constructor, invoked with the same arguments (the transform and
    sample_duration keyword names match those used by the training builder).
    """
    assert opt.dataset in ['kinetics', 'activitynet', 'ucf101', 'hmdb51',
                           'pkummd', 'pkummd_sim', 'pkummd_ed']
    # (constructor, whether it takes an extra positional False flag
    #  between the subset name and n_val_samples)
    registry = {
        'kinetics': (Kinetics, False),
        'activitynet': (ActivityNet, True),
        'ucf101': (UCF101, False),
        'hmdb51': (HMDB51, False),
        'pkummd': (PKUMMD, True),
        'pkummd_sim': (PKUMMD_SIM, True),
        'pkummd_ed': (PKUMMD_ED, True),
    }
    dataset_cls, needs_flag = registry[opt.dataset]
    positional = [opt.video_path, opt.annotation_path, 'validation']
    if needs_flag:
        positional.append(False)
    positional.append(opt.n_val_samples)
    return dataset_cls(
        *positional,
        spatial_transform=spatial_transform,
        temporal_transform=temporal_transform,
        target_transform=target_transform,
        sample_duration=opt.sample_duration)
def get_test_set(opt, spatial_transform, temporal_transform, target_transform):
    """Construct the test-time dataset for ``opt.dataset`` on ``opt.test_subset``.

    ``opt.test_subset`` ('val'/'test'/'train') is mapped to the dataset's
    internal subset name. Returns the constructed dataset object.

    Raises:
        AssertionError: for dataset/subset names outside the supported sets.
        NotImplementedError: for the pkummd_sim / pkummd_ed branches (kept for
            symmetry with the train/val builders, though the assert above
            currently excludes those names).
    """
    assert opt.dataset in ['kinetics',
                           'activitynet', 'ucf101', 'hmdb51', 'pkummd']
    assert opt.test_subset in ['val', 'test', 'train']

    if opt.test_subset == 'val':
        subset = 'validation'
    elif opt.test_subset == 'test':
        subset = 'testing'
    elif opt.test_subset == 'train':
        subset = "training"

    if opt.dataset == 'kinetics':
        test_data = Kinetics(
            opt.video_path,
            opt.annotation_path,
            subset,
            0,
            spatial_transform,
            temporal_transform,
            target_transform,
            sample_duration=opt.sample_duration)
    elif opt.dataset == 'activitynet':
        test_data = ActivityNet(
            opt.video_path,
            opt.annotation_path,
            subset,
            True,
            0,
            spatial_transform,
            temporal_transform,
            target_transform,
            sample_duration=opt.sample_duration)
    elif opt.dataset == 'ucf101':
        test_data = UCF101(
            opt.video_path,
            opt.annotation_path,
            subset,
            0,
            spatial_transform,
            temporal_transform,
            target_transform,
            sample_duration=opt.sample_duration)
    elif opt.dataset == 'hmdb51':
        test_data = HMDB51(
            opt.video_path,
            opt.annotation_path,
            subset,
            0,
            spatial_transform,
            temporal_transform,
            target_transform,
            sample_duration=opt.sample_duration)
    elif opt.dataset == 'pkummd':
        test_data = PKUMMD(
            opt.video_path,
            opt.annotation_path,
            subset,
            True,
            0,
            spatial_transform=spatial_transform,
            temporal_transform=temporal_transform,
            target_transform=target_transform,
            sample_duration=opt.sample_duration,
            window_size=opt.window_size,
            window_stride=opt.window_stride,
            scores_dump_path=(opt.root_path + "/" + opt.scores_dump_path))
    elif opt.dataset == "pkummd_sim":
        # BUG FIX: was `raise NotImplemented()` -- NotImplemented is a sentinel
        # value, not an exception class, so calling it raised TypeError instead.
        raise NotImplementedError()
    elif opt.dataset == "pkummd_ed":
        raise NotImplementedError()
    return test_data
| 34.042918
| 79
| 0.589385
| 736
| 7,932
| 6.033967
| 0.067935
| 0.118892
| 0.1932
| 0.163477
| 0.823914
| 0.811529
| 0.811529
| 0.811529
| 0.773024
| 0.761765
| 0
| 0.011398
| 0.336359
| 7,932
| 232
| 80
| 34.189655
| 0.832257
| 0
| 0
| 0.78733
| 0
| 0
| 0.062153
| 0
| 0
| 0
| 0
| 0
| 0.0181
| 1
| 0.013575
| false
| 0
| 0.031674
| 0
| 0.058824
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
07e60592979d6302face2bc7260354ae28c06b62
| 97
|
py
|
Python
|
torchexpo/vision/__init__.py
|
torchexpo/torchexpo
|
88c875358e830065ee23f49f47d4995b5b2d3e3c
|
[
"Apache-2.0"
] | 23
|
2020-09-08T05:08:46.000Z
|
2021-08-12T07:16:53.000Z
|
torchexpo/vision/__init__.py
|
torchexpo/torchexpo
|
88c875358e830065ee23f49f47d4995b5b2d3e3c
|
[
"Apache-2.0"
] | 1
|
2021-12-05T06:15:18.000Z
|
2021-12-20T08:10:19.000Z
|
torchexpo/vision/__init__.py
|
torchexpo/torchexpo
|
88c875358e830065ee23f49f47d4995b5b2d3e3c
|
[
"Apache-2.0"
] | 2
|
2021-01-12T06:10:53.000Z
|
2021-07-24T08:21:59.000Z
|
from torchexpo.vision import image_classification
from torchexpo.vision import image_segmentation
| 48.5
| 49
| 0.907216
| 12
| 97
| 7.166667
| 0.583333
| 0.302326
| 0.44186
| 0.581395
| 0.697674
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.072165
| 97
| 2
| 50
| 48.5
| 0.955556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
07f8fe81aba0e53de36c8047633173d7e8ab1b36
| 19,208
|
py
|
Python
|
pkgs/conf-pkg/src/genie/libs/conf/evpn/iosxr/evpn.py
|
miott/genielibs
|
6464642cdd67aa2367bdbb12561af4bb060e5e62
|
[
"Apache-2.0"
] | 94
|
2018-04-30T20:29:15.000Z
|
2022-03-29T13:40:31.000Z
|
pkgs/conf-pkg/src/genie/libs/conf/evpn/iosxr/evpn.py
|
miott/genielibs
|
6464642cdd67aa2367bdbb12561af4bb060e5e62
|
[
"Apache-2.0"
] | 67
|
2018-12-06T21:08:09.000Z
|
2022-03-29T18:00:46.000Z
|
pkgs/conf-pkg/src/genie/libs/conf/evpn/iosxr/evpn.py
|
miott/genielibs
|
6464642cdd67aa2367bdbb12561af4bb060e5e62
|
[
"Apache-2.0"
] | 49
|
2018-06-29T18:59:03.000Z
|
2022-03-10T02:07:59.000Z
|
"""Implement IOS-XR (iosxr) Specific Configurations for Evpn objects.
"""
# Table of contents:
# class Evpn:
# class InterfaceAttributes:
# def build_config/build_unconfig:
# class EthernetSegmentAttributes:
# def build_config/build_unconfig:
# class BgpAttributes:
# def build_config/build_unconfig:
# class PseudowireNeighborAttributes:
# def build_config/build_unconfig:
# class EthernetSegmentAttributes:
# def build_config/build_unconfig:
# class BgpAttributes:
# def build_config/build_unconfig:
# class VfiAttributes:
# def build_config/build_unconfig:
# class EthernetSegmentAttributes:
# def build_config/build_unconfig:
# class BgpAttributes:
# def build_config/build_unconfig:
# class DeviceAttributes:
# def build_config/build_unconfig:
# class BgpAttributes:
# def build_config/build_unconfig:
# class LoadBalancingAttributes:
# def build_config/build_unconfig:
from abc import ABC
import warnings
from genie.conf.base.attributes import UnsupportedAttributeWarning, AttributesHelper
from genie.conf.base.cli import CliConfigBuilder
from genie.conf.base.config import CliConfig
class Evpn(ABC):
    """IOS-XR (iosxr) CLI configuration builders for Evpn conf objects.

    Each nested *Attributes class renders one CLI submode of the `evpn`
    configuration tree; ``build_config`` returns the rendered CLI text (or,
    for DeviceAttributes with apply=True, pushes it to the device) and
    ``build_unconfig`` re-runs ``build_config`` with ``unconfig=True`` to
    emit the corresponding removal form.
    """

    class InterfaceAttributes(ABC):
        """`evpn / interface <name>` attachment-circuit submode."""

        def build_config(self, apply=True, attributes=None, unconfig=False, **kwargs):
            """Render the `interface {interface_name}` submode CLI text."""
            attributes = AttributesHelper(self, attributes)
            configurations = CliConfigBuilder(unconfig=unconfig)
            # iosxr: evpn / interface Bundle-Ether1 (config-evpn-ac)
            with configurations.submode_context(attributes.format('interface {interface_name}', force=True)):
                if unconfig and attributes.iswildcard:
                    configurations.submode_unconfig()
                # iosxr: evpn / interface Bundle-Ether1 / ethernet-segment (config-evpn-ac-es)
                ns, attributes2 = attributes.namespace('ethernet_segment')
                if ns is not None:
                    configurations.append_block(ns.build_config(apply=False, attributes=attributes2, unconfig=unconfig, **kwargs))
                # iosxr: evpn / interface Bundle-Ether1 / mac-flush mvrp
                configurations.append_line(attributes.format('mac-flush {mac_flush}'))
                # iosxr: evpn / interface Bundle-Ether1 / timers (config-evpn-ac-timers)
                with configurations.submode_context('timers', cancel_empty=True):
                    # iosxr: evpn / interface Bundle-Ether1 / timers / recovery 20
                    configurations.append_line(attributes.format('recovery {recovery_timer}', inherited=False))
                    # peering is not supported at the interface level on iosxr
                    if attributes.value('peering_timer', inherited=False) is not None:
                        warnings.warn(
                            'evpn interface peering_timer',
                            UnsupportedAttributeWarning)
            return str(configurations)

        def build_unconfig(self, *args, **kwargs):
            """Render the removal ("no ...") form of this submode."""
            return self.build_config(*args, unconfig=True, **kwargs)

        class EthernetSegmentAttributes(ABC):
            """`ethernet-segment` submode under an interface."""

            def build_config(self, apply=True, attributes=None, unconfig=False, **kwargs):
                """Render the ethernet-segment submode; apply must be False."""
                assert not apply
                attributes = AttributesHelper(self, attributes)
                configurations = CliConfigBuilder(unconfig=unconfig)
                # iosxr: evpn / interface Bundle-Ether1 / ethernet-segment (config-evpn-ac-es)
                with configurations.submode_context('ethernet-segment'):
                    if not attributes.value('enabled', force=True):
                        configurations.submode_cancel()
                    if unconfig and attributes.iswildcard:
                        configurations.submode_unconfig()
                    # iosxr: evpn / interface Bundle-Ether1 / ethernet-segment / backbone-source-mac aaaa.bbbb.cccc
                    configurations.append_line(attributes.format('backbone-source-mac {backbone_source_mac}'))
                    # iosxr: evpn / interface Bundle-Ether1 / ethernet-segment / bgp route-target aaaa.bbbb.cccc
                    ns, attributes2 = attributes.namespace('bgp')
                    if ns is not None:
                        configurations.append_block(ns.build_config(apply=False, attributes=attributes2, unconfig=unconfig, **kwargs))
                    # iosxr: evpn / interface Bundle-Ether1 / ethernet-segment / force single-homed
                    if attributes.value('force_single_homed'):
                        configurations.append_line('force single-homed')
                    # iosxr: evpn / interface Bundle-Ether1 / ethernet-segment / identifier type 0 00.11.22.33.44.55.66.77.88
                    configurations.append_line(attributes.format('identifier type {esi.type} {esi.dotted}'))
                    # iosxr: evpn / interface Bundle-Ether1 / ethernet-segment / load-balancing-mode single-active
                    configurations.append_line(attributes.format('load-balancing-mode {load_balancing_mode}'))
                    # iosxr: evpn / interface Bundle-Ether1 / ethernet-segment / service-carving manual (config-evpn-ac-es-vlan-man)
                    # iosxr: evpn / interface Bundle-Ether1 / ethernet-segment / service-carving manual / primary someword secondary someword2
                return str(configurations)

            def build_unconfig(self, *args, **kwargs):
                """Render the removal ("no ...") form of this submode."""
                return self.build_config(*args, unconfig=True, **kwargs)

            class BgpAttributes(ABC):
                """`bgp route-target` line under ethernet-segment."""

                def build_config(self, apply=True, attributes=None, unconfig=False, **kwargs):
                    """Render the bgp route-target line; apply must be False."""
                    assert not apply
                    attributes = AttributesHelper(self, attributes)
                    configurations = CliConfigBuilder(unconfig=unconfig)
                    if attributes.value('enabled', force=True):
                        # iosxr: evpn / interface Bundle-Ether1 / ethernet-segment / bgp route-target aaaa.bbbb.cccc
                        configurations.append_line(attributes.format('bgp route-target {import_route_target}'))
                    return str(configurations)

                def build_unconfig(self, *args, **kwargs):
                    """Render the removal ("no ...") form of this line."""
                    return self.build_config(*args, unconfig=True, **kwargs)

    class PseudowireNeighborAttributes(ABC):
        """`evpn / virtual neighbor <ip> pw-id <id>` submode."""

        def build_config(self, apply=True, attributes=None, unconfig=False, **kwargs):
            """Render the `virtual neighbor {ip} pw-id {pw_id}` submode CLI text."""
            attributes = AttributesHelper(self, attributes)
            configurations = CliConfigBuilder(unconfig=unconfig)
            # virtual neighbor 70.70.70.70 pw-id 17300005
            with configurations.submode_context(attributes.format('virtual neighbor {ip} pw-id {pw_id}', force=True)):
                if unconfig and attributes.iswildcard:
                    configurations.submode_unconfig()
                # iosxr: evpn / virtual neighbor 70.70.70.70 pw-id 17300005 / ethernet-segment (config-evpn-ac-es)
                ns, attributes2 = attributes.namespace('ethernet_segment')
                if ns is not None:
                    configurations.append_block(ns.build_config(apply=False, attributes=attributes2, unconfig=unconfig, **kwargs))
            return str(configurations)

        def build_unconfig(self, *args, **kwargs):
            """Render the removal ("no ...") form of this submode."""
            return self.build_config(*args, unconfig=True, **kwargs)

        class EthernetSegmentAttributes(ABC):
            """`ethernet-segment` submode under a pseudowire neighbor."""

            def build_config(self, apply=True, attributes=None, unconfig=False, **kwargs):
                """Render the ethernet-segment submode; apply must be False."""
                assert not apply
                attributes = AttributesHelper(self, attributes)
                configurations = CliConfigBuilder(unconfig=unconfig)
                # iosxr: evpn / virtual neighbor 70.70.70.70 pw-id 17300005 / ethernet-segment (config-evpn-ac-es)
                with configurations.submode_context('ethernet-segment'):
                    if not attributes.value('enabled', force=True):
                        configurations.submode_cancel()
                    if unconfig and attributes.iswildcard:
                        configurations.submode_unconfig()
                    # iosxr: evpn / virtual neighbor 70.70.70.70 pw-id 17300005 / ethernet-segment / bgp route-target aaaa.bbbb.cccc
                    ns, attributes2 = attributes.namespace('bgp')
                    if ns is not None:
                        configurations.append_block(ns.build_config(apply=False, attributes=attributes2, unconfig=unconfig, **kwargs))
                    # iosxr: evpn / virtual neighbor 70.70.70.70 pw-id 17300005 / ethernet-segment / identifier type 0 00.11.22.33.44.55.66.77.88
                    configurations.append_line(attributes.format('identifier type {esi.type} {esi.dotted}'))
                return str(configurations)

            def build_unconfig(self, *args, **kwargs):
                """Render the removal ("no ...") form of this submode."""
                return self.build_config(*args, unconfig=True, **kwargs)

            class BgpAttributes(ABC):
                """`bgp route-target` line under ethernet-segment."""

                def build_config(self, apply=True, attributes=None, unconfig=False, **kwargs):
                    """Render the bgp route-target line; apply must be False."""
                    assert not apply
                    attributes = AttributesHelper(self, attributes)
                    configurations = CliConfigBuilder(unconfig=unconfig)
                    if attributes.value('enabled', force=True):
                        # iosxr: evpn / interface Bundle-Ether1 / ethernet-segment / bgp route-target aaaa.bbbb.cccc
                        configurations.append_line(attributes.format('bgp route-target {import_route_target}'))
                    return str(configurations)

                def build_unconfig(self, *args, **kwargs):
                    """Render the removal ("no ...") form of this line."""
                    return self.build_config(*args, unconfig=True, **kwargs)

    class VfiAttributes(ABC):
        """`evpn / virtual vfi <name>` submode."""

        def build_config(self, apply=True, attributes=None, unconfig=False, **kwargs):
            """Render the `virtual vfi {vfi_name}` submode CLI text."""
            attributes = AttributesHelper(self, attributes)
            configurations = CliConfigBuilder(unconfig=unconfig)
            # virtual vfi ac-vfi-5
            with configurations.submode_context(attributes.format('virtual vfi {vfi_name}', force=True)):
                if unconfig and attributes.iswildcard:
                    configurations.submode_unconfig()
                # iosxr: evpn / virtual vfi ac-vfi-5 / ethernet-segment (config-evpn-ac-es)
                ns, attributes2 = attributes.namespace('ethernet_segment')
                if ns is not None:
                    configurations.append_block(ns.build_config(apply=False, attributes=attributes2, unconfig=unconfig, **kwargs))
            return str(configurations)

        def build_unconfig(self, *args, **kwargs):
            """Render the removal ("no ...") form of this submode."""
            return self.build_config(*args, unconfig=True, **kwargs)

        class EthernetSegmentAttributes(ABC):
            """`ethernet-segment` submode under a virtual vfi."""

            def build_config(self, apply=True, attributes=None, unconfig=False, **kwargs):
                """Render the ethernet-segment submode; apply must be False."""
                assert not apply
                attributes = AttributesHelper(self, attributes)
                configurations = CliConfigBuilder(unconfig=unconfig)
                # iosxr: evpn / virtual vfi ac-vfi-5 / ethernet-segment (config-evpn-ac-es)
                with configurations.submode_context('ethernet-segment'):
                    if not attributes.value('enabled', force=True):
                        configurations.submode_cancel()
                    if unconfig and attributes.iswildcard:
                        configurations.submode_unconfig()
                    # iosxr: evpn / virtual vfi ac-vfi-5 / ethernet-segment / bgp route-target aaaa.bbbb.cccc
                    ns, attributes2 = attributes.namespace('bgp')
                    if ns is not None:
                        configurations.append_block(ns.build_config(apply=False, attributes=attributes2, unconfig=unconfig, **kwargs))
                    # iosxr: evpn / virtual vfi ac-vfi-5 / ethernet-segment / identifier type 0 00.11.22.33.44.55.66.77.88
                    configurations.append_line(attributes.format('identifier type {esi.type} {esi.dotted}'))
                return str(configurations)

            def build_unconfig(self, *args, **kwargs):
                """Render the removal ("no ...") form of this submode."""
                return self.build_config(*args, unconfig=True, **kwargs)

            class BgpAttributes(ABC):
                """`bgp route-target` line under ethernet-segment."""

                def build_config(self, apply=True, attributes=None, unconfig=False, **kwargs):
                    """Render the bgp route-target line; apply must be False."""
                    assert not apply
                    attributes = AttributesHelper(self, attributes)
                    configurations = CliConfigBuilder(unconfig=unconfig)
                    if attributes.value('enabled', force=True):
                        # iosxr: evpn / interface Bundle-Ether1 / ethernet-segment / bgp route-target aaaa.bbbb.cccc
                        configurations.append_line(attributes.format('bgp route-target {import_route_target}'))
                    return str(configurations)

                def build_unconfig(self, *args, **kwargs):
                    """Render the removal ("no ...") form of this line."""
                    return self.build_config(*args, unconfig=True, **kwargs)

    class DeviceAttributes(ABC):
        """Top-level `evpn` block for one device; the only builder that can apply."""

        def build_config(self, interfaces=None,
                         apply=True, attributes=None, unconfig=False, **kwargs):
            """Render the `evpn` block; apply=True pushes it to self.device,
            apply=False returns a CliConfig instead."""
            # assert not apply
            attributes = AttributesHelper(self, attributes)
            configurations = CliConfigBuilder(unconfig=unconfig)
            # restrict to the requested interfaces (default: all of them)
            if interfaces is None:
                interfaces = set(self.interfaces)
            else:
                interfaces = set(self.interfaces).intersection(interfaces)
            # iosxr: evpn (config-evpn)
            with configurations.submode_context('evpn'):
                if unconfig and attributes.iswildcard:
                    configurations.submode_unconfig()
                # iosxr: evpn / bgp (config-evpn-bgp)
                ns, attributes2 = attributes.namespace('bgp')
                if ns is not None:
                    configurations.append_block(ns.build_config(apply=False, attributes=attributes2, unconfig=unconfig, **kwargs))
                # iosxr: evpn / evi 1 (config-evpn-evi)
                for evi, attributes2 in attributes.sequence_values('evis', sort=True):
                    if unconfig:
                        configurations.append_block(evi.build_unconfig(apply=False, attributes=attributes2))
                    else:
                        configurations.append_block(evi.build_config(apply=False, attributes=attributes2))
                # iosxr: evpn / interface Bundle-Ether1 (config-evpn-ac)
                for sub, attributes2 in attributes.mapping_values('interface_attr', keys=interfaces, sort=True):
                    configurations.append_block(sub.build_config(apply=False, attributes=attributes2, unconfig=unconfig, **kwargs))
                # iosxr: evpn / virtual neighbor 70.70.70.70 pw-id 17300005
                for sub, attributes2 in attributes.mapping_values('pw_neighbor_attr', sort=True):
                    configurations.append_block(sub.build_config(apply=False, attributes=attributes2, unconfig=unconfig, **kwargs))
                # iosxr: evpn / virtual vfi ac-vfi-5
                for sub, attributes2 in attributes.mapping_values('vfi_attr', sort=True):
                    configurations.append_block(sub.build_config(apply=False, attributes=attributes2, unconfig=unconfig, **kwargs))
                # iosxr: evpn / timers (config-evpn-timers)
                with configurations.submode_context('timers', cancel_empty=True):
                    # iosxr: evpn / timers / recovery 20
                    configurations.append_line(attributes.format('recovery {recovery_timer}'))
                    # iosxr: evpn / timers / peering <0-300>
                    configurations.append_line(attributes.format('peering {peering_timer}'))
                # iosxr: evpn / source interface Loopback0
                configurations.append_line(attributes.format('source interface {source_interface.name}'))
                # iosxr: evpn / load-balancing (config-evpn-lb)
                ns, attributes2 = attributes.namespace('load_balancing')
                if ns is not None:
                    configurations.append_block(ns.build_config(apply=False, attributes=attributes2, unconfig=unconfig, **kwargs))
                # iosxr: evpn / bgp (config-evpn-bgp)
                ns, attributes2 = attributes.namespace('bgp')
                if ns is not None:
                    configurations.append_block(ns.build_config(apply=False, attributes=attributes2, unconfig=unconfig, **kwargs))
            if apply:
                if configurations:
                    self.device.configure(configurations, fail_invalid=True)
            else:
                return CliConfig(device=self.device, unconfig=unconfig,
                                 cli_config=configurations, fail_invalid=True)

        def build_unconfig(self, *args, **kwargs):
            """Render/apply the removal ("no ...") form of the evpn block."""
            return self.build_config(*args, unconfig=True, **kwargs)

        class BgpAttributes(ABC):
            """`evpn / bgp` submode (route-distinguisher)."""

            def build_config(self, apply=True, attributes=None, unconfig=False, **kwargs):
                """Render the bgp submode; apply must be False."""
                assert not apply
                attributes = AttributesHelper(self, attributes)
                configurations = CliConfigBuilder(unconfig=unconfig)
                # iosxr: evpn / bgp (config-evpn-bgp)
                with configurations.submode_context('bgp'):
                    if not attributes.value('enabled', force=True):
                        configurations.submode_cancel()
                    if unconfig and attributes.iswildcard:
                        configurations.submode_unconfig()
                    # iosxr: evpn / bgp / rd 100:200000
                    # iosxr: evpn / bgp / rd 65536:200
                    # iosxr: evpn / bgp / rd 1.2.3.4:1
                    configurations.append_line(attributes.format('rd {rd}'))
                return str(configurations)

            def build_unconfig(self, *args, **kwargs):
                """Render the removal ("no ...") form of this submode."""
                return self.build_config(*args, unconfig=True, **kwargs)

        class LoadBalancingAttributes(ABC):
            """`evpn / load-balancing` submode."""

            def build_config(self, apply=True, attributes=None, unconfig=False, **kwargs):
                """Render the load-balancing submode; apply must be False."""
                assert not apply
                attributes = AttributesHelper(self, attributes)
                configurations = CliConfigBuilder(unconfig=unconfig)
                # iosxr: evpn / load-balancing (config-evpn-lb)
                with configurations.submode_context('load-balancing'):
                    if not attributes.value('enabled', force=True):
                        configurations.submode_cancel()
                    if unconfig and attributes.iswildcard:
                        configurations.submode_unconfig()
                    # iosxr: evpn / load-balancing / flow-label static
                    if attributes.value('flow_label_static'):
                        configurations.append_line('flow-label static')
                return str(configurations)

            def build_unconfig(self, *args, **kwargs):
                """Render the removal ("no ...") form of this submode."""
                return self.build_config(*args, unconfig=True, **kwargs)
| 50.151436
| 145
| 0.604071
| 1,856
| 19,208
| 6.160022
| 0.088362
| 0.047144
| 0.029389
| 0.035686
| 0.83915
| 0.810199
| 0.797341
| 0.762267
| 0.752996
| 0.750547
| 0
| 0.017691
| 0.305498
| 19,208
| 382
| 146
| 50.282723
| 0.839355
| 0.212307
| 0
| 0.757143
| 0
| 0
| 0.060058
| 0.008504
| 0
| 0
| 0
| 0
| 0.038095
| 1
| 0.114286
| false
| 0
| 0.038095
| 0.057143
| 0.328571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6aff8689ce57a8af50f9edbc8227b46fe4a08414
| 2,391
|
py
|
Python
|
ProgettoLube/WebInspector/venv/Lib/site-packages/tensorflow/_api/v2/compat/v1/debugging/__init__.py
|
Lube-Project/ProgettoLube
|
cbf33971e2c2e865783ec1a2302625539186a338
|
[
"MIT"
] | 2
|
2020-09-30T00:11:09.000Z
|
2021-10-04T13:00:38.000Z
|
ProgettoLube/WebInspector/venv/Lib/site-packages/tensorflow/_api/v2/compat/v1/debugging/__init__.py
|
Lube-Project/ProgettoLube
|
cbf33971e2c2e865783ec1a2302625539186a338
|
[
"MIT"
] | null | null | null |
ProgettoLube/WebInspector/venv/Lib/site-packages/tensorflow/_api/v2/compat/v1/debugging/__init__.py
|
Lube-Project/ProgettoLube
|
cbf33971e2c2e865783ec1a2302625539186a338
|
[
"MIT"
] | 1
|
2021-01-28T01:57:41.000Z
|
2021-01-28T01:57:41.000Z
|
# This file is MACHINE GENERATED! Do not edit.
# Generated by: tensorflow/python/tools/api/generator/create_python_api.py script.
"""Public API for tf.debugging namespace.

This module only re-exports symbols from internal TensorFlow modules
(check_ops assertions, numeric-check callbacks, log-device-placement
toggles) so they are reachable under the public `tf.debugging` namespace.
It defines nothing of its own.
"""
from __future__ import print_function as _print_function
import sys as _sys
from . import experimental
from tensorflow.python.debug.lib.check_numerics_callback import disable_check_numerics
from tensorflow.python.debug.lib.check_numerics_callback import enable_check_numerics
from tensorflow.python.eager.context import get_log_device_placement
from tensorflow.python.eager.context import set_log_device_placement
from tensorflow.python.ops.check_ops import assert_equal
from tensorflow.python.ops.check_ops import assert_greater
from tensorflow.python.ops.check_ops import assert_greater_equal
from tensorflow.python.ops.check_ops import assert_integer
from tensorflow.python.ops.check_ops import assert_less
from tensorflow.python.ops.check_ops import assert_less_equal
from tensorflow.python.ops.check_ops import assert_near
from tensorflow.python.ops.check_ops import assert_negative
from tensorflow.python.ops.check_ops import assert_non_negative
from tensorflow.python.ops.check_ops import assert_non_positive
from tensorflow.python.ops.check_ops import assert_none_equal
from tensorflow.python.ops.check_ops import assert_positive
from tensorflow.python.ops.check_ops import assert_proper_iterable
from tensorflow.python.ops.check_ops import assert_rank
from tensorflow.python.ops.check_ops import assert_rank_at_least
from tensorflow.python.ops.check_ops import assert_rank_in
from tensorflow.python.ops.check_ops import assert_same_float_dtype
from tensorflow.python.ops.check_ops import assert_scalar
from tensorflow.python.ops.check_ops import assert_shapes
from tensorflow.python.ops.check_ops import assert_type
from tensorflow.python.ops.check_ops import is_non_decreasing
from tensorflow.python.ops.check_ops import is_numeric_tensor
from tensorflow.python.ops.check_ops import is_strictly_increasing
from tensorflow.python.ops.control_flow_ops import Assert
from tensorflow.python.ops.gen_array_ops import check_numerics
from tensorflow.python.ops.gen_math_ops import is_finite
from tensorflow.python.ops.gen_math_ops import is_inf
from tensorflow.python.ops.gen_math_ops import is_nan
from tensorflow.python.ops.numerics import verify_tensor_all_finite as assert_all_finite
# Drop the __future__ alias so it does not leak into the public namespace.
del _print_function
| 51.978261
| 88
| 0.876202
| 372
| 2,391
| 5.349462
| 0.223118
| 0.273367
| 0.331658
| 0.335176
| 0.743216
| 0.710553
| 0.654271
| 0.654271
| 0.465829
| 0.054271
| 0
| 0
| 0.072773
| 2,391
| 45
| 89
| 53.133333
| 0.897609
| 0.069009
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.594595
| 1
| 0
| true
| 0
| 0.972973
| 0
| 0.972973
| 0.054054
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 9
|
ed1f1b32b0bd45f0f63c074cbf60e1cd04e88ed1
| 45,006
|
py
|
Python
|
cyder/migrations/0002_auto__chg_field_dynamicinterface_workgroup__chg_field_staticinterface_.py
|
drkitty/cyder
|
1babc443cc03aa51fa3c1015bcd22f0ea2e5f0f8
|
[
"BSD-3-Clause"
] | 6
|
2015-04-16T23:18:22.000Z
|
2020-08-25T22:50:13.000Z
|
cyder/migrations/0002_auto__chg_field_dynamicinterface_workgroup__chg_field_staticinterface_.py
|
drkitty/cyder
|
1babc443cc03aa51fa3c1015bcd22f0ea2e5f0f8
|
[
"BSD-3-Clause"
] | 267
|
2015-01-01T00:18:57.000Z
|
2015-10-14T00:01:13.000Z
|
cyder/migrations/0002_auto__chg_field_dynamicinterface_workgroup__chg_field_staticinterface_.py
|
drkitty/cyder
|
1babc443cc03aa51fa3c1015bcd22f0ea2e5f0f8
|
[
"BSD-3-Clause"
] | 5
|
2015-03-23T00:57:09.000Z
|
2019-09-09T22:42:37.000Z
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
    """Tighten the 'workgroup' FK: alter workgroup_id on both interface
    tables to a ForeignKey without null=True (i.e. non-nullable)."""
    fk_path = 'django.db.models.fields.related.ForeignKey'
    # The same change applies to both interface tables; apply it in one pass.
    for table in ('dynamic_interface', 'static_interface'):
        db.alter_column(
            table, 'workgroup_id',
            self.gf(fk_path)(to=orm['cyder.Workgroup']))
def backwards(self, orm):
    """Undo forwards(): restore workgroup_id on both interface tables to a
    nullable ForeignKey (null=True)."""
    fk_path = 'django.db.models.fields.related.ForeignKey'
    # Mirror of forwards(), but the field is re-created with null=True.
    for table in ('dynamic_interface', 'static_interface'):
        db.alter_column(
            table, 'workgroup_id',
            self.gf(fk_path)(to=orm['cyder.Workgroup'], null=True))
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'cyder.addressrecord': {
'Meta': {'unique_together': "(('label', 'domain', 'fqdn', 'ip_upper', 'ip_lower', 'ip_type'),)", 'object_name': 'AddressRecord', 'db_table': "'address_record'"},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'ctnr': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyder.Ctnr']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'blank': 'True'}),
'domain': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyder.Domain']"}),
'fqdn': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_lower': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'ip_str': ('django.db.models.fields.CharField', [], {'max_length': '39'}),
'ip_type': ('django.db.models.fields.CharField', [], {'default': "'4'", 'max_length': '1'}),
'ip_upper': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '63', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'ttl': ('django.db.models.fields.PositiveIntegerField', [], {'default': '3600', 'null': 'True', 'blank': 'True'}),
'views': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cyder.View']", 'symmetrical': 'False', 'blank': 'True'})
},
'cyder.attribute': {
'Meta': {'object_name': 'Attribute', 'db_table': "'attribute'"},
'attribute_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'value_type': ('cyder.base.eav.fields.AttributeValueTypeField', [], {'attribute_type_field': "''", 'max_length': '20'})
},
'cyder.buildmanifest': {
'Meta': {'object_name': 'BuildManifest'},
'build_run': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyder.DNSBuildRun']"}),
'files': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'zhash': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'zname': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'cyder.cname': {
'Meta': {'unique_together': "(('label', 'domain', 'target'),)", 'object_name': 'CNAME', 'db_table': "'cname'"},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'ctnr': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyder.Ctnr']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'blank': 'True'}),
'domain': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyder.Domain']"}),
'fqdn': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '63', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'target': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'ttl': ('django.db.models.fields.PositiveIntegerField', [], {'default': '3600', 'null': 'True', 'blank': 'True'}),
'views': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cyder.View']", 'symmetrical': 'False', 'blank': 'True'})
},
'cyder.ctnr': {
'Meta': {'object_name': 'Ctnr', 'db_table': "'ctnr'"},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'domains': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cyder.Domain']", 'symmetrical': 'False', 'blank': 'True'}),
'email_contact': ('django.db.models.fields.CharField', [], {'max_length': '75', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'ranges': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cyder.Range']", 'symmetrical': 'False', 'blank': 'True'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'ctnrs'", 'blank': 'True', 'through': "orm['cyder.CtnrUser']", 'to': "orm['auth.User']"}),
'workgroups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cyder.Workgroup']", 'symmetrical': 'False', 'blank': 'True'})
},
'cyder.ctnruser': {
'Meta': {'unique_together': "(('ctnr', 'user'),)", 'object_name': 'CtnrUser', 'db_table': "'ctnr_users'"},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'ctnr': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyder.Ctnr']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.IntegerField', [], {}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'cyder.dnsbuildrun': {
'Meta': {'object_name': 'DNSBuildRun'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'log': ('django.db.models.fields.TextField', [], {})
},
'cyder.domain': {
'Meta': {'object_name': 'Domain', 'db_table': "'domain'"},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'delegated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'dirty': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_reverse': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'master_domain': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['cyder.Domain']", 'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'purgeable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'soa': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['cyder.SOA']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'})
},
'cyder.dynamicinterface': {
'Meta': {'unique_together': "(('range', 'mac'),)", 'object_name': 'DynamicInterface', 'db_table': "'dynamic_interface'"},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'ctnr': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyder.Ctnr']"}),
'dhcp_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'expire': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'mac': ('cyder.base.fields.MacAddrField', [], {'max_length': '17', 'null': 'True', 'dhcp_enabled': "'dhcp_enabled'"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'range': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyder.Range']"}),
'system': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyder.System']"}),
'workgroup': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': "orm['cyder.Workgroup']"})
},
'cyder.dynamicinterfaceav': {
'Meta': {'unique_together': "(('entity', 'attribute'),)", 'object_name': 'DynamicInterfaceAV', 'db_table': "'dynamic_interface_av'"},
'attribute': ('cyder.base.eav.fields.EAVAttributeField', [], {'to': "orm['cyder.Attribute']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'entity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyder.DynamicInterface']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'value': ('cyder.base.eav.fields.EAVValueField', [], {'attribute_field': "''", 'max_length': '255'})
},
'cyder.mx': {
'Meta': {'unique_together': "(('domain', 'label', 'server'),)", 'object_name': 'MX', 'db_table': "'mx'"},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'ctnr': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyder.Ctnr']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'blank': 'True'}),
'domain': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyder.Domain']"}),
'fqdn': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '63', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'priority': ('django.db.models.fields.PositiveIntegerField', [], {}),
'server': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'ttl': ('django.db.models.fields.PositiveIntegerField', [], {'default': '3600', 'null': 'True', 'blank': 'True'}),
'views': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cyder.View']", 'symmetrical': 'False', 'blank': 'True'})
},
'cyder.nameserver': {
'Meta': {'unique_together': "(('domain', 'server'),)", 'object_name': 'Nameserver', 'db_table': "'nameserver'"},
'addr_glue': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'nameserver_set'", 'null': 'True', 'to': "orm['cyder.AddressRecord']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'ctnr': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyder.Ctnr']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'blank': 'True'}),
'domain': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyder.Domain']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'intr_glue': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'nameserver_set'", 'null': 'True', 'to': "orm['cyder.StaticInterface']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'server': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ttl': ('django.db.models.fields.PositiveIntegerField', [], {'default': '3600', 'null': 'True', 'blank': 'True'}),
'views': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cyder.View']", 'symmetrical': 'False', 'blank': 'True'})
},
'cyder.network': {
'Meta': {'unique_together': "(('ip_upper', 'ip_lower', 'prefixlen'),)", 'object_name': 'Network', 'db_table': "'network'"},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'dhcpd_raw_include': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_lower': ('django.db.models.fields.BigIntegerField', [], {'blank': 'True'}),
'ip_type': ('django.db.models.fields.CharField', [], {'default': "'4'", 'max_length': '1'}),
'ip_upper': ('django.db.models.fields.BigIntegerField', [], {'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'network_str': ('django.db.models.fields.CharField', [], {'max_length': '49'}),
'prefixlen': ('django.db.models.fields.PositiveIntegerField', [], {}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyder.Site']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'vlan': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyder.Vlan']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'vrf': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': "orm['cyder.Vrf']"})
},
'cyder.networkav': {
'Meta': {'unique_together': "(('entity', 'attribute'),)", 'object_name': 'NetworkAV', 'db_table': "'network_av'"},
'attribute': ('cyder.base.eav.fields.EAVAttributeField', [], {'to': "orm['cyder.Attribute']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'entity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyder.Network']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'value': ('cyder.base.eav.fields.EAVValueField', [], {'attribute_field': "''", 'max_length': '255'})
},
'cyder.ptr': {
'Meta': {'unique_together': "(('ip_str', 'ip_type', 'fqdn'),)", 'object_name': 'PTR', 'db_table': "'ptr'"},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'ctnr': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyder.Ctnr']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'blank': 'True'}),
'fqdn': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_lower': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'ip_str': ('django.db.models.fields.CharField', [], {'max_length': '39'}),
'ip_type': ('django.db.models.fields.CharField', [], {'default': "'4'", 'max_length': '1'}),
'ip_upper': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'reverse_domain': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reverse_ptr_set'", 'blank': 'True', 'to': "orm['cyder.Domain']"}),
'ttl': ('django.db.models.fields.PositiveIntegerField', [], {'default': '3600', 'null': 'True', 'blank': 'True'}),
'views': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cyder.View']", 'symmetrical': 'False', 'blank': 'True'})
},
'cyder.range': {
'Meta': {'unique_together': "(('start_upper', 'start_lower', 'end_upper', 'end_lower'),)", 'object_name': 'Range', 'db_table': "'range'"},
'allow': ('django.db.models.fields.CharField', [], {'default': "'l'", 'max_length': '1'}),
'allow_voip_phones': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'dhcp_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'dhcpd_raw_include': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'domain': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyder.Domain']", 'null': 'True', 'blank': 'True'}),
'end_lower': ('django.db.models.fields.BigIntegerField', [], {'null': 'True'}),
'end_str': ('django.db.models.fields.CharField', [], {'max_length': '39'}),
'end_upper': ('django.db.models.fields.BigIntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_type': ('django.db.models.fields.CharField', [], {'default': "'4'", 'max_length': '1'}),
'is_reserved': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'network': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyder.Network']"}),
'range_type': ('django.db.models.fields.CharField', [], {'default': "'st'", 'max_length': '2'}),
'range_usage': ('django.db.models.fields.IntegerField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'start_lower': ('django.db.models.fields.BigIntegerField', [], {'null': 'True'}),
'start_str': ('django.db.models.fields.CharField', [], {'max_length': '39'}),
'start_upper': ('django.db.models.fields.BigIntegerField', [], {'null': 'True'}),
'views': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cyder.View']", 'symmetrical': 'False', 'blank': 'True'})
},
'cyder.rangeav': {
'Meta': {'unique_together': "(('entity', 'attribute'),)", 'object_name': 'RangeAV', 'db_table': "'range_av'"},
'attribute': ('cyder.base.eav.fields.EAVAttributeField', [], {'to': "orm['cyder.Attribute']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'entity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyder.Range']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'value': ('cyder.base.eav.fields.EAVValueField', [], {'attribute_field': "''", 'max_length': '255'})
},
'cyder.site': {
'Meta': {'unique_together': "(('name', 'parent'),)", 'object_name': 'Site', 'db_table': "'site'"},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyder.Site']", 'null': 'True', 'blank': 'True'})
},
'cyder.siteav': {
'Meta': {'unique_together': "(('entity', 'attribute'),)", 'object_name': 'SiteAV', 'db_table': "'site_av'"},
'attribute': ('cyder.base.eav.fields.EAVAttributeField', [], {'to': "orm['cyder.Attribute']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'entity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyder.Site']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'value': ('cyder.base.eav.fields.EAVValueField', [], {'attribute_field': "''", 'max_length': '255'})
},
'cyder.soa': {
'Meta': {'object_name': 'SOA', 'db_table': "'soa'"},
'contact': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'dirty': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'dns_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'expire': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1209600'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_signed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'minimum': ('django.db.models.fields.PositiveIntegerField', [], {'default': '180'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'primary': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'refresh': ('django.db.models.fields.PositiveIntegerField', [], {'default': '180'}),
'retry': ('django.db.models.fields.PositiveIntegerField', [], {'default': '86400'}),
'root_domain': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'root_of_soa'", 'unique': 'True', 'to': "orm['cyder.Domain']"}),
'serial': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1424905969'}),
'ttl': ('django.db.models.fields.PositiveIntegerField', [], {'default': '3600', 'null': 'True', 'blank': 'True'})
},
'cyder.soaav': {
'Meta': {'unique_together': "(('entity', 'attribute'),)", 'object_name': 'SOAAV', 'db_table': "'soa_av'"},
'attribute': ('cyder.base.eav.fields.EAVAttributeField', [], {'to': "orm['cyder.Attribute']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'entity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyder.SOA']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'value': ('cyder.base.eav.fields.EAVValueField', [], {'attribute_field': "''", 'max_length': '255'})
},
'cyder.srv': {
'Meta': {'unique_together': "(('label', 'domain', 'target', 'port'),)", 'object_name': 'SRV', 'db_table': "'srv'"},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'ctnr': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyder.Ctnr']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'blank': 'True'}),
'domain': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyder.Domain']"}),
'fqdn': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '63', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'port': ('django.db.models.fields.PositiveIntegerField', [], {}),
'priority': ('django.db.models.fields.PositiveIntegerField', [], {}),
'target': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'ttl': ('django.db.models.fields.PositiveIntegerField', [], {'default': '3600', 'null': 'True', 'blank': 'True'}),
'views': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cyder.View']", 'symmetrical': 'False', 'blank': 'True'}),
'weight': ('django.db.models.fields.PositiveIntegerField', [], {})
},
'cyder.sshfp': {
'Meta': {'unique_together': "(('domain', 'label'),)", 'object_name': 'SSHFP', 'db_table': "'sshfp'"},
'algorithm_number': ('django.db.models.fields.PositiveIntegerField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'ctnr': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyder.Ctnr']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'blank': 'True'}),
'domain': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyder.Domain']"}),
'fingerprint_type': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'fqdn': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '63', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'ttl': ('django.db.models.fields.PositiveIntegerField', [], {'default': '3600', 'null': 'True', 'blank': 'True'}),
'views': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cyder.View']", 'symmetrical': 'False', 'blank': 'True'})
},
'cyder.staticinterface': {
'Meta': {'unique_together': "(('ip_upper', 'ip_lower'), ('label', 'domain'))", 'object_name': 'StaticInterface', 'db_table': "'static_interface'"},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'ctnr': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyder.Ctnr']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'blank': 'True'}),
'dhcp_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'dns_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'domain': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyder.Domain']"}),
'expire': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'fqdn': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_lower': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'ip_str': ('django.db.models.fields.CharField', [], {'max_length': '39'}),
'ip_type': ('django.db.models.fields.CharField', [], {'default': "'4'", 'max_length': '1'}),
'ip_upper': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '63', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'mac': ('cyder.base.fields.MacAddrField', [], {'max_length': '17', 'null': 'True', 'dhcp_enabled': "'dhcp_enabled'"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'reverse_domain': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'reverse_staticintr_set'", 'null': 'True', 'to': "orm['cyder.Domain']"}),
'system': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyder.System']"}),
'ttl': ('django.db.models.fields.PositiveIntegerField', [], {'default': '3600', 'null': 'True', 'blank': 'True'}),
'views': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cyder.View']", 'symmetrical': 'False', 'blank': 'True'}),
'workgroup': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': "orm['cyder.Workgroup']"})
},
'cyder.staticinterfaceav': {
'Meta': {'unique_together': "(('entity', 'attribute'),)", 'object_name': 'StaticInterfaceAV', 'db_table': "'static_interface_av'"},
'attribute': ('cyder.base.eav.fields.EAVAttributeField', [], {'to': "orm['cyder.Attribute']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'entity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyder.StaticInterface']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'value': ('cyder.base.eav.fields.EAVValueField', [], {'attribute_field': "''", 'max_length': '255'})
},
'cyder.system': {
'Meta': {'object_name': 'System', 'db_table': "'system'"},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'cyder.systemav': {
'Meta': {'unique_together': "(('entity', 'attribute'),)", 'object_name': 'SystemAV', 'db_table': "'system_av'"},
'attribute': ('cyder.base.eav.fields.EAVAttributeField', [], {'to': "orm['cyder.Attribute']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'entity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyder.System']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'value': ('cyder.base.eav.fields.EAVValueField', [], {'attribute_field': "''", 'max_length': '255'})
},
'cyder.task': {
'Meta': {'ordering': "['task']", 'object_name': 'Task', 'db_table': "u'task'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'task': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ttype': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'cyder.token': {
'Meta': {'object_name': 'Token'},
'can_write': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'purpose': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'cyder.txt': {
'Meta': {'object_name': 'TXT', 'db_table': "'txt'"},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'ctnr': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyder.Ctnr']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'blank': 'True'}),
'domain': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyder.Domain']"}),
'fqdn': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '63', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'ttl': ('django.db.models.fields.PositiveIntegerField', [], {'default': '3600', 'null': 'True', 'blank': 'True'}),
'txt_data': ('django.db.models.fields.TextField', [], {}),
'views': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cyder.View']", 'symmetrical': 'False', 'blank': 'True'})
},
'cyder.userprofile': {
'Meta': {'object_name': 'UserProfile', 'db_table': "'auth_user_profile'"},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'default_ctnr': ('django.db.models.fields.related.ForeignKey', [], {'default': '2', 'to': "orm['cyder.Ctnr']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'phone_number': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': "orm['auth.User']"})
},
'cyder.view': {
'Meta': {'unique_together': "(('name',),)", 'object_name': 'View', 'db_table': "'view'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'cyder.vlan': {
'Meta': {'unique_together': "(('name', 'number'),)", 'object_name': 'Vlan', 'db_table': "'vlan'"},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'number': ('django.db.models.fields.PositiveIntegerField', [], {})
},
'cyder.vlanav': {
'Meta': {'unique_together': "(('entity', 'attribute'),)", 'object_name': 'VlanAV', 'db_table': "'vlan_av'"},
'attribute': ('cyder.base.eav.fields.EAVAttributeField', [], {'to': "orm['cyder.Attribute']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'entity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyder.Vlan']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'value': ('cyder.base.eav.fields.EAVValueField', [], {'attribute_field': "''", 'max_length': '255'})
},
'cyder.vrf': {
'Meta': {'object_name': 'Vrf', 'db_table': "'vrf'"},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
},
'cyder.vrfav': {
'Meta': {'unique_together': "(('entity', 'attribute'),)", 'object_name': 'VrfAV', 'db_table': "'vrf_av'"},
'attribute': ('cyder.base.eav.fields.EAVAttributeField', [], {'to': "orm['cyder.Attribute']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'entity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyder.Vrf']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'value': ('cyder.base.eav.fields.EAVValueField', [], {'attribute_field': "''", 'max_length': '255'})
},
'cyder.workgroup': {
'Meta': {'object_name': 'Workgroup', 'db_table': "'workgroup'"},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
},
'cyder.workgroupav': {
'Meta': {'unique_together': "(('entity', 'attribute'),)", 'object_name': 'WorkgroupAV', 'db_table': "'workgroup_av'"},
'attribute': ('cyder.base.eav.fields.EAVAttributeField', [], {'to': "orm['cyder.Attribute']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'entity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyder.Workgroup']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'value': ('cyder.base.eav.fields.EAVValueField', [], {'attribute_field': "''", 'max_length': '255'})
}
}
complete_apps = ['cyder']
| 90.373494
| 209
| 0.554748
| 4,556
| 45,006
| 5.370061
| 0.054214
| 0.10627
| 0.1854
| 0.264857
| 0.870351
| 0.85114
| 0.827475
| 0.777773
| 0.736451
| 0.692471
| 0
| 0.008582
| 0.181842
| 45,006
| 498
| 210
| 90.373494
| 0.655858
| 0.004333
| 0
| 0.482402
| 0
| 0
| 0.601103
| 0.307905
| 0
| 0
| 0
| 0
| 0
| 1
| 0.004141
| false
| 0.00207
| 0.008282
| 0
| 0.018634
| 0.00207
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
ed49e2f753455996fa56a63f0fab53decc2c86c3
| 10,068
|
py
|
Python
|
GERMAN-test/textblob-de-0.4.3/tests/test_tokenizers.py
|
devinlewtan/term_extraction_nlp
|
85ba2434ced88768be6caf3f6d0426d2416ef651
|
[
"Apache-2.0"
] | null | null | null |
GERMAN-test/textblob-de-0.4.3/tests/test_tokenizers.py
|
devinlewtan/term_extraction_nlp
|
85ba2434ced88768be6caf3f6d0426d2416ef651
|
[
"Apache-2.0"
] | null | null | null |
GERMAN-test/textblob-de-0.4.3/tests/test_tokenizers.py
|
devinlewtan/term_extraction_nlp
|
85ba2434ced88768be6caf3f6d0426d2416ef651
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Code adapted from the main `TextBlob`_ library.
#
# :repo: `https://github.com/sloria/TextBlob`_
# :source: tests/test_tokenizers.py
# :version: 2013-12-27 (73bbcaa693)
#
# :modified: 2014-08-29 <m.killer@langui.ch>
#
"""Test cases for tokenziers."""
from __future__ import unicode_literals
import unittest
from nose.tools import * # PEP8 asserts
from textblob_de import NLTKPunktTokenizer, PatternTokenizer
from textblob_de.tokenizers import WordTokenizer, word_tokenize
from textblob.compat import PY2
def is_generator(obj):
    """Return True if *obj* exposes the generator protocol for this Python version."""
    # Python 2 generators expose ``next``; Python 3 renamed it ``__next__``.
    attr = 'next' if PY2 else '__next__'
    return hasattr(obj, attr)
class TestNLTKPunktTokenizer(unittest.TestCase):
    """Behavioural tests for ``NLTKPunktTokenizer`` on a German sample text."""

    def setUp(self):
        self.tokenizer = NLTKPunktTokenizer()
        self.text = (
            "Heute ist der 3. Mai 2014 und Dr. Meier feiert seinen 43. "
            "Geburtstag. Ich muss unbedingt daran denken, Mehl, usw. für "
            "einen Kuchen einzukaufen. Aber leider habe ich nur noch "
            "EUR 3.50 in meiner Brieftasche.")
        self.snt1 = (
            "Heute ist der 3. Mai 2014 und Dr. Meier feiert seinen 43. "
            "Geburtstag.")

    def tearDown(self):
        pass

    def test_tokenize(self):
        # Punctuation kept: sentence-final periods become their own tokens,
        # while abbreviation/ordinal periods ("Dr.", "3.") stay attached.
        expected = [
            'Heute', 'ist', 'der', '3.', 'Mai', '2014', 'und', 'Dr.', 'Meier',
            'feiert', 'seinen', '43.', 'Geburtstag', '.', 'Ich', 'muss',
            'unbedingt', 'daran', 'denken', ',', 'Mehl', ',', 'usw.', 'für',
            'einen', 'Kuchen', 'einzukaufen', '.', 'Aber', 'leider', 'habe',
            'ich', 'nur', 'noch', 'EUR', '3.50', 'in', 'meiner',
            'Brieftasche', '.']
        assert_equal(self.tokenizer.tokenize(self.text), expected)

    def test_exclude_punc(self):
        # Without punctuation the trailing periods of abbreviations/ordinals
        # are stripped as well ('3.' -> '3'), but '3.50' survives intact.
        expected = [
            'Heute', 'ist', 'der', '3', 'Mai', '2014', 'und', 'Dr', 'Meier',
            'feiert', 'seinen', '43', 'Geburtstag', 'Ich', 'muss',
            'unbedingt', 'daran', 'denken', 'Mehl', 'usw', 'für', 'einen',
            'Kuchen', 'einzukaufen', 'Aber', 'leider', 'habe', 'ich', 'nur',
            'noch', 'EUR', '3.50', 'in', 'meiner', 'Brieftasche']
        assert_equal(
            self.tokenizer.tokenize(self.text, include_punc=False), expected)

    def test_tokenize_nested(self):
        # nested=True yields one token list per detected sentence.
        expected = [
            ['Heute', 'ist', 'der', '3.', 'Mai', '2014', 'und', 'Dr.',
             'Meier', 'feiert', 'seinen', '43.', 'Geburtstag', '.'],
            ['Ich', 'muss', 'unbedingt', 'daran', 'denken', ',', 'Mehl', ',',
             'usw.', 'für', 'einen', 'Kuchen', 'einzukaufen', '.'],
            ['Aber', 'leider', 'habe', 'ich', 'nur', 'noch', 'EUR', '3.50',
             'in', 'meiner', 'Brieftasche', '.']]
        assert_equal(self.tokenizer.tokenize(self.text, nested=True),
                     expected)

    def test_itokenize(self):
        # itokenize must be lazy: it returns a generator, not a list.
        gen = self.tokenizer.itokenize(self.text)
        assert_true(is_generator(gen))
        assert_equal(next(gen), 'Heute')
        assert_equal(next(gen), 'ist')

    def test_sent_tokenize(self):
        expected = [
            'Heute ist der 3. Mai 2014 und Dr. Meier feiert seinen 43. Geburtstag.',
            'Ich muss unbedingt daran denken, Mehl, usw. für einen Kuchen einzukaufen.',
            'Aber leider habe ich nur noch EUR 3.50 in meiner Brieftasche.']
        assert_equal(self.tokenizer.sent_tokenize(self.text), expected)

    def test_word_tokenize(self):
        expected = ['Heute', 'ist', 'der', '3.', 'Mai', '2014', 'und', 'Dr.',
                    'Meier', 'feiert', 'seinen', '43.', 'Geburtstag', '.']
        assert_equal(self.tokenizer.word_tokenize(self.snt1), expected)
class TestPatternTokenizer(unittest.TestCase):
    """Behavioural tests for ``PatternTokenizer`` on a German sample text."""

    def setUp(self):
        self.tokenizer = PatternTokenizer()
        self.text = (
            "Heute ist der 3. Mai 2014 und Dr. Meier feiert seinen 43. "
            "Geburtstag.")
        self.snt1 = "Heute ist der 3 ."

    def tearDown(self):
        pass

    def test_tokenize(self):
        # Unlike the Punkt tokenizer, pattern splits ordinal periods
        # ('3.' -> '3', '.') but keeps known abbreviations such as 'Dr.'.
        expected = ['Heute', 'ist', 'der', '3', '.', 'Mai', '2014', 'und',
                    'Dr.', 'Meier', 'feiert', 'seinen', '43', '.',
                    'Geburtstag', '.']
        assert_equal(self.tokenizer.tokenize(self.text), expected)

    def test_exclude_punc(self):
        expected = ['Heute', 'ist', 'der', '3', 'Mai', '2014', 'und', 'Dr',
                    'Meier', 'feiert', 'seinen', '43', 'Geburtstag']
        assert_equal(
            self.tokenizer.tokenize(self.text, include_punc=False), expected)

    def test_tokenize_nested(self):
        # Sentence splitting happens at the detached ordinal periods, so the
        # single input sentence comes back as three token lists.
        expected = [
            ['Heute', 'ist', 'der', '3', '.'],
            ['Mai', '2014', 'und', 'Dr.', 'Meier', 'feiert', 'seinen', '43',
             '.'],
            ['Geburtstag', '.']]
        assert_equal(self.tokenizer.tokenize(self.text, nested=True),
                     expected)

    def test_itokenize(self):
        # itokenize must be lazy: it returns a generator, not a list.
        gen = self.tokenizer.itokenize(self.text)
        assert_true(is_generator(gen))
        assert_equal(next(gen), 'Heute')
        assert_equal(next(gen), 'ist')

    def test_sent_tokenize(self):
        expected = ['Heute ist der 3 .',
                    'Mai 2014 und Dr. Meier feiert seinen 43 .',
                    'Geburtstag .']
        assert_equal(self.tokenizer.sent_tokenize(self.text), expected)

    def test_word_tokenize(self):
        expected = ['Heute', 'ist', 'der', '3', '.']
        assert_equal(self.tokenizer.word_tokenize(self.snt1), expected)
class TestWordTokenizer(unittest.TestCase):
    """Behavioural tests for the low-level ``WordTokenizer``."""

    def setUp(self):
        self.tokenizer = WordTokenizer()
        self.text = "Python ist eine universelle, üblicherweise interpretierte höhere Programmiersprache."

    def tearDown(self):
        pass

    def test_tokenize(self):
        expected = ['Python', 'ist', 'eine', 'universelle', ',',
                    'üblicherweise', 'interpretierte', 'höhere',
                    'Programmiersprache', '.']
        assert_equal(self.tokenizer.tokenize(self.text), expected)

    def test_exclude_punc(self):
        expected = ['Python', 'ist', 'eine', 'universelle', 'üblicherweise',
                    'interpretierte', 'höhere', 'Programmiersprache']
        assert_equal(
            self.tokenizer.tokenize(self.text, include_punc=False), expected)

    def test_itokenize(self):
        gen = self.tokenizer.itokenize(self.text)
        assert_equal(next(gen), "Python")
        assert_equal(next(gen), "ist")

    def test_word_tokenize(self):
        # Module-level word_tokenize() is lazy and must agree with the
        # tokenizer's eager tokenize().
        tokens = word_tokenize(self.text)
        assert_true(is_generator(tokens))
        assert_equal(list(tokens), self.tokenizer.tokenize(self.text))
# Allow running this test module directly: ``python test_tokenizers.py``.
if __name__ == '__main__':
    unittest.main()
| 32.166134
| 106
| 0.363627
| 703
| 10,068
| 5.089616
| 0.183499
| 0.073784
| 0.043041
| 0.046954
| 0.809391
| 0.801286
| 0.762158
| 0.719955
| 0.719955
| 0.719955
| 0
| 0.027248
| 0.526122
| 10,068
| 312
| 107
| 32.269231
| 0.7227
| 0.02642
| 0
| 0.781481
| 0
| 0
| 0.169102
| 0
| 0
| 0
| 0
| 0
| 0.081481
| 1
| 0.085185
| false
| 0.011111
| 0.022222
| 0
| 0.125926
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
ed5f54822e8faab418ae84fadb5a130728389904
| 7,928
|
py
|
Python
|
ckeditor_demo/demo_application/tests/test_pillow.py
|
scottwoodall/django-ckeditor
|
107b8dbe60b9f6eb1c16cc189d24ad66d09f26ac
|
[
"BSD-3-Clause"
] | 3
|
2021-01-24T13:14:33.000Z
|
2022-01-25T22:17:59.000Z
|
ckeditor_demo/demo_application/tests/test_pillow.py
|
scottwoodall/django-ckeditor
|
107b8dbe60b9f6eb1c16cc189d24ad66d09f26ac
|
[
"BSD-3-Clause"
] | 9
|
2021-03-19T02:17:08.000Z
|
2022-03-12T00:01:38.000Z
|
ckeditor_demo/demo_application/tests/test_pillow.py
|
scottwoodall/django-ckeditor
|
107b8dbe60b9f6eb1c16cc189d24ad66d09f26ac
|
[
"BSD-3-Clause"
] | 2
|
2020-09-04T16:09:06.000Z
|
2022-03-13T14:16:20.000Z
|
from __future__ import absolute_import, unicode_literals
import json
import os.path
from django.contrib.staticfiles.finders import find
from django.test import TestCase
from django.test.utils import override_settings
from .utils import get_absolute_media_path, get_media_url, remove_upload_directory, sha1
try:
from django.urls import reverse
except ImportError:
from django.core.urlresolvers import reverse
class PillowTestCase(TestCase):
    """Exercise the CKEditor upload view's Pillow-backed file handling.

    Each test POSTs one of the app's own static files to the upload
    endpoint and checks the JSON response plus the files written to the
    upload directory (original and, for images, a generated thumbnail).
    """

    fixtures = ['test_admin.json']

    def setUp(self):
        # Every test starts from an empty upload directory.
        remove_upload_directory()

    def assertJsonResponse(self, response, expected_data):
        self.assertEqual(200, response.status_code)
        parsed = json.loads(response.content)
        self.assertEqual(parsed, expected_data)

    def _upload(self, static_name):
        """Log in as the fixture user and POST the static file *static_name*.

        Returns ``(source_path, response)`` where ``source_path`` is the
        on-disk location resolved by the staticfiles finder.
        """
        self.client.login(username='test', password='test')
        filepath = find(static_name)
        with open(filepath, 'rb') as fp:
            response = self.client.post(reverse('ckeditor_upload'), {'upload': fp})
        return filepath, response

    @override_settings(CKEDITOR_ALLOW_NONIMAGE_FILES=False)
    def test_upload_file_disabled(self):
        _, response = self._upload('ckeditor/ckeditor/LICENSE.md')
        self.assertEqual(200, response.status_code)
        self.assertContains(response, 'Invalid file type.')

    def test_upload_file(self):
        filepath, response = self._upload('ckeditor/ckeditor/LICENSE.md')
        self.assertJsonResponse(response, {"fileName": "license.md",
                                           "uploaded": "1",
                                           "url": get_media_url("license.md")})
        upload = get_absolute_media_path('license.md')
        self.assertTrue(os.path.isfile(upload), upload)
        # Non-image files must be stored verbatim.
        self.assertEqual(os.path.getsize(filepath), os.path.getsize(upload))
        self.assertEqual(sha1(filepath), sha1(upload))

    @override_settings(CKEDITOR_FORCE_JPEG_COMPRESSION=True)
    def test_upload_file_with_compression_enabled(self):
        # JPEG compression must leave non-image uploads untouched.
        filepath, response = self._upload('ckeditor/ckeditor/LICENSE.md')
        self.assertJsonResponse(response, {"fileName": "license.md",
                                           "uploaded": "1",
                                           "url": get_media_url("license.md")})
        upload = get_absolute_media_path('license.md')
        self.assertTrue(os.path.isfile(upload), upload)
        self.assertEqual(os.path.getsize(filepath), os.path.getsize(upload))
        self.assertEqual(sha1(filepath), sha1(upload))

    @override_settings(CKEDITOR_FORCE_JPEG_COMPRESSION=False)
    def test_upload_jpg(self):
        filepath, response = self._upload(
            'ckeditor/ckeditor/plugins/codesnippet/lib/highlight/styles/pojoaque.jpg')
        self.assertJsonResponse(response, {"fileName": "pojoaque.jpg",
                                           "uploaded": "1",
                                           "url": get_media_url("pojoaque.jpg")})
        upload = get_absolute_media_path('pojoaque.jpg')
        thumb = get_absolute_media_path('pojoaque_thumb.jpg')
        # Without forced compression the JPEG is stored as-is.
        self.assertEqual(sha1(filepath), sha1(upload))
        self.assertEqual(1186, os.path.getsize(upload))
        self.assertEqual(1144, os.path.getsize(thumb))

    @override_settings(CKEDITOR_FORCE_JPEG_COMPRESSION=True)
    def test_upload_jpg_compression_enabled(self):
        _, response = self._upload(
            'ckeditor/ckeditor/plugins/codesnippet/lib/highlight/styles/pojoaque.jpg')
        self.assertJsonResponse(response, {"fileName": "pojoaque.jpg",
                                           "uploaded": "1",
                                           "url": get_media_url("pojoaque.jpg")})
        upload = get_absolute_media_path('pojoaque.jpg')
        thumb = get_absolute_media_path('pojoaque_thumb.jpg')
        # Re-compression shrinks both the upload and its thumbnail.
        self.assertEqual(598, os.path.getsize(upload))
        self.assertEqual(637, os.path.getsize(thumb))

    @override_settings(CKEDITOR_FORCE_JPEG_COMPRESSION=False)
    def test_upload_png(self):
        filepath, response = self._upload(
            'ckeditor/ckeditor/skins/moono/images/hidpi/close.png')
        self.assertJsonResponse(response, {"fileName": "close.png",
                                           "uploaded": "1",
                                           "url": get_media_url("close.png")})
        upload = get_absolute_media_path('close.png')
        thumb = get_absolute_media_path('close_thumb.png')
        self.assertEqual(sha1(filepath), sha1(upload))
        self.assertEqual(1271, os.path.getsize(upload))
        self.assertEqual(747, os.path.getsize(thumb))

    @override_settings(CKEDITOR_FORCE_JPEG_COMPRESSION=True)
    def test_upload_png_with_compression_enabled(self):
        # Forced compression converts the PNG (and its thumb) to JPEG.
        _, response = self._upload(
            'ckeditor/ckeditor/skins/moono/images/hidpi/close.png')
        self.assertJsonResponse(response, {"fileName": "close.jpg",
                                           "uploaded": "1",
                                           "url": get_media_url("close.jpg")})
        upload = get_absolute_media_path('close.jpg')
        thumb = get_absolute_media_path('close_thumb.jpg')
        self.assertTrue(os.path.isfile(upload), upload)
        self.assertTrue(os.path.isfile(thumb), thumb)
        self.assertEqual(575, os.path.getsize(upload))
        self.assertEqual(642, os.path.getsize(thumb))

    @override_settings(CKEDITOR_FORCE_JPEG_COMPRESSION=False)
    def test_upload_gif(self):
        filepath, response = self._upload('ckeditor/galleriffic/css/loader.gif')
        self.assertJsonResponse(response, {"fileName": "loader.gif",
                                           "uploaded": "1",
                                           "url": get_media_url("loader.gif")})
        upload = get_absolute_media_path('loader.gif')
        thumb = get_absolute_media_path('loader_thumb.gif')
        self.assertEqual(sha1(filepath), sha1(upload))
        self.assertEqual(10453, os.path.getsize(upload))
        # Animated GIFs get no thumbnail.
        self.assertFalse(os.path.exists(thumb), thumb)

    @override_settings(CKEDITOR_FORCE_JPEG_COMPRESSION=True)
    def test_upload_gif_with_compression_enabled(self):
        # GIFs are exempt from forced JPEG compression as well.
        filepath, response = self._upload('ckeditor/galleriffic/css/loader.gif')
        self.assertJsonResponse(response, {"fileName": "loader.gif",
                                           "uploaded": "1",
                                           "url": get_media_url("loader.gif")})
        upload = get_absolute_media_path('loader.gif')
        thumb = get_absolute_media_path('loader_thumb.gif')
        self.assertEqual(sha1(filepath), sha1(upload))
        self.assertEqual(10453, os.path.getsize(upload))
        self.assertFalse(os.path.exists(thumb), thumb)
| 46.093023
| 98
| 0.631307
| 875
| 7,928
| 5.541714
| 0.129143
| 0.025985
| 0.049495
| 0.061868
| 0.846979
| 0.842442
| 0.786348
| 0.759744
| 0.732728
| 0.732728
| 0
| 0.010656
| 0.242432
| 7,928
| 171
| 99
| 46.362573
| 0.796703
| 0
| 0
| 0.661765
| 0
| 0
| 0.15338
| 0.050454
| 0
| 0
| 0
| 0
| 0.272059
| 1
| 0.080882
| false
| 0.066176
| 0.073529
| 0
| 0.169118
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
ed69de98c84d85718a25e5f5229a523c8d028d27
| 12,108
|
py
|
Python
|
F_CMMSE/CMMSE_FPGA_dy.py
|
angelicadavila/UNIZAR_PHD
|
532a87467ceb49d3c3851bb23e26003bfc1888d3
|
[
"MIT"
] | null | null | null |
F_CMMSE/CMMSE_FPGA_dy.py
|
angelicadavila/UNIZAR_PHD
|
532a87467ceb49d3c3851bb23e26003bfc1888d3
|
[
"MIT"
] | null | null | null |
F_CMMSE/CMMSE_FPGA_dy.py
|
angelicadavila/UNIZAR_PHD
|
532a87467ceb49d3c3851bb23e26003bfc1888d3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
##############################################################################
# Adapted from: Antonio Vilches
# Revised by: Andres Rodriguez for ViVid and Dario Suarez Gracia for CFD
# Version: 1.1
# Date: 7/32/2015
# Description: Plotting script fro ViVid on Odroid
# Copyright: Department Computer Architecture at University of Malaga
##############################################################################
import argparse
import matplotlib
import os
import numpy as np
import os.path as op
from itertools import izip_longest, cycle, islice
from scipy.stats.stats import pearsonr
matplotlib.use('PDF')
from cycler import cycler
#from sklearn import datasets
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib.ticker import ScalarFormatter
def carga_fichero(file_name, delim, header, indice, columna):
    """Load a whitespace-delimited measurement file and return its second column.

    Parameters
    ----------
    file_name : str
        Path of the data file to read.
    delim, indice, columna :
        Unused; kept only so existing call sites keep working.
    header : int
        Number of leading lines to skip (``skip_header``).

    Returns
    -------
    numpy.ndarray
        1-D float array with column 1 of the file.  Anything from the
        marker ``"us."`` to the end of a line is treated as a comment.
    """
    datos = np.genfromtxt(file_name, skip_header=header, comments="us.")
    return datos[:, 1]
##############################################################################
# Main script
#############################################################################
################################################
# Configuration variables (module level so main() can read them)
################################################
titlefs = 20      # title font size (pt)
ylabelfs = 20     # y-axis label font size (pt)
xlabelfs = 20     # x-axis label font size (pt)
xticksfs = 18     # x tick label font size (pt)
yticksfs = 18     # y tick label font size (pt)
legendfs = 16     # legend / per-axes text font size (pt)
linew = 3         # line width
markers = 12      # marker size
fig_width = 8     # figure width (in)
fig_height = 6    # figure height (in)
colorcycle = ['#a1dab4', '#41b6c4', '#2c7fb8', '#253494', '#4f345a', '#8fa998' ]
# Monochrome property cycle for print-friendly plots.
# NOTE(review): '=.' is not a valid matplotlib linestyle ('-.' was probably
# intended); harmless for now because this cycler is never activated
# (the ax.set_prop_cycle call in main() is commented out).
monochrome = (cycler('color', ['k','grey']) * cycler('linestyle', ['-', '--', ':', '=.']) * cycler('marker', ['^', '.']))
def main():
    """Plot throughput vs. chunk size for four benchmark kernels.

    For each kernel (matrix multiply, Mersenne twister, watermarking,
    Sobel filter) this chdirs into the kernel's test directory, loads the
    per-device timing files (values appear to be microseconds — the files
    use an 'us.' comment marker; confirm), prints device times and
    static/dynamic speedups, and draws one subplot.  The figure is saved
    to CMMSE_FPGA_dynamic.pdf.  Python 2 syntax (bare print statements).
    """
    # NOTE(review): the parser is built but parse_args() is never called,
    # so the 'fname' and '--dir' arguments are currently ignored.
    parser = argparse.ArgumentParser(description='Plot scheduler data.')
    parser.add_argument('fname', help='File prefix for reading the input data')
    parser.add_argument('--dir', help='Directory containing the input data.')
    # ax.set_prop_cycle(monochrome)
    figure_name="CMMSE_FPGA_dynamic.pdf"
    fig, ax = plt.subplots(1,1, figsize=(16,2))
    tex_labels= ('CPU','GPU','FPGA','CPU+GPU+FPGA')
    # rows and columns of the subplot grid
    r=1
    c=4
    count=0
    ##########################################################
    ############ MATRIX MULT #############################3
    ##########################################################3
    count=count+1
    os.chdir("./test_matrixmult/")
    # Chunk sizes: powers of two from 2**7 to 2**12.
    x=np.arange(7,13,1)
    # NOTE(review): under Python 2 this integer division truncates
    # (1065369600/1000000000 == 1); a float problem size in GB was
    # probably intended.  Same applies to the p_size lines below.
    p_size= 1065369600/1000000000
    x=2**x
    print ("Matrix Multiplication ")
    file_test='work_dynamic1t.txt'
    dato_cpu=carga_fichero(file_test,'executionKernel: ',0,1,0)
    m_cpu=dato_cpu/1000000
    print ("CPU ",m_cpu)
    file_test='statict_1.txt'
    datos_=carga_fichero(file_test,'executionKernel: ',0,1,0)
    only_cpu=np.average(datos_)/1000000
    print ("speedup CPU",only_cpu/np.amin(m_cpu))
    file_test='work_dynamic2t.txt'
    dato_gpu=carga_fichero(file_test,'executionKernel: ',0,1,0)
    m_gpu=dato_gpu/1000000
    print ("gPU ",m_gpu)
    file_test='statict_2.txt'
    datos_=carga_fichero(file_test,'executionKernel: ',0,1,0)
    only_gpu=np.average(datos_)/1000000
    print ("speedup GPU",only_gpu/np.amin(m_gpu))
    # NOTE(review): the GPU block below is a verbatim repeat of the one above.
    file_test='work_dynamic2t.txt'
    dato_gpu=carga_fichero(file_test,'executionKernel: ',0,1,0)
    m_gpu=dato_gpu/1000000
    print ("gPU ",m_gpu)
    file_test='work_dynamic4t.txt'
    dato_fpga=carga_fichero(file_test,'executionKernel: ',0,1,0)
    m_fpga=dato_fpga/1000000
    print ("FPGA ",m_fpga)
    file_test='statict_4.txt'
    datos_=carga_fichero(file_test,'executionKernel: ',0,1,0)
    only_fpga=np.average(datos_)/1000000
    print ("speedup FPGA",only_fpga/np.amin(m_fpga))
    # Average the heterogeneous (CPU+GPU+FPGA) runs over `sample` repetitions.
    sample=3
    file_test='work_dynamic7t.txt'
    datos_p=carga_fichero(file_test,'executionKernel: ',0,1,0)
    datos_p2=np.reshape(datos_p,(sample,8))
    dato_st=np.average(datos_p2,axis=0)
    m_st=dato_st/1000000
    print ("dynamic work 7 ",m_st)
    # NOTE(review): the FPGA and dynamic-7 blocks below repeat the ones above.
    file_test='work_dynamic4t.txt'
    dato_fpga=carga_fichero(file_test,'executionKernel: ',0,1,0)
    m_fpga=dato_fpga/1000000
    print ("FPGA ",m_fpga)
    sample=3
    file_test='work_dynamic7t.txt'
    datos_p=carga_fichero(file_test,'executionKernel: ',0,1,0)
    datos_p2=np.reshape(datos_p,(sample,8))
    dato_st=np.average(datos_p2,axis=0)
    m_st=dato_st/1000000
    print ("dynamic work 7 ",m_st)
    print ("------------ ")
    os.chdir("./..")
    print os.getcwd()
    plt.subplot(r,c,count)
    i=0
    # Only the FPGA curve is plotted; the other devices stay commented out.
    # plt.semilogx(x,p_size/m_cpu[0:x.size],'4-',color='k',label=tex_labels[i])
    # i=i+1
    # plt.semilogx(x,p_size/m_gpu[0:x.size],'.-',color='k',label=tex_labels[i])
    # i=i+1
    plt.semilogx(x,p_size/m_fpga[0:x.size],'.:',color='k',label=tex_labels[i])
    # i=i+1
    # plt.semilogx(x,p_size/m_st[0:x.size],'h-',color='grey',label=tex_labels[i])
    plt.grid(True)
    plt.grid(linestyle='--', linewidth=1)
    plt.ylabel('Throughput(GB/s)',fontsize=legendfs)
    plt.xlabel('Chunk size',fontsize=legendfs)
    plt.gca().set_title('Matrix Multiplication', fontsize=legendfs)
    plt.gca().set_ylim(-0.0)
    ##########################################################
    ############ MERSENNE TWISTER #########################3
    ##########################################################3
    count=count+1
    os.chdir("./test_mersenne/")
    x=np.arange(7,22,1)
    p_size= 7812500*64*4/1000000000
    x=2**x
    file_test='work_dynamic1t.txt'
    dato_cpu=carga_fichero(file_test,'executionKernel: ',0,1,0)
    m_cpu=dato_cpu/1000000
    print ("CPU ",m_cpu)
    file_test='statict_1.txt'
    datos_=carga_fichero(file_test,'executionKernel: ',0,1,0)
    only_cpu=np.average(datos_)/1000000
    print ("speedup CPU",only_cpu/np.amin(m_cpu))
    file_test='work_dynamic2t.txt'
    dato_gpu=carga_fichero(file_test,'executionKernel: ',0,1,0)
    m_gpu=dato_gpu/1000000
    print ("gPU ",m_gpu)
    file_test='statict_2.txt'
    datos_=carga_fichero(file_test,'executionKernel: ',0,1,0)
    only_gpu=np.average(datos_)/1000000
    print ("speedup GPU",only_gpu/np.amin(m_gpu))
    file_test='work_dynamic4t.txt'
    dato_fpga=carga_fichero(file_test,'executionKernel: ',0,1,0)
    m_fpga=dato_fpga/1000000
    print ("FPGA ",m_fpga)
    file_test='statict_4.txt'
    datos_=carga_fichero(file_test,'executionKernel: ',0,1,0)
    only_fpga=np.average(datos_)/1000000
    print ("speedup FPGA",only_fpga/np.amin(m_fpga))
    sample=5
    file_test='work_dynamic7t.txt'
    datos_p=carga_fichero(file_test,'executionKernel: ',0,1,0)
    datos_p2=np.reshape(datos_p,(sample,18))
    dato_st=np.average(datos_p2,axis=0)
    m_st=dato_st/1000000
    print ("dynamic work 7 ",m_st)
    os.chdir("./..")
    print os.getcwd()
    plt.subplot(r,c,count)
    i=0
    # plt.semilogx(x,p_size/m_cpu[0:x.size],'4-',color='k',label=tex_labels[i])
    # i=i+1
    # plt.semilogx(x,p_size/m_gpu[0:x.size],'.-',color='k',label=tex_labels[i])
    # i=i+1
    plt.semilogx(x,p_size/m_fpga[0:x.size],'.:',color='k',label=tex_labels[i])
    # i=i+1
    # plt.semilogx(x,p_size/m_st[0:x.size],'h-',color='grey',label=tex_labels[i])
    plt.grid(True)
    plt.grid(linestyle='--', linewidth=1)
    plt.ylabel('Throughput(GB/s)',fontsize=legendfs)
    plt.xlabel('Chunk size',fontsize=legendfs)
    plt.gca().set_title('Mersenne Twister', fontsize=legendfs)
    plt.gca().set_ylim(-0.0)
    ###########################################################33
    ############# Watermarking 33##########################33
    ###########################################################33
    count=count+1
    os.chdir("./test_watermark/")
    print("./test_watermark/")
    x=np.arange(10,22,1)
    p_size=312595200*4/1000000000
    x=2**x
    print x
    file_test='work_dynamic1t.txt'
    dato_cpu=carga_fichero(file_test,'executionKernel: ',0,1,0)
    m_cpu=dato_cpu/1000000
    print ("CPU ",m_cpu)
    file_test='statict_1.txt'
    datos_=carga_fichero(file_test,'executionKernel: ',0,1,0)
    only_cpu=np.average(datos_)/1000000
    print ("speedup CPU",only_cpu/np.amin(m_cpu))
    file_test='work_dynamic2t.txt'
    dato_gpu=carga_fichero(file_test,'executionKernel: ',0,1,0)
    m_gpu=dato_gpu/1000000
    print ("gPU ",m_gpu)
    file_test='statict_2.txt'
    datos_=carga_fichero(file_test,'executionKernel: ',0,1,0)
    only_gpu=np.average(datos_)/1000000
    print ("speedup GPU",only_gpu/np.amin(m_gpu))
    file_test='work_dynamic4t.txt'
    dato_fpga=carga_fichero(file_test,'executionKernel: ',0,1,0)
    m_fpga=dato_fpga/1000000
    print ("FPGA ",m_fpga)
    file_test='statict_4.txt'
    datos_=carga_fichero(file_test,'executionKernel: ',0,1,0)
    only_fpga=np.average(datos_)/1000000
    print ("speedup FPGA",only_fpga/np.amin(m_fpga))
    sample=4
    file_test='work_dynamic7t.txt'
    datos_p=carga_fichero(file_test,'executionKernel: ',0,1,0)
    datos_p2=np.reshape(datos_p,(sample,16))
    dato_st=np.average(datos_p2,axis=0)
    m_st=dato_st/1000000
    print ("dynamic work 7 ",m_st)
    os.chdir("./..")
    print os.getcwd()
    plt.subplot(r,c,count)
    i=0
    # plt.semilogx(x,p_size/m_cpu[0:x.size],'4-',color='k',label=tex_labels[i])
    # i=i+1
    # plt.semilogx(x,p_size/m_gpu[0:x.size],'.-',color='k',label=tex_labels[i])
    # i=i+1
    plt.semilogx(x,p_size/m_fpga[0:x.size],'.:',color='k',label=tex_labels[i])
    # i=i+1
    # plt.semilogx(x,p_size/m_st[0:x.size],'h-',color='grey',label=tex_labels[i])
    plt.grid(True)
    plt.grid(linestyle='--', linewidth=1)
    plt.ylabel('Throughput(GB/s)',fontsize=legendfs)
    plt.xlabel('Chunk size',fontsize=legendfs)
    plt.gca().set_title('Watermarking', fontsize=legendfs)
    plt.gca().set_ylim(-0.0)
    ###########################################################33
    ############# Sobel Filter 33##########################33
    ###########################################################33
    count=count+1
    os.chdir("./test_sobel/")
    print("./test_sobel/")
    x=np.arange(10,28,1)
    p_size=312595200*4/1000000000
    x=2**x
    print x
    file_test='work_dynamic1t.txt'
    dato_cpu=carga_fichero(file_test,'executionKernel: ',0,1,0)
    m_cpu=dato_cpu/1000000
    print ("CPU ",m_cpu)
    file_test='statict_1.txt'
    datos_=carga_fichero(file_test,'executionKernel: ',0,1,0)
    only_cpu=np.average(datos_)/1000000
    print ("speedup CPU",only_cpu/np.amin(m_cpu))
    file_test='work_dynamic2t.txt'
    dato_gpu=carga_fichero(file_test,'executionKernel: ',0,1,0)
    m_gpu=dato_gpu/1000000
    print ("gPU ",m_gpu)
    file_test='statict_2.txt'
    datos_=carga_fichero(file_test,'executionKernel: ',0,1,0)
    only_gpu=np.average(datos_)/1000000
    print ("speedup GPU",only_gpu/np.amin(m_gpu))
    file_test='work_dynamic4t.txt'
    dato_fpga=carga_fichero(file_test,'executionKernel: ',0,1,0)
    m_fpga=dato_fpga/1000000
    print ("FPGA ",m_fpga)
    file_test='statict_4.txt'
    datos_=carga_fichero(file_test,'executionKernel: ',0,1,0)
    only_fpga=np.average(datos_)/1000000
    print ("speedup FPGA",only_fpga/np.amin(m_fpga))
    sample=17
    file_test='work_dynamic7t.txt'
    datos_p=carga_fichero(file_test,'executionKernel: ',0,1,0)
    datos_p2=np.reshape(datos_p,(sample,20))
    dato_st=np.average(datos_p2,axis=0)
    m_st=dato_st/1000000
    print ("dynamic work 7 ",m_st)
    # Pearson correlation between dynamic-schedule and GPU-only times.
    print ("Correlacion GPU-dyn sobel", pearsonr(m_st,m_gpu))
    os.chdir("./..")
    print os.getcwd()
    plt.subplot(r,c,count)
    i=0
    # plt.semilogx(x,p_size/m_cpu[0:x.size],'4-',color='k',label=tex_labels[i])
    # i=i+1
    # plt.semilogx(x,p_size/m_gpu[0:x.size],'.-',color='k',label=tex_labels[i])
    # i=i+1
    plt.semilogx(x,p_size/m_fpga[0:x.size],'.:',color='k',label=tex_labels[i])
    # i=i+1
    # plt.semilogx(x,p_size/m_st[0:x.size],'h-',color='grey',label=tex_labels[i])
    plt.grid(True)
    plt.grid(linestyle='--', linewidth=1)
    plt.ylabel('Throughput(GB/s)',fontsize=legendfs)
    plt.xlabel('Chunk size',fontsize=legendfs)
    plt.gca().set_title('Sobel Filter ', fontsize=legendfs)
    plt.gca().set_ylim(-0.0)
    # One shared legend below the four subplots.
    plt.legend(loc='upper center', bbox_to_anchor=(-1.3, -0.3),
               ncol=c, fontsize=legendfs)
    #plt.legend(bbox_to_anchor=(1.04,1),loc='lower center')
    plt.savefig(figure_name,bbox_inches='tight')
    plt.show()
# Script entry point.
if __name__ == "__main__":
    main()
| 31.613577
| 121
| 0.636769
| 1,821
| 12,108
| 4.023613
| 0.137287
| 0.067695
| 0.069879
| 0.084619
| 0.755562
| 0.751331
| 0.751331
| 0.751331
| 0.736318
| 0.719667
| 0
| 0.057956
| 0.120747
| 12,108
| 382
| 122
| 31.696335
| 0.630284
| 0.140486
| 0
| 0.722656
| 0
| 0
| 0.204285
| 0.002357
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.042969
| null | null | 0.164063
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.