hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a2adf5688c4900d441d7a83bf0b2ddb04d8e66b1
| 80
|
py
|
Python
|
flasky/main/forms/__init__.py
|
by46/fasky
|
c6941972b57284c2167dfacf022f981939249256
|
[
"MIT"
] | null | null | null |
flasky/main/forms/__init__.py
|
by46/fasky
|
c6941972b57284c2167dfacf022f981939249256
|
[
"MIT"
] | null | null | null |
flasky/main/forms/__init__.py
|
by46/fasky
|
c6941972b57284c2167dfacf022f981939249256
|
[
"MIT"
] | null | null | null |
from .profile import EditProfileAdminForm
from .profile import EdtProfileForm
| 26.666667
| 42
| 0.85
| 8
| 80
| 8.5
| 0.625
| 0.323529
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 80
| 2
| 43
| 40
| 0.971429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
a2d483c1e9b75dc55073e8b235cad453d5fdc0b0
| 2,267
|
py
|
Python
|
mab/tests/test_algs.py
|
rsoaresp/mabalgs
|
82de4148269c3838256600d5e85d849244b14de1
|
[
"Apache-2.0"
] | 95
|
2019-01-25T14:54:09.000Z
|
2022-02-27T11:48:49.000Z
|
mab/tests/test_algs.py
|
rsoaresp/mabalgs
|
82de4148269c3838256600d5e85d849244b14de1
|
[
"Apache-2.0"
] | 6
|
2019-01-28T12:36:38.000Z
|
2019-12-11T22:26:40.000Z
|
mab/tests/test_algs.py
|
rsoaresp/mabalgs
|
82de4148269c3838256600d5e85d849244b14de1
|
[
"Apache-2.0"
] | 20
|
2019-02-10T01:17:54.000Z
|
2022-02-01T02:14:20.000Z
|
from mab import algs
import numpy as np
def test_ucb_init_return_first_arm():
ucb_with_two_arms = algs.UCB1(2)
assert ucb_with_two_arms.select()[0] == 0
def test_ucb_use_all_arm_dont_usage():
ucb_with_two_arms = algs.UCB1(2)
assert ucb_with_two_arms.select()[0] == 0
assert ucb_with_two_arms.select()[0] == 1
def test_ucb_use_all_arm_dont_usage_after_priorize():
ucb_with_two_arms = algs.UCB1(2)
assert ucb_with_two_arms.select()[0] == 0
assert ucb_with_two_arms.select()[0] == 1
assert ucb_with_two_arms.select()[0] == 1
def test_ucb_select_two_arms_and_success_return_second():
ucb_with_two_arms = algs.UCB1(2)
ucb_with_two_arms.select()
ucb_with_two_arms.select()
ucb_with_two_arms.reward(1)
assert ucb_with_two_arms.select()[0] == 1
def test_ucb_select_two_arms_and_success_one_return_first():
ucb_with_two_arms = algs.UCB1(2)
ucb_with_two_arms.select()
ucb_with_two_arms.select()
ucb_with_two_arms.reward(0)
assert ucb_with_two_arms.select()[0] == 0
def test_ucb_select_two_arms_and_have_two_reward_priorize_first():
ucb_with_two_arms = algs.UCB1(2)
ucb_with_two_arms.select()
ucb_with_two_arms.reward(0)
ucb_with_two_arms.select()
ucb_with_two_arms.reward(1)
ucb_with_two_arms.select()
ucb_with_two_arms.reward(0)
ucb_with_two_arms.select()
ucb_with_two_arms.reward(1)
assert ucb_with_two_arms.select()[0] == 1
def test_ucb_exploration_first():
ucb_with_two_arms = algs.UCB1(2)
ucb_with_two_arms.select()
ucb_with_two_arms.reward(0)
ucb_with_two_arms.select()
ucb_with_two_arms.reward(1)
ucb_with_two_arms.select()
ucb_with_two_arms.reward(0)
ucb_with_two_arms.select()
ucb_with_two_arms.reward(1)
last_arm = ucb_with_two_arms.select()[0]
assert last_arm == 1
def test_ucb_exploration_second():
ucb_with_two_arms = algs.UCB1(2)
ucb_with_two_arms.select()
ucb_with_two_arms.reward(0)
ucb_with_two_arms.select()
ucb_with_two_arms.reward(1)
ucb_with_two_arms.select()
ucb_with_two_arms.reward(0)
ucb_with_two_arms.select()
ucb_with_two_arms.reward(1)
ucb_with_two_arms.select()
last_arm = ucb_with_two_arms.select()[0]
assert last_arm == 0
| 28.696203
| 66
| 0.743714
| 397
| 2,267
| 3.72796
| 0.093199
| 0.250676
| 0.337838
| 0.472973
| 0.927703
| 0.903378
| 0.903378
| 0.892568
| 0.868243
| 0.868243
| 0
| 0.027168
| 0.155712
| 2,267
| 78
| 67
| 29.064103
| 0.746082
| 0
| 0
| 0.806452
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.177419
| 1
| 0.129032
| false
| 0
| 0.032258
| 0
| 0.16129
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
a2f0765912c790f675eb1bde603c47a77ca771b0
| 41,146
|
py
|
Python
|
bottleneck/slow/move.py
|
stroxler/bottleneck
|
6e91bcb8a21170588ee9a3f2c425a4e307ae05de
|
[
"BSD-2-Clause"
] | 2
|
2015-05-26T09:06:32.000Z
|
2015-05-26T09:06:46.000Z
|
bottleneck/slow/move.py
|
stroxler/bottleneck
|
6e91bcb8a21170588ee9a3f2c425a4e307ae05de
|
[
"BSD-2-Clause"
] | null | null | null |
bottleneck/slow/move.py
|
stroxler/bottleneck
|
6e91bcb8a21170588ee9a3f2c425a4e307ae05de
|
[
"BSD-2-Clause"
] | null | null | null |
"""
Alternative methods (non-Cython) of calculating moving window statistics.
These function are slow but useful for unit testing.
"""
import numpy as np
import bottleneck as bn
convolve1d = None
minimum_filter1d = None
maximum_filter1d = None
__all__ = ['move_sum', 'move_nansum',
'move_mean', 'move_nanmean',
'move_std', 'move_nanstd',
'move_min', 'move_nanmin',
'move_max', 'move_nanmax',
'move_median']
# SUM -----------------------------------------------------------------------
def move_sum(arr, window, axis=-1, method='loop'):
"""
Slow move_sum for unaccelerated ndim/dtype combinations.
Parameters
----------
arr : array_like
Input array.
window : int
The number of elements in the moving window.
axis : int, optional
The axis over which to perform the moving sum. By default the moving
sum is taken over the last axis (-1).
method : str, optional
The following moving window methods are available:
========== =====================================
'filter' scipy.ndimage.convolve1d
'strides' strides tricks
'loop' brute force python loop (default)
========== =====================================
Returns
-------
y : array_like
The moving sum of the input array along the specified axis. The output
has the same shape as the input.
Examples
--------
>>> arr = np.array([1, 2, 3, 4])
>>> bn.slow.move_sum(arr, window=2, axis=0)
array([ NaN, 3., 5., 7.])
"""
arr = np.array(arr, copy=False)
if method == 'filter':
y = move_sum_filter(arr, window, axis=axis)
elif method == 'strides':
y = move_func_strides(np.sum, arr, window, axis=axis)
elif method == 'loop':
y = move_func_loop(np.sum, arr, window, axis=axis)
else:
msg = "`method` must be 'filter', 'strides', or 'loop'."
raise ValueError(msg)
if y.dtype != arr.dtype:
if issubclass(arr.dtype.type, np.inexact):
y = y.astype(arr.dtype)
return y
def move_nansum(arr, window, axis=-1, method='loop'):
"""
Slow move_nansum for unaccelerated ndim/dtype combinations.
Parameters
----------
arr : array_like
Input array.
window : int
The number of elements in the moving window.
axis : int, optional
The axis over which to perform the moving sum. By default the moving
sum is taken over the last axis (-1).
method : str, optional
The following moving window methods are available:
========== =====================================
'filter' scipy.ndimage.convolve1d
'strides' strides tricks
'loop' brute force python loop (default)
========== =====================================
Returns
-------
y : ndarray
The moving sum of the input array along the specified axis, ignoring
NaNs. (A window with all NaNs returns NaN for the window sum.) The
output has the same shape as the input.
Examples
--------
>>> arr = np.array([1, 2, np.nan, 4])
>>> bn.slow.move_nansum(arr, window=2, axis=0)
array([ NaN, 3., 2., 4.])
"""
arr = np.array(arr, copy=False)
if method == 'filter':
y = move_nansum_filter(arr, window, axis=axis)
elif method == 'strides':
y = move_func_strides(np.nansum, arr, window, axis=axis)
elif method == 'loop':
y = move_func_loop(np.nansum, arr, window, axis=axis)
else:
msg = "`method` must be 'filter', 'strides', or 'loop'."
raise ValueError(msg)
if y.dtype != arr.dtype:
if issubclass(arr.dtype.type, np.inexact):
y = y.astype(arr.dtype)
return y
def move_sum_filter(arr, window, axis=-1):
"""
Moving window sum along the specified axis using the filter method.
Parameters
----------
arr : array_like
Input array.
window : int
The number of elements in the moving window.
axis : int, optional
The axis over which to perform the moving sum. By default the moving
sum is taken over the last axis (-1).
Returns
-------
y : ndarray
The moving sum of the input array along the specified axis. The output
has the same shape as the input.
Notes
-----
The calculation of the sums uses scipy.ndimage.convolve1d.
Examples
--------
>>> from bottleneck.slow.move import move_sum_filter
>>> arr = np.array([1, 2, 3, 4])
>>> move_sum_filter(arr, window=2, axis=0)
array([ NaN, 3., 5., 7.])
"""
arr = np.array(arr, copy=False)
global convolve1d
if convolve1d is None:
try:
from scipy.ndimage import convolve1d
except ImportError:
raise ValueError("'filter' method requires SciPy.")
if axis is None:
raise ValueError("An `axis` value of None is not supported.")
if window < 1:
raise ValueError("`window` must be at least 1.")
if window > arr.shape[axis]:
raise ValueError("`window` is too long.")
arr = arr.astype(float)
w = np.ones(window, dtype=int)
x0 = (1 - window) // 2
convolve1d(arr, w, axis=axis, mode='constant', cval=np.nan, origin=x0,
output=arr)
return arr
def move_nansum_filter(arr, window, axis=-1):
"""
Moving sum (ignoring NaNs) along specified axis using the filter method.
Parameters
----------
arr : array_like
Input array.
window : int
The number of elements in the moving window.
axis : int, optional
The axis over which to perform the moving sum. By default the moving
sum is taken over the last axis (-1).
Returns
-------
y : ndarray
The moving sum (ignoring NaNs) of the input array along the specified
axis.(A window with all NaNs returns NaN for the window sum.) The
output has the same shape as the input.
Notes
-----
The calculation of the sums uses scipy.ndimage.convolve1d.
Examples
--------
>>> from bottleneck.slow.move import move_nansum_filter
>>> arr = np.array([1, 2, np.nan, 4, 5, 6, 7])
>>> move_nansum_filter(arr, window=2, axis=0)
array([ NaN, 3., 2., 4., 9., 11., 13.])
"""
arr = np.array(arr, copy=False)
global convolve1d
if convolve1d is None:
try:
from scipy.ndimage import convolve1d
except ImportError:
raise ValueError("'filter' method requires SciPy.")
if axis is None:
raise ValueError("An `axis` value of None is not supported.")
if window < 1:
raise ValueError("`window` must be at least 1.")
if window > arr.shape[axis]:
raise ValueError("`window` is too long.")
arr = arr.astype(float)
nrr = np.isnan(arr)
arr[nrr] = 0
nrr = nrr.astype(int)
w = np.ones(window, dtype=int)
x0 = (1 - window) // 2
convolve1d(arr, w, axis=axis, mode='constant', cval=np.nan, origin=x0,
output=arr)
convolve1d(nrr, w, axis=axis, mode='constant', cval=0, origin=x0,
output=nrr)
arr[nrr == window] = np.nan
return arr
# MEAN -------------------------------------------------------------------
def move_mean(arr, window, axis=-1, method='loop'):
"""
Slow move_mean for unaccelerated ndim/dtype combinations.
Parameters
----------
arr : array_like
Input array.
window : int
The number of elements in the moving window.
axis : int, optional
The axis over which to perform the moving mean. By default the moving
mean is taken over the last axis (-1).
method : str, optional
The following moving window methods are available:
========== =====================================
'filter' scipy.ndimage.convolve1d
'strides' strides tricks
'loop' brute force python loop (default)
========== =====================================
Returns
-------
y : ndarray
The moving mean of the input array along the specified axis. The output
has the same shape as the input.
Examples
--------
>>> arr = np.array([1, 2, 3, 4])
>>> bn.slow.move_mean(arr, window=2, axis=0)
array([ NaN, 1.5, 2.5, 3.5])
"""
arr = np.array(arr, copy=False)
if method == 'filter':
y = move_mean_filter(arr, window, axis=axis)
elif method == 'strides':
y = move_func_strides(np.mean, arr, window, axis=axis)
elif method == 'loop':
y = move_func_loop(np.mean, arr, window, axis=axis)
else:
msg = "`method` must be 'filter', 'strides', or 'loop'."
raise ValueError(msg)
if y.dtype != arr.dtype:
if issubclass(arr.dtype.type, np.inexact):
y = y.astype(arr.dtype)
return y
def move_nanmean(arr, window, axis=-1, method='loop'):
"""
Slow move_nanmean for unaccelerated ndim/dtype combinations.
Parameters
----------
arr : array_like
Input array.
window : int
The number of elements in the moving window.
axis : int, optional
The axis over which to perform the moving mean. By default the moving
mean is taken over the last axis (-1).
method : str, optional
The following moving window methods are available:
========== =====================================
'filter' scipy.ndimage.convolve1d
'strides' strides tricks
'loop' brute force python loop (default)
========== =====================================
Returns
-------
y : ndarray
The moving mean of the input array along the specified axis, ignoring
NaNs. (A window with all NaNs returns NaN for the window mean.) The
output has the same shape as the input.
Examples
--------
>>> arr = np.array([1, 2, np.nan, 4])
>>> bn.slow.move_nanmean(arr, window=2, axis=0)
array([ NaN, 1.5, 2. , 4. ])
"""
arr = np.array(arr, copy=False)
if method == 'filter':
y = move_nanmean_filter(arr, window, axis=axis)
elif method == 'strides':
y = move_func_strides(bn.slow.nanmean, arr, window, axis=axis)
elif method == 'loop':
y = move_func_loop(bn.slow.nanmean, arr, window, axis=axis)
else:
msg = "`method` must be 'filter', 'strides', or 'loop'."
raise ValueError(msg)
if y.dtype != arr.dtype:
if issubclass(arr.dtype.type, np.inexact):
y = y.astype(arr.dtype)
return y
def move_mean_filter(arr, window, axis=-1):
"Moving window mean implemented with a filter."
arr = np.array(arr, copy=False)
global convolve1d
if convolve1d is None:
try:
from scipy.ndimage import convolve1d
except ImportError:
raise ValueError("'filter' method requires SciPy.")
if axis is None:
raise ValueError("An `axis` value of None is not supported.")
if window < 1:
raise ValueError("`window` must be at least 1.")
if window > arr.shape[axis]:
raise ValueError("`window` is too long.")
arr = arr.astype(float)
w = np.empty(window)
w.fill(1.0 / window)
x0 = (1 - window) // 2
convolve1d(arr, w, axis=axis, mode='constant', cval=np.nan, origin=x0,
output=arr)
return arr
def move_nanmean_filter(arr, window, axis=-1):
"Moving window nanmean implemented with a filter."
arr = np.array(arr, copy=False)
global convolve1d
if convolve1d is None:
try:
from scipy.ndimage import convolve1d
except ImportError:
raise ValueError("'filter' method requires SciPy.")
if axis is None:
raise ValueError("An `axis` value of None is not supported.")
if window < 1:
raise ValueError("`window` must be at least 1.")
if window > arr.shape[axis]:
raise ValueError("`window` is too long.")
arr = arr.astype(float)
nrr = np.isnan(arr)
arr[nrr] = 0
nrr = nrr.astype(int)
w = np.ones(window, dtype=int)
x0 = (1 - window) // 2
convolve1d(arr, w, axis=axis, mode='constant', cval=np.nan, origin=x0,
output=arr)
convolve1d(nrr, w, axis=axis, mode='constant', cval=0, origin=x0,
output=nrr)
arr /= (window - nrr)
arr[nrr == window] = np.nan
return arr
# VAR -----------------------------------------------------------------------
def move_var(arr, window, axis=-1, method='loop', ddof=0):
"""
Slow move_var for unaccelerated ndim/dtype combinations.
Parameters
----------
arr : array_like
Input array.
window : int
The number of elements in the moving window.
axis : int, optional
The axis over which to perform the moving variance. By default the
moving variance is taken over the last axis (-1).
method : str, optional
The following moving window methods are available:
========== =====================================
'filter' scipy.ndimage.convolve1d
'strides' strides tricks
'loop' brute force python loop (default)
========== =====================================
Returns
-------
y : ndarray
The moving variance of the input array along the specified axis. The
output has the same shape as the input.
Examples
--------
>>> arr = np.array([1, 2, 3, 4])
>>> bn.slow.move_var(arr, window=2, axis=0)
array([ NaN, 0.25, 0.25, 0.25])
"""
arr = np.array(arr, copy=False)
if ddof != 0:
raise ValueError("`ddof` must be zero for unaccelerated input.")
if method == 'filter':
y = move_var_filter(arr, window, axis=axis)
elif method == 'strides':
y = move_func_strides(np.var, arr, window, axis=axis)
elif method == 'loop':
y = move_func_loop(np.var, arr, window, axis=axis)
else:
msg = "`method` must be 'filter', 'strides', or 'loop'."
raise ValueError(msg)
if y.dtype != arr.dtype:
if issubclass(arr.dtype.type, np.inexact):
y = y.astype(arr.dtype)
return y
def move_nanvar(arr, window, axis=-1, method='loop', ddof=0):
"""
Slow move_nanvar for unaccelerated ndim/dtype combinations.
Parameters
----------
arr : array_like
Input array.
window : int
The number of elements in the moving window.
axis : int, optional
The axis over which to perform the moving variance. By default the
moving variance is taken over the last axis (-1).
method : str, optional
The following moving window methods are available:
========== =====================================
'filter' scipy.ndimage.convolve1d
'strides' strides tricks
'loop' brute force python loop (default)
========== =====================================
Returns
-------
y : ndarray
The moving variance of the input array along the specified axis,
ignoring NaNs. (A window with all NaNs returns NaN for the window
variance.) The output has the same shape as the input.
Examples
--------
>>> arr = np.array([1, 2, np.nan, 4, 5])
>>> bn.slow.move_nanvar(arr, window=3, axis=0)
array([ NaN, NaN, 0.25, 1. , 0.25])
"""
arr = np.array(arr, copy=False)
if ddof != 0:
raise ValueError("`ddof` must be zero for unaccelerated input.")
if method == 'filter':
y = move_nanvar_filter(arr, window, axis=axis)
elif method == 'strides':
y = move_func_strides(bn.slow.nanvar, arr, window, axis=axis)
elif method == 'loop':
y = move_func_loop(bn.slow.nanvar, arr, window, axis=axis)
else:
msg = "`method` must be 'filter', 'strides', or 'loop'."
raise ValueError(msg)
if y.dtype != arr.dtype:
if issubclass(arr.dtype.type, np.inexact):
y = y.astype(arr.dtype)
return y
def move_var_filter(arr, window, axis=-1):
"Moving window variance implemented with a filter."
arr = np.array(arr, copy=False)
global convolve1d
if convolve1d is None:
try:
from scipy.ndimage import convolve1d
except ImportError:
raise ValueError("'filter' method requires SciPy.")
if axis is None:
raise ValueError("An `axis` value of None is not supported.")
if window < 1:
raise ValueError("`window` must be at least 1.")
if window > arr.shape[axis]:
raise ValueError("`window` is too long.")
arr = arr.astype(float)
w = np.empty(window)
w.fill(1.0 / window)
x0 = (1 - window) // 2
y = convolve1d(arr, w, axis=axis, mode='constant', cval=np.nan, origin=x0)
y *= y
arr *= arr
convolve1d(arr, w, axis=axis, mode='constant', cval=np.nan, origin=x0,
output=arr)
arr -= y
return arr
def move_nanvar_filter(arr, window, axis=-1):
"Moving window variance ignoring NaNs, implemented with a filter."
arr = np.array(arr, copy=False)
global convolve1d
if convolve1d is None:
try:
from scipy.ndimage import convolve1d
except ImportError:
raise ValueError("'filter' method requires SciPy.")
if axis is None:
raise ValueError("An `axis` value of None is not supported.")
if window < 1:
raise ValueError("`window` must be at least 1.")
if window > arr.shape[axis]:
raise ValueError("`window` is too long.")
arr = arr.astype(float)
nrr = np.isnan(arr)
arr[nrr] = 0
nrr = nrr.astype(int)
w = np.ones(window, dtype=int)
x0 = (1 - window) // 2
convolve1d(nrr, w, axis=axis, mode='constant', cval=0, origin=x0,
output=nrr)
y = convolve1d(arr, w, axis=axis, mode='constant', cval=np.nan, origin=x0)
y /= (window - nrr)
y *= y
arr *= arr
convolve1d(arr, w, axis=axis, mode='constant', cval=np.nan, origin=x0,
output=arr)
arr /= (window - nrr)
arr -= y
arr[nrr == window] = np.nan
return arr
# STD -----------------------------------------------------------------------
def move_std(arr, window, axis=-1, method='loop', ddof=0):
"""
Moving window standard deviation along the specified axis.
Parameters
----------
arr : array_like
Input array.
window : int
The number of elements in the moving window.
axis : int, optional
The axis over which to perform the moving standard deviation.
By default the moving standard deviation is taken over the last
axis (-1).
method : str, optional
The following moving window methods are available:
========== =====================================
'filter' scipy.ndimage.convolve1d
'strides' strides tricks
'loop' brute force python loop (default)
========== =====================================
Returns
-------
y : ndarray
The moving standard deviation of the input array along the specified
axis. The output has the same shape as the input.
Examples
--------
>>> arr = np.array([1, 2, 3, 4])
>>> bn.slow.move_std(arr, window=2)
array([ NaN, 0.5, 0.5, 0.5])
"""
arr = np.array(arr, copy=False)
if ddof != 0:
raise ValueError("`ddof` must be zero for unaccelerated input.")
if method == 'filter':
y = move_std_filter(arr, window, axis=axis)
elif method == 'strides':
y = move_func_strides(np.std, arr, window, axis=axis)
elif method == 'loop':
y = move_func_loop(np.std, arr, window, axis=axis)
else:
msg = "`method` must be 'filter', 'strides', or 'loop'."
raise ValueError(msg)
if y.dtype != arr.dtype:
if issubclass(arr.dtype.type, np.inexact):
y = y.astype(arr.dtype)
return y
def move_nanstd(arr, window, axis=-1, method='loop', ddof=0):
"""
Moving window standard deviation along the specified axis, ignoring NaNs.
Parameters
----------
arr : array_like
Input array.
window : int
The number of elements in the moving window.
axis : int, optional
The axis over which to perform the moving standard deviation.
By default the moving standard deviation is taken over the last
axis (-1).
method : str, optional
The following moving window methods are available:
========== =====================================
'filter' scipy.ndimage.convolve1d
'strides' strides tricks
'loop' brute force python loop (default)
========== =====================================
Returns
-------
y : ndarray
The moving standard deviation of the input array along the specified
axis, ignoring NaNs. (A window with all NaNs returns NaN for the window
standard deviation.) The output has the same shape as the input.
Examples
--------
>>> arr = np.array([1, 2, np.nan, 4, 5])
>>> bn.slow.move_nanstd(arr, window=3)
array([ NaN, NaN, 0.5, 1. , 0.5])
"""
arr = np.array(arr, copy=False)
if ddof != 0:
raise ValueError("`ddof` must be zero for unaccelerated input.")
if method == 'filter':
y = move_nanstd_filter(arr, window, axis=axis)
elif method == 'strides':
y = move_func_strides(bn.slow.nanstd, arr, window, axis=axis)
elif method == 'loop':
y = move_func_loop(bn.slow.nanstd, arr, window, axis=axis)
else:
msg = "`method` must be 'filter', 'strides', or 'loop'."
raise ValueError(msg)
if y.dtype != arr.dtype:
if issubclass(arr.dtype.type, np.inexact):
y = y.astype(arr.dtype)
return y
def move_std_filter(arr, window, axis=-1):
"Moving window standard deviation implemented with a filter."
arr = np.array(arr, copy=False)
if axis is None:
raise ValueError("An `axis` value of None is not supported.")
if window < 1:
raise ValueError("`window` must be at least 1.")
if window > arr.shape[axis]:
raise ValueError("`window` is too long.")
y = move_var_filter(arr, window, axis=axis)
np.sqrt(y, y)
return y
def move_nanstd_filter(arr, window, axis=-1):
"Moving window standard deviation ignoring NaNs, implemented with filter."
arr = np.array(arr, copy=False)
if axis is None:
raise ValueError("An `axis` value of None is not supported.")
if window < 1:
raise ValueError("`window` must be at least 1.")
if window > arr.shape[axis]:
raise ValueError("`window` is too long.")
y = move_nanvar_filter(arr, window, axis=axis)
np.sqrt(y, y)
return y
# MIN -----------------------------------------------------------------------
def move_min(arr, window, axis=-1, method='loop'):
"""
Slow move_min for unaccelerated ndim/dtype combinations.
Parameters
----------
arr : array_like
Input array.
window : int
The number of elements in the moving window.
axis : int, optional
The axis over which to perform the moving minimum. By default the
moving minimum is taken over the last axis (-1).
method : str, optional
The following moving window methods are available:
========== =========================================
'filter' scipy.ndimage.minimum_filter1d
'strides' strides tricks
'loop' brute force python loop (default)
========== =========================================
Returns
-------
y : ndarray
The moving minimum of the input array along the specified axis. The
output has the same shape as the input.
Examples
--------
>>> arr = np.array([1, 2, 3, 4])
>>> bn.slow.move_min(arr, window=2)
array([ NaN, 1., 2., 3.])
"""
if method == 'filter':
y = move_min_filter(arr, window, axis=axis)
elif method == 'strides':
y = move_func_strides(np.min, arr, window, axis=axis)
elif method == 'loop':
y = move_func_loop(np.min, arr, window, axis=axis)
else:
raise ValueError("`method` must be 'filter', 'strides', or 'loop'.")
return y
def move_nanmin(arr, window, axis=-1, method='loop'):
"""
Slow move_nanmin for unaccelerated ndim/dtype combinations.
Parameters
----------
arr : array_like
Input array.
window : int
The number of elements in the moving window.
axis : int, optional
The axis over which to perform the moving minimum. By default the
moving minimum is taken over the last axis (-1).
method : str, optional
The following moving window methods are available:
========== =========================================
'filter' scipy.ndimage.minimum_filter1d
'strides' strides tricks
'loop' brute force python loop (default)
========== =========================================
Returns
-------
y : ndarray
The moving minimum of the input array along the specified axis,
ignoring NaNs. (A window with all NaNs returns NaN for the window
minimum.) The output has the same shape as the input.
Examples
--------
>>> arr = np.array([1, 2, np.nan, 4, 5])
>>> bn.slow.move_nanmin(arr, window=2)
array([ NaN, 1., 2., 4., 4.])
"""
if method == 'filter':
y = move_nanmin_filter(arr, window, axis=axis)
elif method == 'strides':
y = move_nanmin_strides(arr, window, axis=axis)
elif method == 'loop':
y = move_nanmin_loop(arr, window, axis=axis)
else:
raise ValueError("`method` must be 'filter', 'strides', or 'loop'.")
return y
def move_min_filter(arr, window, axis=-1):
"Moving window minimium implemented with a filter."
arr = np.array(arr, copy=False)
global minimum_filter1d
if minimum_filter1d is None:
try:
from scipy.ndimage import minimum_filter1d
except ImportError:
raise ValueError("'filter' method requires SciPy.")
if axis is None:
raise ValueError("An `axis` value of None is not supported.")
if window < 1:
raise ValueError("`window` must be at least 1.")
if window > arr.shape[axis]:
raise ValueError("`window` is too long.")
y = arr.astype(float)
x0 = (window - 1) // 2
minimum_filter1d(y, window, axis=axis, mode='constant', cval=np.nan,
origin=x0, output=y)
return y
def move_nanmin_filter(arr, window, axis=-1):
    """Moving window minimum ignoring NaNs, implemented with a filter."""
    # SciPy is imported lazily on first use; the imports rebind the
    # module-level globals so later calls skip the import machinery.
    global minimum_filter1d, convolve1d
    arr = np.array(arr, copy=False)
    if minimum_filter1d is None:
        try:
            from scipy.ndimage import minimum_filter1d
        except ImportError:
            raise ValueError("'filter' method requires SciPy.")
    if convolve1d is None:
        try:
            from scipy.ndimage import convolve1d
        except ImportError:
            raise ValueError("'filter' method requires SciPy.")
    if axis is None:
        raise ValueError("An `axis` value of None is not supported.")
    if window < 1:
        raise ValueError("`window` must be at least 1.")
    if window > arr.shape[axis]:
        raise ValueError("`window` is too long.")
    arr = arr.astype(float)
    nrr = np.isnan(arr)
    # Replace NaNs with +inf so they can never win the minimum.
    arr[nrr] = np.inf
    # Shift the filter origin so each output element spans its trailing window.
    x0 = (window - 1) // 2
    minimum_filter1d(arr, window, axis=axis, mode='constant', cval=np.nan,
                     origin=x0, output=arr)
    # Count NaNs per window via a moving sum (convolution with a window of
    # ones); note the mirrored origin for the convolution.
    w = np.ones(window, dtype=int)
    nrr = nrr.astype(int)
    x0 = (1 - window) // 2
    convolve1d(nrr, w, axis=axis, mode='constant', cval=0, origin=x0,
               output=nrr)
    # Windows that were entirely NaN get NaN back.
    arr[nrr == window] = np.nan
    return arr
def move_nanmin_loop(arr, window, axis=-1):
    """Moving window minimum ignoring NaNs, implemented with a python loop."""
    arr = np.array(arr, copy=False)
    if axis is None:
        raise ValueError("An `axis` value of None is not supported.")
    if window < 1:
        raise ValueError("`window` must be at least 1.")
    if window > arr.shape[axis]:
        raise ValueError("`window` is too long.")
    arr = arr.astype(float)
    nan_mask = np.isnan(arr)
    # +inf never wins a minimum, so NaNs are effectively ignored.
    arr[nan_mask] = np.inf
    result = move_func_loop(np.min, arr, window, axis=axis)
    # Restore NaN wherever the whole window was NaN.
    nan_per_window = move_func_loop(np.sum, nan_mask.astype(int), window,
                                    axis=axis)
    result[nan_per_window == window] = np.nan
    return result
def move_nanmin_strides(arr, window, axis=-1):
    """Moving window minimum ignoring NaNs, implemented with strides tricks."""
    arr = np.array(arr, copy=False)
    if axis is None:
        raise ValueError("An `axis` value of None is not supported.")
    if window < 1:
        raise ValueError("`window` must be at least 1.")
    if window > arr.shape[axis]:
        raise ValueError("`window` is too long.")
    arr = arr.astype(float)
    nan_mask = np.isnan(arr)
    # +inf never wins a minimum, so NaNs are effectively ignored.
    arr[nan_mask] = np.inf
    result = move_func_strides(np.min, arr, window, axis=axis)
    # Restore NaN wherever the whole window was NaN.
    nan_per_window = move_func_strides(np.sum, nan_mask.astype(int), window,
                                       axis=axis)
    result[nan_per_window == window] = np.nan
    return result
# MAX -----------------------------------------------------------------------
def move_max(arr, window, axis=-1, method='loop'):
    """
    Slow move_max for unaccelerated ndim/dtype combinations.

    Parameters
    ----------
    arr : array_like
        Input array.
    window : int
        The number of elements in the moving window.
    axis : int, optional
        The axis over which to perform the moving maximum. By default the
        moving maximum is taken over the last axis (-1).
    method : str, optional
        The following moving window methods are available:
            ========== =========================================
            'filter'   scipy.ndimage.maximum_filter1d
            'strides'  strides tricks
            'loop'     brute force python loop (default)
            ========== =========================================

    Returns
    -------
    y : ndarray
        The moving maximum of the input array along the specified axis. The
        output has the same shape as the input.

    Examples
    --------
    >>> arr = np.array([1, 2, 3, 4])
    >>> bn.slow.move_max(arr, window=2)
    array([ NaN,   2.,   3.,   4.])
    """
    # Fix: the docstring previously claimed the 'filter' method used
    # minimum_filter1d; move_max_filter uses maximum_filter1d.
    if method == 'filter':
        y = move_max_filter(arr, window, axis=axis)
    elif method == 'strides':
        y = move_func_strides(np.max, arr, window, axis=axis)
    elif method == 'loop':
        y = move_func_loop(np.max, arr, window, axis=axis)
    else:
        raise ValueError("`method` must be 'filter', 'strides', or 'loop'.")
    return y
def move_nanmax(arr, window, axis=-1, method='loop'):
    """
    Slow move_nanmax for unaccelerated ndim/dtype combinations, ignoring NaNs.

    Parameters
    ----------
    arr : array_like
        Input array.
    window : int
        Number of elements in the moving window.
    axis : int, optional
        Axis along which the moving maximum is taken (default -1, the
        last axis).
    method : str, optional
        Implementation to dispatch to: 'filter'
        (scipy.ndimage.maximum_filter1d), 'strides' (strides tricks) or
        'loop' (brute force python loop, the default).

    Returns
    -------
    y : ndarray
        Moving maximum along `axis`, ignoring NaNs. (A window containing
        only NaNs yields NaN.) Same shape as the input.

    Examples
    --------
    >>> arr = np.array([1, 2, np.nan, 4, 5])
    >>> bn.slow.move_nanmax(arr, window=2)
    array([ NaN,   2.,   2.,   4.,   5.])
    """
    # Guard-clause dispatch: return directly from the matching branch.
    if method == 'filter':
        return move_nanmax_filter(arr, window, axis=axis)
    if method == 'strides':
        return move_nanmax_strides(arr, window, axis=axis)
    if method == 'loop':
        return move_nanmax_loop(arr, window, axis=axis)
    raise ValueError("`method` must be 'filter', 'strides', or 'loop'.")
def move_max_filter(arr, window, axis=-1):
    """Moving window maximum implemented with scipy.ndimage.maximum_filter1d."""
    arr = np.array(arr, copy=False)
    global maximum_filter1d
    if maximum_filter1d is None:
        # SciPy is only needed for the 'filter' method, so import lazily
        # and cache the function in the module-level global.
        try:
            from scipy.ndimage import maximum_filter1d
        except ImportError:
            raise ValueError("'filter' method requires SciPy.")
    if axis is None:
        raise ValueError("An `axis` value of None is not supported.")
    if window < 1:
        raise ValueError("`window` must be at least 1.")
    if window > arr.shape[axis]:
        raise ValueError("`window` is too long.")
    out = arr.astype(float)
    # Shift the filter origin so each output element spans its trailing window.
    origin = (window - 1) // 2
    maximum_filter1d(out, window, axis=axis, mode='constant', cval=np.nan,
                     origin=origin, output=out)
    return out
def move_nanmax_filter(arr, window, axis=-1):
    """Moving window maximum ignoring NaNs, implemented with a filter."""
    arr = np.array(arr, copy=False)
    # SciPy is imported lazily on first use; the imports rebind the
    # module-level globals so later calls skip the import machinery.
    global maximum_filter1d, convolve1d
    if maximum_filter1d is None:
        try:
            from scipy.ndimage import maximum_filter1d
        except ImportError:
            raise ValueError("'filter' method requires SciPy.")
    if convolve1d is None:
        try:
            from scipy.ndimage import convolve1d
        except ImportError:
            raise ValueError("'filter' method requires SciPy.")
    if axis is None:
        raise ValueError("An `axis` value of None is not supported.")
    if window < 1:
        raise ValueError("`window` must be at least 1.")
    if window > arr.shape[axis]:
        raise ValueError("`window` is too long.")
    arr = arr.astype(float)
    nrr = np.isnan(arr)
    # Replace NaNs with -inf so they can never win the maximum.
    arr[nrr] = -np.inf
    # Shift the filter origin so each output element spans its trailing window.
    x0 = (window - 1) // 2
    maximum_filter1d(arr, window, axis=axis, mode='constant', cval=np.nan,
                     origin=x0, output=arr)
    # Count NaNs per window via a moving sum (convolution with a window of
    # ones); note the mirrored origin for the convolution.
    w = np.ones(window, dtype=int)
    nrr = nrr.astype(int)
    x0 = (1 - window) // 2
    convolve1d(nrr, w, axis=axis, mode='constant', cval=0, origin=x0,
               output=nrr)
    # Windows that were entirely NaN get NaN back.
    arr[nrr == window] = np.nan
    return arr
def move_nanmax_loop(arr, window, axis=-1):
    """Moving window maximum ignoring NaNs, implemented with a python loop."""
    arr = np.array(arr, copy=False)
    if axis is None:
        raise ValueError("An `axis` value of None is not supported.")
    if window < 1:
        raise ValueError("`window` must be at least 1.")
    if window > arr.shape[axis]:
        raise ValueError("`window` is too long.")
    arr = arr.astype(float)
    nan_mask = np.isnan(arr)
    # -inf never wins a maximum, so NaNs are effectively ignored.
    arr[nan_mask] = -np.inf
    result = move_func_loop(np.max, arr, window, axis=axis)
    # Restore NaN wherever the whole window was NaN.
    nan_per_window = move_func_loop(np.sum, nan_mask.astype(int), window,
                                    axis=axis)
    result[nan_per_window == window] = np.nan
    return result
def move_nanmax_strides(arr, window, axis=-1):
    """Moving window maximum ignoring NaNs, implemented with strides tricks."""
    arr = np.array(arr, copy=False)
    if axis is None:
        raise ValueError("An `axis` value of None is not supported.")
    if window < 1:
        raise ValueError("`window` must be at least 1.")
    if window > arr.shape[axis]:
        raise ValueError("`window` is too long.")
    arr = arr.astype(float)
    nan_mask = np.isnan(arr)
    # -inf never wins a maximum, so NaNs are effectively ignored.
    arr[nan_mask] = -np.inf
    result = move_func_strides(np.max, arr, window, axis=axis)
    # Restore NaN wherever the whole window was NaN.
    nan_per_window = move_func_strides(np.sum, nan_mask.astype(int), window,
                                       axis=axis)
    result[nan_per_window == window] = np.nan
    return result
# MEDIAN --------------------------------------------------------------------
def move_median(arr, window, axis=-1, method='loop'):
    """
    Slow moving window median along the specified axis.

    Parameters
    ----------
    arr : array_like
        Input array.
    window : int
        Number of elements in the moving window.
    axis : int, optional
        Axis along which the moving median is taken (default -1, the
        last axis).
    method : str, optional
        Implementation to dispatch to: 'loop' (brute force python loop,
        the default) or 'strides' (strides tricks).

    Returns
    -------
    y : ndarray
        Moving median along `axis`. Same shape as the input.

    Examples
    --------
    >>> arr = np.array([1, 2, 3, 4, 5])
    >>> bn.move_median(arr, window=2)
    array([ NaN,  1.5,  2.5,  3.5,  4.5])
    """
    arr = np.array(arr, copy=False)
    if method == 'strides':
        y = move_func_strides(np.median, arr, window, axis=axis)
    elif method == 'loop':
        y = move_func_loop(np.median, arr, window, axis=axis)
    else:
        raise ValueError("`method` must be 'strides' or 'loop'.")
    # np.median computes in float; cast back only for inexact input dtypes
    # (e.g. float32) so integer inputs keep the float result.
    if y.dtype != arr.dtype and issubclass(arr.dtype.type, np.inexact):
        y = y.astype(arr.dtype)
    return y
# GENERAL --------------------------------------------------------------------
def move_func(func, arr, window, axis=-1, method='loop', **kwargs):
    """
    Generic moving window function along the specified axis.

    Parameters
    ----------
    func : function
        A reducing function such as np.sum, np.max, or np.median that takes
        a Numpy array and axis and, optionally, key word arguments as input.
    arr : array_like
        Input array.
    window : int
        The number of elements in the moving window.
    axis : int, optional
        The axis over which to evaluate `func`. By default the window moves
        along the last axis (-1).
    method : str, optional
        The following moving window methods are available:
            ========== =====================================
            'loop'     brute force python loop (default)
            'strides'  strides tricks
            ========== =====================================
    **kwargs
        Extra keyword arguments forwarded to `func`.

    Returns
    -------
    y : ndarray
        A moving window evaluation of `func` along the specified axis of the
        input array. The output has the same shape as the input.

    Examples
    --------
    >>> arr = np.arange(4)
    >>> bn.slow.move_func(np.sum, arr, window=2)
    array([ NaN,   1.,   3.,   5.])

    which gives the same result as:

    >>> bn.slow.move_sum(arr, window=2)
    array([ NaN,   1.,   3.,   5.])
    """
    if method == 'strides':
        y = move_func_strides(func, arr, window, axis=axis, **kwargs)
    elif method == 'loop':
        # Bug fix: kwargs were previously dropped on the 'loop' path even
        # though move_func_loop accepts them (the 'strides' path forwarded
        # them); forward them here as well.
        y = move_func_loop(func, arr, window, axis=axis, **kwargs)
    else:
        msg = "`method` must be 'strides' or 'loop'."
        raise ValueError(msg)
    return y
def move_func_loop(func, arr, window, axis=-1, **kwargs):
    """Generic moving window function implemented with a python loop.

    Applies `func` over each trailing window of length `window` along
    `axis`; the first ``window - 1`` positions are NaN. Extra keyword
    arguments are forwarded to `func`. Returns a float ndarray with the
    same shape as the input.
    """
    arr = np.array(arr, copy=False)
    if axis is None:
        raise ValueError("An `axis` value of None is not supported.")
    if window < 1:
        raise ValueError("`window` must be at least 1.")
    if window > arr.shape[axis]:
        raise ValueError("`window` is too long.")
    y = np.empty(arr.shape)
    y.fill(np.nan)
    idx1 = [slice(None)] * arr.ndim
    idx2 = list(idx1)
    for i in range(window - 1, arr.shape[axis]):
        idx1[axis] = slice(i + 1 - window, i + 1)
        idx2[axis] = i
        # Bug fix: index with tuples. Indexing an ndarray with a list is
        # fancy indexing and is rejected for mixed slice/int lists on
        # modern NumPy.
        y[tuple(idx2)] = func(arr[tuple(idx1)], axis=axis, **kwargs)
    return y
def move_func_strides(func, arr, window, axis=-1, **kwargs):
    """Generic moving window function implemented with strides tricks.

    Builds a zero-copy view with an extra window dimension via
    as_strided, reduces it with `func`, and pads the first
    ``window - 1`` positions along `axis` with NaN. Extra keyword
    arguments are forwarded to `func`. Returns a float ndarray with the
    same shape as the input.
    """
    arr = np.array(arr, copy=False)
    if axis is None:
        raise ValueError("An `axis` value of None is not supported.")
    if window < 1:
        raise ValueError("`window` must be at least 1.")
    if window > arr.shape[axis]:
        raise ValueError("`window` is too long.")
    ndim = arr.ndim
    # Normalize a negative axis to its positive equivalent.
    idx = range(ndim)
    axis = idx[axis]
    arrshape0 = tuple(arr.shape)
    if axis >= ndim:
        raise IndexError("`axis` is out of range.")
    strides = arr.strides
    num_windows = arr.shape[axis] - window + 1
    # Insert a window dimension after `axis`; both dimensions step by the
    # original stride, so consecutive windows overlap by window - 1.
    shape = arr.shape[:axis] + (num_windows, window) + arr.shape[axis + 1:]
    strides = (strides[:axis] + (strides[axis], strides[axis])
               + strides[axis + 1:])
    z = np.lib.stride_tricks.as_strided(arr, shape=shape, strides=strides)
    y = func(z, axis=(axis + 1), **kwargs)
    ynan = np.empty(arrshape0)
    ynan.fill(np.nan)
    index = [slice(None)] * ndim
    index[axis] = slice(window - 1, None)
    # Bug fix: index with a tuple; list-of-slices indexing is fancy
    # indexing and is rejected on modern NumPy.
    ynan[tuple(index)] = y
    return ynan
| 33.561175
| 79
| 0.566325
| 5,274
| 41,146
| 4.372582
| 0.039249
| 0.044231
| 0.045098
| 0.035384
| 0.925459
| 0.915745
| 0.896449
| 0.876805
| 0.853953
| 0.84489
| 0
| 0.013871
| 0.276382
| 41,146
| 1,225
| 80
| 33.588571
| 0.760664
| 0.412823
| 0
| 0.783557
| 0
| 0
| 0.186621
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.053691
| false
| 0
| 0.043624
| 0
| 0.151007
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0c08e01c3ccb1e8541e31133bfa2c64c0f8a1605
| 10,144
|
py
|
Python
|
seqgra/learner/bayes/bayeslearner.py
|
gifford-lab/seqgra
|
3c7547878ecda4c00572746b8a07e0d614c9dbef
|
[
"MIT"
] | null | null | null |
seqgra/learner/bayes/bayeslearner.py
|
gifford-lab/seqgra
|
3c7547878ecda4c00572746b8a07e0d614c9dbef
|
[
"MIT"
] | null | null | null |
seqgra/learner/bayes/bayeslearner.py
|
gifford-lab/seqgra
|
3c7547878ecda4c00572746b8a07e0d614c9dbef
|
[
"MIT"
] | 2
|
2021-06-14T20:27:40.000Z
|
2021-06-14T20:29:29.000Z
|
"""MIT - CSAIL - Gifford Lab - seqgra
TensorFlow Keras learners
@author: Konstantin Krismer
"""
from typing import Any, List, Optional
from seqgra import ModelSize
from seqgra.learner import DNAMultiClassClassificationLearner
from seqgra.learner import DNAMultiLabelClassificationLearner
from seqgra.learner import ProteinMultiClassClassificationLearner
from seqgra.learner import ProteinMultiLabelClassificationLearner
from seqgra.learner.bayes import BayesOptimalHelper
from seqgra.model import ModelDefinition
class BayesOptimalDNAMultiClassClassificationLearner(
        DNAMultiClassClassificationLearner):
    """Bayes-optimal learner for DNA multi-class classification.

    All model operations are delegated to ``BayesOptimalHelper``; the
    model is analytical, so there are no trainable parameters and
    nothing is persisted to or loaded from disk.
    """

    def __init__(self, model_definition: ModelDefinition, data_dir: str,
                 output_dir: str, validate_data: bool = True,
                 silent: bool = False) -> None:
        super().__init__(model_definition, data_dir, output_dir, validate_data,
                         silent=silent)

    def create_model(self) -> None:
        BayesOptimalHelper.create_model(self)

    def print_model_summary(self):
        BayesOptimalHelper.print_model_summary(self)

    def set_seed(self) -> None:
        BayesOptimalHelper.set_seed(self)

    def _train_model(self,
                     file_name_train: Optional[str] = None,
                     file_name_val: Optional[str] = None,
                     x_train: Optional[List[str]] = None,
                     y_train: Optional[List[str]] = None,
                     x_val: Optional[List[str]] = None,
                     y_val: Optional[List[str]] = None) -> None:
        # Training data is ignored: the helper derives the model analytically.
        BayesOptimalHelper.train_model(self)

    def evaluate_model(self, file_name: Optional[str] = None,
                       x: Optional[List[str]] = None,
                       y: Optional[List[str]] = None):
        """Evaluate on (x, y) or on examples parsed from `file_name`."""
        if x is not None and y is not None:
            pass
        elif file_name is not None:
            x, y = self.parse_examples_data(file_name)
        else:
            # ValueError is more precise than a bare Exception for an
            # invalid-argument condition (callers catching Exception
            # still catch it).
            raise ValueError("specify either file_name or x, y")
        x = self.encode_x(x)
        y = self.encode_y(y)
        return BayesOptimalHelper.evaluate_model(self, x, y)

    def predict(self, file_name: Optional[str] = None,
                x: Optional[Any] = None,
                encode: bool = True):
        """Predict for `x` (optionally encoding it) or for examples in `file_name`."""
        if x is not None:
            if encode:
                x = self.encode_x(x)
        elif file_name is not None:
            x, _ = self.parse_examples_data(file_name)
            x = self.encode_x(x)
        else:
            raise ValueError("specify either file_name or x")
        return BayesOptimalHelper.predict(self, x, self.silent)

    def save_model(self, file_name: Optional[str] = None):
        # Nothing to persist: the model has no learned state.
        pass

    def write_session_info(self) -> None:
        BayesOptimalHelper.write_session_info(self)

    def load_model(self, file_name: Optional[str] = None):
        # Recreate the analytical model instead of loading from disk.
        self.create_model()

    def get_num_params(self) -> ModelSize:
        # The analytical model has no trainable parameters.
        return 0
class BayesOptimalDNAMultiLabelClassificationLearner(
        DNAMultiLabelClassificationLearner):
    """Bayes-optimal learner for DNA multi-label classification.

    All model operations are delegated to ``BayesOptimalHelper``; the
    model is analytical, so there are no trainable parameters and
    nothing is persisted to or loaded from disk.
    """

    def __init__(self, model_definition: ModelDefinition, data_dir: str,
                 output_dir: str, validate_data: bool = True,
                 silent: bool = False) -> None:
        super().__init__(model_definition, data_dir, output_dir, validate_data,
                         silent=silent)

    def create_model(self) -> None:
        BayesOptimalHelper.create_model(self)

    def print_model_summary(self):
        BayesOptimalHelper.print_model_summary(self)

    def set_seed(self) -> None:
        BayesOptimalHelper.set_seed(self)

    def _train_model(self,
                     file_name_train: Optional[str] = None,
                     file_name_val: Optional[str] = None,
                     x_train: Optional[List[str]] = None,
                     y_train: Optional[List[str]] = None,
                     x_val: Optional[List[str]] = None,
                     y_val: Optional[List[str]] = None) -> None:
        # Training data is ignored: the helper derives the model analytically.
        BayesOptimalHelper.train_model(self)

    def evaluate_model(self, file_name: Optional[str] = None,
                       x: Optional[List[str]] = None,
                       y: Optional[List[str]] = None):
        """Evaluate on (x, y) or on examples parsed from `file_name`."""
        if x is not None and y is not None:
            pass
        elif file_name is not None:
            x, y = self.parse_examples_data(file_name)
        else:
            # ValueError is more precise than a bare Exception for an
            # invalid-argument condition (callers catching Exception
            # still catch it).
            raise ValueError("specify either file_name or x, y")
        x = self.encode_x(x)
        y = self.encode_y(y)
        return BayesOptimalHelper.evaluate_model(self, x, y)

    def predict(self, file_name: Optional[str] = None,
                x: Optional[Any] = None,
                encode: bool = True):
        """Predict for `x` (optionally encoding it) or for examples in `file_name`."""
        if x is not None:
            if encode:
                x = self.encode_x(x)
        elif file_name is not None:
            x, _ = self.parse_examples_data(file_name)
            x = self.encode_x(x)
        else:
            raise ValueError("specify either file_name or x")
        return BayesOptimalHelper.predict(self, x, self.silent)

    def save_model(self, file_name: Optional[str] = None):
        # Nothing to persist: the model has no learned state.
        pass

    def write_session_info(self) -> None:
        BayesOptimalHelper.write_session_info(self)

    def load_model(self, file_name: Optional[str] = None):
        # Recreate the analytical model instead of loading from disk.
        self.create_model()

    def get_num_params(self) -> ModelSize:
        # The analytical model has no trainable parameters.
        return 0
class BayesOptimalProteinMultiClassClassificationLearner(
        ProteinMultiClassClassificationLearner):
    """Bayes-optimal learner for protein multi-class classification.

    All model operations are delegated to ``BayesOptimalHelper``; the
    model is analytical, so there are no trainable parameters and
    nothing is persisted to or loaded from disk.
    """

    def __init__(self, model_definition: ModelDefinition, data_dir: str,
                 output_dir: str, validate_data: bool = True,
                 silent: bool = False) -> None:
        super().__init__(model_definition, data_dir, output_dir, validate_data,
                         silent=silent)

    def create_model(self) -> None:
        BayesOptimalHelper.create_model(self)

    def print_model_summary(self):
        BayesOptimalHelper.print_model_summary(self)

    def set_seed(self) -> None:
        BayesOptimalHelper.set_seed(self)

    def _train_model(self,
                     file_name_train: Optional[str] = None,
                     file_name_val: Optional[str] = None,
                     x_train: Optional[List[str]] = None,
                     y_train: Optional[List[str]] = None,
                     x_val: Optional[List[str]] = None,
                     y_val: Optional[List[str]] = None) -> None:
        # Training data is ignored: the helper derives the model analytically.
        BayesOptimalHelper.train_model(self)

    def evaluate_model(self, file_name: Optional[str] = None,
                       x: Optional[List[str]] = None,
                       y: Optional[List[str]] = None):
        """Evaluate on (x, y) or on examples parsed from `file_name`."""
        if x is not None and y is not None:
            pass
        elif file_name is not None:
            x, y = self.parse_examples_data(file_name)
        else:
            # ValueError is more precise than a bare Exception for an
            # invalid-argument condition (callers catching Exception
            # still catch it).
            raise ValueError("specify either file_name or x, y")
        x = self.encode_x(x)
        y = self.encode_y(y)
        return BayesOptimalHelper.evaluate_model(self, x, y)

    def predict(self, file_name: Optional[str] = None,
                x: Optional[Any] = None,
                encode: bool = True):
        """Predict for `x` (optionally encoding it) or for examples in `file_name`."""
        if x is not None:
            if encode:
                x = self.encode_x(x)
        elif file_name is not None:
            x, _ = self.parse_examples_data(file_name)
            x = self.encode_x(x)
        else:
            raise ValueError("specify either file_name or x")
        return BayesOptimalHelper.predict(self, x, self.silent)

    def save_model(self, file_name: Optional[str] = None):
        # Nothing to persist: the model has no learned state.
        pass

    def write_session_info(self) -> None:
        BayesOptimalHelper.write_session_info(self)

    def load_model(self, file_name: Optional[str] = None):
        # Recreate the analytical model instead of loading from disk.
        self.create_model()

    def get_num_params(self) -> ModelSize:
        # The analytical model has no trainable parameters.
        return 0
class BayesOptimalProteinMultiLabelClassificationLearner(
        ProteinMultiLabelClassificationLearner):
    """Bayes-optimal learner for protein multi-label classification.

    All model operations are delegated to ``BayesOptimalHelper``; the
    model is analytical, so there are no trainable parameters and
    nothing is persisted to or loaded from disk.
    """

    def __init__(self, model_definition: ModelDefinition, data_dir: str,
                 output_dir: str, validate_data: bool = True,
                 silent: bool = False) -> None:
        super().__init__(model_definition, data_dir, output_dir, validate_data,
                         silent=silent)

    def create_model(self) -> None:
        BayesOptimalHelper.create_model(self)

    def print_model_summary(self):
        BayesOptimalHelper.print_model_summary(self)

    def set_seed(self) -> None:
        BayesOptimalHelper.set_seed(self)

    def _train_model(self,
                     file_name_train: Optional[str] = None,
                     file_name_val: Optional[str] = None,
                     x_train: Optional[List[str]] = None,
                     y_train: Optional[List[str]] = None,
                     x_val: Optional[List[str]] = None,
                     y_val: Optional[List[str]] = None) -> None:
        # Training data is ignored: the helper derives the model analytically.
        BayesOptimalHelper.train_model(self)

    def evaluate_model(self, file_name: Optional[str] = None,
                       x: Optional[List[str]] = None,
                       y: Optional[List[str]] = None):
        """Evaluate on (x, y) or on examples parsed from `file_name`."""
        if x is not None and y is not None:
            pass
        elif file_name is not None:
            x, y = self.parse_examples_data(file_name)
        else:
            # ValueError is more precise than a bare Exception for an
            # invalid-argument condition (callers catching Exception
            # still catch it).
            raise ValueError("specify either file_name or x, y")
        x = self.encode_x(x)
        y = self.encode_y(y)
        return BayesOptimalHelper.evaluate_model(self, x, y)

    def predict(self, file_name: Optional[str] = None,
                x: Optional[Any] = None,
                encode: bool = True):
        """Predict for `x` (optionally encoding it) or for examples in `file_name`."""
        if x is not None:
            if encode:
                x = self.encode_x(x)
        elif file_name is not None:
            x, _ = self.parse_examples_data(file_name)
            x = self.encode_x(x)
        else:
            raise ValueError("specify either file_name or x")
        return BayesOptimalHelper.predict(self, x, self.silent)

    def save_model(self, file_name: Optional[str] = None):
        # Nothing to persist: the model has no learned state.
        pass

    def write_session_info(self) -> None:
        BayesOptimalHelper.write_session_info(self)

    def load_model(self, file_name: Optional[str] = None):
        # Recreate the analytical model instead of loading from disk.
        self.create_model()

    def get_num_params(self) -> ModelSize:
        # The analytical model has no trainable parameters.
        return 0
| 35.71831
| 79
| 0.600946
| 1,176
| 10,144
| 4.962585
| 0.072279
| 0.065798
| 0.061686
| 0.078136
| 0.86549
| 0.86549
| 0.86549
| 0.86549
| 0.86549
| 0.86549
| 0
| 0.000571
| 0.309937
| 10,144
| 283
| 80
| 35.844523
| 0.833143
| 0.008872
| 0
| 0.927273
| 0
| 0
| 0.024286
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0.036364
| 0.036364
| 0.018182
| 0.309091
| 0.036364
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0c146bca4f6cdebf19d5624ca3ea24ba563e4507
| 3,634
|
py
|
Python
|
tests/core/test_jwt.py
|
bossjones/ultron8
|
45db73d32542a844570d44bc83defa935e15803f
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
tests/core/test_jwt.py
|
bossjones/ultron8
|
45db73d32542a844570d44bc83defa935e15803f
|
[
"Apache-2.0",
"MIT"
] | 43
|
2019-06-01T23:08:32.000Z
|
2022-02-07T22:24:53.000Z
|
tests/core/test_jwt.py
|
bossjones/ultron8
|
45db73d32542a844570d44bc83defa935e15803f
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
import datetime
from datetime import timedelta
import logging
from typing import Tuple
# import jwt as pyjwt
from freezegun import freeze_time
from jose import jwt as josejwt
import pytest
from sqlalchemy.orm import Session
import ultron8
from ultron8.api import crud, settings
from ultron8.api.core import jwt
from tests.conftest import fixtures_path
logger = logging.getLogger(__name__)
@freeze_time("2012-01-14 03:21:34", tz_offset=-4)
@pytest.mark.jwtonly
@pytest.mark.unittest
class TestCreateAccessToken(object):
    """Tests for jwt.create_access_token.

    Time is frozen so the expiry timestamp computed in each test matches
    the one embedded in the token by the code under test.

    (Superseded pyjwt-based variants of these tests were removed; the
    active tests verify against python-jose, which the app uses.)
    """

    def test_create_access_token2(
        self,
        first_superuser_username_and_password_fixtures: Tuple[str, str],
        db: Session,
    ) -> None:
        """An explicit expires_delta is honored in the token's `exp` claim."""
        username, password = first_superuser_username_and_password_fixtures
        FIXTURE_ACCESS_TOKEN_EXPIRE_MINUTES = (
            60 * 24 * 2
        )  # 60 minutes * 24 hours * 2 days = 2 days
        access_token_expires = timedelta(minutes=FIXTURE_ACCESS_TOKEN_EXPIRE_MINUTES)
        user = crud.user.authenticate(db, email=username, password=password)
        a_token = jwt.create_access_token(user.id, expires_delta=access_token_expires)
        expire_expected = datetime.datetime.utcnow() + access_token_expires
        test_data = {"exp": expire_expected, "sub": str(user.id)}
        expected_token = josejwt.encode(
            test_data, settings.SECRET_KEY, algorithm="HS256"
        )
        assert a_token == expected_token

    def test_create_access_token_without_timedelta2(
        self,
        first_superuser_username_and_password_fixtures: Tuple[str, str],
        db: Session,
    ) -> None:
        """Without expires_delta, the configured default expiry is used."""
        username, password = first_superuser_username_and_password_fixtures
        user = crud.user.authenticate(db, email=username, password=password)
        a_token = jwt.create_access_token(user.id)
        expire_expected = datetime.datetime.utcnow() + timedelta(
            minutes=settings.ACCESS_TOKEN_EXPIRE_MINUTES
        )
        test_data = {"exp": expire_expected, "sub": str(user.id)}
        expected_token = josejwt.encode(
            test_data, settings.SECRET_KEY, algorithm="HS256"
        )
        assert a_token == expected_token
| 32.738739
| 90
| 0.698954
| 436
| 3,634
| 5.504587
| 0.206422
| 0.077917
| 0.073333
| 0.083333
| 0.800417
| 0.752917
| 0.752917
| 0.729583
| 0.729583
| 0.68625
| 0
| 0.01569
| 0.210787
| 3,634
| 110
| 91
| 33.036364
| 0.82113
| 0.375069
| 0
| 0.392157
| 0
| 0
| 0.018263
| 0
| 0
| 0
| 0
| 0
| 0.039216
| 1
| 0.039216
| false
| 0.117647
| 0.235294
| 0
| 0.294118
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
0c1c4aec3a8c53cda2f66cb238eab93fcd4abf54
| 1,331
|
py
|
Python
|
src/numpy_change.py
|
benchoi93/DeepMapMatching
|
ac87934d909de8fb5a635001f92b4e93cb69fdd3
|
[
"MIT"
] | 1
|
2021-12-10T08:52:15.000Z
|
2021-12-10T08:52:15.000Z
|
src/numpy_change.py
|
safarzadeh-reza/DeepMapMatching
|
ac87934d909de8fb5a635001f92b4e93cb69fdd3
|
[
"MIT"
] | null | null | null |
src/numpy_change.py
|
safarzadeh-reza/DeepMapMatching
|
ac87934d909de8fb5a635001f92b4e93cb69fdd3
|
[
"MIT"
] | 1
|
2022-01-10T17:39:02.000Z
|
2022-01-10T17:39:02.000Z
|
# %%
import numpy as np

# Encode the raw label file: drop duplicate labels in each row (keeping
# first-occurrence order), append a -1 padding value where missing, and
# save the result with a trailing singleton feature dimension.
raw_target = np.load("../data/Label_0.npy")
raw_target = raw_target.reshape(raw_target.shape[0], raw_target.shape[1])
padding = -1
temp_input = np.array([])
length_batch = raw_target.shape[0]
for i in range(length_batch):
    temp_data = raw_target[i]
    # np.unique returns sorted values; re-index by first occurrence to
    # preserve the original row order.
    _, idx = np.unique(temp_data, return_index=True)
    temp = temp_data[np.sort(idx)]
    if sum(temp == -1) == 0:
        temp = np.insert(temp, len(temp), padding)
    temp_input = np.append(temp_input, temp)
# NOTE(review): this relies on every row deduplicating to the same length
# (len(temp) comes from the final loop iteration) — confirm for the dataset.
temp_input = temp_input.reshape(length_batch, len(temp), 1)
temp_input = temp_input.astype(int)
np.save("../data/Label_1.npy", temp_input)
# %%
def shortencode(raw_target):
raw_target = raw_target.reshape(raw_target.shape[0], raw_target.shape[1])
padding = -1
temp_input = np.array([])
length_batch = raw_target.shape[0]
length_len = raw_target.shape[1]
for i in range(length_batch):
temp_data = raw_target[i]
_, idx = np.unique(temp_data, return_index=True)
temp = temp_data[np.sort(idx)]
if sum(temp == -1) == 0:
temp = np.insert(temp, len(temp), padding)
temp_input = np.append(temp_input, temp)
temp_input = temp_input.reshape(length_batch, len(temp), 1)
temp_input = temp_input.astype(int)
return temp_input
| 30.25
| 77
| 0.670924
| 209
| 1,331
| 4.023923
| 0.191388
| 0.171225
| 0.133175
| 0.071344
| 0.870392
| 0.870392
| 0.870392
| 0.870392
| 0.870392
| 0.870392
| 0
| 0.016636
| 0.187077
| 1,331
| 43
| 78
| 30.953488
| 0.760628
| 0.003757
| 0
| 0.848485
| 0
| 0
| 0.028723
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.030303
| false
| 0
| 0.030303
| 0
| 0.090909
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0c4d0e44bd526aad9a6e99bf21fc1549a6d0699d
| 56
|
py
|
Python
|
sheslcrypto/__init__.py
|
shesl-meow/shesl-crypto
|
d6caf4fe13a15fa6700c1fef5667816f9d9a03d6
|
[
"Apache-2.0"
] | 2
|
2019-11-30T17:29:11.000Z
|
2019-12-12T15:42:01.000Z
|
sheslcrypto/__init__.py
|
shesl-meow/shesl-crypto
|
d6caf4fe13a15fa6700c1fef5667816f9d9a03d6
|
[
"Apache-2.0"
] | null | null | null |
sheslcrypto/__init__.py
|
shesl-meow/shesl-crypto
|
d6caf4fe13a15fa6700c1fef5667816f9d9a03d6
|
[
"Apache-2.0"
] | null | null | null |
from sheslcrypto import RSA
from sheslcrypto import LFSR
| 28
| 28
| 0.875
| 8
| 56
| 6.125
| 0.625
| 0.612245
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 56
| 2
| 28
| 28
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
a7986a41f7c262597114965da868ec1392d74e78
| 138
|
py
|
Python
|
Python3/Exercises/SingleLetterCount/single_letter_count.py
|
norbertosanchezdichi/TIL
|
2e9719ddd288022f53b094a42679e849bdbcc625
|
[
"MIT"
] | null | null | null |
Python3/Exercises/SingleLetterCount/single_letter_count.py
|
norbertosanchezdichi/TIL
|
2e9719ddd288022f53b094a42679e849bdbcc625
|
[
"MIT"
] | null | null | null |
Python3/Exercises/SingleLetterCount/single_letter_count.py
|
norbertosanchezdichi/TIL
|
2e9719ddd288022f53b094a42679e849bdbcc625
|
[
"MIT"
] | null | null | null |
def single_letter_count(string, letter):
    """Count case-insensitive occurrences of `letter` within `string`."""
    haystack = string.lower()
    needle = letter.lower()
    return haystack.count(needle)
# Demo: case-insensitive count of 'o' in 'Norberto'.
print(single_letter_count('Norberto', 'O'))
| 34.5
| 47
| 0.724638
| 18
| 138
| 5.333333
| 0.555556
| 0.25
| 0.354167
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115942
| 138
| 4
| 48
| 34.5
| 0.786885
| 0
| 0
| 0
| 0
| 0
| 0.064748
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.333333
| 0.666667
| 0.333333
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
a7a0078032602ad60b28d541d4adad1cc1ee4c08
| 140
|
py
|
Python
|
app/errors.py
|
averycrespi/statice
|
bd6158595106df90fcabd8ac16e899bf58db1a3b
|
[
"MIT"
] | null | null | null |
app/errors.py
|
averycrespi/statice
|
bd6158595106df90fcabd8ac16e899bf58db1a3b
|
[
"MIT"
] | 40
|
2020-01-23T01:45:20.000Z
|
2020-03-24T18:48:25.000Z
|
app/errors.py
|
averycrespi/statice
|
bd6158595106df90fcabd8ac16e899bf58db1a3b
|
[
"MIT"
] | null | null | null |
from flask import render_template
def page_not_found(e):
    """Render the 404 template for a Page Not Found error."""
    body = render_template("404.j2")
    return body, 404
| 20
| 41
| 0.714286
| 21
| 140
| 4.571429
| 0.714286
| 0.291667
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.060345
| 0.171429
| 140
| 6
| 42
| 23.333333
| 0.767241
| 0.2
| 0
| 0
| 0
| 0
| 0.056604
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
a7acce645853ca6c083b2cff0fbd528a675c874c
| 6,575
|
py
|
Python
|
pyjuque/Engine/Database.py
|
Physicworld/pyjuque
|
ad52d0409558c04583a143398d9df01d2909fda3
|
[
"MIT"
] | 1
|
2021-02-25T12:48:27.000Z
|
2021-02-25T12:48:27.000Z
|
pyjuque/Engine/Database.py
|
Physicworld/pyjuque
|
ad52d0409558c04583a143398d9df01d2909fda3
|
[
"MIT"
] | null | null | null |
pyjuque/Engine/Database.py
|
Physicworld/pyjuque
|
ad52d0409558c04583a143398d9df01d2909fda3
|
[
"MIT"
] | null | null | null |
from pyjuque.Engine.Models import TABotModel as Bot, PairModel as Pair, \
EntrySettingsModel as EntrySettings, ExitSettingsModel as ExitSettings, getSession
def InitializeDatabaseTaBot(session, params=None):
    """Initialize the database with a TA bot and one pair per symbol.

    Creates a Bot row (together with its entry/exit settings rows) using
    the defaults below, overridden by any values supplied in ``params``,
    then adds a Pair row for every symbol in ``params['symbols']``.

    Args:
        session: database session used to persist the objects (must
            support ``add`` and ``commit``).
        params: optional dict of overrides. Recognized keys: 'name',
            'symbols', 'quote_asset', 'starting_balance', 'test_run',
            'entry_settings' (nested dict with 'initial_entry_allocation',
            'signal_distance') and 'exit_settings' (nested dict with
            'take_profit', 'stop_loss_value', 'exit_on_signal').
    """
    # Avoid the mutable-default-argument pitfall: default to a fresh dict.
    if params is None:
        params = {}
    # Defaults, overridden below by whatever params provides.
    name = 'My Bot'
    symbols = []
    quote_asset = 'BTC'
    starting_balance = 0.001
    test_run = False
    initial_entry_allocation = 25
    signal_distance = 0.3
    profit_target = 2
    stop_loss_value = 0
    exit_on_signal = False
    # NOTE: asserts are used for input validation (as in the original);
    # they are stripped under `python -O`. isinstance accepts subclasses
    # (e.g. bool for the numeric fields), so it is at least as permissive
    # as the previous `type(...) ==` checks.
    if 'name' in params:
        assert isinstance(params['name'], str)
        name = params['name']
    if 'symbols' in params:
        assert isinstance(params['symbols'], list)
        symbols = params['symbols']
    if 'quote_asset' in params:
        assert isinstance(params['quote_asset'], str)
        quote_asset = params['quote_asset']
    if 'starting_balance' in params:
        assert isinstance(params['starting_balance'], (int, float))
        starting_balance = params['starting_balance']
    if 'test_run' in params:
        assert isinstance(params['test_run'], bool)
        test_run = params['test_run']
    if 'entry_settings' in params:
        assert isinstance(params['entry_settings'], dict)
        entry_settings = params['entry_settings']
        if 'initial_entry_allocation' in entry_settings:
            assert isinstance(entry_settings['initial_entry_allocation'], (int, float))
            initial_entry_allocation = entry_settings['initial_entry_allocation']
        if 'signal_distance' in entry_settings:
            assert isinstance(entry_settings['signal_distance'], (int, float))
            signal_distance = entry_settings['signal_distance']
    if 'exit_settings' in params:
        assert isinstance(params['exit_settings'], dict)
        exit_settings = params['exit_settings']
        if 'take_profit' in exit_settings:
            assert isinstance(exit_settings['take_profit'], (int, float))
            profit_target = exit_settings['take_profit']
        if 'stop_loss_value' in exit_settings:
            assert isinstance(exit_settings['stop_loss_value'], (int, float))
            stop_loss_value = exit_settings['stop_loss_value']
        if 'exit_on_signal' in exit_settings:
            assert isinstance(exit_settings['exit_on_signal'], bool)
            exit_on_signal = exit_settings['exit_on_signal']
    # Create the bot together with its entry/exit settings.
    bot = Bot(
        name = name,
        quote_asset = quote_asset,
        starting_balance = starting_balance,
        current_balance = starting_balance,
        test_run = test_run
    )
    session.add(bot)
    entrysets = EntrySettings(
        initial_entry_allocation = initial_entry_allocation,
        signal_distance = signal_distance
    )
    exitsets = ExitSettings(
        profit_target = profit_target, # in %
        stop_loss_value = stop_loss_value, # in %
        exit_on_signal = exit_on_signal
    )
    bot.entry_settings = entrysets
    bot.exit_settings = exitsets
    # Commit first so bot.id is populated before creating the pairs.
    session.commit()
    for symbol in symbols:
        pair = Pair(
            bot_id = bot.id,
            symbol = symbol,
            current_order_id = None
        )
        session.add(pair)
    session.commit()
def InitializeDatabaseGridBot(session, params=None):
    """Initialize the database with a grid bot and one pair per symbol.

    Creates a Bot row (together with its entry/exit settings rows) using
    the defaults below, overridden by any values supplied in ``params``,
    then adds a Pair row for every symbol in ``params['symbols']``.

    Args:
        session: database session used to persist the objects (must
            support ``add`` and ``commit``).
        params: optional dict of overrides. Recognized keys: 'name',
            'symbols', 'quote_asset', 'starting_balance', 'test_run',
            'entry_settings' (nested dict with 'initial_entry_allocation',
            'signal_distance') and 'exit_settings' (nested dict with
            'take_profit', 'stop_loss_value', 'exit_on_signal').
    """
    # Avoid the mutable-default-argument pitfall: default to a fresh dict.
    if params is None:
        params = {}
    # Defaults, overridden below by whatever params provides.
    name = 'My Bot'
    symbols = []
    quote_asset = 'BTC'
    starting_balance = 0.001
    test_run = False
    initial_entry_allocation = 25
    signal_distance = 0.3
    profit_target = 2
    stop_loss_value = 0
    exit_on_signal = False
    # NOTE: asserts are used for input validation (as in the original);
    # they are stripped under `python -O`. isinstance accepts subclasses
    # (e.g. bool for the numeric fields), so it is at least as permissive
    # as the previous `type(...) ==` checks.
    if 'name' in params:
        assert isinstance(params['name'], str)
        name = params['name']
    if 'symbols' in params:
        assert isinstance(params['symbols'], list)
        symbols = params['symbols']
    if 'quote_asset' in params:
        assert isinstance(params['quote_asset'], str)
        quote_asset = params['quote_asset']
    if 'starting_balance' in params:
        assert isinstance(params['starting_balance'], (int, float))
        starting_balance = params['starting_balance']
    if 'test_run' in params:
        assert isinstance(params['test_run'], bool)
        test_run = params['test_run']
    if 'entry_settings' in params:
        assert isinstance(params['entry_settings'], dict)
        entry_settings = params['entry_settings']
        if 'initial_entry_allocation' in entry_settings:
            assert isinstance(entry_settings['initial_entry_allocation'], (int, float))
            initial_entry_allocation = entry_settings['initial_entry_allocation']
        if 'signal_distance' in entry_settings:
            assert isinstance(entry_settings['signal_distance'], (int, float))
            signal_distance = entry_settings['signal_distance']
    if 'exit_settings' in params:
        assert isinstance(params['exit_settings'], dict)
        exit_settings = params['exit_settings']
        if 'take_profit' in exit_settings:
            assert isinstance(exit_settings['take_profit'], (int, float))
            profit_target = exit_settings['take_profit']
        if 'stop_loss_value' in exit_settings:
            assert isinstance(exit_settings['stop_loss_value'], (int, float))
            stop_loss_value = exit_settings['stop_loss_value']
        if 'exit_on_signal' in exit_settings:
            assert isinstance(exit_settings['exit_on_signal'], bool)
            exit_on_signal = exit_settings['exit_on_signal']
    # Create the bot together with its entry/exit settings.
    bot = Bot(
        name = name,
        quote_asset = quote_asset,
        starting_balance = starting_balance,
        current_balance = starting_balance,
        test_run = test_run
    )
    session.add(bot)
    entrysets = EntrySettings(
        initial_entry_allocation = initial_entry_allocation,
        signal_distance = signal_distance
    )
    exitsets = ExitSettings(
        profit_target = profit_target, # in %
        stop_loss_value = stop_loss_value, # in %
        exit_on_signal = exit_on_signal
    )
    bot.entry_settings = entrysets
    bot.exit_settings = exitsets
    # Commit first so bot.id is populated before creating the pairs.
    session.commit()
    for symbol in symbols:
        pair = Pair(
            bot_id = bot.id,
            symbol = symbol,
            current_order_id = None
        )
        session.add(pair)
    session.commit()
| 33.717949
| 86
| 0.651711
| 730
| 6,575
| 5.445205
| 0.113699
| 0.084528
| 0.077484
| 0.033208
| 0.952956
| 0.952956
| 0.952956
| 0.952956
| 0.952956
| 0.952956
| 0
| 0.004054
| 0.249582
| 6,575
| 194
| 87
| 33.891753
| 0.801581
| 0.025551
| 0
| 0.921053
| 0
| 0
| 0.145722
| 0.022563
| 0
| 0
| 0
| 0
| 0.157895
| 1
| 0.013158
| false
| 0
| 0.006579
| 0
| 0.019737
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a7dcae754e511a38316f1c528bf6e653b9514829
| 12,900
|
py
|
Python
|
tests/integrationv2/test_session_resumption.py
|
bryce-shang/s2n-tls
|
b0725af8e9900da37c2ec32bb40bf5a92e6bc896
|
[
"Apache-2.0"
] | 4,256
|
2015-06-30T11:37:38.000Z
|
2021-02-17T10:46:30.000Z
|
tests/integrationv2/test_session_resumption.py
|
bryce-shang/s2n-tls
|
b0725af8e9900da37c2ec32bb40bf5a92e6bc896
|
[
"Apache-2.0"
] | 2,088
|
2015-06-30T12:12:51.000Z
|
2021-02-17T22:27:43.000Z
|
tests/integrationv2/test_session_resumption.py
|
bryce-shang/s2n-tls
|
b0725af8e9900da37c2ec32bb40bf5a92e6bc896
|
[
"Apache-2.0"
] | 676
|
2015-06-30T11:11:51.000Z
|
2021-02-15T20:07:16.000Z
|
import copy
import os
import pytest
import time
from configuration import available_ports, ALL_TEST_CIPHERS, ALL_TEST_CURVES, ALL_TEST_CERTS, PROTOCOLS, TLS13_CIPHERS
from common import ProviderOptions, Protocols, data_bytes
from fixtures import managed_process
from providers import Provider, S2N, OpenSSL
from utils import invalid_test_parameters, get_parameter_name, get_expected_s2n_version, to_bytes
@pytest.mark.uncollect_if(func=invalid_test_parameters)
@pytest.mark.parametrize("cipher", ALL_TEST_CIPHERS, ids=get_parameter_name)
@pytest.mark.parametrize("curve", ALL_TEST_CURVES, ids=get_parameter_name)
@pytest.mark.parametrize("certificate", ALL_TEST_CERTS, ids=get_parameter_name)
@pytest.mark.parametrize("protocol", [p for p in PROTOCOLS if p != Protocols.TLS13], ids=get_parameter_name)
@pytest.mark.parametrize("provider", [OpenSSL], ids=get_parameter_name)
@pytest.mark.parametrize("use_ticket", [True, False])
def test_session_resumption_s2n_server(managed_process, cipher, curve, protocol, provider, certificate, use_ticket):
    """Pre-TLS1.3 session resumption against an s2n server.

    The client reconnects automatically; all six connections must succeed,
    print a session ID, and negotiate the expected protocol version.
    """
    port = next(available_ports)
    client_options = ProviderOptions(
        mode=Provider.ClientMode,
        port=port,
        cipher=cipher,
        curve=curve,
        insecure=True,
        reconnect=True,
        protocol=protocol)
    server_options = copy.copy(client_options)
    server_options.reconnects_before_exit = 6
    server_options.mode = Provider.ServerMode
    # BUG FIX: the original line ended in a stray trailing comma
    # ("... = use_ticket,"), which assigned the 1-tuple (use_ticket,)
    # instead of the boolean itself.
    server_options.use_session_ticket = use_ticket
    server_options.key = certificate.key
    server_options.cert = certificate.cert
    # Passing the type of client and server as a parameter will
    # allow us to use a fixture to enumerate all possibilities.
    server = managed_process(S2N, server_options, timeout=5)
    client = managed_process(provider, client_options, timeout=5)
    # The client should connect and return without error
    for results in client.get_results():
        results.assert_success()
        assert results.stdout.count(to_bytes("Session-ID:")) == 6
    expected_version = get_expected_s2n_version(protocol, OpenSSL)
    # S2N should indicate the protocol version in a successful connection.
    for results in server.get_results():
        results.assert_success()
        assert results.stdout.count(to_bytes("Actual protocol version: {}".format(expected_version))) == 6
@pytest.mark.uncollect_if(func=invalid_test_parameters)
@pytest.mark.parametrize("cipher", ALL_TEST_CIPHERS, ids=get_parameter_name)
@pytest.mark.parametrize("curve", ALL_TEST_CURVES, ids=get_parameter_name)
@pytest.mark.parametrize("certificate", ALL_TEST_CERTS, ids=get_parameter_name)
@pytest.mark.parametrize("protocol", [p for p in PROTOCOLS if p != Protocols.TLS13], ids=get_parameter_name)
@pytest.mark.parametrize("provider", [OpenSSL], ids=get_parameter_name)
@pytest.mark.parametrize("use_ticket", [True, False])
def test_session_resumption_s2n_client(managed_process, cipher, curve, protocol, provider, certificate, use_ticket):
    """Pre-TLS1.3 session resumption with an s2n client against an OpenSSL
    server: six reconnecting handshakes must all succeed and report the
    expected protocol version on the client side."""
    port = next(available_ports)
    client_options = ProviderOptions(
        mode=Provider.ClientMode,
        port=port,
        cipher=cipher,
        curve=curve,
        insecure=True,
        reconnect=True,
        use_session_ticket=use_ticket,
        protocol=protocol)
    # Server reuses the client options, flipped to server mode; the server
    # side does not use a session ticket itself.
    server_options = copy.copy(client_options)
    server_options.reconnects_before_exit = 6
    server_options.mode = Provider.ServerMode
    server_options.key = certificate.key
    server_options.cert = certificate.cert
    server_options.use_session_ticket = False
    # Passing the type of client and server as a parameter will
    # allow us to use a fixture to enumerate all possibilities.
    server = managed_process(provider, server_options, timeout=5)
    client = managed_process(S2N, client_options, timeout=5)
    expected_version = get_expected_s2n_version(protocol, OpenSSL)
    for results in client.get_results():
        results.assert_success()
        assert results.stdout.count(to_bytes("Actual protocol version: {}".format(expected_version))) == 6
    for results in server.get_results():
        results.assert_success()
        # NOTE(review): .count() is only checked for truthiness ("at least
        # one occurrence"); the sibling tests compare against == 6 —
        # confirm whether an exact count was intended here.
        assert results.stdout.count(to_bytes("6 server accepts that finished"))
@pytest.mark.uncollect_if(func=invalid_test_parameters)
@pytest.mark.parametrize("cipher", TLS13_CIPHERS, ids=get_parameter_name)
@pytest.mark.parametrize("curve", ALL_TEST_CURVES, ids=get_parameter_name)
@pytest.mark.parametrize("certificate", ALL_TEST_CERTS, ids=get_parameter_name)
@pytest.mark.parametrize("protocol", [Protocols.TLS13], ids=get_parameter_name)
@pytest.mark.parametrize("provider", [OpenSSL], ids=get_parameter_name)
def test_tls13_session_resumption_s2n_server(managed_process, tmp_path, cipher, curve, protocol, provider, certificate):
    """TLS1.3 session resumption against an s2n server, in two phases:
    a full handshake during which the OpenSSL client writes the received
    session ticket to disk, then a second connection that sends the ticket
    back (-sess_in) and must be reported as a resumed session."""
    port = str(next(available_ports))
    # Use temp directory to store session tickets
    p = tmp_path / 'ticket.pem'
    path_to_ticket = str(p)
    # Marker bytes the server sends so the runner knows when to close.
    close_marker_bytes = data_bytes(10)
    client_options = ProviderOptions(
        mode=Provider.ClientMode,
        port=port,
        cipher=cipher,
        curve=curve,
        insecure=True,
        reconnect=False,
        extra_flags = ['-sess_out', path_to_ticket],
        protocol=protocol)
    server_options = copy.copy(client_options)
    server_options.mode = Provider.ServerMode
    server_options.key = certificate.key
    server_options.cert = certificate.cert
    server_options.use_session_ticket = True
    server_options.extra_flags = None
    server_options.data_to_send = close_marker_bytes
    # Phase 1: full handshake; the client stores the ticket via -sess_out.
    server = managed_process(S2N, server_options, timeout=5, send_marker=S2N.get_send_marker())
    client = managed_process(provider, client_options, timeout=5, close_marker=str(close_marker_bytes))
    # The client should have received a session ticket
    for results in client.get_results():
        results.assert_success()
        assert b'Post-Handshake New Session Ticket arrived:' in results.stdout
    for results in server.get_results():
        results.assert_success()
        # The first connection is a full handshake
        assert b'Resumed session' not in results.stdout
    # Client inputs received session ticket to resume a session
    assert os.path.exists(path_to_ticket)
    client_options.extra_flags = ['-sess_in', path_to_ticket]
    # Phase 2: fresh port, same options otherwise; the ticket is replayed.
    port = str(next(available_ports))
    client_options.port = port
    server_options.port = port
    server = managed_process(S2N, server_options, timeout=5, send_marker=S2N.get_send_marker())
    client = managed_process(provider, client_options, timeout=5, close_marker=str(close_marker_bytes))
    s2n_version = get_expected_s2n_version(protocol, provider)
    # Client has not read server certificate message as this is a resumed session
    for results in client.get_results():
        results.assert_success()
        assert to_bytes("SSL_connect:SSLv3/TLS read server certificate") not in results.stderr
    # The server should indicate a session has been resumed
    for results in server.get_results():
        results.assert_success()
        assert b'Resumed session' in results.stdout
        assert to_bytes("Actual protocol version: {}".format(s2n_version)) in results.stdout
@pytest.mark.uncollect_if(func=invalid_test_parameters)
@pytest.mark.parametrize("cipher", TLS13_CIPHERS, ids=get_parameter_name)
@pytest.mark.parametrize("curve", ALL_TEST_CURVES, ids=get_parameter_name)
@pytest.mark.parametrize("certificate", ALL_TEST_CERTS, ids=get_parameter_name)
@pytest.mark.parametrize("protocol", [Protocols.TLS13], ids=get_parameter_name)
@pytest.mark.parametrize("provider", [OpenSSL, S2N], ids=get_parameter_name)
def test_tls13_session_resumption_s2n_client(managed_process, cipher, curve, protocol, provider, certificate):
    """TLS1.3 session resumption with an s2n client: one full handshake
    followed by five automatically-resumed reconnections, verified in the
    output of both the client and the (OpenSSL or s2n) server."""
    port = str(next(available_ports))
    # The reconnect option for s2nc allows the client to reconnect automatically
    # five times. In this test we expect one full connection and five resumption
    # connections.
    num_full_connections = 1
    num_resumed_connections = 5
    client_options = ProviderOptions(
        mode=Provider.ClientMode,
        port=port,
        cipher=cipher,
        curve=curve,
        insecure=True,
        use_session_ticket=True,
        reconnect=True,
        protocol=protocol)
    server_options = copy.copy(client_options)
    server_options.mode = Provider.ServerMode
    server_options.key = certificate.key
    server_options.cert = certificate.cert
    server_options.reconnects_before_exit = num_resumed_connections + num_full_connections
    server = managed_process(provider, server_options, timeout=5)
    client = managed_process(S2N, client_options, timeout=5)
    s2n_version = get_expected_s2n_version(protocol, provider)
    # s2nc indicates the number of resumed connections in its output
    for results in client.get_results():
        results.assert_success()
        assert results.stdout.count(b'Resumed session') == num_resumed_connections
        assert to_bytes("Actual protocol version: {}".format(s2n_version)) in results.stdout
    server_accepts_str = str(num_resumed_connections + num_full_connections) + " server accepts that finished"
    # The server-side evidence differs between s2n and OpenSSL output.
    for results in server.get_results():
        results.assert_success()
        if provider is S2N:
            assert results.stdout.count(b'Resumed session') == num_resumed_connections
            assert to_bytes("Actual protocol version: {}".format(s2n_version)) in results.stdout
        else:
            assert to_bytes(server_accepts_str) in results.stdout
            # s_server only writes one certificate message in all of the connections
            assert results.stderr.count(b'SSL_accept:SSLv3/TLS write certificate') == num_full_connections
@pytest.mark.uncollect_if(func=invalid_test_parameters)
@pytest.mark.parametrize("cipher", TLS13_CIPHERS, ids=get_parameter_name)
@pytest.mark.parametrize("curve", ALL_TEST_CURVES, ids=get_parameter_name)
@pytest.mark.parametrize("certificate", ALL_TEST_CERTS, ids=get_parameter_name)
@pytest.mark.parametrize("protocol", [Protocols.TLS13], ids=get_parameter_name)
@pytest.mark.parametrize("provider", [OpenSSL], ids=get_parameter_name)
def test_s2nd_falls_back_to_full_connection(managed_process, tmp_path, cipher, curve, protocol, provider, certificate):
    """s2nd must fall back to a full handshake when handed a session ticket
    it did not issue (one obtained from an OpenSSL server)."""
    port = str(next(available_ports))
    # Use temp directory to store session tickets
    p = tmp_path / 'ticket.pem'
    path_to_ticket = str(p)
    """
    This test will set up a full connection with an Openssl client and server to obtain
    a valid Openssl session ticket. Then, the Openssl client attempts to send the
    received session ticket to an s2n server to resume a session. s2nd will fallback to
    a full connection as it does not recognize the session ticket.
    """
    client_options = ProviderOptions(
        mode=Provider.ClientMode,
        port=port,
        cipher=cipher,
        curve=curve,
        insecure=True,
        reconnect=False,
        extra_flags = ['-sess_out', path_to_ticket],
        data_to_send = data_bytes(4069),
        protocol=protocol)
    server_options = copy.copy(client_options)
    server_options.mode = Provider.ServerMode
    server_options.key = certificate.key
    server_options.cert = certificate.cert
    server_options.extra_flags = None
    # Phase 1: OpenSSL client <-> OpenSSL server, to obtain a ticket.
    server = managed_process(provider, server_options, timeout=5)
    client = managed_process(provider, client_options, timeout=5)
    # The client should have received a session ticket
    for results in client.get_results():
        results.assert_success()
        assert b'Post-Handshake New Session Ticket arrived:' in results.stdout
    for results in server.get_results():
        results.assert_success()
        # Server should have sent certificate message as this is a full connection
        assert b'SSL_accept:SSLv3/TLS write certificate' in results.stderr
    # Client inputs received session ticket to resume a session
    assert os.path.exists(path_to_ticket)
    client_options.extra_flags = ['-sess_in', path_to_ticket]
    # Phase 2: replay the foreign ticket against an s2n server.
    port = str(next(available_ports))
    client_options.port = port
    server_options.port = port
    # Switch providers so now s2n is the server
    server = managed_process(S2N, server_options, timeout=5)
    client = managed_process(provider, client_options, timeout=5)
    s2n_version = get_expected_s2n_version(protocol, provider)
    # Client has read server certificate because this is a full connection
    for results in client.get_results():
        results.assert_success()
        assert to_bytes("SSL_connect:SSLv3/TLS read server certificate") in results.stderr
    # The server should indicate a session has not been resumed
    for results in server.get_results():
        results.assert_success()
        assert b'Resumed session' not in results.stdout
        assert to_bytes("Actual protocol version: {}".format(s2n_version)) in results.stdout
| 43.434343
| 120
| 0.745349
| 1,681
| 12,900
| 5.490779
| 0.113623
| 0.053521
| 0.06143
| 0.051463
| 0.843012
| 0.832286
| 0.820043
| 0.803575
| 0.787974
| 0.782774
| 0
| 0.008308
| 0.169612
| 12,900
| 296
| 121
| 43.581081
| 0.853342
| 0.105659
| 0
| 0.825472
| 0
| 0
| 0.073468
| 0.003758
| 0
| 0
| 0
| 0
| 0.169811
| 1
| 0.023585
| false
| 0
| 0.042453
| 0
| 0.066038
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ac0b700668b0e2af70d36f1f54959b2481bb90f0
| 301
|
py
|
Python
|
product/product_manufacturing_places.py
|
saiihamza/open_data_parsing
|
6757c6c6823a0523ca1d2af79e99b761b57a794d
|
[
"Apache-2.0"
] | null | null | null |
product/product_manufacturing_places.py
|
saiihamza/open_data_parsing
|
6757c6c6823a0523ca1d2af79e99b761b57a794d
|
[
"Apache-2.0"
] | null | null | null |
product/product_manufacturing_places.py
|
saiihamza/open_data_parsing
|
6757c6c6823a0523ca1d2af79e99b761b57a794d
|
[
"Apache-2.0"
] | null | null | null |
class ProductManufacturingPlaces(object):
    """Container for a product's manufacturing-places fields."""

    def __init__(self, manufacturing_places, manufacturing_places_tags):
        """Store the raw places value and its associated tags."""
        self.ManufacturingPlacesTags = manufacturing_places_tags
        self.ManufacturingPlaces = manufacturing_places

    def __str__(self):
        # The plain places value is the human-readable representation.
        return self.ManufacturingPlaces
| 33.444444
| 72
| 0.780731
| 26
| 301
| 8.5
| 0.5
| 0.343891
| 0.208145
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166113
| 301
| 8
| 73
| 37.625
| 0.880478
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.166667
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
ac0d965b0834a4bfafbb52af90f17da0b16d45bf
| 26,566
|
py
|
Python
|
analysis/c_utils/utils_hour.py
|
chrelli/3DDD_social_mouse_tracker
|
291d2ed90029628dd65db0ce3e8972b721159a15
|
[
"Apache-2.0"
] | 1
|
2022-02-10T07:26:09.000Z
|
2022-02-10T07:26:09.000Z
|
analysis/c_utils/utils_hour.py
|
chrelli/3DDD_social_mouse_tracker
|
291d2ed90029628dd65db0ce3e8972b721159a15
|
[
"Apache-2.0"
] | 1
|
2022-02-11T06:55:29.000Z
|
2022-02-12T22:26:44.000Z
|
analysis/c_utils/utils_hour.py
|
chrelli/3DDD_social_mouse_tracker
|
291d2ed90029628dd65db0ce3e8972b721159a15
|
[
"Apache-2.0"
] | null | null | null |
# FROM TIRAMISU
# %matplotlib inline
# %load_ext autoreload
# %autoreload 2
import time
from pathlib import Path
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
# import the tiramisu models
#from pytorch_tiramisu.models import tiramisu
# from pose.models import hourglass
#import deepfly.pose2d.models as flymodels
# from datasets import camvid
#from pytorch_tiramisu.datasets import joint_transforms
#import pytorch_tiramisu.utils.imgs
#import pytorch_tiramisu.utils.training as train_utils
import sys, os, pickle
import h5py
import cv2
from colour import Color
#%%
# for making the target maps!
def gaussian(img, pt, sigma):
    """Draw an unnormalized 2D Gaussian centered at ``pt`` onto ``img``.

    Writes the Gaussian values into ``img`` in place (clipped to the image
    bounds) and also returns ``img``. The peak value is 1 at the center —
    the convention used for the target maps in this module.

    Args:
        img: 2D numpy array to draw on (modified in place).
        pt: (x, y) center of the Gaussian, in pixel coordinates.
        sigma: Gaussian standard deviation, in pixels.

    Returns:
        The same ``img`` array, untouched when the Gaussian support lies
        entirely outside the image.
    """
    # Bounding box of the +/- 3*sigma support: upper-left and bottom-right.
    ul = [int(pt[0] - 3 * sigma), int(pt[1] - 3 * sigma)]
    br = [int(pt[0] + 3 * sigma + 1), int(pt[1] + 3 * sigma + 1)]
    # Entirely out of bounds -> nothing to draw. Use >= on both axes for
    # consistency (the original mixed `>` and `>=`; with `>` the slices
    # below simply came out empty, so the final image is identical).
    if (ul[0] >= img.shape[1] or ul[1] >= img.shape[0]
            or br[0] < 0 or br[1] < 0):
        return img
    # Generate the Gaussian patch; the peak is 1 (not normalized to unit mass).
    size = 6 * sigma + 1
    x = np.arange(0, size, 1, float)
    y = x[:, np.newaxis]
    x0 = y0 = size // 2
    g = np.exp(-((x - x0) ** 2 + (y - y0) ** 2) / (2 * sigma ** 2))
    # Intersect the patch with the image and copy the overlapping region.
    g_x = max(0, -ul[0]), min(br[0], img.shape[1]) - ul[0]
    g_y = max(0, -ul[1]), min(br[1], img.shape[0]) - ul[1]
    img_x = max(0, ul[0]), min(br[0], img.shape[1])
    img_y = max(0, ul[1]), min(br[1], img.shape[0])
    img[img_y[0]:img_y[1], img_x[0]:img_x[1]] = g[g_y[0]:g_y[1], g_x[0]:g_x[1]]
    return img
#%%
def check_h5(h5_path):
    """Open the h5 archive, pick a random entry and plot it with its keypoints."""
    with h5py.File(h5_path, mode='r') as archive:
        print(archive.keys())
        idx = np.random.choice(len(archive['c_images']))
        image = archive['c_images'][idx]
        keypoints = archive['annotations'][idx]
        plt.figure(figsize=(10,10))
        # reverse the channel order for display
        plt.imshow(image[..., [2,1,0]])
        plt.plot(keypoints[:,0], keypoints[:,1], 'or')
        plt.title("raw width: {} height: {}".format(image.shape[0],image.shape[1]))
        plt.show()
        archive.close()
#%%
def check_h5_ir(h5_path, ji = None):
    """Plot entry ``ji`` (random when None) from the IR h5 archive.

    Returns the plotted image array.
    """
    with h5py.File(h5_path, mode='r') as archive:
        print(archive.keys())
        if ji is None:
            # no index given: draw a random one
            ji = np.random.choice(len(archive['c_images']))
        image = archive['c_images'][ji]
        keypoints = archive['annotations'][ji]
        plt.figure(figsize=(10,10))
        plt.imshow(image)
        plt.plot(keypoints[:,0], keypoints[:,1], 'or')
        plt.title("raw width: {} height: {}".format(image.shape[0],image.shape[1]))
        plt.show()
        archive.close()
    return image
#%%
def check_h5_ir_bw(h5_path, ji = None,savepath=None):
    """Plot entry ``ji`` (random when None) from the IR h5 archive in
    grayscale, with each keypoint circled in its body-part color, and
    optionally save the figure to ``savepath``.

    Returns (image, keypoints) for the plotted entry.
    """
    # plots a random file from the h5py
    with h5py.File(h5_path, mode='r') as h5file:
        print(h5file.keys())
        if ji is None:
            ji = np.random.choice(len(h5file['c_images']))
        c_image = h5file['c_images'][ji]
        points = h5file['annotations'][ji]
        annotated = h5file['annotated'][ji]
        skel = h5file['skeleton'][:]
        print(h5file.keys())
        print(skel)
        # housekeeping for plotting
        # label_index maps each of the 9 keypoints to a body-part color;
        # body_index maps them to mouse 0 / mouse 1.
        body_colors =['dodgerblue','red','lime','orange']
        label_names = ['impl','ear','ear','nose','tail','ear','ear','nose','tail']
        body_names = ['mouse0','mouse0','mouse0','mouse0','mouse0','mouse1','mouse1','mouse1','mouse1']
        label_index = [0,1,1,2,3,1,1,2,3]
        body_index = [0,0,0,0,0,1,1,1,1]
        plt.figure(figsize=(10,10))
        plt.imshow(c_image,cmap = 'gray')
        for jj in range(points.shape[0]):
            # presumably x < 10 marks an unannotated/invalid point — skip it
            # (TODO confirm against the annotation tool)
            if points[jj,0] <10:
                continue
            cc =body_colors[label_index[jj]]
            plt.scatter(points[jj,0],points[jj,1],marker='o',s=200,edgecolor=cc,facecolor='none',linewidth=3)
        # plt.title("raw width: {} height: {}".format(c_image.shape[0],c_image.shape[1]))
        plt.axis('off')
        if savepath is not None:
            plt.savefig(savepath)
        plt.show()
        h5file.close()
    return c_image,points
#%% SOME PLOTTING
def plot_im_target(im,target,size = 5):
    """Show an RGB input tensor next to its target maps in a 2x2 grid:
    the image, all target channels, the point targets (first 4 channels)
    and the affinity channels (remaining channels).

    Assumes ``im`` is (1, C, H, W) with BGR in the first 3 channels and
    ``target`` is (1, K, H, W) — TODO confirm against the data loader.
    """
    im_np = im.numpy()
    target_np = target.numpy()[0,:,:]
    # reorder BGR -> RGB for display
    c = im_np[0,[2,1,0],:,:]
    # dac = im_np[0,3,:,:]
    c = np.moveaxis(c,[0],[2])
    # dac = im_c[]
    # collapse channel groups by max so overlapping maps stay visible
    point_map = np.max( target_np[:4,:,:] , axis = 0)
    posture_map = np.max( target_np[4:,:,:] , axis = 0)
    full_map = np.max( target_np[:,:,:] , axis = 0)
    # plt.imshow(posture_map)
    plt.figure(figsize=(1.3*size,size))
    plt.subplot(2,2,1)
    plt.imshow( c )
    plt.title("RGB")
    plt.subplot(2,2,2)
    plt.imshow( full_map )
    # plt.imshow( dac )
    plt.title("all")
    plt.subplot(2,2,3)
    plt.imshow( point_map )
    plt.title("Point targets")
    plt.subplot(2,2,4)
    plt.imshow( posture_map )
    plt.title("Affinity map")
    plt.show()
def plot_im_target_ir(im,target,size = 5):
    """IR (single-channel) variant of plot_im_target: shows the image and
    the collapsed target maps in a 2x2 grid.

    Assumes ``im`` is (1, 1, H, W) and ``target`` is (1, K, H, W) —
    TODO confirm against the data loader.
    """
    im_np = im.numpy()
    target_np = target.numpy()[0,:,:]
    # single grayscale channel, no BGR reordering needed
    c = im_np[0,0,:,:]
    # dac = im_np[0,3,:,:]
    # c = np.moveaxis(c,[0],[2])
    # dac = im_c[]
    # collapse channel groups by max so overlapping maps stay visible
    point_map = np.max( target_np[:4,:,:] , axis = 0)
    posture_map = np.max( target_np[4:,:,:] , axis = 0)
    full_map = np.max( target_np[:,:,:] , axis = 0)
    # plt.imshow(posture_map)
    plt.figure(figsize=(1.3*size,size))
    plt.subplot(2,2,1)
    plt.imshow( c )
    plt.title("RGB")
    plt.subplot(2,2,2)
    plt.imshow( full_map )
    # plt.imshow( dac )
    plt.title("all")
    plt.subplot(2,2,3)
    plt.imshow( point_map )
    plt.title("Point targets")
    plt.subplot(2,2,4)
    plt.imshow( posture_map )
    plt.title("Affinity map")
    plt.show()
def random_from(MouseValidLoader):
    """Return (input, target) of one randomly chosen batch from the loader."""
    total = MouseValidLoader.__len__()
    pick = np.random.randint(0, total)
    for idx, batch in enumerate(MouseValidLoader):
        if idx != pick:
            continue
        print(idx)
        return batch[0], batch[1]
def specific_from(MouseValidLoader,k):
    """Return (input, target) of batch number ``k`` from the loader."""
    # Kept for parity with the original; the length itself is unused.
    total = MouseValidLoader.__len__()
    for idx, batch in enumerate(MouseValidLoader):
        if idx == k:
            print(idx)
            return batch[0], batch[1]
def plot_im_target_pseudo(input_var,target_var,size = 10,save_fig = False):
    """Plot the input image and pseudo-colored target maps.

    The first 4 target channels (body-part points) and the remaining 7
    (affinity fields) are each rendered as a colored composite, plus one
    small panel per affinity channel and a final overlay panel.
    Optionally saves the figure to cinema_training/ when ``save_fig``.

    Assumes ``input_var`` is (N, C, H, W) with BGR in the first 3 channels
    and ``target_var`` is (N, 11, H, W) — TODO confirm with the loader.
    """
    # def show_frame(input_var,target_var):
    # plt.imshow(input_var.data.cpu()[0,:,:,:].numpy())
    input_image = input_var.data.cpu()[0,:3,:,:].numpy()
    input_image = np.moveaxis(input_image,0,2)
    target_stack = target_var.data.cpu()[0,:,:,:].numpy()
    # first 4 channels: body-part point maps; the rest: affinity fields
    target_image = target_stack[:4,...]
    target_pose = target_stack[4:,...]
    # = np.moveaxis(target[0,:,:,:].numpy() ,0,2)
    # test.shape
    # score_map = output.data.cpu()
    tt = ["implant","ears","noses",'tails']
    # show the tracking belief map
    Fig1 = plt.figure(figsize=(1.5*size,size))
    plt.subplot(2,3,1)
    plt.imshow(input_image[:,:,[2,1,0]])
    plt.title("image space, h: {} w: {}".format(input_image.shape[0],input_image.shape[1]) )
    plt.subplot(2,3,2)
    # from matplotlib.pyplot import cm
    # build a pseudo-color composite: each point channel tinted by its color
    pseudo = np.zeros((target_image.shape[1],target_image.shape[2],3))
    body_colors =['dodgerblue','red','lime','orange']
    for i,col in enumerate(body_colors):
        bright = target_image[i,:,:]
        rgb = Color(col).rgb
        color_im = bright[:,:,np.newaxis] * np.asarray(rgb)[np.newaxis,np.newaxis,:]
        pseudo += color_im
    pseudo = np.clip(pseudo,0,1)
    # # Write some Text
    # font = cv2.FONT_HERSHEY_SIMPLEX
    # fontScale = .4
    # lineType = 0
    t_h,t_w = pseudo.shape[:2]
    pad = 10
    # for i,(type,col,x,y) in enumerate(zip(tt,body_colors,[pad,pad,t_h-pad,t_h-pad],[pad,2*pad,pad,2*pad])):
    # # do as in-place?
    # rgb = Color(col).rgb
    # fontColor = rgb
    # bottomLeftCornerOfText = (10,i*10+20)
    # bottomLeftCornerOfText = (x,y)
    # cv2.putText(pseudo,type,bottomLeftCornerOfText,font,fontScale,fontColor,lineType)
    plt.imshow(pseudo)
    # NOTE: x and y from the zip are immediately overwritten below, so the
    # labels are stacked at a fixed column.
    for i,(type,col,x,y) in enumerate(zip(tt,body_colors,[pad,pad,t_h-pad,t_h-pad],[pad,2*pad,pad,2*pad])):
        x = 6
        y = i*6+6
        plt.text(x, y, type, fontsize=12,color = col)
    # same pseudo-color composite for the affinity-field channels
    pseudo_net = np.zeros((target_image.shape[1],target_image.shape[2],3))
    affinity_colors = ['dodgerblue','yellow','purple','red','lime','orange','hotpink']
    for i,col in enumerate(affinity_colors):
        bright = target_pose[i,:,:]
        rgb = Color(col).rgb
        color_im = bright[:,:,np.newaxis] * np.asarray(rgb)[np.newaxis,np.newaxis,:]
        pseudo_net += color_im
    plt.subplot(2,3,3)
    plt.imshow(pseudo_net.clip(0,1))
    plt.title("affinity field")
    plt.subplot(2,3,2)
    plt.title("pixel targets, h: {} w: {}".format(pseudo_net.shape[0],pseudo_net.shape[1]))
    # one small panel per affinity channel (implant/ear/nose/tail links)
    for i,(t,col) in enumerate(zip(["I --> E","I --> N","I --> T","E --> E","E --> T","E --> N","N --> T"],affinity_colors)):
        plt.subplot(4,4,9+i)
        bright = target_pose[i,:,:]
        rgb = Color(col).rgb
        color_im = bright[:,:,np.newaxis] * np.asarray(rgb)[np.newaxis,np.newaxis,:]
        plt.imshow( color_im/np.max(color_im) )
        # plt.imshow( color_im)
        # plt.imshow(bright)
        plt.axis('off')
        plt.title(t)
    # ADD FINAL TOUCH
    # overlay: point composite pasted on top of the affinity composite
    plt.subplot(4,4,16)
    show = np.copy(pseudo_net.clip(0,1))
    add_me = pseudo.clip(0,1)
    mask_me = np.any(add_me > .3,2)
    show[mask_me,:] = add_me[mask_me,:]
    plt.imshow(show)
    if save_fig:
        plt.savefig('cinema_training/trainframe_{}_.png'.format(np.random.uniform()))
    plt.show()
#%%
def plot_im_target_pseudo_ir(input_var,target_var,size = 10,save_fig = False):
    """IR (single-channel) variant of plot_im_target_pseudo: grayscale
    input plus pseudo-colored point/affinity target composites.

    Assumes ``input_var`` is (N, 1, H, W) and ``target_var`` is
    (N, 11, H, W) — TODO confirm with the loader.
    """
    # def show_frame(input_var,target_var):
    # plt.imshow(input_var.data.cpu()[0,:,:,:].numpy())
    input_image = input_var.data.cpu()[0,0,:,:].numpy()
    target_stack = target_var.data.cpu()[0,:,:,:].numpy()
    # first 4 channels: body-part point maps; the rest: affinity fields
    target_image = target_stack[:4,...]
    target_pose = target_stack[4:,...]
    # = np.moveaxis(target[0,:,:,:].numpy() ,0,2)
    # test.shape
    # score_map = output.data.cpu()
    tt = ["implant","ears","noses",'tails']
    # show the tracking belief map
    Fig1 = plt.figure(figsize=(1.5*size,size))
    plt.subplot(2,3,1)
    plt.imshow(input_image,cmap='gray')
    plt.title("image space, h: {} w: {}".format(input_image.shape[0],input_image.shape[1]) )
    plt.subplot(2,3,2)
    # from matplotlib.pyplot import cm
    # build a pseudo-color composite: each point channel tinted by its color
    pseudo = np.zeros((target_image.shape[1],target_image.shape[2],3))
    body_colors =['dodgerblue','red','lime','orange']
    for i,col in enumerate(body_colors):
        bright = target_image[i,:,:]
        rgb = Color(col).rgb
        color_im = bright[:,:,np.newaxis] * np.asarray(rgb)[np.newaxis,np.newaxis,:]
        pseudo += color_im
    pseudo = np.clip(pseudo,0,1)
    # # Write some Text
    # font = cv2.FONT_HERSHEY_SIMPLEX
    # fontScale = .4
    # lineType = 0
    t_h,t_w = pseudo.shape[:2]
    pad = 10
    # for i,(type,col,x,y) in enumerate(zip(tt,body_colors,[pad,pad,t_h-pad,t_h-pad],[pad,2*pad,pad,2*pad])):
    # # do as in-place?
    # rgb = Color(col).rgb
    # fontColor = rgb
    # bottomLeftCornerOfText = (10,i*10+20)
    # bottomLeftCornerOfText = (x,y)
    # cv2.putText(pseudo,type,bottomLeftCornerOfText,font,fontScale,fontColor,lineType)
    plt.imshow(pseudo)
    # NOTE: x and y from the zip are immediately overwritten below, so the
    # labels are stacked at a fixed column.
    for i,(type,col,x,y) in enumerate(zip(tt,body_colors,[pad,pad,t_h-pad,t_h-pad],[pad,2*pad,pad,2*pad])):
        x = 6
        y = i*6+6
        plt.text(x, y, type, fontsize=12,color = col)
    # same pseudo-color composite for the affinity-field channels
    pseudo_net = np.zeros((target_image.shape[1],target_image.shape[2],3))
    affinity_colors = ['dodgerblue','yellow','purple','red','lime','orange','hotpink']
    for i,col in enumerate(affinity_colors):
        bright = target_pose[i,:,:]
        rgb = Color(col).rgb
        color_im = bright[:,:,np.newaxis] * np.asarray(rgb)[np.newaxis,np.newaxis,:]
        pseudo_net += color_im
    plt.subplot(2,3,3)
    plt.imshow(pseudo_net.clip(0,1))
    plt.title("affinity field")
    plt.subplot(2,3,2)
    plt.title("pixel targets, h: {} w: {}".format(pseudo_net.shape[0],pseudo_net.shape[1]))
    # one small panel per affinity channel (implant/ear/nose/tail links)
    for i,(t,col) in enumerate(zip(["I --> E","I --> N","I --> T","E --> E","E --> T","E --> N","N --> T"],affinity_colors)):
        plt.subplot(4,4,9+i)
        bright = target_pose[i,:,:]
        rgb = Color(col).rgb
        color_im = bright[:,:,np.newaxis] * np.asarray(rgb)[np.newaxis,np.newaxis,:]
        plt.imshow( color_im/np.max(color_im) )
        # plt.imshow( color_im)
        # plt.imshow(bright)
        plt.axis('off')
        plt.title(t)
    # ADD FINAL TOUCH
    # overlay: point composite pasted on top of the affinity composite
    plt.subplot(4,4,16)
    show = np.copy(pseudo_net.clip(0,1))
    add_me = pseudo.clip(0,1)
    mask_me = np.any(add_me > .3,2)
    show[mask_me,:] = add_me[mask_me,:]
    plt.imshow(show)
    if save_fig:
        plt.savefig('cinema_training/trainframe_{}_.png'.format(np.random.uniform()))
    plt.show()
#%%
def convet_to_pseudo(target_var):
    """Convert a stacked target tensor into three RGB pseudo-colour images.

    The first batch item of ``target_var`` is split into four body-part
    channels and the remaining affinity-field channels; each channel is
    tinted with a fixed colour and summed into an RGB canvas.

    Returns
    -------
    (pseudo, pseudo_net, show) : tuple of float arrays, shape (h, w, 3)
        body-part image, affinity-field image, and the affinity image with
        strong body-part pixels (> 0.3 on any channel) pasted on top.
    """
    stack = target_var.data.cpu()[0, :, :, :].numpy()
    part_maps = stack[:4, ...]   # body-part target maps
    paf_maps = stack[4:, ...]    # part-affinity-field target maps

    def _colorize(maps, palette):
        # Sum every single-channel map tinted by its palette colour,
        # then clip the composite into the displayable [0, 1] range.
        canvas = np.zeros((maps.shape[1], maps.shape[2], 3))
        for channel, name in zip(maps, palette):
            tint = np.asarray(Color(name).rgb)
            canvas += channel[:, :, np.newaxis] * tint[np.newaxis, np.newaxis, :]
        return np.clip(canvas, 0, 1)

    pseudo = _colorize(part_maps, ['dodgerblue', 'red', 'lime', 'orange'])
    pseudo_net = _colorize(
        paf_maps,
        ['dodgerblue', 'yellow', 'purple', 'red', 'lime', 'orange', 'hotpink'])

    # Overlay: wherever a body-part pixel is strong, it wins over the PAFs.
    show = np.copy(pseudo_net)
    strong = np.any(pseudo > .3, 2)
    show[strong, :] = pseudo[strong, :]
    return pseudo, pseudo_net, show
def plot_and_dump_im_target_pseudo(input_var, target_var, size=10, save_fig=False,
                                   figure_dump_folder='/home/chrelli/git/3d_sandbox/mouseposev0p2/figure_raw_pics/figure_3'):
    """Plot one training example and dump the individual panels as PNGs.

    Lays out the input image, the pseudo-coloured body-part targets, the
    part-affinity-field (PAF) targets and one panel per PAF channel, while
    writing each panel to ``figure_dump_folder + '/train/'`` with OpenCV
    (which expects BGR channel order, hence the ``[2, 1, 0]`` swaps).

    Parameters
    ----------
    input_var : tensor-like, shape (batch, >=3, H, W); only batch item 0,
        channels 0-2 are used. Assumed scaled to [0, 1] — TODO confirm.
    target_var : tensor-like, shape (batch, 11, h, w); channels 0-3 are
        body-part maps, channels 4-10 are PAF maps.
    size : figure height in inches (width is 1.5x).
    save_fig : if True, additionally save the whole figure to
        'cinema_training/'.
    figure_dump_folder : destination root for the raw PNG dumps. The
        default preserves the previously hard-coded path, so existing
        callers are unaffected.
    """
    input_image = np.moveaxis(input_var.data.cpu()[0, :3, :, :].numpy(), 0, 2)
    target_stack = target_var.data.cpu()[0, :, :, :].numpy()
    target_image = target_stack[:4, ...]   # body-part target maps
    target_pose = target_stack[4:, ...]    # affinity-field target maps
    part_names = ["implant", "ears", "noses", 'tails']

    plt.figure(figsize=(1.5 * size, size))
    plt.subplot(2, 3, 1)
    plt.imshow(input_image[:, :, [2, 1, 0]])  # stored BGR -> show RGB
    cv2.imwrite(figure_dump_folder + '/train/dump_im.png', input_image * 255)
    plt.title("image space, h: {} w: {}".format(input_image.shape[0], input_image.shape[1]))

    # Pseudo-colour the four body-part channels into one RGB composite.
    plt.subplot(2, 3, 2)
    pseudo = np.zeros((target_image.shape[1], target_image.shape[2], 3))
    body_colors = ['dodgerblue', 'red', 'lime', 'orange']
    for i, col in enumerate(body_colors):
        bright = target_image[i, :, :]
        rgb = Color(col).rgb
        pseudo += bright[:, :, np.newaxis] * np.asarray(rgb)[np.newaxis, np.newaxis, :]
    pseudo = np.clip(pseudo, 0, 1)
    plt.imshow(pseudo)
    cv2.imwrite(figure_dump_folder + '/train/dump_pseudo_targets.png',
                pseudo[:, :, [2, 1, 0]] * 255)
    # Colour-matched legend text in the top-left corner.
    for i, (label, col) in enumerate(zip(part_names, body_colors)):
        plt.text(6, i * 6 + 6, label, fontsize=12, color=col)

    # Pseudo-colour the seven PAF channels.
    pseudo_net = np.zeros((target_image.shape[1], target_image.shape[2], 3))
    affinity_colors = ['dodgerblue', 'yellow', 'purple', 'red', 'lime', 'orange', 'hotpink']
    for i, col in enumerate(affinity_colors):
        bright = target_pose[i, :, :]
        rgb = Color(col).rgb
        pseudo_net += bright[:, :, np.newaxis] * np.asarray(rgb)[np.newaxis, np.newaxis, :]
    plt.subplot(2, 3, 3)
    plt.imshow(pseudo_net.clip(0, 1))
    dddump = pseudo_net[:, :, [2, 1, 0]].clip(0, 1)
    cv2.imwrite(figure_dump_folder + '/train/dump_pseudo_pafs.png',
                dddump / np.max(dddump) * 255)
    plt.title("affinity field")
    plt.subplot(2, 3, 2)  # re-select panel 2 just to set its title
    plt.title("pixel targets, h: {} w: {}".format(pseudo_net.shape[0], pseudo_net.shape[1]))

    # One small panel per PAF channel, each normalized to its own max.
    paf_names = ["I --> E", "I --> N", "I --> T", "E --> E", "E --> T", "E --> N", "N --> T"]
    for i, (t, col) in enumerate(zip(paf_names, affinity_colors)):
        plt.subplot(4, 4, 9 + i)
        bright = target_pose[i, :, :]
        rgb = Color(col).rgb
        color_im = bright[:, :, np.newaxis] * np.asarray(rgb)[np.newaxis, np.newaxis, :]
        # NOTE(review): np.max(color_im) is 0 for an all-zero channel, which
        # yields a NaN image; behaviour kept as before.
        plt.imshow(color_im / np.max(color_im))
        cv2.imwrite(figure_dump_folder + '/train/dump_pafs' + t + '.png',
                    color_im[:, :, [2, 1, 0]] / np.max(color_im) * 255)
        plt.axis('off')
        plt.title(t)
    if save_fig:
        plt.savefig('cinema_training/trainframe_{}_.png'.format(np.random.uniform()))
    plt.show()
#%%
def plot_ito_pseudo(input_var,target_var,output,size = 10):
    """Plot input, targets, and network output (ITO) as pseudo-colour panels.

    3x3 grid: RGB input (1), body-part targets (2), affinity-field targets
    (3), network belief for parts (5) and affinity fields (6); a bottom row
    then shows each body-part belief channel in its own tint.

    Parameters: ``input_var`` batch tensor (item 0, channels 0-2 used);
    ``target_var`` batch tensor with 4 part + remaining PAF channels;
    ``output`` sequence of network stages — only the last is plotted;
    ``size`` figure height in inches. Returns the matplotlib Figure.
    """
    input_image = input_var.data.cpu()[0,:3,:,:].numpy()
    input_image = np.moveaxis(input_image,0,2)
    target_stack = target_var.data.cpu()[0,:,:,:].numpy()
    # clip the target to 1!
    target_stack = np.clip(target_stack,0,1)
    target_image = target_stack[:4,...]  # body-part target maps
    target_pose = target_stack[4:,...]   # affinity-field target maps
    # only the final stage of the network output is visualized
    score_map = output[-1].data.cpu().numpy()
    tt = ["implant","ears","noses",'tails']
    # show the tracking belief map
    Fig1 = plt.figure(figsize=(1.5*size,size))
    plt.subplot(3,3,1)
    plt.title('image space')
    plt.imshow(input_image[:,:,[2,1,0]])  # BGR storage -> RGB display
    plt.subplot(3,3,2)
    body_colors =['dodgerblue','red','lime','orange']
    def color_target_points(target_image,body_colors):
        # Tint each part channel with its colour and sum into one RGB image.
        pseudo = np.zeros((target_image.shape[1],target_image.shape[2],3))
        for i,col in enumerate(body_colors):
            bright = target_image[i,:,:]
            rgb = Color(col).rgb
            color_im = bright[:,:,np.newaxis] * np.asarray(rgb)[np.newaxis,np.newaxis,:]
            pseudo += color_im
        pseudo = np.clip(pseudo,0,1)
        return pseudo
    pseudo = color_target_points(target_image,body_colors)
    t_h,t_w = pseudo.shape[:2]
    pad = 10
    plt.imshow(pseudo)
    # colour-matched legend; note x/y from the zip are overridden below
    for i,(type,col,x,y) in enumerate(zip(tt,body_colors,[pad,pad,t_h-pad,t_h-pad],[pad,2*pad,pad,2*pad])):
        x = 10
        y = i*10+10
        plt.text(x, y, type, fontsize=18,color = col)
    plt.title("pixel targets")
    affinity_colors = ['dodgerblue','yellow','purple','red','lime','orange','hotpink']
    def color_target_lines(target_pose,affinity_colors):
        # Same tint-and-sum scheme for the affinity-field channels.
        pseudo_net = np.zeros((target_pose.shape[1],target_pose.shape[2],3))
        for i,col in enumerate(affinity_colors):
            bright = target_pose[i,:,:]
            rgb = Color(col).rgb
            color_im = bright[:,:,np.newaxis] * np.asarray(rgb)[np.newaxis,np.newaxis,:]
            pseudo_net += color_im
        pseudo_net = pseudo_net.clip(0,1)
        return pseudo_net
    plt.subplot(3,3,3)
    pseudo_net = color_target_lines(target_pose,affinity_colors)
    plt.imshow(pseudo_net)
    plt.title("affinity field")
    # network output rendered with the same colourings as the targets
    plt.subplot(3,3,5)
    pseudo_belief = color_target_points(score_map[0,:4,:,:],body_colors)
    plt.imshow(pseudo_belief)
    plt.title("network belief")
    plt.subplot(3,3,6)
    pseudo_belief = color_target_lines(score_map[0,4:,:,:],affinity_colors)
    plt.imshow(pseudo_belief)
    plt.title("network belief")
    # bottom row: one tinted panel per body-part belief channel
    pseudo = np.zeros((target_image.shape[1],target_image.shape[2],3))
    for i,col in enumerate(body_colors):
        plt.subplot(3,4,9+i)
        bright = score_map[0,i,:,:]
        rgb = Color(col).rgb
        color_im = bright[:,:,np.newaxis] * np.asarray(rgb)[np.newaxis,np.newaxis,:]
        color_im = color_im.clip(0,1)
        plt.imshow(color_im)
    return Fig1
def plot_ito_pseudo_ir(input_var,target_var,output,size = 10):
    """Plot input, targets, and network output for a single-channel (IR) input.

    Same layout as ``plot_ito_pseudo`` but takes a one-channel image
    (item 0, channel 0 of ``input_var``) shown without a BGR swap.
    Returns the matplotlib Figure.
    """
    input_image = input_var.data.cpu()[0,0,:,:].numpy()  # single IR channel
    target_stack = target_var.data.cpu()[0,:,:,:].numpy()
    # clip the target to 1!
    target_stack = np.clip(target_stack,0,1)
    target_image = target_stack[:4,...]  # body-part target maps
    target_pose = target_stack[4:,...]   # affinity-field target maps
    # only the final stage of the network output is visualized
    score_map = output[-1].data.cpu().numpy()
    tt = ["implant","ears","noses",'tails']
    # show the tracking belief map
    Fig1 = plt.figure(figsize=(1.5*size,size))
    plt.subplot(3,3,1)
    plt.title('image space')
    plt.imshow(input_image)
    plt.subplot(3,3,2)
    body_colors =['dodgerblue','red','lime','orange']
    def color_target_points(target_image,body_colors):
        # Tint each part channel with its colour and sum into one RGB image.
        pseudo = np.zeros((target_image.shape[1],target_image.shape[2],3))
        for i,col in enumerate(body_colors):
            bright = target_image[i,:,:]
            rgb = Color(col).rgb
            color_im = bright[:,:,np.newaxis] * np.asarray(rgb)[np.newaxis,np.newaxis,:]
            pseudo += color_im
        pseudo = np.clip(pseudo,0,1)
        return pseudo
    pseudo = color_target_points(target_image,body_colors)
    t_h,t_w = pseudo.shape[:2]
    pad = 10
    plt.imshow(pseudo)
    # colour-matched legend; note x/y from the zip are overridden below
    for i,(type,col,x,y) in enumerate(zip(tt,body_colors,[pad,pad,t_h-pad,t_h-pad],[pad,2*pad,pad,2*pad])):
        x = 10
        y = i*10+10
        plt.text(x, y, type, fontsize=18,color = col)
    plt.title("pixel targets")
    affinity_colors = ['dodgerblue','yellow','purple','red','lime','orange','hotpink']
    def color_target_lines(target_pose,affinity_colors):
        # Same tint-and-sum scheme for the affinity-field channels.
        pseudo_net = np.zeros((target_pose.shape[1],target_pose.shape[2],3))
        for i,col in enumerate(affinity_colors):
            bright = target_pose[i,:,:]
            rgb = Color(col).rgb
            color_im = bright[:,:,np.newaxis] * np.asarray(rgb)[np.newaxis,np.newaxis,:]
            pseudo_net += color_im
        pseudo_net = pseudo_net.clip(0,1)
        return pseudo_net
    plt.subplot(3,3,3)
    pseudo_net = color_target_lines(target_pose,affinity_colors)
    plt.imshow(pseudo_net)
    plt.title("affinity field")
    # network output rendered with the same colourings as the targets
    plt.subplot(3,3,5)
    pseudo_belief = color_target_points(score_map[0,:4,:,:],body_colors)
    plt.imshow(pseudo_belief)
    plt.title("network belief")
    plt.subplot(3,3,6)
    pseudo_belief = color_target_lines(score_map[0,4:,:,:],affinity_colors)
    plt.imshow(pseudo_belief)
    plt.title("network belief")
    # bottom row: one tinted panel per body-part belief channel
    pseudo = np.zeros((target_image.shape[1],target_image.shape[2],3))
    for i,col in enumerate(body_colors):
        plt.subplot(3,4,9+i)
        bright = score_map[0,i,:,:]
        rgb = Color(col).rgb
        color_im = bright[:,:,np.newaxis] * np.asarray(rgb)[np.newaxis,np.newaxis,:]
        color_im = color_im.clip(0,1)
        plt.imshow(color_im)
    return Fig1
# # EXAMPLE OF AUGMENTATION
# index = 5
# geometry = pickle.load( open( tracking_folder+'/geometry.pkl', "rb" ) )
# depth_scale = geometry['d_cam_params'][3][4]
# xy = h5_file['annotations'][index]
# c_image = h5_file['c_images'][index]
# dac_image = h5_file['dac_images'][index]
# # images = im[:3,:,:].astype('float32')
# # images = np.moveaxis(images,0,2)[np.newaxis,:,:,:]
# images = c_image[np.newaxis,:,:,[2,1,0]]
# import imgaug.augmenters as iaa
# seq = iaa.Sequential([
# # iaa.Crop(px=(0, 100)), # crop images from each side by 0 to 16px (randomly chosen)
# iaa.CropAndPad(percent=(-0.05, 0.15), sample_independently=False),
# iaa.Fliplr(0.5), # horizontally flip 50% of the images
# iaa.Sometimes(.2, iaa.GaussianBlur(sigma=(0, 1.5)) ), # blur images with a sigma of 0 to 3.0
# iaa.Sometimes( 1, iaa.Dropout(p = (0,0.2)) ),
# iaa.Affine(rotate=(-30, 30)),
# iaa.Affine(translate_percent={"x": (-0.2, 0.2), "y": (-0.2, 0.2)})
# ])
# for _ in range(5):
# images_aug, xy_aug_list = seq(images = images, keypoints=[xy])
# xy_aug = xy_aug_list[0]
# plt.figure(figsize=(15,15))
# plt.subplot(2,2,1)
# plt.imshow(images[0,...])
# plt.plot(xy[:,0],xy[:,1],'or')
# plt.subplot(2,2,2)
# plt.imshow(images_aug[0,...])
# plt.plot(xy_aug[:,0],xy_aug[:,1],'or')
# plt.show()
# plt.figure(figsize = (20,20))
# st = 620
# for i,index in enumerate(range(st,st+100)):
# plt.subplot(10,10,1+i)
# c_image = h5_file['c_images'][index]
# # blank_image = np.zeros_like(c_image)
# plt.imshow(c_image[:,:,[2,1,0]])
# plt.title(index)
# plt.show()
# # cv2.imshow('hm',c_image[:,:,:])
# # cv2.waitKey(500)
# index = 10
# xy = h5_file['annotations'][index]
# c_image = h5_file['c_images'][index]
# dac_image = h5_file['dac_images'][index]
# for index in range(10):
# c_image = h5_file['c_images'][index]
# # blank_image = np.zeros_like(c_image)
# cv2.imshow('hm',c_image[:,:,:])
# cv2.waitKey(500)
# cv2.destroyAllWindows()
| 30.641292
| 125
| 0.603629
| 4,032
| 26,566
| 3.832341
| 0.084077
| 0.032617
| 0.024204
| 0.018121
| 0.821965
| 0.818211
| 0.813422
| 0.803326
| 0.792389
| 0.787924
| 0
| 0.036079
| 0.211247
| 26,566
| 867
| 126
| 30.641292
| 0.701346
| 0.218625
| 0
| 0.839479
| 0
| 0
| 0.077798
| 0.0106
| 0
| 0
| 0
| 0
| 0
| 1
| 0.039046
| false
| 0
| 0.0282
| 0
| 0.095445
| 0.015184
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ac194623144f71b8c119a3bbe4523e7774720e91
| 18,643
|
py
|
Python
|
photutils/psf/tests/test_groupstars.py
|
Onoddil/photutils
|
433f3e54d3f53282ae04eadde9e1ddf657944590
|
[
"BSD-3-Clause"
] | null | null | null |
photutils/psf/tests/test_groupstars.py
|
Onoddil/photutils
|
433f3e54d3f53282ae04eadde9e1ddf657944590
|
[
"BSD-3-Clause"
] | null | null | null |
photutils/psf/tests/test_groupstars.py
|
Onoddil/photutils
|
433f3e54d3f53282ae04eadde9e1ddf657944590
|
[
"BSD-3-Clause"
] | null | null | null |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for the groupstars module.
"""
from astropy.table import Table, vstack
import numpy as np
from numpy.testing import assert_almost_equal
import pytest
from ..groupstars import DAOGroup, DBSCANGroup
try:
import sklearn.cluster # noqa
HAS_SKLEARN = True
except ImportError:
HAS_SKLEARN = False
def assert_table_almost_equal(table1, table2):
    """Assert two tables have identical columns/metadata and near-equal data."""
    assert table1.colnames == table2.colnames
    assert table1.meta == table2.meta
    columns = table1.colnames
    for column in columns:
        left, right = table1[column], table2[column]
        assert_almost_equal(left, right)
class TestDAOGROUP:
    """Functional tests for DAOGroup star grouping.

    Each test builds a star list whose correct grouping is known (the
    ``group_id`` column), runs DAOGroup on positions (and optionally ids)
    only, and checks the recovered table matches the expectation.
    """

    def test_daogroup_one(self):
        """Two X-shaped clusters of 5 stars each, centred at x=0 and x=2;
        crit_separation=0.6 should recover the two groups."""
        x_0 = np.array([0, np.sqrt(2)/4, np.sqrt(2)/4, -np.sqrt(2)/4,
                        -np.sqrt(2)/4])
        y_0 = np.array([0, np.sqrt(2)/4, -np.sqrt(2)/4, np.sqrt(2)/4,
                        -np.sqrt(2)/4])
        x_1 = x_0 + 2.0
        first_group = Table([x_0, y_0, np.arange(len(x_0)) + 1,
                             np.ones(len(x_0), dtype=int)],
                            names=('x_0', 'y_0', 'id', 'group_id'))
        second_group = Table([x_1, y_0, len(x_0) + np.arange(len(x_0)) + 1,
                              2*np.ones(len(x_0), dtype=int)],
                             names=('x_0', 'y_0', 'id', 'group_id'))
        starlist = vstack([first_group, second_group])
        daogroup = DAOGroup(crit_separation=0.6)
        test_starlist = daogroup(starlist['x_0', 'y_0', 'id'])
        assert_table_almost_equal(starlist, test_starlist)

    def test_daogroup_two(self):
        """Two vertical strings of 5 stars at x=0, y in [0, 1] and [2, 3];
        crit_separation=0.3 separates them."""
        first_group = Table([np.zeros(5), np.linspace(0, 1, 5),
                             np.arange(5) + 1, np.ones(5, dtype=int)],
                            names=('x_0', 'y_0', 'id', 'group_id'))
        second_group = Table([np.zeros(5), np.linspace(2, 3, 5),
                              6 + np.arange(5), 2*np.ones(5, dtype=int)],
                             names=('x_0', 'y_0', 'id', 'group_id'))
        starlist = vstack([first_group, second_group])
        daogroup = DAOGroup(crit_separation=0.3)
        test_starlist = daogroup(starlist['x_0', 'y_0', 'id'])
        assert_table_almost_equal(starlist, test_starlist)

    def test_daogroup_three(self):
        """Two horizontal strings of 5 stars at y=0, x in [0, 1] and [2, 3];
        crit_separation=0.3 separates them."""
        first_group = Table([np.linspace(0, 1, 5), np.zeros(5),
                             np.arange(5) + 1, np.ones(5, dtype=int)],
                            names=('x_0', 'y_0', 'id', 'group_id'))
        second_group = Table([np.linspace(2, 3, 5), np.zeros(5),
                              6 + np.arange(5), 2*np.ones(5, dtype=int)],
                             names=('x_0', 'y_0', 'id', 'group_id'))
        starlist = vstack([first_group, second_group])
        daogroup = DAOGroup(crit_separation=0.3)
        test_starlist = daogroup(starlist['x_0', 'y_0', 'id'])
        assert_table_almost_equal(starlist, test_starlist)

    def test_daogroup_four(self):
        """Ten stars on the unit circle (5 x-positions, mirrored in y);
        crit_separation=2.5 puts them all in a single group."""
        x = np.linspace(-1., 1., 5)
        y = np.sqrt(1. - x**2)
        xx = np.hstack((x, x))
        yy = np.hstack((y, -y))
        starlist = Table([xx, yy, np.arange(10) + 1,
                          np.ones(10, dtype=int)],
                         names=('x_0', 'y_0', 'id', 'group_id'))
        daogroup = DAOGroup(crit_separation=2.5)
        test_starlist = daogroup(starlist['x_0', 'y_0', 'id'])
        assert_table_almost_equal(starlist, test_starlist)

    def test_daogroup_five(self):
        """Cross pattern: four strings of 5 stars (two vertical at x=1.5,
        two horizontal at y=1.5); crit_separation=0.3 yields four groups."""
        first_group = Table([1.5*np.ones(5), np.linspace(0, 1, 5),
                             np.arange(5) + 1, np.ones(5, dtype=int)],
                            names=('x_0', 'y_0', 'id', 'group_id'))
        second_group = Table([1.5*np.ones(5), np.linspace(2, 3, 5),
                              6 + np.arange(5), 2*np.ones(5, dtype=int)],
                             names=('x_0', 'y_0', 'id', 'group_id'))
        third_group = Table([np.linspace(0, 1, 5), 1.5*np.ones(5),
                             11 + np.arange(5), 3*np.ones(5, dtype=int)],
                            names=('x_0', 'y_0', 'id', 'group_id'))
        fourth_group = Table([np.linspace(2, 3, 5), 1.5*np.ones(5),
                              16 + np.arange(5), 4*np.ones(5, dtype=int)],
                             names=('x_0', 'y_0', 'id', 'group_id'))
        starlist = vstack([first_group, second_group, third_group,
                           fourth_group])
        daogroup = DAOGroup(crit_separation=0.3)
        test_starlist = daogroup(starlist['x_0', 'y_0', 'id'])
        assert_table_almost_equal(starlist, test_starlist)

    def test_daogroup_six(self):
        """Three X-shaped clusters of 5 stars centred at x=0, 2, and 4;
        crit_separation=0.6 recovers the three groups."""
        x_0 = np.array([0, np.sqrt(2)/4, np.sqrt(2)/4, -np.sqrt(2)/4,
                        -np.sqrt(2)/4])
        y_0 = np.array([0, np.sqrt(2)/4, -np.sqrt(2)/4, np.sqrt(2)/4,
                        -np.sqrt(2)/4])
        x_1 = x_0 + 2.0
        x_2 = x_0 + 4.0
        first_group = Table([x_0, y_0, np.arange(5) + 1,
                             np.ones(5, dtype=int)],
                            names=('x_0', 'y_0', 'id', 'group_id'))
        second_group = Table([x_1, y_0, 6 + np.arange(5),
                              2*np.ones(5, dtype=int)],
                             names=('x_0', 'y_0', 'id', 'group_id'))
        third_group = Table([x_2, y_0, 11 + np.arange(5),
                             3*np.ones(5, dtype=int)],
                            names=('x_0', 'y_0', 'id', 'group_id'))
        starlist = vstack([first_group, second_group, third_group])
        daogroup = DAOGroup(crit_separation=0.6)
        test_starlist = daogroup(starlist['x_0', 'y_0', 'id'])
        assert_table_almost_equal(starlist, test_starlist)

    def test_isolated_sources(self):
        """
        Test case when all sources are isolated.
        """
        # crit_separation far smaller than any pairwise distance, so every
        # star ends up in its own group
        x_0 = np.array([0, np.sqrt(2)/4, np.sqrt(2)/4, -np.sqrt(2)/4,
                        -np.sqrt(2)/4])
        y_0 = np.array([0, np.sqrt(2)/4, -np.sqrt(2)/4, np.sqrt(2)/4,
                        -np.sqrt(2)/4])
        starlist = Table([x_0, y_0, np.arange(len(x_0)) + 1,
                          np.arange(len(x_0)) + 1],
                         names=('x_0', 'y_0', 'id', 'group_id'))
        daogroup = DAOGroup(crit_separation=0.01)
        test_starlist = daogroup(starlist['x_0', 'y_0', 'id'])
        assert_table_almost_equal(starlist, test_starlist)

    def test_id_column(self):
        """When no 'id' column is supplied, ids 1..N are auto-generated."""
        x_0 = np.array([0, np.sqrt(2)/4, np.sqrt(2)/4, -np.sqrt(2)/4,
                        -np.sqrt(2)/4])
        y_0 = np.array([0, np.sqrt(2)/4, -np.sqrt(2)/4, np.sqrt(2)/4,
                        -np.sqrt(2)/4])
        starlist = Table([x_0, y_0, np.arange(len(x_0)) + 1,
                          np.arange(len(x_0)) + 1],
                         names=('x_0', 'y_0', 'id', 'group_id'))
        daogroup = DAOGroup(crit_separation=0.01)
        test_starlist = daogroup(starlist['x_0', 'y_0'])
        assert_table_almost_equal(starlist, test_starlist)

    def test_id_column_raise_error(self):
        """An 'id' column that does not start at 1 raises ValueError."""
        x_0 = np.array([0, np.sqrt(2)/4, np.sqrt(2)/4, -np.sqrt(2)/4,
                        -np.sqrt(2)/4])
        y_0 = np.array([0, np.sqrt(2)/4, -np.sqrt(2)/4, np.sqrt(2)/4,
                        -np.sqrt(2)/4])
        starlist = Table([x_0, y_0, np.arange(len(x_0)),
                          np.arange(len(x_0)) + 1],
                         names=('x_0', 'y_0', 'id', 'group_id'))
        daogroup = DAOGroup(crit_separation=0.01)
        with pytest.raises(ValueError):
            daogroup(starlist['x_0', 'y_0', 'id'])
@pytest.mark.skipif('not HAS_SKLEARN')
class TestDBSCANGroup:
    """Functional tests for DBSCANGroup, mirroring the DAOGroup cases.

    Requires scikit-learn; skipped otherwise.
    """

    # Fixed: the first two methods previously declared their instance
    # parameter as ``object`` (shadowing the builtin) instead of ``self``.

    def test_group_stars_one(self):
        """Two X-shaped clusters of 5 stars centred at x=0 and x=2."""
        x_0 = np.array([0, np.sqrt(2)/4, np.sqrt(2)/4, -np.sqrt(2)/4,
                        -np.sqrt(2)/4])
        y_0 = np.array([0, np.sqrt(2)/4, -np.sqrt(2)/4, np.sqrt(2)/4,
                        -np.sqrt(2)/4])
        x_1 = x_0 + 2.0
        first_group = Table([x_0, y_0, np.arange(len(x_0)) + 1,
                             np.ones(len(x_0), dtype=int)],
                            names=('x_0', 'y_0', 'id', 'group_id'))
        second_group = Table([x_1, y_0, len(x_0) + np.arange(len(x_0)) + 1,
                              2*np.ones(len(x_0), dtype=int)],
                             names=('x_0', 'y_0', 'id', 'group_id'))
        starlist = vstack([first_group, second_group])
        dbscan = DBSCANGroup(crit_separation=0.6)
        test_starlist = dbscan(starlist['x_0', 'y_0', 'id'])
        assert_table_almost_equal(starlist, test_starlist)

    def test_group_stars_two(self):
        """Cross pattern of four strings of 5 stars; four groups expected."""
        first_group = Table([1.5*np.ones(5), np.linspace(0, 1, 5),
                             np.arange(5) + 1, np.ones(5, dtype=int)],
                            names=('x_0', 'y_0', 'id', 'group_id'))
        second_group = Table([1.5*np.ones(5), np.linspace(2, 3, 5),
                              6 + np.arange(5), 2*np.ones(5, dtype=int)],
                             names=('x_0', 'y_0', 'id', 'group_id'))
        third_group = Table([np.linspace(0, 1, 5), 1.5*np.ones(5),
                             11 + np.arange(5), 3*np.ones(5, dtype=int)],
                            names=('x_0', 'y_0', 'id', 'group_id'))
        fourth_group = Table([np.linspace(2, 3, 5), 1.5*np.ones(5),
                              16 + np.arange(5), 4*np.ones(5, dtype=int)],
                             names=('x_0', 'y_0', 'id', 'group_id'))
        starlist = vstack([first_group, second_group, third_group,
                           fourth_group])
        dbscan = DBSCANGroup(crit_separation=0.3)
        test_starlist = dbscan(starlist['x_0', 'y_0', 'id'])
        assert_table_almost_equal(starlist, test_starlist)

    def test_isolated_sources(self):
        """
        Test case when all sources are isolated.
        """
        x_0 = np.array([0, np.sqrt(2)/4, np.sqrt(2)/4, -np.sqrt(2)/4,
                        -np.sqrt(2)/4])
        y_0 = np.array([0, np.sqrt(2)/4, -np.sqrt(2)/4, np.sqrt(2)/4,
                        -np.sqrt(2)/4])
        starlist = Table([x_0, y_0, np.arange(len(x_0)) + 1,
                          np.arange(len(x_0)) + 1],
                         names=('x_0', 'y_0', 'id', 'group_id'))
        dbscan = DBSCANGroup(crit_separation=0.01)
        test_starlist = dbscan(starlist['x_0', 'y_0', 'id'])
        assert_table_almost_equal(starlist, test_starlist)

    def test_id_column(self):
        """When no 'id' column is supplied, ids 1..N are auto-generated."""
        x_0 = np.array([0, np.sqrt(2)/4, np.sqrt(2)/4, -np.sqrt(2)/4,
                        -np.sqrt(2)/4])
        y_0 = np.array([0, np.sqrt(2)/4, -np.sqrt(2)/4, np.sqrt(2)/4,
                        -np.sqrt(2)/4])
        starlist = Table([x_0, y_0, np.arange(len(x_0)) + 1,
                          np.arange(len(x_0)) + 1],
                         names=('x_0', 'y_0', 'id', 'group_id'))
        dbscan = DBSCANGroup(crit_separation=0.01)
        test_starlist = dbscan(starlist['x_0', 'y_0'])
        assert_table_almost_equal(starlist, test_starlist)

    def test_id_column_raise_error(self):
        """An 'id' column that does not start at 1 raises ValueError."""
        x_0 = np.array([0, np.sqrt(2)/4, np.sqrt(2)/4, -np.sqrt(2)/4,
                        -np.sqrt(2)/4])
        y_0 = np.array([0, np.sqrt(2)/4, -np.sqrt(2)/4, np.sqrt(2)/4,
                        -np.sqrt(2)/4])
        starlist = Table([x_0, y_0, np.arange(len(x_0)),
                          np.arange(len(x_0)) + 1],
                         names=('x_0', 'y_0', 'id', 'group_id'))
        dbscan = DBSCANGroup(crit_separation=0.01)
        with pytest.raises(ValueError):
            dbscan(starlist['x_0', 'y_0', 'id'])
| 52.221289
| 75
| 0.31599
| 1,789
| 18,643
| 3.105087
| 0.066518
| 0.030243
| 0.090729
| 0.10369
| 0.858326
| 0.858326
| 0.846085
| 0.823042
| 0.809721
| 0.809721
| 0
| 0.068546
| 0.524218
| 18,643
| 356
| 76
| 52.367978
| 0.557723
| 0.343882
| 0
| 0.75122
| 0
| 0
| 0.047805
| 0
| 0
| 0
| 0
| 0
| 0.082927
| 1
| 0.073171
| false
| 0
| 0.034146
| 0
| 0.117073
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
ac2caa6da07958c426fbea7ac8132740efedf17a
| 8,710
|
py
|
Python
|
tests/test_opts.py
|
loriab/pylibefp
|
f8d934fd1b25de4eccfd2915a7306ac766a0b0a1
|
[
"BSD-3-Clause"
] | 2
|
2019-01-27T13:37:08.000Z
|
2021-11-04T16:44:29.000Z
|
tests/test_opts.py
|
loriab/pylibefp
|
f8d934fd1b25de4eccfd2915a7306ac766a0b0a1
|
[
"BSD-3-Clause"
] | 7
|
2018-10-06T23:17:20.000Z
|
2019-09-03T06:45:31.000Z
|
tests/test_opts.py
|
loriab/pylibefp
|
f8d934fd1b25de4eccfd2915a7306ac766a0b0a1
|
[
"BSD-3-Clause"
] | 1
|
2019-05-26T23:10:52.000Z
|
2019-05-26T23:10:52.000Z
|
import sys
import pytest
import pprint
import pylibefp
from systems import *
from qcelemental.testing import compare_recursive
def test_opts_libefp():
    """set_opts round-trip with libefp-native keywords.

    Covers four scenarios in sequence on the same system: defaults from an
    empty options dict, explicit settings, append-mode updates layered on
    the previous state, and a reset back to libefp defaults.
    """
    asdf = system_1()
    # 1) empty options dict returns the libefp defaults
    ref1 = {
        'ai_elec': False,
        'elec_damp': 'screen',
        'ai_disp': False,
        'chtr': False,
        'swf_cutoff': 0.0,
        'enable_cutoff': False,
        'disp': False,
        'ai_pol': False,
        'pol': False,
        'xr': False,
        'pol_driver': 'iterative',
        'ai_xr': False,
        'elec': False,
        'pol_damp': 'tt',
        'disp_damp': 'overlap',
        'enable_pbc': False,
        'ai_chtr': False
    }
    ans1 = asdf.set_opts({})
    assert compare_recursive(ref1, ans1, sys._getframe().f_code.co_name + ': blank', atol=1.e-6)
    # 2) explicit settings; string values are case-normalized ('OFF' -> 'off')
    ref2 = {
        'ai_elec': True,
        'elec_damp': 'off',
        'ai_disp': False,
        'chtr': False,
        'swf_cutoff': 1.0,
        'enable_cutoff': False,
        'disp': False,
        'ai_pol': False,
        'pol': False,
        'xr': False,
        'pol_driver': 'iterative',
        'ai_xr': False,
        'elec': True,
        'pol_damp': 'tt',
        'disp_damp': 'overlap',
        'enable_pbc': False,
        'ai_chtr': False
    }
    ans2 = asdf.set_opts({
        'elec_damp': 'OFF',
        'swf_cutoff': 1.0,
        'elec': True,
        'ai_elec': True,
        'enable_cutoff': False
    })
    assert compare_recursive(ref2, ans2, sys._getframe().f_code.co_name + ': setting', atol=1.e-6)
    # 3) append='append' layers new values over the state left by step 2
    ref3 = {
        'ai_elec': True,
        'elec_damp': 'off',
        'ai_disp': False,
        'chtr': False,
        'swf_cutoff': 2.0,
        'enable_cutoff': False,
        'disp': False,
        'ai_pol': False,
        'pol': False,
        'xr': False,
        'pol_driver': 'iterative',
        'ai_xr': False,
        'elec': False,
        'pol_damp': 'tt',
        'disp_damp': 'tt',
        'enable_pbc': False,
        'ai_chtr': False
    }
    ans3 = asdf.set_opts({'swf_cutoff': 2, 'elec': False, 'ai_elec': True, 'disp_damp': 'TT'}, append='append')
    assert compare_recursive(ref3, ans3, sys._getframe().f_code.co_name + ': append setting', atol=1.e-6)
    # 4) append='libefp' resets to libefp defaults before applying new values
    ref4 = {
        'ai_elec': False,
        'elec_damp': 'off',
        'ai_disp': False,
        'chtr': False,
        'swf_cutoff': 0.0,
        'enable_cutoff': False,
        'disp': False,
        'ai_pol': False,
        'pol': False,
        'xr': False,
        'pol_driver': 'iterative',
        'ai_xr': False,
        'elec': True,
        'pol_damp': 'tt',
        'disp_damp': 'overlap',
        'enable_pbc': False,
        'ai_chtr': False
    }
    ans4 = asdf.set_opts({
        'elec_damp': 'OFF',
        'swf_cutoff': 0.0,
        'elec': True,
        'enable_cutoff': False
    },
                         append='libefp')
    assert compare_recursive(ref4, ans4, sys._getframe().f_code.co_name + ': reset setting', atol=1.e-6)
def test_opts_fail_1():
    """An unknown option key is tolerated, but an invalid value for a
    known key raises EFPSyntaxError."""
    asdf = system_1()
    # Unknown keys pass through without raising.
    asdf.set_opts({'nonsense_key': 'harmless'})
    with pytest.raises(pylibefp.EFPSyntaxError):
        asdf.set_opts({'elec_damp': 'nonsense'})
def test_opts_fail_2():
    """A string where a boolean is expected raises EFPSyntaxError."""
    asdf = system_1()
    with pytest.raises(pylibefp.EFPSyntaxError):
        asdf.set_opts({'elec': 'yEs'})
def test_opts_psi():
    """set_opts round-trip with Psi-labelled keywords (label='psi').

    Same four scenarios as test_opts_libefp — blank, explicit setting,
    append, and libefp reset — but options are read and written using the
    psi naming scheme (qm_elst/elst_damping/... instead of ai_elec/...).
    """
    asdf = system_1()
    # 1) empty options dict returns defaults under psi names
    ref = {
        'qm_elst': False,
        'elst_damping': 'screen',
        'qm_disp': False,
        'chtr': False,
        'swf_cutoff': 0.0,
        'enable_cutoff': False,
        'disp': False,
        'qm_ind': False,
        'ind': False,
        'exch': False,
        'ind_driver': 'iterative',
        'qm_exch': False,
        'elst': False,
        'ind_damping': 'tt',
        'disp_damping': 'overlap',
        'enable_pbc': False,
        'qm_chtr': False
    }
    ans = asdf.set_opts({}, label='psi')
    assert compare_recursive(ref, ans, sys._getframe().f_code.co_name + ': psi blank', atol=1.e-6)
    # 2) explicit settings; string values case-normalized
    ref = {
        'qm_elst': True,
        'elst_damping': 'off',
        'qm_disp': False,
        'chtr': False,
        'swf_cutoff': 1.0,
        'enable_cutoff': False,
        'disp': False,
        'qm_ind': False,
        'ind': False,
        'exch': False,
        'ind_driver': 'iterative',
        'qm_exch': False,
        'elst': True,
        'ind_damping': 'tt',
        'disp_damping': 'overlap',
        'enable_pbc': False,
        'qm_chtr': False
    }
    ans = asdf.set_opts(
        {
            'elst_damping': 'OFF',
            'swf_cutoff': 1.0,
            'elst': True,
            'qm_elst': True,
            'enable_cutoff': False
        }, label='psi')
    assert compare_recursive(ref, ans, sys._getframe().f_code.co_name + ': psi setting', atol=1.e-6)
    # 3) append='append' layers over the state left by step 2
    ref = {
        'qm_elst': True,
        'elst_damping': 'off',
        'qm_disp': False,
        'chtr': False,
        'swf_cutoff': 2.0,
        'enable_cutoff': False,
        'disp': False,
        'qm_ind': False,
        'ind': False,
        'exch': False,
        'ind_driver': 'iterative',
        'qm_exch': False,
        'elst': False,
        'ind_damping': 'tt',
        'disp_damping': 'tt',
        'enable_pbc': False,
        'qm_chtr': False
    }
    ans = asdf.set_opts({
        'swf_cutoff': 2,
        'elst': False,
        'qm_elst': True,
        'disp_damping': 'TT'
    },
                        append='append',
                        label='psi')
    assert compare_recursive(ref, ans, sys._getframe().f_code.co_name + ': psi append setting', atol=1.e-6)
    # 4) append='libefp' resets to libefp defaults before applying values
    ref = {
        'qm_elst': False,
        'elst_damping': 'off',
        'qm_disp': False,
        'chtr': False,
        'swf_cutoff': 0.0,
        'enable_cutoff': False,
        'disp': False,
        'qm_ind': False,
        'ind': False,
        'exch': False,
        'ind_driver': 'iterative',
        'qm_exch': False,
        'elst': True,
        'ind_damping': 'tt',
        'disp_damping': 'overlap',
        'enable_pbc': False,
        'qm_chtr': False
    }
    ans = asdf.set_opts({
        'elst_damping': 'OFF',
        'swf_cutoff': 0.0,
        'elst': True,
        'enable_cutoff': False
    },
                        append='libefp',
                        label='psi')
    assert compare_recursive(ref, ans, sys._getframe().f_code.co_name + ': psi reset setting', atol=1.e-6)
def test_opts_psi_dflt():
    """set_opts with psi naming and append='psi' (psi default base state).

    The psi defaults differ from libefp's: qm_elst/disp/qm_ind/ind/exch/elst
    start out True. Checks blank, append-mode, and psi-reset scenarios.
    """
    asdf = system_1()
    # 1) empty dict with append='psi' yields the psi defaults
    ref = {
        'qm_elst': True,
        'elst_damping': 'screen',
        'qm_disp': False,
        'chtr': False,
        'swf_cutoff': 0.0,
        'enable_cutoff': False,
        'disp': True,
        'qm_ind': True,
        'ind': True,
        'exch': True,
        'ind_driver': 'iterative',
        'qm_exch': False,
        'elst': True,
        'ind_damping': 'tt',
        'disp_damping': 'overlap',
        'enable_pbc': False,
        'qm_chtr': False
    }
    ans = asdf.set_opts({}, label='psi', append='psi')
    assert compare_recursive(ref, ans, sys._getframe().f_code.co_name + ': psi default blank', atol=1.e-6)
    # 2) append='append' layers explicit values over the psi defaults
    ref = {
        'qm_elst': False,
        'elst_damping': 'off',
        'qm_disp': False,
        'chtr': False,
        'swf_cutoff': 1.0,
        'enable_cutoff': False,
        'disp': True,
        'qm_ind': True,
        'ind': True,
        'exch': True,
        'ind_driver': 'iterative',
        'qm_exch': False,
        'elst': True,
        'ind_damping': 'tt',
        'disp_damping': 'overlap',
        'enable_pbc': False,
        'qm_chtr': False
    }
    ans = asdf.set_opts(
        {
            'elst_damping': 'OFF',
            'swf_cutoff': 1.0,
            'elst': True,
            'qm_elst': False,
            'enable_cutoff': False
        },
        label='psi',
        append='append')
    assert compare_recursive(ref, ans, sys._getframe().f_code.co_name + ': psi default append setting', atol=1.e-6)
    # 3) append='psi' resets to psi defaults before applying new values;
    #    mixed-case 'OVERlap' is normalized to 'overlap'
    ref = {
        'qm_elst': True,
        'elst_damping': 'overlap',
        'qm_disp': False,
        'chtr': False,
        'swf_cutoff': 2.0,
        'enable_cutoff': True,
        'disp': True,
        'qm_ind': True,
        'ind': True,
        'exch': True,
        'ind_driver': 'iterative',
        'qm_exch': False,
        'elst': False,
        'ind_damping': 'tt',
        'disp_damping': 'overlap',
        'enable_pbc': False,
        'qm_chtr': False
    }
    ans = asdf.set_opts({
        'elst_damping': 'OVERlap',
        'swf_cutoff': 2.0,
        'elst': False,
        'enable_cutoff': True
    },
                        append='psi',
                        label='psi')
    assert compare_recursive(ref, ans, sys._getframe().f_code.co_name + ': psi default reset setting', atol=1.e-6)
| 26.717791
| 115
| 0.49667
| 986
| 8,710
| 4.144016
| 0.083164
| 0.048458
| 0.062408
| 0.048458
| 0.864905
| 0.81816
| 0.745228
| 0.745228
| 0.727606
| 0.708272
| 0
| 0.014129
| 0.341791
| 8,710
| 325
| 116
| 26.8
| 0.698587
| 0
| 0
| 0.767442
| 0
| 0
| 0.257865
| 0
| 0
| 0
| 0
| 0
| 0.036545
| 1
| 0.016611
| false
| 0
| 0.019934
| 0
| 0.036545
| 0.003322
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ac51c75f24fba3b78d60603284f8add62c6e4703
| 8,494
|
py
|
Python
|
tests/functional_tests/test_fjs_instances.py
|
mcfadd/Job_Shop_Schedule_Problem
|
94696af80911c80174682f97fc6f607e4c73ae54
|
[
"0BSD"
] | 45
|
2019-08-27T21:42:42.000Z
|
2022-02-17T12:35:18.000Z
|
tests/functional_tests/test_fjs_instances.py
|
aisha-farooq/Job_Shop_Schedule_Problem
|
94696af80911c80174682f97fc6f607e4c73ae54
|
[
"0BSD"
] | 9
|
2019-07-20T19:45:01.000Z
|
2022-03-30T19:36:26.000Z
|
tests/functional_tests/test_fjs_instances.py
|
aisha-farooq/Job_Shop_Schedule_Problem
|
94696af80911c80174682f97fc6f607e4c73ae54
|
[
"0BSD"
] | 17
|
2020-05-05T07:38:12.000Z
|
2022-03-23T02:36:44.000Z
|
import random
import unittest
from JSSP import data
from JSSP.solver import Solver
from tests.util import project_root, tmp_dir, get_files_with_suffix, rm_tree
fjs_data = get_files_with_suffix(project_root / 'data/fjs_data', '.fjs')
fjs_data = random.choices(fjs_data, k=20)
class TestFJSOptimization(unittest.TestCase):
    """Functional tests that run the Tabu Search (TS) and Genetic Algorithm (GA)
    solvers over a random sample of .fjs benchmark instances and check that a
    solution and the expected output artifacts are produced.

    The four public tests are thin wrappers over two parameterized helpers,
    replacing near-duplicate copies of the same loop.
    """

    def setUp(self) -> None:
        # Each test writes its output files into a scratch directory.
        if not tmp_dir.exists():
            tmp_dir.mkdir()

    def tearDown(self) -> None:
        rm_tree(tmp_dir)

    # ------------------------------------------------------------------
    # helpers

    def _run_ts_over_instances(self, benchmark, output_name):
        """Run tabu search on every sampled instance and verify its output.

        benchmark: when True, run with benchmark=True and check the benchmark
            results output; otherwise check the schedule xlsx file.
        output_name: file name (under tmp_dir) the solver output is written to.
        """
        # parameters -- keep iterations small so the suite stays fast
        iterations = 50
        num_processes = 1
        tabu_list_size = 10
        neighborhood_size = 25
        neighborhood_wait = 0.1
        probability_change_machine = 0.8

        # Only pass benchmark=True explicitly so the non-benchmark call is
        # identical to the solver's default-argument behavior.
        extra_kwargs = {'benchmark': True} if benchmark else {}

        for i, fjs_instance in enumerate(fjs_data):
            print(f"testing fjs instance {fjs_instance} ({i + 1} of {len(fjs_data)})")
            try:
                data_instance = data.FJSData(fjs_instance)
                solver = Solver(data_instance)
                solver.tabu_search_iter(iterations,
                                        num_solutions_per_process=1,
                                        num_processes=num_processes,
                                        tabu_list_size=tabu_list_size,
                                        neighborhood_size=neighborhood_size,
                                        neighborhood_wait=neighborhood_wait,
                                        probability_change_machine=probability_change_machine,
                                        **extra_kwargs)
            except Exception as e:
                self.fail(f'Unexpected exception raised while running TS for {fjs_instance}:' + str(e))

            self.assertIsNotNone(solver.solution, "TS should have produced a best solution")

            # output results
            output_file = tmp_dir / output_name
            if benchmark:
                solver.output_benchmark_results(output_file, auto_open=False)
            else:
                solver.solution.create_schedule_xlsx_file(output_file)
            self.assertTrue(output_file.exists(), f"{output_name} was not produced")

    def _run_ga_over_instances(self, benchmark, output_name):
        """Run the GA on every sampled instance; verify agent parameters,
        solution quality and the produced output file."""
        # parameters -- keep iterations and population small so tests stay fast
        iterations = 10
        population_size = 50
        mutation_probability = 0.8
        selection_size = 5

        extra_kwargs = {'benchmark': True} if benchmark else {}

        for i, fjs_instance in enumerate(fjs_data):
            print(f"testing fjs instance {fjs_instance} ({i + 1} of {len(fjs_data)})")
            try:
                data_instance = data.FJSData(fjs_instance)
                # run GA
                solver = Solver(data_instance)
                solver.genetic_algorithm_iter(iterations=iterations,
                                              population_size=population_size,
                                              mutation_probability=mutation_probability,
                                              selection_size=selection_size,
                                              **extra_kwargs)
            except Exception as e:
                self.fail(f'Unexpected exception raised while running GA for {fjs_instance}:' + str(e))

            self.assertIsNotNone(solver.solution)
            self.assertIsNotNone(solver.ga_agent)

            # test parameters were set
            self.assertEqual(iterations, solver.ga_agent.iterations)
            self.assertFalse(solver.ga_agent.time_condition)
            if benchmark:
                self.assertTrue(solver.ga_agent.benchmark)
            else:
                self.assertFalse(solver.ga_agent.benchmark)
            self.assertEqual(population_size, solver.ga_agent.population_size)
            self.assertEqual(mutation_probability, solver.ga_agent.mutation_probability)
            self.assertEqual(selection_size, solver.ga_agent.selection_size)
            self.assertEqual(population_size, len(solver.ga_agent.initial_population))
            self.assertEqual(population_size, len(solver.ga_agent.result_population))

            # the result solution should be at least as good as every solution
            # in the initial population
            for initial_sol in solver.ga_agent.initial_population:
                self.assertLessEqual(solver.solution, initial_sol)

            # the result population must not contain duplicate solutions
            # (explicit loop replaces the opaque `sol in seen or seen.append(sol)` trick)
            seen = []
            for sol in solver.ga_agent.result_population:
                self.assertNotIn(sol, seen, "duplicate solution in result population")
                seen.append(sol)

            # output results
            output_file = tmp_dir / output_name
            if benchmark:
                solver.output_benchmark_results(output_file, auto_open=False)
            else:
                solver.solution.create_schedule_xlsx_file(output_file)
            self.assertTrue(output_file.exists(), f"{output_name} was not produced")

    # ------------------------------------------------------------------
    # tests

    def test_ts_iter(self):
        self._run_ts_over_instances(benchmark=False, output_name='fjs_ts_schedule.xlsx')

    def test_ts_iter_benchmark(self):
        self._run_ts_over_instances(benchmark=True, output_name='fjs_ts_benchmark')

    def test_ga_iter(self):
        self._run_ga_over_instances(benchmark=False, output_name='fjs_ga_schedule.xlsx')

    def test_ga_iter_benchmark(self):
        self._run_ga_over_instances(benchmark=True, output_name='fjs_ga_benchmark')
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()
| 45.180851
| 114
| 0.607723
| 924
| 8,494
| 5.329004
| 0.150433
| 0.035743
| 0.058083
| 0.021933
| 0.917953
| 0.911251
| 0.911251
| 0.889521
| 0.889521
| 0.889521
| 0
| 0.007678
| 0.325288
| 8,494
| 187
| 115
| 45.42246
| 0.851509
| 0.071698
| 0
| 0.75188
| 0
| 0
| 0.10519
| 0
| 0
| 0
| 0
| 0
| 0.225564
| 1
| 0.045113
| false
| 0
| 0.037594
| 0
| 0.090226
| 0.030075
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ac52775c1669f559a7a1f49c89b1fd4d81bf1c45
| 7,633
|
py
|
Python
|
wendigo/device/event_dispatcher.py
|
medmsyk/wendigopy
|
36e0759bf8b065548fd638063768522704506236
|
[
"Apache-2.0"
] | null | null | null |
wendigo/device/event_dispatcher.py
|
medmsyk/wendigopy
|
36e0759bf8b065548fd638063768522704506236
|
[
"Apache-2.0"
] | 1
|
2022-01-05T10:28:49.000Z
|
2022-03-20T09:17:04.000Z
|
wendigo/device/event_dispatcher.py
|
medmsyk/wendigopy
|
36e0759bf8b065548fd638063768522704506236
|
[
"Apache-2.0"
] | null | null | null |
from typing import Callable, List
from wendigo import Keys
from wendigo.device import DeviceState
from wendigo.device.dll import EventDispatcher as DllEventDispatcher, \
DeviceEventArgs, DeviceEventHandler
from wendigo.logger import Logger
class EventDispatcher:
    """
    Event dispatcher.

    Wraps the DLL-level EventDispatcher: converts Python callables taking a
    DeviceState into DeviceEventHandler objects and registers them, by name,
    for keyboard and mouse events.
    """
    @classmethod
    def get_event_handler(cls, event_handler: Callable[[DeviceState], None]) -> DeviceEventHandler:
        """
        Get an event handler.

        Parameters
        ----------
        event_handler: Event handler.

        Returns
        -------
        device_event_handler: Device event handler.
        """
        def wrapper(e: DeviceEventArgs):
            try:
                event_handler(DeviceState(e))
            except Exception:
                # Catch Exception (not a bare except:) so KeyboardInterrupt and
                # SystemExit are not swallowed; anything else is only logged so
                # a faulty handler cannot break the dispatch loop.
                Logger.exception("An exception raised in event handler")
        return DeviceEventHandler(wrapper)

    @classmethod
    def get_event_handler_once(cls, name: str, event_handler: Callable[[DeviceState], None]) -> DeviceEventHandler:
        """
        Get an event handler which is called only once.

        Parameters
        ----------
        name: Name.
        event_handler: Event handler.

        Returns
        -------
        device_event_handler: Device event handler.
        """
        def wrapper(e: DeviceEventArgs):
            try:
                # Unregister first so the handler cannot fire a second time.
                cls.unlisten(name)
                event_handler(DeviceState(e))
            except Exception:
                Logger.exception("An exception raised in event handler")
        return DeviceEventHandler(wrapper)

    @classmethod
    def key_down(cls, name: str, keys: List[Keys], event_handler: Callable[[DeviceState], None], for_system: bool=False):
        """
        Listen for key down.

        Parameters
        ----------
        name: Name.
        keys: Keys.
        event_handler: Event handler.
        for_system: The event is for system or not.
        """
        DllEventDispatcher.KeyDown(name, keys, cls.get_event_handler(event_handler), for_system)

    @classmethod
    def key_down_once(cls, name: str, keys: List[Keys], event_handler: Callable[[DeviceState], None], for_system: bool=False):
        """
        Listen for key down which is called only once.

        Parameters
        ----------
        name: Name.
        keys: Keys.
        event_handler: Event handler.
        for_system: The event is for system or not.
        """
        DllEventDispatcher.KeyDown(name, keys, cls.get_event_handler_once(name, event_handler), for_system)

    @classmethod
    def key_up(cls, name: str, keys: List[Keys], event_handler: Callable[[DeviceState], None], for_system: bool=False):
        """
        Listen for key up.

        Parameters
        ----------
        name: Name.
        keys: Keys.
        event_handler: Event handler.
        for_system: The event is for system or not.
        """
        DllEventDispatcher.KeyUp(name, keys, cls.get_event_handler(event_handler), for_system)

    @classmethod
    def key_up_once(cls, name: str, keys: List[Keys], event_handler: Callable[[DeviceState], None], for_system: bool=False):
        """
        Listen for key up which is called only once.

        Parameters
        ----------
        name: Name.
        keys: Keys.
        event_handler: Event handler.
        for_system: The event is for system or not.
        """
        DllEventDispatcher.KeyUp(name, keys, cls.get_event_handler_once(name, event_handler), for_system)

    @classmethod
    def key_press(cls, name: str, keys: List[Keys], event_handler: Callable[[DeviceState], None], for_system: bool=False):
        """
        Listen for key press.

        Parameters
        ----------
        name: Name.
        keys: Keys.
        event_handler: Event handler.
        for_system: The event is for system or not.
        """
        DllEventDispatcher.KeyPress(name, keys, cls.get_event_handler(event_handler), for_system)

    @classmethod
    def key_press_once(cls, name: str, keys: List[Keys], event_handler: Callable[[DeviceState], None], for_system: bool=False):
        """
        Listen for key press which is called only once.

        Parameters
        ----------
        name: Name.
        keys: Keys.
        event_handler: Event handler.
        for_system: The event is for system or not.
        """
        DllEventDispatcher.KeyPress(name, keys, cls.get_event_handler_once(name, event_handler), for_system)

    @classmethod
    def mouse_move(cls, name: str, event_handler: Callable[[DeviceState], None], for_system: bool=False):
        """
        Listen for mouse move.

        Parameters
        ----------
        name: Name.
        event_handler: Event handler.
        for_system: The event is for system or not.
        """
        DllEventDispatcher.MouseMove(name, cls.get_event_handler(event_handler), for_system)

    @classmethod
    def mouse_move_once(cls, name: str, event_handler: Callable[[DeviceState], None], for_system: bool=False):
        """
        Listen for mouse move which is called only once.

        Parameters
        ----------
        name: Name.
        event_handler: Event handler.
        for_system: The event is for system or not.
        """
        DllEventDispatcher.MouseMove(name, cls.get_event_handler_once(name, event_handler), for_system)

    @classmethod
    def mouse_wheel(cls, name: str, event_handler: Callable[[DeviceState], None], for_system: bool=False):
        """
        Listen for mouse wheel.

        Parameters
        ----------
        name: Name.
        event_handler: Event handler.
        for_system: The event is for system or not.
        """
        DllEventDispatcher.MouseWheel(name, cls.get_event_handler(event_handler), for_system)

    @classmethod
    def mouse_wheel_once(cls, name: str, event_handler: Callable[[DeviceState], None], for_system: bool=False):
        """
        Listen for mouse wheel which is called only once.

        Parameters
        ----------
        name: Name.
        event_handler: Event handler.
        for_system: The event is for system or not.
        """
        DllEventDispatcher.MouseWheel(name, cls.get_event_handler_once(name, event_handler), for_system)

    @classmethod
    def mouse_tilt(cls, name: str, event_handler: Callable[[DeviceState], None], for_system: bool=False):
        """
        Listen for mouse tilt.

        Parameters
        ----------
        name: Name.
        event_handler: Event handler.
        for_system: The event is for system or not.
        """
        DllEventDispatcher.MouseTilt(name, cls.get_event_handler(event_handler), for_system)

    @classmethod
    def mouse_tilt_once(cls, name: str, event_handler: Callable[[DeviceState], None], for_system: bool=False):
        """
        Listen for mouse tilt which is called only once.

        Parameters
        ----------
        name: Name.
        event_handler: Event handler.
        for_system: The event is for system or not.
        """
        DllEventDispatcher.MouseTilt(name, cls.get_event_handler_once(name, event_handler), for_system)

    @classmethod
    def unlisten(cls, name: str, for_system: bool=False):
        """
        Unlisten for device event.

        Parameters
        ----------
        name: Name.
        for_system: The event is for system or not.
        """
        DllEventDispatcher.Unlisten(name, for_system)
| 33.774336
| 128
| 0.591904
| 823
| 7,633
| 5.321993
| 0.081409
| 0.208219
| 0.082192
| 0.115068
| 0.913927
| 0.903881
| 0.903881
| 0.903881
| 0.894749
| 0.891553
| 0
| 0
| 0.305254
| 7,633
| 226
| 129
| 33.774336
| 0.825948
| 0.292546
| 0
| 0.428571
| 0
| 0
| 0.016863
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.269841
| false
| 0
| 0.079365
| 0
| 0.396825
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
3bbdb27afd0611a4fc0107178eda65cbab9a0ae7
| 21,424
|
py
|
Python
|
src/models.py
|
ottokart/punctuator
|
99550ed8260a8438dbeceece79f2d9743b2c6a3b
|
[
"MIT"
] | 92
|
2015-03-19T09:55:04.000Z
|
2021-10-22T02:46:32.000Z
|
src/models.py
|
ottokart/punctuator
|
99550ed8260a8438dbeceece79f2d9743b2c6a3b
|
[
"MIT"
] | 13
|
2015-04-30T10:43:49.000Z
|
2021-05-14T03:30:28.000Z
|
src/models.py
|
ottokart/punctuator
|
99550ed8260a8438dbeceece79f2d9743b2c6a3b
|
[
"MIT"
] | 26
|
2015-07-23T13:51:36.000Z
|
2021-10-31T04:32:27.000Z
|
# coding: utf-8
import numpy as np
import cPickle
import activation_functions
from itertools import izip
from activation_functions import Softmax, Sigmoid, Tanh
from utils import get_vocabulary_size, load_model
# Default floating-point dtype for all weight matrices (see Model.weights).
FLOATX = np.float64
class Model(object):
    """Common base for the punctuation models: shared prediction/training API.

    Subclasses provide predict(), update(), initialize() and reset_state().
    """

    def __init__(self):
        super(Model, self).__init__()
        # Flipped to True by subclass initialize()/load(); guarded by asserts.
        self.initialized = False

    def output_word_probability(self, output_word_index):
        """Probability the last predict() assigned to each target index (one per batch row)."""
        assert self.initialized, "initialize or load before using"
        assert hasattr(self, "y"), "predict before trying to use output"
        row_indices = range(len(output_word_index))
        return self.y[row_indices, output_word_index]

    def train(self, input_word_index, output_word_index, pause=None, learning_rate=0.1):
        """One forward step plus a weight update; returns per-example negative log-likelihood."""
        assert self.initialized, "initialize or load before using"
        self.predict(input_word_index, pause)
        self.update(input_word_index, output_word_index, learning_rate)
        return -np.log(self.output_word_probability(output_word_index))

    def neg_log_prob(self, input_word_index, output_word_index, pause=None):
        """Forward step only; returns per-example negative log-likelihood."""
        assert self.initialized, "initialize or load before using"
        self.predict(input_word_index, pause)
        return -np.log(self.output_word_probability(output_word_index))

    def predict_punctuation(self, input_word_index, pause=None):
        """Forward step; returns the highest-probability output label per example."""
        assert self.initialized, "initialize or load before using"
        self.predict(input_word_index, pause)
        return np.argmax(self.y, axis=1)

    def load(self, model):
        """Restore attributes from a dict produced by save(); re-resolve the activation class."""
        for attr, value in model.items():
            setattr(self, attr, value)
        self.hidden_activation = getattr(activation_functions, self.hidden_activation_name)
        self.reset_state()
        self.initialized = True

    def weights(self, i, o):
        """Uniformly initialized (i, o) weight matrix with dtype FLOATX."""
        bound = 0.005  # alternative: np.sqrt(6./(i+o))
        w = np.random.uniform(low=-bound, high=bound, size=(i, o))
        return w.astype(FLOATX)

    def slice(self, matrix, size, i):
        """Column block i of width `size`: columns [i*size, (i+1)*size)."""
        start = i * size
        return matrix[:, start:start + size]
class T_LSTM(Model):
    """Word-level LSTM with peephole connections over word-embedding inputs.

    Optionally mixes a scalar pause-duration feature into the input
    projection.  Trained with truncated BPTT (histories kept by
    _remember_state, gradients computed in _backpropagate) and AdaGrad
    updates (see update).  NOTE(review): this module is Python 2 code
    (cPickle, izip, file()).
    """
    def __init__(self):
        super(T_LSTM, self).__init__()
        # Names of all trainable weight matrices; initialize(), save() and
        # load() iterate over this list to handle parameters generically.
        self.params = ["We", "Wp", # Word embeddings, pauses
                       "W", "Wr", "Wy", # inputs-to-LSTM, recurrency, outputs
                       "Wip", "Wfp", "Wop"] # peepholes
        self.initialized = False

    def initialize(self, hidden_size, projection_size, in_vocabulary, out_vocabulary, batch_size, hidden_activation="Tanh", bptt_steps=5, use_pauses=False):
        """Allocate and randomly initialize all weights and state.

        hidden_size: number of LSTM units.
        projection_size: size of the word-embedding projection.
        in_vocabulary / out_vocabulary: input-word and output-label vocabularies.
        batch_size: number of parallel sequences per step.
        hidden_activation: name of an activation class in activation_functions.
        bptt_steps: number of timesteps kept for truncated BPTT.
        use_pauses: whether pause durations are mixed into the input.
        """
        self.hidden_size = hidden_size
        self.projection_size = projection_size
        self.bptt_steps = bptt_steps
        self.batch_size = batch_size
        self.use_pauses = use_pauses
        self.in_vocabulary = in_vocabulary
        self.out_vocabulary = out_vocabulary
        self.hidden_activation_name = hidden_activation
        # Resolved by name so load() can re-resolve it after unpickling.
        self.hidden_activation = getattr(activation_functions, hidden_activation)
        self.We = self.weights(get_vocabulary_size(self.in_vocabulary), self.projection_size)
        self.Wp = self.weights(1, self.projection_size)
        # W and Wr pack the weights of the candidate input and the three gates
        # side by side (hence *4); slice() extracts each block in predict().
        self.W = self.weights(self.projection_size, self.hidden_size*4)
        self.Wip = self.weights(1, self.hidden_size)
        self.Wfp = self.weights(1, self.hidden_size)
        self.Wop = self.weights(1, self.hidden_size)
        self.Wr = self.weights(self.hidden_size, self.hidden_size*4)
        self.Wy = self.weights(self.hidden_size, get_vocabulary_size(self.out_vocabulary))
        # AdaGrad sum of squares of per feature historical gradients
        for p in self.params:
            setattr(self, p+"_hg", np.zeros_like(getattr(self, p)))
        self.reset_state()
        self.initialized = True

    def reset_state(self):
        """Zero the recurrent state and clear all truncated-BPTT histories."""
        self.m = np.zeros(shape=(self.batch_size, self.hidden_size))  # cell state
        self.h = np.zeros(shape=(self.batch_size, self.hidden_size))  # hidden output
        self.word_history = []
        if self.use_pauses:
            self.pause_history = []
        self.m_tm1_history = []
        self.h_tm1_history = []
        self.z_history = []
        self.x_history = []
        self.i_history = []
        self.ig_history = []
        self.fg_history = []
        self.og_history = []
        self.W_history = []
        self.Wr_history = []
        self.Wip_history = []
        self.Wfp_history = []
        self.Wop_history = []

    def _remember_state(self, input_word_index, pause=None):
        """Push this timestep onto the BPTT histories; drop the oldest entry
        once more than bptt_steps timesteps are stored."""
        self.word_history.append(input_word_index)
        if self.use_pauses:
            self.pause_history.append(pause)
        self.m_tm1_history.append(self.m_tm1)
        self.h_tm1_history.append(self.h_tm1)
        self.z_history.append(self.z)
        self.x_history.append(self.x)
        self.i_history.append(self.i)
        self.ig_history.append(self.ig)
        self.fg_history.append(self.fg)
        self.og_history.append(self.og)
        # Weight matrices are copied: update() mutates them in place, and
        # backpropagation must use the values current at each timestep.
        self.W_history.append(self.W.copy())
        self.Wr_history.append(self.Wr.copy())
        self.Wip_history.append(self.Wip.copy())
        self.Wfp_history.append(self.Wfp.copy())
        self.Wop_history.append(self.Wop.copy())
        if len(self.word_history) > self.bptt_steps:
            del self.word_history[0]
            if self.use_pauses:
                del self.pause_history[0]
            del self.m_tm1_history[0]
            del self.h_tm1_history[0]
            del self.z_history[0]
            del self.x_history[0]
            del self.i_history[0]
            del self.ig_history[0]
            del self.fg_history[0]
            del self.og_history[0]
            del self.W_history[0]
            del self.Wr_history[0]
            del self.Wip_history[0]
            del self.Wfp_history[0]
            del self.Wop_history[0]

    def predict(self, input_word_index, pause_duration=None, compute_only_features=False):
        """One forward step for a batch of word indices.

        Updates the recurrent state (self.m, self.h); unless
        compute_only_features is True, also computes the softmax output
        distribution self.y.  The step is recorded via _remember_state.
        """
        assert self.initialized, "initialize or load before using"
        self.m_tm1 = self.m
        self.h_tm1 = self.h
        r = np.dot(self.h_tm1, self.Wr)  # recurrent contribution, all gate blocks at once
        z = self.We[input_word_index]    # word-embedding lookup
        if self.use_pauses:
            z += np.dot(pause_duration[:,np.newaxis], self.Wp)
        self.x = self.hidden_activation.y(z)
        z1 = np.dot(self.x, self.W)      # input contribution, all gate blocks at once
        # block 0: candidate input
        z = self.slice(r, self.hidden_size, 0) + self.slice(z1, self.hidden_size, 0)
        self.i = self.hidden_activation.y(z)
        # block 1: input gate (peephole from previous cell state)
        z = self.slice(r, self.hidden_size, 1) + self.slice(z1, self.hidden_size, 1) + self.m_tm1 * self.Wip
        self.ig = Sigmoid.y(z)
        # block 2: forget gate (peephole from previous cell state)
        z = self.slice(r, self.hidden_size, 2) + self.slice(z1, self.hidden_size, 2) + self.m_tm1 * self.Wfp
        self.fg = Sigmoid.y(z)
        self.m = self.i * self.ig + self.m_tm1 * self.fg  # new cell state
        # block 3: output gate (peephole from the *new* cell state)
        z = self.slice(r, self.hidden_size, 3) + self.slice(z1, self.hidden_size, 3) + self.m * self.Wop
        self.og = Sigmoid.y(z)
        self.z = self.hidden_activation.y(self.m)
        self.h = self.z * self.og
        if not compute_only_features:
            z_y = np.dot(self.h, self.Wy)
            self.y = Softmax.y(z=z_y)
        if self.use_pauses:
            self._remember_state(input_word_index, pause_duration[:,np.newaxis])
        else:
            self._remember_state(input_word_index)

    def _backpropagate(self, output_word_index):
        """Truncated BPTT: accumulate gradients (self.dE_d*) for all weights by
        walking the stored histories from newest to oldest timestep."""
        # Softmax + cross-entropy gradient: y - one_hot(target).
        dE_dz_y = self.y.copy() # don't remove the copy() part
        dE_dz_y[range(len(output_word_index)), output_word_index] -= 1.
        self.dE_dWy = np.dot(self.h.T, dE_dz_y)
        dE_dh = np.dot(dE_dz_y, self.Wy.T)
        self.dE_dWe = {}  # sparse: only embedding rows seen in the history get gradients
        self.dE_dW = np.zeros_like(self.W)
        self.dE_dWr = np.zeros_like(self.Wr)
        self.dE_dWip = np.zeros_like(self.Wip)
        self.dE_dWfp = np.zeros_like(self.Wfp)
        self.dE_dWop = np.zeros_like(self.Wop)
        self.dE_dWp = np.zeros_like(self.Wp)
        dE_dm_tm1 = 0.
        dE_dh_tm1 = 0.
        m = self.m
        pause_history = self.pause_history if self.use_pauses else [None]*len(self.word_history)
        # NOTE(review): reversed(zip(...)) requires Python 2, where zip returns a list.
        for pauses, words, W, Wr, Wip, Wfp, Wop, x, m_tm1, h_tm1, z, i, ig, fg, og in reversed(zip(
                pause_history, self.word_history,
                self.W_history, self.Wr_history,
                self.Wip_history, self.Wfp_history, self.Wop_history,
                self.x_history, self.m_tm1_history, self.h_tm1_history, self.z_history,
                self.i_history, self.ig_history,
                self.fg_history, self.og_history)):
            dE_dh = dE_dh + dE_dh_tm1
            dE_dog = dE_dh * z * Sigmoid.dy_dz(y=og)
            dE_dz = dE_dh * og * self.hidden_activation.dy_dz(y=z)
            dE_dm = dE_dz + dE_dm_tm1 + dE_dog * Wop
            dE_dfg = dE_dm * m_tm1 * Sigmoid.dy_dz(y=fg)
            dE_di = dE_dm * ig * self.hidden_activation.dy_dz(y=i)
            dE_dig = dE_dm * i * Sigmoid.dy_dz(y=ig)
            dE_dm_tm1 = dE_dm * fg + dE_dig * Wip + dE_dfg * Wfp
            self.dE_dWip += (dE_dig * m_tm1).sum(0)
            self.dE_dWfp += (dE_dfg * m_tm1).sum(0)
            self.dE_dWop += (dE_dog * m).sum(0)
            # Stack the four per-block errors to mirror the packed W/Wr layout.
            d = np.hstack((dE_di, dE_dig, dE_dfg, dE_dog))
            dE_dx = np.dot(d, W.T) * self.hidden_activation.dy_dz(y=x)
            dE_dh_tm1 = np.dot(d, Wr.T)
            self.dE_dW += np.dot(x.T, d)
            self.dE_dWr += np.dot(h_tm1.T, d)
            # izip is Python 2 (itertools.izip).
            for word, dE_dx_word in izip(words, dE_dx):
                self.dE_dWe[word] = self.dE_dWe.get(word, 0.) + dE_dx_word
            if self.use_pauses:
                self.dE_dWp += np.dot(pauses.T, dE_dx)
            # Output error is injected only at the newest timestep.
            dE_dh = 0.
            m = m_tm1

    def update(self, _, output_word_index, learning_rate):
        """Uses AdaGrad: Duchi, John, Elad Hazan, and Yoram Singer. "Adaptive subgradient methods for online learning and stochastic optimization." The Journal of Machine Learning Research 12 (2011): 2121-2159."""
        assert self.initialized, "initialize or load before using"
        self._backpropagate(output_word_index)
        # For each weight matrix: accumulate squared gradient, then scale the
        # step by 1/sqrt(accumulator) (1e-6 avoids division by zero).
        self.Wy_hg += self.dE_dWy**2
        self.Wy -= learning_rate * self.dE_dWy / (1e-6 + np.sqrt(self.Wy_hg))
        self.Wr_hg += self.dE_dWr**2
        self.Wr -= learning_rate * self.dE_dWr / (1e-6 + np.sqrt(self.Wr_hg))
        self.Wip_hg += self.dE_dWip**2
        self.Wip -= learning_rate * self.dE_dWip / (1e-6 + np.sqrt(self.Wip_hg))
        self.Wfp_hg += self.dE_dWfp**2
        self.Wfp -= learning_rate * self.dE_dWfp / (1e-6 + np.sqrt(self.Wfp_hg))
        self.Wop_hg += self.dE_dWop**2
        self.Wop -= learning_rate * self.dE_dWop / (1e-6 + np.sqrt(self.Wop_hg))
        self.W_hg += self.dE_dW**2
        self.W -= learning_rate * self.dE_dW / (1e-6 + np.sqrt(self.W_hg))
        if self.use_pauses:
            self.Wp_hg += self.dE_dWp**2
            self.Wp -= learning_rate * self.dE_dWp / (1e-6 + np.sqrt(self.Wp_hg))
        # Embedding gradients are sparse: update only the touched rows.
        for i in self.dE_dWe:
            self.We_hg[i] += self.dE_dWe[i]**2
            self.We[i] -= learning_rate * self.dE_dWe[i] / (1e-6 + np.sqrt(self.We_hg[i]))

    def save(self, file_name, final):
        """Pickle hyperparameters and weights to file_name.

        When final is False, the AdaGrad accumulators (*_hg) are saved too so
        training can resume.  Uses Python 2 builtins (file, cPickle).
        """
        assert self.initialized, "initialize or load before using"
        model = {
            "type": self.__class__.__name__,
            "hidden_size": self.hidden_size,
            "projection_size": self.projection_size,
            "bptt_steps": self.bptt_steps,
            "batch_size": self.batch_size,
            "use_pauses": self.use_pauses,
            "in_vocabulary": self.in_vocabulary,
            "out_vocabulary": self.out_vocabulary,
            "hidden_activation_name": self.hidden_activation_name,
        }
        for p in self.params:
            model[p] = getattr(self, p)
            if not final:
                model[p+"_hg"] = getattr(self, p+"_hg")
        cPickle.dump(model, file(file_name, 'wb'))
class TA_LSTM(Model):
    """Second-stage LSTM stacked on top of a T_LSTM.

    Consumes the word-level T_LSTM's hidden state (t_lstm.h) as its input
    instead of word embeddings, again with peephole connections, truncated
    BPTT and AdaGrad.  NOTE(review): Python 2 code (cPickle, file()).
    """
    def __init__(self):
        super(TA_LSTM, self).__init__()
        # Trainable weight names (no We: inputs come from the T_LSTM below).
        self.params = ["Wp", #pauses
                       "W", "Wr", "Wy", # inputs-to-LSTM, recurrency, outputs
                       "Wip", "Wfp", "Wop"] # peepholes
        self.initialized = False

    def initialize(self, hidden_size, t_lstm, out_vocabulary, batch_size, hidden_activation="Tanh", bptt_steps=5, use_pauses=False):
        """Allocate weights; t_lstm is the trained word-level T_LSTM whose
        hidden states become this layer's inputs."""
        assert isinstance(t_lstm, T_LSTM)
        self.hidden_size = hidden_size
        self.t_lstm = t_lstm
        self.bptt_steps = bptt_steps
        self.batch_size = batch_size
        self.use_pauses = use_pauses
        self.in_vocabulary = self.t_lstm.in_vocabulary
        self.out_vocabulary = out_vocabulary
        self.hidden_activation_name = hidden_activation
        self.hidden_activation = getattr(activation_functions, hidden_activation)
        # W and Wr pack candidate-input + three gate blocks (hence *4).
        self.W = self.weights(self.t_lstm.hidden_size, self.hidden_size*4)
        self.Wp = self.weights(1, self.hidden_size*4)
        self.Wy = self.weights(self.hidden_size, get_vocabulary_size(self.out_vocabulary))
        self.Wip = self.weights(1, self.hidden_size)
        self.Wfp = self.weights(1, self.hidden_size)
        self.Wop = self.weights(1, self.hidden_size)
        self.Wr = self.weights(self.hidden_size, self.hidden_size*4)
        # AdaGrad sum of squares of per feature historical gradients
        for p in self.params:
            setattr(self, p+"_hg", np.zeros_like(getattr(self, p)))
        self.reset_state()
        self.initialized = True

    def reset_state(self):
        """Zero this layer's recurrent state and the underlying T_LSTM's, and
        clear all truncated-BPTT histories."""
        self.t_lstm.reset_state()
        self.m = np.zeros(shape=(self.batch_size, self.hidden_size))  # cell state
        self.h = np.zeros(shape=(self.batch_size, self.hidden_size))  # hidden output
        if self.use_pauses:
            self.pause_history = []
        self.t_lstm_h_history = []
        self.m_tm1_history = []
        self.h_tm1_history = []
        self.z_history = []
        self.i_history = []
        self.ig_history = []
        self.fg_history = []
        self.og_history = []
        self.Wr_history = []
        self.Wip_history = []
        self.Wfp_history = []
        self.Wop_history = []

    def _remember_state(self, pause):
        """Push this timestep onto the BPTT histories; drop the oldest entry
        once more than bptt_steps timesteps are stored."""
        if self.use_pauses:
            self.pause_history.append(pause)
        self.t_lstm_h_history.append(self.t_lstm.h)
        self.m_tm1_history.append(self.m_tm1)
        self.h_tm1_history.append(self.h_tm1)
        self.z_history.append(self.z)
        self.i_history.append(self.i)
        self.ig_history.append(self.ig)
        self.fg_history.append(self.fg)
        self.og_history.append(self.og)
        # Copies: update() mutates the weights; BPTT needs per-timestep values.
        self.Wr_history.append(self.Wr.copy())
        self.Wip_history.append(self.Wip.copy())
        self.Wfp_history.append(self.Wfp.copy())
        self.Wop_history.append(self.Wop.copy())
        if len(self.h_tm1_history) > self.bptt_steps:
            if self.use_pauses:
                del self.pause_history[0]
            del self.t_lstm_h_history[0]
            del self.m_tm1_history[0]
            del self.h_tm1_history[0]
            del self.z_history[0]
            del self.i_history[0]
            del self.ig_history[0]
            del self.fg_history[0]
            del self.og_history[0]
            del self.Wr_history[0]
            del self.Wip_history[0]
            del self.Wfp_history[0]
            del self.Wop_history[0]

    def predict(self, input_word_index, pause_duration=None):
        """One forward step: advance the underlying T_LSTM (features only),
        then run this layer's LSTM cell on its hidden state and compute the
        softmax output distribution self.y."""
        assert self.initialized, "initialize or load before using"
        self.t_lstm.predict(input_word_index, pause_duration, compute_only_features=True)
        self.m_tm1 = self.m
        self.h_tm1 = self.h
        r = np.dot(self.h_tm1, self.Wr)       # recurrent contribution
        z1 = np.dot(self.t_lstm.h, self.W)    # input from the word-level LSTM
        if self.use_pauses:
            z1 += np.dot(pause_duration[:,np.newaxis], self.Wp)
        # block 0: candidate input
        z = self.slice(r, self.hidden_size, 0) + self.slice(z1, self.hidden_size, 0)
        self.i = self.hidden_activation.y(z)
        # block 1: input gate (peephole from previous cell state)
        z = self.slice(r, self.hidden_size, 1) + self.slice(z1, self.hidden_size, 1) + self.m_tm1 * self.Wip
        self.ig = Sigmoid.y(z)
        # block 2: forget gate (peephole from previous cell state)
        z = self.slice(r, self.hidden_size, 2) + self.slice(z1, self.hidden_size, 2) + self.m_tm1 * self.Wfp
        self.fg = Sigmoid.y(z)
        self.m = self.i * self.ig + self.m_tm1 * self.fg  # new cell state
        # block 3: output gate (peephole from the *new* cell state)
        z = self.slice(r, self.hidden_size, 3) + self.slice(z1, self.hidden_size, 3) + self.m * self.Wop
        self.og = Sigmoid.y(z)
        self.z = self.hidden_activation.y(self.m)
        self.h = self.z * self.og
        z_y = np.dot(self.h, self.Wy)
        self.y = Softmax.y(z=z_y)
        self._remember_state(pause_duration)

    def _backpropagate(self, output_word_index):
        """Truncated BPTT over this layer's histories; gradients do not flow
        back into the underlying T_LSTM (its hidden states are inputs)."""
        # Softmax + cross-entropy gradient: y - one_hot(target).
        dE_dz_y = self.y.copy() # don't remove the copy() part
        dE_dz_y[range(len(output_word_index)), output_word_index] -= 1.
        self.dE_dWy = np.dot(self.h.T, dE_dz_y)
        dE_dh = np.dot(dE_dz_y, self.Wy.T) * self.hidden_activation.dy_dz(y=self.h)
        self.dE_dWr = np.zeros_like(self.Wr)
        self.dE_dW = np.zeros_like(self.W)
        self.dE_dWip = np.zeros_like(self.Wip)
        self.dE_dWfp = np.zeros_like(self.Wfp)
        self.dE_dWop = np.zeros_like(self.Wop)
        self.dE_dWp = np.zeros_like(self.Wp)
        dE_dm_tm1 = 0.
        dE_dh_tm1 = 0.
        m = self.m
        pause_history = self.pause_history if self.use_pauses else [None]*len(self.h_tm1_history)
        # NOTE(review): reversed(zip(...)) requires Python 2, where zip returns a list.
        for pauses, Wr, Wip, Wfp, Wop, t_lstm_h, m_tm1, h_tm1, z, i, ig, fg, og in reversed(zip(
                pause_history, self.Wr_history,
                self.Wip_history, self.Wfp_history, self.Wop_history,
                self.t_lstm_h_history, self.m_tm1_history, self.h_tm1_history,
                self.z_history, self.i_history, self.ig_history,
                self.fg_history, self.og_history)):
            dE_dh = dE_dh + dE_dh_tm1
            dE_dog = dE_dh * z * Sigmoid.dy_dz(y=og)
            dE_dz = dE_dh * og * self.hidden_activation.dy_dz(y=z)
            dE_dm = dE_dz + dE_dm_tm1 + dE_dog * Wop
            dE_dfg = dE_dm * m_tm1 * Sigmoid.dy_dz(y=fg)
            dE_di = dE_dm * ig * self.hidden_activation.dy_dz(y=i)
            dE_dig = dE_dm * i * Sigmoid.dy_dz(y=ig)
            dE_dm_tm1 = dE_dm * fg + dE_dig * Wip + dE_dfg * Wfp
            self.dE_dWip += (dE_dig * m_tm1).sum(0)
            self.dE_dWfp += (dE_dfg * m_tm1).sum(0)
            self.dE_dWop += (dE_dog * m).sum(0)
            # Stack the four per-block errors to mirror the packed W/Wr layout.
            d = np.hstack((dE_di, dE_dig, dE_dfg, dE_dog))
            dE_dh_tm1 = np.dot(d, Wr.T)
            if self.use_pauses:
                self.dE_dWp += np.dot(pauses.T, d)
            self.dE_dW += np.dot(t_lstm_h.T, d)
            self.dE_dWr += np.dot(h_tm1.T, d)
            # Output error is injected only at the newest timestep.
            dE_dh = 0.
            m = m_tm1

    def update(self, _, output_word_index, learning_rate):
        """Uses AdaGrad: Duchi, John, Elad Hazan, and Yoram Singer. "Adaptive subgradient methods for online learning and stochastic optimization." The Journal of Machine Learning Research 12 (2011): 2121-2159."""
        assert self.initialized, "initialize or load before using"
        self._backpropagate(output_word_index)
        # For each weight matrix: accumulate squared gradient, then scale the
        # step by 1/sqrt(accumulator) (1e-6 avoids division by zero).
        self.Wy_hg += self.dE_dWy**2
        self.Wy -= learning_rate * self.dE_dWy / (1e-6 + np.sqrt(self.Wy_hg))
        self.W_hg += self.dE_dW**2
        self.W -= learning_rate * self.dE_dW / (1e-6 + np.sqrt(self.W_hg))
        self.Wr_hg += self.dE_dWr**2
        self.Wr -= learning_rate * self.dE_dWr / (1e-6 + np.sqrt(self.Wr_hg))
        self.Wip_hg += self.dE_dWip**2
        self.Wip -= learning_rate * self.dE_dWip / (1e-6 + np.sqrt(self.Wip_hg))
        self.Wfp_hg += self.dE_dWfp**2
        self.Wfp -= learning_rate * self.dE_dWfp / (1e-6 + np.sqrt(self.Wfp_hg))
        self.Wop_hg += self.dE_dWop**2
        self.Wop -= learning_rate * self.dE_dWop / (1e-6 + np.sqrt(self.Wop_hg))
        if self.use_pauses:
            self.Wp_hg += self.dE_dWp**2
            self.Wp -= learning_rate * self.dE_dWp / (1e-6 + np.sqrt(self.Wp_hg))

    def save(self, file_name, final):
        """Pickle this layer; the underlying T_LSTM is saved to a sibling file
        (file_name + "_t_lstm") and referenced by name so load() can restore it.

        When final is False the AdaGrad accumulators (*_hg) are saved too.
        Uses Python 2 builtins (file, cPickle).
        """
        assert self.initialized, "initialize or load before using"
        model = {
            "type": self.__class__.__name__,
            "hidden_size": self.hidden_size,
            "bptt_steps": self.bptt_steps,
            "batch_size": self.batch_size,
            "use_pauses": self.use_pauses,
            "out_vocabulary": self.out_vocabulary,
            "hidden_activation_name": self.hidden_activation_name,
        }
        for p in self.params:
            model[p] = getattr(self, p)
            if not final:
                model[p+"_hg"] = getattr(self, p+"_hg")
        t_lstm_file_name = file_name + "_t_lstm"
        self.t_lstm.save(t_lstm_file_name, True)
        model["t_lstm_file_name"] = t_lstm_file_name
        cPickle.dump(model, file(file_name, 'wb'))

    def load(self, model):
        """Restore this layer and its referenced T_LSTM from a save() dict."""
        self.t_lstm = load_model(model["t_lstm_file_name"])
        self.in_vocabulary = self.t_lstm.in_vocabulary
        super(TA_LSTM, self).load(model)
| 39.600739
| 213
| 0.587939
| 3,085
| 21,424
| 3.825932
| 0.068395
| 0.050835
| 0.048632
| 0.031772
| 0.868932
| 0.836991
| 0.809032
| 0.790816
| 0.770652
| 0.757689
| 0
| 0.015261
| 0.296537
| 21,424
| 540
| 214
| 39.674074
| 0.767899
| 0.034121
| 0
| 0.743961
| 0
| 0
| 0.030913
| 0.002129
| 0
| 0
| 0
| 0
| 0.028986
| 1
| 0.060386
| false
| 0
| 0.014493
| 0.002415
| 0.096618
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
3bdf6f3e641db6e23d166d7a48c692564554f68c
| 68
|
py
|
Python
|
nameSurvey/__init__.py
|
chriswilly/demoCode
|
654603012157613afa0b4a6b4cc2fa0e50d1b807
|
[
"MIT"
] | null | null | null |
nameSurvey/__init__.py
|
chriswilly/demoCode
|
654603012157613afa0b4a6b4cc2fa0e50d1b807
|
[
"MIT"
] | null | null | null |
nameSurvey/__init__.py
|
chriswilly/demoCode
|
654603012157613afa0b4a6b4cc2fa0e50d1b807
|
[
"MIT"
] | null | null | null |
from .nameSurvey import nameView
# from .nameSurvey import nameFind
| 68
| 68
| 0.823529
| 8
| 68
| 7
| 0.625
| 0.5
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.132353
| 68
| 1
| 68
| 68
| 0.949153
| 0.470588
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
cbe9e33acc09c6645384f8d9c7e9463561838769
| 4,823
|
py
|
Python
|
dispatcher/dispatcher_test.py
|
Gleamo/gleamo-device
|
6b7c24ad1683e931cacf2ce9c5aa8d3b16616503
|
[
"BSD-2-Clause"
] | 1
|
2017-05-02T15:15:03.000Z
|
2017-05-02T15:15:03.000Z
|
dispatcher/dispatcher_test.py
|
Gleamo/gleamo-python
|
6b7c24ad1683e931cacf2ce9c5aa8d3b16616503
|
[
"BSD-2-Clause"
] | 4
|
2017-05-02T13:50:15.000Z
|
2017-05-02T16:12:38.000Z
|
dispatcher/dispatcher_test.py
|
Gleamo/gleamo-python
|
6b7c24ad1683e931cacf2ce9c5aa8d3b16616503
|
[
"BSD-2-Clause"
] | null | null | null |
import unittest
from .dispatcher import Dispatcher
from hardware.mock_hardware import MockHardware
from colors.color import Color
from buzzer.buzzer_pattern import BuzzerPattern
from commands.command import Command
from state.state import State
class TestDispatcher(unittest.TestCase):
    """Tests for Dispatcher.dispatch().

    Verifies that repeated dispatch calls at increasing timestamps
    linearly fade the color toward the command's target over the
    command duration, and that a buzzer pattern drives the motor on
    for its duration and off afterwards.
    """

    def _assert_color(self, state, hw, expected):
        # The returned state and the hardware's last color call must
        # agree on all three channels.
        for channel in ("r", "g", "b"):
            self.assertEqual(getattr(state.color, channel), expected)
            self.assertEqual(
                getattr(hw.color_last_called_with, channel), expected)

    def test_dispatches_to_hardware(self):
        with MockHardware() as hw:
            dsp = Dispatcher(hw)
            state = State(
                color=Color(100, 100, 100),
                buzzer_pattern=BuzzerPattern.NONE
            )
            cmd = Command(
                color=Color(110, 110, 110),
                duration=100,
                buzzer_pattern=BuzzerPattern.NONE
            )
            # Color interpolates from 100 toward 110 over the 100-tick
            # duration; hardware is called exactly once per dispatch.
            for now, expected, calls in (
                (0, 100, 1),
                (10, 101, 2),
                (90, 109, 3),
                (100, 110, 4),
            ):
                state = dsp.dispatch(state, cmd, now)
                self._assert_color(state, hw, expected)
                self.assertEqual(hw.color_called_count, calls)

    def test_dispatches_to_hardware_with_buzzer(self):
        with MockHardware() as hw:
            dsp = Dispatcher(hw)
            state = State(
                color=Color(100, 100, 100),
                buzzer_pattern=BuzzerPattern.NONE
            )
            cmd = Command(
                color=Color.no_change(),
                duration=100,
                buzzer_pattern=BuzzerPattern(duration=10, strength=1)
            )
            # While the pattern is active: color unchanged, strength 1,
            # one motor call per dispatch, motor reported on.
            for now, motor_calls in ((0, 1), (10, 2)):
                state = dsp.dispatch(state, cmd, now)
                for channel in ("r", "g", "b"):
                    self.assertEqual(getattr(state.color, channel), 100)
                self.assertEqual(state.buzzer_pattern.strength, 1)
                self.assertEqual(hw.motor_called_count, motor_calls)
                self.assertEqual(hw.motor_state, 1)
            # Past the pattern's duration the motor is stopped once and
            # the pattern strength drops to zero.
            state = dsp.dispatch(state, cmd, 90)
            for channel in ("r", "g", "b"):
                self.assertEqual(getattr(state.color, channel), 100)
            self.assertEqual(state.buzzer_pattern.strength, 0)
            self.assertEqual(hw.motor_stop_called_count, 1)
            self.assertEqual(hw.motor_state, 0)
| 40.191667
| 76
| 0.657475
| 560
| 4,823
| 5.416071
| 0.098214
| 0.227498
| 0.150346
| 0.189911
| 0.89845
| 0.843389
| 0.779097
| 0.713485
| 0.699967
| 0.465546
| 0
| 0.045161
| 0.260834
| 4,823
| 119
| 77
| 40.529412
| 0.80561
| 0
| 0
| 0.478261
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0.021739
| false
| 0
| 0.076087
| 0
| 0.108696
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
cbf7ba7bab387a817f43b4bf627f069c4c8f3dfa
| 39,307
|
py
|
Python
|
generate-grammars/python-awk/python3_actions.py
|
mbaak/histogrammar-python
|
6311f5b0eec9c75f12018f22604535c64675fdf6
|
[
"Apache-2.0"
] | 30
|
2016-09-25T16:36:06.000Z
|
2021-07-20T09:09:09.000Z
|
generate-grammars/python-awk/python3_actions.py
|
mbaak/histogrammar-python
|
6311f5b0eec9c75f12018f22604535c64675fdf6
|
[
"Apache-2.0"
] | 15
|
2016-07-26T19:41:31.000Z
|
2021-02-07T16:30:11.000Z
|
generate-grammars/python-awk/python3_actions.py
|
mbaak/histogrammar-python
|
6311f5b0eec9c75f12018f22604535c64675fdf6
|
[
"Apache-2.0"
] | 8
|
2016-09-19T20:48:37.000Z
|
2021-02-07T15:00:24.000Z
|
#!/usr/bin/env python
actions = {}
actions['''file_input : ENDMARKER'''] = ''' p[0] = ast.Module([], rule=inspect.currentframe().f_code.co_name, lineno=0, col_offset=0)'''
actions['''file_input : file_input_star ENDMARKER'''] = ''' p[0] = ast.Module(p[1], rule=inspect.currentframe().f_code.co_name)
inherit_lineno(p[0], p[1][0])'''
actions['''file_input_star : NEWLINE'''] = ''' p[0] = ast.Module([], rule=inspect.currentframe().f_code.co_name, lineno=0, col_offset=0)'''
actions['''file_input_star : stmt'''] = ''' p[0] = p[1]'''
actions['''file_input_star : file_input_star NEWLINE'''] = ''' p[0] = ast.Module(p[1], rule=inspect.currentframe().f_code.co_name)
inherit_lineno(p[0], p[1][0])'''
actions['''file_input_star : file_input_star stmt'''] = ''' p[0] = p[1] + p[2]'''
actions['''decorator : AT dotted_name NEWLINE'''] = ''' p[0] = p[2]
p[0].alt = p[1][1]'''
actions['''decorator : AT dotted_name LPAR RPAR NEWLINE'''] = ''' p[0] = ast.Call(p[2], [], [], None, None, rule=inspect.currentframe().f_code.co_name)
inherit_lineno(p[0], p[1][1])'''
actions['''decorator : AT dotted_name LPAR arglist RPAR NEWLINE'''] = ''' p[4].func = p[2]
p[0] = p[4]
inherit_lineno(p[0], p[2])
p[0].alt = p[1][1]'''
actions['''decorators : decorators_plus'''] = ''' p[0] = p[1]'''
actions['''decorators_plus : decorator'''] = ''' p[0] = [p[1]]'''
actions['''decorators_plus : decorators_plus decorator'''] = ''' p[0] = p[1] + [p[2]]'''
actions['''decorated : decorators classdef'''] = ''' p[2].decorator_list = p[1]
p[0] = p[2]
inherit_lineno(p[0], p[1][0])'''
actions['''decorated : decorators funcdef'''] = ''' p[2].decorator_list = p[1]
p[0] = p[2]
inherit_lineno(p[0], p[1][0])'''
actions['''funcdef : DEF NAME parameters COLON suite'''] = ''' p[0] = ast.FunctionDef(p[2][0], p[3], p[5], [], rule=inspect.currentframe().f_code.co_name, **p[1][1])'''
actions['''parameters : LPAR RPAR'''] = ''' p[0] = ast.arguments([], None, None, [], rule=inspect.currentframe().f_code.co_name, **p[1][1])'''
actions['''stmt : simple_stmt'''] = ''' p[0] = p[1]'''
actions['''stmt : compound_stmt'''] = ''' p[0] = p[1]'''
actions['''simple_stmt : small_stmt NEWLINE'''] = ''' p[0] = [p[1]]'''
actions['''simple_stmt : small_stmt SEMI NEWLINE'''] = ''' p[0] = [p[1]]'''
actions['''simple_stmt : small_stmt simple_stmt_star NEWLINE'''] = ''' p[0] = [p[1]] + p[2]'''
actions['''simple_stmt : small_stmt simple_stmt_star SEMI NEWLINE'''] = ''' p[0] = [p[1]] + p[2]'''
actions['''simple_stmt_star : SEMI small_stmt'''] = ''' p[0] = [p[2]]'''
actions['''simple_stmt_star : simple_stmt_star SEMI small_stmt'''] = ''' p[0] = p[1] + [p[3]]'''
actions['''small_stmt : expr_stmt'''] = ''' p[0] = p[1]'''
actions['''small_stmt : del_stmt'''] = ''' p[0] = p[1]'''
actions['''small_stmt : pass_stmt'''] = ''' p[0] = p[1]'''
actions['''small_stmt : flow_stmt'''] = ''' p[0] = p[1]'''
actions['''small_stmt : import_stmt'''] = ''' p[0] = p[1]'''
actions['''small_stmt : global_stmt'''] = ''' p[0] = p[1]'''
actions['''small_stmt : assert_stmt'''] = ''' p[0] = p[1]'''
actions['''expr_stmt : testlist_star_expr'''] = ''' p[0] = ast.Expr(p[1], rule=inspect.currentframe().f_code.co_name)
inherit_lineno(p[0], p[1])'''
actions['''expr_stmt_star : EQUAL yield_expr'''] = ''' p[0] = [p[2]]'''
actions['''expr_stmt_star : expr_stmt_star EQUAL yield_expr'''] = ''' p[0] = p[1] + [p[3]]'''
actions['''testlist_star_expr : test'''] = ''' p[0] = p[1]'''
actions['''augassign : PLUSEQUAL'''] = ''' p[0] = ast.Add(rule=inspect.currentframe().f_code.co_name, **p[1][1])'''
actions['''augassign : MINEQUAL'''] = ''' p[0] = ast.Sub(rule=inspect.currentframe().f_code.co_name, **p[1][1])'''
actions['''augassign : STAREQUAL'''] = ''' p[0] = ast.Mult(rule=inspect.currentframe().f_code.co_name, **p[1][1])'''
actions['''augassign : SLASHEQUAL'''] = ''' p[0] = ast.Div(rule=inspect.currentframe().f_code.co_name, **p[1][1])'''
actions['''augassign : PERCENTEQUAL'''] = ''' p[0] = ast.Mod(rule=inspect.currentframe().f_code.co_name, **p[1][1])'''
actions['''augassign : AMPEREQUAL'''] = ''' p[0] = ast.BitAnd(rule=inspect.currentframe().f_code.co_name, **p[1][1])'''
actions['''augassign : VBAREQUAL'''] = ''' p[0] = ast.BitOr(rule=inspect.currentframe().f_code.co_name, **p[1][1])'''
actions['''augassign : CIRCUMFLEXEQUAL'''] = ''' p[0] = ast.BitXor(rule=inspect.currentframe().f_code.co_name, **p[1][1])'''
actions['''augassign : LEFTSHIFTEQUAL'''] = ''' p[0] = ast.LShift(rule=inspect.currentframe().f_code.co_name, **p[1][1])'''
actions['''augassign : RIGHTSHIFTEQUAL'''] = ''' p[0] = ast.RShift(rule=inspect.currentframe().f_code.co_name, **p[1][1])'''
actions['''augassign : DOUBLESTAREQUAL'''] = ''' p[0] = ast.Pow(rule=inspect.currentframe().f_code.co_name, **p[1][1])'''
actions['''augassign : DOUBLESLASHEQUAL'''] = ''' p[0] = ast.FloorDiv(rule=inspect.currentframe().f_code.co_name, **p[1][1])'''
actions['''del_stmt : DEL exprlist'''] = ''' ctx_to_store(p[2], ast.Del)
if isinstance(p[2], ast.Tuple) and not p[2].paren:
p[0] = ast.Delete(p[2].elts, rule=inspect.currentframe().f_code.co_name, **p[1][1])
else:
p[0] = ast.Delete([p[2]], rule=inspect.currentframe().f_code.co_name, **p[1][1])'''
actions['''pass_stmt : PASS'''] = ''' p[0] = ast.Pass(rule=inspect.currentframe().f_code.co_name, **p[1][1])'''
actions['''flow_stmt : break_stmt'''] = ''' p[0] = p[1]'''
actions['''flow_stmt : continue_stmt'''] = ''' p[0] = p[1]'''
actions['''flow_stmt : return_stmt'''] = ''' p[0] = p[1]'''
actions['''flow_stmt : raise_stmt'''] = ''' p[0] = p[1]'''
actions['''flow_stmt : yield_stmt'''] = ''' p[0] = ast.Expr(p[1], rule=inspect.currentframe().f_code.co_name)
inherit_lineno(p[0], p[1])'''
actions['''break_stmt : BREAK'''] = ''' p[0] = ast.Break(rule=inspect.currentframe().f_code.co_name, **p[1][1])'''
actions['''continue_stmt : CONTINUE'''] = ''' p[0] = ast.Continue(rule=inspect.currentframe().f_code.co_name, **p[1][1])'''
actions['''return_stmt : RETURN'''] = ''' p[0] = ast.Return(None, rule=inspect.currentframe().f_code.co_name, **p[1][1])'''
actions['''return_stmt : RETURN testlist'''] = ''' p[0] = ast.Return(p[2], rule=inspect.currentframe().f_code.co_name, **p[1][1])'''
actions['''yield_stmt : yield_expr'''] = ''' p[0] = p[1]'''
actions['''raise_stmt : RAISE'''] = ''' p[0] = ast.Raise(None, None, None, rule=inspect.currentframe().f_code.co_name, **p[1][1])'''
actions['''raise_stmt : RAISE test'''] = ''' p[0] = ast.Raise(p[2], None, None, rule=inspect.currentframe().f_code.co_name, **p[1][1])'''
actions['''import_stmt : import_name'''] = ''' p[0] = p[1]'''
actions['''import_stmt : import_from'''] = ''' p[0] = p[1]'''
actions['''import_name : IMPORT dotted_as_names'''] = ''' p[0] = ast.Import(p[2], rule=inspect.currentframe().f_code.co_name, **p[1][1])'''
actions['''import_from : FROM dotted_name IMPORT STAR'''] = ''' dotted = []
last = p[2]
while isinstance(last, ast.Attribute):
dotted.insert(0, last.attr)
last = last.value
dotted.insert(0, last.id)
p[0] = ast.ImportFrom(".".join(dotted), [ast.alias("*", None, rule=inspect.currentframe().f_code.co_name, **p[3][1])], 0, rule=inspect.currentframe().f_code.co_name, **p[1][1])'''
actions['''import_from : FROM dotted_name IMPORT LPAR import_as_names RPAR'''] = ''' dotted = []
last = p[2]
while isinstance(last, ast.Attribute):
dotted.insert(0, last.attr)
last = last.value
dotted.insert(0, last.id)
p[0] = ast.ImportFrom(".".join(dotted), p[5], 0, rule=inspect.currentframe().f_code.co_name, **p[1][1])'''
actions['''import_from : FROM dotted_name IMPORT import_as_names'''] = ''' dotted = []
last = p[2]
while isinstance(last, ast.Attribute):
dotted.insert(0, last.attr)
last = last.value
dotted.insert(0, last.id)
p[0] = ast.ImportFrom(".".join(dotted), p[4], 0, rule=inspect.currentframe().f_code.co_name, **p[1][1])'''
actions['''import_from : FROM import_from_plus dotted_name IMPORT STAR'''] = ''' dotted = []
last = p[3]
while isinstance(last, ast.Attribute):
dotted.insert(0, last.attr)
last = last.value
dotted.insert(0, last.id)
p[0] = ast.ImportFrom(".".join(dotted), [ast.alias("*", None, rule=inspect.currentframe().f_code.co_name, **p[4][1])], p[2], **p[1][1])'''
actions['''import_from : FROM import_from_plus dotted_name IMPORT LPAR import_as_names RPAR'''] = ''' dotted = []
last = p[3]
while isinstance(last, ast.Attribute):
dotted.insert(0, last.attr)
last = last.value
dotted.insert(0, last.id)
p[0] = ast.ImportFrom(".".join(dotted), p[6], p[2], rule=inspect.currentframe().f_code.co_name, **p[1][1])'''
actions['''import_from : FROM import_from_plus dotted_name IMPORT import_as_names'''] = ''' dotted = []
last = p[3]
while isinstance(last, ast.Attribute):
dotted.insert(0, last.attr)
last = last.value
dotted.insert(0, last.id)
p[0] = ast.ImportFrom(".".join(dotted), p[5], p[2], rule=inspect.currentframe().f_code.co_name, **p[1][1])'''
actions['''import_from : FROM import_from_plus IMPORT STAR'''] = ''' p[0] = ast.ImportFrom(None, [ast.alias("*", None, rule=inspect.currentframe().f_code.co_name, **p[3][1])], p[2], rule=inspect.currentframe().f_code.co_name, **p[1][1])'''
actions['''import_from : FROM import_from_plus IMPORT LPAR import_as_names RPAR'''] = ''' p[0] = ast.ImportFrom(None, p[5], p[2], rule=inspect.currentframe().f_code.co_name, **p[1][1])'''
actions['''import_from : FROM import_from_plus IMPORT import_as_names'''] = ''' p[0] = ast.ImportFrom(None, p[4], p[2], rule=inspect.currentframe().f_code.co_name, **p[1][1])'''
actions['''import_from_plus : DOT'''] = ''' p[0] = 1'''
actions['''import_from_plus : import_from_plus DOT'''] = ''' p[0] = p[1] + 1'''
actions['''import_as_name : NAME'''] = ''' p[0] = ast.alias(p[1][0], None, rule=inspect.currentframe().f_code.co_name, **p[1][1])'''
actions['''import_as_name : NAME AS NAME'''] = ''' p[0] = ast.alias(p[1][0], p[3][0], rule=inspect.currentframe().f_code.co_name, **p[1][1])'''
actions['''dotted_as_name : dotted_name'''] = ''' dotted = []
last = p[1]
while isinstance(last, ast.Attribute):
dotted.insert(0, last.attr)
last = last.value
dotted.insert(0, last.id)
p[0] = ast.alias(".".join(dotted), None, rule=inspect.currentframe().f_code.co_name)
inherit_lineno(p[0], p[1])'''
actions['''dotted_as_name : dotted_name AS NAME'''] = ''' dotted = []
last = p[1]
while isinstance(last, ast.Attribute):
dotted.insert(0, last.attr)
last = last.value
dotted.insert(0, last.id)
p[0] = ast.alias(".".join(dotted), p[3][0], rule=inspect.currentframe().f_code.co_name)
inherit_lineno(p[0], p[1])'''
actions['''import_as_names : import_as_name'''] = ''' p[0] = [p[1]]'''
actions['''import_as_names : import_as_name COMMA'''] = ''' p[0] = [p[1]]'''
actions['''import_as_names : import_as_name import_as_names_star'''] = ''' p[0] = [p[1]] + p[2]'''
actions['''import_as_names : import_as_name import_as_names_star COMMA'''] = ''' p[0] = [p[1]] + p[2]'''
actions['''import_as_names_star : COMMA import_as_name'''] = ''' p[0] = [p[2]]'''
actions['''import_as_names_star : import_as_names_star COMMA import_as_name'''] = ''' p[0] = p[1] + [p[3]]'''
actions['''dotted_as_names : dotted_as_name'''] = ''' p[0] = [p[1]]'''
actions['''dotted_as_names : dotted_as_name dotted_as_names_star'''] = ''' p[0] = [p[1]] + p[2]'''
actions['''dotted_as_names_star : COMMA dotted_as_name'''] = ''' p[0] = [p[2]]'''
actions['''dotted_as_names_star : dotted_as_names_star COMMA dotted_as_name'''] = ''' p[0] = p[1] + [p[3]]'''
actions['''dotted_name : NAME'''] = ''' p[0] = ast.Name(p[1][0], ast.Load(), rule=inspect.currentframe().f_code.co_name, **p[1][1])'''
actions['''dotted_name : NAME dotted_name_star'''] = ''' last = p[2]
if isinstance(last, ast.Attribute):
inherit_lineno(last, p[1][1])
while isinstance(last.value, ast.Attribute):
last = last.value
inherit_lineno(last, p[1][1])
last.value = ast.Attribute(ast.Name(p[1][0], ast.Load(), rule=inspect.currentframe().f_code.co_name, **p[1][1]), last.value, ast.Load(), rule=inspect.currentframe().f_code.co_name, **p[1][1])
p[0] = p[2]
else:
p[0] = ast.Attribute(ast.Name(p[1][0], ast.Load(), rule=inspect.currentframe().f_code.co_name, **p[1][1]), p[2], ast.Load(), rule=inspect.currentframe().f_code.co_name, **p[1][1])'''
actions['''dotted_name_star : DOT NAME'''] = ''' p[0] = p[2][0]'''
actions['''dotted_name_star : dotted_name_star DOT NAME'''] = ''' p[0] = ast.Attribute(p[1], p[3][0], ast.Load(), rule=inspect.currentframe().f_code.co_name)'''
actions['''global_stmt : GLOBAL NAME'''] = ''' p[0] = ast.Global([p[2][0]], rule=inspect.currentframe().f_code.co_name, **p[1][1])'''
actions['''global_stmt : GLOBAL NAME global_stmt_star'''] = ''' p[0] = ast.Global([p[2][0]] + p[3], rule=inspect.currentframe().f_code.co_name, **p[1][1])'''
actions['''global_stmt_star : COMMA NAME'''] = ''' p[0] = [p[2][0]]'''
actions['''global_stmt_star : global_stmt_star COMMA NAME'''] = ''' p[0] = p[1] + [p[3][0]]'''
actions['''assert_stmt : ASSERT test'''] = ''' p[0] = ast.Assert(p[2], None, rule=inspect.currentframe().f_code.co_name, **p[1][1])'''
actions['''assert_stmt : ASSERT test COMMA test'''] = ''' p[0] = ast.Assert(p[2], p[4], rule=inspect.currentframe().f_code.co_name, **p[1][1])'''
actions['''compound_stmt : if_stmt'''] = ''' p[0] = [p[1]]'''
actions['''compound_stmt : while_stmt'''] = ''' p[0] = [p[1]]'''
actions['''compound_stmt : for_stmt'''] = ''' p[0] = [p[1]]'''
actions['''compound_stmt : try_stmt'''] = ''' p[0] = [p[1]]'''
actions['''compound_stmt : with_stmt'''] = ''' p[0] = [p[1]]'''
actions['''compound_stmt : funcdef'''] = ''' p[0] = [p[1]]'''
actions['''compound_stmt : classdef'''] = ''' p[0] = [p[1]]'''
actions['''compound_stmt : decorated'''] = ''' p[0] = [p[1]]'''
actions['''if_stmt : IF test COLON suite'''] = ''' p[0] = ast.If(p[2], p[4], [], rule=inspect.currentframe().f_code.co_name, **p[1][1])'''
actions['''if_stmt : IF test COLON suite ELSE COLON suite'''] = ''' p[0] = ast.If(p[2], p[4], p[7], rule=inspect.currentframe().f_code.co_name, **p[1][1])'''
actions['''if_stmt : IF test COLON suite if_stmt_star'''] = ''' p[0] = ast.If(p[2], p[4], [p[5]], rule=inspect.currentframe().f_code.co_name, **p[1][1])'''
actions['''if_stmt : IF test COLON suite if_stmt_star ELSE COLON suite'''] = ''' last = p[5]
while len(last.orelse) > 0:
last = last.orelse[0]
last.orelse.extend(p[8])
p[0] = ast.If(p[2], p[4], [p[5]], rule=inspect.currentframe().f_code.co_name, **p[1][1])'''
actions['''if_stmt_star : ELIF test COLON suite'''] = ''' p[0] = ast.If(p[2], p[4], [], rule=inspect.currentframe().f_code.co_name)
inherit_lineno(p[0], p[2])'''
actions['''if_stmt_star : if_stmt_star ELIF test COLON suite'''] = ''' last = p[1]
while len(last.orelse) > 0:
last = last.orelse[0]
last.orelse.append(ast.If(p[3], p[5], [], rule=inspect.currentframe().f_code.co_name))
inherit_lineno(last.orelse[-1], p[3])
p[0] = p[1]'''
actions['''while_stmt : WHILE test COLON suite'''] = ''' p[0] = ast.While(p[2], p[4], [], rule=inspect.currentframe().f_code.co_name, **p[1][1])'''
actions['''while_stmt : WHILE test COLON suite ELSE COLON suite'''] = ''' p[0] = ast.While(p[2], p[4], p[7], rule=inspect.currentframe().f_code.co_name, **p[1][1])'''
actions['''for_stmt : FOR exprlist IN testlist COLON suite'''] = ''' ctx_to_store(p[2])
p[0] = ast.For(p[2], p[4], p[6], [], rule=inspect.currentframe().f_code.co_name, **p[1][1])'''
actions['''for_stmt : FOR exprlist IN testlist COLON suite ELSE COLON suite'''] = ''' ctx_to_store(p[2])
p[0] = ast.For(p[2], p[4], p[6], p[9], rule=inspect.currentframe().f_code.co_name, **p[1][1])'''
actions['''try_stmt : TRY COLON suite try_stmt_plus'''] = ''' p[0] = ast.TryExcept(p[3], p[4], [], rule=inspect.currentframe().f_code.co_name, **p[1][1])'''
actions['''try_stmt : TRY COLON suite try_stmt_plus FINALLY COLON suite'''] = ''' p[0] = ast.TryFinally([ast.TryExcept(p[3], p[4], [], rule=inspect.currentframe().f_code.co_name, **p[1][1])], p[7], rule=inspect.currentframe().f_code.co_name, **p[1][1])'''
actions['''try_stmt : TRY COLON suite try_stmt_plus ELSE COLON suite'''] = ''' p[0] = ast.TryExcept(p[3], p[4], p[7], rule=inspect.currentframe().f_code.co_name, **p[1][1])'''
actions['''try_stmt : TRY COLON suite try_stmt_plus ELSE COLON suite FINALLY COLON suite'''] = ''' p[0] = ast.TryFinally([ast.TryExcept(p[3], p[4], p[7], rule=inspect.currentframe().f_code.co_name, **p[1][1])], p[10], rule=inspect.currentframe().f_code.co_name, **p[1][1])'''
actions['''try_stmt : TRY COLON suite FINALLY COLON suite'''] = ''' p[0] = ast.TryFinally(p[3], p[6], rule=inspect.currentframe().f_code.co_name, **p[1][1])'''
actions['''try_stmt_plus : except_clause COLON suite'''] = ''' p[1].body = p[3]
p[0] = [p[1]]'''
actions['''try_stmt_plus : try_stmt_plus except_clause COLON suite'''] = ''' p[2].body = p[4]
p[0] = p[1] + [p[2]]'''
actions['''with_stmt : WITH with_item COLON suite'''] = ''' p[2].body = p[4]
p[0] = p[2]'''
actions['''with_stmt : WITH with_item with_stmt_star COLON suite'''] = ''' p[2].body.append(p[3])
last = p[2]
while len(last.body) > 0:
last = last.body[0]
last.body = p[5]
p[0] = p[2]'''
actions['''with_stmt_star : COMMA with_item'''] = ''' p[0] = p[2]'''
actions['''with_stmt_star : with_stmt_star COMMA with_item'''] = ''' last = p[1]
while len(last.body) > 0:
last = last.body[0]
last.body.append(p[3])
p[0] = p[1]'''
actions['''with_item : test'''] = ''' p[0] = ast.With(p[1], None, [], rule=inspect.currentframe().f_code.co_name)
inherit_lineno(p[0], p[1])'''
actions['''with_item : test AS expr'''] = ''' ctx_to_store(p[3])
p[0] = ast.With(p[1], p[3], [], rule=inspect.currentframe().f_code.co_name)
inherit_lineno(p[0], p[1])'''
actions['''except_clause : EXCEPT'''] = ''' p[0] = ast.ExceptHandler(None, None, [], rule=inspect.currentframe().f_code.co_name, **p[1][1])'''
actions['''except_clause : EXCEPT test'''] = ''' p[0] = ast.ExceptHandler(p[2], None, [], rule=inspect.currentframe().f_code.co_name, **p[1][1])'''
actions['''suite : simple_stmt'''] = ''' p[0] = p[1]'''
actions['''suite : NEWLINE INDENT suite_plus DEDENT'''] = ''' p[0] = p[3]'''
actions['''suite_plus : stmt'''] = ''' p[0] = p[1]'''
actions['''suite_plus : suite_plus stmt'''] = ''' p[0] = p[1] + p[2]'''
actions['''test : or_test'''] = ''' p[0] = p[1]'''
actions['''test : or_test IF or_test ELSE test'''] = ''' p[0] = ast.IfExp(p[3], p[1], p[5], rule=inspect.currentframe().f_code.co_name)
inherit_lineno(p[0], p[1])'''
actions['''test : lambdef'''] = ''' p[0] = p[1]'''
actions['''lambdef : LAMBDA COLON test'''] = ''' p[0] = ast.Lambda(ast.arguments([], None, None, [], rule=inspect.currentframe().f_code.co_name, **p[2][1]), p[3], rule=inspect.currentframe().f_code.co_name, **p[1][1])'''
actions['''lambdef : LAMBDA varargslist COLON test'''] = ''' p[0] = ast.Lambda(p[2], p[4], rule=inspect.currentframe().f_code.co_name, **p[1][1])'''
actions['''or_test : and_test'''] = ''' p[0] = p[1]'''
actions['''or_test : and_test or_test_star'''] = ''' theor = ast.Or(rule=inspect.currentframe().f_code.co_name)
inherit_lineno(theor, p[2][0])
p[0] = ast.BoolOp(theor, [p[1]] + p[2], rule=inspect.currentframe().f_code.co_name)
inherit_lineno(p[0], p[1])'''
actions['''or_test_star : OR and_test'''] = ''' p[0] = [p[2]]'''
actions['''or_test_star : or_test_star OR and_test'''] = ''' p[0] = p[1] + [p[3]]'''
actions['''and_test : not_test'''] = ''' p[0] = p[1]'''
actions['''and_test : not_test and_test_star'''] = ''' theand = ast.And(rule=inspect.currentframe().f_code.co_name)
inherit_lineno(theand, p[2][0])
p[0] = ast.BoolOp(theand, [p[1]] + p[2], rule=inspect.currentframe().f_code.co_name)
inherit_lineno(p[0], p[1])'''
actions['''and_test_star : AND not_test'''] = ''' p[0] = [p[2]]'''
actions['''and_test_star : and_test_star AND not_test'''] = ''' p[0] = p[1] + [p[3]]'''
actions['''not_test : NOT not_test'''] = ''' thenot = ast.Not(rule=inspect.currentframe().f_code.co_name)
inherit_lineno(thenot, p[2])
p[0] = ast.UnaryOp(thenot, p[2], rule=inspect.currentframe().f_code.co_name, **p[1][1])'''
actions['''not_test : comparison'''] = ''' p[0] = p[1]'''
actions['''comparison : expr'''] = ''' p[0] = p[1]'''
actions['''comparison : expr comparison_star'''] = ''' ops, exprs = p[2]
p[0] = ast.Compare(p[1], ops, exprs, rule=inspect.currentframe().f_code.co_name)
inherit_lineno(p[0], p[1])'''
actions['''comparison_star : comp_op expr'''] = ''' inherit_lineno(p[1], p[2])
p[0] = ([p[1]], [p[2]])'''
actions['''comparison_star : comparison_star comp_op expr'''] = ''' ops, exprs = p[1]
inherit_lineno(p[2], p[3])
p[0] = (ops + [p[2]], exprs + [p[3]])'''
actions['''comp_op : LESS'''] = ''' p[0] = ast.Lt(rule=inspect.currentframe().f_code.co_name)'''
actions['''comp_op : GREATER'''] = ''' p[0] = ast.Gt(rule=inspect.currentframe().f_code.co_name)'''
actions['''comp_op : EQEQUAL'''] = ''' p[0] = ast.Eq(rule=inspect.currentframe().f_code.co_name)'''
actions['''comp_op : GREATEREQUAL'''] = ''' p[0] = ast.GtE(rule=inspect.currentframe().f_code.co_name)'''
actions['''comp_op : LESSEQUAL'''] = ''' p[0] = ast.LtE(rule=inspect.currentframe().f_code.co_name)'''
actions['''comp_op : NOTEQUAL'''] = ''' p[0] = ast.NotEq(rule=inspect.currentframe().f_code.co_name)'''
actions['''comp_op : IN'''] = ''' p[0] = ast.In(rule=inspect.currentframe().f_code.co_name)'''
actions['''comp_op : NOT IN'''] = ''' p[0] = ast.NotIn(rule=inspect.currentframe().f_code.co_name)'''
actions['''comp_op : IS'''] = ''' p[0] = ast.Is(rule=inspect.currentframe().f_code.co_name)'''
actions['''comp_op : IS NOT'''] = ''' p[0] = ast.IsNot(rule=inspect.currentframe().f_code.co_name)'''
actions['''expr : xor_expr'''] = ''' p[0] = p[1]'''
actions['''expr : xor_expr expr_star'''] = ''' p[0] = unwrap_left_associative([p[1]] + p[2], rule=inspect.currentframe().f_code.co_name, alt=len(p[2]) > 2)'''
actions['''expr_star : VBAR xor_expr'''] = ''' p[0] = [ast.BitOr(rule=inspect.currentframe().f_code.co_name, **p[1][1]), p[2]]'''
actions['''expr_star : expr_star VBAR xor_expr'''] = ''' p[0] = p[1] + [ast.BitOr(rule=inspect.currentframe().f_code.co_name, **p[2][1]), p[3]]'''
actions['''xor_expr : and_expr'''] = ''' p[0] = p[1]'''
actions['''xor_expr : and_expr xor_expr_star'''] = ''' p[0] = unwrap_left_associative([p[1]] + p[2], rule=inspect.currentframe().f_code.co_name, alt=len(p[2]) > 2)'''
actions['''xor_expr_star : CIRCUMFLEX and_expr'''] = ''' p[0] = [ast.BitXor(rule=inspect.currentframe().f_code.co_name, **p[1][1]), p[2]]'''
actions['''xor_expr_star : xor_expr_star CIRCUMFLEX and_expr'''] = ''' p[0] = p[1] + [ast.BitXor(rule=inspect.currentframe().f_code.co_name, **p[2][1]), p[3]]'''
actions['''and_expr : shift_expr'''] = ''' p[0] = p[1]'''
actions['''and_expr : shift_expr and_expr_star'''] = ''' p[0] = unwrap_left_associative([p[1]] + p[2], rule=inspect.currentframe().f_code.co_name, alt=len(p[2]) > 0)'''
actions['''and_expr_star : AMPER shift_expr'''] = ''' p[0] = [ast.BitAnd(rule=inspect.currentframe().f_code.co_name, **p[1][1]), p[2]]'''
actions['''and_expr_star : and_expr_star AMPER shift_expr'''] = ''' p[0] = p[1] + [ast.BitAnd(rule=inspect.currentframe().f_code.co_name, **p[2][1]), p[3]]'''
actions['''shift_expr : arith_expr'''] = ''' p[0] = p[1]'''
actions['''shift_expr : arith_expr shift_expr_star'''] = ''' p[0] = unwrap_left_associative([p[1]] + p[2], rule=inspect.currentframe().f_code.co_name, alt=len(p[2]) > 2)'''
actions['''shift_expr_star : LEFTSHIFT arith_expr'''] = ''' p[0] = [ast.LShift(rule=inspect.currentframe().f_code.co_name, **p[1][1]), p[2]]'''
actions['''shift_expr_star : RIGHTSHIFT arith_expr'''] = ''' p[0] = [ast.RShift(rule=inspect.currentframe().f_code.co_name, **p[1][1]), p[2]]'''
actions['''shift_expr_star : shift_expr_star LEFTSHIFT arith_expr'''] = ''' p[0] = p[1] + [ast.LShift(rule=inspect.currentframe().f_code.co_name, **p[2][1]), p[3]]'''
actions['''shift_expr_star : shift_expr_star RIGHTSHIFT arith_expr'''] = ''' p[0] = p[1] + [ast.RShift(rule=inspect.currentframe().f_code.co_name, **p[2][1]), p[3]]'''
actions['''arith_expr : term'''] = ''' p[0] = p[1]'''
actions['''arith_expr : term arith_expr_star'''] = ''' p[0] = unwrap_left_associative([p[1]] + p[2], rule=inspect.currentframe().f_code.co_name, alt=len(p[2]) > 2)'''
actions['''arith_expr_star : PLUS term'''] = ''' p[0] = [ast.Add(rule=inspect.currentframe().f_code.co_name, **p[1][1]), p[2]]'''
actions['''arith_expr_star : MINUS term'''] = ''' p[0] = [ast.Sub(rule=inspect.currentframe().f_code.co_name, **p[1][1]), p[2]]'''
actions['''arith_expr_star : arith_expr_star PLUS term'''] = ''' p[0] = p[1] + [ast.Add(rule=inspect.currentframe().f_code.co_name, **p[2][1]), p[3]]'''
actions['''arith_expr_star : arith_expr_star MINUS term'''] = ''' p[0] = p[1] + [ast.Sub(rule=inspect.currentframe().f_code.co_name, **p[2][1]), p[3]]'''
actions['''term : factor'''] = ''' p[0] = p[1]'''
actions['''term : factor term_star'''] = ''' p[0] = unwrap_left_associative([p[1]] + p[2], rule=inspect.currentframe().f_code.co_name, alt=len(p[2]) > 2)'''
actions['''term_star : STAR factor'''] = ''' p[0] = [ast.Mult(rule=inspect.currentframe().f_code.co_name, **p[1][1]), p[2]]'''
actions['''term_star : SLASH factor'''] = ''' p[0] = [ast.Div(rule=inspect.currentframe().f_code.co_name, **p[1][1]), p[2]]'''
actions['''term_star : PERCENT factor'''] = ''' p[0] = [ast.Mod(rule=inspect.currentframe().f_code.co_name, **p[1][1]), p[2]]'''
actions['''term_star : DOUBLESLASH factor'''] = ''' p[0] = [ast.FloorDiv(rule=inspect.currentframe().f_code.co_name, **p[1][1]), p[2]]'''
actions['''term_star : term_star STAR factor'''] = ''' p[0] = p[1] + [ast.Mult(rule=inspect.currentframe().f_code.co_name, **p[2][1]), p[3]]'''
actions['''term_star : term_star SLASH factor'''] = ''' p[0] = p[1] + [ast.Div(rule=inspect.currentframe().f_code.co_name, **p[2][1]), p[3]]'''
actions['''term_star : term_star PERCENT factor'''] = ''' p[0] = p[1] + [ast.Mod(rule=inspect.currentframe().f_code.co_name, **p[2][1]), p[3]]'''
actions['''term_star : term_star DOUBLESLASH factor'''] = ''' p[0] = p[1] + [ast.FloorDiv(rule=inspect.currentframe().f_code.co_name, **p[2][1]), p[3]]'''
actions['''factor : PLUS factor'''] = ''' op = ast.UAdd(rule=inspect.currentframe().f_code.co_name, **p[1][1])
p[0] = ast.UnaryOp(op, p[2], rule=inspect.currentframe().f_code.co_name)
inherit_lineno(p[0], op)'''
actions['''factor : MINUS factor'''] = ''' op = ast.USub(rule=inspect.currentframe().f_code.co_name, **p[1][1])
p[0] = ast.UnaryOp(op, p[2], rule=inspect.currentframe().f_code.co_name)
inherit_lineno(p[0], op)'''
actions['''factor : TILDE factor'''] = ''' op = ast.Invert(rule=inspect.currentframe().f_code.co_name, **p[1][1])
p[0] = ast.UnaryOp(op, p[2], rule=inspect.currentframe().f_code.co_name)
inherit_lineno(p[0], op)'''
actions['''factor : power'''] = ''' p[0] = p[1]'''
actions['''power : atom_expr'''] = ''' p[0] = p[1]'''
actions['''atom_expr : atom'''] = ''' p[0] = p[1]'''
actions['''atom : LPAR RPAR'''] = ''' p[0] = ast.Tuple([], ast.Load(), rule=inspect.currentframe().f_code.co_name, paren=True, **p[1][1])'''
actions['''atom : LPAR yield_expr RPAR'''] = ''' p[0] = p[2]
if isinstance(p[0], ast.Tuple):
p[0].paren = True
p[0].alt = p[1][1]'''
actions['''atom : LPAR testlist_comp RPAR'''] = ''' p[0] = p[2]
if isinstance(p[0], ast.Tuple):
p[0].paren = True
p[0].alt = p[1][1]'''
actions['''atom : LSQB RSQB'''] = ''' p[0] = ast.List([], ast.Load(), rule=inspect.currentframe().f_code.co_name, **p[1][1])'''
actions['''atom : LBRACE RBRACE'''] = ''' p[0] = ast.Dict([], [], rule=inspect.currentframe().f_code.co_name, **p[1][1])'''
actions['''atom : LBRACE dictorsetmaker RBRACE'''] = ''' if isinstance(p[2], (ast.SetComp, ast.DictComp)):
p[0] = p[2]
p[0].alt = p[1][1]
else:
keys, values = p[2]
if keys is None:
p[0] = ast.Set(values, rule=inspect.currentframe().f_code.co_name, **p[1][1])
else:
p[0] = ast.Dict(keys, values, rule=inspect.currentframe().f_code.co_name, **p[1][1])'''
actions['''atom : NAME'''] = ''' p[0] = ast.Name(p[1][0], ast.Load(), rule=inspect.currentframe().f_code.co_name, **p[1][1])'''
actions['''atom : NUMBER'''] = ''' p[0] = ast.Num(p[1][0], rule=inspect.currentframe().f_code.co_name, **p[1][2])'''
actions['''atom : atom_plus'''] = ''' p[0] = p[1]'''
actions['''atom_plus : STRING'''] = ''' p[0] = ast.Str(p[1][0], rule=inspect.currentframe().f_code.co_name, **p[1][1])'''
actions['''atom_plus : atom_plus STRING'''] = ''' p[1].s = p[1].s + p[2][0]
p[0] = p[1]'''
actions['''testlist_comp : test comp_for'''] = ''' p[0] = ast.GeneratorExp(p[1], p[2], rule=inspect.currentframe().f_code.co_name)
inherit_lineno(p[0], p[1])'''
actions['''testlist_comp : test'''] = ''' p[0] = p[1]'''
actions['''testlist_comp : test COMMA'''] = ''' p[0] = ast.Tuple([p[1]], ast.Load(), rule=inspect.currentframe().f_code.co_name, paren=False)
inherit_lineno(p[0], p[1])'''
actions['''testlist_comp : test testlist_comp_star'''] = ''' p[0] = ast.Tuple([p[1]] + p[2], ast.Load(), rule=inspect.currentframe().f_code.co_name, paren=False)
inherit_lineno(p[0], p[1])'''
actions['''testlist_comp : test testlist_comp_star COMMA'''] = ''' p[0] = ast.Tuple([p[1]] + p[2], ast.Load(), rule=inspect.currentframe().f_code.co_name, paren=False)
inherit_lineno(p[0], p[1])'''
actions['''testlist_comp_star : COMMA test'''] = ''' p[0] = [p[2]]'''
actions['''testlist_comp_star : testlist_comp_star COMMA test'''] = ''' p[0] = p[1] + [p[3]]'''
actions['''trailer : LPAR RPAR'''] = ''' p[0] = ast.Call(None, [], [], None, None, rule=inspect.currentframe().f_code.co_name)'''
actions['''trailer : LPAR arglist RPAR'''] = ''' p[0] = p[2]'''
actions['''trailer : LSQB subscriptlist RSQB'''] = ''' p[0] = ast.Subscript(None, p[2], ast.Load(), rule=inspect.currentframe().f_code.co_name)'''
actions['''trailer : DOT NAME'''] = ''' p[0] = ast.Attribute(None, p[2][0], ast.Load(), rule=inspect.currentframe().f_code.co_name)'''
actions['''subscriptlist : subscript'''] = ''' p[0] = p[1]'''
actions['''subscriptlist : subscript COMMA'''] = ''' if isinstance(p[1], ast.Index):
tup = ast.Tuple([p[1].value], ast.Load(), rule=inspect.currentframe().f_code.co_name, paren=False)
inherit_lineno(tup, p[1].value)
p[0] = ast.Index(tup, rule=inspect.currentframe().f_code.co_name)
inherit_lineno(p[0], tup)
else:
p[0] = ast.ExtSlice([p[1]], rule=inspect.currentframe().f_code.co_name)
inherit_lineno(p[0], p[1])'''
actions['''subscriptlist : subscript subscriptlist_star'''] = ''' args = [p[1]] + p[2]
if all(isinstance(x, ast.Index) for x in args):
tup = ast.Tuple([x.value for x in args], ast.Load(), rule=inspect.currentframe().f_code.co_name, paren=False)
inherit_lineno(tup, args[0].value)
p[0] = ast.Index(tup, rule=inspect.currentframe().f_code.co_name)
inherit_lineno(p[0], tup)
else:
p[0] = ast.ExtSlice(args, rule=inspect.currentframe().f_code.co_name)
inherit_lineno(p[0], p[1])'''
actions['''subscriptlist : subscript subscriptlist_star COMMA'''] = ''' args = [p[1]] + p[2]
if all(isinstance(x, ast.Index) for x in args):
tup = ast.Tuple([x.value for x in args], ast.Load(), rule=inspect.currentframe().f_code.co_name, paren=False)
inherit_lineno(tup, args[0].value)
p[0] = ast.Index(tup, rule=inspect.currentframe().f_code.co_name)
inherit_lineno(p[0], tup)
else:
p[0] = ast.ExtSlice(args, rule=inspect.currentframe().f_code.co_name)
inherit_lineno(p[0], p[1])'''
actions['''subscriptlist_star : COMMA subscript'''] = ''' p[0] = [p[2]]'''
actions['''subscriptlist_star : subscriptlist_star COMMA subscript'''] = ''' p[0] = p[1] + [p[3]]'''
actions['''subscript : test'''] = ''' p[0] = ast.Index(p[1], rule=inspect.currentframe().f_code.co_name)
inherit_lineno(p[0], p[1])'''
actions['''subscript : COLON'''] = ''' p[0] = ast.Slice(None, None, None, rule=inspect.currentframe().f_code.co_name, **p[1][1])'''
actions['''subscript : COLON sliceop'''] = ''' p[0] = ast.Slice(None, None, p[2], rule=inspect.currentframe().f_code.co_name, **p[1][1])'''
actions['''subscript : COLON test'''] = ''' p[0] = ast.Slice(None, p[2], None, rule=inspect.currentframe().f_code.co_name, **p[1][1])'''
actions['''subscript : COLON test sliceop'''] = ''' p[0] = ast.Slice(None, p[2], p[3], rule=inspect.currentframe().f_code.co_name, **p[1][1])'''
actions['''subscript : test COLON'''] = ''' p[0] = ast.Slice(p[1], None, None, rule=inspect.currentframe().f_code.co_name)
inherit_lineno(p[0], p[1])'''
actions['''subscript : test COLON sliceop'''] = ''' p[0] = ast.Slice(p[1], None, p[3], rule=inspect.currentframe().f_code.co_name)
inherit_lineno(p[0], p[1])'''
actions['''subscript : test COLON test'''] = ''' p[0] = ast.Slice(p[1], p[3], None, rule=inspect.currentframe().f_code.co_name)
inherit_lineno(p[0], p[1])'''
actions['''subscript : test COLON test sliceop'''] = ''' p[0] = ast.Slice(p[1], p[3], p[4], rule=inspect.currentframe().f_code.co_name)
inherit_lineno(p[0], p[1])'''
actions['''sliceop : COLON'''] = ''' p[0] = ast.Name("None", ast.Load(), rule=inspect.currentframe().f_code.co_name, **p[1][1])'''
actions['''sliceop : COLON test'''] = ''' p[0] = p[2]'''
actions['''exprlist : expr'''] = ''' p[0] = p[1]'''
actions['''exprlist : expr COMMA'''] = ''' p[0] = ast.Tuple([p[1]], ast.Load(), rule=inspect.currentframe().f_code.co_name, paren=False)
inherit_lineno(p[0], p[1])'''
actions['''exprlist : expr exprlist_star'''] = ''' p[0] = ast.Tuple([p[1]] + p[2], ast.Load(), rule=inspect.currentframe().f_code.co_name, paren=False)
inherit_lineno(p[0], p[1])'''
actions['''exprlist : expr exprlist_star COMMA'''] = ''' p[0] = ast.Tuple([p[1]] + p[2], ast.Load(), rule=inspect.currentframe().f_code.co_name, paren=False)
inherit_lineno(p[0], p[1])'''
actions['''exprlist_star : COMMA expr'''] = ''' p[0] = [p[2]]'''
actions['''exprlist_star : exprlist_star COMMA expr'''] = ''' p[0] = p[1] + [p[3]]'''
actions['''testlist : test'''] = ''' p[0] = p[1]'''
actions['''testlist : test COMMA'''] = ''' p[0] = ast.Tuple([p[1]], ast.Load(), rule=inspect.currentframe().f_code.co_name, paren=False)
inherit_lineno(p[0], p[1])'''
actions['''testlist : test testlist_star'''] = ''' p[0] = ast.Tuple([p[1]] + p[2], ast.Load(), rule=inspect.currentframe().f_code.co_name, paren=False)
inherit_lineno(p[0], p[1])'''
actions['''testlist : test testlist_star COMMA'''] = ''' p[0] = ast.Tuple([p[1]] + p[2], ast.Load(), rule=inspect.currentframe().f_code.co_name, paren=False)
inherit_lineno(p[0], p[1])'''
actions['''testlist_star : COMMA test'''] = ''' p[0] = [p[2]]'''
actions['''testlist_star : testlist_star COMMA test'''] = ''' p[0] = p[1] + [p[3]]'''
actions['''dictorsetmaker : test COLON test comp_for'''] = ''' p[0] = ast.DictComp(p[1], p[3], p[4], rule=inspect.currentframe().f_code.co_name)
inherit_lineno(p[0], p[1])'''
actions['''dictorsetmaker : test COLON test'''] = ''' p[0] = ([p[1]], [p[3]])'''
actions['''dictorsetmaker : test COLON test COMMA'''] = ''' p[0] = ([p[1]], [p[3]])'''
actions['''dictorsetmaker : test COLON test dictorsetmaker_star'''] = ''' keys, values = p[4]
p[0] = ([p[1]] + keys, [p[3]] + values)'''
actions['''dictorsetmaker : test COLON test dictorsetmaker_star COMMA'''] = ''' keys, values = p[4]
p[0] = ([p[1]] + keys, [p[3]] + values)'''
actions['''dictorsetmaker : test comp_for'''] = ''' p[0] = ast.SetComp(p[1], p[2], rule=inspect.currentframe().f_code.co_name)
inherit_lineno(p[0], p[1])'''
actions['''dictorsetmaker : test'''] = ''' p[0] = (None, [p[1]])'''
actions['''dictorsetmaker : test COMMA'''] = ''' p[0] = (None, [p[1]])'''
actions['''dictorsetmaker_star : COMMA test COLON test'''] = ''' p[0] = ([p[2]], [p[4]])'''
actions['''dictorsetmaker_star : dictorsetmaker_star COMMA test COLON test'''] = ''' keys, values = p[1]
p[0] = (keys + [p[3]], values + [p[5]])'''
actions['''classdef : CLASS NAME COLON suite'''] = ''' p[0] = ast.ClassDef(p[2][0], [], p[4], [], rule=inspect.currentframe().f_code.co_name, **p[1][1])'''
actions['''classdef : CLASS NAME LPAR RPAR COLON suite'''] = ''' p[0] = ast.ClassDef(p[2][0], [], p[6], [], rule=inspect.currentframe().f_code.co_name, **p[1][1])'''
actions['''arglist : argument'''] = ''' if notkeyword(p[1]):
p[0] = ast.Call(None, [p[1]], [], None, None, rule=inspect.currentframe().f_code.co_name)
else:
p[0] = ast.Call(None, [], [p[1]], None, None, rule=inspect.currentframe().f_code.co_name)'''
actions['''arglist : argument COMMA'''] = ''' if notkeyword(p[1]):
p[0] = ast.Call(None, [p[1]], [], None, None, rule=inspect.currentframe().f_code.co_name)
else:
p[0] = ast.Call(None, [], [p[1]], None, None, rule=inspect.currentframe().f_code.co_name)'''
actions['''arglist_star : COMMA argument'''] = ''' p[0] = [p[2]]'''
actions['''arglist_star : arglist_star COMMA argument'''] = ''' p[0] = p[1] + [p[3]]'''
actions['''argument : test'''] = ''' p[0] = p[1]'''
actions['''argument : test comp_for'''] = ''' p[0] = ast.GeneratorExp(p[1], p[2], rule=inspect.currentframe().f_code.co_name)
inherit_lineno(p[0], p[1])'''
actions['''argument : test EQUAL test'''] = ''' p[0] = ast.keyword(p[1].id, p[3], rule=inspect.currentframe().f_code.co_name)
inherit_lineno(p[0], p[1])'''
actions['''comp_iter : comp_for'''] = ''' p[0] = ([], p[1])'''
actions['''comp_iter : comp_if'''] = ''' p[0] = p[1]'''
actions['''comp_for : FOR exprlist IN or_test'''] = ''' ctx_to_store(p[2])
p[0] = [ast.comprehension(p[2], p[4], [], rule=inspect.currentframe().f_code.co_name, **p[1][1])]'''
actions['''comp_for : FOR exprlist IN or_test comp_iter'''] = ''' ctx_to_store(p[2])
ifs, iters = p[5]
p[0] = [ast.comprehension(p[2], p[4], ifs, rule=inspect.currentframe().f_code.co_name, **p[1][1])] + iters'''
actions['''encoding_decl : NAME'''] = ''' p[0] = p[1]'''
actions['''yield_expr : YIELD'''] = ''' p[0] = ast.Yield(None, rule=inspect.currentframe().f_code.co_name, **p[1][1])'''
| 82.404612
| 278
| 0.591956
| 6,121
| 39,307
| 3.644176
| 0.039863
| 0.030844
| 0.190756
| 0.19905
| 0.887071
| 0.854927
| 0.809917
| 0.750605
| 0.692011
| 0.661078
| 0
| 0.033114
| 0.142595
| 39,307
| 476
| 279
| 82.577731
| 0.628746
| 0.000509
| 0
| 0.295359
| 0
| 0.352321
| 0.820801
| 0.243013
| 0
| 0
| 0
| 0
| 0.006329
| 1
| 0
| false
| 0.004219
| 0.061181
| 0
| 0.061181
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
022e6d868c484ea0939194676e5e96b394a879b4
| 187
|
py
|
Python
|
python/tHome/util/process/__init__.py
|
ZigmundRat/T-Home
|
5dc8689f52d87dac890051e540b338b009293ced
|
[
"BSD-2-Clause"
] | 18
|
2016-04-17T19:39:28.000Z
|
2020-11-19T06:55:20.000Z
|
python/tHome/util/process/__init__.py
|
ZigmundRat/T-Home
|
5dc8689f52d87dac890051e540b338b009293ced
|
[
"BSD-2-Clause"
] | 11
|
2018-09-07T18:34:41.000Z
|
2021-05-02T04:44:54.000Z
|
python/tHome/util/process/__init__.py
|
ZigmundRat/T-Home
|
5dc8689f52d87dac890051e540b338b009293ced
|
[
"BSD-2-Clause"
] | 12
|
2016-10-31T12:29:08.000Z
|
2021-12-28T12:18:28.000Z
|
#=============================================================================
from .simple import simple
#=============================================================================
| 31.166667
| 78
| 0.117647
| 4
| 187
| 5.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.042781
| 187
| 5
| 79
| 37.4
| 0.122905
| 0.823529
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
0230f7d9bb5344b5b927a608f2e8627efdfc04c0
| 132
|
py
|
Python
|
test/fixtures/python/corpus/future_import_statement.A.py
|
matsubara0507/semantic
|
67899f701abc0f1f0cb4374d8d3c249afc33a272
|
[
"MIT"
] | 8,844
|
2019-05-31T15:47:12.000Z
|
2022-03-31T18:33:51.000Z
|
test/fixtures/python/corpus/future_import_statement.A.py
|
matsubara0507/semantic
|
67899f701abc0f1f0cb4374d8d3c249afc33a272
|
[
"MIT"
] | 401
|
2019-05-31T18:30:26.000Z
|
2022-03-31T16:32:29.000Z
|
test/fixtures/python/corpus/future_import_statement.A.py
|
matsubara0507/semantic
|
67899f701abc0f1f0cb4374d8d3c249afc33a272
|
[
"MIT"
] | 504
|
2019-05-31T17:55:03.000Z
|
2022-03-30T04:15:04.000Z
|
from __future__ import print_function
from __future__ import unicode_literals, division
from __future__ import print_function as pf
| 33
| 49
| 0.878788
| 18
| 132
| 5.611111
| 0.555556
| 0.29703
| 0.475248
| 0.415842
| 0.574257
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.113636
| 132
| 3
| 50
| 44
| 0.863248
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0.666667
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
|
0
| 9
|
5a09ba4563846e0bd24f76bdcefc47f2b88476de
| 252
|
py
|
Python
|
dining_visualizer/views.py
|
NotDachun/hows-twitter
|
6f56507d9f62e7fc9e538d69215b2f0df6d334bf
|
[
"MIT"
] | null | null | null |
dining_visualizer/views.py
|
NotDachun/hows-twitter
|
6f56507d9f62e7fc9e538d69215b2f0df6d334bf
|
[
"MIT"
] | 3
|
2020-02-11T23:22:09.000Z
|
2021-06-10T20:55:34.000Z
|
dining_visualizer/views.py
|
NotDachun/hows-twitter
|
6f56507d9f62e7fc9e538d69215b2f0df6d334bf
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from django.http import HttpResponse
def home(request):
return render(request, 'dining_visualizer/firstPage.html')
def visualization(request):
return render(request, 'dining_visualizer/visualization.html')
| 28
| 66
| 0.801587
| 30
| 252
| 6.666667
| 0.533333
| 0.1
| 0.19
| 0.26
| 0.42
| 0.42
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 252
| 8
| 67
| 31.5
| 0.892857
| 0
| 0
| 0
| 0
| 0
| 0.269841
| 0.269841
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
5a560bcbc71b563b3e915a22d7a803f5e3957fbc
| 209
|
py
|
Python
|
sources/exceptions.py
|
plasticruler/newshound
|
c97ef09165eabb27ac65682e4893cf72dae7f3fb
|
[
"Apache-2.0"
] | null | null | null |
sources/exceptions.py
|
plasticruler/newshound
|
c97ef09165eabb27ac65682e4893cf72dae7f3fb
|
[
"Apache-2.0"
] | null | null | null |
sources/exceptions.py
|
plasticruler/newshound
|
c97ef09165eabb27ac65682e4893cf72dae7f3fb
|
[
"Apache-2.0"
] | null | null | null |
class InvalidAPIKey(Exception):
def __init__(self, provider):
self.provider = provider
class APIKeyMissing(Exception):
def __init__(self, provider):
self.provider = provider
| 20.9
| 33
| 0.669856
| 20
| 209
| 6.6
| 0.4
| 0.363636
| 0.242424
| 0.30303
| 0.727273
| 0.727273
| 0.727273
| 0.727273
| 0
| 0
| 0
| 0
| 0.244019
| 209
| 9
| 34
| 23.222222
| 0.835443
| 0
| 0
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
5a76358bc991a0e095a3168899b98d1d4edd8b09
| 4,950
|
py
|
Python
|
django_sso_app/core/apps/status/tests/test_backend.py
|
paiuolo/django-sso-app
|
75b96c669dc0b176dc77e08f018a3e97d259f636
|
[
"MIT"
] | 1
|
2021-11-16T15:16:08.000Z
|
2021-11-16T15:16:08.000Z
|
django_sso_app/core/apps/status/tests/test_backend.py
|
paiuolo/django-sso-app
|
75b96c669dc0b176dc77e08f018a3e97d259f636
|
[
"MIT"
] | null | null | null |
django_sso_app/core/apps/status/tests/test_backend.py
|
paiuolo/django-sso-app
|
75b96c669dc0b176dc77e08f018a3e97d259f636
|
[
"MIT"
] | null | null | null |
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.core import mail
from rest_framework import status
from allauth.account.models import EmailConfirmationHMAC, EmailAddress
from django_sso_app.core.tests.factories import UserTestCase
from django_sso_app.core.apps.profiles.models import Profile
User = get_user_model()
class TestBackend(UserTestCase):
def test_can_login_by_apigateway_header(self):
with self.settings(DJANGO_SSO_APP_SHAPE='backend_only_apigateway'):
new_pass = self._get_random_pass()
new_user = self._get_new_user(password=new_pass)
profile_url = reverse('django_sso_app_profile:rest-detail', args=(new_user.sso_id,))
user_device = self._get_user_device(new_user)
client = self._get_client()
client.cookies = self._get_jwt_cookie(user_device)
response = client.get(
profile_url,
content_type='application/json',
HTTP_X_CONSUMER_CUSTOM_ID=new_user.sso_app_profile.sso_id
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('sso_id'), new_user.sso_id, 'sso_id differs from header')
self.assertEqual(Profile.objects.filter(sso_id=new_user.sso_id).count(), 1, 'no new user created')
def test_redirect_to_profile_complete_if_profile_is_incomplete(self):
BACKEND_URL = 'http://accounts.example.com'
with self.settings(DJANGO_SSO_APP_SHAPE='backend_only', APP_URL=BACKEND_URL):
new_pass = self._get_random_pass()
new_user = self._get_new_incomplete_user(password=new_pass)
user_device = self._get_user_device(new_user)
client = self._get_client()
client.cookies = self._get_jwt_cookie(user_device)
response = client.get(
'/profile/',
content_type='application/json',
HTTP_X_CONSUMER_CUSTOM_ID=new_user.sso_app_profile.sso_id
)
self.assertEqual(response.status_code, status.HTTP_302_FOUND)
self.assertEqual(response.url, '/profile/complete/')
def test_redirect_to_profile_complete_if_profile_is_incomplete_apigateway(self):
with self.settings(DJANGO_SSO_APP_SHAPE='backend_app_apigateway'):
new_pass = self._get_random_pass()
new_user = self._get_new_incomplete_user(password=new_pass)
user_device = self._get_user_device(new_user)
client = self._get_client()
client.cookies = self._get_jwt_cookie(user_device)
response = client.get(
'/profile/',
content_type='application/json',
HTTP_X_CONSUMER_CUSTOM_ID=new_user.sso_app_profile.sso_id
)
self.assertEqual(response.status_code, status.HTTP_302_FOUND)
self.assertEqual(response.url, '/profile/complete/')
def test_email_verification_redirects_to_profile_completion(self):
with self.settings(DJANGO_SSO_APP_SHAPE='backend_app'):
signup_obj = self._get_signup_object()
client = self._get_client()
response = client.post(
reverse('account_signup'),
data=signup_obj
)
self.assertEqual(response.status_code, status.HTTP_302_FOUND)
self.assertEqual(len(mail.outbox), 1)
email_address = EmailAddress.objects.get(email=signup_obj['email'])
email_confirmation = EmailConfirmationHMAC(email_address)
response2 = client.get(reverse('account_confirm_email',
args=[email_confirmation.key]),
follow=True)
self.assertEqual(response2.redirect_chain[-1][0], '/profile/complete/')
def test_email_verification_redirects_to_profile_completion_apigateway(self):
with self.settings(DJANGO_SSO_APP_SHAPE='backend_app'):
signup_obj = self._get_signup_object()
client = self._get_client()
response = client.post(
reverse('account_signup'),
data=signup_obj,
)
self.assertEqual(response.status_code, status.HTTP_302_FOUND)
self.assertEqual(len(mail.outbox), 1)
email_address = EmailAddress.objects.get(email=signup_obj['email'])
email_confirmation = EmailConfirmationHMAC(email_address)
new_user = User.objects.get(email=signup_obj['email'])
response2 = client.get(reverse('account_confirm_email', args=[email_confirmation.key]),
HTTP_X_CONSUMER_CUSTOM_ID=new_user.sso_app_profile.sso_id,
follow=True)
self.assertEqual(response2.redirect_chain[-1][0], '/profile/complete/')
| 38.076923
| 110
| 0.654949
| 578
| 4,950
| 5.212803
| 0.185121
| 0.044142
| 0.031862
| 0.023896
| 0.806505
| 0.793229
| 0.77232
| 0.77232
| 0.77232
| 0.741786
| 0
| 0.007067
| 0.256768
| 4,950
| 129
| 111
| 38.372093
| 0.811905
| 0
| 0
| 0.586207
| 0
| 0
| 0.083636
| 0.024444
| 0
| 0
| 0
| 0
| 0.149425
| 1
| 0.057471
| false
| 0.068966
| 0.08046
| 0
| 0.149425
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
ce6dd5efc96f3dedb79889a834f0e618573a7f82
| 37
|
py
|
Python
|
classes/etl/spa_ncdf/__init__.py
|
rhyswhitley/rooting_depth
|
204da2e2e1fac8c8ff9f81ae096d6b1e851a71d0
|
[
"CC0-1.0"
] | null | null | null |
classes/etl/spa_ncdf/__init__.py
|
rhyswhitley/rooting_depth
|
204da2e2e1fac8c8ff9f81ae096d6b1e851a71d0
|
[
"CC0-1.0"
] | null | null | null |
classes/etl/spa_ncdf/__init__.py
|
rhyswhitley/rooting_depth
|
204da2e2e1fac8c8ff9f81ae096d6b1e851a71d0
|
[
"CC0-1.0"
] | 1
|
2019-09-01T04:15:21.000Z
|
2019-09-01T04:15:21.000Z
|
from spa_netCDF4 import spa_netCDF4
| 12.333333
| 35
| 0.864865
| 6
| 37
| 5
| 0.666667
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.0625
| 0.135135
| 37
| 2
| 36
| 18.5
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
ced0d441476cd6d9c5012f0a5f706820949af69f
| 1,228
|
py
|
Python
|
tests/test_paths.py
|
RBrearton/nexusformat
|
229eb8105113a8660461c7b9150bfc769959455a
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
tests/test_paths.py
|
RBrearton/nexusformat
|
229eb8105113a8660461c7b9150bfc769959455a
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
tests/test_paths.py
|
RBrearton/nexusformat
|
229eb8105113a8660461c7b9150bfc769959455a
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
import pytest
from nexusformat.nexus import *
field1 = NXfield((1,2), name="f1")
def test_attribute_paths():
root = NXroot(NXentry())
root.entry.g1 = NXgroup(field1)
assert root.entry.g1.nxpath == "/entry/g1"
assert root["entry/g1"] is root.entry.g1
assert root["entry/g1/f1"] is root.entry.g1.f1
assert "g1" in root.entry
assert "f1" in root.entry.g1
assert "entry/g1/f1" in root
assert root.entry.g1.f1.nxroot is root
def test_dictionary_paths():
root = NXroot(NXentry())
root["entry/g1"] = NXgroup(field1)
assert root.entry.g1.nxpath == "/entry/g1"
assert root["entry/g1"] is root.entry.g1
assert root["entry/g1/f1"] is root.entry.g1.f1
assert "g1" in root["/entry"]
assert "f1" in root["/entry/g1"]
assert "/entry/g1/f1" in root
assert root["/entry/g1/f1"].nxroot is root
def test_relative_paths():
root = NXroot(NXentry())
root["entry/g1"] = NXgroup()
root["entry/g1/g2"] = NXgroup()
root["entry/g1/g2/f1"] = field1
assert "f1" in root["entry/g1/g2"]
assert "g2/f1" in root["entry/g1"]
assert "g1/g2/f1" in root["entry"]
assert root["entry/g1/g2/f1"].nxpath == "/entry/g1/g2/f1"
assert "entry" in root
| 26.12766
| 61
| 0.636808
| 194
| 1,228
| 4
| 0.14433
| 0.243557
| 0.311856
| 0.197165
| 0.80799
| 0.721649
| 0.667526
| 0.667526
| 0.615979
| 0.615979
| 0
| 0.060545
| 0.192997
| 1,228
| 46
| 62
| 26.695652
| 0.722503
| 0
| 0
| 0.272727
| 0
| 0
| 0.187296
| 0
| 0
| 0
| 0
| 0
| 0.575758
| 1
| 0.090909
| false
| 0
| 0.060606
| 0
| 0.151515
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0c74fa2e30a7e71564c45064dad682e4e7ace7d5
| 4,763
|
py
|
Python
|
visual.py
|
WhiteDOU/DNN_Pruning
|
bb84c9161ae8e2602ae00b0e6a4907a55f74a01f
|
[
"MIT"
] | null | null | null |
visual.py
|
WhiteDOU/DNN_Pruning
|
bb84c9161ae8e2602ae00b0e6a4907a55f74a01f
|
[
"MIT"
] | null | null | null |
visual.py
|
WhiteDOU/DNN_Pruning
|
bb84c9161ae8e2602ae00b0e6a4907a55f74a01f
|
[
"MIT"
] | null | null | null |
import glob
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
root = sorted(glob.glob('./all_fc/*'))
def fc():
i = 0
name = 'fc' + str(i / 3)
acc = root[i]
run_time = root[i + 1]
x = root[i + 2]
with open(acc, 'r') as f:
acc = f.readline()
acc = acc.replace(' ', '').replace('[', '').replace(']', '').split(',')
for i, item in enumerate(acc):
acc[i] = float(acc[i])
with open(x, 'r') as f:
x = f.readline()
x = x.replace(' ', '').replace('[', '').replace(']', '').split(',')
for i, item in enumerate(x):
x[i] = float(x[i])
with open(run_time, 'r') as f:
run_time = f.readline()
run_time = run_time.replace(' ', '').replace('[', '').replace(']', '').split(',')
for i, item in enumerate(run_time):
run_time[i] = float(run_time[i])
plt.figure()
plt.title(name + 'ACC & RUN_TIME')
plt.plot(x, acc, color='green', label='ACC')
plt.plot(x, run_time, color='red', label='RUN_TIME')
plt.xlabel('weights remain(%)')
plt.ylabel('ACC & RUN_TIME(s) ')
plt.legend()
plt.show()
i = 3
name = 'fc' + str(i / 3)
acc = root[i]
run_time = root[i + 1]
x = root[i + 2]
with open(acc, 'r') as f:
acc = f.readline()
acc = acc.replace(' ', '').replace('[', '').replace(']', '').split(',')
for i, item in enumerate(acc):
acc[i] = float(acc[i])
with open(x, 'r') as f:
x = f.readline()
x = x.replace(' ', '').replace('[', '').replace(']', '').split(',')
for i, item in enumerate(x):
x[i] = float(x[i])
with open(run_time, 'r') as f:
run_time = f.readline()
run_time = run_time.replace(' ', '').replace('[', '').replace(']', '').split(',')
for i, item in enumerate(run_time):
run_time[i] = float(run_time[i])
plt.figure()
plt.title(name + 'ACC & RUN_TIME')
plt.plot(x, acc, color='green', label='ACC')
plt.plot(x, run_time, color='red', label='RUN_TIME')
plt.xlabel('weights remain(%)')
plt.ylabel('ACC & RUN_TIME(s) ')
plt.legend()
plt.show()
print(root)
def feature():
for i in range(0, 15, 3):
name = 'feature:' + str(i / 3)
acc = root[i]
x = root[i + 1]
run_time = root[i + 2]
with open(acc, 'r') as f:
acc = f.readline()
acc = acc.replace(' ', '').replace('[', '').replace(']', '').split(',')
for i, item in enumerate(acc):
acc[i] = float(acc[i])
with open(x, 'r') as f:
x = f.readline()
x = x.replace(' ', '').replace('[', '').replace(']', '').split(',')
for i, item in enumerate(x):
x[i] = float(x[i])
with open(run_time, 'r') as f:
run_time = f.readline()
run_time = run_time.replace(' ', '').replace('[', '').replace(']', '').split(',')
for i, item in enumerate(run_time):
run_time[i] = float(run_time[i])
plt.figure()
plt.title(name + 'ACC & RUN_TIME')
plt.plot(x, acc, color='green', label='ACC')
plt.plot(x, run_time, color='red', label='RUN_TIME')
plt.xlabel('weights remain(%)')
plt.ylabel('ACC & RUN_TIME(s) ')
plt.legend()
plt.show()
def fc_all():
acc = root[0]
coord = root[1]
run_time = root[2]
with open(acc, 'r') as f:
acc = f.readline()
acc = acc.replace(' ', '').replace('[', '').replace(']', '').split(',')
for i, item in enumerate(acc):
acc[i] = float(acc[i])
with open(coord, 'r') as f:
coord = f.readline()
coord = coord.replace(' ', '').replace('[', '').replace(']', '').split(',')
for i, item in enumerate(coord):
coord[i] = float(coord[i])
x = coord[0::2]
y = coord[1::2]
with open(run_time, 'r') as f:
run_time = f.readline()
run_time = run_time.replace(' ', '').replace('[', '').replace(']', '').split(',')
for i, item in enumerate(run_time):
run_time[i] = float(run_time[i])
x = np.linspace(14, 0.0025, num=100)
y = np.linspace(5, 0.0025, num=100)
acc = np.array(acc).reshape(x.shape[0],y.shape[0])
run_time = np.array(run_time).reshape(x.shape[0],y.shape[0])
print(run_time)
fig = plt.figure()
ax = Axes3D(fig)
x,y = np.meshgrid(x,y)
ax.plot_surface(x,y,acc,rstride=1, cstride=1, cmap='rainbow')
plt.xlabel('fc0')
plt.ylabel('fc1')
plt.show()
plt.xlabel('fc0')
plt.ylabel('fc1')
ax.plot_surface(x,y,run_time,rstride=1, cstride=1, cmap='rainbow')
plt.show()
fc_all()
| 32.848276
| 93
| 0.500945
| 670
| 4,763
| 3.480597
| 0.119403
| 0.144082
| 0.020583
| 0.133791
| 0.810892
| 0.798027
| 0.773156
| 0.729417
| 0.729417
| 0.71012
| 0
| 0.016369
| 0.281755
| 4,763
| 144
| 94
| 33.076389
| 0.665303
| 0
| 0
| 0.723077
| 0
| 0
| 0.065519
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.023077
| false
| 0
| 0.030769
| 0
| 0.053846
| 0.015385
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0c88e8beee792e5169c40d63a655beaa2545b827
| 605
|
py
|
Python
|
hostman/utils.py
|
jonhadfield/hostman
|
c1643f5dd95833715b26032821bf631a4e2c4a7e
|
[
"MIT"
] | 20
|
2018-09-11T16:27:04.000Z
|
2021-08-16T17:21:10.000Z
|
hostman/utils.py
|
TeamAleph/hostman
|
8ba27903a6dc58464ee4cb8e1a48a2b1a5559696
|
[
"MIT"
] | 2
|
2019-09-17T11:01:17.000Z
|
2020-02-13T15:53:24.000Z
|
hostman/utils.py
|
TeamAleph/hostman
|
8ba27903a6dc58464ee4cb8e1a48a2b1a5559696
|
[
"MIT"
] | 4
|
2018-09-11T16:30:25.000Z
|
2021-02-24T19:52:43.000Z
|
import os
def is_readable(path=None):
"""Test if the supplied filesystem path can be read
:param path: A filesystem path
:return: True if the path is a file that can be read. Otherwise, False.
"""
if os.path.isfile(path) and os.access(path, os.R_OK):
return True
return False
def is_writeable(path=None):
"""Test if the supplied filesystem path can be written to
:param path: A filesystem path
:return: True if the path is a file that can be written. Otherwise, False.
"""
if os.path.isfile(path) and os.access(path, os.W_OK):
return True
| 26.304348
| 78
| 0.667769
| 99
| 605
| 4.040404
| 0.323232
| 0.05
| 0.06
| 0.07
| 0.76
| 0.76
| 0.76
| 0.76
| 0.76
| 0.76
| 0
| 0
| 0.242975
| 605
| 22
| 79
| 27.5
| 0.873362
| 0.519008
| 0
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.125
| 0
| 0.75
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
0c8abae002d63dd11e69e680b8d593f8881ae6bf
| 13,840
|
py
|
Python
|
Week7/final-exam-q4/validate.py
|
italoag/M101P
|
708bdd793735228f820f3f50f57c44ce8fc637ef
|
[
"MIT"
] | null | null | null |
Week7/final-exam-q4/validate.py
|
italoag/M101P
|
708bdd793735228f820f3f50f57c44ce8fc637ef
|
[
"MIT"
] | null | null | null |
Week7/final-exam-q4/validate.py
|
italoag/M101P
|
708bdd793735228f820f3f50f57c44ce8fc637ef
|
[
"MIT"
] | null | null | null |
import base64
code="
import pymongo
import urllib2
import urllib
import cookielib
import random
import re
import string
import sys
import getopt

# init the global cookie jar
cj = cookielib.CookieJar()
# declare the variables to connect to db
connection = None
db = None
webhost = "localhost:8082"
mongostr = "mongodb://localhost:27017"
db_name = "blog"

# this script will check that homework 3.2 is correct

# makes a little salt
def make_salt(n):
    salt = ""
    for i in range(n):
        salt = salt + random.choice(string.ascii_letters)
    return salt


# this is a validation script to make sure the blog works correctly.

def create_user(username, password):
    
    global cj

    try:
        print "Trying to create a test user ", username
        url = "http://{0}/signup".format(webhost)

        data = urllib.urlencode([("email",""),("username",username), ("password",password), ("verify",password)])
        request = urllib2.Request(url=url, data=data)
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
        f = opener.open(request)

        users = db.users
        # check that the user is in users collection
        user = users.find_one({'_id':username})
        if (user == None):
            print "Could not find the test user ", username, "in the users collection."
            return False
        print "Found the test user ", username, " in the users collection"

        # check that the user has been built
        result = f.read()
        expr = re.compile("Welcome\s+"+ username)
        if expr.search(result):
            return True
        
        print "When we tried to create a user, here is the output we got\n"
        print result
        
        return False
    except:
        print "the request to ", url, " failed, so your blog may not be running."
        raise
        return False


def try_to_login(username, password):
    """POST to /login with the given credentials and check that the
    response page contains a "Welcome <username>" greeting.
    Returns True on success, False otherwise (including request failure)."""

    try:
        print "Trying to login for test user ", username
        url = "http://{0}/login".format(webhost)

        data = urllib.urlencode([("username",username), ("password",password)])
        request = urllib2.Request(url=url, data=data)
        # session cookie from signup is carried via the global jar cj
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
        f = opener.open(request)

        # check for successful login
        result = f.read()
        expr = re.compile("Welcome\s+"+ username)
        if expr.search(result):
            return True

        print "When we tried to login, here is the output we got\n"
        print result
        return False
    except:
        print "the request to ", url, " failed, so your blog may not be running."
        return False


def add_blog_post(title,post,tags):
    """Submit a new blog post via /newpost and verify that the returned
    page shows the title followed (anywhere later) by the post body.
    Returns True on success, False otherwise."""

    try:
        print "Trying to submit a post with title ", title
        data = urllib.urlencode([("body",post), ("subject",title), ("tags",tags)])
        url = "http://{0}/newpost".format(webhost)
        request = urllib2.Request(url=url, data=data)
        # attach the logged-in session cookie manually
        cj.add_cookie_header(request)
        opener = urllib2.build_opener()
        f = opener.open(request)

        # check that the new post appears on the response page
        result = f.read()
        # NOTE(review): title/post are interpolated unescaped into the
        # pattern; assumes the generated test data has no regex metachars
        expr = re.compile(title + ".+" + post, re.DOTALL)

        if expr.search(result):
            return True

        print "When we tried to post, here is the output we got\n"
        print result
        return False

    except:
        print "the request to ", url, " failed, so your blog may not be running."
        raise

        return False  # unreachable: the raise above re-throws

def add_blog_comment(title,post):
    """Post a random comment to the blog entry matching (title, post) via
    /newcomment, then verify the comment is shown on the page and stored
    in the post's 'comments' array in MongoDB.  Returns True on success."""

    try:
        print "+Trying to submit a blog comment for post with title", title
        url = "http://{0}/newcomment".format(webhost)
        
        # check_mongo_for_post is presumably defined elsewhere in the
        # original validate script (not visible here); it fills doc['doc']
        # with the matching post document — TODO confirm
        doc = {}
        check_mongo_for_post(title, post, doc)

        permalink = doc['doc']['permalink']

        # random author/body so this run's comment is uniquely identifiable
        comment_name = make_salt(12)
        comment_body = make_salt(12)

        data = urllib.urlencode([("commentName",comment_name), ("commentBody",comment_body), ("permalink",permalink)])
        request = urllib2.Request(url=url, data=data)
        cj.add_cookie_header(request)
        opener = urllib2.build_opener()
        f = opener.open(request)

        # check for successful addition of comment on page
        result = f.read()
        expr = re.compile(title + ".+" + post, re.DOTALL)

        if not expr.search(result):
            print "When we tried to find the comment we posted at the  ", url, " here is what we got"
            print result
            return False


        # check for successful addition of comment..retrieve the doc again
        if(not check_mongo_for_post(title, post, doc)):
            print "Could not find comment in database"
            return False
        
        # scan the refreshed document for the exact comment we submitted
        found = False
        if ('comments' in doc['doc']):
            for comment in doc['doc']['comments']:
                if (comment['body'] == comment_body and comment['author'] == comment_name):
                    found = True

        return found

    except:
        print "the request to ", url, " failed, so your blog may not be running."
        raise

        return False  # unreachable: the raise above re-throws


# fetch the blog home page and return the link of the first post
def fetch_blog_home_page(posts):

    try:
        url = "http://{0}/".format(webhost)
        print "Trying to grab the blog home page at url and find the first post.", url
        request = urllib2.Request(url=url)
        cj.add_cookie_header(request)
        opener = urllib2.build_opener()
        f = opener.open(request)

        # Look for a post
        result = f.read()
        expr = re.compile("<a href=\"([^\"]+)\"\w*?>", re.DOTALL)


        match = expr.search(result)

        if match is not None:
            print "Fount a post url: ", match.group(1)
            posts.append(match.group(1))
            return True

        
        print "Hmm, can't seem to find a post. Is the blog populated with posts?"
        print "When we tried to read the blog index at ", url, " here is what we got"
        print result
        return False

    except:
        print "the request to ", url, " failed, so your blog may not be running."
        raise

        return False

# gets the likes value off the first commment or returns None
def fetch_likes(url):
    """Fetch the permalink page at *url* (path relative to webhost) and
    return the integer Likes count of the first comment form, or None
    if no form with a Likes value is present."""

    try:
        url = "http://{0}{1}".format(webhost, url)
        print "Trying to grab the number of likes for url ", url
        request = urllib2.Request(url=url)
        cj.add_cookie_header(request)
        opener = urllib2.build_opener()
        f = opener.open(request)


        # let's get the first form element and its "Likes: N" text
        result = f.read()
        expr = re.compile("<form[^>]*>.*?Likes:\s*(\d+)\s*<.*?</form>", re.DOTALL)

        match = expr.search(result)

        if match is not None:
            print "Likes value ", match.group(1)
            return int(match.group(1))

        print "Can't fetch the like value for the first comment. Perhaps the blog entry has no comments?"
        print "When we tried to read the blog permalink at ", url, " here is what we got"
        return None

    except:
        print "the request to ", url, " failed, so your blog may not be running."
        raise

        return None  # unreachable: the raise above re-throws


# gets the likes value off the first commment or returns None
def click_on_like(permalink):
    """Simulate clicking Like on the first comment of the post at
    *permalink* (a path like "post/<slug>") by POSTing to /like.
    Returns True on success; re-raises on request failure."""

    print "Clicking on Like link for post: ", permalink
    try:
        # extract the slug: the segment after the first "/"
        expr =  re.compile("[^/]+/([^/]+)")
        match = expr.search(permalink)
        if match is None:
            return False

        permalink = match.group(1)
        url = "http://{0}/like".format(webhost)
        # print "Like POST url", url

        # comment_ordinal 0 targets the first comment on the post
        data = urllib.urlencode([("permalink",permalink), ("comment_ordinal","0")])
        request = urllib2.Request(url=url, data=data)
        cj.add_cookie_header(request)
        opener = urllib2.build_opener()
        f = opener.open(request)

        return True

    except:
        print "the request to ", url, " failed, so your blog may not be running."
        raise




# command line arg parsing to make folks happy who want to run at mongolabs or mongohq
# this functions uses global vars to communicate. forgive me.
# command line arg parsing to make folks happy who want to run at mongolabs or mongohq
# this functions uses global vars to communicate. forgive me.
def arg_parsing(argv):
    """Parse -p/-m/-d options into the module globals webhost, mongostr
    and db_name.  Exits with status 2 on usage errors or -h."""

    global webhost
    global mongostr
    global db_name

    try:
        opts, args = getopt.getopt(argv, "-p:-m:-d:")
    except getopt.GetoptError:
        print "usage validate.py -p webhost -m mongoConnectString -d databaseName"
        print "\twebhost defaults to {0}".format(webhost)
        print "\tmongoConnectionString default to {0}".format(mongostr)
        print "\tdatabaseName defaults to {0}".format(db_name)
        sys.exit(2)
    for opt, arg in opts:
        if (opt == '-h'):
            print "usage validate.py -p webhost -m mongoConnectString -d databaseName"
            sys.exit(2)
        # NOTE(review): ("-p") is just the string "-p", so these are
        # substring checks, not tuple membership; they happen to work
        # because getopt only yields exact option strings here
        elif opt in ("-p"):
            webhost = arg
            print "Overriding HTTP host to be ", webhost
        elif opt in ("-m"):
            mongostr = arg
            print "Overriding MongoDB connection string to be ", mongostr
        elif opt in ("-d"):
            db_name = arg
            print "Overriding MongoDB database to be ", db_name
            


# main section of the code
# main section of the code
def main(argv):
    """Drive the Final Exam Q4 check: fetch the blog home page, read the
    Likes count of the first post's first comment, click Like, and verify
    the count increments by exactly one.  Exits non-zero on any failure."""
            
    arg_parsing(argv)
    global connection
    global db

    print "Welcome to the M101 Final Exam, Question 4 Validation Checker"

    # connect to the db (mongostr was set in arg_parsing)
    # NOTE(review): pymongo.Connection is the legacy pre-3.x API —
    # confirm against the pymongo version this course targets
    connection = pymongo.Connection(mongostr, safe=True)
    db = connection[db_name]


    # grab the blog home page and find the first post
    posts = []
    if (not fetch_blog_home_page(posts)):
        print "I can't grab the home page of the blog"
        sys.exit(1)

    # now go to the permalink page for that post
    likes_value = fetch_likes(posts[0])

    if (likes_value is  None):
        print "Can't fetch the like value"
        sys.exit(1)

    click_on_like(posts[0])

    # re-fetch and compare: a correct solution bumps likes by exactly 1
    new_likes_value = fetch_likes(posts[0])

    if (new_likes_value != (likes_value + 1)):
        print "I was not able to increment the likes on a comment"
        print "old likes value was ", likes_value
        print "likes value after I clicked was ", new_likes_value
        print "Sorry, you have not solved it yet."
        sys.exit(1)


    print "Tests Passed for Final 4. Your validation code is 3f837hhg673ghd93hgf8"


# standard script entry point: forward CLI args (minus program name) to main
if __name__ == "__main__":
    main(sys.argv[1:])







"
eval(compile(base64.b64decode(code), "<string>", 'exec'))
| 3,460
| 13,767
| 0.998121
| 11
| 13,840
| 1,255.818182
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.095056
| 0.000434
| 13,840
| 3
| 13,768
| 4,613.333333
| 0.903499
| 0
| 0
| 0
| 0
| 0
| 0.995087
| 0.99422
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 9
|
0ce345f3435ce519223a781685bbe50964f41097
| 17,219
|
py
|
Python
|
src/modules/testVTK/testDB3D.py
|
sankhaMukherjee/vtk
|
fc6ae8b4e56d62796a1a0d28e0c7dce598114103
|
[
"MIT"
] | null | null | null |
src/modules/testVTK/testDB3D.py
|
sankhaMukherjee/vtk
|
fc6ae8b4e56d62796a1a0d28e0c7dce598114103
|
[
"MIT"
] | 5
|
2020-03-24T18:03:04.000Z
|
2021-08-23T20:34:23.000Z
|
src/modules/testVTK/testDB3D.py
|
sankhaMukherjee/vtk
|
fc6ae8b4e56d62796a1a0d28e0c7dce598114103
|
[
"MIT"
] | null | null | null |
import vtk
import numpy as np
from lib.simpleFunctions import simpleObjects as sO
import matplotlib.pyplot as plt
from matplotlib import colors as cl
import os, json
from datetime import datetime as dt
# ---------------------------------------------------------
# Global variables are always bad. However, there
# appears to be no good way in which the renderer
# and the window objects can be passed along to
# other functions while the windoow is being rendered
# ---------------------------------------------------------
renWin = vtk.vtkRenderWindow() # for the screen capture
ren = vtk.vtkRenderer() # for the camera
def restoreCammeraSpecs(fileName):
    """Restore the global renderer's active camera from a saved JSON file.

    The file is expected to contain the keys written by saveCameraSpecs()
    (focalPoint, position, viewUp, viewAngle, clippingRange).  Any error
    (missing file, bad JSON, missing key) is logged and swallowed so an
    interactive session can still start with the default camera.
    """
    try:
        camera = ren.GetActiveCamera()
        # context manager closes the file promptly (the original leaked
        # the handle from a bare open())
        with open(fileName) as f:
            data = json.load(f)
        camera.SetFocalPoint(data['focalPoint'])
        camera.SetPosition(data['position'])
        camera.SetViewUp(data['viewUp'])
        camera.SetViewAngle(data['viewAngle'])
        camera.SetClippingRange(data['clippingRange'])
    except Exception as e:
        print(f'Unable to restore the session from [{fileName}]: {e}')
    return
def saveCameraSpecs():
    """Snapshot the global renderer's active camera to JSON.

    Writes two files under ../results/cameraPos: a timestamped
    3D_<timestamp>.json and latest3D.json (the one restoreCammeraSpecs
    loads by default), then echoes the values to stdout.
    """
    camera = ren.GetActiveCamera()
    folder = '../results/cameraPos'
    os.makedirs(folder, exist_ok=True)
    fileName = dt.now().strftime('3D_%Y-%m-%d--%H-%M-%S.json')
    fileName = os.path.join(folder, fileName)
    focalPoint = list(camera.GetFocalPoint())
    position = list(camera.GetPosition())
    viewUp = list(camera.GetViewUp())
    viewAngle = camera.GetViewAngle()
    clippingRange = list(camera.GetClippingRange())
    data = {
        'focalPoint': focalPoint,
        'position': position,
        'viewUp': viewUp,
        'viewAngle': viewAngle,
        'clippingRange': clippingRange,
    }
    # serialize once; write the same payload to both files
    payload = json.dumps(data)
    with open(fileName, 'w') as f:
        f.write(payload)
    with open(os.path.join(folder, 'latest3D.json'), 'w') as f:
        f.write(payload)
    print(f'+------------------------------------------')
    print(f'| focalPoint = {focalPoint}')
    print(f'| position = {position}')
    print(f'| viewUp = {viewUp}')
    print(f'| viewAngle = {viewAngle}')
    print(f'| clippingRange = {clippingRange}')
    print(f'+------------------------------------------')
    return
def screenShot():
    """Capture the current render window to a timestamped PNG.

    Saves under ../results/screenShots and returns the file path.
    Uses the global renWin as the capture source.
    """
    folder = '../results/screenShots'
    os.makedirs(folder, exist_ok=True)
    fileName = dt.now().strftime('%Y-%m-%d--%H-%M-%S.png')
    fileName = os.path.join(folder, fileName)
    # grab the framebuffer: back buffer, RGB only (no alpha)
    w2if = vtk.vtkWindowToImageFilter()
    w2if.SetInput(renWin)
    w2if.SetInputBufferTypeToRGB()
    w2if.ReadFrontBufferOff()
    w2if.Update()
    writer = vtk.vtkPNGWriter()
    writer.SetFileName(fileName)
    writer.SetInputConnection(w2if.GetOutputPort())
    writer.Write()
    return fileName
def Keypress(obj, event):
    """vtk KeyPressEvent observer: 's' takes a screenshot, 'c' saves the
    current camera specs.  *obj* is the interactor, *event* is unused."""
    key = obj.GetKeySym()
    if key in ('s', 'S'):
        fileName = screenShot()
        print(f'Screenshot saved at [{fileName}]')
    if key in ('c', 'C'):
        saveCameraSpecs()
def getData():
    """Return a small synthetic patient dataset.

    Each row is [site, patient_id, sex, race, cgi_series] where cgi_series
    is a 1-D numpy array of random integers in [1, 6] whose length is that
    patient's number of observed days.
    """
    data = [
        ["Something", "27574", "M", "Hispanic"],
        ["ArapahoeHouse", "11636", "M", "White"],
        ["Other", "32608", "M", "American Indian"],
        ["ArapahoeHouse", "44460", "F", "White"],
        ["Something", "18899", "F", "White"],
        ["ArapahoeHouse", "26025", "M", "White"],
        ["ArapahoeHouse", "7971", "M", "Hispanic"],
        ["ArapahoeHouse", "19373", "M", "Black"],
        ["ArapahoeHouse", "41578", "M", "White"],
        ["ArapahoeHouse", "42446", "M", "Native American"],
        ["ArapahoeHouse", "23182", "F", "White"],
    ]
    nDaysList = [206, 589, 278, 348, 274, 32, 317, 73, 184, 641, 468]
    # NOTE: the original hard-coded per-patient CGI series (allCGI) was
    # dead data — its only reference was commented out — so it has been
    # removed; random series are generated exactly as before.
    for row, nDays in zip(data, nDaysList):
        row.append(np.random.randint(1, 7, nDays))
    return data
def colorMapper(forMap):
    """Map each categorical value to an RGB triple from the tab20b colormap.

    Categories are sorted so the mapping is deterministic for a given set
    of values.  Returns one RGB (the RGBA with alpha dropped) per element.
    """
    uniques = sorted(set(forMap))
    N = len(uniques) - 1
    # guard: a single category would make N == 0 and divide by zero below
    denom = N if N > 0 else 1
    mapper = {m: plt.cm.tab20b(i / denom) for i, m in enumerate(uniques)}
    return [mapper[f][:3] for f in forMap]
def colorMapper3D_smooth(forMap):
    """Map nested numeric sequences onto the Blues colormap.

    Values are rescaled globally (across all rows) into [0.2, 1.0] before
    the colormap lookup; the alpha channel is dropped from the result.
    """
    lo = min(map(min, forMap))
    hi = max(map(max, forMap))
    scaled = [0.2 + (np.array(row) - lo) * 0.8 / (hi - lo) for row in forMap]
    return [plt.cm.Blues(row)[:, :-1] for row in scaled]
def sizeMapper3D_smooth(forMap):
    """Rescale nested numeric sequences into [0.2, 1.0].

    The scaling is global: the minimum over all rows maps to 0.2 and the
    maximum to 1.0.  Returns one numpy array per input row.
    """
    lo = min(map(min, forMap))
    hi = max(map(max, forMap))
    return [0.2 + (np.array(row) - lo) * 0.8 / (hi - lo) for row in forMap]
def get1Dobjects(colors, xPos, xText = 'x', yPosDelta=0.5, size=0.3, highlight=None):
    """Build a vertical column of fixed-size colored cubes at *xPos*, one
    per entry of *colors*, plus an axis label and tick mark.

    If *highlight* is an index, every other cube is desaturated (HSV
    saturation -> 0) and faded to 20% opacity.  Returns the list of
    simpleObjects whose .actor can be added to a renderer.
    """
    allObj = []
    for i, color in enumerate(colors):
        dim = (highlight is not None) and (highlight != i)
        if dim:
            # desaturate non-highlighted rows via an HSV round-trip
            color = cl.rgb_to_hsv(color)
            color[1] = 0
            color = cl.hsv_to_rgb(color)
        obj = sO.Cube()
        obj.source.SetCenter(xPos, i * yPosDelta, 0)
        obj.setSize(size)
        obj.setColor(color)
        if dim:
            obj.actor.GetProperty().SetOpacity(0.2)
        allObj.append(obj)
    # column label below the axis, plus a small tick mark
    xLabel = sO.Text(f'{xText}')
    xLabel.actor.SetScale(0.1, 0.1, 0.1)
    xLabel.actor.SetPosition(xPos - 0.2, -1, 0)
    xLabel.actor.GetProperty().SetColor(0, 0, 0)
    allObj.append(xLabel)
    ax1 = sO.Line((xPos, -0.4, 0), (xPos, -0.6, 0))
    allObj.append(ax1)
    return allObj
def get1DobjectsSmooth( vals, xPos, xText='x', yPosDelta=0.5, size=0.3, vMax = None, vMin=None, highlight=None ):
    """Build a vertical column of cubes whose size and Blues-colormap shade
    both scale with the numeric values in *vals*.

    vMin/vMax override the data range (useful for a fixed scale across
    columns).  If *highlight* is an index, other cubes are desaturated
    and faded.  Returns the list of simpleObjects plus label and tick.
    """
    minVal = vMin if vMin is not None else min(vals)
    maxVal = vMax if vMax is not None else max(vals)
    # rescale into [0.2, 1.0] for both cube size and colormap position
    size1 = 0.2 + 0.8 * (np.array(vals) - minVal) / (maxVal - minVal)
    colors = plt.cm.Blues(size1)[:, :-1]
    allObj = []
    for i, color in enumerate(colors):
        dim = (highlight is not None) and (highlight != i)
        if dim:
            color = cl.rgb_to_hsv(color)
            color[1] = 0
            color = cl.hsv_to_rgb(color)
        obj = sO.Cube()
        obj.source.SetCenter(xPos, i * yPosDelta, 0)
        obj.setSize(size * size1[i])
        obj.setColor(color)
        if dim:
            obj.actor.GetProperty().SetOpacity(0.2)
        allObj.append(obj)
    xLabel = sO.Text(f'{xText}')
    xLabel.actor.SetScale(0.1, 0.1, 0.1)
    xLabel.actor.SetPosition(xPos - 0.2, -1, 0)
    xLabel.actor.GetProperty().SetColor(0, 0, 0)
    allObj.append(xLabel)
    ax1 = sO.Line((xPos, -0.4, 0), (xPos, -0.6, 0))
    allObj.append(ax1)
    return allObj
def get2DObjects(colors2D, sizes2D, xPos, xText='x', yPosDelta=0.5, zPosDelta=0.5, size=0.3, maxNz=10, highlight=None):
    """Build a 2-D grid of cubes: rows along y (one per patient), depth
    along -z (one per time step, capped at *maxNz* + 1 entries).

    colors2D and sizes2D are parallel nested sequences.  If *highlight*
    is a row index, other rows are desaturated and faded to 10% opacity.
    Returns the list of simpleObjects plus a column label and tick.
    """
    allObj = []
    for i, (colors, sizes) in enumerate(zip(colors2D, sizes2D)):
        dim = (highlight is not None) and (highlight != i)
        for j, (c, s) in enumerate(zip(colors, sizes)):
            if j > maxNz:
                break
            if dim:
                c = cl.rgb_to_hsv(c)
                c[1] = 0
                c = cl.hsv_to_rgb(c)
            obj = sO.Cube()
            obj.source.SetCenter(xPos, i * yPosDelta, -j * zPosDelta)
            obj.setSize(size * s)
            obj.setColor(c)
            if dim:
                obj.actor.GetProperty().SetOpacity(0.1)
            allObj.append(obj)
    xLabel = sO.Text(f'{xText}')
    xLabel.actor.SetScale(0.1, 0.1, 0.1)
    xLabel.actor.SetPosition(xPos - 0.2, -1, 0)
    xLabel.actor.GetProperty().SetColor(0, 0, 0)
    allObj.append(xLabel)
    ax1 = sO.Line((xPos, -0.4, 0), (xPos, -0.6, 0))
    allObj.append(ax1)
    return allObj
def getPatients(nPatients, xPos, yPosDelta):
    """Build a labeled patient axis: one p_NNN text label and tick per
    patient, plus a vertical spine spanning all patients.

    Returns the list of simpleObjects whose .actor can be added to a
    renderer.
    """
    allObj = []
    for p in range(nPatients):
        patientText = sO.Text(f'p_{p:03d}')
        patientText.actor.SetScale(0.1, 0.1, 0.1)
        patientText.actor.SetPosition(xPos, p * yPosDelta, 0)
        patientText.actor.GetProperty().SetColor(0, 0, 0)
        allObj.append(patientText)
        # short horizontal tick to the left of each label
        tick = sO.Line((xPos - 0.3 - 0.1, p * yPosDelta, 0),
                       (xPos - 0.3 + 0.1, p * yPosDelta, 0))
        allObj.append(tick)
    # vertical spine connecting first and last patient rows
    spine = sO.Line((xPos - 0.3, 0, 0),
                    (xPos - 0.3, (nPatients - 1) * yPosDelta, 0))
    allObj.append(spine)
    return allObj
def plot3D(config):
    """Render the synthetic patient dataset in 3D.

    *config* is a dict with boolean-ish keys 'meanCGI', 'cgi', 'race',
    'sex' selecting which columns to draw, and 'highlight' (a patient
    index or falsy) to emphasize one patient.  Blocks in the interactor
    loop until the window is closed.
    """
    bgColor = [217/255, 211/255, 232/255]
    data = getData()
    site, patient, sex, race, cgi = zip(*data)
    # mean of each patient's first 10 CGI observations
    meanCGI = [np.mean(m[:10]) for m in cgi]
    sexColors = colorMapper(sex)
    raceColors = colorMapper(race)
    siteColors = colorMapper(site)
    cgiColors = colorMapper3D_smooth(cgi)
    cgiSizes = sizeMapper3D_smooth(cgi)
    ren.SetBackground(bgColor)
    renWin.AddRenderer(ren)
    iren = vtk.vtkRenderWindowInteractor()
    iren.SetRenderWindow(renWin)
    if config['meanCGI']:
        for obj in get1DobjectsSmooth(meanCGI, xPos=0, xText='meanCGI',
                                      vMax=7, vMin=1,
                                      highlight=config['highlight']):
            ren.AddActor(obj.actor)
    if config['cgi']:
        for obj in get2DObjects(cgiColors, cgiSizes, 1, 'cgi',
                                highlight=config['highlight']):
            ren.AddActor(obj.actor)
    if config['race']:
        for obj in get1Dobjects(raceColors, 3, 'race',
                                highlight=config['highlight']):
            ren.AddActor(obj.actor)
    if config['sex']:
        for obj in get1Dobjects(sexColors, 2, 'sex',
                                highlight=config['highlight']):
            ren.AddActor(obj.actor)
    # patient axis is always drawn
    for obj in getPatients(11, 4, 0.5):
        ren.AddActor(obj.actor)
    if config['highlight']:
        # translucent mesh plane marking the highlighted patient's row
        user4 = sO.MeshXZ(-0.3, 0, 3.3, -5, 2, 20)
        ren.AddActor(user4.actor)
    renWin.SetSize(900, 900)
    renWin.SetWindowName('3d stuff')
    iren.AddObserver("KeyPressEvent", Keypress)
    iren.Initialize()
    ren.ResetCamera()
    restoreCammeraSpecs('../results/cameraPos/latest3D.json')
    renWin.Render()
    iren.Start()
    return
| 46.918256
| 1,296
| 0.53952
| 4,679
| 17,219
| 1.981193
| 0.063903
| 0.406904
| 0.606796
| 0.804315
| 0.608954
| 0.587594
| 0.577131
| 0.570011
| 0.551133
| 0.52438
| 0
| 0.258654
| 0.161159
| 17,219
| 366
| 1,297
| 47.046448
| 0.383135
| 0.02805
| 0
| 0.333333
| 0
| 0
| 0.057539
| 0.011364
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05098
| false
| 0
| 0.027451
| 0
| 0.12549
| 0.035294
| 0
| 0
| 1
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0b3745173473b557b7989d2e2bac00e391fb7e55
| 104,388
|
py
|
Python
|
magni/tests/cs_reconstruction.py
|
SIP-AAU/Magni
|
6328dc98a273506f433af52e6bd394754a844550
|
[
"BSD-2-Clause"
] | 42
|
2015-02-09T10:17:26.000Z
|
2021-12-21T09:38:04.000Z
|
magni/tests/cs_reconstruction.py
|
SIP-AAU/Magni
|
6328dc98a273506f433af52e6bd394754a844550
|
[
"BSD-2-Clause"
] | 3
|
2015-03-20T12:00:40.000Z
|
2015-03-20T12:01:16.000Z
|
magni/tests/cs_reconstruction.py
|
SIP-AAU/Magni
|
6328dc98a273506f433af52e6bd394754a844550
|
[
"BSD-2-Clause"
] | 14
|
2015-04-28T03:08:32.000Z
|
2021-07-24T13:29:24.000Z
|
"""
..
Copyright (c) 2015-2017, Magni developers.
All rights reserved.
See LICENSE.rst for further information.
Module providing unittests for `magni.cs.reconstruction`.
**Testing Strategy**
The usage of FastOps is tested along with reconstructions in various points in
the phase space.
**Phase Space Tests Overview**
A set of :math:`(\delta, \rho)` points in the phase space is selected. For each
algorithm, the reconstruction capabilities in each point has been determined
for a given problem suite. The tests are based on a comparison with these
reference results. Specifically, a comparison based on `np.allclose` is done.
Also some border cases (extremes) like :math:`k = 0` are tested.
Points where it is likely to have positive results
+-----+------+------+------+------+------+------+------+
| no.| 1 | 2 | 3 | 4 | 5 | 6 | 7 |
+-----+------+------+------+------+------+------+------+
|delta| 0.08 | 0.24 | 0.38 | 0.62 | 0.78 | 0.84 | 0.96 |
+-----+------+------+------+------+------+------+------+
| rho| 0.05 | 0.01 | 0.12 | 0.38 | 0.22 | 0.08 | 0.91 |
+-----+------+------+------+------+------+------+------+
Points where it is unlikely to have positive results
+-----+------+------+------+
| no.| A | B | C |
+-----+------+------+------+
|delta| 0.06 | 0.19 | 0.29 |
+-----+------+------+------+
| rho| 0.92 | 0.84 | 0.94 |
+-----+------+------+------+
**Functions Tested**
See the docstrings of the below listed classes.
Routine listings
----------------
ComparisonGAMPTests(unittest.TestCase)
Comparison of magni.cs.reconstruction.gamp to a reference implementation.
FastOpsTests(unittest.TestCase)
Tests of FastOp input, i.e. function based measurements and FFT dictionary.
FeatureTest(object)
Reconstruction algorithm feature test base class.
FeaturePrecisionFloatTest(FeatureTest, unittest.TestCase)
Test of the precision float feature in reconstruction algorithms.
FeatureReportHistoryTest(FeatureTest, unittest.TestCase)
Test the report history feature in reconstruction algorithms.
FeatureStopCriterionTest(FeatureTest, unittest.TestCase)
Test of the stop criterion feature in reconstruction algorithms.
FeatureWarmStartTest(FeatureTest, unittest.TestCase)
Test of the warm_start feature in reconstruction algorithms.
PhaseSpaceExtremesTest(unittest.TestCase):
Tests of border case (extreme) phase space values.
PhaseSpaceTest(object)
Phase space test base class.
PhaseSpaceTest1(PhaseSpaceTest, unittest.TestCase)
Test of reconstruction capabilities at Phase Space point (0.08, 0.05)
PhaseSpaceTest2(PhaseSpaceTest, unittest.TestCase)
Test of reconstruction capabilities at Phase Space point (0.24, 0.01)
PhaseSpaceTest3(PhaseSpaceTest, unittest.TestCase)
Test of reconstruction capabilities at Phase Space point (0.38, 0.12)
PhaseSpaceTest4(PhaseSpaceTest, unittest.TestCase)
Test of reconstruction capabilities at Phase Space point (0.62, 0.38)
PhaseSpaceTest5(PhaseSpaceTest, unittest.TestCase)
Test of reconstruction capabilities at Phase Space point (0.78, 0.22)
PhaseSpaceTest6(PhaseSpaceTest, unittest.TestCase)
Test of reconstruction capabilities at Phase Space point (0.84, 0.08)
PhaseSpaceTest7(PhaseSpaceTest, unittest.TestCase)
Test of reconstruction capabilities at Phase Space point (0.96, 0.91)
PhaseSpaceTestA(PhaseSpaceTest, unittest.TestCase)
Test of reconstruction capabilities at Phase Space point (0.06, 0.92)
PhaseSpaceTestB(PhaseSpaceTest, unittest.TestCase)
Test of reconstruction capabilities at Phase Space point (0.19, 0.84)
PhaseSpaceTestC(PhaseSpaceTest, unittest.TestCase)
Test of reconstruction capabilities at Phase Space point (0.29, 0.94)
TestUSERademacher(unittest.TestCase)
Test of the use_rademacher test fixture function.
use_rademacher(n, m, k, seed)
Prepare an instance of the USE/Rademacher problem suite
"""
from __future__ import division
import os
import unittest
import warnings
import numpy as np
import magni
from magni.utils.validation import decorate_validation as _decorate_validation
from magni.utils.validation import validate_numeric as _numeric
class ComparisonGAMPTests(unittest.TestCase):
"""
Comparison of magni.cs.reconstruction.gamp to a reference implementation.
**Reference implementation**
"run_amp" from
https://github.com/eric-tramel/SwAMP-Demo/blob/master/python/amp.py
commit b32755caa8d6b59929174e2a06cc685bae5849b6
"""
def setUp(self):
self.ns = [1024, 2048, 2048, 2000, 1000]
self.ms = [770, 780, 1024, 800, 500]
self.ks = [440, 440, 1024, 126, 88]
self.sigma_sqs = [0.0, 0.0, 0.0, 1e-3, 1e-2]
self.sigma_sqs_init = [1e-6, 1e-6, 1e-6, 1, 1]
self.theta_bars = [0.0, 0.0, 0.0, 0.2, 0.0]
self.theta_tildes = [1.0, 1.0, 1.0, 0.3, 1.0]
self.taus = [float(k) / n for (k, n) in zip(self.ks, self.ns)]
self.tolerance = 1e-16
self.iterations = 500
np.random.seed(6021)
self.seeds = np.random.randint(1000, 80000, size=len(self.ns))
file_path = os.path.dirname(os.path.abspath(__file__)) + os.sep
fixed_sigma_sq_sol_file = np.load(
file_path + 'gamp_fixed_sigma_sq_sols.npz')
em_sigma_sq_sol_file = np.load(
file_path + 'gamp_em_sigma_sq_sols.npz')
self.comparison_solutions_fixed_sigma_sq = [
fixed_sigma_sq_sol_file[arr] for arr in sorted(
fixed_sigma_sq_sol_file.files)]
self.comparison_solutions_em_sigma_sq = [
em_sigma_sq_sol_file[arr] for arr in sorted(
em_sigma_sq_sol_file.files)]
def tearDown(self):
magni.cs.reconstruction.gamp.config.reset()
def testFixedGAMPComparison(self):
for ix in range(len(self.ns)):
# Setup GAMP solver
input_channel_params = {'tau': self.taus[ix],
'theta_bar': self.theta_bars[ix],
'theta_tilde': self.theta_tildes[ix],
'use_em': False}
output_channel_params = {
'sigma_sq': self.sigma_sqs_init[ix],
'noise_level_estimation': 'fixed'}
gamp_config = {'tolerance': self.tolerance,
'iterations': self.iterations,
'input_channel_parameters': input_channel_params,
'output_channel_parameters': output_channel_params}
magni.cs.reconstruction.gamp.config.update(gamp_config)
# Generate problem instance
z, A, alpha = use_gaussian(
self.ns[ix], self.ms[ix], self.ks[ix], self.seeds[ix])
A_asq = np.abs(A)**2
if self.sigma_sqs[ix] > 0:
y = z + np.random.normal(
size=z.shape, loc=0.0, scale=np.sqrt(self.sigma_sqs[ix]))
else:
y = z
# Run solver
alpha_hat = magni.cs.reconstruction.gamp.run(y, A, A_asq)
# Compare result
self.assertTrue(
np.allclose(
self.comparison_solutions_fixed_sigma_sq[ix],
alpha_hat.flatten()))
    def testEMGAMPComparison(self):
        """
        Compare GAMP with EM noise level estimation against stored references.

        For each pre-defined problem setup, GAMP is run with EM estimation
        of the noise level and the resulting coefficient estimate is compared
        to the solution loaded from 'gamp_em_sigma_sq_sols.npz' in setUp.
        """
        for ix in range(len(self.ns)):
            # Setup GAMP solver
            # NOTE(review): 'use_em' is False here even though this is the
            # EM test - EM is only applied to the noise level via the output
            # channel ('noise_level_estimation': 'em'); confirm intended.
            input_channel_params = {'tau': self.taus[ix],
                                    'theta_bar': self.theta_bars[ix],
                                    'theta_tilde': self.theta_tildes[ix],
                                    'use_em': False}
            output_channel_params = {
                'sigma_sq': self.sigma_sqs_init[ix],
                'noise_level_estimation': 'em'}
            gamp_config = {'tolerance': self.tolerance,
                           'iterations': self.iterations,
                           'input_channel_parameters': input_channel_params,
                           'output_channel_parameters': output_channel_params}
            magni.cs.reconstruction.gamp.config.update(gamp_config)
            # Generate problem instance
            z, A, alpha = use_gaussian(
                self.ns[ix], self.ms[ix], self.ks[ix], self.seeds[ix])
            A_asq = np.abs(A)**2
            if self.sigma_sqs[ix] > 0:
                y = z + np.random.normal(
                    size=z.shape, loc=0.0, scale=np.sqrt(self.sigma_sqs[ix]))
            else:
                y = z
            # Run solver
            alpha_hat = magni.cs.reconstruction.gamp.run(y, A, A_asq)
            # Compare result
            # The difference in EM learning for AMP vs Symmetric GAMP with AWGN
            # output channel makes it difficult to compare the results.
            # Thus, we only compare the non-zeros up to atol=1e-7.
            self.assertTrue(
                np.allclose(
                    self.comparison_solutions_em_sigma_sq[ix][:self.ks[ix]],
                    alpha_hat.flatten()[:self.ks[ix]],
                    atol=1e-7))
class FastOpsTests(unittest.TestCase):
    """
    Tests of FastOp input, i.e. function based measurements and FFT dictionary.
    The following tests are implemented:
    - *test_AMP_with_DCT_FFT_vs_Separable_2D*
    - *test_GAMP_with_DCT_FFT_vs_Separable_2D_rangan_sum_approx*
    - *test_GAMP_with_DCT_FFT_vs_Separable_2D_krzakala_sum_approx*
    - *test_GAMP_with_DCT_Separable_full_transform_and_precision*
    - *test_IT_with_DCT*
    - *test_IT_with_DFT*
    - *test_IT_with_DCT_and_precision*
    """
    def setUp(self):
        # Build a small 25x25 imaging problem: a spiral scan measurement
        # matrix Phi and k-sparse coefficient vectors (real and complex).
        h = 25
        w = 25
        n = h * w
        k = 15
        self.problem_dim = (h, w)
        # Spiral scan pattern
        scan_length = 0.30 * 2 * h * w
        num_points = 10 * int(scan_length)
        img_coords = magni.imaging.measurements.spiral_sample_image(
            h, w, scan_length, num_points, rect_area=True)
        self.Phi = magni.imaging.measurements.construct_measurement_matrix(
            img_coords, h, w)
        np.random.seed(6021)
        self.alpha_real = np.zeros((n, 1))
        self.alpha_real[:k, 0] = np.random.normal(size=k, loc=2, scale=2.0)
        self.alpha_complex = np.zeros((n, 1), dtype=np.complex128)
        self.alpha_complex[:k, 0] = (np.random.randn(k) +
                                     1j * np.random.randn(k))
        self.noise = np.random.normal(size=(self.Phi.shape[0], 1), scale=0.01)
        magni.cs.reconstruction.it.config.update(
            {'iterations': 200, 'threshold': 'fixed', 'threshold_fixed': k})
        magni.cs.reconstruction.gamp.config.update(
            {'iterations': 500,
             'tolerance': 1e-6,
             'input_channel_parameters': {'tau': k/n,
                                          'theta_bar': 2.0,
                                          'theta_tilde': 4.0,
                                          'use_em': False},
             'output_channel_parameters': {'sigma_sq': 1.0,
                                           'noise_level_estimation': 'median'}}
        )
    def tearDown(self):
        # Reset all shared algorithm configurations mutated by the tests.
        magni.cs.reconstruction.it.config.reset()
        magni.cs.reconstruction.amp.config.reset()
        magni.cs.reconstruction.gamp.config.reset()
    def _fft_and_separable_dct_problems(self):
        # Build the same noisy DCT synthesis problem twice: once with the
        # FFT-based FastOp dictionary and once with an explicit separable
        # 2D transform. Both must yield identical measurement vectors.
        Psi_fft = magni.imaging.dictionaries.get_DCT(self.problem_dim)
        A_fft = magni.utils.matrices.MatrixCollection((self.Phi, Psi_fft))
        y_fft = A_fft.dot(self.alpha_real) + self.noise
        iDCT_mtx = magni.imaging.dictionaries.get_DCT_transform_matrix(
            self.problem_dim[0]).T
        Psi_sep = magni.utils.matrices.Separable2DTransform(iDCT_mtx, iDCT_mtx)
        A_sep = magni.utils.matrices.MatrixCollection((self.Phi, Psi_sep))
        y_sep = A_sep.dot(self.alpha_real) + self.noise
        self.assertTrue(np.allclose(y_fft, y_sep))
        return A_fft, y_fft, A_sep, y_sep
    def test_AMP_with_DCT_FFT_vs_Separable_2D(self):
        # FFT-based and separable DCT must give matching AMP reconstructions.
        A_fft, y_fft, A_sep, y_sep = self._fft_and_separable_dct_problems()
        alpha_hat_fft = self._amp_run(y_fft, A_fft, self.alpha_real,
                                      success=True)
        alpha_hat_sep = self._amp_run(y_sep, A_sep, self.alpha_real,
                                      success=True)
        self.assertTrue(np.allclose(alpha_hat_fft, alpha_hat_sep))
    def test_GAMP_with_DCT_FFT_vs_Separable_2D_rangan_sum_approx(self):
        # FFT-based and separable DCT must give matching GAMP reconstructions
        # under the default (rangan) sum approximation.
        A_fft, y_fft, A_sep, y_sep = self._fft_and_separable_dct_problems()
        self.assertEqual(
            magni.cs.reconstruction.gamp.config['sum_approximation_constant'],
            {'rangan': 1.0})
        alpha_hat_fft = self._gamp_run(
            y_fft, A_fft, None, self.alpha_real, success=True)
        alpha_hat_sep = self._gamp_run(
            y_sep, A_sep, None, self.alpha_real, success=True)
        self.assertTrue(np.allclose(alpha_hat_fft, alpha_hat_sep))
    def test_GAMP_with_DCT_FFT_vs_Separable_2D_krzakala_sum_approx(self):
        # As above, but using the krzakala sum approximation constant 1/n.
        magni.cs.reconstruction.gamp.config['sum_approximation_constant'] = {
            'krzakala': 1.0 / (self.problem_dim[0] * self.problem_dim[1])}
        A_fft, y_fft, A_sep, y_sep = self._fft_and_separable_dct_problems()
        self.assertEqual(
            magni.cs.reconstruction.gamp.config['sum_approximation_constant'],
            {'krzakala': 1.0 / (self.problem_dim[0] * self.problem_dim[1])})
        alpha_hat_fft = self._gamp_run(
            y_fft, A_fft, None, self.alpha_real, success=True)
        alpha_hat_sep = self._gamp_run(
            y_sep, A_sep, None, self.alpha_real, success=True)
        self.assertTrue(np.allclose(alpha_hat_fft, alpha_hat_sep))
    def _check_gamp_dct_precision(self, precision):
        # Run GAMP on a separable DCT problem built entirely in the given
        # float precision and verify the precision is preserved end-to-end.
        magni.cs.reconstruction.gamp.config['precision_float'] = precision
        self.assertEqual(
            magni.cs.reconstruction.gamp.config['precision_float'], precision)
        iDCT_mtx = precision(
            magni.imaging.dictionaries.get_DCT_transform_matrix(
                self.problem_dim[0]).T)
        iDCT_mtx_asq = np.abs(iDCT_mtx) ** 2
        Psi = magni.utils.matrices.Separable2DTransform(iDCT_mtx, iDCT_mtx)
        Psi_asq = magni.utils.matrices.Separable2DTransform(iDCT_mtx_asq,
                                                            iDCT_mtx_asq)
        A = magni.utils.matrices.MatrixCollection((self.Phi, Psi))
        A_asq = magni.utils.matrices.MatrixCollection((self.Phi, Psi_asq))
        # Cast both the coefficients and the noise to the tested precision
        # (the original float128 branch cast the noise to float64, which
        # silently degraded the noise precision).
        y = A.dot(precision(self.alpha_real)) + precision(self.noise)
        self.assertEqual(y.dtype, precision)
        self.assertEqual(A.T.dot(y).dtype, precision)
        self.assertEqual(A_asq.T.dot(y).dtype, precision)
        self.assertEqual(A_asq.A.dtype, (A.A**2).dtype)
        self.assertTrue(np.allclose(A_asq.A, A.A**2))
        alpha_hat = self._gamp_run(y, A, A_asq, self.alpha_real)
        self.assertEqual(alpha_hat.dtype, precision)
    def test_GAMP_with_DCT_Separable_full_transform_and_precision(self):
        self._check_gamp_dct_precision(np.float32)
        self._check_gamp_dct_precision(np.float64)
        # Extended precision is not available on all platforms.
        if hasattr(np, 'float128'):
            self._check_gamp_dct_precision(np.float128)
    def test_IT_with_DCT_and_precision(self):
        Psi = magni.imaging.dictionaries.get_DCT(self.problem_dim)
        A = magni.utils.matrices.MatrixCollection((self.Phi, Psi))
        # Scipy DCT does not support float128, so only 32/64 bit floats.
        for precision in (np.float32, np.float64):
            magni.cs.reconstruction.it.config.update(
                {'precision_float': precision})
            self.assertEqual(
                magni.cs.reconstruction.it.config['precision_float'],
                precision)
            y = A.dot(precision(self.alpha_real))
            self.assertEqual(y.dtype, precision)
            self.assertEqual(A.T.dot(y).dtype, precision)
            alpha_hat = self._iht_run(y, A, self.alpha_real)
            self.assertEqual(alpha_hat.dtype, precision)
            alpha_hat = self._ist_run(y, A, self.alpha_real)
            self.assertEqual(alpha_hat.dtype, precision)
    def test_IT_with_DCT(self):
        # Noiseless IHT/IST reconstruction with the FFT-based DCT dictionary.
        Psi = magni.imaging.dictionaries.get_DCT(self.problem_dim)
        A = magni.utils.matrices.MatrixCollection((self.Phi, Psi))
        y = A.dot(self.alpha_real)
        self._iht_run(y, A, self.alpha_real)
        self._ist_run(y, A, self.alpha_real)
    def test_IT_with_DFT(self):
        # Noiseless IHT/IST reconstruction with the FFT-based DFT dictionary
        # for both real and complex coefficient vectors.
        Psi = magni.imaging.dictionaries.get_DFT(self.problem_dim)
        A = magni.utils.matrices.MatrixCollection((self.Phi, Psi))
        y_real = A.dot(self.alpha_real)
        y_complex = A.dot(self.alpha_complex)
        self._iht_run(y_real, A, self.alpha_real)
        self._ist_run(y_real, A, self.alpha_real)
        self._iht_run(y_complex, A, self.alpha_complex)
        self._ist_run(y_complex, A, self.alpha_complex)
    def _amp_run(self, y, A, a, success=True):
        # Configure the AMP threshold from the undersampling ratio, run the
        # solver, and check reconstruction (or its absence) within atol=1e-1.
        threshold_params = {
            'theta': magni.cs.reconstruction.amp.util.theta_mm(
                float(A.shape[0]) / A.shape[1]), 'tau_hat_sq': 1.0,
            'threshold_level_update_method': 'residual'}
        magni.cs.reconstruction.amp.config['threshold_parameters'].update(
            threshold_params)
        a_hat = magni.cs.reconstruction.amp.run(y, A)
        if success:
            self.assertTrue(np.allclose(a_hat, a, atol=1e-1))
        else:
            self.assertFalse(np.allclose(a_hat, a, atol=1e-1))
        return a_hat
    def _gamp_run(self, y, F, F_sq, a, success=True):
        # Run the GAMP solver and check reconstruction within atol=1e-1.
        a_hat = magni.cs.reconstruction.gamp.run(y, F, F_sq)
        if success:
            self.assertTrue(np.allclose(a_hat, a, atol=1e-1))
        else:
            self.assertFalse(np.allclose(a_hat, a, atol=1e-1))
        return a_hat
    def _iht_run(self, y, A, alpha, success=True):
        # Iterative hard thresholding.
        return self._it_run('hard', y, A, alpha, success)
    def _ist_run(self, y, A, alpha, success=True):
        # Iterative soft thresholding.
        return self._it_run('soft', y, A, alpha, success)
    def _it_run(self, operator, y, A, alpha, success):
        # Shared IHT/IST driver: configure the threshold operator, run the
        # solver, and check reconstruction within atol=1e-2.
        magni.cs.reconstruction.it.config.update(
            {'threshold_operator': operator})
        self.assertEqual(
            magni.cs.reconstruction.it.config['threshold_operator'], operator)
        alpha_hat = magni.cs.reconstruction.it.run(y, A)
        if success:
            self.assertTrue(np.allclose(alpha_hat, alpha, atol=1e-2))
        else:
            self.assertFalse(np.allclose(alpha_hat, alpha, atol=1e-2))
        return alpha_hat
class FeatureTest(object):
    """
    Reconstruction algorithm feature test base class.
    This class defines a reconstruction problem which may be used as the base
    for testing features of reconstruction algorithms such as warm start or
    different stop criteria.
    See the individual feature test classes for further information.
    """
    def setUp(self):
        # Fixed rademacher (AMP/IT) and gaussian (GAMP) problem instances.
        seed = 6021
        n = 500
        delta = 0.78
        rho = 0.17
        m = int(delta * n)
        self.k = int(rho * m)
        self.tau = delta * rho
        self.y, self.A, self.alpha = use_rademacher(n, m, self.k, seed=seed)
        self.oracle_support = self.alpha != 0
        self.z, self.F, self.a = use_gaussian(n, m, self.k, seed=seed)
        self.F_sq = self.F**2
        magni.cs.reconstruction.it.config.update(iterations=200)
        magni.cs.reconstruction.gamp.config.update(iterations=200)
    def tearDown(self):
        # Reset all shared algorithm configurations mutated by the tests.
        magni.cs.reconstruction.it.config.reset()
        magni.cs.reconstruction.amp.config.reset()
        magni.cs.reconstruction.gamp.config.reset()
    def _setup_amp_threshold(self, A):
        # Configure the AMP soft threshold level from the undersampling ratio.
        threshold_params = {
            'theta': magni.cs.reconstruction.amp.util.theta_mm(
                float(A.shape[0]) / A.shape[1]), 'tau_hat_sq': 1.0,
            'threshold_level_update_method': 'residual'}
        magni.cs.reconstruction.amp.config['threshold_parameters'].update(
            threshold_params)
    def _assert_reconstruction(self, a_hat, a, success):
        # Assert that the reconstruction matches (or, for success=False,
        # does not match) the true solution within atol=1e-2.
        if success:
            self.assertTrue(np.allclose(a_hat, a, atol=1e-2))
        else:
            self.assertFalse(np.allclose(a_hat, a, atol=1e-2))
    def _amp_run(self, y, A, a, success=True):
        # Run AMP and return the coefficient estimate.
        self._setup_amp_threshold(A)
        a_hat = magni.cs.reconstruction.amp.run(y, A)
        self._assert_reconstruction(a_hat, a, success)
        return a_hat
    def _amp_history_run(self, y, A, a, success=True):
        # Run AMP with history reporting enabled and return the history.
        self._setup_amp_threshold(A)
        a_hat, history = magni.cs.reconstruction.amp.run(y, A)
        self._assert_reconstruction(a_hat, a, success)
        return history
    def _gamp_run(self, z, F, F_sq, a, success=True):
        # Run GAMP and return the coefficient estimate.
        a_hat = magni.cs.reconstruction.gamp.run(z, F, F_sq)
        self._assert_reconstruction(a_hat, a, success)
        return a_hat
    def _gamp_history_run(self, z, F, F_sq, a, success=True):
        # Run GAMP with history reporting enabled and return the history.
        a_hat, history = magni.cs.reconstruction.gamp.run(z, F, F_sq)
        self._assert_reconstruction(a_hat, a, success)
        return history
    def _configure_it(self, operator):
        # Select the IT threshold operator and verify the config took effect.
        magni.cs.reconstruction.it.config.update(
            {'threshold_operator': operator})
        self.assertEqual(
            magni.cs.reconstruction.it.config['threshold_operator'], operator)
    def _iht_run(self, y, A, alpha, success=True):
        # Run iterative hard thresholding and return the estimate.
        self._configure_it('hard')
        alpha_hat = magni.cs.reconstruction.it.run(y, A)
        self._assert_reconstruction(alpha_hat, alpha, success)
        return alpha_hat
    def _ist_run(self, y, A, alpha, success=True):
        # Run iterative soft thresholding and return the estimate.
        self._configure_it('soft')
        alpha_hat = magni.cs.reconstruction.it.run(y, A)
        self._assert_reconstruction(alpha_hat, alpha, success)
        return alpha_hat
    def _ist_history_run(self, y, A, alpha, success=True):
        # Run IST with history reporting enabled and return the history.
        self._configure_it('soft')
        alpha_hat, history = magni.cs.reconstruction.it.run(y, A)
        self._assert_reconstruction(alpha_hat, alpha, success)
        return history
class FeaturePrecisionFloatTest(FeatureTest, unittest.TestCase):
    """
    Test of the precision float feature in reconstruction algorithms.
    The following tests are implemented:
    - *test_float32_AMP*
    - *test_float32_GAMP*
    - *test_float32_GAMP_EM*
    - *test_float32_GAMP_EM_BL*
    - *test_float64_AMP*
    - *test_float64_GAMP*
    - *test_float64_GAMP_EM*
    - *test_float64_GAMP_EM_BL*
    - *test_float128_AMP*
    - *test_float128_GAMP*
    - *test_float128_GAMP_EM*
    - *test_float128_GAMP_EM_BL*
    - *test_float32_IST*
    - *test_float64_IST*
    - *test_float128_IST*
    """
    def _run_amp_precision_test(self, precision, success):
        # Configure AMP for `precision`, cast the problem data accordingly,
        # and verify the reconstruction is carried out in that precision.
        magni.cs.reconstruction.amp.config['precision_float'] = precision
        self.y = precision(self.y)
        self.A = precision(self.A)
        self.assertEqual(
            magni.cs.reconstruction.amp.config['precision_float'], precision)
        a_hat = self._amp_run(self.y, self.A, self.alpha, success=success)
        self.assertEqual(a_hat.dtype, precision)
    def _run_gamp_precision_test(self, precision, variant, success):
        # Configure GAMP for `precision` and the channel `variant`:
        # 'plain' - iid Bernoulli-Gauss input channel without EM; sample
        #           variance noise level estimation.
        # 'em'    - iid Bernoulli-Gauss input channel with EM; EM noise
        #           level estimation.
        # 'em_bl' - general weighted sparse (GWS) input channel with an iid
        #           Laplace prior and full EM learning.
        if variant == 'plain':
            input_channel_params = {'tau': self.tau, 'theta_bar': 0,
                                    'theta_tilde': 1, 'use_em': False}
            output_channel_params = {
                'sigma_sq': 1, 'noise_level_estimation': 'sample_variance'}
        elif variant == 'em':
            input_channel_params = {'tau': self.tau, 'theta_bar': 0,
                                    'theta_tilde': 1, 'use_em': True}
            output_channel_params = {
                'sigma_sq': 1, 'noise_level_estimation': 'em'}
        else:
            input_channel_params = {
                'tau': self.tau, 'weights': np.ones_like(self.alpha),
                'phi_channel': magni.cs.reconstruction.gamp.input_channel.IIDL,
                'phi_channel_parameters': {'mu': 0, 'b': 1, 'use_em': True},
                'use_em': True}
            output_channel_params = {
                'sigma_sq': 1, 'noise_level_estimation': 'em'}
        sc = magni.cs.reconstruction.gamp.stop_criterion
        gamp_config = {'input_channel_parameters': input_channel_params,
                       'output_channel_parameters': output_channel_params,
                       'stop_criterion': sc.Residual,
                       'precision_float': precision}
        if variant == 'em_bl':
            gamp_config['input_channel'] = (
                magni.cs.reconstruction.gamp.input_channel.GWS)
        magni.cs.reconstruction.gamp.config.update(gamp_config)
        self.z = precision(self.z)
        self.F = precision(self.F)
        self.F_sq = precision(self.F_sq)
        self.assertEqual(
            magni.cs.reconstruction.gamp.config['precision_float'], precision)
        a_hat = self._gamp_run(
            self.z, self.F, self.F_sq, self.a, success=success)
        self.assertEqual(a_hat.dtype, precision)
    def _run_ist_precision_test(self, precision, success):
        # Configure IST for `precision`, cast the problem data accordingly,
        # and verify the reconstruction is carried out in that precision.
        magni.cs.reconstruction.it.config.update(
            {'precision_float': precision})
        self.A = precision(self.A)
        self.y = precision(self.y)
        self.assertEqual(
            magni.cs.reconstruction.it.config['precision_float'], precision)
        alpha_hat = self._ist_run(self.y, self.A, self.alpha, success=success)
        self.assertEqual(alpha_hat.dtype, precision)
    def test_float32_AMP(self, success=True):
        self._run_amp_precision_test(np.float32, success)
    def test_float32_GAMP(self, success=True):
        self._run_gamp_precision_test(np.float32, 'plain', success)
    def test_float32_GAMP_EM(self, success=True):
        self._run_gamp_precision_test(np.float32, 'em', success)
    def test_float32_GAMP_EM_BL(self, success=True):
        self._run_gamp_precision_test(np.float32, 'em_bl', success)
    def test_float64_AMP(self, success=True):
        self._run_amp_precision_test(np.float64, success)
    def test_float64_GAMP(self, success=True):
        self._run_gamp_precision_test(np.float64, 'plain', success)
    def test_float64_GAMP_EM(self, success=True):
        self._run_gamp_precision_test(np.float64, 'em', success)
    def test_float64_GAMP_EM_BL(self, success=True):
        self._run_gamp_precision_test(np.float64, 'em_bl', success)
    @unittest.skipIf(not hasattr(np, 'float128'), 'precision is not available')
    def test_float128_AMP(self, success=True):
        self._run_amp_precision_test(np.float128, success)
    @unittest.skipIf(not hasattr(np, 'float128'), 'precision is not available')
    def test_float128_GAMP(self, success=True):
        self._run_gamp_precision_test(np.float128, 'plain', success)
    @unittest.skipIf(not hasattr(np, 'float128'), 'precision is not available')
    def test_float128_GAMP_EM(self, success=True):
        self._run_gamp_precision_test(np.float128, 'em', success)
    @unittest.skipIf(not hasattr(np, 'float128'), 'precision is not available')
    def test_float128_GAMP_EM_BL(self, success=True):
        self._run_gamp_precision_test(np.float128, 'em_bl', success)
    def test_float32_IST(self, success=True):
        self._run_ist_precision_test(np.float32, success)
    def test_float64_IST(self, success=True):
        self._run_ist_precision_test(np.float64, success)
    @unittest.skipIf(not hasattr(np, 'float128'), 'precision is not available')
    def test_float128_IST(self, success=True):
        self._run_ist_precision_test(np.float128, success)
class FeatureReportHistoryTest(FeatureTest, unittest.TestCase):
    """
    Test the report history feature in reconstruction algorithms.
    The following tests are implemented:
    - *test_MSE_CONVERGENCE_AMP* (stop based on MSE)
    - *test_MAX_ITERATION_AMP* (stop based on max iterations)
    - *test_MSE_CONVERGENCE_GAMP* (stop based on MSE)
    - *test_MAX_ITERATIONS_GAMP* (stop based on max iterations)
    - *test_MSE_CONVERGENCE_IST* (stop based on MSE)
    - *test_MAX_ITERATIONS_IST* (stop based on max iterations)
    """
    def test_MSE_CONVERGENCE_AMP(self, success=True):
        magni.cs.reconstruction.amp.config.update(
            {'report_history': True, 'true_solution': self.alpha})
        history = self._amp_history_run(self.y, self.A, self.alpha,
                                        success=success)
        self.assertEqual(history['stop_criterion'], 'MSECONVERGENCE')
        self.assertEqual(history['stop_reason'], 'MSECONVERGENCE')
        self.assertEqual(history['stop_iteration'], 31)
        self.assertEqual(len(history['MSE']), 33)
        self.assertEqual(len(history['threshold_parameters']), 33)
        self.assertEqual(len(history['alpha_bar']), 33)
        self.assertEqual(len(history['stop_criterion_value']), 33)
    def test_MAX_ITERATION_AMP(self, success=False):
        # With only 8 iterations the reconstruction is not expected to
        # succeed, hence the success=False default.
        magni.cs.reconstruction.amp.config.update(
            {'report_history': True, 'iterations': 8})
        history = self._amp_history_run(self.y, self.A, self.alpha,
                                        success=success)
        self.assertEqual(history['stop_criterion'], 'MSECONVERGENCE')
        self.assertEqual(history['stop_reason'], 'MAX_ITERATIONS')
        self.assertEqual(history['stop_iteration'], 7)
        self.assertEqual(len(history['MSE']), 1)
        self.assertEqual(len(history['threshold_parameters']), 9)
        self.assertEqual(len(history['alpha_bar']), 9)
        self.assertEqual(len(history['stop_criterion_value']), 9)
    def test_MSE_CONVERGENCE_GAMP(self, success=True):
        input_channel_params = {'tau': self.tau, 'theta_bar': 0,
                                'theta_tilde': 1, 'use_em': False}
        output_channel_params = {'sigma_sq': 1,
                                 'noise_level_estimation': 'sample_variance'}
        magni.cs.reconstruction.gamp.config.update(
            {'input_channel_parameters': input_channel_params,
             'output_channel_parameters': output_channel_params,
             'report_history': True,
             'true_solution': self.a})
        history = self._gamp_history_run(
            self.z, self.F, self.F_sq, self.a, success=success)
        self.assertEqual(history['stop_criterion'], 'MSECONVERGENCE')
        self.assertEqual(history['stop_reason'], 'MSECONVERGENCE')
        self.assertEqual(history['stop_iteration'], 10)
        self.assertEqual(len(history['MSE']), 12)
        self.assertEqual(len(history['input_channel_parameters']), 12)
        self.assertEqual(len(history['output_channel_parameters']), 12)
        self.assertEqual(len(history['alpha_bar']), 12)
        self.assertEqual(len(history['alpha_tilde']), 12)
        self.assertEqual(len(history['stop_criterion_value']), 12)
    def test_MAX_ITERATIONS_GAMP(self, success=False):
        input_channel_params = {'tau': self.tau, 'theta_bar': 0,
                                'theta_tilde': 1, 'use_em': False}
        output_channel_params = {'sigma_sq': 1,
                                 'noise_level_estimation': 'sample_variance'}
        magni.cs.reconstruction.gamp.config.update(
            {'input_channel_parameters': input_channel_params,
             'output_channel_parameters': output_channel_params,
             'report_history': True,
             'iterations': 8})
        history = self._gamp_history_run(
            self.z, self.F, self.F_sq, self.a, success=success)
        self.assertEqual(history['stop_criterion'], 'MSECONVERGENCE')
        self.assertEqual(history['stop_reason'], 'MAX_ITERATIONS')
        self.assertEqual(history['stop_iteration'], 7)
        self.assertEqual(len(history['MSE']), 1)
        # Check both channel parameter histories (the original test
        # erroneously checked 'output_channel_parameters' twice).
        self.assertEqual(len(history['input_channel_parameters']), 9)
        self.assertEqual(len(history['output_channel_parameters']), 9)
        self.assertEqual(len(history['alpha_bar']), 9)
        self.assertEqual(len(history['alpha_tilde']), 9)
        self.assertEqual(len(history['stop_criterion_value']), 9)
    def test_MSE_CONVERGENCE_IST(self, success=False):
        # The success default is False since the early MSE convergence stop
        # (at iteration 5) leaves an inaccurate reconstruction.
        magni.cs.reconstruction.it.config.update(
            {'report_history': True,
             'stop_criterion': 'mse_convergence',
             'true_solution': self.alpha})
        history = self._ist_history_run(
            self.y, self.A, self.alpha, success=success)
        self.assertEqual(history['stop_criterion'], 'MSE_CONVERGENCE')
        self.assertEqual(history['stop_reason'], 'MSE_CONVERGENCE')
        self.assertEqual(history['stop_iteration'], 5)
        self.assertEqual(len(history['MSE']), 7)
        self.assertEqual(len(history['alpha']), 7)
        self.assertEqual(len(history['stop_criterion_value']), 7)
    def test_MAX_ITERATIONS_IST(self, success=False):
        magni.cs.reconstruction.it.config.update(
            {'report_history': True,
             'stop_criterion': 'mse_convergence',
             'iterations': 2})
        history = self._ist_history_run(
            self.y, self.A, self.alpha, success=success)
        self.assertEqual(history['stop_criterion'], 'MSE_CONVERGENCE')
        self.assertEqual(history['stop_reason'], 'MAX_ITERATIONS')
        self.assertEqual(history['stop_iteration'], 1)
        self.assertEqual(len(history['MSE']), 1)
        self.assertEqual(len(history['alpha']), 3)
        self.assertEqual(len(history['stop_criterion_value']), 3)
class FeatureStopCriterionTest(FeatureTest, unittest.TestCase):
"""
Test of the stop criterion feature in reconstruction algorithms.
The following tests are implemented:
    - *test_AMP_stop_criterion_error_handling*
    - *test_residual_AMP* (stop based on residual)
    - *test_residual_measurements_ratio_AMP* (stop based on ratio of
      measurements to residual)
    - *test_normalised_MSE_convergence_AMP* (stop based on NMSE)
    - *test_GAMP_stop_criterion_error_handling*
    - *test_residual_GAMP* (stop based on residual)
    - *test_residual_measurements_ratio_GAMP* (stop based on ratio of
      measurements to residual)
    - *test_normalised_MSE_convergence_GAMP* (stop based on NMSE)
    - *test_residual_IST* (stop based on residual)
    - *test_residual_measurements_ratio_IST* (stop based on ratio of
      measurements to residual)
    - *test_normalised_mse_IST* (stop based on NMSE)
"""
def test_AMP_stop_criterion_error_handling(self):
sc = magni.cs.reconstruction.amp.stop_criterion
with self.assertRaises(TypeError):
sc.ValidatedStopCriterion('fail')
with self.assertRaises(TypeError):
sc.ValidatedStopCriterion({})('fail')
with self.assertRaises(TypeError):
sc.NormalisedMSEConvergence('fail')
with self.assertRaises(TypeError):
sc.NormalisedMSEConvergence({'tolerance': 1e-3})('fail')
def test_residual_AMP(self, success=True):
sc = magni.cs.reconstruction.amp.stop_criterion
magni.cs.reconstruction.amp.config.update(
{'stop_criterion': sc.Residual})
self._amp_run(self.y, self.A, self.alpha, success=success)
def test_residual_measurements_ratio_AMP(self, success=True):
sc = magni.cs.reconstruction.amp.stop_criterion
magni.cs.reconstruction.amp.config.update(
{'stop_criterion': sc.ResidualMeasurementsRatio})
self._amp_run(self.y, self.A, self.alpha, success=success)
def test_normalised_MSE_convergence_AMP(self, success=True):
sc = magni.cs.reconstruction.amp.stop_criterion
magni.cs.reconstruction.amp.config.update(
{'stop_criterion': sc.NormalisedMSEConvergence})
self._amp_run(self.y, self.A, self.alpha, success=success)
def test_GAMP_stop_criterion_error_handling(self):
sc = magni.cs.reconstruction.gamp.stop_criterion
with self.assertRaises(TypeError):
sc.ValidatedStopCriterion('fail')
with self.assertRaises(TypeError):
sc.ValidatedStopCriterion({})('fail')
with self.assertRaises(TypeError):
sc.NormalisedMSEConvergence('fail')
with self.assertRaises(TypeError):
sc.NormalisedMSEConvergence({'tolerance': 1e-3})('fail')
def test_residual_GAMP(self, success=True):
input_channel_params = {'tau': self.tau, 'theta_bar': 0,
'theta_tilde': 1, 'use_em': False}
output_channel_params = {'sigma_sq': 1,
'noise_level_estimation': 'sample_variance'}
sc = magni.cs.reconstruction.gamp.stop_criterion
magni.cs.reconstruction.gamp.config.update(
{'input_channel_parameters': input_channel_params,
'output_channel_parameters': output_channel_params,
'stop_criterion': sc.Residual})
self._gamp_run(self.z, self.F, self.F_sq, self.a, success=success)
def test_residual_measurements_ratio_GAMP(self, success=True):
input_channel_params = {'tau': self.tau, 'theta_bar': 0,
'theta_tilde': 1, 'use_em': False}
output_channel_params = {'sigma_sq': 1,
'noise_level_estimation': 'sample_variance'}
sc = magni.cs.reconstruction.gamp.stop_criterion
magni.cs.reconstruction.gamp.config.update(
{'input_channel_parameters': input_channel_params,
'output_channel_parameters': output_channel_params,
'stop_criterion': sc.ResidualMeasurementsRatio})
self._gamp_run(self.z, self.F, self.F_sq, self.a, success=success)
def test_normalised_MSE_convergence_GAMP(self, success=True):
input_channel_params = {'tau': self.tau, 'theta_bar': 0,
'theta_tilde': 1, 'use_em': False}
output_channel_params = {'sigma_sq': 1,
'noise_level_estimation': 'sample_variance'}
sc = magni.cs.reconstruction.gamp.stop_criterion
magni.cs.reconstruction.gamp.config.update(
{'input_channel_parameters': input_channel_params,
'output_channel_parameters': output_channel_params,
'stop_criterion': sc.NormalisedMSEConvergence})
self._gamp_run(self.z, self.F, self.F_sq, self.a, success=success)
def test_residual_IST(self, success=True):
magni.cs.reconstruction.it.config.update(
{'stop_criterion': 'residual',
'tolerance': 1e-6})
self._ist_run(self.y, self.A, self.alpha, success=success)
def test_residual_measurements_ratio_IST(self, success=True):
magni.cs.reconstruction.it.config.update(
{'stop_criterion': 'residual_measurements_ratio',
'tolerance': 1e-6})
self._ist_run(self.y, self.A, self.alpha, success=success)
def test_normalised_mse_IST(self, success=True):
magni.cs.reconstruction.it.config.update(
{'stop_criterion': 'normalised_mse_convergence',
'tolerance': 1e-6,
'iterations': 500})
self._ist_run(self.y, self.A, self.alpha, success=success)
class FeatureWarmStartTest(FeatureTest, unittest.TestCase):
    """
    Test of the warm_start feature in reconstruction algorithms.
    The following tests are implemented:
    - *test_warm_start_IT* (Iterative thresholding)
    - *test_warm_start_AMP* (Approximate Message Passing)
    - *test_warm_start_GAMP* (Generalised Approximate Message Passing)
    """
    def test_warm_start_IT(self, success_iht=True, success_ist=True):
        """Run IHT and IST starting from a non-zero initial solution."""
        initial_guess = 0.1 * np.ones(self.alpha.shape)
        magni.cs.reconstruction.it.config.update({'warm_start': initial_guess})
        self._iht_run(self.y, self.A, self.alpha, success=success_iht)
        self._ist_run(self.y, self.A, self.alpha, success=success_ist)
        # The warm start vector must survive both runs in the configuration.
        self.assertIsNotNone(
            magni.cs.reconstruction.it.config['warm_start'])
    def test_warm_start_AMP(self, success=True):
        """Run AMP starting from a non-zero initial solution."""
        amp_config = {'warm_start': 0.1 * np.ones(self.a.shape)}
        magni.cs.reconstruction.amp.config.update(amp_config)
        self._amp_run(self.y, self.A, self.alpha, success=success)
    def test_warm_start_GAMP(self, success=True):
        """Run GAMP starting from a non-zero initial state pair."""
        in_params = {'tau': self.tau, 'theta_bar': 0,
                     'theta_tilde': 1, 'use_em': False}
        out_params = {'sigma_sq': 1,
                      'noise_level_estimation': 'sample_variance'}
        # GAMP warm starts are a (mean, variance) pair of arrays.
        initial_state = (0.1 * np.ones(self.a.shape),
                         2 * np.ones(self.a.shape))
        magni.cs.reconstruction.gamp.config.update(
            {'input_channel_parameters': in_params,
             'output_channel_parameters': out_params,
             'warm_start': initial_state})
        self._gamp_run(self.z, self.F, self.F_sq, self.a, success=success)
class FeatureGAMPChannelEMTest(FeatureTest, unittest.TestCase):
    """
    Test of GAMP Channel EM updates
    The following tests are implemented:
    - *test_IIDG_channel_EM* (EM update of pure Gauss channel)
    - *test_IIDL_channel_EM* (EM update of pure Laplace channel)
    """
    def test_IIDG_channel_EM(self, success=False):
        """Compare EM updates of a pure IIDG channel and a GWS-wrapped IIDG.

        Both runs use the same EM-updated Gauss prior, so the two estimates
        are expected to coincide (exact reconstruction is not expected here,
        hence success=False).
        """
        IIDG = magni.cs.reconstruction.gamp.input_channel.IIDG
        input_channel_params = {
            'theta_bar': 0, 'theta_tilde': 1, 'use_em': True}
        output_channel_params = {
            'sigma_sq': 1, 'noise_level_estimation': 'sample_variance'}
        magni.cs.reconstruction.gamp.config.update(
            {'input_channel_parameters': input_channel_params,
             'output_channel_parameters': output_channel_params,
             'input_channel': IIDG})
        # presumably _gamp_run (defined on FeatureTest) returns the estimate
        alpha_hat_pure_G = self._gamp_run(
            self.z, self.F, self.F_sq, self.a, success=success)
        # Same Gauss prior expressed through the weighted-sparse GWS wrapper.
        GWS = magni.cs.reconstruction.gamp.input_channel.GWS
        input_channel_params = {
            'tau': 1, 'weights': None, 'phi_channel': IIDG,
            'phi_channel_parameters': {
                'theta_bar': 0, 'theta_tilde': 1, 'use_em': True},
            'use_em': True}
        magni.cs.reconstruction.gamp.config.update(
            {'input_channel_parameters': input_channel_params,
             'output_channel_parameters': output_channel_params,
             'input_channel': GWS})
        alpha_hat_GWS_G = self._gamp_run(
            self.z, self.F, self.F_sq, self.a, success=success)
        # The two parameterisations must give numerically equal results.
        self.assertTrue(np.allclose(alpha_hat_pure_G, alpha_hat_GWS_G))
    def test_IIDL_channel_EM(self, success=False):
        """Compare EM updates of a pure IIDL channel and a GWS-wrapped IIDL.

        Mirrors test_IIDG_channel_EM for the Laplace prior; warnings are
        suppressed and a looser tolerance (atol=1e-4) is used for the
        comparison of the two estimates.
        """
        IIDL = magni.cs.reconstruction.gamp.input_channel.IIDL
        input_channel_params = {
            'mu': 0, 'b': 1 / np.sqrt(2), 'use_em': True}
        output_channel_params = {
            'sigma_sq': 1, 'noise_level_estimation': 'sample_variance'}
        magni.cs.reconstruction.gamp.config.update(
            {'input_channel_parameters': input_channel_params,
             'output_channel_parameters': output_channel_params,
             'input_channel': IIDL})
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            alpha_hat_pure_L = self._gamp_run(
                self.z, self.F, self.F_sq, self.a, success=success)
        GWS = magni.cs.reconstruction.gamp.input_channel.GWS
        input_channel_params = {
            'tau': 1, 'weights': None, 'phi_channel': IIDL,
            'phi_channel_parameters': {
                'mu': 0, 'b': 1 / np.sqrt(2), 'use_em': True},
            'use_em': True}
        magni.cs.reconstruction.gamp.config.update(
            {'input_channel_parameters': input_channel_params,
             'output_channel_parameters': output_channel_params,
             'input_channel': GWS})
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            alpha_hat_GWS_L = self._gamp_run(
                self.z, self.F, self.F_sq, self.a, success=success)
        self.assertTrue(
            np.allclose(alpha_hat_pure_L, alpha_hat_GWS_L, atol=1e-4))
class PhaseSpaceExtremesTest(unittest.TestCase):
    """
    Tests of border case (extreme) phase space values.
    The following tests are implemented:
    - *test_basic_setup* (not extremum)
    - *test_invalid_A_and_y* (empty A and y)
    - *test_k_equals_zero*
    - *test_k_equals_m*
    - *test_m_equals_one*
    - *test_m_equals_n*
    - *test_m_and_n_equals_one*
    - *test_n_equals_one*
    """
    def setUp(self):
        """Fix the baseline problem size and configure the IT/GAMP solvers."""
        # Baseline: n variables, m measurements, k non-zero coefficients.
        self.n = 500
        self.m = 200
        self.k = 10
        self.seed = 6021
        magni.cs.reconstruction.it.config.update(iterations=200)
        magni.cs.reconstruction.gamp.config.update(iterations=200)
        # GAMP setup
        input_channel_params = {'tau': self.k/self.n, 'theta_bar': 0,
                                'theta_tilde': 1, 'use_em': False}
        output_channel_params = {'sigma_sq': 0.5,
                                 'noise_level_estimation': 'sample_variance'}
        magni.cs.reconstruction.gamp.config.update(
            {'input_channel_parameters': input_channel_params,
             'output_channel_parameters': output_channel_params})
    def tearDown(self):
        """Reset all solver configurations to avoid cross-test leakage."""
        magni.cs.reconstruction.it.config.reset()
        magni.cs.reconstruction.amp.config.reset()
        magni.cs.reconstruction.gamp.config.reset()
    def test_basic_setup(self):
        """All solvers must succeed on the non-extreme baseline problem."""
        y, A, alpha = use_rademacher(self.n, self.m, self.k, seed=self.seed)
        A_asq = A**2
        self._iht_run(y, A, alpha)
        self._ist_run(y, A, alpha)
        self._gamp_run(y, A, A_asq, alpha)
    def test_invalid_A_and_y(self):
        """Empty inputs must be rejected with a ValueError by all solvers."""
        A = np.array([])
        A_asq = np.array([])
        y = np.array([])
        alpha = np.array([])
        with self.assertRaises(ValueError):
            self._iht_run(y, A, alpha)
        with self.assertRaises(ValueError):
            self._ist_run(y, A, alpha)
        with self.assertRaises(ValueError):
            self._gamp_run(y, A, A_asq, alpha)
    def test_k_equals_zero(self):
        """The all-zero solution (k=0) must be recovered by all solvers."""
        k = 0
        y, A, alpha = use_rademacher(self.n, self.m, k, seed=self.seed)
        A_asq = A**2
        self._iht_run(y, A, alpha)
        self._ist_run(y, A, alpha)
        self._gamp_run(y, A, A_asq, alpha)
    def test_k_equals_m(self):
        """With as many non-zeros as measurements, recovery must fail."""
        k = self.m
        y, A, alpha = use_rademacher(self.n, self.m, k, seed=self.seed)
        A_asq = A**2
        self._iht_run(y, A, alpha, success=False)
        self._ist_run(y, A, alpha, success=False)
        self._gamp_run(y, A, A_asq, alpha, success=False)
    def test_m_equals_one(self):
        """A single measurement is insufficient; recovery must fail."""
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            m = 1
            y, A, alpha = use_rademacher(self.n, m, self.k, seed=self.seed)
            A_asq = A**2
            self._iht_run(y, A, alpha, success=False)
            self._ist_run(y, A, alpha, success=False)
            self._gamp_run(y, A, A_asq, alpha, success=False)
    def test_m_equals_n(self):
        """A square (fully determined) system must be solved by all solvers."""
        m = self.n
        y, A, alpha = use_rademacher(self.n, m, self.k, seed=self.seed)
        A_asq = A**2
        self._iht_run(y, A, alpha)
        self._ist_run(y, A, alpha)
        self._gamp_run(y, A, A_asq, alpha)
    def test_m_and_n_equals_one(self):
        """The degenerate 1x1 system is expected to fail for all solvers."""
        n = 1
        m = 1
        k = 1
        y, A, alpha = use_rademacher(n, m, k, seed=self.seed)
        A_asq = A**2
        self._iht_run(y, A, alpha, success=False)
        self._ist_run(y, A, alpha, success=False)
        self._gamp_run(y, A, A_asq, alpha, success=False)
    def test_n_equals_one(self):
        """A single unknown with many measurements; only IST may fail."""
        n = 1
        k = 1
        y, A, alpha = use_rademacher(n, self.m, k, seed=self.seed)
        A_asq = A**2
        self._iht_run(y, A, alpha)
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            self._ist_run(y, A, alpha, success=False)
        self._gamp_run(y, A, A_asq, alpha)
    def _gamp_run(self, z, F, F_sq, a, success=True):
        """Run GAMP and assert (non-)closeness of the estimate to `a`."""
        a_hat = magni.cs.reconstruction.gamp.run(z, F, F_sq)
        if success:
            self.assertTrue(np.allclose(a_hat, a, atol=1e-2))
        else:
            self.assertFalse(np.allclose(a_hat, a, atol=1e-2))
    def _iht_run(self, y, A, alpha, success=True):
        """Run IT with hard thresholding and assert the expected outcome."""
        self.assertEqual(
            magni.cs.reconstruction.it.config['threshold_operator'], 'hard')
        alpha_hat = magni.cs.reconstruction.it.run(y, A)
        if success:
            self.assertTrue(np.allclose(alpha_hat, alpha, atol=1e-2))
        else:
            self.assertFalse(np.allclose(alpha_hat, alpha, atol=1e-2))
    def _ist_run(self, y, A, alpha, success=True):
        """Run IT with soft thresholding and assert the expected outcome."""
        ist_config = {'threshold_operator': 'soft'}
        magni.cs.reconstruction.it.config.update(ist_config)
        self.assertEqual(
            magni.cs.reconstruction.it.config['threshold_operator'], 'soft')
        alpha_hat = magni.cs.reconstruction.it.run(y, A)
        if success:
            self.assertTrue(np.allclose(alpha_hat, alpha, atol=1e-2))
        else:
            self.assertFalse(np.allclose(alpha_hat, alpha, atol=1e-2))
class PhaseSpaceTest(object):
    """
    Phase space test base class.
    The following tests are implemented:
    - *test_default_IT* (default configuration)
    - *test_fixed_IT* (fixed threshold)
    - *test_adaptive_fixed_IT* (adaptive step-size, fixed threshold)
    - *test_weighted_fixed_IT* (weighted, fixed threshold)
    - *test_residual_soft_threshold_AMP* (soft threshold, residual level)
    - *test_median_soft_threshold_AMP* (soft threshold, median level)
    - *test_iidsGB_AWGN_GAMP* (s-GB)
    - *test_iidsGB_AWGN_EM_GAMP* (s-GB with EM learning)
    - *test_iidBL_AWGN_GAMP* (BL)
    - *test_iidBL_AWGN_EM_GAMP* (BL with EM learning)
    - *test_iidBG_AWGN_GAMP* (MMSE GAMP)
    - *test_iidBG_AWGN_EM_GAMP* (MMSE GAMP with EM learning)
    - *test_iidBG_AWGN_GAMP_rangan_sum_approx* (rangan sum approx GAMP)
    - *test_iidBG_AWGN_GAMP_krzakala_sum_approx* (krzakala sum approx GAMP)
    - *test_iidwBG_ones_AWGN_GAMP* (weighted GAMP, unit weights)
    - *test_iidwBG_ones_AWGN_EM_GAMP* (weighted GAMP, unit weights, EM)
    - *test_iidwBG_linspace_AWGN_GAMP* (weighted GAMP, linspace weights)
    - *test_iidwBG_linspace_AWGN_EM_truncate_GAMP* (EM, tau truncation)
    - *test_iidwBG_linspace_AWGN_EM_reweight_GAMP* (EM, tau reweighting)
    Subclasses fix the phase space point by calling :meth:`setUp` with
    concrete ``n``, ``delta``, and ``rho`` values.
    """
    def setUp(self, n=None, delta=None, rho=None, seed=6021):
        """Build the test problems for the phase space point (delta, rho).

        The ``None`` defaults are placeholders only: subclasses must supply
        ``n``, ``delta`` and ``rho``, otherwise ``delta * n`` raises.
        """
        m = int(delta * n)
        self.k = int(rho * m)
        self.tau = delta * rho
        self.y, self.A, self.alpha = use_rademacher(n, m, self.k, seed=seed)
        self.oracle_support = self.alpha != 0
        self.z, self.F, self.a = use_gaussian(n, m, self.k, seed=seed)
        self.F_sq = self.F**2
        magni.cs.reconstruction.it.config.update(iterations=200)
        magni.cs.reconstruction.gamp.config.update(iterations=200)
    def tearDown(self):
        """Reset all solver configurations to avoid cross-test leakage."""
        magni.cs.reconstruction.it.config.reset()
        magni.cs.reconstruction.amp.config.reset()
        magni.cs.reconstruction.gamp.config.reset()
    def test_residual_soft_threshold_AMP(self, success=True):
        """Run AMP with residual based threshold level updates."""
        magni.cs.reconstruction.amp.config['threshold_parameters'] = {
            'threshold_level_update_method': 'residual'}
        self._amp_run(self.y, self.A, self.alpha, success=success)
    def test_median_soft_threshold_AMP(self, success=True):
        """Run AMP with median based threshold level updates."""
        magni.cs.reconstruction.amp.config['threshold_parameters'] = {
            'threshold_level_update_method': 'median'}
        self._amp_run(self.y, self.A, self.alpha, success=success)
    def test_iidsGB_AWGN_GAMP(self, success=True):
        """Run GAMP with the i.i.d. sparse Gauss-Bernoulli input channel."""
        input_channel_params = {'tau': self.tau, 'theta_bar': 0,
                                'theta_tilde': 1, 'use_em': False}
        output_channel_params = {'sigma_sq': 1,
                                 'noise_level_estimation': 'sample_variance'}
        IIDsGB = magni.cs.reconstruction.gamp.input_channel.IIDsGB
        magni.cs.reconstruction.gamp.config.update(
            {'input_channel_parameters': input_channel_params,
             'output_channel_parameters': output_channel_params,
             'input_channel': IIDsGB})
        self._gamp_run(self.z, self.F, self.F_sq, self.a, success=success)
    def test_iidsGB_AWGN_EM_GAMP(self, success=True):
        """Run GAMP with the s-GB channel and EM learning of its parameters."""
        input_channel_params = {'tau': self.tau, 'theta_bar': 0,
                                'theta_tilde': 1, 'use_em': True,
                                'em_damping': 0.5}
        output_channel_params = {'sigma_sq': 1,
                                 'noise_level_estimation': 'em'}
        IIDsGB = magni.cs.reconstruction.gamp.input_channel.IIDsGB
        magni.cs.reconstruction.gamp.config.update(
            {'input_channel_parameters': input_channel_params,
             'output_channel_parameters': output_channel_params,
             'input_channel': IIDsGB})
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            self._gamp_run(self.z, self.F, self.F_sq, self.a, success=success)
    def test_iidBL_AWGN_GAMP(self, success=True):
        """Run GAMP with a Bernoulli-Laplace prior via the GWS wrapper."""
        GWS = magni.cs.reconstruction.gamp.input_channel.GWS
        IIDL = magni.cs.reconstruction.gamp.input_channel.IIDL
        input_channel_params = {
            'tau': self.tau, 'weights': None, 'phi_channel': IIDL,
            'phi_channel_parameters': {
                'mu': 0, 'b': 1 / np.sqrt(2), 'use_em': False},
            'use_em': False}
        output_channel_params = {'sigma_sq': 1,
                                 'noise_level_estimation': 'sample_variance'}
        magni.cs.reconstruction.gamp.config.update(
            {'input_channel_parameters': input_channel_params,
             'output_channel_parameters': output_channel_params,
             'input_channel': GWS})
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            self._gamp_run(self.z, self.F, self.F_sq, self.a, success=success)
    def test_iidBL_AWGN_EM_GAMP(self, success=True):
        """Run GAMP with the Bernoulli-Laplace prior and EM learning."""
        GWS = magni.cs.reconstruction.gamp.input_channel.GWS
        IIDL = magni.cs.reconstruction.gamp.input_channel.IIDL
        input_channel_params = {
            'tau': self.tau, 'weights': None, 'phi_channel': IIDL,
            'phi_channel_parameters': {
                'mu': 0, 'b': 1 / np.sqrt(2), 'use_em': True},
            'use_em': True}
        output_channel_params = {'sigma_sq': 1,
                                 'noise_level_estimation': 'em'}
        magni.cs.reconstruction.gamp.config.update(
            {'input_channel_parameters': input_channel_params,
             'output_channel_parameters': output_channel_params,
             'input_channel': GWS})
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            self._gamp_run(self.z, self.F, self.F_sq, self.a, success=success)
    def test_iidBG_AWGN_GAMP(self, success=True):
        """Run GAMP with the default Bernoulli-Gauss (MMSE) configuration."""
        input_channel_params = {'tau': self.tau, 'theta_bar': 0,
                                'theta_tilde': 1, 'use_em': False}
        output_channel_params = {'sigma_sq': 1,
                                 'noise_level_estimation': 'sample_variance'}
        magni.cs.reconstruction.gamp.config.update(
            {'input_channel_parameters': input_channel_params,
             'output_channel_parameters': output_channel_params})
        self._gamp_run(self.z, self.F, self.F_sq, self.a, success=success)
    def test_iidBG_AWGN_EM_GAMP(self, success=True):
        """Run MMSE GAMP with EM learning of channel parameters."""
        input_channel_params = {'tau': self.tau, 'theta_bar': 0,
                                'theta_tilde': 1, 'use_em': True}
        output_channel_params = {'sigma_sq': 1,
                                 'noise_level_estimation': 'em'}
        magni.cs.reconstruction.gamp.config.update(
            {'input_channel_parameters': input_channel_params,
             'output_channel_parameters': output_channel_params})
        self._gamp_run(self.z, self.F, self.F_sq, self.a, success=success)
    def test_iidBG_AWGN_GAMP_rangan_sum_approx(self, success=True):
        """Run MMSE GAMP with the (default) rangan sum approximation."""
        input_channel_params = {'tau': self.tau, 'theta_bar': 0,
                                'theta_tilde': 1, 'use_em': False}
        output_channel_params = {'sigma_sq': 1,
                                 'noise_level_estimation': 'sample_variance'}
        magni.cs.reconstruction.gamp.config.update(
            {'input_channel_parameters': input_channel_params,
             'output_channel_parameters': output_channel_params})
        # The rangan approximation is the configuration default.
        self.assertEqual(
            magni.cs.reconstruction.gamp.config['sum_approximation_constant'],
            {'rangan': 1.0})
        # F_sq is None: the sum approximation replaces the squared matrix.
        self._gamp_run(self.z, self.F, None, self.a, success=success)
    def test_iidBG_AWGN_GAMP_krzakala_sum_approx(self, success=True):
        """Run MMSE GAMP with the krzakala sum approximation."""
        input_channel_params = {'tau': self.tau, 'theta_bar': 0,
                                'theta_tilde': 1, 'use_em': False}
        output_channel_params = {'sigma_sq': 1,
                                 'noise_level_estimation': 'sample_variance'}
        magni.cs.reconstruction.gamp.config.update(
            {'input_channel_parameters': input_channel_params,
             'output_channel_parameters': output_channel_params,
             'sum_approximation_constant': {'krzakala': 1.0 / self.F.shape[0]}}
        )
        self.assertEqual(
            magni.cs.reconstruction.gamp.config['sum_approximation_constant'],
            {'krzakala': 1.0 / self.F.shape[0]})
        # F_sq is None: the sum approximation replaces the squared matrix.
        self._gamp_run(self.z, self.F, None, self.a, success=success)
    def test_iidwBG_ones_AWGN_GAMP(self, success=True):
        """Run weighted GAMP with all-ones weights (equals unweighted BG)."""
        GWS = magni.cs.reconstruction.gamp.input_channel.GWS
        IIDG = magni.cs.reconstruction.gamp.input_channel.IIDG
        input_channel_params = {
            'tau': self.tau, 'weights': np.ones_like(self.a),
            'phi_channel': IIDG,
            'phi_channel_parameters': {
                'theta_bar': 0, 'theta_tilde': 1, 'use_em': False},
            'use_em': False}
        output_channel_params = {'sigma_sq': 1,
                                 'noise_level_estimation': 'em'}
        magni.cs.reconstruction.gamp.config.update(
            {'input_channel_parameters': input_channel_params,
             'output_channel_parameters': output_channel_params,
             'input_channel': GWS})
        self._gamp_run(self.z, self.F, self.F_sq, self.a, success=success)
    def test_iidwBG_ones_AWGN_EM_GAMP(self, success=True):
        """Run weighted GAMP with all-ones weights and EM learning."""
        GWS = magni.cs.reconstruction.gamp.input_channel.GWS
        IIDG = magni.cs.reconstruction.gamp.input_channel.IIDG
        input_channel_params = {
            'tau': self.tau, 'weights': np.ones_like(self.a),
            'phi_channel': IIDG,
            'phi_channel_parameters': {
                'theta_bar': 0, 'theta_tilde': 1, 'use_em': False},
            'use_em': True}
        output_channel_params = {'sigma_sq': 1,
                                 'noise_level_estimation': 'em'}
        magni.cs.reconstruction.gamp.config.update(
            {'input_channel_parameters': input_channel_params,
             'output_channel_parameters': output_channel_params,
             'input_channel': GWS})
        self._gamp_run(self.z, self.F, self.F_sq, self.a, success=success)
    def test_iidwBG_linspace_AWGN_GAMP(self, success=True):
        """Run weighted GAMP with linearly spaced (column vector) weights."""
        GWS = magni.cs.reconstruction.gamp.input_channel.GWS
        IIDG = magni.cs.reconstruction.gamp.input_channel.IIDG
        input_channel_params = {
            'tau': self.tau,
            'weights': np.linspace(0.1, 0.9, len(self.a)).reshape(-1, 1),
            'phi_channel': IIDG,
            'phi_channel_parameters': {
                'theta_bar': 0, 'theta_tilde': 1, 'use_em': False},
            'use_em': False}
        output_channel_params = {'sigma_sq': 1,
                                 'noise_level_estimation': 'em'}
        magni.cs.reconstruction.gamp.config.update(
            {'input_channel_parameters': input_channel_params,
             'output_channel_parameters': output_channel_params,
             'input_channel': GWS})
        self._gamp_run(self.z, self.F, self.F_sq, self.a, success=success)
    def test_iidwBG_linspace_AWGN_EM_truncate_GAMP(self, success=True):
        """Run weighted GAMP with EM and the 'truncate' tau adjustment."""
        GWS = magni.cs.reconstruction.gamp.input_channel.GWS
        IIDG = magni.cs.reconstruction.gamp.input_channel.IIDG
        input_channel_params = {
            'tau': self.tau,
            'weights': np.linspace(0.1, 0.9, len(self.a)).reshape(-1, 1),
            'phi_channel': IIDG,
            'phi_channel_parameters': {
                'theta_bar': 0, 'theta_tilde': 1, 'use_em': False},
            'use_em': True,
            'adjust_tau_method': 'truncate'}
        output_channel_params = {'sigma_sq': 1,
                                 'noise_level_estimation': 'em'}
        magni.cs.reconstruction.gamp.config.update(
            {'input_channel_parameters': input_channel_params,
             'output_channel_parameters': output_channel_params,
             'input_channel': GWS})
        self._gamp_run(self.z, self.F, self.F_sq, self.a, success=success)
    def test_iidwBG_linspace_AWGN_EM_reweight_GAMP(self, success=True):
        """Run weighted GAMP with EM and the 'reweight' tau adjustment."""
        GWS = magni.cs.reconstruction.gamp.input_channel.GWS
        IIDG = magni.cs.reconstruction.gamp.input_channel.IIDG
        input_channel_params = {
            'tau': self.tau,
            'weights': np.linspace(0.1, 0.9, len(self.a)).reshape(-1, 1),
            'phi_channel': IIDG,
            'phi_channel_parameters': {
                'theta_bar': 0, 'theta_tilde': 1, 'use_em': False},
            'use_em': True,
            'adjust_tau_method': 'reweight'}
        output_channel_params = {'sigma_sq': 1,
                                 'noise_level_estimation': 'em'}
        magni.cs.reconstruction.gamp.config.update(
            {'input_channel_parameters': input_channel_params,
             'output_channel_parameters': output_channel_params,
             'input_channel': GWS})
        self._gamp_run(self.z, self.F, self.F_sq, self.a, success=success)
    def test_default_IT(self, success_iht=True, success_ist=True):
        """Run IHT and IST with the default IT configuration."""
        self._iht_run(self.y, self.A, self.alpha, success=success_iht)
        self._ist_run(self.y, self.A, self.alpha, success=success_ist)
    def test_fixed_IT(self, success_iht=True, success_ist=True):
        """Run IHT and IST with a fixed threshold equal to k."""
        it_config = {'threshold': 'fixed',
                     'threshold_fixed': self.k}
        magni.cs.reconstruction.it.config.update(it_config)
        self._iht_run(self.y, self.A, self.alpha, success=success_iht)
        self._ist_run(self.y, self.A, self.alpha, success=success_ist)
        self.assertEqual(magni.cs.reconstruction.it.config['threshold'],
                         'fixed')
        self.assertEqual(magni.cs.reconstruction.it.config['threshold_fixed'],
                         self.k)
    def test_adaptive_fixed_IT(self, success_iht=True, success_ist=True):
        """Run IHT and IST with fixed threshold and adaptive step-size."""
        it_config = {'threshold': 'fixed',
                     'threshold_fixed': self.k,
                     'kappa': 'adaptive'}
        magni.cs.reconstruction.it.config.update(it_config)
        self._iht_run(self.y, self.A, self.alpha, success=success_iht)
        self._ist_run(self.y, self.A, self.alpha, success=success_ist)
        self.assertEqual(magni.cs.reconstruction.it.config['threshold'],
                         'fixed')
        self.assertEqual(magni.cs.reconstruction.it.config['threshold_fixed'],
                         self.k)
        self.assertEqual(magni.cs.reconstruction.it.config['kappa'],
                         'adaptive')
    def test_weighted_fixed_IT(self, success_iht=True, success_ist=True):
        """Run weighted IHT and IST with a fixed threshold and weights."""
        threshold_weights = np.linspace(
            1, 0.5, self.alpha.shape[0]).reshape(-1, 1)
        it_config = {'threshold': 'fixed',
                     'threshold_fixed': self.k,
                     'threshold_weights': threshold_weights}
        magni.cs.reconstruction.it.config.update(it_config)
        self._wiht_run(self.y, self.A, self.alpha, success=success_iht)
        self._wist_run(self.y, self.A, self.alpha, success=success_ist)
        self.assertEqual(magni.cs.reconstruction.it.config['threshold'],
                         'fixed')
        self.assertEqual(magni.cs.reconstruction.it.config['threshold_fixed'],
                         self.k)
        self.assertTrue(np.allclose(
            magni.cs.reconstruction.it.config['threshold_weights'],
            threshold_weights))
    def _amp_run(self, y, A, a, success=True):
        """Run AMP with the minimax threshold and check the outcome."""
        threshold_params = {
            'theta': magni.cs.reconstruction.amp.util.theta_mm(
                float(A.shape[0]) / A.shape[1]), 'tau_hat_sq': 1.0}
        magni.cs.reconstruction.amp.config['threshold_parameters'].update(
            threshold_params)
        a_hat = magni.cs.reconstruction.amp.run(y, A)
        if success:
            self.assertTrue(np.allclose(a_hat, a, atol=1e-2))
        else:
            self.assertFalse(np.allclose(a_hat, a, atol=1e-2))
    def _gamp_run(self, z, F, F_sq, a, success=True):
        """Run GAMP and assert (non-)closeness of the estimate to `a`."""
        a_hat = magni.cs.reconstruction.gamp.run(z, F, F_sq)
        if success:
            self.assertTrue(np.allclose(a_hat, a, atol=1e-2))
        else:
            self.assertFalse(np.allclose(a_hat, a, atol=1e-2))
    def _iht_run(self, y, A, alpha, success=True):
        """Run IT with hard thresholding and check the outcome."""
        iht_config = {'threshold_operator': 'hard'}
        magni.cs.reconstruction.it.config.update(iht_config)
        self.assertEqual(
            magni.cs.reconstruction.it.config['threshold_operator'], 'hard')
        alpha_hat = magni.cs.reconstruction.it.run(y, A)
        if success:
            self.assertTrue(np.allclose(alpha_hat, alpha, atol=1e-2))
        else:
            self.assertFalse(np.allclose(alpha_hat, alpha, atol=1e-2))
    def _ist_run(self, y, A, alpha, success=True):
        """Run IT with soft thresholding and check the outcome."""
        ist_config = {'threshold_operator': 'soft'}
        magni.cs.reconstruction.it.config.update(ist_config)
        self.assertEqual(
            magni.cs.reconstruction.it.config['threshold_operator'], 'soft')
        alpha_hat = magni.cs.reconstruction.it.run(y, A)
        if success:
            self.assertTrue(np.allclose(alpha_hat, alpha, atol=1e-2))
        else:
            self.assertFalse(np.allclose(alpha_hat, alpha, atol=1e-2))
    def _wiht_run(self, y, A, alpha, success=True):
        """Run IT with weighted hard thresholding and check the outcome."""
        iht_config = {'threshold_operator': 'weighted_hard'}
        magni.cs.reconstruction.it.config.update(iht_config)
        self.assertEqual(
            magni.cs.reconstruction.it.config['threshold_operator'],
            'weighted_hard')
        alpha_hat = magni.cs.reconstruction.it.run(y, A)
        if success:
            self.assertTrue(np.allclose(alpha_hat, alpha, atol=1e-2))
        else:
            self.assertFalse(np.allclose(alpha_hat, alpha, atol=1e-2))
    def _wist_run(self, y, A, alpha, success=True):
        """Run IT with weighted soft thresholding and check the outcome."""
        ist_config = {'threshold_operator': 'weighted_soft'}
        magni.cs.reconstruction.it.config.update(ist_config)
        self.assertEqual(
            magni.cs.reconstruction.it.config['threshold_operator'],
            'weighted_soft')
        alpha_hat = magni.cs.reconstruction.it.run(y, A)
        if success:
            self.assertTrue(np.allclose(alpha_hat, alpha, atol=1e-2))
        else:
            self.assertFalse(np.allclose(alpha_hat, alpha, atol=1e-2))
class PhaseSpaceTest1(PhaseSpaceTest, unittest.TestCase):
    """
    Test of reconstruction capabilities at Phase Space point:
    (delta, rho) = (0.08, 0.05)

    Every override below marks the corresponding base class test as
    expected to fail at this phase space point.
    """
    def setUp(self):
        n = 500
        delta = 0.08
        rho = 0.05
        PhaseSpaceTest.setUp(self, n=n, delta=delta, rho=rho)
    def test_residual_soft_threshold_AMP(self):
        PhaseSpaceTest.test_residual_soft_threshold_AMP(self, success=False)
    def test_median_soft_threshold_AMP(self):
        # Fix: dropped the unused ``success`` parameter so the signature is
        # consistent with the other overrides in this class.
        PhaseSpaceTest.test_median_soft_threshold_AMP(self, success=False)
    def test_iidsGB_AWGN_GAMP(self):
        PhaseSpaceTest.test_iidsGB_AWGN_GAMP(self, success=False)
    def test_iidsGB_AWGN_EM_GAMP(self):
        PhaseSpaceTest.test_iidsGB_AWGN_EM_GAMP(self, success=False)
    def test_iidBL_AWGN_GAMP(self):
        PhaseSpaceTest.test_iidBL_AWGN_GAMP(self, success=False)
    def test_iidBL_AWGN_EM_GAMP(self):
        PhaseSpaceTest.test_iidBL_AWGN_EM_GAMP(self, success=False)
    def test_iidBG_AWGN_GAMP(self):
        PhaseSpaceTest.test_iidBG_AWGN_GAMP(self, success=False)
    def test_iidBG_AWGN_EM_GAMP(self):
        PhaseSpaceTest.test_iidBG_AWGN_EM_GAMP(self, success=False)
    def test_iidBG_AWGN_GAMP_rangan_sum_approx(self):
        PhaseSpaceTest.test_iidBG_AWGN_GAMP_rangan_sum_approx(
            self, success=False)
    def test_iidBG_AWGN_GAMP_krzakala_sum_approx(self):
        PhaseSpaceTest.test_iidBG_AWGN_GAMP_krzakala_sum_approx(
            self, success=False)
    def test_iidwBG_ones_AWGN_GAMP(self):
        PhaseSpaceTest.test_iidwBG_ones_AWGN_GAMP(self, success=False)
    def test_iidwBG_ones_AWGN_EM_GAMP(self):
        PhaseSpaceTest.test_iidwBG_ones_AWGN_EM_GAMP(self, success=False)
    def test_iidwBG_linspace_AWGN_GAMP(self):
        PhaseSpaceTest.test_iidwBG_linspace_AWGN_GAMP(self, success=False)
    def test_iidwBG_linspace_AWGN_EM_truncate_GAMP(self):
        PhaseSpaceTest.test_iidwBG_linspace_AWGN_EM_truncate_GAMP(
            self, success=False)
    def test_iidwBG_linspace_AWGN_EM_reweight_GAMP(self):
        PhaseSpaceTest.test_iidwBG_linspace_AWGN_EM_reweight_GAMP(
            self, success=False)
class PhaseSpaceTest2(PhaseSpaceTest, unittest.TestCase):
    """
    Test of reconstruction capabilities at Phase Space point:
    (delta, rho) = (0.24, 0.01)

    Every override below marks the corresponding base class test as
    expected to fail at this phase space point.
    """
    def setUp(self):
        n = 500
        delta = 0.24
        rho = 0.01
        PhaseSpaceTest.setUp(self, n=n, delta=delta, rho=rho)
    def test_residual_soft_threshold_AMP(self):
        PhaseSpaceTest.test_residual_soft_threshold_AMP(self, success=False)
    def test_median_soft_threshold_AMP(self):
        # Fix: dropped the unused ``success`` parameter so the signature is
        # consistent with the other overrides in this class.
        PhaseSpaceTest.test_median_soft_threshold_AMP(self, success=False)
    def test_iidsGB_AWGN_GAMP(self):
        PhaseSpaceTest.test_iidsGB_AWGN_GAMP(self, success=False)
    def test_iidsGB_AWGN_EM_GAMP(self):
        PhaseSpaceTest.test_iidsGB_AWGN_EM_GAMP(self, success=False)
    def test_iidBL_AWGN_GAMP(self):
        PhaseSpaceTest.test_iidBL_AWGN_GAMP(self, success=False)
    def test_iidBL_AWGN_EM_GAMP(self):
        PhaseSpaceTest.test_iidBL_AWGN_EM_GAMP(self, success=False)
    def test_iidBG_AWGN_GAMP(self):
        PhaseSpaceTest.test_iidBG_AWGN_GAMP(self, success=False)
    def test_iidBG_AWGN_EM_GAMP(self):
        PhaseSpaceTest.test_iidBG_AWGN_EM_GAMP(self, success=False)
    def test_iidBG_AWGN_GAMP_rangan_sum_approx(self):
        PhaseSpaceTest.test_iidBG_AWGN_GAMP_rangan_sum_approx(
            self, success=False)
    def test_iidBG_AWGN_GAMP_krzakala_sum_approx(self):
        PhaseSpaceTest.test_iidBG_AWGN_GAMP_krzakala_sum_approx(
            self, success=False)
    def test_iidwBG_ones_AWGN_GAMP(self):
        PhaseSpaceTest.test_iidwBG_ones_AWGN_GAMP(self, success=False)
    def test_iidwBG_ones_AWGN_EM_GAMP(self):
        PhaseSpaceTest.test_iidwBG_ones_AWGN_EM_GAMP(self, success=False)
    def test_iidwBG_linspace_AWGN_GAMP(self):
        PhaseSpaceTest.test_iidwBG_linspace_AWGN_GAMP(self, success=False)
    def test_iidwBG_linspace_AWGN_EM_truncate_GAMP(self):
        PhaseSpaceTest.test_iidwBG_linspace_AWGN_EM_truncate_GAMP(
            self, success=False)
    def test_iidwBG_linspace_AWGN_EM_reweight_GAMP(self):
        PhaseSpaceTest.test_iidwBG_linspace_AWGN_EM_reweight_GAMP(
            self, success=False)
class PhaseSpaceTest3(PhaseSpaceTest, unittest.TestCase):
    """
    Test of reconstruction capabilities at Phase Space point:
    (delta, rho) = (0.38, 0.12)
    """
    def setUp(self):
        # Fix the phase space point for the inherited test battery.
        PhaseSpaceTest.setUp(self, n=500, delta=0.38, rho=0.12)
    def test_fixed_IT(self):
        # IST with a fixed threshold is not expected to recover here.
        PhaseSpaceTest.test_fixed_IT(self, success_ist=False)
    def test_adaptive_fixed_IT(self):
        # Same expectation with the adaptive step-size variant.
        PhaseSpaceTest.test_adaptive_fixed_IT(self, success_ist=False)
class PhaseSpaceTest4(PhaseSpaceTest, unittest.TestCase):
    """
    Test of reconstruction capabilities at Phase Space point:
    (delta, rho) = (0.62, 0.38)

    Every override below marks the corresponding base class test as
    expected to fail at this phase space point.
    """
    def setUp(self):
        n = 500
        delta = 0.62
        rho = 0.38
        PhaseSpaceTest.setUp(self, n=n, delta=delta, rho=rho)
    def test_residual_soft_threshold_AMP(self):
        PhaseSpaceTest.test_residual_soft_threshold_AMP(self, success=False)
    def test_median_soft_threshold_AMP(self):
        # Fix: dropped the unused ``success`` parameter so the signature is
        # consistent with the other overrides in this class.
        PhaseSpaceTest.test_median_soft_threshold_AMP(self, success=False)
    def test_default_IT(self):
        PhaseSpaceTest.test_default_IT(self, success_iht=False,
                                       success_ist=False)
    def test_fixed_IT(self):
        PhaseSpaceTest.test_fixed_IT(self, success_iht=False,
                                     success_ist=False)
    def test_adaptive_fixed_IT(self):
        PhaseSpaceTest.test_adaptive_fixed_IT(self, success_iht=False,
                                              success_ist=False)
    def test_weighted_fixed_IT(self):
        PhaseSpaceTest.test_weighted_fixed_IT(self, success_iht=False,
                                              success_ist=False)
class PhaseSpaceTest5(PhaseSpaceTest, unittest.TestCase):
    """
    Test of reconstruction capabilities at Phase Space point:
    (delta, rho) = (0.78, 0.22)
    """
    def setUp(self):
        # Fix the phase space point for the inherited test battery.
        PhaseSpaceTest.setUp(self, n=500, delta=0.78, rho=0.22)
    def test_fixed_IT(self):
        # The IST variants are not expected to recover at this point.
        PhaseSpaceTest.test_fixed_IT(self, success_ist=False)
    def test_adaptive_fixed_IT(self):
        PhaseSpaceTest.test_adaptive_fixed_IT(self, success_ist=False)
    def test_weighted_fixed_IT(self):
        PhaseSpaceTest.test_weighted_fixed_IT(self, success_ist=False)
class PhaseSpaceTest6(PhaseSpaceTest, unittest.TestCase):
    """
    Test of reconstruction capabilities at Phase Space point:
    (delta, rho) = (0.84, 0.08)
    """
    def setUp(self):
        # Fix the phase space point for the inherited test battery.
        PhaseSpaceTest.setUp(self, n=500, delta=0.84, rho=0.08)
    def test_fixed_IT(self):
        # The IST variants are not expected to recover at this point.
        PhaseSpaceTest.test_fixed_IT(self, success_ist=False)
    def test_adaptive_fixed_IT(self):
        PhaseSpaceTest.test_adaptive_fixed_IT(self, success_ist=False)
    def test_weighted_fixed_IT(self):
        PhaseSpaceTest.test_weighted_fixed_IT(self, success_ist=False)
class PhaseSpaceTest7(PhaseSpaceTest, unittest.TestCase):
    """
    Test of reconstruction capabilities at Phase Space point:
    (delta, rho) = (0.96, 0.91)

    Each overridden test passes success*=False to record that the
    corresponding algorithm is expected to fail at this point.
    """
    def setUp(self):
        n = 500
        delta = 0.96
        rho = 0.91
        PhaseSpaceTest.setUp(self, n=n, delta=delta, rho=rho)
    def test_residual_soft_threshold_AMP(self):
        PhaseSpaceTest.test_residual_soft_threshold_AMP(self, success=False)
    def test_median_soft_threshold_AMP(self):
        # BUG FIX: removed the dead 'success=True' parameter from the
        # signature; unittest invokes test methods without arguments, so the
        # parameter was never used (False is passed explicitly below).
        PhaseSpaceTest.test_median_soft_threshold_AMP(self, success=False)
    def test_iidsGB_AWGN_GAMP(self):
        PhaseSpaceTest.test_iidsGB_AWGN_GAMP(self, success=False)
    def test_iidsGB_AWGN_EM_GAMP(self):
        PhaseSpaceTest.test_iidsGB_AWGN_EM_GAMP(self, success=False)
    def test_iidBL_AWGN_GAMP(self):
        PhaseSpaceTest.test_iidBL_AWGN_GAMP(self, success=False)
    def test_iidBL_AWGN_EM_GAMP(self):
        PhaseSpaceTest.test_iidBL_AWGN_EM_GAMP(self, success=False)
    def test_iidBG_AWGN_GAMP(self):
        PhaseSpaceTest.test_iidBG_AWGN_GAMP(self, success=False)
    def test_iidBG_AWGN_EM_GAMP(self):
        PhaseSpaceTest.test_iidBG_AWGN_EM_GAMP(self, success=False)
    def test_iidBG_AWGN_GAMP_rangan_sum_approx(self):
        PhaseSpaceTest.test_iidBG_AWGN_GAMP_rangan_sum_approx(
            self, success=False)
    def test_iidBG_AWGN_GAMP_krzakala_sum_approx(self):
        PhaseSpaceTest.test_iidBG_AWGN_GAMP_krzakala_sum_approx(
            self, success=False)
    def test_iidwBG_ones_AWGN_GAMP(self):
        PhaseSpaceTest.test_iidwBG_ones_AWGN_GAMP(self, success=False)
    def test_iidwBG_ones_AWGN_EM_GAMP(self):
        PhaseSpaceTest.test_iidwBG_ones_AWGN_EM_GAMP(self, success=False)
    def test_iidwBG_linspace_AWGN_GAMP(self):
        PhaseSpaceTest.test_iidwBG_linspace_AWGN_GAMP(self, success=False)
    def test_iidwBG_linspace_AWGN_EM_truncate_GAMP(self):
        PhaseSpaceTest.test_iidwBG_linspace_AWGN_EM_truncate_GAMP(
            self, success=False)
    def test_iidwBG_linspace_AWGN_EM_reweight_GAMP(self):
        PhaseSpaceTest.test_iidwBG_linspace_AWGN_EM_reweight_GAMP(
            self, success=False)
    def test_default_IT(self):
        PhaseSpaceTest.test_default_IT(self, success_iht=False,
                                       success_ist=False)
    def test_fixed_IT(self):
        PhaseSpaceTest.test_fixed_IT(self, success_iht=False,
                                     success_ist=False)
    def test_adaptive_fixed_IT(self):
        PhaseSpaceTest.test_adaptive_fixed_IT(self, success_iht=False,
                                              success_ist=False)
    def test_weighted_fixed_IT(self):
        PhaseSpaceTest.test_weighted_fixed_IT(self, success_iht=False,
                                              success_ist=False)
class PhaseSpaceTestA(PhaseSpaceTest, unittest.TestCase):
    """
    Test of reconstruction capabilities at Phase Space point:
    (delta, rho) = (0.06, 0.92)

    Each overridden test passes success*=False to record that the
    corresponding algorithm is expected to fail at this point.
    """
    def setUp(self):
        n = 500
        delta = 0.06
        rho = 0.92
        PhaseSpaceTest.setUp(self, n=n, delta=delta, rho=rho)
    def test_residual_soft_threshold_AMP(self):
        PhaseSpaceTest.test_residual_soft_threshold_AMP(self, success=False)
    def test_median_soft_threshold_AMP(self):
        # BUG FIX: removed the dead 'success=True' parameter from the
        # signature; unittest invokes test methods without arguments, so the
        # parameter was never used (False is passed explicitly below).
        PhaseSpaceTest.test_median_soft_threshold_AMP(self, success=False)
    def test_iidsGB_AWGN_GAMP(self):
        PhaseSpaceTest.test_iidsGB_AWGN_GAMP(self, success=False)
    def test_iidsGB_AWGN_EM_GAMP(self):
        PhaseSpaceTest.test_iidsGB_AWGN_EM_GAMP(self, success=False)
    def test_iidBL_AWGN_GAMP(self):
        PhaseSpaceTest.test_iidBL_AWGN_GAMP(self, success=False)
    def test_iidBL_AWGN_EM_GAMP(self):
        PhaseSpaceTest.test_iidBL_AWGN_EM_GAMP(self, success=False)
    def test_iidBG_AWGN_GAMP(self):
        PhaseSpaceTest.test_iidBG_AWGN_GAMP(self, success=False)
    def test_iidBG_AWGN_EM_GAMP(self):
        PhaseSpaceTest.test_iidBG_AWGN_EM_GAMP(self, success=False)
    def test_iidBG_AWGN_GAMP_rangan_sum_approx(self):
        PhaseSpaceTest.test_iidBG_AWGN_GAMP_rangan_sum_approx(
            self, success=False)
    def test_iidBG_AWGN_GAMP_krzakala_sum_approx(self):
        PhaseSpaceTest.test_iidBG_AWGN_GAMP_krzakala_sum_approx(
            self, success=False)
    def test_iidwBG_ones_AWGN_GAMP(self):
        PhaseSpaceTest.test_iidwBG_ones_AWGN_GAMP(self, success=False)
    def test_iidwBG_ones_AWGN_EM_GAMP(self):
        PhaseSpaceTest.test_iidwBG_ones_AWGN_EM_GAMP(self, success=False)
    def test_iidwBG_linspace_AWGN_GAMP(self):
        PhaseSpaceTest.test_iidwBG_linspace_AWGN_GAMP(self, success=False)
    def test_iidwBG_linspace_AWGN_EM_truncate_GAMP(self):
        PhaseSpaceTest.test_iidwBG_linspace_AWGN_EM_truncate_GAMP(
            self, success=False)
    def test_iidwBG_linspace_AWGN_EM_reweight_GAMP(self):
        PhaseSpaceTest.test_iidwBG_linspace_AWGN_EM_reweight_GAMP(
            self, success=False)
    def test_default_IT(self):
        PhaseSpaceTest.test_default_IT(self, success_iht=False,
                                       success_ist=False)
    def test_fixed_IT(self):
        PhaseSpaceTest.test_fixed_IT(self, success_iht=False,
                                     success_ist=False)
    def test_adaptive_fixed_IT(self):
        PhaseSpaceTest.test_adaptive_fixed_IT(self, success_iht=False,
                                              success_ist=False)
    def test_weighted_fixed_IT(self):
        PhaseSpaceTest.test_weighted_fixed_IT(self, success_iht=False,
                                              success_ist=False)
class PhaseSpaceTestB(PhaseSpaceTest, unittest.TestCase):
    """
    Test of reconstruction capabilities at Phase Space point:
    (delta, rho) = (0.19, 0.84)

    Each overridden test passes success*=False to record that the
    corresponding algorithm is expected to fail at this point.
    """
    def setUp(self):
        n = 500
        delta = 0.19
        rho = 0.84
        PhaseSpaceTest.setUp(self, n=n, delta=delta, rho=rho)
    def test_residual_soft_threshold_AMP(self):
        PhaseSpaceTest.test_residual_soft_threshold_AMP(self, success=False)
    def test_median_soft_threshold_AMP(self):
        # BUG FIX: removed the dead 'success=True' parameter from the
        # signature; unittest invokes test methods without arguments, so the
        # parameter was never used (False is passed explicitly below).
        PhaseSpaceTest.test_median_soft_threshold_AMP(self, success=False)
    def test_iidsGB_AWGN_GAMP(self):
        PhaseSpaceTest.test_iidsGB_AWGN_GAMP(self, success=False)
    def test_iidsGB_AWGN_EM_GAMP(self):
        PhaseSpaceTest.test_iidsGB_AWGN_EM_GAMP(self, success=False)
    def test_iidBL_AWGN_GAMP(self):
        PhaseSpaceTest.test_iidBL_AWGN_GAMP(self, success=False)
    def test_iidBL_AWGN_EM_GAMP(self):
        PhaseSpaceTest.test_iidBL_AWGN_EM_GAMP(self, success=False)
    def test_iidBG_AWGN_GAMP(self):
        PhaseSpaceTest.test_iidBG_AWGN_GAMP(self, success=False)
    def test_iidBG_AWGN_EM_GAMP(self):
        PhaseSpaceTest.test_iidBG_AWGN_EM_GAMP(self, success=False)
    def test_iidBG_AWGN_GAMP_rangan_sum_approx(self):
        PhaseSpaceTest.test_iidBG_AWGN_GAMP_rangan_sum_approx(
            self, success=False)
    def test_iidBG_AWGN_GAMP_krzakala_sum_approx(self):
        PhaseSpaceTest.test_iidBG_AWGN_GAMP_krzakala_sum_approx(
            self, success=False)
    def test_iidwBG_ones_AWGN_GAMP(self):
        PhaseSpaceTest.test_iidwBG_ones_AWGN_GAMP(self, success=False)
    def test_iidwBG_ones_AWGN_EM_GAMP(self):
        PhaseSpaceTest.test_iidwBG_ones_AWGN_EM_GAMP(self, success=False)
    def test_iidwBG_linspace_AWGN_GAMP(self):
        PhaseSpaceTest.test_iidwBG_linspace_AWGN_GAMP(self, success=False)
    def test_iidwBG_linspace_AWGN_EM_truncate_GAMP(self):
        PhaseSpaceTest.test_iidwBG_linspace_AWGN_EM_truncate_GAMP(
            self, success=False)
    def test_iidwBG_linspace_AWGN_EM_reweight_GAMP(self):
        PhaseSpaceTest.test_iidwBG_linspace_AWGN_EM_reweight_GAMP(
            self, success=False)
    def test_default_IT(self):
        PhaseSpaceTest.test_default_IT(self, success_iht=False,
                                       success_ist=False)
    def test_fixed_IT(self):
        PhaseSpaceTest.test_fixed_IT(self, success_iht=False,
                                     success_ist=False)
    def test_adaptive_fixed_IT(self):
        PhaseSpaceTest.test_adaptive_fixed_IT(self, success_iht=False,
                                              success_ist=False)
    def test_weighted_fixed_IT(self):
        PhaseSpaceTest.test_weighted_fixed_IT(self, success_iht=False,
                                              success_ist=False)
class PhaseSpaceTestC(PhaseSpaceTest, unittest.TestCase):
    """
    Test of reconstruction capabilities at Phase Space point:
    (delta, rho) = (0.29, 0.94)

    Each overridden test passes success*=False to record that the
    corresponding algorithm is expected to fail at this point.
    """
    def setUp(self):
        n = 500
        delta = 0.29
        rho = 0.94
        PhaseSpaceTest.setUp(self, n=n, delta=delta, rho=rho)
    def test_residual_soft_threshold_AMP(self):
        PhaseSpaceTest.test_residual_soft_threshold_AMP(self, success=False)
    def test_median_soft_threshold_AMP(self):
        # BUG FIX: removed the dead 'success=True' parameter from the
        # signature; unittest invokes test methods without arguments, so the
        # parameter was never used (False is passed explicitly below).
        PhaseSpaceTest.test_median_soft_threshold_AMP(self, success=False)
    def test_iidsGB_AWGN_GAMP(self):
        PhaseSpaceTest.test_iidsGB_AWGN_GAMP(self, success=False)
    def test_iidsGB_AWGN_EM_GAMP(self):
        PhaseSpaceTest.test_iidsGB_AWGN_EM_GAMP(self, success=False)
    def test_iidBL_AWGN_GAMP(self):
        PhaseSpaceTest.test_iidBL_AWGN_GAMP(self, success=False)
    def test_iidBL_AWGN_EM_GAMP(self):
        PhaseSpaceTest.test_iidBL_AWGN_EM_GAMP(self, success=False)
    def test_iidBG_AWGN_GAMP(self):
        PhaseSpaceTest.test_iidBG_AWGN_GAMP(self, success=False)
    def test_iidBG_AWGN_EM_GAMP(self):
        PhaseSpaceTest.test_iidBG_AWGN_EM_GAMP(self, success=False)
    def test_iidBG_AWGN_GAMP_rangan_sum_approx(self):
        PhaseSpaceTest.test_iidBG_AWGN_GAMP_rangan_sum_approx(
            self, success=False)
    def test_iidBG_AWGN_GAMP_krzakala_sum_approx(self):
        PhaseSpaceTest.test_iidBG_AWGN_GAMP_krzakala_sum_approx(
            self, success=False)
    def test_iidwBG_ones_AWGN_GAMP(self):
        PhaseSpaceTest.test_iidwBG_ones_AWGN_GAMP(self, success=False)
    def test_iidwBG_ones_AWGN_EM_GAMP(self):
        PhaseSpaceTest.test_iidwBG_ones_AWGN_EM_GAMP(self, success=False)
    def test_iidwBG_linspace_AWGN_GAMP(self):
        PhaseSpaceTest.test_iidwBG_linspace_AWGN_GAMP(self, success=False)
    def test_iidwBG_linspace_AWGN_EM_truncate_GAMP(self):
        PhaseSpaceTest.test_iidwBG_linspace_AWGN_EM_truncate_GAMP(
            self, success=False)
    def test_iidwBG_linspace_AWGN_EM_reweight_GAMP(self):
        PhaseSpaceTest.test_iidwBG_linspace_AWGN_EM_reweight_GAMP(
            self, success=False)
    def test_default_IT(self):
        PhaseSpaceTest.test_default_IT(self, success_iht=False,
                                       success_ist=False)
    def test_fixed_IT(self):
        PhaseSpaceTest.test_fixed_IT(self, success_iht=False,
                                     success_ist=False)
    def test_adaptive_fixed_IT(self):
        PhaseSpaceTest.test_adaptive_fixed_IT(self, success_iht=False,
                                              success_ist=False)
    def test_weighted_fixed_IT(self):
        PhaseSpaceTest.test_weighted_fixed_IT(self, success_iht=False,
                                              success_ist=False)
class TestUSERademacher(unittest.TestCase):
    """
    Test of the use_rademacher test fixture function.
    """
    def test_seed_6021(self):
        """Check use_rademacher against hard-coded values for seed 6021."""
        n = 10
        m = 6
        k = 3
        seed = 6021
        alpha_true = np.array([
            [-1], [1], [1], [0], [0], [0], [0], [0], [0], [0]])
        A_true = np.array([
            [0.3970924, 0.39094998, -0.51535881, -0.29376165, 0.80329912,
             0.2343297, 0.20381475, -0.4006275, 0.97687495, -0.02913711],
            [-0.21781685, -0.46838027, -0.39565219, -0.29879357, -0.1528902,
             -0.09484526, -0.24859693, 0.42678941, -0.17170236, -0.09260817],
            [-0.08024309, -0.24175707, -0.1299679, -0.15608146, -0.51588714,
             -0.48385891, 0.15647558, -0.54407042, 0.16007046, -0.39455782],
            [-0.11461544, -0.09242993, 0.10134369, 0.03684144, 0.24202215,
             0.22913925, -0.16115897, 0.07449874, 0.24777711, -0.20584097],
            [-0.49012155, 0.30646838, 0.27297925, -0.03009987, 0.21501576,
             -0.16483217, 0.49937075, 0.04385046, 0.26298357, 0.33893551],
            [-0.06364924, 0.68731702, -0.21930248, -0.20445363, 0.38122107,
             -0.05793133, 0.12713844, -1.14521796, -0.62776378, -0.1934683]])
        y_true = A_true.dot(alpha_true)
        y, A, alpha = use_rademacher(n, m, k, seed)
        # BUG FIX: np.alltrue was deprecated and removed in NumPy 2.0;
        # np.array_equal performs the same exact (integer) comparison and
        # additionally checks the shapes match.
        self.assertTrue(np.array_equal(alpha_true, alpha))
        self.assertTrue(np.allclose(A_true, A))
        self.assertTrue(np.allclose(y_true, y))
class TestUSEGaussian(unittest.TestCase):
    """
    Test of the use_gaussian test fixture function.
    """
    def test_seed_6021(self):
        """Check use_gaussian against hard-coded values for seed 6021."""
        expected_alpha = np.array([
            2.5616611, -0.30927792, -0.56096039, 0.0, 0.0, 0.0, 0.0, 0.0,
            0.0, 0.0]).reshape(-1, 1)
        expected_A = np.array([
            [0.3970924, 0.39094998, -0.51535881, -0.29376165, 0.80329912,
             0.2343297, 0.20381475, -0.4006275, 0.97687495, -0.02913711],
            [-0.21781685, -0.46838027, -0.39565219, -0.29879357, -0.1528902,
             -0.09484526, -0.24859693, 0.42678941, -0.17170236, -0.09260817],
            [-0.08024309, -0.24175707, -0.1299679, -0.15608146, -0.51588714,
             -0.48385891, 0.15647558, -0.54407042, 0.16007046, -0.39455782],
            [-0.11461544, -0.09242993, 0.10134369, 0.03684144, 0.24202215,
             0.22913925, -0.16115897, 0.07449874, 0.24777711, -0.20584097],
            [-0.49012155, 0.30646838, 0.27297925, -0.03009987, 0.21501576,
             -0.16483217, 0.49937075, 0.04385046, 0.26298357, 0.33893551],
            [-0.06364924, 0.68731702, -0.21930248, -0.20445363, 0.38122107,
             -0.05793133, 0.12713844, -1.14521796, -0.62776378, -0.1934683]])
        # The expected measurements follow from the expected matrix/vector.
        expected_y = expected_A.dot(expected_alpha)
        y, A, alpha = use_gaussian(10, 6, 3, 6021)
        self.assertTrue(np.allclose(expected_alpha, alpha))
        self.assertTrue(np.allclose(expected_A, A))
        self.assertTrue(np.allclose(expected_y, y))
def use_gaussian(n, m, k, seed):
    # BUG FIX: the docstring is now a raw string. It contains LaTeX such as
    # \mathbf and \alpha; in a normal string '\a' is the ASCII bell escape
    # and the other backslash sequences raise SyntaxWarning on modern Python.
    r"""
    Prepare an instance of the USE/Gaussian problem suite

    Prepares:

    * :math:`\mathbf{A} \in \mathbb{R}^{m \times n}` from Uniform Spherical
      Ensemble (USE).
    * :math:`\mathbf{alpha} \in \mathbb{R}^{n}` with :math:`k` non-zero entries
      drawn from the standard normal distribution.
    * :math:`\mathbf{y} = \mathbf{A}\mathbf{\alpha}`

    Parameters
    ----------
    n : int
        The problem size.
    m : int
        The number of measurements.
    k : int
        The number of non-zero coefficients.
    seed : int
        The seed used in the random number generator.

    Returns
    -------
    (y, A, alpha) : tuple
        The measurements, measurement matrix, and coefficients.
    """
    @_decorate_validation
    def validate_input():
        _numeric('n', 'integer', range_='[1;inf)')
        _numeric('m', 'integer', range_='[1;inf)')
        _numeric('k', 'integer', range_='[0;inf)')
        _numeric('seed', 'integer', range_='[0;inf)')

    @_decorate_validation
    def validate_output():
        _numeric('y', ('integer', 'floating', 'complex'), shape=(m, 1))
        _numeric('A', ('integer', 'floating', 'complex'), shape=(m, n))
        _numeric('alpha', ('integer', 'floating', 'complex'), shape=(n, 1))

    validate_input()
    np.random.seed(seed=seed)
    # Gaussian entries scaled by 1/sqrt(m) so columns have unit expected norm.
    A = 1/np.sqrt(m) * np.random.randn(m, n)
    alpha = np.zeros((n, 1))
    # NOTE(review): the support is always the first k entries, not a random
    # subset -- confirm this matches the intended problem suite.
    alpha[:k, 0] = np.random.randn(k)
    y = A.dot(alpha)
    validate_output()
    return y, A, alpha
def use_rademacher(n, m, k, seed):
    # BUG FIX: the docstring is now a raw string. It contains LaTeX such as
    # \mathbf and \alpha; in a normal string '\a' is the ASCII bell escape
    # and the other backslash sequences raise SyntaxWarning on modern Python.
    r"""
    Prepare an instance of the USE/Rademacher problem suite

    Prepares:

    * :math:`\mathbf{A} \in \mathbb{R}^{m \times n}` from Uniform Spherical
      Ensemble (USE).
    * :math:`\mathbf{alpha} \in \mathbb{R}^{n}` with :math:`k` non-zero entries
      drawn from the Rademacher distribution {1, -1}.
    * :math:`\mathbf{y} = \mathbf{A}\mathbf{\alpha}`

    Parameters
    ----------
    n : int
        The problem size.
    m : int
        The number of measurements.
    k : int
        The number of non-zero coefficients.
    seed : int
        The seed used in the random number generator.

    Returns
    -------
    (y, A, alpha) : tuple
        The measurements, measurement matrix, and coefficients.
    """
    @_decorate_validation
    def validate_input():
        _numeric('n', 'integer', range_='[1;inf)')
        _numeric('m', 'integer', range_='[1;inf)')
        _numeric('k', 'integer', range_='[0;inf)')
        _numeric('seed', 'integer', range_='[0;inf)')

    @_decorate_validation
    def validate_output():
        _numeric('y', ('integer', 'floating', 'complex'), shape=(m, 1))
        _numeric('A', ('integer', 'floating', 'complex'), shape=(m, n))
        _numeric('alpha', ('integer', 'floating', 'complex'), shape=(n, 1))

    validate_input()
    np.random.seed(seed=seed)
    # Gaussian entries scaled by 1/sqrt(m) so columns have unit expected norm.
    A = 1/np.sqrt(m) * np.random.randn(m, n)
    alpha = np.zeros((n, 1))
    # randint(0, 2) yields 0/1; map to the Rademacher values -1/+1.
    # NOTE(review): the support is always the first k entries, not a random
    # subset -- confirm this matches the intended problem suite.
    alpha[:k, 0] = np.random.randint(0, 2, k) * 2 - 1
    y = A.dot(alpha)
    validate_output()
    return y, A, alpha
| 40.19561
| 79
| 0.636108
| 13,183
| 104,388
| 4.779716
| 0.043238
| 0.023885
| 0.071654
| 0.044437
| 0.92714
| 0.909413
| 0.893082
| 0.867436
| 0.855597
| 0.838631
| 0
| 0.031268
| 0.247548
| 104,388
| 2,596
| 80
| 40.211094
| 0.77094
| 0.102847
| 0
| 0.809577
| 0
| 0
| 0.094449
| 0.035402
| 0
| 0
| 0
| 0
| 0.110423
| 1
| 0.135775
| false
| 0
| 0.004507
| 0
| 0.160563
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0b56e0093724b8ce683484a5eb75886e640287b0
| 11,251
|
py
|
Python
|
src/se_data_process.py
|
RuihongQiu/GAG
|
819e33eee2e3d7d56e635361aff53faea2bfad3a
|
[
"Apache-2.0"
] | 16
|
2020-09-26T07:42:49.000Z
|
2022-03-06T07:26:05.000Z
|
src/se_data_process.py
|
UQMM/GAG
|
819e33eee2e3d7d56e635361aff53faea2bfad3a
|
[
"Apache-2.0"
] | 2
|
2020-11-03T12:48:42.000Z
|
2021-07-30T01:00:35.000Z
|
src/se_data_process.py
|
UQMM/GAG
|
819e33eee2e3d7d56e635361aff53faea2bfad3a
|
[
"Apache-2.0"
] | 4
|
2021-01-10T12:21:18.000Z
|
2022-01-16T14:31:39.000Z
|
# -*- coding: utf-8 -*-
"""
Created on 17/9/2019
@author: LeiGuo, RuihongQiu
"""
import pickle
import numpy
def prepare_data(seqs, labels):
    """Create the padded data matrices from a list of sequences.

    Each sequence is placed in a column of ``x`` (the axes are swapped:
    ``x`` has shape ``(maxlen, n_samples)``), zero-padded up to the length
    of the longest sequence.  ``x_mask`` is 1.0 where ``x`` holds a real
    item and 0.0 on padding positions.

    NOTE(review): item id 0 is treated as padding by the mask, so
    sequences are assumed not to contain 0 as a real item -- verify
    against the callers.

    :param seqs: list of sequences (lists of integer item ids).
    :param labels: labels, returned unchanged.
    :returns: ``(x, x_mask, labels)``.
    """
    lengths = [len(s) for s in seqs]
    n_samples = len(seqs)
    maxlen = numpy.max(lengths)
    x = numpy.zeros((maxlen, n_samples)).astype('int64')
    # BUG FIX: the original used theano.config.floatX, but theano is never
    # imported in this module, so calling this function raised NameError.
    # 'float64' matches theano's documented default floatX.
    x_mask = numpy.ones((maxlen, n_samples)).astype('float64')
    for idx, s in enumerate(seqs):
        x[:lengths[idx], idx] = s
    # Zero the mask wherever x still holds the padding value 0.
    x_mask *= (1 - (x == 0))
    return x, x_mask, labels
def load_data(path, valid_portion=0.1, maxlen=19, sort_by_len=False):
    """Load the train/valid/test session datasets.

    :type path: String
    :param path: Directory prefix; ``train_final.csv`` and
        ``test_final.csv`` (pickled triples of parallel lists
        ``(sequences, labels, users)``) are read from it.
    :type valid_portion: float
    :param valid_portion: The proportion of the full train set used for
        the validation set.
    :type maxlen: None or positive int
    :param maxlen: the max sequence length we use in the train/valid set.
    :type sort_by_len: bool
    :param sort_by_len: Sort the valid and test sets by sequence length.
        This allows faster execution as it causes less padding per
        minibatch.  The train set is deliberately left unsorted; another
        mechanism must be used to shuffle it at each epoch.
    :returns: ``(train, valid, test)``, each a triple of parallel lists.
    """
    path_train_data = path + 'train_final.csv'
    path_test_data = path + 'test_final.csv'
    # Context managers guarantee the files are closed even if unpickling
    # fails (the original left them open on error).
    with open(path_train_data, 'rb') as f1:
        train_set = pickle.load(f1)
    with open(path_test_data, 'rb') as f2:
        test_set = pickle.load(f2)

    if maxlen:
        # Truncate every sequence to at most maxlen items, keeping labels
        # and user ids aligned with their sequences.
        new_train_set_x = []
        new_train_set_y = []
        new_train_set_u = []
        for x, y, u in zip(train_set[0], train_set[1], train_set[2]):
            new_train_set_x.append(x if len(x) < maxlen else x[:maxlen])
            new_train_set_y.append(y)
            new_train_set_u.append(u)
        train_set = (new_train_set_x, new_train_set_y, new_train_set_u)
        del new_train_set_x, new_train_set_y, new_train_set_u
        new_test_set_x = []
        new_test_set_y = []
        new_test_set_u = []
        for xx, yy, uu in zip(test_set[0], test_set[1], test_set[2]):
            new_test_set_x.append(xx if len(xx) < maxlen else xx[:maxlen])
            new_test_set_y.append(yy)
            new_test_set_u.append(uu)
        test_set = (new_test_set_x, new_test_set_y, new_test_set_u)
        del new_test_set_x, new_test_set_y, new_test_set_u

    # split training set into validation set
    train_set_x, train_set_y, train_set_u = train_set
    n_samples = len(train_set_x)
    sidx = numpy.arange(n_samples, dtype='int32')
    numpy.random.shuffle(sidx)
    n_train = int(numpy.round(n_samples * (1. - valid_portion)))
    valid_set_x = [train_set_x[s] for s in sidx[n_train:]]
    valid_set_y = [train_set_y[s] for s in sidx[n_train:]]
    valid_set_u = [train_set_u[s] for s in sidx[n_train:]]
    train_set_x = [train_set_x[s] for s in sidx[:n_train]]
    train_set_y = [train_set_y[s] for s in sidx[:n_train]]
    train_set_u = [train_set_u[s] for s in sidx[:n_train]]

    test_set_x, test_set_y, test_set_u = test_set

    def len_argsort(seq):
        # Indices that order `seq` by ascending sequence length.
        return sorted(range(len(seq)), key=lambda x: len(seq[x]))

    if sort_by_len:
        # BUG FIX: the user lists must be re-indexed together with the
        # sequences and labels; the original only reordered x and y, which
        # silently misaligned users with their sessions.
        sorted_index = len_argsort(test_set_x)
        test_set_x = [test_set_x[i] for i in sorted_index]
        test_set_y = [test_set_y[i] for i in sorted_index]
        test_set_u = [test_set_u[i] for i in sorted_index]
        sorted_index = len_argsort(valid_set_x)
        valid_set_x = [valid_set_x[i] for i in sorted_index]
        valid_set_y = [valid_set_y[i] for i in sorted_index]
        valid_set_u = [valid_set_u[i] for i in sorted_index]

    train = (train_set_x, train_set_y, train_set_u)
    valid = (valid_set_x, valid_set_y, valid_set_u)
    test = (test_set_x, test_set_y, test_set_u)
    return train, valid, test
def load_traindata(trainFile, validFile, valid_portion=0.1, maxlen=19, sort_by_len=False):
    """Load a train set and a validation/test set from explicit files.

    :type trainFile: String
    :param trainFile: path to the pickled train triple of parallel lists
        ``(sequences, labels, users)``.
    :type validFile: String
    :param validFile: path to the pickled validation/test triple.
    :type valid_portion: float
    :param valid_portion: unused here; kept for interface compatibility.
    :type maxlen: None or positive int
    :param maxlen: the max sequence length we use in the train/valid set.
    :type sort_by_len: bool
    :param sort_by_len: Sort the second set by sequence length (less
        padding per minibatch).  The train set is left unsorted.
    :returns: ``(train, test)``, each a triple of parallel lists.
    """
    # Context managers guarantee the files are closed even on error.
    with open(trainFile, 'rb') as f1:
        train_set = pickle.load(f1)
    with open(validFile, 'rb') as f2:
        test_set = pickle.load(f2)

    if maxlen:
        # Truncate every sequence to at most maxlen items, keeping labels
        # and user ids aligned with their sequences.
        new_train_set_x = []
        new_train_set_y = []
        new_train_set_u = []
        for x, y, u in zip(train_set[0], train_set[1], train_set[2]):
            new_train_set_x.append(x if len(x) < maxlen else x[:maxlen])
            new_train_set_y.append(y)
            new_train_set_u.append(u)
        train_set = (new_train_set_x, new_train_set_y, new_train_set_u)
        del new_train_set_x, new_train_set_y, new_train_set_u
        new_test_set_x = []
        new_test_set_y = []
        new_test_set_u = []
        for xx, yy, uu in zip(test_set[0], test_set[1], test_set[2]):
            new_test_set_x.append(xx if len(xx) < maxlen else xx[:maxlen])
            new_test_set_y.append(yy)
            new_test_set_u.append(uu)
        test_set = (new_test_set_x, new_test_set_y, new_test_set_u)
        del new_test_set_x, new_test_set_y, new_test_set_u

    test_set_x, test_set_y, test_set_u = test_set
    train_set_x, train_set_y, train_set_u = train_set

    def len_argsort(seq):
        # Indices that order `seq` by ascending sequence length.
        return sorted(range(len(seq)), key=lambda x: len(seq[x]))

    if sort_by_len:
        # BUG FIX: the user list must be re-indexed together with the
        # sequences and labels; the original only reordered x and y, which
        # silently misaligned users with their sessions.
        sorted_index = len_argsort(test_set_x)
        test_set_x = [test_set_x[i] for i in sorted_index]
        test_set_y = [test_set_y[i] for i in sorted_index]
        test_set_u = [test_set_u[i] for i in sorted_index]

    train = (train_set_x, train_set_y, train_set_u)
    test = (test_set_x, test_set_y, test_set_u)
    return train, test
def load_testdata(testFile, maxlen=19, sort_by_len=False):
    """Load a test set from an explicit file.

    :type testFile: String
    :param testFile: path to the pickled test triple of parallel lists
        ``(sequences, labels, users)``.
    :type maxlen: None or positive int
    :param maxlen: the max sequence length we use in the test set.
    :type sort_by_len: bool
    :param sort_by_len: Sort the test set by sequence length (less
        padding per minibatch).
    :returns: ``test``, a triple of parallel lists.
    """
    # Context manager guarantees the file is closed even on error.
    with open(testFile, 'rb') as f2:
        test_set = pickle.load(f2)

    if maxlen:
        # Truncate every sequence to at most maxlen items, keeping labels
        # and user ids aligned with their sequences.
        new_test_set_x = []
        new_test_set_y = []
        new_test_set_u = []
        for xx, yy, uu in zip(test_set[0], test_set[1], test_set[2]):
            new_test_set_x.append(xx if len(xx) < maxlen else xx[:maxlen])
            new_test_set_y.append(yy)
            new_test_set_u.append(uu)
        test_set = (new_test_set_x, new_test_set_y, new_test_set_u)
        del new_test_set_x, new_test_set_y, new_test_set_u

    test_set_x, test_set_y, test_set_u = test_set

    def len_argsort(seq):
        # Indices that order `seq` by ascending sequence length.
        return sorted(range(len(seq)), key=lambda x: len(seq[x]))

    if sort_by_len:
        # BUG FIX: the user list must be re-indexed together with the
        # sequences and labels; the original only reordered x and y, which
        # silently misaligned users with their sessions.
        sorted_index = len_argsort(test_set_x)
        test_set_x = [test_set_x[i] for i in sorted_index]
        test_set_y = [test_set_y[i] for i in sorted_index]
        test_set_u = [test_set_u[i] for i in sorted_index]

    test = (test_set_x, test_set_y, test_set_u)
    return test
def load_data_valid(train_file, valid_portion=0.1, maxlen=19, sort_by_len=False):
    """Load a train set and split off a validation set.

    :type train_file: String
    :param train_file: path to the pickled train triple of parallel lists
        ``(sequences, labels, users)``.
    :type valid_portion: float
    :param valid_portion: The proportion of the full train set used for
        the validation set (split at random via numpy.random.shuffle).
    :type maxlen: None or positive int
    :param maxlen: the max sequence length we use in the train/valid set.
    :type sort_by_len: bool
    :param sort_by_len: Sort the valid set by sequence length (less
        padding per minibatch).  The train set is left unsorted.
    :returns: ``(train, valid)``, each a triple of parallel lists.
    """
    # Context manager guarantees the file is closed even on error.
    with open(train_file, 'rb') as f1:
        train_set = pickle.load(f1)

    if maxlen:
        # Truncate every sequence to at most maxlen items, keeping labels
        # and user ids aligned with their sequences.
        new_train_set_x = []
        new_train_set_y = []
        new_train_set_u = []
        for x, y, u in zip(train_set[0], train_set[1], train_set[2]):
            new_train_set_x.append(x if len(x) < maxlen else x[:maxlen])
            new_train_set_y.append(y)
            new_train_set_u.append(u)
        train_set = (new_train_set_x, new_train_set_y, new_train_set_u)
        del new_train_set_x, new_train_set_y, new_train_set_u

    # split training set into validation set
    train_set_x, train_set_y, train_set_u = train_set
    n_samples = len(train_set_x)
    sidx = numpy.arange(n_samples, dtype='int32')
    numpy.random.shuffle(sidx)
    n_train = int(numpy.round(n_samples * (1. - valid_portion)))
    valid_set_x = [train_set_x[s] for s in sidx[n_train:]]
    valid_set_y = [train_set_y[s] for s in sidx[n_train:]]
    valid_set_u = [train_set_u[s] for s in sidx[n_train:]]
    train_set_x = [train_set_x[s] for s in sidx[:n_train]]
    train_set_y = [train_set_y[s] for s in sidx[:n_train]]
    train_set_u = [train_set_u[s] for s in sidx[:n_train]]

    def len_argsort(seq):
        # Indices that order `seq` by ascending sequence length.
        return sorted(range(len(seq)), key=lambda x: len(seq[x]))

    if sort_by_len:
        # BUG FIX: the user list must be re-indexed together with the
        # sequences and labels; the original only reordered x and y, which
        # silently misaligned users with their sessions.
        sorted_index = len_argsort(valid_set_x)
        valid_set_x = [valid_set_x[i] for i in sorted_index]
        valid_set_y = [valid_set_y[i] for i in sorted_index]
        valid_set_u = [valid_set_u[i] for i in sorted_index]

    train = (train_set_x, train_set_y, train_set_u)
    valid = (valid_set_x, valid_set_y, valid_set_u)
    return train, valid
| 35.492114
| 90
| 0.64492
| 1,891
| 11,251
| 3.479112
| 0.083554
| 0.149567
| 0.075239
| 0.02736
| 0.894209
| 0.891017
| 0.891017
| 0.891017
| 0.891017
| 0.891017
| 0
| 0.009633
| 0.261843
| 11,251
| 316
| 91
| 35.60443
| 0.782541
| 0.19936
| 0
| 0.870647
| 0
| 0
| 0.006385
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044776
| false
| 0
| 0.00995
| 0.0199
| 0.099502
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0b667665d3aaf90a2decbcfc87fb082c7674c212
| 254
|
py
|
Python
|
7.0-threads/model.py
|
zehemz/clases-python-101
|
633cb5f0cbc85e64e242514f0394754a5bed0513
|
[
"Apache-2.0"
] | null | null | null |
7.0-threads/model.py
|
zehemz/clases-python-101
|
633cb5f0cbc85e64e242514f0394754a5bed0513
|
[
"Apache-2.0"
] | null | null | null |
7.0-threads/model.py
|
zehemz/clases-python-101
|
633cb5f0cbc85e64e242514f0394754a5bed0513
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
class UserModel:
    """A simple in-memory user record: username, email and group list."""

    def __init__(self, username, email):
        self.username = username
        self.email = email
        # Every user starts with no group memberships.
        self.groups = []

    def getUserDictionary(self):
        """Return the user's data as a plain dictionary (groups is shared,
        not copied)."""
        return dict(username=self.username, email=self.email,
                    groups=self.groups)
| 25.4
| 82
| 0.700787
| 31
| 254
| 5.612903
| 0.419355
| 0.206897
| 0.195402
| 0.241379
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.149606
| 254
| 9
| 83
| 28.222222
| 0.805556
| 0.062992
| 0
| 0
| 0
| 0
| 0.080169
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0
| 0
| 0.142857
| 0.571429
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
0b9192cccc41dad4003bef43bef38b542d91dd2e
| 982
|
py
|
Python
|
src/visions/backends/python/types/__init__.py
|
bhumikapahariapuresoftware/visions
|
8838d89b4f02e401112378b4662a779227ead9f8
|
[
"BSD-4-Clause"
] | 142
|
2020-01-07T21:17:10.000Z
|
2022-03-30T13:10:14.000Z
|
src/visions/backends/python/types/__init__.py
|
bhumikapahariapuresoftware/visions
|
8838d89b4f02e401112378b4662a779227ead9f8
|
[
"BSD-4-Clause"
] | 121
|
2020-01-07T02:26:38.000Z
|
2022-03-29T17:18:19.000Z
|
src/visions/backends/python/types/__init__.py
|
bhumikapahariapuresoftware/visions
|
8838d89b4f02e401112378b4662a779227ead9f8
|
[
"BSD-4-Clause"
] | 18
|
2020-02-17T03:17:37.000Z
|
2022-02-20T14:01:11.000Z
|
import visions.backends.python.types.boolean
import visions.backends.python.types.categorical
import visions.backends.python.types.complex
import visions.backends.python.types.count
import visions.backends.python.types.date
import visions.backends.python.types.date_time
import visions.backends.python.types.email_address
import visions.backends.python.types.file
import visions.backends.python.types.float
import visions.backends.python.types.geometry
import visions.backends.python.types.image
import visions.backends.python.types.integer
import visions.backends.python.types.ip_address
import visions.backends.python.types.numeric
import visions.backends.python.types.object
import visions.backends.python.types.ordinal
import visions.backends.python.types.path
import visions.backends.python.types.string
import visions.backends.python.types.time
import visions.backends.python.types.time_delta
import visions.backends.python.types.url
import visions.backends.python.types.uuid
| 42.695652
| 50
| 0.86558
| 136
| 982
| 6.220588
| 0.198529
| 0.338061
| 0.546099
| 0.702128
| 0.87234
| 0.304965
| 0
| 0
| 0
| 0
| 0
| 0
| 0.044807
| 982
| 22
| 51
| 44.636364
| 0.901919
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
0bb493e6adea4c87eaaa091c90eb86af14e9db2b
| 7,496
|
py
|
Python
|
decompress_gnu.py
|
oppressionslayer/maxentropy
|
0f00d2ee6733dd4038821abb86490ffb1dd4dac0
|
[
"MIT"
] | 1
|
2019-09-24T01:09:12.000Z
|
2019-09-24T01:09:12.000Z
|
decompress_gnu.py
|
oppressionslayer/maxentropy
|
0f00d2ee6733dd4038821abb86490ffb1dd4dac0
|
[
"MIT"
] | 1
|
2020-01-17T16:32:09.000Z
|
2020-01-17T16:32:09.000Z
|
decompress_gnu.py
|
oppressionslayer/maxentropy
|
0f00d2ee6733dd4038821abb86490ffb1dd4dac0
|
[
"MIT"
] | null | null | null |
# Use python3 not python2
# The original is 30533 bytes.
# It's the GNU Header repeated 4 times.
# The gzip version compressed file is 2872 bytes
# the lzma version of our compreesed codes
# is 2840 bytes
# We use lzma to compress our math operations
# to uncompress the GNU Header with better
# results than gzip in this case by using
# an lzma header to compress our codes.
# We are not compressing the file, instead
# we are compressing the pattern used for
# XOR operations to rebuild the original file
# In no way can you uncompress the code
# to reproduce the original without using our
# math XOR operations.
# This is to prove that XOR operations have
# repetition on par with the original file
# just by compressing the maths logic.
# I used gzip as an example as it doesn't
# compress repetition as well as other compressors.
import lzma
# This code uncompresses the math required to rebuild a GNU licence that is repeated 4 times.
code='fd377a585a000004e6d6b4460200210116000000742fe5a3e077450ad55d0000953185805d1e19050eb115bdd0f560d3516fc2a4d1f9603d38f615e4012ce3d463f56b72f09dcfe8746a3c95e5d95ba427f711d2e70cebbdda888a340eb8acb2dfcff6381b49d185051e595a1aec163eaf70cab9b67d9f953d62d5f99b4757434dbf72dbf14e5ca7c1f786a2e245f7b8969bd8232b9f5c48b3a6ef37c33ab46d89c67ddc793c8b1c9bd62219ce9f0fe529d33776e714c707db01084a93a26d14bdd331f8e46000d7356c29ca8214691a605586a6e24d53488d4de3de87f0487ff372d0f3a8c292547d1337ae8d133072f855deb17d9534c1d1fa20e5d6f3eeb4a5bdae17f898f3c2669cbe564f0e21f94c063e88edfaa21a6c4552350771b789d7fea56c9d100f8feb513e578aabc70a813870eb17c3da76c8420480fdcd8fd1025cfb090629e1f455bd99c906c97b5dc40f9ce3d5b9fd6752f4090106ff64d24be55338ff33c7af686981b0cf8b29a9db092016d8f971feb8f4c7a28ecb7413c6dee182b76b85b9a2e2574fedce00fdb9f5795934cd0f12c9194c20b6181cbe56764dac2ebaa184f738dd4c640546bbcef1288119d105ff29d1caf90144b82b8d13430b0f7bc02b934b24edc0d1b9d39e6e68e9c4727787f713eeebf73bf7c14cc91ce3279e80fcb5d97869201f89bd689f32644f1ecc437126e741e0d255e0236b0ff044b8c8f19eaab2f8931e057e61253d781c8a895f19d3e4bfdca1480eb25ecf5fe0b9226ef5a9ec3bbc5daa2a632f8ec36e66b1fad219adb2b7e4fb3f94ae725b36c4a1d3df3dcccc3cda58c1d428b10d6db6e94dbb5925a5cb5f8684a4b8754f6995f379f4fb0e5bd85aab83c66dea1fe8fb8ae69fc275b06e1f5d325a0d6f8b910cb50fbb0cdfec39111f8d48a2bcb218f4c1831e12ef0ec2e436c63555b6ce710cd4fb9598bb2d66eb733456187394c97462be70e7dc48a94df8f0e61b84f6ca558034ef2ac021bef029f27b3ed75b1514fb9ca5072a02e759ff7a1ba4d916b6e77d72e5cb0a49fb9bf856f4a6df9e2f7ec16e92a4442e60a9838be2757bc54fc4ee7dc687cf11f7a3c410cf536fb20474bba282c74622a27d49013ed387f0e59fb1b9699d73a279bdb5d4716be8fbe062aa5bc330f901bb474836632bc3f8370f3c754578d4d107be8b31870c5914701300e7d62bb243ad46dc7786b9ded96e3e8a36aa2aaaf77f79683b9b1179d2e9e623eda2657563fab531b523dc54d74de6a27a90cc6c87aaab1c03ed9bb69586d198be4080a6ad88d485b33cfc3b203e853fcbe000265681efafe50262f1685ecbad4c0f9e8fca72f92a826a033623af98fa6b9fc4007b2f3b2ee1b2f608d95039b6e0608839c957569ba6120dcc5110
c79e7f3fb474f70f9a92d505fdede096333a9c3173884b8488ac1b0c12f57a2db90ecb1639c90004e5d59e3f5e0b45d6a29606f06622047e56b40ee8c959766fbd633b4d8549b75cb287af9515facebb6f4a57a1d6cebcb2ffbe7ae999e5d109096c0777034f5b7ea96b92e75e03457c0618785f2e0091d1e44be7ed7142e988e9694fbf81892c3cc1c7670900396d3a8bdef2494fcef9afe623b94a0d067354da3a4377fadd1e72480a55184bb182b2569b9dfa7c5d59c1d72c93b4e39d2e3055d3670a74669f02b2814d9608ae888dfe9707b1461f195d1e85a87a1eb4b5228a37ec1990ffa54f310f9732cfb789582304654d2f97c596d77d812cd42797992b6d87f395f8bb23235e1af668e72bb1a5d4668e7e59672aba817446379a3cabe3797ac1abcc7dad53b22d4ea4a3c71a491d88bd49e7c472b5e20f149c5ab69708bee8149c2583f93334e54132e18711fb3f59f8df1f4f5671ce0dcb54b8b45d9ef5097dd8630115b4f44352aefd73ca8a051536bdb4fcfdfaf1062e16d6c92266a056951d7799bea9b46d79a0c602fb3136450ba9e1a2a4a09490f388d3ffab52a22e66f82d106a91412f10a283a697eac2a7c3c0bb31ace9d1202c3d716d7ab666669d4880b72c7724faff0ccdf6908e48914f4642dfb6f9732cc2cf5331333e59e8f60d861dc531f846e4ae2f6944d523f5d06ab444d45b55f3d9771a1f395b9ba0edb93f868cdb2f15c3e7cb662fe6e62c76843afb329c517676a82b3d5e644df8671f5aa0478372125b6cb8648c34393186582eba5fd62984144d35ba074046fd52a187ef17a5915c2a5cbf90816678bfaff5bf21d2398c72e8183b4ed5c327010adbe0a6ef20f2938f5ac118b5f0f65ea32c40e73384d7dae11b816bbcfbacbb1db75cbef17c43333917067245232a9efa6984c612c7e415c71a5aa7109b5674eec99f07164ceba3b8cbf8fdfadf324b5acf89811aef310308588447442a083a8eea2bb7060e89ab438c4f76178947d9ede21fd2ab327d56c2d6705d41162d8795c1d55146f0bdc9e901981a04cbd8dce76037d9ec27318fe087b73bd0b4b8995513d950ee5cf1691b1c921fcbe69ef951d465bfc91fbd403b4ddb8b228507abdd09fb68cb0d9d1f327dfd3cbcab9e6a72b913d1c49c53f26c09fedd64e86c7c5715cd6820c034690cce9a96a96711556e7573439acd3361c40ebba90d98ebb7286ba1d58fa7bd9eaad3b1b670dd3369ddc719a1b39ef292ccd94fb46e4c674d88e262c597b4bbe201741f334130ba00cd4993c75bf3db75e8e3c703d8d1116f389f1152798ea49eabb3b12b808a238f2722968f28b779d9e5cde734425023c7bdb6d8eb8fc530261da60129bde2fb8c162831648e4ac169a845eec29d3610a5c5f2a62c08
8e84bb205b484326cc7cd0b0be5de6d4a0587ac00722aafcfe40b16c5f6a062cdd9fe23ec1d413c94fabd3894b5b9dac0e21ca12f25bc767b22724e9cd6499d8fde8aaa20ef6a15af813b3a713df3ee4d8c7f447e625a8af9950a3e8ebcc1139f8ef03c5fe003e5d7ed89096527771033ef945822cf625f703a7f25bb42f492eae542ebd1ec779854189df226d254fd93ee71183cbc3c0e9fc0676e3c6df363a783f3f3eff3d34586728d663334d4347180436c201daf2dfa307dd94a3b66d650c215341a590e77489c8e61439e6d9932b64fbe8292e57ef859204caa4ea5cc477fddefa4eed652c2f587b2909be2d306c176f0a263865b7ded7dcc3e0029f3f990df4e81bd06a189855970886a203fb9c7396eeaf44478ec26c19e7b0592f772d46636422135a7f9df24c5199c1e92a525b90a7666aeed46c59dfb7680d3f0e49c9c1457a322d2d459399810b8de1b6c40c12e64fd679bb58ad99b4329e5a5f45ce69fca80b9105cf857a1403952fbaf32daeb9750c88be3b749efe61b186f35170e72688a478b695319267452cdc4c4e668b53af2c23d5b5b29f3d7260b5a4524f3a9b6baa71c8f4cf7c95eb29050c3916c59d5686ff5919726402f06523674c3b8d1893d98a44154d0d240dd357e0bfd11c724ea9e02abf781230929e440084b94852e9db4fad27f4b8b6ddd5c92933d39470b4552cdb628f9710646289e271e7ab2946d0ae6c0a595178b6976b13dccc9e0c8040341b88a13787b93546139ac49b5a9b70eaae3a9311eb268f38bcbf3eba8284fffc19925c16ba00ff9bb8aeb095ef566c065d286b1bc81925df7e7f055706524047aaf084076bae5f2e9845acbe1085f708e07b220b54d12f2184001f6ad65a31978a9c95c71344dc4fde7bc1fa371d7d97677a8afd17bce479dc28dc81079719ae39fdf8325bee94721b8470981cff34b03457ca35619e146a032239e442dda0fb721a15241bacee5b4fc271ed4397e6ecd4d58ca20d0cc00af7ee4a5087f2c0f8b31926bbcb150e50f43de275d2b1fa8db8728f351e3be2860ddec9815007b918513420db484b6fc288ed00670e66db694e741a6a397ca9fe4e3b51e9913540ac5954fc8646f17fab5be98ea265478d00000000006102a54efb4c9eee0001f115c6ee0100a27bda5cb1c467fb020000000004595a'
def decompressmaplzma(code):
    """Decompress a hex-encoded LZMA blob and return its bits as a '0'/'1' string.

    The result has no leading zeros (it is the binary form of the payload
    interpreted as one big integer).
    """
    raw = lzma.decompress(bytes.fromhex(code))
    as_int = int(raw.hex(), 16)
    return format(as_int, 'b')
def decodethebetterstuff(themap):
    """Fold the bit string (skipping its first char) back into an integer.

    Walks the bits with a doubling weight, XOR-ing the accumulator with
    +weight or -weight depending on the bit and on the accumulator's sign,
    then derives the final value from the last weight and |accumulator|.
    """
    acc = 0
    weight = 1
    for bit in themap[1:]:
        # XOR direction flips when the bit and the accumulator's sign "agree".
        if (acc < 0) == (bit == '0'):
            acc ^= weight
        else:
            acc ^= -weight
        weight <<= 1
    return ((weight >> 1) - 1) - (abs(acc) >> 1)
# Decode the embedded payload and dump it to disk: raw LZMA bytes to .bin,
# the decoded integer's bytes to .txt.
themap = decompressmaplzma(code)
origint = decodethebetterstuff(themap)
#print(bytes.fromhex(hex(origint)[2:]).decode())
with open('gnu_repeat4.bin', 'wb') as binfile:
    binfile.write(bytes.fromhex(code))
with open('gnu_repeat4.txt', 'wb') as txtfile:
    txtfile.write(bytes.fromhex(hex(origint)[2:]))
print("")
print("gnu_repeat4.txt and gnu_repeat4.bin has been created.")
| 108.637681
| 5,687
| 0.924493
| 280
| 7,496
| 24.735714
| 0.425
| 0.006353
| 0.002599
| 0.002888
| 0.020791
| 0.005198
| 0.005198
| 0.005198
| 0.005198
| 0
| 0
| 0.503021
| 0.05056
| 7,496
| 68
| 5,688
| 110.235294
| 0.470142
| 0.125133
| 0
| 0.393939
| 0
| 0
| 0.88292
| 0.869299
| 0
| 1
| 0
| 0
| 0
| 1
| 0.060606
| false
| 0
| 0.030303
| 0.030303
| 0.151515
| 0.060606
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
e7f797f35f57a5e11ff23730a05715efcef45845
| 1,108
|
py
|
Python
|
alex/resources/asr/voip_cs/kaldi/download_models.py
|
beka-evature/alex
|
e8fdc6f2d908d7a1911b18f29c218ae58d19ed6f
|
[
"Apache-2.0"
] | 1
|
2015-10-19T17:36:27.000Z
|
2015-10-19T17:36:27.000Z
|
alex/resources/asr/voip_cs/kaldi/download_models.py
|
beka-evature/alex
|
e8fdc6f2d908d7a1911b18f29c218ae58d19ed6f
|
[
"Apache-2.0"
] | null | null | null |
alex/resources/asr/voip_cs/kaldi/download_models.py
|
beka-evature/alex
|
e8fdc6f2d908d7a1911b18f29c218ae58d19ed6f
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# encoding: utf-8
from alex.utils.config import online_update
if __name__ == '__main__':
    import autopath

    # Description files
    for _path in ('resources/asr/voip_cs/kaldi/results.log',
                  'resources/asr/voip_cs/kaldi/experiment_bash_vars.log',
                  'resources/asr/voip_cs/kaldi/alex_gitlog.log',
                  'resources/asr/voip_cs/kaldi/alex_gitdiff.log'):
        online_update(_path)

    # Models
    for _path in ('resources/asr/voip_cs/kaldi/mfcc.conf',
                  'resources/asr/voip_cs/kaldi/phones.txt',
                  'resources/asr/voip_cs/kaldi/silence.csl',
                  'resources/asr/voip_cs/kaldi/tri2a.mdl',
                  'resources/asr/voip_cs/kaldi/tri2a.tree',
                  'resources/asr/voip_cs/kaldi/tri2b.mdl',
                  'resources/asr/voip_cs/kaldi/tri2b.tree',
                  'resources/asr/voip_cs/kaldi/tri2b.mat',
                  'resources/asr/voip_cs/kaldi/tri2b_bmmi.mdl',
                  'resources/asr/voip_cs/kaldi/tri2b_bmmi.tree',
                  'resources/asr/voip_cs/kaldi/tri2b_bmmi.mat'):
        online_update(_path)
| 41.037037
| 73
| 0.762635
| 164
| 1,108
| 4.871951
| 0.27439
| 0.2403
| 0.394243
| 0.450563
| 0.769712
| 0.769712
| 0.769712
| 0.594493
| 0.443054
| 0
| 0
| 0.009027
| 0.100181
| 1,108
| 26
| 74
| 42.615385
| 0.792377
| 0.055054
| 0
| 0
| 0
| 0
| 0.588686
| 0.581016
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.111111
| 0
| 0.111111
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f0012b301d911be0dc09f80ba22b40b347a401c5
| 195
|
py
|
Python
|
CodeWars/7 Kyu/Moves in squared strings (I).py
|
anubhab-code/Competitive-Programming
|
de28cb7d44044b9e7d8bdb475da61e37c018ac35
|
[
"MIT"
] | null | null | null |
CodeWars/7 Kyu/Moves in squared strings (I).py
|
anubhab-code/Competitive-Programming
|
de28cb7d44044b9e7d8bdb475da61e37c018ac35
|
[
"MIT"
] | null | null | null |
CodeWars/7 Kyu/Moves in squared strings (I).py
|
anubhab-code/Competitive-Programming
|
de28cb7d44044b9e7d8bdb475da61e37c018ac35
|
[
"MIT"
] | null | null | null |
def vert_mirror(strng):
    """Mirror each row of the newline-separated string left-to-right."""
    flipped_rows = [row[::-1] for row in strng.split("\n")]
    return "\n".join(flipped_rows)
def hor_mirror(strng):
    """Mirror the newline-separated string top-to-bottom (reverse row order)."""
    rows = strng.split("\n")
    rows.reverse()
    return "\n".join(rows)
def oper(fct, strng):
    """Apply the transformation *fct* to *strng* and return its result."""
    result = fct(strng)
    return result
| 24.375
| 56
| 0.610256
| 33
| 195
| 3.545455
| 0.454545
| 0.282051
| 0.290598
| 0.307692
| 0.376068
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012121
| 0.153846
| 195
| 8
| 57
| 24.375
| 0.69697
| 0
| 0
| 0
| 0
| 0
| 0.040816
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
f07b3ae150ab897a8c4d993d776bb8dc54a732f4
| 14,477
|
py
|
Python
|
GUI/PyQt/utils/Training_Test_Split.py
|
thomaskuestner/CNNArt
|
c2fc639dd2ce035f6ca90113290682a0ccd26fb8
|
[
"Apache-2.0"
] | 22
|
2018-04-27T21:28:46.000Z
|
2021-12-24T06:44:55.000Z
|
GUI/PyQt/utils/Training_Test_Split.py
|
thomaskuestner/CNNArt
|
c2fc639dd2ce035f6ca90113290682a0ccd26fb8
|
[
"Apache-2.0"
] | 81
|
2017-11-09T17:23:15.000Z
|
2020-01-28T22:54:13.000Z
|
GUI/PyQt/utils/Training_Test_Split.py
|
thomaskuestner/CNNArt
|
c2fc639dd2ce035f6ca90113290682a0ccd26fb8
|
[
"Apache-2.0"
] | 18
|
2017-11-13T16:12:17.000Z
|
2020-08-27T10:17:34.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 02 15:59:36 2017
@author: Sebastian Milde, Thomas Kuestner
"""
import dis
import inspect
import math
import numpy as np
from sklearn.model_selection import KFold
from DLart.Constants_DLart import *
def expecting():
    """Return how many values the caller is expecting"""
    # Walk two frames up: past this function's caller to the *caller of the
    # caller*, whose assignment target we want to inspect.
    f = inspect.currentframe()
    f = f.f_back.f_back
    c = f.f_code
    # f_lasti is the offset of the instruction currently executing in that
    # frame, i.e. the call into our caller.
    i = f.f_lasti
    bytecode = c.co_code
    # NOTE(review): the "+ 3" skips a CALL opcode plus a 2-byte oparg — this
    # matches the pre-3.6 variable-width bytecode layout. On CPython >= 3.6
    # (fixed 2-byte wordcode) these offsets differ — confirm the target
    # interpreter version before relying on this.
    instruction = bytecode[i + 3]
    if instruction == dis.opmap['UNPACK_SEQUENCE']:
        # Caller unpacks the result (e.g. `a, b = fn()`): the oparg byte
        # holds the number of targets.
        howmany = bytecode[i + 4]
        return howmany
    elif instruction == dis.opmap['POP_TOP']:
        # Result is discarded (bare expression statement).
        return 0
    # Any other use (single assignment, argument, etc.): one value.
    return 1
def fSplitDataset(allPatches, allY, allPats, sSplitting, patchSize, patchOverlap, testTrainingDatasetRatio=0,
                  validationTrainRatio=0, outPutPath=None, nfolds=0, isRandomShuffle=True):
    """Split patches and labels into train/validation/test sets per `sSplitting`.

    :param allPatches: numpy array of patches; if it arrives patch-first
        (leading dims equal to patchSize) it is transposed to sample-first order
    :param allY: labels, shuffled/split in lockstep with allPatches
    :param allPats: per-sample patient ids (patient CV) / fold-count source (KFold)
    :param sSplitting: one of the *_SPLITTING constants from DLart.Constants_DLart
    :param patchSize: 2- or 3-element patch shape; selects 2D vs 3D handling
    :param patchOverlap: unused in this function body
    :param testTrainingDatasetRatio: fraction of samples reserved for the test set
    :param validationTrainRatio: fraction of the remaining samples used for validation
    :param outPutPath: unused in this function body
    :param nfolds: number of CV folds; 0 means len(allPats) folds
    :param isRandomShuffle: shuffle sample order (simple-random mode only)
    :return: lists of arrays whose layout depends on the splitting mode; may
        return None when no mode matches or the caller expects no values
    """
    # TODO: adapt path
    # How many return values does the caller unpack? (bytecode introspection)
    iReturn = expecting()
    # iReturn = 1000
    # 2D or 3D patching?
    if len(patchSize) == 2:
        # 2D patches are used
        if allPatches.shape[0] == patchSize[0] and allPatches.shape[1] == patchSize[1]:
            allPatches = np.transpose(allPatches, (2, 0, 1))
    elif len(patchSize) == 3:
        # 3D patches are used
        if allPatches.shape[0] == patchSize[0] and allPatches.shape[1] == patchSize[1] and allPatches.shape[2] == \
                patchSize[2]:
            allPatches = np.transpose(allPatches, (3, 0, 1, 2))
    if sSplitting == SIMPLE_RANDOM_SAMPLE_SPLITTING:
        # splitting
        indexSlices = range(allPatches.shape[0])
        if isRandomShuffle:
            indexSlices = np.random.permutation(indexSlices)
        if len(patchSize) == 2:
            # 2D patching
            allPatches = allPatches[indexSlices, :, :]
        elif len(patchSize) == 3:
            # 3D patching
            allPatches = allPatches[indexSlices, :, :, :]
        shapeAllY = allY.shape
        if len(shapeAllY) > 1:
            if allY.shape[0] == patchSize[0] and allY.shape[1] == patchSize[1]:
                allY = np.transpose(allY, (2, 0, 1))
        allY = allY[indexSlices]
        # num of samples in test set and validation set
        numAllPatches = allPatches.shape[0]
        numSamplesTest = math.floor(testTrainingDatasetRatio * numAllPatches)
        numSamplesValidation = math.floor(validationTrainRatio * (numAllPatches - numSamplesTest))
        if len(patchSize) == 2:
            # 2D patching
            # subarrays as no-copy views (array slices)
            X_test = allPatches[:numSamplesTest, :, :]
            X_valid = allPatches[numSamplesTest:(numSamplesTest + numSamplesValidation), :, :]
            X_train = allPatches[(numSamplesTest + numSamplesValidation):, :, :]
        elif len(patchSize) == 3:
            # 3D patching
            # subarrays as no-copy views (array slices)
            X_test = allPatches[:numSamplesTest, :, :, :]
            X_valid = allPatches[numSamplesTest:(numSamplesTest + numSamplesValidation), :, :, :]
            X_train = allPatches[(numSamplesTest + numSamplesValidation):, :, :, :]
        y_test = allY[:numSamplesTest]
        y_valid = allY[numSamplesTest:(numSamplesTest + numSamplesValidation)]
        y_train = allY[(numSamplesTest + numSamplesValidation):]
        return [X_train], [y_train], [X_valid], [y_valid], [X_test], [y_test]  # embed in a 1-fold list
    elif sSplitting == CROSS_VALIDATION_SPLITTING:
        # split into test/train sets
        # shuffle
        indexSlices = range(allPatches.shape[0])
        indexSlices = np.random.permutation(indexSlices)
        # 2D-style indexing; on a 4-D (3D-patch) array the trailing axes are kept
        allPatches = allPatches[indexSlices, :, :]
        allY = allY[indexSlices]
        # num of samples in test set
        numAllPatches = allPatches.shape[0]
        numSamplesTest = math.floor(testTrainingDatasetRatio * numAllPatches)
        # subarrays as no-copy views (array slices)
        xTest = allPatches[:numSamplesTest, :, :]
        yTest = allY[:numSamplesTest]
        xTrain = allPatches[numSamplesTest:, :, :]
        yTrain = allY[numSamplesTest:]
        # split training dataset into n folds
        if nfolds == 0:
            kf = KFold(n_splits=len(allPats))
        else:
            kf = KFold(n_splits=nfolds)
        ind_split = 0
        X_trainFold = []
        X_testFold = []
        y_trainFold = []
        y_testFold = []
        for train_index, test_index in kf.split(xTrain):
            X_train, X_test = xTrain[train_index], xTrain[test_index]
            y_train, y_test = yTrain[train_index], yTrain[test_index]
            X_trainFold.append(X_train)
            X_testFold.append(X_test)
            y_trainFold.append(y_train)
            y_testFold.append(y_test)
            ind_split += 1
        X_trainFold = np.asarray(X_trainFold)
        X_testFold = np.asarray(X_testFold)
        y_trainFold = np.asarray(y_trainFold)
        y_testFold = np.asarray(y_testFold)
        return [X_trainFold], [y_trainFold], [X_testFold], [y_testFold], [xTest], [yTest]
    elif sSplitting == PATIENT_CROSS_VALIDATION_SPLITTING:
        # Leave-one-patient-out: fold i tests on samples whose allPats == i.
        unique_pats = len(allPats)
        X_trainFold = []
        X_testFold = []
        y_trainFold = []
        y_testFold = []
        for ind_split in range(unique_pats):
            train_index = np.where(allPats != ind_split)[0]
            test_index = np.where(allPats == ind_split)[0]
            X_train, X_test = allPatches[train_index], allPatches[test_index]
            y_train, y_test = allY[train_index], allY[test_index]
            X_trainFold.append(X_train)
            X_testFold.append(X_test)
            y_trainFold.append(y_train)
            y_testFold.append(y_test)
        X_trainFold = np.asarray(X_trainFold, dtype='f')
        X_testFold = np.asarray(X_testFold, dtype='f')
        y_trainFold = np.asarray(y_trainFold, dtype='f')
        y_testFold = np.asarray(y_testFold, dtype='f')
        X_valFold = np.asarray([])
        y_valFold = np.asarray([])
        # Only return when the caller unpacks values; otherwise fall through to None.
        if iReturn > 0:
            return [X_trainFold], [y_trainFold], [X_valFold], [y_valFold], [X_testFold], [y_testFold]
def fSplitSegmentationDataset(allPatches, allY, allSegmentationMasks, allPats, sSplitting, patchSize, patchOverlap,
                              testTrainingDatasetRatio=0, validationTrainRatio=0, outPutPath=None, nfolds=0,
                              isRandomShuffle=True):
    """Split patches, labels and segmentation masks per `sSplitting`.

    Same contract as fSplitDataset, with `allSegmentationMasks` transposed,
    shuffled and sliced in lockstep with `allPatches` in the simple-random
    mode. `patchOverlap` and `outPutPath` are unused in this body.

    :return: lists of arrays whose layout depends on the splitting mode
        (mask folds are only produced in the simple-random mode); may return
        None when no mode matches or the caller expects no values
    """
    # TODO: adapt path
    # How many return values does the caller unpack? (bytecode introspection)
    iReturn = expecting()
    # iReturn = 1000
    # 2D or 3D patching?
    if len(patchSize) == 2:
        # 2D patches are used
        if allPatches.shape[0] == patchSize[0] and allPatches.shape[1] == patchSize[1]:
            allPatches = np.transpose(allPatches, (2, 0, 1))
            allSegmentationMasks = np.transpose(allSegmentationMasks, (2, 0, 1))
    elif len(patchSize) == 3:
        # 3D patches are used
        if allPatches.shape[0] == patchSize[0] and allPatches.shape[1] == patchSize[1] and allPatches.shape[2] == \
                patchSize[2]:
            allPatches = np.transpose(allPatches, (3, 0, 1, 2))
            allSegmentationMasks = np.transpose(allSegmentationMasks, (3, 0, 1, 2))
    if sSplitting == SIMPLE_RANDOM_SAMPLE_SPLITTING:
        # splitting
        indexSlices = range(allPatches.shape[0])
        if isRandomShuffle:
            indexSlices = np.random.permutation(indexSlices)
        if len(patchSize) == 2:
            # 2D patching
            allPatches = allPatches[indexSlices, :, :]
            allSegmentationMasks = allSegmentationMasks[indexSlices, :, :]
        elif len(patchSize) == 3:
            # 3D patching
            allPatches = allPatches[indexSlices, :, :, :]
            allSegmentationMasks = allSegmentationMasks[indexSlices, :, :, :]
        shapeAllY = allY.shape
        if len(shapeAllY) > 1:
            if allY.shape[0] == patchSize[0] and allY.shape[1] == patchSize[1]:
                allY = np.transpose(allY, (2, 0, 1))
        allY = allY[indexSlices]
        # num of samples in test set and validation set
        numAllPatches = allPatches.shape[0]
        numSamplesTest = math.floor(testTrainingDatasetRatio * numAllPatches)
        numSamplesValidation = math.floor(validationTrainRatio * (numAllPatches - numSamplesTest))
        if len(patchSize) == 2:
            # 2D patching
            # subarrays as no-copy views (array slices)
            X_test = allPatches[:numSamplesTest, :, :]
            Y_segMasks_test = allSegmentationMasks[:numSamplesTest, :, :]
            X_valid = allPatches[numSamplesTest:(numSamplesTest + numSamplesValidation), :, :]
            Y_segMasks_valid = allSegmentationMasks[numSamplesTest:(numSamplesTest + numSamplesValidation), :, :]
            X_train = allPatches[(numSamplesTest + numSamplesValidation):, :, :]
            Y_segMasks_train = allSegmentationMasks[(numSamplesTest + numSamplesValidation):, :, :]
        elif len(patchSize) == 3:
            # 3D patching
            # subarrays as no-copy views (array slices)
            X_test = allPatches[:numSamplesTest, :, :, :]
            Y_segMasks_test = allSegmentationMasks[:numSamplesTest, :, :, :]
            X_valid = allPatches[numSamplesTest:(numSamplesTest + numSamplesValidation), :, :, :]
            Y_segMasks_valid = allSegmentationMasks[numSamplesTest:(numSamplesTest + numSamplesValidation), :, :, :]
            X_train = allPatches[(numSamplesTest + numSamplesValidation):, :, :, :]
            Y_segMasks_train = allSegmentationMasks[(numSamplesTest + numSamplesValidation):, :, :, :]
        y_test = allY[:numSamplesTest]
        y_valid = allY[numSamplesTest:(numSamplesTest + numSamplesValidation)]
        y_train = allY[(numSamplesTest + numSamplesValidation):]
        return [X_train], [y_train], [Y_segMasks_train], [X_valid], [y_valid], [Y_segMasks_valid], [X_test], [
            y_test], [Y_segMasks_test]  # embed in a 1-fold list
    elif sSplitting == CROSS_VALIDATION_SPLITTING:
        # NOTE(review): allSegmentationMasks is not shuffled or split in this
        # branch — confirm whether cross-validation is meant to support masks.
        # split into test/train sets
        # shuffle
        indexSlices = range(allPatches.shape[0])
        indexSlices = np.random.permutation(indexSlices)
        allPatches = allPatches[indexSlices, :, :]
        allY = allY[indexSlices]
        # num of samples in test set
        numAllPatches = allPatches.shape[0]
        numSamplesTest = math.floor(testTrainingDatasetRatio * numAllPatches)
        # subarrays as no-copy views (array slices)
        xTest = allPatches[:numSamplesTest, :, :]
        yTest = allY[:numSamplesTest]
        xTrain = allPatches[numSamplesTest:, :, :]
        yTrain = allY[numSamplesTest:]
        # split training dataset into n folds
        if nfolds == 0:
            kf = KFold(n_splits=len(allPats))
        else:
            kf = KFold(n_splits=nfolds)
        ind_split = 0
        X_trainFold = []
        X_testFold = []
        y_trainFold = []
        y_testFold = []
        for train_index, test_index in kf.split(xTrain):
            X_train, X_test = xTrain[train_index], xTrain[test_index]
            y_train, y_test = yTrain[train_index], yTrain[test_index]
            X_trainFold.append(X_train)
            X_testFold.append(X_test)
            y_trainFold.append(y_train)
            y_testFold.append(y_test)
            ind_split += 1
        X_trainFold = np.asarray(X_trainFold)
        X_testFold = np.asarray(X_testFold)
        y_trainFold = np.asarray(y_trainFold)
        y_testFold = np.asarray(y_testFold)
        return [X_trainFold], [y_trainFold], [X_testFold], [y_testFold], [xTest], [yTest]
    elif sSplitting == PATIENT_CROSS_VALIDATION_SPLITTING:
        # Leave-one-patient-out: fold i tests on samples whose allPats == i.
        # NOTE(review): masks are not split here either — confirm intended.
        unique_pats = len(allPats)
        X_trainFold = []
        X_testFold = []
        y_trainFold = []
        y_testFold = []
        for ind_split in range(unique_pats):
            train_index = np.where(allPats != ind_split)[0]
            test_index = np.where(allPats == ind_split)[0]
            X_train, X_test = allPatches[train_index], allPatches[test_index]
            y_train, y_test = allY[train_index], allY[test_index]
            X_trainFold.append(X_train)
            X_testFold.append(X_test)
            y_trainFold.append(y_train)
            y_testFold.append(y_test)
        X_trainFold = np.asarray(X_trainFold, dtype='f')
        X_testFold = np.asarray(X_testFold, dtype='f')
        y_trainFold = np.asarray(y_trainFold, dtype='f')
        y_testFold = np.asarray(y_testFold, dtype='f')
        X_valFold = np.asarray([])
        y_valFold = np.asarray([])
        # Only return when the caller unpacks values; otherwise fall through to None.
        if iReturn > 0:
            return [X_trainFold], [y_trainFold], [X_valFold], [y_valFold], [X_testFold], [y_testFold]
def TransformDataset(allPatches, allY, patchSize, patchOverlap, isRandomShuffle=True, isUsingSegmentation=False, allSegmentationMasks=None):
    """Bring patches (and optional masks) into sample-first order, optionally
    shuffle them, and wrap everything as 1-fold lists.

    :param allPatches: patch array; transposed to sample-first order when its
        leading dims equal patchSize
    :param allY: labels, reordered in lockstep with the patches
    :param patchSize: 2- or 3-element patch shape; selects 2D vs 3D handling
    :param patchOverlap: unused in this body
    :param isRandomShuffle: permute the sample order when True
    :param isUsingSegmentation: also transpose/shuffle/return allSegmentationMasks
    :return: ([X], [y]) or ([X], [y], [masks]) when segmentation is used
    """
    ndim_patch = len(patchSize)
    # Move the sample axis to the front when data arrives patch-first.
    if ndim_patch == 2:
        if allPatches.shape[0] == patchSize[0] and allPatches.shape[1] == patchSize[1]:
            allPatches = np.transpose(allPatches, (2, 0, 1))
            if isUsingSegmentation:
                allSegmentationMasks = np.transpose(allSegmentationMasks, (2, 0, 1))
    elif ndim_patch == 3:
        if allPatches.shape[0] == patchSize[0] and allPatches.shape[1] == patchSize[1] and allPatches.shape[2] == patchSize[2]:
            allPatches = np.transpose(allPatches, (3, 0, 1, 2))
            if isUsingSegmentation:
                allSegmentationMasks = np.transpose(allSegmentationMasks, (3, 0, 1, 2))
    # Identity order unless a shuffle is requested.
    order = range(allPatches.shape[0])
    if isRandomShuffle:
        order = np.random.permutation(order)
    if ndim_patch == 2:
        allPatches = allPatches[order, :, :]
        if isUsingSegmentation:
            allSegmentationMasks = allSegmentationMasks[order, :, :]
    elif ndim_patch == 3:
        allPatches = allPatches[order, :, :, :]
        if isUsingSegmentation:
            allSegmentationMasks = allSegmentationMasks[order, :, :, :]
    # Labels may also arrive patch-first; align them, then apply the same order.
    if len(allY.shape) > 1:
        if allY.shape[0] == patchSize[0] and allY.shape[1] == patchSize[1]:
            allY = np.transpose(allY, (2, 0, 1))
    allY = allY[order]
    if isUsingSegmentation:
        return [allPatches], [allY], [allSegmentationMasks]
    else:
        return [allPatches], [allY]
| 36.931122
| 140
| 0.616081
| 1,525
| 14,477
| 5.687869
| 0.10623
| 0.041503
| 0.027669
| 0.016601
| 0.910653
| 0.901199
| 0.896588
| 0.896588
| 0.884367
| 0.879525
| 0
| 0.017033
| 0.27409
| 14,477
| 391
| 141
| 37.025575
| 0.808355
| 0.075637
| 0
| 0.847059
| 0
| 0
| 0.002251
| 0
| 0
| 0
| 0
| 0.002558
| 0
| 1
| 0.015686
| false
| 0
| 0.023529
| 0
| 0.082353
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f07cf14135a8d2bdbfaf33a33a53dc4f17a3d911
| 37,155
|
py
|
Python
|
ingenico/connect/sdk/merchant/payments/payments_client.py
|
festicket/connect-sdk-python3
|
c399c6443789dd978f319c89e1ebd387c812a77b
|
[
"MIT"
] | 12
|
2016-09-26T21:46:31.000Z
|
2020-12-23T18:44:54.000Z
|
ingenico/connect/sdk/merchant/payments/payments_client.py
|
festicket/connect-sdk-python3
|
c399c6443789dd978f319c89e1ebd387c812a77b
|
[
"MIT"
] | 3
|
2020-05-02T16:53:02.000Z
|
2020-06-02T12:49:51.000Z
|
ingenico/connect/sdk/merchant/payments/payments_client.py
|
festicket/connect-sdk-python3
|
c399c6443789dd978f319c89e1ebd387c812a77b
|
[
"MIT"
] | 11
|
2017-07-16T00:55:28.000Z
|
2021-09-24T17:00:49.000Z
|
#
# This class was auto-generated from the API references found at
# https://epayments-api.developer-ingenico.com/s2sapi/v1/
#
from ingenico.connect.sdk.api_resource import ApiResource
from ingenico.connect.sdk.response_exception import ResponseException
from ingenico.connect.sdk.domain.capture.capture_response import CaptureResponse
from ingenico.connect.sdk.domain.capture.captures_response import CapturesResponse
from ingenico.connect.sdk.domain.dispute.dispute_response import DisputeResponse
from ingenico.connect.sdk.domain.dispute.disputes_response import DisputesResponse
from ingenico.connect.sdk.domain.errors.error_response import ErrorResponse
from ingenico.connect.sdk.domain.payment.cancel_approval_payment_response import CancelApprovalPaymentResponse
from ingenico.connect.sdk.domain.payment.cancel_payment_response import CancelPaymentResponse
from ingenico.connect.sdk.domain.payment.complete_payment_response import CompletePaymentResponse
from ingenico.connect.sdk.domain.payment.create_payment_response import CreatePaymentResponse
from ingenico.connect.sdk.domain.payment.device_fingerprint_details import DeviceFingerprintDetails
from ingenico.connect.sdk.domain.payment.find_payments_response import FindPaymentsResponse
from ingenico.connect.sdk.domain.payment.payment_approval_response import PaymentApprovalResponse
from ingenico.connect.sdk.domain.payment.payment_error_response import PaymentErrorResponse
from ingenico.connect.sdk.domain.payment.payment_response import PaymentResponse
from ingenico.connect.sdk.domain.payment.third_party_status_response import ThirdPartyStatusResponse
from ingenico.connect.sdk.domain.refund.refund_error_response import RefundErrorResponse
from ingenico.connect.sdk.domain.refund.refund_response import RefundResponse
from ingenico.connect.sdk.domain.refund.refunds_response import RefundsResponse
from ingenico.connect.sdk.domain.token.create_token_response import CreateTokenResponse
class PaymentsClient(ApiResource):
"""
Payments client. Thread-safe.
"""
def __init__(self, parent, path_context):
"""
:param parent: :class:`ingenico.connect.sdk.api_resource.ApiResource`
:param path_context: dict[str, str]
"""
super(PaymentsClient, self).__init__(parent, path_context)
def create(self, body, context=None):
"""
Resource /{merchantId}/payments - Create payment
See also https://epayments-api.developer-ingenico.com/s2sapi/v1/en_US/python/payments/create.html
:param body: :class:`ingenico.connect.sdk.domain.payment.create_payment_request.CreatePaymentRequest`
:param context: :class:`ingenico.connect.sdk.call_context.CallContext`
:return: :class:`ingenico.connect.sdk.domain.payment.create_payment_response.CreatePaymentResponse`
:raise: DeclinedPaymentException if the Ingenico ePayments platform declined / rejected the payment. The payment result will be available from the exception.
:raise: ValidationException if the request was not correct and couldn't be processed (HTTP status code 400)
:raise: AuthorizationException if the request was not allowed (HTTP status code 403)
:raise: ReferenceException if an object was attempted to be referenced that doesn't exist or has been removed,
or there was a conflict (HTTP status code 404, 409 or 410)
:raise: GlobalCollectException if something went wrong at the Ingenico ePayments platform,
the Ingenico ePayments platform was unable to process a message from a downstream partner/acquirer,
or the service that you're trying to reach is temporary unavailable (HTTP status code 500, 502 or 503)
:raise: ApiException if the Ingenico ePayments platform returned any other error
"""
uri = self._instantiate_uri("/v1/{merchantId}/payments", None)
try:
return self._communicator.post(
uri,
self._client_headers,
None,
body,
CreatePaymentResponse,
context)
except ResponseException as e:
error_type = PaymentErrorResponse
error_object = self._communicator.marshaller.unmarshal(e.body, error_type)
raise self._create_exception(e.status_code, e.body, error_object, context)
def find(self, query, context=None):
"""
Resource /{merchantId}/payments - Find payments
See also https://epayments-api.developer-ingenico.com/s2sapi/v1/en_US/python/payments/find.html
:param query: :class:`ingenico.connect.sdk.merchant.payments.find_payments_params.FindPaymentsParams`
:param context: :class:`ingenico.connect.sdk.call_context.CallContext`
:return: :class:`ingenico.connect.sdk.domain.payment.find_payments_response.FindPaymentsResponse`
:raise: ValidationException if the request was not correct and couldn't be processed (HTTP status code 400)
:raise: AuthorizationException if the request was not allowed (HTTP status code 403)
:raise: ReferenceException if an object was attempted to be referenced that doesn't exist or has been removed,
or there was a conflict (HTTP status code 404, 409 or 410)
:raise: GlobalCollectException if something went wrong at the Ingenico ePayments platform,
the Ingenico ePayments platform was unable to process a message from a downstream partner/acquirer,
or the service that you're trying to reach is temporary unavailable (HTTP status code 500, 502 or 503)
:raise: ApiException if the Ingenico ePayments platform returned any other error
"""
uri = self._instantiate_uri("/v1/{merchantId}/payments", None)
try:
return self._communicator.get(
uri,
self._client_headers,
query,
FindPaymentsResponse,
context)
except ResponseException as e:
error_type = ErrorResponse
error_object = self._communicator.marshaller.unmarshal(e.body, error_type)
raise self._create_exception(e.status_code, e.body, error_object, context)
def get(self, payment_id, context=None):
"""
Resource /{merchantId}/payments/{paymentId} - Get payment
See also https://epayments-api.developer-ingenico.com/s2sapi/v1/en_US/python/payments/get.html
:param payment_id: str
:param context: :class:`ingenico.connect.sdk.call_context.CallContext`
:return: :class:`ingenico.connect.sdk.domain.payment.payment_response.PaymentResponse`
:raise: ValidationException if the request was not correct and couldn't be processed (HTTP status code 400)
:raise: AuthorizationException if the request was not allowed (HTTP status code 403)
:raise: ReferenceException if an object was attempted to be referenced that doesn't exist or has been removed,
or there was a conflict (HTTP status code 404, 409 or 410)
:raise: GlobalCollectException if something went wrong at the Ingenico ePayments platform,
the Ingenico ePayments platform was unable to process a message from a downstream partner/acquirer,
or the service that you're trying to reach is temporary unavailable (HTTP status code 500, 502 or 503)
:raise: ApiException if the Ingenico ePayments platform returned any other error
"""
path_context = {
"paymentId": payment_id,
}
uri = self._instantiate_uri("/v1/{merchantId}/payments/{paymentId}", path_context)
try:
return self._communicator.get(
uri,
self._client_headers,
None,
PaymentResponse,
context)
except ResponseException as e:
error_type = ErrorResponse
error_object = self._communicator.marshaller.unmarshal(e.body, error_type)
raise self._create_exception(e.status_code, e.body, error_object, context)
def complete(self, payment_id, body, context=None):
"""
Resource /{merchantId}/payments/{paymentId}/complete - Complete payment
See also https://epayments-api.developer-ingenico.com/s2sapi/v1/en_US/python/payments/complete.html
:param payment_id: str
:param body: :class:`ingenico.connect.sdk.domain.payment.complete_payment_request.CompletePaymentRequest`
:param context: :class:`ingenico.connect.sdk.call_context.CallContext`
:return: :class:`ingenico.connect.sdk.domain.payment.complete_payment_response.CompletePaymentResponse`
:raise: ValidationException if the request was not correct and couldn't be processed (HTTP status code 400)
:raise: AuthorizationException if the request was not allowed (HTTP status code 403)
:raise: ReferenceException if an object was attempted to be referenced that doesn't exist or has been removed,
or there was a conflict (HTTP status code 404, 409 or 410)
:raise: GlobalCollectException if something went wrong at the Ingenico ePayments platform,
the Ingenico ePayments platform was unable to process a message from a downstream partner/acquirer,
or the service that you're trying to reach is temporary unavailable (HTTP status code 500, 502 or 503)
:raise: ApiException if the Ingenico ePayments platform returned any other error
"""
path_context = {
"paymentId": payment_id,
}
uri = self._instantiate_uri("/v1/{merchantId}/payments/{paymentId}/complete", path_context)
try:
return self._communicator.post(
uri,
self._client_headers,
None,
body,
CompletePaymentResponse,
context)
except ResponseException as e:
error_type = ErrorResponse
error_object = self._communicator.marshaller.unmarshal(e.body, error_type)
raise self._create_exception(e.status_code, e.body, error_object, context)
def third_party_status(self, payment_id, context=None):
"""
Resource /{merchantId}/payments/{paymentId}/thirdpartystatus - Third party status poll
See also https://epayments-api.developer-ingenico.com/s2sapi/v1/en_US/python/payments/thirdPartyStatus.html
:param payment_id: str
:param context: :class:`ingenico.connect.sdk.call_context.CallContext`
:return: :class:`ingenico.connect.sdk.domain.payment.third_party_status_response.ThirdPartyStatusResponse`
:raise: ValidationException if the request was not correct and couldn't be processed (HTTP status code 400)
:raise: AuthorizationException if the request was not allowed (HTTP status code 403)
:raise: ReferenceException if an object was attempted to be referenced that doesn't exist or has been removed,
or there was a conflict (HTTP status code 404, 409 or 410)
:raise: GlobalCollectException if something went wrong at the Ingenico ePayments platform,
the Ingenico ePayments platform was unable to process a message from a downstream partner/acquirer,
or the service that you're trying to reach is temporary unavailable (HTTP status code 500, 502 or 503)
:raise: ApiException if the Ingenico ePayments platform returned any other error
"""
path_context = {
"paymentId": payment_id,
}
uri = self._instantiate_uri("/v1/{merchantId}/payments/{paymentId}/thirdpartystatus", path_context)
try:
return self._communicator.get(
uri,
self._client_headers,
None,
ThirdPartyStatusResponse,
context)
except ResponseException as e:
error_type = ErrorResponse
error_object = self._communicator.marshaller.unmarshal(e.body, error_type)
raise self._create_exception(e.status_code, e.body, error_object, context)
def tokenize(self, payment_id, body, context=None):
"""
Resource /{merchantId}/payments/{paymentId}/tokenize - Create a token from payment
See also https://epayments-api.developer-ingenico.com/s2sapi/v1/en_US/python/payments/tokenize.html
:param payment_id: str
:param body: :class:`ingenico.connect.sdk.domain.payment.tokenize_payment_request.TokenizePaymentRequest`
:param context: :class:`ingenico.connect.sdk.call_context.CallContext`
:return: :class:`ingenico.connect.sdk.domain.token.create_token_response.CreateTokenResponse`
:raise: ValidationException if the request was not correct and couldn't be processed (HTTP status code 400)
:raise: AuthorizationException if the request was not allowed (HTTP status code 403)
:raise: ReferenceException if an object was attempted to be referenced that doesn't exist or has been removed,
or there was a conflict (HTTP status code 404, 409 or 410)
:raise: GlobalCollectException if something went wrong at the Ingenico ePayments platform,
the Ingenico ePayments platform was unable to process a message from a downstream partner/acquirer,
or the service that you're trying to reach is temporary unavailable (HTTP status code 500, 502 or 503)
:raise: ApiException if the Ingenico ePayments platform returned any other error
"""
path_context = {
"paymentId": payment_id,
}
uri = self._instantiate_uri("/v1/{merchantId}/payments/{paymentId}/tokenize", path_context)
try:
return self._communicator.post(
uri,
self._client_headers,
None,
body,
CreateTokenResponse,
context)
except ResponseException as e:
error_type = ErrorResponse
error_object = self._communicator.marshaller.unmarshal(e.body, error_type)
raise self._create_exception(e.status_code, e.body, error_object, context)
def processchallenged(self, payment_id, context=None):
"""
Resource /{merchantId}/payments/{paymentId}/processchallenged - Approves challenged payment
See also https://epayments-api.developer-ingenico.com/s2sapi/v1/en_US/python/payments/processchallenged.html
:param payment_id: str
:param context: :class:`ingenico.connect.sdk.call_context.CallContext`
:return: :class:`ingenico.connect.sdk.domain.payment.payment_response.PaymentResponse`
:raise: ValidationException if the request was not correct and couldn't be processed (HTTP status code 400)
:raise: AuthorizationException if the request was not allowed (HTTP status code 403)
:raise: ReferenceException if an object was attempted to be referenced that doesn't exist or has been removed,
or there was a conflict (HTTP status code 404, 409 or 410)
:raise: GlobalCollectException if something went wrong at the Ingenico ePayments platform,
the Ingenico ePayments platform was unable to process a message from a downstream partner/acquirer,
or the service that you're trying to reach is temporary unavailable (HTTP status code 500, 502 or 503)
:raise: ApiException if the Ingenico ePayments platform returned any other error
"""
path_context = {
"paymentId": payment_id,
}
uri = self._instantiate_uri("/v1/{merchantId}/payments/{paymentId}/processchallenged", path_context)
try:
return self._communicator.post(
uri,
self._client_headers,
None,
None,
PaymentResponse,
context)
except ResponseException as e:
error_type = ErrorResponse
error_object = self._communicator.marshaller.unmarshal(e.body, error_type)
raise self._create_exception(e.status_code, e.body, error_object, context)
def approve(self, payment_id, body, context=None):
    """
    Resource /{merchantId}/payments/{paymentId}/approve - Approve payment

    See also https://epayments-api.developer-ingenico.com/s2sapi/v1/en_US/python/payments/approve.html

    :param payment_id: str
    :param body: :class:`ingenico.connect.sdk.domain.payment.approve_payment_request.ApprovePaymentRequest`
    :param context: :class:`ingenico.connect.sdk.call_context.CallContext`
    :return: :class:`ingenico.connect.sdk.domain.payment.payment_approval_response.PaymentApprovalResponse`
    :raise: ValidationException (HTTP 400), AuthorizationException (HTTP 403),
        ReferenceException (HTTP 404, 409 or 410),
        GlobalCollectException (HTTP 500, 502 or 503),
        ApiException for any other error returned by the platform.
    """
    uri = self._instantiate_uri(
        "/v1/{merchantId}/payments/{paymentId}/approve",
        {"paymentId": payment_id})
    try:
        return self._communicator.post(
            uri, self._client_headers, None, body,
            PaymentApprovalResponse, context)
    except ResponseException as e:
        # Error bodies unmarshal to the generic ErrorResponse shape.
        error = self._communicator.marshaller.unmarshal(e.body, ErrorResponse)
        raise self._create_exception(e.status_code, e.body, error, context)
def capture(self, payment_id, body, context=None):
    """
    Resource /{merchantId}/payments/{paymentId}/capture - Capture payment

    See also https://epayments-api.developer-ingenico.com/s2sapi/v1/en_US/python/payments/capture.html

    :param payment_id: str
    :param body: :class:`ingenico.connect.sdk.domain.payment.capture_payment_request.CapturePaymentRequest`
    :param context: :class:`ingenico.connect.sdk.call_context.CallContext`
    :return: :class:`ingenico.connect.sdk.domain.capture.capture_response.CaptureResponse`
    :raise: ValidationException (HTTP 400), AuthorizationException (HTTP 403),
        ReferenceException (HTTP 404, 409 or 410),
        GlobalCollectException (HTTP 500, 502 or 503),
        ApiException for any other error returned by the platform.
    """
    uri = self._instantiate_uri(
        "/v1/{merchantId}/payments/{paymentId}/capture",
        {"paymentId": payment_id})
    try:
        return self._communicator.post(
            uri, self._client_headers, None, body, CaptureResponse, context)
    except ResponseException as e:
        # Error bodies unmarshal to the generic ErrorResponse shape.
        error = self._communicator.marshaller.unmarshal(e.body, ErrorResponse)
        raise self._create_exception(e.status_code, e.body, error, context)
def cancelapproval(self, payment_id, context=None):
    """
    Resource /{merchantId}/payments/{paymentId}/cancelapproval - Undo capture payment

    See also https://epayments-api.developer-ingenico.com/s2sapi/v1/en_US/python/payments/cancelapproval.html

    :param payment_id: str
    :param context: :class:`ingenico.connect.sdk.call_context.CallContext`
    :return: :class:`ingenico.connect.sdk.domain.payment.cancel_approval_payment_response.CancelApprovalPaymentResponse`
    :raise: ValidationException (HTTP 400), AuthorizationException (HTTP 403),
        ReferenceException (HTTP 404, 409 or 410),
        GlobalCollectException (HTTP 500, 502 or 503),
        ApiException for any other error returned by the platform.
    """
    uri = self._instantiate_uri(
        "/v1/{merchantId}/payments/{paymentId}/cancelapproval",
        {"paymentId": payment_id})
    try:
        # POST with no request body.
        return self._communicator.post(
            uri, self._client_headers, None, None,
            CancelApprovalPaymentResponse, context)
    except ResponseException as e:
        # Error bodies unmarshal to the generic ErrorResponse shape.
        error = self._communicator.marshaller.unmarshal(e.body, ErrorResponse)
        raise self._create_exception(e.status_code, e.body, error, context)
def captures(self, payment_id, context=None):
    """
    Resource /{merchantId}/payments/{paymentId}/captures - Get captures of payment

    See also https://epayments-api.developer-ingenico.com/s2sapi/v1/en_US/python/payments/captures.html

    :param payment_id: str
    :param context: :class:`ingenico.connect.sdk.call_context.CallContext`
    :return: :class:`ingenico.connect.sdk.domain.capture.captures_response.CapturesResponse`
    :raise: ValidationException (HTTP 400), AuthorizationException (HTTP 403),
        ReferenceException (HTTP 404, 409 or 410),
        GlobalCollectException (HTTP 500, 502 or 503),
        ApiException for any other error returned by the platform.
    """
    uri = self._instantiate_uri(
        "/v1/{merchantId}/payments/{paymentId}/captures",
        {"paymentId": payment_id})
    try:
        return self._communicator.get(
            uri, self._client_headers, None, CapturesResponse, context)
    except ResponseException as e:
        # Error bodies unmarshal to the generic ErrorResponse shape.
        error = self._communicator.marshaller.unmarshal(e.body, ErrorResponse)
        raise self._create_exception(e.status_code, e.body, error, context)
def refund(self, payment_id, body, context=None):
    """
    Resource /{merchantId}/payments/{paymentId}/refund - Create refund

    See also https://epayments-api.developer-ingenico.com/s2sapi/v1/en_US/python/payments/refund.html

    :param payment_id: str
    :param body: :class:`ingenico.connect.sdk.domain.refund.refund_request.RefundRequest`
    :param context: :class:`ingenico.connect.sdk.call_context.CallContext`
    :return: :class:`ingenico.connect.sdk.domain.refund.refund_response.RefundResponse`
    :raise: DeclinedRefundException if the platform declined / rejected the
        refund; the refund result is available from the exception.
    :raise: ValidationException (HTTP 400), AuthorizationException (HTTP 403),
        ReferenceException (HTTP 404, 409 or 410),
        GlobalCollectException (HTTP 500, 502 or 503),
        ApiException for any other error returned by the platform.
    """
    uri = self._instantiate_uri(
        "/v1/{merchantId}/payments/{paymentId}/refund",
        {"paymentId": payment_id})
    try:
        return self._communicator.post(
            uri, self._client_headers, None, body, RefundResponse, context)
    except ResponseException as e:
        # Unlike the other payment calls, refund errors carry a
        # RefundErrorResponse body (needed to build DeclinedRefundException).
        error = self._communicator.marshaller.unmarshal(
            e.body, RefundErrorResponse)
        raise self._create_exception(e.status_code, e.body, error, context)
def refunds(self, payment_id, context=None):
    """
    Resource /{merchantId}/payments/{paymentId}/refunds - Get refunds of payment

    See also https://epayments-api.developer-ingenico.com/s2sapi/v1/en_US/python/payments/refunds.html

    :param payment_id: str
    :param context: :class:`ingenico.connect.sdk.call_context.CallContext`
    :return: :class:`ingenico.connect.sdk.domain.refund.refunds_response.RefundsResponse`
    :raise: ValidationException (HTTP 400), AuthorizationException (HTTP 403),
        ReferenceException (HTTP 404, 409 or 410),
        GlobalCollectException (HTTP 500, 502 or 503),
        ApiException for any other error returned by the platform.
    """
    uri = self._instantiate_uri(
        "/v1/{merchantId}/payments/{paymentId}/refunds",
        {"paymentId": payment_id})
    try:
        return self._communicator.get(
            uri, self._client_headers, None, RefundsResponse, context)
    except ResponseException as e:
        # Error bodies unmarshal to the generic ErrorResponse shape.
        error = self._communicator.marshaller.unmarshal(e.body, ErrorResponse)
        raise self._create_exception(e.status_code, e.body, error, context)
def cancel(self, payment_id, context=None):
    """
    Resource /{merchantId}/payments/{paymentId}/cancel - Cancel payment

    See also https://epayments-api.developer-ingenico.com/s2sapi/v1/en_US/python/payments/cancel.html

    :param payment_id: str
    :param context: :class:`ingenico.connect.sdk.call_context.CallContext`
    :return: :class:`ingenico.connect.sdk.domain.payment.cancel_payment_response.CancelPaymentResponse`
    :raise: ValidationException (HTTP 400), AuthorizationException (HTTP 403),
        ReferenceException (HTTP 404, 409 or 410),
        GlobalCollectException (HTTP 500, 502 or 503),
        ApiException for any other error returned by the platform.
    """
    uri = self._instantiate_uri(
        "/v1/{merchantId}/payments/{paymentId}/cancel",
        {"paymentId": payment_id})
    try:
        # POST with no request body.
        return self._communicator.post(
            uri, self._client_headers, None, None,
            CancelPaymentResponse, context)
    except ResponseException as e:
        # Error bodies unmarshal to the generic ErrorResponse shape.
        error = self._communicator.marshaller.unmarshal(e.body, ErrorResponse)
        raise self._create_exception(e.status_code, e.body, error, context)
def dispute(self, payment_id, body, context=None):
    """
    Resource /{merchantId}/payments/{paymentId}/dispute - Create dispute

    See also https://epayments-api.developer-ingenico.com/s2sapi/v1/en_US/python/payments/dispute.html

    :param payment_id: str
    :param body: :class:`ingenico.connect.sdk.domain.dispute.create_dispute_request.CreateDisputeRequest`
    :param context: :class:`ingenico.connect.sdk.call_context.CallContext`
    :return: :class:`ingenico.connect.sdk.domain.dispute.dispute_response.DisputeResponse`
    :raise: ValidationException (HTTP 400), AuthorizationException (HTTP 403),
        ReferenceException (HTTP 404, 409 or 410),
        GlobalCollectException (HTTP 500, 502 or 503),
        ApiException for any other error returned by the platform.
    """
    uri = self._instantiate_uri(
        "/v1/{merchantId}/payments/{paymentId}/dispute",
        {"paymentId": payment_id})
    try:
        return self._communicator.post(
            uri, self._client_headers, None, body, DisputeResponse, context)
    except ResponseException as e:
        # Error bodies unmarshal to the generic ErrorResponse shape.
        error = self._communicator.marshaller.unmarshal(e.body, ErrorResponse)
        raise self._create_exception(e.status_code, e.body, error, context)
def disputes(self, payment_id, context=None):
    """
    Resource /{merchantId}/payments/{paymentId}/disputes - Get disputes

    See also https://epayments-api.developer-ingenico.com/s2sapi/v1/en_US/python/payments/disputes.html

    :param payment_id: str
    :param context: :class:`ingenico.connect.sdk.call_context.CallContext`
    :return: :class:`ingenico.connect.sdk.domain.dispute.disputes_response.DisputesResponse`
    :raise: ValidationException (HTTP 400), AuthorizationException (HTTP 403),
        ReferenceException (HTTP 404, 409 or 410),
        GlobalCollectException (HTTP 500, 502 or 503),
        ApiException for any other error returned by the platform.
    """
    uri = self._instantiate_uri(
        "/v1/{merchantId}/payments/{paymentId}/disputes",
        {"paymentId": payment_id})
    try:
        return self._communicator.get(
            uri, self._client_headers, None, DisputesResponse, context)
    except ResponseException as e:
        # Error bodies unmarshal to the generic ErrorResponse shape.
        error = self._communicator.marshaller.unmarshal(e.body, ErrorResponse)
        raise self._create_exception(e.status_code, e.body, error, context)
def devicefingerprint(self, payment_id, context=None):
    """
    Resource /{merchantId}/payments/{paymentId}/devicefingerprint - Get Device Fingerprint details

    See also https://epayments-api.developer-ingenico.com/s2sapi/v1/en_US/python/payments/devicefingerprint.html

    :param payment_id: str
    :param context: :class:`ingenico.connect.sdk.call_context.CallContext`
    :return: :class:`ingenico.connect.sdk.domain.payment.device_fingerprint_details.DeviceFingerprintDetails`
    :raise: ValidationException (HTTP 400), AuthorizationException (HTTP 403),
        ReferenceException (HTTP 404, 409 or 410),
        GlobalCollectException (HTTP 500, 502 or 503),
        ApiException for any other error returned by the platform.
    """
    uri = self._instantiate_uri(
        "/v1/{merchantId}/payments/{paymentId}/devicefingerprint",
        {"paymentId": payment_id})
    try:
        return self._communicator.get(
            uri, self._client_headers, None, DeviceFingerprintDetails, context)
    except ResponseException as e:
        # Error bodies unmarshal to the generic ErrorResponse shape.
        error = self._communicator.marshaller.unmarshal(e.body, ErrorResponse)
        raise self._create_exception(e.status_code, e.body, error, context)
| 57.51548
| 165
| 0.669331
| 4,243
| 37,155
| 5.761961
| 0.047136
| 0.034768
| 0.03894
| 0.0607
| 0.890993
| 0.885798
| 0.882199
| 0.857248
| 0.827593
| 0.798307
| 0
| 0.016797
| 0.261338
| 37,155
| 645
| 166
| 57.604651
| 0.874003
| 0.559225
| 0
| 0.703333
| 1
| 0
| 0.063103
| 0.053531
| 0
| 0
| 0
| 0
| 0
| 1
| 0.06
| false
| 0
| 0.07
| 0
| 0.19
| 0.013333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
b2b83c08d3799492f8cd7c057700451af6ef58fb
| 21,161
|
py
|
Python
|
optax/_src/control_variates_test.py
|
asmith26/optax
|
46849fdbfb50667cf9a7c0443f514d575a910654
|
[
"Apache-2.0"
] | null | null | null |
optax/_src/control_variates_test.py
|
asmith26/optax
|
46849fdbfb50667cf9a7c0443f514d575a910654
|
[
"Apache-2.0"
] | null | null | null |
optax/_src/control_variates_test.py
|
asmith26/optax
|
46849fdbfb50667cf9a7c0443f514d575a910654
|
[
"Apache-2.0"
] | null | null | null |
# Lint as: python3
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import itertools
from absl.testing import absltest
from absl.testing import parameterized
import chex
import jax
import jax.numpy as jnp
import numpy as np
from optax._src import control_variates
from optax._src import stochastic_gradient_estimators
from optax._src import utils
# Seed NumPy's global RNG at import time so any np.random-based sampling in
# this test module is deterministic across runs.
np.random.seed(42)
def _assert_equal(actual, expected, rtol=1e-2, atol=1e-2):
# Note: assert_allclose does not check shapes
assert actual.shape == expected.shape
# Scalar.
if not actual.shape:
return np.testing.assert_allclose(
np.asarray(actual), np.asarray(expected), rtol, atol)
# We get around the bug https://github.com/numpy/numpy/issues/13801
zero_indices = np.argwhere(expected == 0)
if not np.all(np.abs(actual[zero_indices]) <= atol):
raise AssertionError('Larger than {} diff in {}'.format(
atol, actual[zero_indices]))
non_zero_indices = np.argwhere(expected != 0)
np.testing.assert_allclose(
np.asarray(actual)[non_zero_indices],
expected[non_zero_indices], rtol, atol)
def _cross_prod(items1, items2):
prod = itertools.product(items1, items2)
return [i1 + (i2,) for i1, i2 in prod]
def _map(cv, params, samples, state=None):
return jax.vmap(lambda x: cv(params, x, state))(samples)
def _map_variant(variant):
  """Wrap `_map` in a chex test variant.

  The control-variate callable (argument 0 of `_map`) is marked static so
  jit-style variants can trace through it.
  """
  wrapped = variant(_map, static_argnums=0)
  return wrapped
def _cv_jac_variant(variant):
  """Wrap `control_variates_jacobians` in a chex test variant.

  Everything except the distribution parameters (3) and rng (not listed) is
  static: function, cv builder, grad estimator, dist builder, sample count,
  cv state and the coefficient-estimation flag.
  """
  wrapped = variant(
      control_variates.control_variates_jacobians,
      static_argnums=(0, 1, 2, 4, 5, 6, 7, 8))
  return wrapped
class DeltaControlVariateTest(chex.TestCase):
  """Checks the delta-method control variate on analytic test functions."""

  @chex.all_variants
  @parameterized.parameters([(1.0, 0.5)])
  def testQuadraticFunction(self, effective_mean, effective_log_scale):
    data_dims = 20
    num_samples = 10**6
    rng = jax.random.PRNGKey(1)
    mean = effective_mean * jnp.ones(shape=(data_dims), dtype=jnp.float32)
    log_scale = effective_log_scale * jnp.ones(
        shape=(data_dims), dtype=jnp.float32)
    params = [mean, log_scale]
    samples = utils.multi_normal(*params).sample((num_samples,), rng)
    target = lambda x: jnp.sum(x**2)

    cv, expected_cv, _ = control_variates.control_delta_method(target)
    mean_cv = jnp.mean(_map_variant(self.variant)(cv, params, samples))
    empirical_value = jnp.sum(samples**2) / num_samples

    # The second-order expansion of a quadratic is exact, so this is an
    # analytical computation and the result needs to be accurate.
    _assert_equal(mean_cv, empirical_value, rtol=1e-1, atol=1e-3)
    _assert_equal(expected_cv(params, None), empirical_value, atol=1e-1)

  @chex.all_variants
  @parameterized.parameters([(1.0, 1.0)])
  def testPolinomialFunction(self, effective_mean, effective_log_scale):
    data_dims = 10
    num_samples = 10**3
    mean = effective_mean * jnp.ones(shape=(data_dims), dtype=jnp.float32)
    log_scale = effective_log_scale * jnp.ones(
        shape=(data_dims), dtype=jnp.float32)
    params = [mean, log_scale]
    rng = jax.random.PRNGKey(1)
    samples = utils.multi_normal(*params).sample((num_samples,), rng)
    target = lambda x: jnp.sum(x**5)

    cv, expected_cv, _ = control_variates.control_delta_method(target)
    mean_cv = jnp.mean(_map_variant(self.variant)(cv, params, samples))

    # The average control-variate value should be close to its expectation.
    _assert_equal(mean_cv, expected_cv(params, None), rtol=1e-1, atol=1e-3)

  @chex.all_variants
  def testNonPolynomialFunction(self):
    data_dims = 10
    num_samples = 10**3
    mean = jnp.ones(shape=(data_dims), dtype=jnp.float32)
    log_scale = jnp.ones(shape=(data_dims), dtype=jnp.float32)
    params = [mean, log_scale]
    rng = jax.random.PRNGKey(1)
    samples = utils.multi_normal(*params).sample((num_samples,), rng)
    target = lambda x: jnp.sum(jnp.log(x**2))

    cv, expected_cv, _ = control_variates.control_delta_method(target)
    mean_cv = jnp.mean(_map_variant(self.variant)(cv, params, samples))

    # The average control-variate value should be close to its expectation.
    _assert_equal(mean_cv, expected_cv(params, None), rtol=1e-1, atol=1e-3)

    # Second-order expansion is log(\mu**2) + 1/2 * \sigma**2 (-2 / \mu**2);
    # with mu = 1 and sigma = e per dimension this sums to -e**2 * data_dims.
    expected_cv_val = - np.exp(1.) ** 2 * data_dims
    _assert_equal(
        expected_cv(params, None), expected_cv_val, rtol=1e-1, atol=1e-3)
class MovingAverageBaselineTest(chex.TestCase):
  """Tests for the moving-average baseline control variate."""

  @chex.all_variants
  @parameterized.parameters(
      [(1.0, 0.5, 0.9),
       (1.0, 0.5, 0.99)])
  def testLinearFunction(
      self, effective_mean, effective_log_scale, decay):
    weights = jnp.array([1., 2., 3.], dtype=jnp.float32)
    num_samples = 10**4
    data_dims = len(weights)
    mean = effective_mean * jnp.ones(shape=(data_dims), dtype=jnp.float32)
    log_scale = effective_log_scale * jnp.ones(
        shape=(data_dims), dtype=jnp.float32)
    params = [mean, log_scale]
    target = lambda x: jnp.sum(weights * x)
    rng = jax.random.PRNGKey(1)
    samples = utils.multi_normal(*params).sample((num_samples,), rng)

    cv, expected_cv, update_state = control_variates.moving_avg_baseline(
        target, decay=decay, zero_debias=False,
        use_decay_early_training_heuristic=False)

    # The baseline ignores the samples: both the per-sample cv and its
    # expectation are just the stored state.
    state_1 = jnp.array(1.)
    mean_cv = jnp.mean(_map_variant(self.variant)(
        cv, params, samples, (state_1, 0)))
    _assert_equal(mean_cv, state_1)
    _assert_equal(expected_cv(params, (state_1, 0)), state_1)

    state_2 = jnp.array(2.)
    mean_cv = jnp.mean(
        _map_variant(self.variant)(cv, params, samples, (state_2, 0)))
    _assert_equal(mean_cv, state_2)
    _assert_equal(expected_cv(params, (state_2, 0)), state_2)

    # A state update is an exponential moving average towards the function
    # value at the mean of the distribution.
    _assert_equal(
        update_state(params, samples, (state_1, 0))[0],
        decay * state_1 + (1 - decay) * target(mean))
    _assert_equal(
        update_state(params, samples, (state_2, 0))[0],
        decay * state_2 + (1 - decay) * target(mean))

  @chex.all_variants
  @parameterized.parameters(
      [(1.0, 0.5, 0.9),
       (1.0, 0.5, 0.99)])
  def testLinearFunctionWithHeuristic(
      self, effective_mean, effective_log_scale, decay):
    weights = jnp.array([1., 2., 3.], dtype=jnp.float32)
    num_samples = 10**5
    data_dims = len(weights)
    mean = effective_mean * jnp.ones(shape=(data_dims), dtype=jnp.float32)
    log_scale = effective_log_scale * jnp.ones(
        shape=(data_dims), dtype=jnp.float32)
    params = [mean, log_scale]
    target = lambda x: jnp.sum(weights * x)
    rng = jax.random.PRNGKey(1)
    samples = utils.multi_normal(*params).sample((num_samples,), rng)

    cv, expected_cv, update_state = control_variates.moving_avg_baseline(
        target, decay=decay, zero_debias=False,
        use_decay_early_training_heuristic=True)

    # As above, evaluating the baseline returns the stored state unchanged.
    state_1 = jnp.array(1.)
    mean_cv = jnp.mean(_map_variant(self.variant)(
        cv, params, samples, (state_1, 0)))
    _assert_equal(mean_cv, state_1)
    _assert_equal(expected_cv(params, (state_1, 0)), state_1)

    state_2 = jnp.array(2.)
    mean_cv = jnp.mean(
        _map_variant(self.variant)(cv, params, samples, (state_2, 0)))
    _assert_equal(mean_cv, state_2)
    _assert_equal(expected_cv(params, (state_2, 0)), state_2)

    # With the heuristic enabled, early steps use a smaller effective decay:
    # 1/10 at step 0 and 2/11 at step 1 for these expectations.
    first_step_decay = 0.1
    _assert_equal(
        update_state(params, samples, (state_1, 0))[0],
        first_step_decay * state_1 + (1 - first_step_decay) * target(mean))

    second_step_decay = 2. / 11
    _assert_equal(
        update_state(params, samples, (state_2, 1))[0],
        second_step_decay * state_2 + (1 - second_step_decay) * target(mean))

  @parameterized.parameters(
      [(1.0, 0.5, 0.9),
       (1.0, 0.5, 0.99)])
  def testLinearFunctionZeroDebias(
      self, effective_mean, effective_log_scale, decay):
    weights = jnp.array([1., 2., 3.], dtype=jnp.float32)
    num_samples = 10**5
    data_dims = len(weights)
    mean = effective_mean * jnp.ones(shape=(data_dims), dtype=jnp.float32)
    log_scale = effective_log_scale * jnp.ones(
        shape=(data_dims), dtype=jnp.float32)
    params = [mean, log_scale]
    target = lambda x: jnp.sum(weights * x)
    rng = jax.random.PRNGKey(1)
    samples = utils.multi_normal(*params).sample((num_samples,), rng)

    update_state = control_variates.moving_avg_baseline(
        target, decay=decay, zero_debias=False,
        use_decay_early_training_heuristic=False)[-1]
    update_state_zero_debias = control_variates.moving_avg_baseline(
        target, decay=decay, zero_debias=True,
        use_decay_early_training_heuristic=False)[-1]

    # Starting from zero state, the plain update is shrunk by (1 - decay) ...
    biased = update_state(params, samples, (jnp.array(0.), 0))[0]
    _assert_equal(biased, (1 - decay) * target(mean))

    # ... while zero-debiasing recovers the full estimate on the first step.
    debiased = update_state_zero_debias(
        params, samples, (jnp.array(0.), 0))[0]
    _assert_equal(debiased, target(mean))
class DeltaMethodAnalyticalExpectedGrads(chex.TestCase):
  """Compares estimated gradients against closed-form Gaussian expectations.

  Throughout, x ~ N(mu, sigma^2) per dimension with mu = effective_mean and
  sigma = exp(effective_log_scale). The distribution is parameterized by
  (mean, log_scale), so every log-scale gradient carries an extra chain-rule
  factor d sigma / d log_sigma = sigma.
  """

  def _delta_method_jacobians(self, function, grad_estimator, params,
                              num_samples, estimate_cv_coeffs):
    # Helper: run the variant-wrapped jacobian computation with the delta
    # method control variate and no control-variate state.
    rng = jax.random.PRNGKey(1)
    return _cv_jac_variant(self.variant)(
        function,
        control_variates.control_delta_method,
        grad_estimator,
        params,
        utils.multi_normal,  # dist_builder
        rng,
        num_samples,
        None,  # No cv state.
        estimate_cv_coeffs)[0]

  def _assert_jacobian_grads(self, jacobians, num_samples, data_dims,
                             expected_mean_grads, expected_log_scale_grads):
    # Helper: each jacobian holds one row per sample; averaging over rows
    # yields the gradient estimate that is compared to the expectation.
    mean_jacobians = jacobians[0]
    chex.assert_shape(mean_jacobians, (num_samples, data_dims))
    mean_grads_from_jacobian = jnp.mean(mean_jacobians, axis=0)

    log_scale_jacobians = jacobians[1]
    chex.assert_shape(log_scale_jacobians, (num_samples, data_dims))
    log_scale_grads_from_jacobian = jnp.mean(log_scale_jacobians, axis=0)

    _assert_equal(mean_grads_from_jacobian, expected_mean_grads,
                  rtol=1e-1, atol=1e-3)
    _assert_equal(log_scale_grads_from_jacobian, expected_log_scale_grads,
                  rtol=1e-1, atol=1e-3)

  @chex.all_variants
  @parameterized.parameters(_cross_prod([
      (1.0, 1.0, stochastic_gradient_estimators.score_function_jacobians),
      (1.0, 1.0, stochastic_gradient_estimators.pathwise_jacobians),
      (1.0, 1.0, stochastic_gradient_estimators.measure_valued_jacobians)],
      [True, False]))
  def testQuadraticFunction(
      self, effective_mean, effective_log_scale, grad_estimator,
      estimate_cv_coeffs):
    data_dims = 3
    num_samples = 10**3
    mean = effective_mean * jnp.ones(shape=(data_dims), dtype=jnp.float32)
    log_scale = effective_log_scale * jnp.ones(
        shape=(data_dims), dtype=jnp.float32)
    params = [mean, log_scale]

    jacobians = self._delta_method_jacobians(
        lambda x: jnp.sum(x**2), grad_estimator, params, num_samples,
        estimate_cv_coeffs)

    # E[x^2] = mu^2 + sigma^2, so d/dmu = 2 mu and, via the chain rule,
    # d/dlog_sigma = 2 sigma^2 = 2 exp(2 log_sigma).
    expected_mean_grads = 2 * effective_mean * np.ones(
        data_dims, dtype=np.float32)
    expected_log_scale_grads = 2 * np.exp(2 * effective_log_scale) * np.ones(
        data_dims, dtype=np.float32)

    self._assert_jacobian_grads(
        jacobians, num_samples, data_dims,
        expected_mean_grads, expected_log_scale_grads)

  @chex.all_variants
  @parameterized.parameters(_cross_prod([
      (1.0, 1.0, stochastic_gradient_estimators.score_function_jacobians),
      (1.0, 1.0, stochastic_gradient_estimators.pathwise_jacobians),
      (1.0, 1.0, stochastic_gradient_estimators.measure_valued_jacobians)],
      [True, False]))
  def testCubicFunction(
      self, effective_mean, effective_log_scale, grad_estimator,
      estimate_cv_coeffs):
    data_dims = 1
    num_samples = 10**5
    mean = effective_mean * jnp.ones(shape=(data_dims), dtype=jnp.float32)
    log_scale = effective_log_scale * jnp.ones(
        shape=(data_dims), dtype=jnp.float32)
    params = [mean, log_scale]

    jacobians = self._delta_method_jacobians(
        lambda x: jnp.sum(x**3), grad_estimator, params, num_samples,
        estimate_cv_coeffs)

    # The third uncentered moment of the Gaussian is mu^3 + 3 mu sigma^2.
    # Hence d/dmu = 3 mu^2 + 3 sigma^2 and, with the chain-rule factor sigma,
    # d/dlog_sigma = 6 mu sigma^2.
    expected_mean_grads = (
        3 * effective_mean**2 + 3 * np.exp(effective_log_scale)**2)
    expected_mean_grads *= np.ones(data_dims, dtype=np.float32)
    expected_log_scale_grads = (
        6 * effective_mean * np.exp(effective_log_scale) ** 2)
    expected_log_scale_grads *= np.ones(data_dims, dtype=np.float32)

    self._assert_jacobian_grads(
        jacobians, num_samples, data_dims,
        expected_mean_grads, expected_log_scale_grads)

  @chex.all_variants
  @parameterized.parameters(_cross_prod([
      (1.0, 1.0, stochastic_gradient_estimators.score_function_jacobians),
      (1.0, 1.0, stochastic_gradient_estimators.pathwise_jacobians),
      (1.0, 1.0, stochastic_gradient_estimators.measure_valued_jacobians)],
      [True, False]))
  def testForthPowerFunction(
      self, effective_mean, effective_log_scale, grad_estimator,
      estimate_cv_coeffs):
    data_dims = 1
    num_samples = 10**5
    mean = effective_mean * jnp.ones(shape=(data_dims), dtype=jnp.float32)
    log_scale = effective_log_scale * jnp.ones(
        shape=(data_dims), dtype=jnp.float32)
    params = [mean, log_scale]

    jacobians = self._delta_method_jacobians(
        lambda x: jnp.sum(x**4), grad_estimator, params, num_samples,
        estimate_cv_coeffs)

    # The fourth uncentered moment of the Gaussian is
    # mu^4 + 6 mu^2 sigma^2 + 3 sigma^4, so d/dmu = 4 mu^3 + 12 mu sigma^2.
    # (Fixed: this previously used 3 mu^3, which the loose rtol=1e-1 masked.)
    expected_mean_grads = (
        4 * effective_mean**3
        + 12 * effective_mean * np.exp(effective_log_scale)**2)
    expected_mean_grads *= np.ones(data_dims, dtype=np.float32)
    # d/dsigma = 12 mu^2 sigma + 12 sigma^3; multiplying by the chain-rule
    # factor sigma gives the log-scale gradient below.
    expected_log_scale_grads = 12 * (
        effective_mean**2 * np.exp(effective_log_scale) +
        np.exp(effective_log_scale) ** 3) * np.exp(effective_log_scale)
    expected_log_scale_grads *= np.ones(data_dims, dtype=np.float32)

    self._assert_jacobian_grads(
        jacobians, num_samples, data_dims,
        expected_mean_grads, expected_log_scale_grads)
class ConsistencyWithStandardEstimators(chex.TestCase):
  """Checks that control variates do not bias the gradient estimators.

  A control variate should reduce variance without changing the expected
  gradient, so the CV-corrected sample mean must agree with the plain
  estimator's sample mean within Monte Carlo noise.
  """

  @chex.all_variants
  @parameterized.parameters(_cross_prod([
      (1, 1, stochastic_gradient_estimators.score_function_jacobians, 10 **6),
      (1, 1, stochastic_gradient_estimators.pathwise_jacobians, 10 **5),
      (1, 1, stochastic_gradient_estimators.measure_valued_jacobians, 10 **5)],
      [control_variates.control_delta_method,
       control_variates.moving_avg_baseline]))
  def testWeightedLinearFunction(
      self, effective_mean, effective_log_scale,
      grad_estimator, num_samples, control_variate_from_function):
    """Check that the gradients are consistent between estimators."""
    weights = jnp.array([1., 2., 3.], dtype=jnp.float32)
    data_dims = len(weights)
    mean = effective_mean * jnp.ones(shape=(data_dims), dtype=jnp.float32)
    log_scale = effective_log_scale * jnp.ones(
        shape=(data_dims), dtype=jnp.float32)
    params = [mean, log_scale]
    function = lambda x: jnp.sum(weights * x)
    rng = jax.random.PRNGKey(1)
    cv_rng, ge_rng = jax.random.split(rng)
    # CV-corrected per-sample jacobians; [0] drops the updated CV state.
    jacobians = _cv_jac_variant(self.variant)(
        function,
        control_variate_from_function,
        grad_estimator,
        params,
        utils.multi_normal,  # dist_builder
        cv_rng,  # rng
        num_samples,
        (0., 0),  # control_variate_state
        False)[0]
    mean_jacobians = jacobians[0]
    chex.assert_shape(mean_jacobians, (num_samples, data_dims))
    mean_grads = jnp.mean(mean_jacobians, axis=0)
    log_scale_jacobians = jacobians[1]
    chex.assert_shape(log_scale_jacobians, (num_samples, data_dims))
    log_scale_grads = jnp.mean(log_scale_jacobians, axis=0)
    # We use a different random number generator for the gradient estimator
    # without the control variate.
    no_cv_jacobians = grad_estimator(
        function, [mean, log_scale],
        utils.multi_normal, ge_rng, num_samples=num_samples)
    no_cv_mean_jacobians = no_cv_jacobians[0]
    chex.assert_shape(no_cv_mean_jacobians, (num_samples, data_dims))
    no_cv_mean_grads = jnp.mean(no_cv_mean_jacobians, axis=0)
    no_cv_log_scale_jacobians = no_cv_jacobians[1]
    chex.assert_shape(no_cv_log_scale_jacobians, (num_samples, data_dims))
    no_cv_log_scale_grads = jnp.mean(no_cv_log_scale_jacobians, axis=0)
    # Loose tolerances: both sides are Monte Carlo estimates.
    _assert_equal(mean_grads, no_cv_mean_grads, rtol=1e-1, atol=5e-2)
    _assert_equal(log_scale_grads, no_cv_log_scale_grads, rtol=1, atol=5e-2)

  @chex.all_variants
  @parameterized.parameters(_cross_prod([
      (1, 1, stochastic_gradient_estimators.score_function_jacobians, 10 **5),
      (1, 1, stochastic_gradient_estimators.pathwise_jacobians, 10 **5),
      (1, 1, stochastic_gradient_estimators.measure_valued_jacobians, 10 **5)],
      [control_variates.control_delta_method,
       control_variates.moving_avg_baseline]))
  def testNonPolynomialFunction(
      self, effective_mean, effective_log_scale,
      grad_estimator, num_samples, control_variate_from_function):
    """Check that the gradients are consistent between estimators."""
    data_dims = 3
    mean = effective_mean * jnp.ones(shape=(data_dims), dtype=jnp.float32)
    log_scale = effective_log_scale * jnp.ones(
        shape=(data_dims), dtype=jnp.float32)
    params = [mean, log_scale]
    function = lambda x: jnp.log(jnp.sum(x**2))
    rng = jax.random.PRNGKey(1)
    cv_rng, ge_rng = jax.random.split(rng)
    # CV-corrected per-sample jacobians; [0] drops the updated CV state.
    jacobians = _cv_jac_variant(self.variant)(
        function,
        control_variate_from_function,
        grad_estimator,
        params,
        utils.multi_normal,
        cv_rng,
        num_samples,
        (0., 0),  # control_variate_state
        False)[0]
    mean_jacobians = jacobians[0]
    chex.assert_shape(mean_jacobians, (num_samples, data_dims))
    mean_grads = jnp.mean(mean_jacobians, axis=0)
    log_scale_jacobians = jacobians[1]
    chex.assert_shape(log_scale_jacobians, (num_samples, data_dims))
    log_scale_grads = jnp.mean(log_scale_jacobians, axis=0)
    # We use a different random number generator for the gradient estimator
    # without the control variate.
    no_cv_jacobians = grad_estimator(
        function, [mean, log_scale],
        utils.multi_normal, ge_rng, num_samples=num_samples)
    no_cv_mean_jacobians = no_cv_jacobians[0]
    chex.assert_shape(no_cv_mean_jacobians, (num_samples, data_dims))
    no_cv_mean_grads = jnp.mean(no_cv_mean_jacobians, axis=0)
    no_cv_log_scale_jacobians = no_cv_jacobians[1]
    chex.assert_shape(no_cv_log_scale_jacobians, (num_samples, data_dims))
    no_cv_log_scale_grads = jnp.mean(no_cv_log_scale_jacobians, axis=0)
    # Loose tolerances: both sides are Monte Carlo estimates.
    _assert_equal(mean_grads, no_cv_mean_grads, rtol=1e-1, atol=5e-2)
    _assert_equal(log_scale_grads, no_cv_log_scale_grads, rtol=1e-1, atol=5e-2)
# Standard absltest entry point.
if __name__ == '__main__':
  absltest.main()
| 37.059545
| 80
| 0.692075
| 2,941
| 21,161
| 4.67324
| 0.093506
| 0.055879
| 0.026484
| 0.025611
| 0.821595
| 0.816138
| 0.803114
| 0.789508
| 0.769645
| 0.761932
| 0
| 0.028676
| 0.197439
| 21,161
| 570
| 81
| 37.124561
| 0.780604
| 0.094797
| 0
| 0.758373
| 0
| 0
| 0.001728
| 0
| 0
| 0
| 0
| 0
| 0.114833
| 1
| 0.038278
| false
| 0
| 0.023923
| 0.007177
| 0.083732
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b2d2169b9e42d8d759a47793cf34031da4dc110e
| 5,741
|
py
|
Python
|
yolo3/models/yolo3_shufflenetv2.py
|
rootadminWalker/keras-YOLOv3-model-set
|
196ec711975e1821a260a9f6523008bf47ff8c84
|
[
"MIT"
] | null | null | null |
yolo3/models/yolo3_shufflenetv2.py
|
rootadminWalker/keras-YOLOv3-model-set
|
196ec711975e1821a260a9f6523008bf47ff8c84
|
[
"MIT"
] | null | null | null |
yolo3/models/yolo3_shufflenetv2.py
|
rootadminWalker/keras-YOLOv3-model-set
|
196ec711975e1821a260a9f6523008bf47ff8c84
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""YOLO_v3 ShuffleNetV2 Model Defined in Keras."""
from tensorflow.keras.layers import UpSampling2D, Concatenate
from tensorflow.keras.models import Model
from ...common.backbones.shufflenet_v2 import ShuffleNetV2
#from yolo3.models.layers import compose, DarknetConv2D, DarknetConv2D_BN_Leaky, Depthwise_Separable_Conv2D_BN_Leaky, make_last_layers, make_depthwise_separable_last_layers, make_spp_depthwise_separable_last_layers
from .layers import yolo3_predictions, yolo3lite_predictions, tiny_yolo3_predictions, tiny_yolo3lite_predictions
def yolo3_shufflenetv2_body(inputs, num_anchors, num_classes):
    """Create YOLO_V3 ShuffleNetV2 model CNN body in Keras."""
    backbone = ShuffleNetV2(input_tensor=inputs, weights=None, include_top=False)
    print('backbone layers number: {}'.format(len(backbone.layers)))

    # Feature maps tapped from the backbone (for a 416 x 416 x 3 input):
    #   '1x1conv5_out'                 -> 13 x 13 x 1024
    #   'stage4/block1/relu_1x1conv_1' -> 26 x 26 x 464
    #   'stage3/block1/relu_1x1conv_1' -> 52 x 52 x 232
    feature_layer_names = (
        '1x1conv5_out',
        'stage4/block1/relu_1x1conv_1',
        'stage3/block1/relu_1x1conv_1',
    )
    f1, f2, f3 = (backbone.get_layer(name).output for name in feature_layer_names)

    # Head channel widths matching the tapped feature depths.
    feature_channel_nums = (1024, 464, 232)

    y1, y2, y3 = yolo3_predictions(
        (f1, f2, f3), feature_channel_nums, num_anchors, num_classes)
    return Model(inputs=inputs, outputs=[y1, y2, y3])
def yolo3lite_shufflenetv2_body(inputs, num_anchors, num_classes):
    '''Create YOLO_v3 Lite ShuffleNetV2 model CNN body in keras.'''
    backbone = ShuffleNetV2(input_tensor=inputs, weights=None, include_top=False)
    print('backbone layers number: {}'.format(len(backbone.layers)))

    # Feature maps tapped from the backbone (for a 416 x 416 x 3 input):
    #   '1x1conv5_out'                 -> 13 x 13 x 1024
    #   'stage4/block1/relu_1x1conv_1' -> 26 x 26 x 464
    #   'stage3/block1/relu_1x1conv_1' -> 52 x 52 x 232
    feature_layer_names = (
        '1x1conv5_out',
        'stage4/block1/relu_1x1conv_1',
        'stage3/block1/relu_1x1conv_1',
    )
    f1, f2, f3 = (backbone.get_layer(name).output for name in feature_layer_names)

    # Head channel widths matching the tapped feature depths.
    feature_channel_nums = (1024, 464, 232)

    y1, y2, y3 = yolo3lite_predictions(
        (f1, f2, f3), feature_channel_nums, num_anchors, num_classes)
    return Model(inputs=inputs, outputs=[y1, y2, y3])
def yolo3lite_spp_shufflenetv2_body(inputs, num_anchors, num_classes):
    '''Create YOLO_v3 Lite SPP ShuffleNetV2 model CNN body in keras.'''
    backbone = ShuffleNetV2(input_tensor=inputs, weights=None, include_top=False)
    print('backbone layers number: {}'.format(len(backbone.layers)))

    # Feature maps tapped from the backbone (for a 416 x 416 x 3 input):
    #   '1x1conv5_out'                 -> 13 x 13 x 1024
    #   'stage4/block1/relu_1x1conv_1' -> 26 x 26 x 464
    #   'stage3/block1/relu_1x1conv_1' -> 52 x 52 x 232
    feature_layer_names = (
        '1x1conv5_out',
        'stage4/block1/relu_1x1conv_1',
        'stage3/block1/relu_1x1conv_1',
    )
    f1, f2, f3 = (backbone.get_layer(name).output for name in feature_layer_names)

    # Head channel widths matching the tapped feature depths.
    feature_channel_nums = (1024, 464, 232)

    # Same lite head as yolo3lite_shufflenetv2_body, plus an SPP module.
    y1, y2, y3 = yolo3lite_predictions(
        (f1, f2, f3), feature_channel_nums, num_anchors, num_classes, use_spp=True)
    return Model(inputs=inputs, outputs=[y1, y2, y3])
def tiny_yolo3_shufflenetv2_body(inputs, num_anchors, num_classes):
    '''Create Tiny YOLO_v3 ShuffleNetV2 model CNN body in keras.'''
    backbone = ShuffleNetV2(input_tensor=inputs, weights=None, include_top=False)
    print('backbone layers number: {}'.format(len(backbone.layers)))

    # Feature maps tapped from the backbone (for a 416 x 416 x 3 input):
    #   '1x1conv5_out'                 -> 13 x 13 x 1024
    #   'stage4/block1/relu_1x1conv_1' -> 26 x 26 x 464
    f1 = backbone.get_layer('1x1conv5_out').output
    f2 = backbone.get_layer('stage4/block1/relu_1x1conv_1').output

    # Head channel widths matching the tapped feature depths.
    feature_channel_nums = (1024, 464)

    y1, y2 = tiny_yolo3_predictions(
        (f1, f2), feature_channel_nums, num_anchors, num_classes)
    return Model(inputs, [y1, y2])
def tiny_yolo3lite_shufflenetv2_body(inputs, num_anchors, num_classes):
    '''Create Tiny YOLO_v3 Lite ShuffleNetV2 model CNN body in keras.'''
    backbone = ShuffleNetV2(input_tensor=inputs, weights=None, include_top=False)
    print('backbone layers number: {}'.format(len(backbone.layers)))

    # Feature maps tapped from the backbone (for a 416 x 416 x 3 input):
    #   '1x1conv5_out'                 -> 13 x 13 x 1024
    #   'stage4/block1/relu_1x1conv_1' -> 26 x 26 x 464
    f1 = backbone.get_layer('1x1conv5_out').output
    f2 = backbone.get_layer('stage4/block1/relu_1x1conv_1').output

    # Head channel widths matching the tapped feature depths.
    feature_channel_nums = (1024, 464)

    y1, y2 = tiny_yolo3lite_predictions(
        (f1, f2), feature_channel_nums, num_anchors, num_classes)
    return Model(inputs, [y1, y2])
| 38.530201
| 215
| 0.702491
| 861
| 5,741
| 4.430894
| 0.108014
| 0.102228
| 0.08021
| 0.084928
| 0.862123
| 0.862123
| 0.862123
| 0.862123
| 0.862123
| 0.84692
| 0
| 0.135324
| 0.200662
| 5,741
| 148
| 216
| 38.790541
| 0.696012
| 0.311618
| 0
| 0.745455
| 0
| 0
| 0.111171
| 0.06015
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.072727
| 0
| 0.254545
| 0.090909
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
650b9d8870752bc3b0826615663d65b1e87b2796
| 1,779
|
py
|
Python
|
imagersite/imager_images/models.py
|
allanliebold/django-imager
|
1e89652ee344172b1a36a21fae1a7baf8cc28df1
|
[
"MIT"
] | null | null | null |
imagersite/imager_images/models.py
|
allanliebold/django-imager
|
1e89652ee344172b1a36a21fae1a7baf8cc28df1
|
[
"MIT"
] | null | null | null |
imagersite/imager_images/models.py
|
allanliebold/django-imager
|
1e89652ee344172b1a36a21fae1a7baf8cc28df1
|
[
"MIT"
] | null | null | null |
"""Models."""
from django.db import models
from django.contrib.auth.models import User
class Photo(models.Model):
    """A single user-uploaded image with visibility metadata."""

    # Owning user; deleting the user cascades to their photos.
    user = models.ForeignKey(User,
                             on_delete=models.CASCADE,
                             related_name='photo')
    image = models.ImageField(upload_to='images')
    title = models.CharField(max_length=30, blank=False)
    description = models.TextField(blank=True)
    date_uploaded = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    # NOTE(review): auto_now refreshes this on *every* save, so the field
    # tracks the last save time rather than a one-time publication time —
    # confirm this is intended.
    date_published = models.DateTimeField(auto_now=True)
    # Visibility choices for the `published` field below.
    PUBLISHED = [
        ('PRIVATE', 'Private'),
        ('SHARED', 'Shared'),
        ('PUBLIC', 'Public')
    ]
    published = models.CharField(
        max_length=10,
        choices=PUBLISHED,
        blank=True
    )

    def __str__(self):
        """Return the photo title."""
        return self.title
class Album(models.Model):
    """A user-owned collection of Photo instances."""

    # Owning user; deleting the user cascades to their albums.
    user = models.ForeignKey(User,
                             on_delete=models.CASCADE,
                             related_name='album')
    # A photo may belong to many albums and vice versa.
    photo = models.ManyToManyField(Photo, related_name='album')
    title = models.CharField(max_length=30, blank=False)
    description = models.TextField(blank=True)
    date_uploaded = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    # NOTE(review): auto_now refreshes this on *every* save — see the same
    # note on Photo.date_published; confirm intent.
    date_published = models.DateTimeField(auto_now=True)
    # Visibility choices for the `published` field below.
    PUBLISHED = [
        ('PRIVATE', 'Private'),
        ('SHARED', 'Shared'),
        ('PUBLIC', 'Public')
    ]
    published = models.CharField(
        max_length=10,
        choices=PUBLISHED,
        blank=True
    )

    def __str__(self):
        """Return Album title."""
        return self.title
| 27.796875
| 63
| 0.609893
| 185
| 1,779
| 5.691892
| 0.302703
| 0.045584
| 0.131054
| 0.148148
| 0.731244
| 0.731244
| 0.731244
| 0.731244
| 0.731244
| 0.731244
| 0
| 0.006149
| 0.26869
| 1,779
| 63
| 64
| 28.238095
| 0.803228
| 0.050028
| 0
| 0.73913
| 0
| 0
| 0.058293
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043478
| false
| 0
| 0.043478
| 0
| 0.565217
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
3303be2c8afd3c8a06638659c37bc04822397703
| 89
|
py
|
Python
|
server/handler/__init__.py
|
10000ms/aiohttp_mongodb_unit
|
5163b3e34b1648ea3a2d6135fb367debd6ed87a7
|
[
"MIT"
] | null | null | null |
server/handler/__init__.py
|
10000ms/aiohttp_mongodb_unit
|
5163b3e34b1648ea3a2d6135fb367debd6ed87a7
|
[
"MIT"
] | null | null | null |
server/handler/__init__.py
|
10000ms/aiohttp_mongodb_unit
|
5163b3e34b1648ea3a2d6135fb367debd6ed87a7
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from server.handler import user
from server.handler import error
| 22.25
| 32
| 0.730337
| 13
| 89
| 5
| 0.692308
| 0.307692
| 0.523077
| 0.707692
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013158
| 0.146067
| 89
| 3
| 33
| 29.666667
| 0.842105
| 0.235955
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
3311adbf8db38ae4b31584b1382497ba77c4d46b
| 4,903
|
py
|
Python
|
pytorch_bcnn/links/connection/pixel_shuffle_upsampler.py
|
psadda/pytorch_bayesian_unet
|
bb22b44c64f5d83d78aa93880da97e0e6168dc1c
|
[
"MIT"
] | 34
|
2020-03-30T16:48:45.000Z
|
2022-03-25T15:53:08.000Z
|
pytorch_bcnn/links/connection/pixel_shuffle_upsampler.py
|
IhabBendidi/pytorch_bayesian_unet
|
cc09653a051072790760447c711887e289ed11dc
|
[
"MIT"
] | 2
|
2021-01-24T04:21:16.000Z
|
2021-04-25T19:22:14.000Z
|
pytorch_bcnn/links/connection/pixel_shuffle_upsampler.py
|
IhabBendidi/pytorch_bayesian_unet
|
cc09653a051072790760447c711887e289ed11dc
|
[
"MIT"
] | 15
|
2020-04-10T05:29:31.000Z
|
2022-01-03T08:45:02.000Z
|
from __future__ import absolute_import
import torch
import torch.nn as nn
import torch.nn.functional as F
class PixelShuffleUpsampler2D(nn.Conv2d):
    """Pixel Shuffler for the super resolution.

    This upsampler is effective upsampling method compared with the deconvolution.
    The deconvolution has a problem of the checkerboard artifact.
    A detail of this problem shows the following.
    http://distill.pub/2016/deconv-checkerboard/

    See also:
        https://arxiv.org/abs/1609.05158

    Args:
        in_channels (int): number of input channels.
        out_channels (int): number of output channels *after* the shuffle.
        resolution (int): spatial upsampling factor per dimension.
        kernel_size, stride, padding, dilation, groups, bias, padding_mode:
            forwarded to ``nn.Conv2d``.
    """
    ndim = 2

    def __init__(self, in_channels, out_channels, resolution, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1,
                 bias=True, padding_mode='zeros'):
        # The convolution emits resolution**ndim sub-pixel variants of each
        # output channel; `forward` rearranges them into the upscaled grid.
        m = resolution ** self.ndim
        super().__init__(
            in_channels, out_channels * m, kernel_size, stride,
            padding, dilation, groups, bias, padding_mode)
        self.resolution = resolution
        # Restore the user-facing channel count (the parent ctor stored
        # out_channels * m).
        self.out_channels = out_channels

    def extra_repr(self):
        """Return the repr argument string, including `resolution`."""
        s = ('{in_channels}, {out_channels}, resolution={resolution}'
             ', kernel_size={kernel_size}, stride={stride}')
        if self.padding != (0,) * len(self.padding):
            s += ', padding={padding}'
        if self.dilation != (1,) * len(self.dilation):
            s += ', dilation={dilation}'
        if self.output_padding != (0,) * len(self.output_padding):
            s += ', output_padding={output_padding}'
        if self.groups != 1:
            s += ', groups={groups}'
        if self.bias is None:
            s += ', bias=False'
        if self.padding_mode != 'zeros':
            s += ', padding_mode={padding_mode}'
        return s.format(**self.__dict__)

    def forward(self, x):
        """Convolve `x`, then reshuffle sub-pixels to upscale by `resolution`.

        Input (B, in_channels, H, W) -> output (B, out_channels, r*H, r*W).
        """
        r = self.resolution
        out = super().forward(x)
        batchsize = out.shape[0]
        out_channels = self.out_channels
        in_shape = out.shape[2:]
        out_shape = tuple(s * r for s in in_shape)
        r_tuple = tuple(self.resolution for _ in range(self.ndim))
        # (B, C*r^n, *in_shape) -> (B, C, r, ..., r, *in_shape)
        out = out.view((batchsize, out_channels,) + r_tuple + in_shape)
        # Reorder axes per make_transpose_indices before flattening back to
        # the upscaled spatial shape.
        # NOTE(review): the axis order differs from torch's F.pixel_shuffle
        # convention — verify the intended sub-pixel layout.
        out = out.permute(self.make_transpose_indices()).contiguous()
        out = out.view((batchsize, out_channels, ) + out_shape)
        return out

    def make_transpose_indices(self):
        """Return the permutation used by `forward` to interleave axes."""
        si = [0, 1]
        si.extend([2 * (i + 1) + 1 for i in range(self.ndim)])
        si.extend([2 * (i + 1) for i in range(self.ndim)])
        return si
class PixelShuffleUpsampler3D(nn.Conv3d):
    """Pixel Shuffler for the super resolution.

    This upsampler is effective upsampling method compared with the deconvolution.
    The deconvolution has a problem of the checkerboard artifact.
    A detail of this problem shows the following.
    http://distill.pub/2016/deconv-checkerboard/

    See also:
        https://arxiv.org/abs/1609.05158

    Args:
        in_channels (int): number of input channels.
        out_channels (int): number of output channels *after* the shuffle.
        resolution (int): spatial upsampling factor per dimension.
        kernel_size, stride, padding, dilation, groups, bias, padding_mode:
            forwarded to ``nn.Conv3d``.
    """
    ndim = 3

    def __init__(self, in_channels, out_channels, resolution, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1,
                 bias=True, padding_mode='zeros'):
        # The convolution emits resolution**ndim sub-pixel variants of each
        # output channel; `forward` rearranges them into the upscaled grid.
        m = resolution ** self.ndim
        super().__init__(
            in_channels, out_channels * m, kernel_size, stride,
            padding, dilation, groups, bias, padding_mode)
        self.resolution = resolution
        # Restore the user-facing channel count (the parent ctor stored
        # out_channels * m).
        self.out_channels = out_channels

    def extra_repr(self):
        """Return the repr argument string, including `resolution`."""
        s = ('{in_channels}, {out_channels}, resolution={resolution}'
             ', kernel_size={kernel_size}, stride={stride}')
        if self.padding != (0,) * len(self.padding):
            s += ', padding={padding}'
        if self.dilation != (1,) * len(self.dilation):
            s += ', dilation={dilation}'
        if self.output_padding != (0,) * len(self.output_padding):
            s += ', output_padding={output_padding}'
        if self.groups != 1:
            s += ', groups={groups}'
        if self.bias is None:
            s += ', bias=False'
        if self.padding_mode != 'zeros':
            s += ', padding_mode={padding_mode}'
        return s.format(**self.__dict__)

    def forward(self, x):
        """Convolve `x`, then reshuffle sub-pixels to upscale by `resolution`.

        Input (B, in_channels, D, H, W) -> output
        (B, out_channels, r*D, r*H, r*W).
        """
        r = self.resolution
        out = super().forward(x)
        batchsize = out.shape[0]
        out_channels = self.out_channels
        in_shape = out.shape[2:]
        out_shape = tuple(s * r for s in in_shape)
        r_tuple = tuple(self.resolution for _ in range(self.ndim))
        # (B, C*r^n, *in_shape) -> (B, C, r, ..., r, *in_shape)
        out = out.view((batchsize, out_channels,) + r_tuple + in_shape)
        # Reorder axes per make_transpose_indices before flattening back to
        # the upscaled spatial shape.
        # NOTE(review): the axis order mirrors the 2D variant — verify the
        # intended sub-pixel layout.
        out = out.permute(self.make_transpose_indices()).contiguous()
        out = out.view((batchsize, out_channels, ) + out_shape)
        return out

    def make_transpose_indices(self):
        """Return the permutation used by `forward` to interleave axes."""
        si = [0, 1]
        si.extend([2 * (i + 1) + 1 for i in range(self.ndim)])
        si.extend([2 * (i + 1) for i in range(self.ndim)])
        return si
| 36.051471
| 84
| 0.603508
| 614
| 4,903
| 4.644951
| 0.162866
| 0.069425
| 0.036466
| 0.04418
| 0.928471
| 0.928471
| 0.928471
| 0.928471
| 0.928471
| 0.928471
| 0
| 0.019674
| 0.274322
| 4,903
| 135
| 85
| 36.318519
| 0.7819
| 0.130736
| 0
| 0.893617
| 0
| 0
| 0.114
| 0.051037
| 0
| 0
| 0
| 0
| 0
| 1
| 0.085106
| false
| 0
| 0.042553
| 0
| 0.234043
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
332362264777945740f0f0020b4a1ca93c01e76c
| 15,868
|
py
|
Python
|
tests/test_modules/test_biggan_deep_archs.py
|
plutoyuxie/mmgeneration
|
0a7f5d16c970de1766ebf049d7a0264fe506504b
|
[
"Apache-2.0"
] | 718
|
2021-04-15T11:26:20.000Z
|
2022-03-31T03:11:56.000Z
|
tests/test_modules/test_biggan_deep_archs.py
|
plutoyuxie/mmgeneration
|
0a7f5d16c970de1766ebf049d7a0264fe506504b
|
[
"Apache-2.0"
] | 191
|
2021-04-15T12:13:34.000Z
|
2022-03-31T16:04:36.000Z
|
tests/test_modules/test_biggan_deep_archs.py
|
plutoyuxie/mmgeneration
|
0a7f5d16c970de1766ebf049d7a0264fe506504b
|
[
"Apache-2.0"
] | 107
|
2021-04-15T12:38:41.000Z
|
2022-03-27T02:47:16.000Z
|
from copy import deepcopy
from functools import partial
import pytest
import torch
from mmgen.models import build_module
# yapf:disable
from mmgen.models.architectures.biggan import (BigGANDeepDiscResBlock,
BigGANDeepDiscriminator,
BigGANDeepGenerator,
BigGANDeepGenResBlock)
# yapf:enable
class TestBigGANDeepGenResBlock:
    """Shape tests for the BigGAN-Deep generator residual block."""

    @classmethod
    def setup_class(cls):
        # Shared default config plus fixtures: x is the feature map, y a
        # class-conditional embedding, label raw integer class ids.
        cls.default_cfg = dict(
            type='BigGANDeepGenResBlock',
            in_channels=32,
            out_channels=16,
            dim_after_concat=100,
            act_cfg=dict(type='ReLU'),
            upsample_cfg=dict(type='nearest', scale_factor=2),
            sn_eps=1e-6,
            bn_eps=1e-5,
            with_spectral_norm=True,
            input_is_label=False,
            auto_sync_bn=True,
            channel_ratio=4)
        cls.x = torch.randn(2, 32, 8, 8)
        cls.y = torch.randn(2, 100)
        cls.label = torch.randint(0, 100, (2, ))

    def test_biggan_deep_gen_res_block(self):
        """CPU: build config variants and check each output shape."""
        # test default setting
        module = build_module(self.default_cfg)
        assert isinstance(module, BigGANDeepGenResBlock)
        out = module(self.x, self.y)
        assert out.shape == (2, 16, 16, 16)
        # test without upsample
        cfg = deepcopy(self.default_cfg)
        cfg.update(dict(upsample_cfg=None))
        module = build_module(cfg)
        out = module(self.x, self.y)
        assert out.shape == (2, 16, 8, 8)
        # test input_is_label
        cfg = deepcopy(self.default_cfg)
        cfg.update(dict(input_is_label=True))
        module = build_module(cfg)
        out = module(self.x, self.label)
        assert out.shape == (2, 16, 16, 16)
        # test torch-sn
        cfg = deepcopy(self.default_cfg)
        cfg.update(dict(sn_style='torch'))
        module = build_module(cfg)
        out = module(self.x, self.y)
        assert out.shape == (2, 16, 16, 16)

    @pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
    def test_biggan_deep_gen_res_block_cuda(self):
        """GPU: same variants as the CPU test, on cuda tensors."""
        # test default setting
        module = build_module(self.default_cfg).cuda()
        assert isinstance(module, BigGANDeepGenResBlock)
        out = module(self.x.cuda(), self.y.cuda())
        assert out.shape == (2, 16, 16, 16)
        # test without upsample
        cfg = deepcopy(self.default_cfg)
        cfg.update(dict(upsample_cfg=None))
        module = build_module(cfg).cuda()
        out = module(self.x.cuda(), self.y.cuda())
        assert out.shape == (2, 16, 8, 8)
        # test input_is_label
        cfg = deepcopy(self.default_cfg)
        cfg.update(dict(input_is_label=True))
        module = build_module(cfg).cuda()
        out = module(self.x.cuda(), self.label.cuda())
        assert out.shape == (2, 16, 16, 16)
        # test torch-sn
        cfg = deepcopy(self.default_cfg)
        cfg.update(dict(sn_style='torch'))
        module = build_module(cfg).cuda()
        out = module(self.x.cuda(), self.y.cuda())
        assert out.shape == (2, 16, 16, 16)
class TestBigGANDeepDiscResBlock:
    """Shape tests for the BigGAN-Deep discriminator residual block."""

    @classmethod
    def setup_class(cls):
        # Shared default config plus an input feature map fixture.
        cls.default_cfg = dict(
            type='BigGANDeepDiscResBlock',
            in_channels=32,
            out_channels=64,
            channel_ratio=4,
            act_cfg=dict(type='ReLU', inplace=False),
            sn_eps=1e-6,
            with_downsample=True,
            with_spectral_norm=True)
        cls.x = torch.randn(2, 32, 16, 16)

    def test_biggan_deep_disc_res_block(self):
        """CPU: build config variants and check each output shape."""
        # test default setting
        module = build_module(self.default_cfg)
        assert isinstance(module, BigGANDeepDiscResBlock)
        out = module(self.x)
        assert out.shape == (2, 64, 8, 8)
        # test with_downsample
        cfg = deepcopy(self.default_cfg)
        cfg.update(dict(with_downsample=False))
        module = build_module(cfg)
        out = module(self.x)
        assert out.shape == (2, 64, 16, 16)
        # test different channel_ratio
        cfg = deepcopy(self.default_cfg)
        cfg.update(dict(channel_ratio=8))
        module = build_module(cfg)
        out = module(self.x)
        assert out.shape == (2, 64, 8, 8)
        # test torch-sn
        cfg = deepcopy(self.default_cfg)
        cfg.update(dict(sn_style='torch'))
        module = build_module(cfg)
        out = module(self.x)
        assert out.shape == (2, 64, 8, 8)

    @pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
    def test_biggan_deep_disc_res_block_cuda(self):
        """GPU: same variants as the CPU test, on cuda tensors."""
        # test default setting
        module = build_module(self.default_cfg).cuda()
        assert isinstance(module, BigGANDeepDiscResBlock)
        out = module(self.x.cuda())
        assert out.shape == (2, 64, 8, 8)
        # test with_downsample
        cfg = deepcopy(self.default_cfg)
        cfg.update(dict(with_downsample=False))
        module = build_module(cfg).cuda()
        out = module(self.x.cuda())
        assert out.shape == (2, 64, 16, 16)
        # test different channel_ratio
        cfg = deepcopy(self.default_cfg)
        cfg.update(dict(channel_ratio=8))
        # Fix: this stanza previously omitted .cuda() on both the module and
        # the input, so it silently ran on CPU inside the CUDA test.
        module = build_module(cfg).cuda()
        out = module(self.x.cuda())
        assert out.shape == (2, 64, 8, 8)
        # test torch-sn
        cfg = deepcopy(self.default_cfg)
        cfg.update(dict(sn_style='torch'))
        module = build_module(cfg).cuda()
        out = module(self.x.cuda())
        assert out.shape == (2, 64, 8, 8)
class TestBigGANDeepGenerator(object):
    """Shape and API tests for the full BigGAN-Deep generator."""

    @classmethod
    def setup_class(cls):
        # Fixtures: a 120-dim noise batch, integer labels, and a small
        # default config (base_channels=4 keeps the model tiny for tests).
        cls.noise = torch.randn((3, 120))
        num_classes = 1000
        cls.label = torch.randint(0, num_classes, (3, ))
        cls.default_config = dict(
            type='BigGANDeepGenerator',
            output_scale=128,
            num_classes=num_classes,
            base_channels=4)

    def test_biggan_deep_generator(self):
        """CPU: exercise output scales, noise handling and config flags."""
        # test default setting with builder
        g = build_module(self.default_config)
        assert isinstance(g, BigGANDeepGenerator)
        res = g(self.noise, self.label)
        assert res.shape == (3, 3, 128, 128)
        # test 'return_noise'
        res = g(self.noise, self.label, return_noise=True)
        assert res['fake_img'].shape == (3, 3, 128, 128)
        assert res['noise_batch'].shape == (3, 120)
        assert res['label'].shape == (3, )
        res = g(None, None, num_batches=3, return_noise=True)
        assert res['fake_img'].shape == (3, 3, 128, 128)
        assert res['noise_batch'].shape == (3, 120)
        assert res['label'].shape == (3, )
        # test callable
        noise = torch.randn
        label = partial(torch.randint, 0, 1000)
        res = g(noise, label, num_batches=2)
        assert res.shape == (2, 3, 128, 128)
        # test different output scale
        cfg = deepcopy(self.default_config)
        cfg.update(dict(output_scale=256))
        g = build_module(cfg)
        noise = torch.randn((3, 120))
        res = g(noise, self.label)
        assert res.shape == (3, 3, 256, 256)
        res = g(None, None, num_batches=3)
        assert res.shape == (3, 3, 256, 256)
        cfg = deepcopy(self.default_config)
        cfg.update(dict(output_scale=512))
        g = build_module(cfg)
        res = g(None, None, num_batches=3)
        assert res.shape == (3, 3, 512, 512)
        cfg = deepcopy(self.default_config)
        cfg.update(dict(output_scale=64))
        g = build_module(cfg)
        res = g(None, None, num_batches=3)
        assert res.shape == (3, 3, 64, 64)
        cfg = deepcopy(self.default_config)
        cfg.update(dict(output_scale=32))
        g = build_module(cfg)
        res = g(None, None, num_batches=3)
        assert res.shape == (3, 3, 32, 32)
        # test with `concat_noise=False`
        cfg = deepcopy(self.default_config)
        cfg.update(dict(concat_noise=False))
        g = build_module(cfg)
        res = g(None, None, num_batches=3)
        assert res.shape == (3, 3, 128, 128)
        # test with `with_spectral_norm=False`
        cfg = deepcopy(self.default_config)
        cfg.update(dict(with_spectral_norm=False))
        g = build_module(cfg)
        res = g(None, None, num_batches=3)
        assert res.shape == (3, 3, 128, 128)
        # test different num_classes
        cfg = deepcopy(self.default_config)
        cfg.update(
            dict(
                num_classes=0, with_shared_embedding=False,
                concat_noise=False))
        g = build_module(cfg)
        res = g(None, None, num_batches=3)
        assert res.shape == (3, 3, 128, 128)
        # test no shared embedding
        cfg = deepcopy(self.default_config)
        cfg.update(dict(with_shared_embedding=False, concat_noise=False))
        g = build_module(cfg)
        res = g(None, None, num_batches=3)
        assert res.shape == (3, 3, 128, 128)
        # test torch-sn
        cfg = deepcopy(self.default_config)
        cfg.update(dict(sn_style='torch'))
        g = build_module(cfg)
        res = g(self.noise, self.label)
        assert res.shape == (3, 3, 128, 128)

    @pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
    def test_biggan_deep_generator_cuda(self):
        """GPU: same variants as the CPU test, on cuda tensors."""
        # test default setting with builder
        g = build_module(self.default_config).cuda()
        assert isinstance(g, BigGANDeepGenerator)
        res = g(self.noise.cuda(), self.label.cuda())
        assert res.shape == (3, 3, 128, 128)
        # test 'return_noise'
        res = g(self.noise.cuda(), self.label.cuda(), return_noise=True)
        assert res['fake_img'].shape == (3, 3, 128, 128)
        assert res['noise_batch'].shape == (3, 120)
        assert res['label'].shape == (3, )
        res = g(None, None, num_batches=3, return_noise=True)
        assert res['fake_img'].shape == (3, 3, 128, 128)
        assert res['noise_batch'].shape == (3, 120)
        assert res['label'].shape == (3, )
        # test callable
        noise = torch.randn
        label = partial(torch.randint, 0, 1000)
        res = g(noise, label, num_batches=2)
        assert res.shape == (2, 3, 128, 128)
        # test different output scale
        cfg = deepcopy(self.default_config)
        cfg.update(dict(output_scale=256))
        g = build_module(cfg).cuda()
        noise = torch.randn((3, 120))
        res = g(noise.cuda(), self.label.cuda())
        assert res.shape == (3, 3, 256, 256)
        res = g(None, None, num_batches=3)
        assert res.shape == (3, 3, 256, 256)
        cfg = deepcopy(self.default_config)
        cfg.update(dict(output_scale=512))
        g = build_module(cfg).cuda()
        res = g(None, None, num_batches=3)
        assert res.shape == (3, 3, 512, 512)
        cfg = deepcopy(self.default_config)
        cfg.update(dict(output_scale=64))
        g = build_module(cfg).cuda()
        res = g(None, None, num_batches=3)
        assert res.shape == (3, 3, 64, 64)
        cfg = deepcopy(self.default_config)
        cfg.update(dict(output_scale=32))
        g = build_module(cfg).cuda()
        res = g(None, None, num_batches=3)
        assert res.shape == (3, 3, 32, 32)
        # test with `concat_noise=False`
        cfg = deepcopy(self.default_config)
        cfg.update(dict(concat_noise=False))
        g = build_module(cfg).cuda()
        res = g(None, None, num_batches=3)
        assert res.shape == (3, 3, 128, 128)
        # test with `with_spectral_norm=False`
        cfg = deepcopy(self.default_config)
        cfg.update(dict(with_spectral_norm=False))
        g = build_module(cfg).cuda()
        res = g(None, None, num_batches=3)
        assert res.shape == (3, 3, 128, 128)
        # test different num_classes
        cfg = deepcopy(self.default_config)
        cfg.update(
            dict(
                num_classes=0, with_shared_embedding=False,
                concat_noise=False))
        g = build_module(cfg).cuda()
        res = g(None, None, num_batches=3)
        assert res.shape == (3, 3, 128, 128)
        # test no shared embedding
        cfg = deepcopy(self.default_config)
        cfg.update(dict(with_shared_embedding=False, concat_noise=False))
        g = build_module(cfg).cuda()
        res = g(None, None, num_batches=3)
        assert res.shape == (3, 3, 128, 128)
        # test torch-sn
        cfg = deepcopy(self.default_config)
        cfg.update(dict(sn_style='torch'))
        g = build_module(cfg).cuda()
        res = g(None, None, num_batches=3)
        assert res.shape == (3, 3, 128, 128)
class TestBigGANDeepDiscriminator(object):
    """Shape and API tests for the full BigGAN-Deep discriminator."""

    @classmethod
    def setup_class(cls):
        # Fixtures: a fake-image batch, integer labels, and a small default
        # config (base_channels=8 keeps the model tiny for tests).
        num_classes = 1000
        cls.default_config = dict(
            type='BigGANDeepDiscriminator',
            input_scale=128,
            num_classes=num_classes,
            base_channels=8)
        cls.x = torch.randn((2, 3, 128, 128))
        cls.label = torch.randint(0, num_classes, (2, ))

    def test_biggan_deep_discriminator(self):
        """CPU: exercise init types, num_classes and spectral-norm flags."""
        # test default settings
        d = build_module(self.default_config)
        assert isinstance(d, BigGANDeepDiscriminator)
        y = d(self.x, self.label)
        assert y.shape == (2, 1)
        # test different init types
        cfg = deepcopy(self.default_config)
        cfg.update(dict(init_type='N02'))
        d = build_module(cfg)
        y = d(self.x, self.label)
        assert y.shape == (2, 1)
        cfg = deepcopy(self.default_config)
        cfg.update(dict(init_type='xavier'))
        d = build_module(cfg)
        y = d(self.x, self.label)
        assert y.shape == (2, 1)
        # test different num_classes
        cfg = deepcopy(self.default_config)
        cfg.update(dict(num_classes=0))
        d = build_module(cfg)
        y = d(self.x, None)
        assert y.shape == (2, 1)
        # test with `with_spectral_norm=False`
        cfg = deepcopy(self.default_config)
        cfg.update(dict(with_spectral_norm=False))
        d = build_module(cfg)
        y = d(self.x, self.label)
        assert y.shape == (2, 1)
        # test torch-sn
        cfg = deepcopy(self.default_config)
        cfg.update(dict(sn_style='torch'))
        d = build_module(cfg)
        y = d(self.x, self.label)
        assert y.shape == (2, 1)

    @pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
    def test_biggan_deep_discriminator_cuda(self):
        """GPU: same variants as the CPU test, on cuda tensors."""
        # test default settings
        d = build_module(self.default_config).cuda()
        assert isinstance(d, BigGANDeepDiscriminator)
        y = d(self.x.cuda(), self.label.cuda())
        assert y.shape == (2, 1)
        # test different init types
        cfg = deepcopy(self.default_config)
        cfg.update(dict(init_type='N02'))
        d = build_module(cfg).cuda()
        y = d(self.x.cuda(), self.label.cuda())
        assert y.shape == (2, 1)
        cfg = deepcopy(self.default_config)
        cfg.update(dict(init_type='xavier'))
        d = build_module(cfg).cuda()
        y = d(self.x.cuda(), self.label.cuda())
        assert y.shape == (2, 1)
        # test different num_classes
        cfg = deepcopy(self.default_config)
        cfg.update(dict(num_classes=0))
        d = build_module(cfg).cuda()
        y = d(self.x.cuda(), None)
        assert y.shape == (2, 1)
        # test with `with_spectral_norm=False`
        cfg = deepcopy(self.default_config)
        cfg.update(dict(with_spectral_norm=False))
        d = build_module(cfg).cuda()
        y = d(self.x.cuda(), self.label.cuda())
        assert y.shape == (2, 1)
        # test torch-sn
        cfg = deepcopy(self.default_config)
        cfg.update(dict(sn_style='torch'))
        d = build_module(cfg).cuda()
        y = d(self.x.cuda(), self.label.cuda())
        assert y.shape == (2, 1)
| 34.420824
| 78
| 0.582556
| 2,072
| 15,868
| 4.319498
| 0.064672
| 0.060223
| 0.067039
| 0.098324
| 0.91676
| 0.885698
| 0.873184
| 0.860335
| 0.813631
| 0.811173
| 0
| 0.044306
| 0.291656
| 15,868
| 460
| 79
| 34.495652
| 0.751957
| 0.066801
| 0
| 0.813411
| 0
| 0
| 0.02073
| 0.004471
| 0
| 0
| 0
| 0
| 0.209913
| 1
| 0.034985
| false
| 0
| 0.017493
| 0
| 0.06414
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
683e092f89710072f596285139a1b4cad0294c46
| 123,056
|
bzl
|
Python
|
dnn/scripts/cutlass_generator/list.bzl
|
nero19960329/MegEngine
|
4462953fba45bdfb9aaf47b406688206fa5796c3
|
[
"Apache-2.0"
] | 1
|
2022-03-21T03:13:45.000Z
|
2022-03-21T03:13:45.000Z
|
dnn/scripts/cutlass_generator/list.bzl
|
Viktor-Paul/MegEngine
|
4462953fba45bdfb9aaf47b406688206fa5796c3
|
[
"Apache-2.0"
] | null | null | null |
dnn/scripts/cutlass_generator/list.bzl
|
Viktor-Paul/MegEngine
|
4462953fba45bdfb9aaf47b406688206fa5796c3
|
[
"Apache-2.0"
] | null | null | null |
# Generated by dnn/scripts/cutlass_generator/gen_list.py
cutlass_gen_list = [
"cutlass_simt_sgemm_8x32_8x2_nn_align1.cu",
"cutlass_simt_sgemm_split_k_parallel_8x32_8x2_nn_align1.cu",
"cutlass_simt_sgemm_16x32_8x2_nn_align1.cu",
"cutlass_simt_sgemm_split_k_parallel_16x32_8x2_nn_align1.cu",
"cutlass_simt_sgemm_16x64_8x2_nn_align1.cu",
"cutlass_simt_sgemm_split_k_parallel_16x64_8x2_nn_align1.cu",
"cutlass_simt_sgemm_32x32_8x2_nn_align1.cu",
"cutlass_simt_sgemm_split_k_parallel_32x32_8x2_nn_align1.cu",
"cutlass_simt_sgemm_32x64_8x2_nn_align1.cu",
"cutlass_simt_sgemm_split_k_parallel_32x64_8x2_nn_align1.cu",
"cutlass_simt_sgemm_64x32_8x2_nn_align1.cu",
"cutlass_simt_sgemm_split_k_parallel_64x32_8x2_nn_align1.cu",
"cutlass_simt_sgemm_16x128_8x2_nn_align1.cu",
"cutlass_simt_sgemm_split_k_parallel_16x128_8x2_nn_align1.cu",
"cutlass_simt_sgemm_32x128_8x2_nn_align1.cu",
"cutlass_simt_sgemm_split_k_parallel_32x128_8x2_nn_align1.cu",
"cutlass_simt_sgemm_64x64_8x2_nn_align1.cu",
"cutlass_simt_sgemm_split_k_parallel_64x64_8x2_nn_align1.cu",
"cutlass_simt_sgemm_128x32_8x2_nn_align1.cu",
"cutlass_simt_sgemm_split_k_parallel_128x32_8x2_nn_align1.cu",
"cutlass_simt_sgemm_64x128_8x2_nn_align1.cu",
"cutlass_simt_sgemm_split_k_parallel_64x128_8x2_nn_align1.cu",
"cutlass_simt_sgemm_128x64_8x2_nn_align1.cu",
"cutlass_simt_sgemm_split_k_parallel_128x64_8x2_nn_align1.cu",
"cutlass_simt_sgemm_32x256_8x2_nn_align1.cu",
"cutlass_simt_sgemm_split_k_parallel_32x256_8x2_nn_align1.cu",
"cutlass_simt_sgemm_64x256_8x2_nn_align1.cu",
"cutlass_simt_sgemm_split_k_parallel_64x256_8x2_nn_align1.cu",
"cutlass_simt_sgemm_128x128_8x2_nn_align1.cu",
"cutlass_simt_sgemm_split_k_parallel_128x128_8x2_nn_align1.cu",
"cutlass_simt_sgemm_256x32_8x2_nn_align1.cu",
"cutlass_simt_sgemm_split_k_parallel_256x32_8x2_nn_align1.cu",
"cutlass_simt_sgemm_256x64_8x2_nn_align1.cu",
"cutlass_simt_sgemm_split_k_parallel_256x64_8x2_nn_align1.cu",
"cutlass_simt_sgemm_8x32_8x2_nt_align1.cu",
"cutlass_simt_sgemm_split_k_parallel_8x32_8x2_nt_align1.cu",
"cutlass_simt_sgemm_16x32_8x2_nt_align1.cu",
"cutlass_simt_sgemm_split_k_parallel_16x32_8x2_nt_align1.cu",
"cutlass_simt_sgemm_16x64_8x2_nt_align1.cu",
"cutlass_simt_sgemm_split_k_parallel_16x64_8x2_nt_align1.cu",
"cutlass_simt_sgemm_32x32_8x2_nt_align1.cu",
"cutlass_simt_sgemm_split_k_parallel_32x32_8x2_nt_align1.cu",
"cutlass_simt_sgemm_32x64_8x2_nt_align1.cu",
"cutlass_simt_sgemm_split_k_parallel_32x64_8x2_nt_align1.cu",
"cutlass_simt_sgemm_64x32_8x2_nt_align1.cu",
"cutlass_simt_sgemm_split_k_parallel_64x32_8x2_nt_align1.cu",
"cutlass_simt_sgemm_16x128_8x2_nt_align1.cu",
"cutlass_simt_sgemm_split_k_parallel_16x128_8x2_nt_align1.cu",
"cutlass_simt_sgemm_32x128_8x2_nt_align1.cu",
"cutlass_simt_sgemm_split_k_parallel_32x128_8x2_nt_align1.cu",
"cutlass_simt_sgemm_64x64_8x2_nt_align1.cu",
"cutlass_simt_sgemm_split_k_parallel_64x64_8x2_nt_align1.cu",
"cutlass_simt_sgemm_128x32_8x2_nt_align1.cu",
"cutlass_simt_sgemm_split_k_parallel_128x32_8x2_nt_align1.cu",
"cutlass_simt_sgemm_64x128_8x2_nt_align1.cu",
"cutlass_simt_sgemm_split_k_parallel_64x128_8x2_nt_align1.cu",
"cutlass_simt_sgemm_128x64_8x2_nt_align1.cu",
"cutlass_simt_sgemm_split_k_parallel_128x64_8x2_nt_align1.cu",
"cutlass_simt_sgemm_32x256_8x2_nt_align1.cu",
"cutlass_simt_sgemm_split_k_parallel_32x256_8x2_nt_align1.cu",
"cutlass_simt_sgemm_64x256_8x2_nt_align1.cu",
"cutlass_simt_sgemm_split_k_parallel_64x256_8x2_nt_align1.cu",
"cutlass_simt_sgemm_128x128_8x2_nt_align1.cu",
"cutlass_simt_sgemm_split_k_parallel_128x128_8x2_nt_align1.cu",
"cutlass_simt_sgemm_256x32_8x2_nt_align1.cu",
"cutlass_simt_sgemm_split_k_parallel_256x32_8x2_nt_align1.cu",
"cutlass_simt_sgemm_256x64_8x2_nt_align1.cu",
"cutlass_simt_sgemm_split_k_parallel_256x64_8x2_nt_align1.cu",
"cutlass_simt_sgemm_8x32_8x2_tn_align1.cu",
"cutlass_simt_sgemm_split_k_parallel_8x32_8x2_tn_align1.cu",
"cutlass_simt_sgemm_16x32_8x2_tn_align1.cu",
"cutlass_simt_sgemm_split_k_parallel_16x32_8x2_tn_align1.cu",
"cutlass_simt_sgemm_16x64_8x2_tn_align1.cu",
"cutlass_simt_sgemm_split_k_parallel_16x64_8x2_tn_align1.cu",
"cutlass_simt_sgemm_32x32_8x2_tn_align1.cu",
"cutlass_simt_sgemm_split_k_parallel_32x32_8x2_tn_align1.cu",
"cutlass_simt_sgemm_32x64_8x2_tn_align1.cu",
"cutlass_simt_sgemm_split_k_parallel_32x64_8x2_tn_align1.cu",
"cutlass_simt_sgemm_64x32_8x2_tn_align1.cu",
"cutlass_simt_sgemm_split_k_parallel_64x32_8x2_tn_align1.cu",
"cutlass_simt_sgemm_16x128_8x2_tn_align1.cu",
"cutlass_simt_sgemm_split_k_parallel_16x128_8x2_tn_align1.cu",
"cutlass_simt_sgemm_32x128_8x2_tn_align1.cu",
"cutlass_simt_sgemm_split_k_parallel_32x128_8x2_tn_align1.cu",
"cutlass_simt_sgemm_64x64_8x2_tn_align1.cu",
"cutlass_simt_sgemm_split_k_parallel_64x64_8x2_tn_align1.cu",
"cutlass_simt_sgemm_128x32_8x2_tn_align1.cu",
"cutlass_simt_sgemm_split_k_parallel_128x32_8x2_tn_align1.cu",
"cutlass_simt_sgemm_64x128_8x2_tn_align1.cu",
"cutlass_simt_sgemm_split_k_parallel_64x128_8x2_tn_align1.cu",
"cutlass_simt_sgemm_128x64_8x2_tn_align1.cu",
"cutlass_simt_sgemm_split_k_parallel_128x64_8x2_tn_align1.cu",
"cutlass_simt_sgemm_32x256_8x2_tn_align1.cu",
"cutlass_simt_sgemm_split_k_parallel_32x256_8x2_tn_align1.cu",
"cutlass_simt_sgemm_64x256_8x2_tn_align1.cu",
"cutlass_simt_sgemm_split_k_parallel_64x256_8x2_tn_align1.cu",
"cutlass_simt_sgemm_128x128_8x2_tn_align1.cu",
"cutlass_simt_sgemm_split_k_parallel_128x128_8x2_tn_align1.cu",
"cutlass_simt_sgemm_256x32_8x2_tn_align1.cu",
"cutlass_simt_sgemm_split_k_parallel_256x32_8x2_tn_align1.cu",
"cutlass_simt_sgemm_256x64_8x2_tn_align1.cu",
"cutlass_simt_sgemm_split_k_parallel_256x64_8x2_tn_align1.cu",
"cutlass_simt_sgemm_8x32_8x2_tt_align1.cu",
"cutlass_simt_sgemm_split_k_parallel_8x32_8x2_tt_align1.cu",
"cutlass_simt_sgemm_16x32_8x2_tt_align1.cu",
"cutlass_simt_sgemm_split_k_parallel_16x32_8x2_tt_align1.cu",
"cutlass_simt_sgemm_16x64_8x2_tt_align1.cu",
"cutlass_simt_sgemm_split_k_parallel_16x64_8x2_tt_align1.cu",
"cutlass_simt_sgemm_32x32_8x2_tt_align1.cu",
"cutlass_simt_sgemm_split_k_parallel_32x32_8x2_tt_align1.cu",
"cutlass_simt_sgemm_32x64_8x2_tt_align1.cu",
"cutlass_simt_sgemm_split_k_parallel_32x64_8x2_tt_align1.cu",
"cutlass_simt_sgemm_64x32_8x2_tt_align1.cu",
"cutlass_simt_sgemm_split_k_parallel_64x32_8x2_tt_align1.cu",
"cutlass_simt_sgemm_16x128_8x2_tt_align1.cu",
"cutlass_simt_sgemm_split_k_parallel_16x128_8x2_tt_align1.cu",
"cutlass_simt_sgemm_32x128_8x2_tt_align1.cu",
"cutlass_simt_sgemm_split_k_parallel_32x128_8x2_tt_align1.cu",
"cutlass_simt_sgemm_64x64_8x2_tt_align1.cu",
"cutlass_simt_sgemm_split_k_parallel_64x64_8x2_tt_align1.cu",
"cutlass_simt_sgemm_128x32_8x2_tt_align1.cu",
"cutlass_simt_sgemm_split_k_parallel_128x32_8x2_tt_align1.cu",
"cutlass_simt_sgemm_64x128_8x2_tt_align1.cu",
"cutlass_simt_sgemm_split_k_parallel_64x128_8x2_tt_align1.cu",
"cutlass_simt_sgemm_128x64_8x2_tt_align1.cu",
"cutlass_simt_sgemm_split_k_parallel_128x64_8x2_tt_align1.cu",
"cutlass_simt_sgemm_32x256_8x2_tt_align1.cu",
"cutlass_simt_sgemm_split_k_parallel_32x256_8x2_tt_align1.cu",
"cutlass_simt_sgemm_64x256_8x2_tt_align1.cu",
"cutlass_simt_sgemm_split_k_parallel_64x256_8x2_tt_align1.cu",
"cutlass_simt_sgemm_128x128_8x2_tt_align1.cu",
"cutlass_simt_sgemm_split_k_parallel_128x128_8x2_tt_align1.cu",
"cutlass_simt_sgemm_256x32_8x2_tt_align1.cu",
"cutlass_simt_sgemm_split_k_parallel_256x32_8x2_tt_align1.cu",
"cutlass_simt_sgemm_256x64_8x2_tt_align1.cu",
"cutlass_simt_sgemm_split_k_parallel_256x64_8x2_tt_align1.cu",
"all_gemm_simt_operations.cu",
"cutlass_tensorop_f16_s1688gemm_f16_256x128_32x2_nn_align8.cu",
"cutlass_tensorop_f16_s1688gemm_split_k_parallel_f16_256x128_32x2_nn_align8.cu",
"cutlass_tensorop_f16_s1688gemm_f16_128x256_32x2_nn_align8.cu",
"cutlass_tensorop_f16_s1688gemm_split_k_parallel_f16_128x256_32x2_nn_align8.cu",
"cutlass_tensorop_f16_s1688gemm_f16_128x128_32x2_nn_align8.cu",
"cutlass_tensorop_f16_s1688gemm_split_k_parallel_f16_128x128_32x2_nn_align8.cu",
"cutlass_tensorop_f16_s1688gemm_f16_256x128_32x2_nn_align4.cu",
"cutlass_tensorop_f16_s1688gemm_split_k_parallel_f16_256x128_32x2_nn_align4.cu",
"cutlass_tensorop_f16_s1688gemm_f16_128x256_32x2_nn_align4.cu",
"cutlass_tensorop_f16_s1688gemm_split_k_parallel_f16_128x256_32x2_nn_align4.cu",
"cutlass_tensorop_f16_s1688gemm_f16_128x128_32x2_nn_align4.cu",
"cutlass_tensorop_f16_s1688gemm_split_k_parallel_f16_128x128_32x2_nn_align4.cu",
"cutlass_tensorop_f16_s1688gemm_f16_256x128_32x2_nn_align2.cu",
"cutlass_tensorop_f16_s1688gemm_split_k_parallel_f16_256x128_32x2_nn_align2.cu",
"cutlass_tensorop_f16_s1688gemm_f16_128x256_32x2_nn_align2.cu",
"cutlass_tensorop_f16_s1688gemm_split_k_parallel_f16_128x256_32x2_nn_align2.cu",
"cutlass_tensorop_f16_s1688gemm_f16_128x128_32x2_nn_align2.cu",
"cutlass_tensorop_f16_s1688gemm_split_k_parallel_f16_128x128_32x2_nn_align2.cu",
"cutlass_tensorop_f16_s1688gemm_f16_256x128_32x2_nt_align8.cu",
"cutlass_tensorop_f16_s1688gemm_split_k_parallel_f16_256x128_32x2_nt_align8.cu",
"cutlass_tensorop_f16_s1688gemm_f16_128x256_32x2_nt_align8.cu",
"cutlass_tensorop_f16_s1688gemm_split_k_parallel_f16_128x256_32x2_nt_align8.cu",
"cutlass_tensorop_f16_s1688gemm_f16_128x128_32x2_nt_align8.cu",
"cutlass_tensorop_f16_s1688gemm_split_k_parallel_f16_128x128_32x2_nt_align8.cu",
"cutlass_tensorop_f16_s1688gemm_f16_256x128_32x2_nt_align4.cu",
"cutlass_tensorop_f16_s1688gemm_split_k_parallel_f16_256x128_32x2_nt_align4.cu",
"cutlass_tensorop_f16_s1688gemm_f16_128x256_32x2_nt_align4.cu",
"cutlass_tensorop_f16_s1688gemm_split_k_parallel_f16_128x256_32x2_nt_align4.cu",
"cutlass_tensorop_f16_s1688gemm_f16_128x128_32x2_nt_align4.cu",
"cutlass_tensorop_f16_s1688gemm_split_k_parallel_f16_128x128_32x2_nt_align4.cu",
"cutlass_tensorop_f16_s1688gemm_f16_256x128_32x2_nt_align2.cu",
"cutlass_tensorop_f16_s1688gemm_split_k_parallel_f16_256x128_32x2_nt_align2.cu",
"cutlass_tensorop_f16_s1688gemm_f16_128x256_32x2_nt_align2.cu",
"cutlass_tensorop_f16_s1688gemm_split_k_parallel_f16_128x256_32x2_nt_align2.cu",
"cutlass_tensorop_f16_s1688gemm_f16_128x128_32x2_nt_align2.cu",
"cutlass_tensorop_f16_s1688gemm_split_k_parallel_f16_128x128_32x2_nt_align2.cu",
"cutlass_tensorop_f16_s1688gemm_f16_256x128_32x2_tn_align8.cu",
"cutlass_tensorop_f16_s1688gemm_split_k_parallel_f16_256x128_32x2_tn_align8.cu",
"cutlass_tensorop_f16_s1688gemm_f16_128x256_32x2_tn_align8.cu",
"cutlass_tensorop_f16_s1688gemm_split_k_parallel_f16_128x256_32x2_tn_align8.cu",
"cutlass_tensorop_f16_s1688gemm_f16_128x128_32x2_tn_align8.cu",
"cutlass_tensorop_f16_s1688gemm_split_k_parallel_f16_128x128_32x2_tn_align8.cu",
"cutlass_tensorop_f16_s1688gemm_f16_256x128_32x2_tn_align4.cu",
"cutlass_tensorop_f16_s1688gemm_split_k_parallel_f16_256x128_32x2_tn_align4.cu",
"cutlass_tensorop_f16_s1688gemm_f16_128x256_32x2_tn_align4.cu",
"cutlass_tensorop_f16_s1688gemm_split_k_parallel_f16_128x256_32x2_tn_align4.cu",
"cutlass_tensorop_f16_s1688gemm_f16_128x128_32x2_tn_align4.cu",
"cutlass_tensorop_f16_s1688gemm_split_k_parallel_f16_128x128_32x2_tn_align4.cu",
"cutlass_tensorop_f16_s1688gemm_f16_256x128_32x2_tn_align2.cu",
"cutlass_tensorop_f16_s1688gemm_split_k_parallel_f16_256x128_32x2_tn_align2.cu",
"cutlass_tensorop_f16_s1688gemm_f16_128x256_32x2_tn_align2.cu",
"cutlass_tensorop_f16_s1688gemm_split_k_parallel_f16_128x256_32x2_tn_align2.cu",
"cutlass_tensorop_f16_s1688gemm_f16_128x128_32x2_tn_align2.cu",
"cutlass_tensorop_f16_s1688gemm_split_k_parallel_f16_128x128_32x2_tn_align2.cu",
"cutlass_tensorop_f16_s1688gemm_f16_256x128_32x2_tt_align8.cu",
"cutlass_tensorop_f16_s1688gemm_split_k_parallel_f16_256x128_32x2_tt_align8.cu",
"cutlass_tensorop_f16_s1688gemm_f16_128x256_32x2_tt_align8.cu",
"cutlass_tensorop_f16_s1688gemm_split_k_parallel_f16_128x256_32x2_tt_align8.cu",
"cutlass_tensorop_f16_s1688gemm_f16_128x128_32x2_tt_align8.cu",
"cutlass_tensorop_f16_s1688gemm_split_k_parallel_f16_128x128_32x2_tt_align8.cu",
"cutlass_tensorop_f16_s1688gemm_f16_256x128_32x2_tt_align4.cu",
"cutlass_tensorop_f16_s1688gemm_split_k_parallel_f16_256x128_32x2_tt_align4.cu",
"cutlass_tensorop_f16_s1688gemm_f16_128x256_32x2_tt_align4.cu",
"cutlass_tensorop_f16_s1688gemm_split_k_parallel_f16_128x256_32x2_tt_align4.cu",
"cutlass_tensorop_f16_s1688gemm_f16_128x128_32x2_tt_align4.cu",
"cutlass_tensorop_f16_s1688gemm_split_k_parallel_f16_128x128_32x2_tt_align4.cu",
"cutlass_tensorop_f16_s1688gemm_f16_256x128_32x2_tt_align2.cu",
"cutlass_tensorop_f16_s1688gemm_split_k_parallel_f16_256x128_32x2_tt_align2.cu",
"cutlass_tensorop_f16_s1688gemm_f16_128x256_32x2_tt_align2.cu",
"cutlass_tensorop_f16_s1688gemm_split_k_parallel_f16_128x256_32x2_tt_align2.cu",
"cutlass_tensorop_f16_s1688gemm_f16_128x128_32x2_tt_align2.cu",
"cutlass_tensorop_f16_s1688gemm_split_k_parallel_f16_128x128_32x2_tt_align2.cu",
"cutlass_tensorop_h1688gemm_256x128_32x2_nn_align8.cu",
"cutlass_tensorop_h1688gemm_split_k_parallel_256x128_32x2_nn_align8.cu",
"cutlass_tensorop_h1688gemm_128x256_32x2_nn_align8.cu",
"cutlass_tensorop_h1688gemm_split_k_parallel_128x256_32x2_nn_align8.cu",
"cutlass_tensorop_h1688gemm_128x128_32x2_nn_align8.cu",
"cutlass_tensorop_h1688gemm_split_k_parallel_128x128_32x2_nn_align8.cu",
"cutlass_tensorop_h1688gemm_256x128_32x2_nn_align4.cu",
"cutlass_tensorop_h1688gemm_split_k_parallel_256x128_32x2_nn_align4.cu",
"cutlass_tensorop_h1688gemm_128x256_32x2_nn_align4.cu",
"cutlass_tensorop_h1688gemm_split_k_parallel_128x256_32x2_nn_align4.cu",
"cutlass_tensorop_h1688gemm_128x128_32x2_nn_align4.cu",
"cutlass_tensorop_h1688gemm_split_k_parallel_128x128_32x2_nn_align4.cu",
"cutlass_tensorop_h1688gemm_256x128_32x2_nn_align2.cu",
"cutlass_tensorop_h1688gemm_split_k_parallel_256x128_32x2_nn_align2.cu",
"cutlass_tensorop_h1688gemm_128x256_32x2_nn_align2.cu",
"cutlass_tensorop_h1688gemm_split_k_parallel_128x256_32x2_nn_align2.cu",
"cutlass_tensorop_h1688gemm_128x128_32x2_nn_align2.cu",
"cutlass_tensorop_h1688gemm_split_k_parallel_128x128_32x2_nn_align2.cu",
"cutlass_tensorop_h1688gemm_256x128_32x2_nt_align8.cu",
"cutlass_tensorop_h1688gemm_split_k_parallel_256x128_32x2_nt_align8.cu",
"cutlass_tensorop_h1688gemm_128x256_32x2_nt_align8.cu",
"cutlass_tensorop_h1688gemm_split_k_parallel_128x256_32x2_nt_align8.cu",
"cutlass_tensorop_h1688gemm_128x128_32x2_nt_align8.cu",
"cutlass_tensorop_h1688gemm_split_k_parallel_128x128_32x2_nt_align8.cu",
"cutlass_tensorop_h1688gemm_256x128_32x2_nt_align4.cu",
"cutlass_tensorop_h1688gemm_split_k_parallel_256x128_32x2_nt_align4.cu",
"cutlass_tensorop_h1688gemm_128x256_32x2_nt_align4.cu",
"cutlass_tensorop_h1688gemm_split_k_parallel_128x256_32x2_nt_align4.cu",
"cutlass_tensorop_h1688gemm_128x128_32x2_nt_align4.cu",
"cutlass_tensorop_h1688gemm_split_k_parallel_128x128_32x2_nt_align4.cu",
"cutlass_tensorop_h1688gemm_256x128_32x2_nt_align2.cu",
"cutlass_tensorop_h1688gemm_split_k_parallel_256x128_32x2_nt_align2.cu",
"cutlass_tensorop_h1688gemm_128x256_32x2_nt_align2.cu",
"cutlass_tensorop_h1688gemm_split_k_parallel_128x256_32x2_nt_align2.cu",
"cutlass_tensorop_h1688gemm_128x128_32x2_nt_align2.cu",
"cutlass_tensorop_h1688gemm_split_k_parallel_128x128_32x2_nt_align2.cu",
"cutlass_tensorop_h1688gemm_256x128_32x2_tn_align8.cu",
"cutlass_tensorop_h1688gemm_split_k_parallel_256x128_32x2_tn_align8.cu",
"cutlass_tensorop_h1688gemm_128x256_32x2_tn_align8.cu",
"cutlass_tensorop_h1688gemm_split_k_parallel_128x256_32x2_tn_align8.cu",
"cutlass_tensorop_h1688gemm_128x128_32x2_tn_align8.cu",
"cutlass_tensorop_h1688gemm_split_k_parallel_128x128_32x2_tn_align8.cu",
"cutlass_tensorop_h1688gemm_256x128_32x2_tn_align4.cu",
"cutlass_tensorop_h1688gemm_split_k_parallel_256x128_32x2_tn_align4.cu",
"cutlass_tensorop_h1688gemm_128x256_32x2_tn_align4.cu",
"cutlass_tensorop_h1688gemm_split_k_parallel_128x256_32x2_tn_align4.cu",
"cutlass_tensorop_h1688gemm_128x128_32x2_tn_align4.cu",
"cutlass_tensorop_h1688gemm_split_k_parallel_128x128_32x2_tn_align4.cu",
"cutlass_tensorop_h1688gemm_256x128_32x2_tn_align2.cu",
"cutlass_tensorop_h1688gemm_split_k_parallel_256x128_32x2_tn_align2.cu",
"cutlass_tensorop_h1688gemm_128x256_32x2_tn_align2.cu",
"cutlass_tensorop_h1688gemm_split_k_parallel_128x256_32x2_tn_align2.cu",
"cutlass_tensorop_h1688gemm_128x128_32x2_tn_align2.cu",
"cutlass_tensorop_h1688gemm_split_k_parallel_128x128_32x2_tn_align2.cu",
"cutlass_tensorop_h1688gemm_256x128_32x2_tt_align8.cu",
"cutlass_tensorop_h1688gemm_split_k_parallel_256x128_32x2_tt_align8.cu",
"cutlass_tensorop_h1688gemm_128x256_32x2_tt_align8.cu",
"cutlass_tensorop_h1688gemm_split_k_parallel_128x256_32x2_tt_align8.cu",
"cutlass_tensorop_h1688gemm_128x128_32x2_tt_align8.cu",
"cutlass_tensorop_h1688gemm_split_k_parallel_128x128_32x2_tt_align8.cu",
"cutlass_tensorop_h1688gemm_256x128_32x2_tt_align4.cu",
"cutlass_tensorop_h1688gemm_split_k_parallel_256x128_32x2_tt_align4.cu",
"cutlass_tensorop_h1688gemm_128x256_32x2_tt_align4.cu",
"cutlass_tensorop_h1688gemm_split_k_parallel_128x256_32x2_tt_align4.cu",
"cutlass_tensorop_h1688gemm_128x128_32x2_tt_align4.cu",
"cutlass_tensorop_h1688gemm_split_k_parallel_128x128_32x2_tt_align4.cu",
"cutlass_tensorop_h1688gemm_256x128_32x2_tt_align2.cu",
"cutlass_tensorop_h1688gemm_split_k_parallel_256x128_32x2_tt_align2.cu",
"cutlass_tensorop_h1688gemm_128x256_32x2_tt_align2.cu",
"cutlass_tensorop_h1688gemm_split_k_parallel_128x256_32x2_tt_align2.cu",
"cutlass_tensorop_h1688gemm_128x128_32x2_tt_align2.cu",
"cutlass_tensorop_h1688gemm_split_k_parallel_128x128_32x2_tt_align2.cu",
"all_gemm_tensorop1688_operations.cu",
"cutlass_tensorop_f16_s884gemm_f16_256x128_32x2_nn_align8.cu",
"cutlass_tensorop_f16_s884gemm_split_k_parallel_f16_256x128_32x2_nn_align8.cu",
"cutlass_tensorop_f16_s884gemm_f16_128x256_32x2_nn_align8.cu",
"cutlass_tensorop_f16_s884gemm_split_k_parallel_f16_128x256_32x2_nn_align8.cu",
"cutlass_tensorop_f16_s884gemm_f16_128x128_32x2_nn_align8.cu",
"cutlass_tensorop_f16_s884gemm_split_k_parallel_f16_128x128_32x2_nn_align8.cu",
"cutlass_tensorop_f16_s884gemm_f16_256x128_32x2_nn_align4.cu",
"cutlass_tensorop_f16_s884gemm_split_k_parallel_f16_256x128_32x2_nn_align4.cu",
"cutlass_tensorop_f16_s884gemm_f16_128x256_32x2_nn_align4.cu",
"cutlass_tensorop_f16_s884gemm_split_k_parallel_f16_128x256_32x2_nn_align4.cu",
"cutlass_tensorop_f16_s884gemm_f16_128x128_32x2_nn_align4.cu",
"cutlass_tensorop_f16_s884gemm_split_k_parallel_f16_128x128_32x2_nn_align4.cu",
"cutlass_tensorop_f16_s884gemm_f16_256x128_32x2_nn_align2.cu",
"cutlass_tensorop_f16_s884gemm_split_k_parallel_f16_256x128_32x2_nn_align2.cu",
"cutlass_tensorop_f16_s884gemm_f16_128x256_32x2_nn_align2.cu",
"cutlass_tensorop_f16_s884gemm_split_k_parallel_f16_128x256_32x2_nn_align2.cu",
"cutlass_tensorop_f16_s884gemm_f16_128x128_32x2_nn_align2.cu",
"cutlass_tensorop_f16_s884gemm_split_k_parallel_f16_128x128_32x2_nn_align2.cu",
"cutlass_tensorop_f16_s884gemm_f16_256x128_32x2_nt_align8.cu",
"cutlass_tensorop_f16_s884gemm_split_k_parallel_f16_256x128_32x2_nt_align8.cu",
"cutlass_tensorop_f16_s884gemm_f16_128x256_32x2_nt_align8.cu",
"cutlass_tensorop_f16_s884gemm_split_k_parallel_f16_128x256_32x2_nt_align8.cu",
"cutlass_tensorop_f16_s884gemm_f16_128x128_32x2_nt_align8.cu",
"cutlass_tensorop_f16_s884gemm_split_k_parallel_f16_128x128_32x2_nt_align8.cu",
"cutlass_tensorop_f16_s884gemm_f16_256x128_32x2_nt_align4.cu",
"cutlass_tensorop_f16_s884gemm_split_k_parallel_f16_256x128_32x2_nt_align4.cu",
"cutlass_tensorop_f16_s884gemm_f16_128x256_32x2_nt_align4.cu",
"cutlass_tensorop_f16_s884gemm_split_k_parallel_f16_128x256_32x2_nt_align4.cu",
"cutlass_tensorop_f16_s884gemm_f16_128x128_32x2_nt_align4.cu",
"cutlass_tensorop_f16_s884gemm_split_k_parallel_f16_128x128_32x2_nt_align4.cu",
"cutlass_tensorop_f16_s884gemm_f16_256x128_32x2_nt_align2.cu",
"cutlass_tensorop_f16_s884gemm_split_k_parallel_f16_256x128_32x2_nt_align2.cu",
"cutlass_tensorop_f16_s884gemm_f16_128x256_32x2_nt_align2.cu",
"cutlass_tensorop_f16_s884gemm_split_k_parallel_f16_128x256_32x2_nt_align2.cu",
"cutlass_tensorop_f16_s884gemm_f16_128x128_32x2_nt_align2.cu",
"cutlass_tensorop_f16_s884gemm_split_k_parallel_f16_128x128_32x2_nt_align2.cu",
"cutlass_tensorop_f16_s884gemm_f16_256x128_32x2_tn_align8.cu",
"cutlass_tensorop_f16_s884gemm_split_k_parallel_f16_256x128_32x2_tn_align8.cu",
"cutlass_tensorop_f16_s884gemm_f16_128x256_32x2_tn_align8.cu",
"cutlass_tensorop_f16_s884gemm_split_k_parallel_f16_128x256_32x2_tn_align8.cu",
"cutlass_tensorop_f16_s884gemm_f16_128x128_32x2_tn_align8.cu",
"cutlass_tensorop_f16_s884gemm_split_k_parallel_f16_128x128_32x2_tn_align8.cu",
"cutlass_tensorop_f16_s884gemm_f16_256x128_32x2_tn_align4.cu",
"cutlass_tensorop_f16_s884gemm_split_k_parallel_f16_256x128_32x2_tn_align4.cu",
"cutlass_tensorop_f16_s884gemm_f16_128x256_32x2_tn_align4.cu",
"cutlass_tensorop_f16_s884gemm_split_k_parallel_f16_128x256_32x2_tn_align4.cu",
"cutlass_tensorop_f16_s884gemm_f16_128x128_32x2_tn_align4.cu",
"cutlass_tensorop_f16_s884gemm_split_k_parallel_f16_128x128_32x2_tn_align4.cu",
"cutlass_tensorop_f16_s884gemm_f16_256x128_32x2_tn_align2.cu",
"cutlass_tensorop_f16_s884gemm_split_k_parallel_f16_256x128_32x2_tn_align2.cu",
"cutlass_tensorop_f16_s884gemm_f16_128x256_32x2_tn_align2.cu",
"cutlass_tensorop_f16_s884gemm_split_k_parallel_f16_128x256_32x2_tn_align2.cu",
"cutlass_tensorop_f16_s884gemm_f16_128x128_32x2_tn_align2.cu",
"cutlass_tensorop_f16_s884gemm_split_k_parallel_f16_128x128_32x2_tn_align2.cu",
"cutlass_tensorop_f16_s884gemm_f16_256x128_32x2_tt_align8.cu",
"cutlass_tensorop_f16_s884gemm_split_k_parallel_f16_256x128_32x2_tt_align8.cu",
"cutlass_tensorop_f16_s884gemm_f16_128x256_32x2_tt_align8.cu",
"cutlass_tensorop_f16_s884gemm_split_k_parallel_f16_128x256_32x2_tt_align8.cu",
"cutlass_tensorop_f16_s884gemm_f16_128x128_32x2_tt_align8.cu",
"cutlass_tensorop_f16_s884gemm_split_k_parallel_f16_128x128_32x2_tt_align8.cu",
"cutlass_tensorop_f16_s884gemm_f16_256x128_32x2_tt_align4.cu",
"cutlass_tensorop_f16_s884gemm_split_k_parallel_f16_256x128_32x2_tt_align4.cu",
"cutlass_tensorop_f16_s884gemm_f16_128x256_32x2_tt_align4.cu",
"cutlass_tensorop_f16_s884gemm_split_k_parallel_f16_128x256_32x2_tt_align4.cu",
"cutlass_tensorop_f16_s884gemm_f16_128x128_32x2_tt_align4.cu",
"cutlass_tensorop_f16_s884gemm_split_k_parallel_f16_128x128_32x2_tt_align4.cu",
"cutlass_tensorop_f16_s884gemm_f16_256x128_32x2_tt_align2.cu",
"cutlass_tensorop_f16_s884gemm_split_k_parallel_f16_256x128_32x2_tt_align2.cu",
"cutlass_tensorop_f16_s884gemm_f16_128x256_32x2_tt_align2.cu",
"cutlass_tensorop_f16_s884gemm_split_k_parallel_f16_128x256_32x2_tt_align2.cu",
"cutlass_tensorop_f16_s884gemm_f16_128x128_32x2_tt_align2.cu",
"cutlass_tensorop_f16_s884gemm_split_k_parallel_f16_128x128_32x2_tt_align2.cu",
"cutlass_tensorop_h884gemm_256x128_32x2_nn_align8.cu",
"cutlass_tensorop_h884gemm_split_k_parallel_256x128_32x2_nn_align8.cu",
"cutlass_tensorop_h884gemm_128x256_32x2_nn_align8.cu",
"cutlass_tensorop_h884gemm_split_k_parallel_128x256_32x2_nn_align8.cu",
"cutlass_tensorop_h884gemm_128x128_32x2_nn_align8.cu",
"cutlass_tensorop_h884gemm_split_k_parallel_128x128_32x2_nn_align8.cu",
"cutlass_tensorop_h884gemm_256x128_32x2_nn_align4.cu",
"cutlass_tensorop_h884gemm_split_k_parallel_256x128_32x2_nn_align4.cu",
"cutlass_tensorop_h884gemm_128x256_32x2_nn_align4.cu",
"cutlass_tensorop_h884gemm_split_k_parallel_128x256_32x2_nn_align4.cu",
"cutlass_tensorop_h884gemm_128x128_32x2_nn_align4.cu",
"cutlass_tensorop_h884gemm_split_k_parallel_128x128_32x2_nn_align4.cu",
"cutlass_tensorop_h884gemm_256x128_32x2_nn_align2.cu",
"cutlass_tensorop_h884gemm_split_k_parallel_256x128_32x2_nn_align2.cu",
"cutlass_tensorop_h884gemm_128x256_32x2_nn_align2.cu",
"cutlass_tensorop_h884gemm_split_k_parallel_128x256_32x2_nn_align2.cu",
"cutlass_tensorop_h884gemm_128x128_32x2_nn_align2.cu",
"cutlass_tensorop_h884gemm_split_k_parallel_128x128_32x2_nn_align2.cu",
"cutlass_tensorop_h884gemm_256x128_32x2_nt_align8.cu",
"cutlass_tensorop_h884gemm_split_k_parallel_256x128_32x2_nt_align8.cu",
"cutlass_tensorop_h884gemm_128x256_32x2_nt_align8.cu",
"cutlass_tensorop_h884gemm_split_k_parallel_128x256_32x2_nt_align8.cu",
"cutlass_tensorop_h884gemm_128x128_32x2_nt_align8.cu",
"cutlass_tensorop_h884gemm_split_k_parallel_128x128_32x2_nt_align8.cu",
"cutlass_tensorop_h884gemm_256x128_32x2_nt_align4.cu",
"cutlass_tensorop_h884gemm_split_k_parallel_256x128_32x2_nt_align4.cu",
"cutlass_tensorop_h884gemm_128x256_32x2_nt_align4.cu",
"cutlass_tensorop_h884gemm_split_k_parallel_128x256_32x2_nt_align4.cu",
"cutlass_tensorop_h884gemm_128x128_32x2_nt_align4.cu",
"cutlass_tensorop_h884gemm_split_k_parallel_128x128_32x2_nt_align4.cu",
"cutlass_tensorop_h884gemm_256x128_32x2_nt_align2.cu",
"cutlass_tensorop_h884gemm_split_k_parallel_256x128_32x2_nt_align2.cu",
"cutlass_tensorop_h884gemm_128x256_32x2_nt_align2.cu",
"cutlass_tensorop_h884gemm_split_k_parallel_128x256_32x2_nt_align2.cu",
"cutlass_tensorop_h884gemm_128x128_32x2_nt_align2.cu",
"cutlass_tensorop_h884gemm_split_k_parallel_128x128_32x2_nt_align2.cu",
"cutlass_tensorop_h884gemm_256x128_32x2_tn_align8.cu",
"cutlass_tensorop_h884gemm_split_k_parallel_256x128_32x2_tn_align8.cu",
"cutlass_tensorop_h884gemm_128x256_32x2_tn_align8.cu",
"cutlass_tensorop_h884gemm_split_k_parallel_128x256_32x2_tn_align8.cu",
"cutlass_tensorop_h884gemm_128x128_32x2_tn_align8.cu",
"cutlass_tensorop_h884gemm_split_k_parallel_128x128_32x2_tn_align8.cu",
"cutlass_tensorop_h884gemm_256x128_32x2_tn_align4.cu",
"cutlass_tensorop_h884gemm_split_k_parallel_256x128_32x2_tn_align4.cu",
"cutlass_tensorop_h884gemm_128x256_32x2_tn_align4.cu",
"cutlass_tensorop_h884gemm_split_k_parallel_128x256_32x2_tn_align4.cu",
"cutlass_tensorop_h884gemm_128x128_32x2_tn_align4.cu",
"cutlass_tensorop_h884gemm_split_k_parallel_128x128_32x2_tn_align4.cu",
"cutlass_tensorop_h884gemm_256x128_32x2_tn_align2.cu",
"cutlass_tensorop_h884gemm_split_k_parallel_256x128_32x2_tn_align2.cu",
"cutlass_tensorop_h884gemm_128x256_32x2_tn_align2.cu",
"cutlass_tensorop_h884gemm_split_k_parallel_128x256_32x2_tn_align2.cu",
"cutlass_tensorop_h884gemm_128x128_32x2_tn_align2.cu",
"cutlass_tensorop_h884gemm_split_k_parallel_128x128_32x2_tn_align2.cu",
"cutlass_tensorop_h884gemm_256x128_32x2_tt_align8.cu",
"cutlass_tensorop_h884gemm_split_k_parallel_256x128_32x2_tt_align8.cu",
"cutlass_tensorop_h884gemm_128x256_32x2_tt_align8.cu",
"cutlass_tensorop_h884gemm_split_k_parallel_128x256_32x2_tt_align8.cu",
"cutlass_tensorop_h884gemm_128x128_32x2_tt_align8.cu",
"cutlass_tensorop_h884gemm_split_k_parallel_128x128_32x2_tt_align8.cu",
"cutlass_tensorop_h884gemm_256x128_32x2_tt_align4.cu",
"cutlass_tensorop_h884gemm_split_k_parallel_256x128_32x2_tt_align4.cu",
"cutlass_tensorop_h884gemm_128x256_32x2_tt_align4.cu",
"cutlass_tensorop_h884gemm_split_k_parallel_128x256_32x2_tt_align4.cu",
"cutlass_tensorop_h884gemm_128x128_32x2_tt_align4.cu",
"cutlass_tensorop_h884gemm_split_k_parallel_128x128_32x2_tt_align4.cu",
"cutlass_tensorop_h884gemm_256x128_32x2_tt_align2.cu",
"cutlass_tensorop_h884gemm_split_k_parallel_256x128_32x2_tt_align2.cu",
"cutlass_tensorop_h884gemm_128x256_32x2_tt_align2.cu",
"cutlass_tensorop_h884gemm_split_k_parallel_128x256_32x2_tt_align2.cu",
"cutlass_tensorop_h884gemm_128x128_32x2_tt_align2.cu",
"cutlass_tensorop_h884gemm_split_k_parallel_128x128_32x2_tt_align2.cu",
"all_gemm_tensorop884_operations.cu",
"cutlass_simt_sgemv_batched_strided_1x128_32_tt_align4x4.cu",
"cutlass_simt_sgemv_batched_strided_1x128_16_tt_align4x2.cu",
"cutlass_simt_sgemv_batched_strided_1x128_8_tt_align4x1.cu",
"cutlass_simt_sgemv_batched_strided_1x128_16_tt_align2x4.cu",
"cutlass_simt_sgemv_batched_strided_1x128_8_tt_align2x2.cu",
"cutlass_simt_sgemv_batched_strided_1x128_4_tt_align2x1.cu",
"cutlass_simt_sgemv_batched_strided_1x128_8_tt_align1x4.cu",
"cutlass_simt_sgemv_batched_strided_1x128_4_tt_align1x2.cu",
"cutlass_simt_sgemv_batched_strided_1x128_2_tt_align1x1.cu",
"cutlass_simt_sgemv_batched_strided_1x64_64_tt_align4x4.cu",
"cutlass_simt_sgemv_batched_strided_1x64_32_tt_align4x2.cu",
"cutlass_simt_sgemv_batched_strided_1x64_16_tt_align4x1.cu",
"cutlass_simt_sgemv_batched_strided_1x64_32_tt_align2x4.cu",
"cutlass_simt_sgemv_batched_strided_1x64_16_tt_align2x2.cu",
"cutlass_simt_sgemv_batched_strided_1x64_8_tt_align2x1.cu",
"cutlass_simt_sgemv_batched_strided_1x64_16_tt_align1x4.cu",
"cutlass_simt_sgemv_batched_strided_1x64_8_tt_align1x2.cu",
"cutlass_simt_sgemv_batched_strided_1x64_4_tt_align1x1.cu",
"cutlass_simt_sgemv_batched_strided_1x32_128_tt_align4x4.cu",
"cutlass_simt_sgemv_batched_strided_1x32_64_tt_align4x2.cu",
"cutlass_simt_sgemv_batched_strided_1x32_32_tt_align4x1.cu",
"cutlass_simt_sgemv_batched_strided_1x32_64_tt_align2x4.cu",
"cutlass_simt_sgemv_batched_strided_1x32_32_tt_align2x2.cu",
"cutlass_simt_sgemv_batched_strided_1x32_16_tt_align2x1.cu",
"cutlass_simt_sgemv_batched_strided_1x32_32_tt_align1x4.cu",
"cutlass_simt_sgemv_batched_strided_1x32_16_tt_align1x2.cu",
"cutlass_simt_sgemv_batched_strided_1x32_8_tt_align1x1.cu",
"cutlass_simt_s8_idgrad_id_s8_32x128x32_32x64x32_2_nc4hw4_k4rsc4_align4x16.cu",
"cutlass_simt_s8_idgrad_s2_id_s8_32x128x32_32x64x32_2_nc4hw4_k4rsc4_align4x16.cu",
"cutlass_simt_s8_idgrad_id_s8_16x128x16_16x64x16_2_nc4hw4_k4rsc4_align4x4.cu",
"cutlass_simt_s8_idgrad_s2_id_s8_16x128x16_16x64x16_2_nc4hw4_k4rsc4_align4x4.cu",
"cutlass_simt_s8_idgrad_id_s8_16x128x16_16x128x16_1_nc4hw4_k4rsc4_align4x8.cu",
"cutlass_simt_s8_idgrad_s2_id_s8_16x128x16_16x128x16_1_nc4hw4_k4rsc4_align4x8.cu",
"cutlass_simt_s8_idgrad_id_s8_16x64x8_16x64x8_2_nc4hw4_k4rsc4_align4x4.cu",
"cutlass_simt_s8_idgrad_s2_id_s8_16x64x8_16x64x8_2_nc4hw4_k4rsc4_align4x4.cu",
"all_deconv_simt_operations.cu",
"cutlass_tensorop_s8_i8816dgrad_id_s8_128x32x32_64x32x32_1_nhwc_ck4rs4_align4x4.cu",
"cutlass_tensorop_s8_i8816dgrad_s2_id_s8_128x32x32_64x32x32_1_nhwc_ck4rs4_align4x4.cu",
"cutlass_tensorop_s8_i8816dgrad_id_s8_64x16x32_64x16x32_2_nhwc_ck4rs4_align4x4.cu",
"cutlass_tensorop_s8_i8816dgrad_s2_id_s8_64x16x32_64x16x32_2_nhwc_ck4rs4_align4x4.cu",
"cutlass_tensorop_s8_i8816dgrad_id_s8_128x32x32_64x32x32_1_nhwc_ck8rs8_align8x8.cu",
"cutlass_tensorop_s8_i8816dgrad_s2_id_s8_128x32x32_64x32x32_1_nhwc_ck8rs8_align8x8.cu",
"cutlass_tensorop_s8_i8816dgrad_id_s8_64x16x32_64x16x32_2_nhwc_ck8rs8_align8x8.cu",
"cutlass_tensorop_s8_i8816dgrad_s2_id_s8_64x16x32_64x16x32_2_nhwc_ck8rs8_align8x8.cu",
"cutlass_tensorop_s8_i8816dgrad_id_s8_128x32x32_64x32x32_1_nhwc_ck16rs16_align16x16.cu",
"cutlass_tensorop_s8_i8816dgrad_s2_id_s8_128x32x32_64x32x32_1_nhwc_ck16rs16_align16x16.cu",
"cutlass_tensorop_s8_i8816dgrad_id_s8_64x16x32_64x16x32_2_nhwc_ck16rs16_align16x16.cu",
"cutlass_tensorop_s8_i8816dgrad_s2_id_s8_64x16x32_64x16x32_2_nhwc_ck16rs16_align16x16.cu",
"all_deconv_tensorop8816_operations.cu",
"cutlass_simt_s8_ifprop_id_s8_128x128x32_64x32x32_2_nc4hw4_c4rsk4_align4x16.cu",
"cutlass_simt_s8_ifprop_1x1_id_s8_128x128x32_64x32x32_2_nc4hw4_c4rsk4_align4x16.cu",
"cutlass_simt_s8_ifprop_relu_s8_128x128x32_64x32x32_2_nc4hw4_c4rsk4_align4x16.cu",
"cutlass_simt_s8_ifprop_1x1_relu_s8_128x128x32_64x32x32_2_nc4hw4_c4rsk4_align4x16.cu",
"cutlass_simt_s8_ifprop_hswish_s8_128x128x32_64x32x32_2_nc4hw4_c4rsk4_align4x16.cu",
"cutlass_simt_s8_ifprop_1x1_hswish_s8_128x128x32_64x32x32_2_nc4hw4_c4rsk4_align4x16.cu",
"cutlass_simt_s8_ifprop_id_s8_128x64x32_64x32x32_2_nc4hw4_c4rsk4_align4x16.cu",
"cutlass_simt_s8_ifprop_1x1_id_s8_128x64x32_64x32x32_2_nc4hw4_c4rsk4_align4x16.cu",
"cutlass_simt_s8_ifprop_relu_s8_128x64x32_64x32x32_2_nc4hw4_c4rsk4_align4x16.cu",
"cutlass_simt_s8_ifprop_1x1_relu_s8_128x64x32_64x32x32_2_nc4hw4_c4rsk4_align4x16.cu",
"cutlass_simt_s8_ifprop_hswish_s8_128x64x32_64x32x32_2_nc4hw4_c4rsk4_align4x16.cu",
"cutlass_simt_s8_ifprop_1x1_hswish_s8_128x64x32_64x32x32_2_nc4hw4_c4rsk4_align4x16.cu",
"cutlass_simt_s8_ifprop_id_s8_64x128x32_64x32x32_2_nc4hw4_c4rsk4_align4x16.cu",
"cutlass_simt_s8_ifprop_1x1_id_s8_64x128x32_64x32x32_2_nc4hw4_c4rsk4_align4x16.cu",
"cutlass_simt_s8_ifprop_relu_s8_64x128x32_64x32x32_2_nc4hw4_c4rsk4_align4x16.cu",
"cutlass_simt_s8_ifprop_1x1_relu_s8_64x128x32_64x32x32_2_nc4hw4_c4rsk4_align4x16.cu",
"cutlass_simt_s8_ifprop_hswish_s8_64x128x32_64x32x32_2_nc4hw4_c4rsk4_align4x16.cu",
"cutlass_simt_s8_ifprop_1x1_hswish_s8_64x128x32_64x32x32_2_nc4hw4_c4rsk4_align4x16.cu",
"cutlass_simt_s8_ifprop_id_s8_128x32x32_64x32x32_2_nc4hw4_c4rsk4_align4x16.cu",
"cutlass_simt_s8_ifprop_1x1_id_s8_128x32x32_64x32x32_2_nc4hw4_c4rsk4_align4x16.cu",
"cutlass_simt_s8_ifprop_relu_s8_128x32x32_64x32x32_2_nc4hw4_c4rsk4_align4x16.cu",
"cutlass_simt_s8_ifprop_1x1_relu_s8_128x32x32_64x32x32_2_nc4hw4_c4rsk4_align4x16.cu",
"cutlass_simt_s8_ifprop_hswish_s8_128x32x32_64x32x32_2_nc4hw4_c4rsk4_align4x16.cu",
"cutlass_simt_s8_ifprop_1x1_hswish_s8_128x32x32_64x32x32_2_nc4hw4_c4rsk4_align4x16.cu",
"cutlass_simt_s8_ifprop_id_s8_32x128x32_32x64x32_2_nc4hw4_c4rsk4_align4x16.cu",
"cutlass_simt_s8_ifprop_1x1_id_s8_32x128x32_32x64x32_2_nc4hw4_c4rsk4_align4x16.cu",
"cutlass_simt_s8_ifprop_relu_s8_32x128x32_32x64x32_2_nc4hw4_c4rsk4_align4x16.cu",
"cutlass_simt_s8_ifprop_1x1_relu_s8_32x128x32_32x64x32_2_nc4hw4_c4rsk4_align4x16.cu",
"cutlass_simt_s8_ifprop_hswish_s8_32x128x32_32x64x32_2_nc4hw4_c4rsk4_align4x16.cu",
"cutlass_simt_s8_ifprop_1x1_hswish_s8_32x128x32_32x64x32_2_nc4hw4_c4rsk4_align4x16.cu",
"cutlass_simt_s8_ifprop_id_s8_32x64x32_32x64x32_2_nc4hw4_c4rsk4_align4x16.cu",
"cutlass_simt_s8_ifprop_1x1_id_s8_32x64x32_32x64x32_2_nc4hw4_c4rsk4_align4x16.cu",
"cutlass_simt_s8_ifprop_relu_s8_32x64x32_32x64x32_2_nc4hw4_c4rsk4_align4x16.cu",
"cutlass_simt_s8_ifprop_1x1_relu_s8_32x64x32_32x64x32_2_nc4hw4_c4rsk4_align4x16.cu",
"cutlass_simt_s8_ifprop_hswish_s8_32x64x32_32x64x32_2_nc4hw4_c4rsk4_align4x16.cu",
"cutlass_simt_s8_ifprop_1x1_hswish_s8_32x64x32_32x64x32_2_nc4hw4_c4rsk4_align4x16.cu",
"cutlass_simt_s8_ifprop_id_s8_64x32x32_64x32x32_2_nc4hw4_c4rsk4_align4x16.cu",
"cutlass_simt_s8_ifprop_1x1_id_s8_64x32x32_64x32x32_2_nc4hw4_c4rsk4_align4x16.cu",
"cutlass_simt_s8_ifprop_relu_s8_64x32x32_64x32x32_2_nc4hw4_c4rsk4_align4x16.cu",
"cutlass_simt_s8_ifprop_1x1_relu_s8_64x32x32_64x32x32_2_nc4hw4_c4rsk4_align4x16.cu",
"cutlass_simt_s8_ifprop_hswish_s8_64x32x32_64x32x32_2_nc4hw4_c4rsk4_align4x16.cu",
"cutlass_simt_s8_ifprop_1x1_hswish_s8_64x32x32_64x32x32_2_nc4hw4_c4rsk4_align4x16.cu",
"cutlass_simt_s8_ifprop_id_s8_16x128x16_16x128x16_1_nc4hw4_c4rsk4_align4x8.cu",
"cutlass_simt_s8_ifprop_1x1_id_s8_16x128x16_16x128x16_1_nc4hw4_c4rsk4_align4x8.cu",
"cutlass_simt_s8_ifprop_relu_s8_16x128x16_16x128x16_1_nc4hw4_c4rsk4_align4x8.cu",
"cutlass_simt_s8_ifprop_1x1_relu_s8_16x128x16_16x128x16_1_nc4hw4_c4rsk4_align4x8.cu",
"cutlass_simt_s8_ifprop_hswish_s8_16x128x16_16x128x16_1_nc4hw4_c4rsk4_align4x8.cu",
"cutlass_simt_s8_ifprop_1x1_hswish_s8_16x128x16_16x128x16_1_nc4hw4_c4rsk4_align4x8.cu",
"cutlass_simt_s8_ifprop_id_s8_16x64x8_16x64x8_2_nc4hw4_c4rsk4_align4x4.cu",
"cutlass_simt_s8_ifprop_1x1_id_s8_16x64x8_16x64x8_2_nc4hw4_c4rsk4_align4x4.cu",
"cutlass_simt_s8_ifprop_relu_s8_16x64x8_16x64x8_2_nc4hw4_c4rsk4_align4x4.cu",
"cutlass_simt_s8_ifprop_1x1_relu_s8_16x64x8_16x64x8_2_nc4hw4_c4rsk4_align4x4.cu",
"cutlass_simt_s8_ifprop_hswish_s8_16x64x8_16x64x8_2_nc4hw4_c4rsk4_align4x4.cu",
"cutlass_simt_s8_ifprop_1x1_hswish_s8_16x64x8_16x64x8_2_nc4hw4_c4rsk4_align4x4.cu",
"cutlass_simt_s8_ifprop_id_s8_32x128x32_32x64x32_2_nc4hw4_c4rsk4_nc32hw32_align4x16.cu",
"cutlass_simt_s8_ifprop_1x1_id_s8_32x128x32_32x64x32_2_nc4hw4_c4rsk4_nc32hw32_align4x16.cu",
"cutlass_simt_s8_ifprop_relu_s8_32x128x32_32x64x32_2_nc4hw4_c4rsk4_nc32hw32_align4x16.cu",
"cutlass_simt_s8_ifprop_1x1_relu_s8_32x128x32_32x64x32_2_nc4hw4_c4rsk4_nc32hw32_align4x16.cu",
"cutlass_simt_s8_ifprop_hswish_s8_32x128x32_32x64x32_2_nc4hw4_c4rsk4_nc32hw32_align4x16.cu",
"cutlass_simt_s8_ifprop_1x1_hswish_s8_32x128x32_32x64x32_2_nc4hw4_c4rsk4_nc32hw32_align4x16.cu",
"cutlass_simt_s8_ifprop_id_s8_32x64x32_32x64x32_2_nc4hw4_c4rsk4_nc32hw32_align4x16.cu",
"cutlass_simt_s8_ifprop_1x1_id_s8_32x64x32_32x64x32_2_nc4hw4_c4rsk4_nc32hw32_align4x16.cu",
"cutlass_simt_s8_ifprop_relu_s8_32x64x32_32x64x32_2_nc4hw4_c4rsk4_nc32hw32_align4x16.cu",
"cutlass_simt_s8_ifprop_1x1_relu_s8_32x64x32_32x64x32_2_nc4hw4_c4rsk4_nc32hw32_align4x16.cu",
"cutlass_simt_s8_ifprop_hswish_s8_32x64x32_32x64x32_2_nc4hw4_c4rsk4_nc32hw32_align4x16.cu",
"cutlass_simt_s8_ifprop_1x1_hswish_s8_32x64x32_32x64x32_2_nc4hw4_c4rsk4_nc32hw32_align4x16.cu",
"cutlass_simt_u4_ifprop_id_s8_16x128x16_16x128x16_1_nc4hw4_c4rsk4_nhwc_align4x8.cu",
"cutlass_simt_u4_ifprop_relu_s8_16x128x16_16x128x16_1_nc4hw4_c4rsk4_nhwc_align4x8.cu",
"cutlass_simt_u4_ifprop_hswish_s8_16x128x16_16x128x16_1_nc4hw4_c4rsk4_nhwc_align4x8.cu",
"cutlass_simt_u4_ifprop_id_s8_16x64x8_16x64x8_2_nc4hw4_c4rsk4_nhwc_align4x4.cu",
"cutlass_simt_u4_ifprop_relu_s8_16x64x8_16x64x8_2_nc4hw4_c4rsk4_nhwc_align4x4.cu",
"cutlass_simt_u4_ifprop_hswish_s8_16x64x8_16x64x8_2_nc4hw4_c4rsk4_nhwc_align4x4.cu",
"cutlass_simt_s4_ifprop_id_s8_16x128x16_16x128x16_1_nc4hw4_c4rsk4_nhwc_align4x8.cu",
"cutlass_simt_s4_ifprop_relu_s8_16x128x16_16x128x16_1_nc4hw4_c4rsk4_nhwc_align4x8.cu",
"cutlass_simt_s4_ifprop_hswish_s8_16x128x16_16x128x16_1_nc4hw4_c4rsk4_nhwc_align4x8.cu",
"cutlass_simt_s4_ifprop_id_s8_16x64x8_16x64x8_2_nc4hw4_c4rsk4_nhwc_align4x4.cu",
"cutlass_simt_s4_ifprop_relu_s8_16x64x8_16x64x8_2_nc4hw4_c4rsk4_nhwc_align4x4.cu",
"cutlass_simt_s4_ifprop_hswish_s8_16x64x8_16x64x8_2_nc4hw4_c4rsk4_nhwc_align4x4.cu",
"cutlass_simt_f32_ifprop_id_s8_16x128x16_16x128x16_1_nc4hw4_c4rsk4_nchw_align4x8.cu",
"cutlass_simt_f32_ifprop_1x1_id_s8_16x128x16_16x128x16_1_nc4hw4_c4rsk4_nchw_align4x8.cu",
"cutlass_simt_f32_ifprop_relu_s8_16x128x16_16x128x16_1_nc4hw4_c4rsk4_nchw_align4x8.cu",
"cutlass_simt_f32_ifprop_1x1_relu_s8_16x128x16_16x128x16_1_nc4hw4_c4rsk4_nchw_align4x8.cu",
"cutlass_simt_f32_ifprop_hswish_s8_16x128x16_16x128x16_1_nc4hw4_c4rsk4_nchw_align4x8.cu",
"cutlass_simt_f32_ifprop_1x1_hswish_s8_16x128x16_16x128x16_1_nc4hw4_c4rsk4_nchw_align4x8.cu",
"cutlass_simt_f32_ifprop_id_s8_16x64x8_16x64x8_2_nc4hw4_c4rsk4_nchw_align4x4.cu",
"cutlass_simt_f32_ifprop_1x1_id_s8_16x64x8_16x64x8_2_nc4hw4_c4rsk4_nchw_align4x4.cu",
"cutlass_simt_f32_ifprop_relu_s8_16x64x8_16x64x8_2_nc4hw4_c4rsk4_nchw_align4x4.cu",
"cutlass_simt_f32_ifprop_1x1_relu_s8_16x64x8_16x64x8_2_nc4hw4_c4rsk4_nchw_align4x4.cu",
"cutlass_simt_f32_ifprop_hswish_s8_16x64x8_16x64x8_2_nc4hw4_c4rsk4_nchw_align4x4.cu",
"cutlass_simt_f32_ifprop_1x1_hswish_s8_16x64x8_16x64x8_2_nc4hw4_c4rsk4_nchw_align4x4.cu",
"all_conv2d_simt_operations.cu",
"cutlass_tensorop_s8_i8816fprop_roc_id_s8_128x256x64_64x64x64_2_nc32hw32_c32rsk32_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_1x1_roc_id_s8_128x256x64_64x64x64_2_nc32hw32_c32rsk32_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_roc_relu_s8_128x256x64_64x64x64_2_nc32hw32_c32rsk32_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_1x1_roc_relu_s8_128x256x64_64x64x64_2_nc32hw32_c32rsk32_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_roc_hswish_s8_128x256x64_64x64x64_2_nc32hw32_c32rsk32_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_1x1_roc_hswish_s8_128x256x64_64x64x64_2_nc32hw32_c32rsk32_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_roc_id_s8_256x128x64_64x64x64_2_nc32hw32_c32rsk32_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_1x1_roc_id_s8_256x128x64_64x64x64_2_nc32hw32_c32rsk32_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_roc_relu_s8_256x128x64_64x64x64_2_nc32hw32_c32rsk32_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_1x1_roc_relu_s8_256x128x64_64x64x64_2_nc32hw32_c32rsk32_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_roc_hswish_s8_256x128x64_64x64x64_2_nc32hw32_c32rsk32_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_1x1_roc_hswish_s8_256x128x64_64x64x64_2_nc32hw32_c32rsk32_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_roc_id_s8_128x128x64_64x64x64_2_nc32hw32_c32rsk32_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_1x1_roc_id_s8_128x128x64_64x64x64_2_nc32hw32_c32rsk32_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_roc_relu_s8_128x128x64_64x64x64_2_nc32hw32_c32rsk32_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_1x1_roc_relu_s8_128x128x64_64x64x64_2_nc32hw32_c32rsk32_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_roc_hswish_s8_128x128x64_64x64x64_2_nc32hw32_c32rsk32_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_1x1_roc_hswish_s8_128x128x64_64x64x64_2_nc32hw32_c32rsk32_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_roc_id_s8_128x64x64_64x32x64_2_nc32hw32_c32rsk32_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_1x1_roc_id_s8_128x64x64_64x32x64_2_nc32hw32_c32rsk32_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_roc_relu_s8_128x64x64_64x32x64_2_nc32hw32_c32rsk32_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_1x1_roc_relu_s8_128x64x64_64x32x64_2_nc32hw32_c32rsk32_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_roc_hswish_s8_128x64x64_64x32x64_2_nc32hw32_c32rsk32_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_1x1_roc_hswish_s8_128x64x64_64x32x64_2_nc32hw32_c32rsk32_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_roc_id_s8_64x128x64_32x64x64_2_nc32hw32_c32rsk32_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_1x1_roc_id_s8_64x128x64_32x64x64_2_nc32hw32_c32rsk32_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_roc_relu_s8_64x128x64_32x64x64_2_nc32hw32_c32rsk32_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_1x1_roc_relu_s8_64x128x64_32x64x64_2_nc32hw32_c32rsk32_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_roc_hswish_s8_64x128x64_32x64x64_2_nc32hw32_c32rsk32_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_1x1_roc_hswish_s8_64x128x64_32x64x64_2_nc32hw32_c32rsk32_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_roc_id_s8_128x64x32_64x32x32_1_nc32hw32_c32rsk32_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_1x1_roc_id_s8_128x64x32_64x32x32_1_nc32hw32_c32rsk32_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_roc_relu_s8_128x64x32_64x32x32_1_nc32hw32_c32rsk32_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_1x1_roc_relu_s8_128x64x32_64x32x32_1_nc32hw32_c32rsk32_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_roc_hswish_s8_128x64x32_64x32x32_1_nc32hw32_c32rsk32_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_1x1_roc_hswish_s8_128x64x32_64x32x32_1_nc32hw32_c32rsk32_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_roc_id_s8_128x32x32_64x32x32_1_nc32hw32_c32rsk32_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_1x1_roc_id_s8_128x32x32_64x32x32_1_nc32hw32_c32rsk32_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_roc_relu_s8_128x32x32_64x32x32_1_nc32hw32_c32rsk32_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_1x1_roc_relu_s8_128x32x32_64x32x32_1_nc32hw32_c32rsk32_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_roc_hswish_s8_128x32x32_64x32x32_1_nc32hw32_c32rsk32_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_1x1_roc_hswish_s8_128x32x32_64x32x32_1_nc32hw32_c32rsk32_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_id_s8_64x128x64_32x64x64_2_nc32hw32_c32rsk32_nc4hw4_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_1x1_id_s8_64x128x64_32x64x64_2_nc32hw32_c32rsk32_nc4hw4_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_relu_s8_64x128x64_32x64x64_2_nc32hw32_c32rsk32_nc4hw4_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_1x1_relu_s8_64x128x64_32x64x64_2_nc32hw32_c32rsk32_nc4hw4_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_hswish_s8_64x128x64_32x64x64_2_nc32hw32_c32rsk32_nc4hw4_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_1x1_hswish_s8_64x128x64_32x64x64_2_nc32hw32_c32rsk32_nc4hw4_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_id_s8_32x128x32_32x64x32_1_nc32hw32_c32rsk32_nc4hw4_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_1x1_id_s8_32x128x32_32x64x32_1_nc32hw32_c32rsk32_nc4hw4_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_relu_s8_32x128x32_32x64x32_1_nc32hw32_c32rsk32_nc4hw4_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_1x1_relu_s8_32x128x32_32x64x32_1_nc32hw32_c32rsk32_nc4hw4_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_hswish_s8_32x128x32_32x64x32_1_nc32hw32_c32rsk32_nc4hw4_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_1x1_hswish_s8_32x128x32_32x64x32_1_nc32hw32_c32rsk32_nc4hw4_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_id_s8_128x32x32_64x32x32_1_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_s8_i8816fprop_1x1_id_s8_128x32x32_64x32x32_1_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_s8_i8816fprop_relu_s8_128x32x32_64x32x32_1_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_s8_i8816fprop_1x1_relu_s8_128x32x32_64x32x32_1_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_s8_i8816fprop_hswish_s8_128x32x32_64x32x32_1_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_s8_i8816fprop_1x1_hswish_s8_128x32x32_64x32x32_1_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_s8_i8816fprop_roc_id_s8_128x32x32_64x32x32_1_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_s8_i8816fprop_1x1_roc_id_s8_128x32x32_64x32x32_1_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_s8_i8816fprop_roc_relu_s8_128x32x32_64x32x32_1_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_s8_i8816fprop_1x1_roc_relu_s8_128x32x32_64x32x32_1_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_s8_i8816fprop_roc_hswish_s8_128x32x32_64x32x32_1_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_s8_i8816fprop_1x1_roc_hswish_s8_128x32x32_64x32x32_1_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_s8_i8816fprop_id_s8_64x16x32_64x16x32_2_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_s8_i8816fprop_1x1_id_s8_64x16x32_64x16x32_2_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_s8_i8816fprop_relu_s8_64x16x32_64x16x32_2_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_s8_i8816fprop_1x1_relu_s8_64x16x32_64x16x32_2_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_s8_i8816fprop_hswish_s8_64x16x32_64x16x32_2_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_s8_i8816fprop_1x1_hswish_s8_64x16x32_64x16x32_2_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_s8_i8816fprop_roc_id_s8_64x16x32_64x16x32_2_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_s8_i8816fprop_1x1_roc_id_s8_64x16x32_64x16x32_2_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_s8_i8816fprop_roc_relu_s8_64x16x32_64x16x32_2_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_s8_i8816fprop_1x1_roc_relu_s8_64x16x32_64x16x32_2_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_s8_i8816fprop_roc_hswish_s8_64x16x32_64x16x32_2_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_s8_i8816fprop_1x1_roc_hswish_s8_64x16x32_64x16x32_2_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_s8_i8816fprop_id_s8_128x32x32_64x32x32_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s8_i8816fprop_1x1_id_s8_128x32x32_64x32x32_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s8_i8816fprop_relu_s8_128x32x32_64x32x32_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s8_i8816fprop_1x1_relu_s8_128x32x32_64x32x32_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s8_i8816fprop_hswish_s8_128x32x32_64x32x32_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s8_i8816fprop_1x1_hswish_s8_128x32x32_64x32x32_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s8_i8816fprop_roc_id_s8_128x32x32_64x32x32_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s8_i8816fprop_1x1_roc_id_s8_128x32x32_64x32x32_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s8_i8816fprop_roc_relu_s8_128x32x32_64x32x32_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s8_i8816fprop_1x1_roc_relu_s8_128x32x32_64x32x32_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s8_i8816fprop_roc_hswish_s8_128x32x32_64x32x32_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s8_i8816fprop_1x1_roc_hswish_s8_128x32x32_64x32x32_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s8_i8816fprop_id_s8_64x16x32_64x16x32_2_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s8_i8816fprop_1x1_id_s8_64x16x32_64x16x32_2_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s8_i8816fprop_relu_s8_64x16x32_64x16x32_2_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s8_i8816fprop_1x1_relu_s8_64x16x32_64x16x32_2_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s8_i8816fprop_hswish_s8_64x16x32_64x16x32_2_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s8_i8816fprop_1x1_hswish_s8_64x16x32_64x16x32_2_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s8_i8816fprop_roc_id_s8_64x16x32_64x16x32_2_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s8_i8816fprop_1x1_roc_id_s8_64x16x32_64x16x32_2_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s8_i8816fprop_roc_relu_s8_64x16x32_64x16x32_2_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s8_i8816fprop_1x1_roc_relu_s8_64x16x32_64x16x32_2_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s8_i8816fprop_roc_hswish_s8_64x16x32_64x16x32_2_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s8_i8816fprop_1x1_roc_hswish_s8_64x16x32_64x16x32_2_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s8_i8816fprop_id_s8_128x32x32_64x32x32_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_1x1_id_s8_128x32x32_64x32x32_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_relu_s8_128x32x32_64x32x32_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_1x1_relu_s8_128x32x32_64x32x32_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_hswish_s8_128x32x32_64x32x32_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_1x1_hswish_s8_128x32x32_64x32x32_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_roc_id_s8_128x32x32_64x32x32_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_1x1_roc_id_s8_128x32x32_64x32x32_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_roc_relu_s8_128x32x32_64x32x32_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_1x1_roc_relu_s8_128x32x32_64x32x32_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_roc_hswish_s8_128x32x32_64x32x32_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_1x1_roc_hswish_s8_128x32x32_64x32x32_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_id_s8_64x16x32_64x16x32_2_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_1x1_id_s8_64x16x32_64x16x32_2_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_relu_s8_64x16x32_64x16x32_2_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_1x1_relu_s8_64x16x32_64x16x32_2_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_hswish_s8_64x16x32_64x16x32_2_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_1x1_hswish_s8_64x16x32_64x16x32_2_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_roc_id_s8_64x16x32_64x16x32_2_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_1x1_roc_id_s8_64x16x32_64x16x32_2_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_roc_relu_s8_64x16x32_64x16x32_2_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_1x1_roc_relu_s8_64x16x32_64x16x32_2_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_roc_hswish_s8_64x16x32_64x16x32_2_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s8_i8816fprop_1x1_roc_hswish_s8_64x16x32_64x16x32_2_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s4_i8816fprop_id_s8_128x32x32_64x32x32_1_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_s4_i8816fprop_1x1_id_s8_128x32x32_64x32x32_1_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_s4_i8816fprop_relu_s8_128x32x32_64x32x32_1_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_s4_i8816fprop_1x1_relu_s8_128x32x32_64x32x32_1_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_s4_i8816fprop_hswish_s8_128x32x32_64x32x32_1_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_s4_i8816fprop_1x1_hswish_s8_128x32x32_64x32x32_1_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_s4_i8816fprop_roc_id_s8_128x32x32_64x32x32_1_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_s4_i8816fprop_1x1_roc_id_s8_128x32x32_64x32x32_1_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_s4_i8816fprop_roc_relu_s8_128x32x32_64x32x32_1_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_s4_i8816fprop_1x1_roc_relu_s8_128x32x32_64x32x32_1_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_s4_i8816fprop_roc_hswish_s8_128x32x32_64x32x32_1_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_s4_i8816fprop_1x1_roc_hswish_s8_128x32x32_64x32x32_1_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_s4_i8816fprop_id_s8_64x16x32_64x16x32_2_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_s4_i8816fprop_1x1_id_s8_64x16x32_64x16x32_2_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_s4_i8816fprop_relu_s8_64x16x32_64x16x32_2_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_s4_i8816fprop_1x1_relu_s8_64x16x32_64x16x32_2_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_s4_i8816fprop_hswish_s8_64x16x32_64x16x32_2_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_s4_i8816fprop_1x1_hswish_s8_64x16x32_64x16x32_2_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_s4_i8816fprop_roc_id_s8_64x16x32_64x16x32_2_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_s4_i8816fprop_1x1_roc_id_s8_64x16x32_64x16x32_2_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_s4_i8816fprop_roc_relu_s8_64x16x32_64x16x32_2_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_s4_i8816fprop_1x1_roc_relu_s8_64x16x32_64x16x32_2_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_s4_i8816fprop_roc_hswish_s8_64x16x32_64x16x32_2_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_s4_i8816fprop_1x1_roc_hswish_s8_64x16x32_64x16x32_2_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_u4_i8816fprop_id_s8_128x32x32_64x32x32_1_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_u4_i8816fprop_1x1_id_s8_128x32x32_64x32x32_1_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_u4_i8816fprop_relu_s8_128x32x32_64x32x32_1_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_u4_i8816fprop_1x1_relu_s8_128x32x32_64x32x32_1_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_u4_i8816fprop_hswish_s8_128x32x32_64x32x32_1_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_u4_i8816fprop_1x1_hswish_s8_128x32x32_64x32x32_1_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_u4_i8816fprop_roc_id_s8_128x32x32_64x32x32_1_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_u4_i8816fprop_1x1_roc_id_s8_128x32x32_64x32x32_1_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_u4_i8816fprop_roc_relu_s8_128x32x32_64x32x32_1_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_u4_i8816fprop_1x1_roc_relu_s8_128x32x32_64x32x32_1_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_u4_i8816fprop_roc_hswish_s8_128x32x32_64x32x32_1_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_u4_i8816fprop_1x1_roc_hswish_s8_128x32x32_64x32x32_1_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_u4_i8816fprop_id_s8_64x16x32_64x16x32_2_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_u4_i8816fprop_1x1_id_s8_64x16x32_64x16x32_2_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_u4_i8816fprop_relu_s8_64x16x32_64x16x32_2_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_u4_i8816fprop_1x1_relu_s8_64x16x32_64x16x32_2_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_u4_i8816fprop_hswish_s8_64x16x32_64x16x32_2_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_u4_i8816fprop_1x1_hswish_s8_64x16x32_64x16x32_2_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_u4_i8816fprop_roc_id_s8_64x16x32_64x16x32_2_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_u4_i8816fprop_1x1_roc_id_s8_64x16x32_64x16x32_2_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_u4_i8816fprop_roc_relu_s8_64x16x32_64x16x32_2_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_u4_i8816fprop_1x1_roc_relu_s8_64x16x32_64x16x32_2_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_u4_i8816fprop_roc_hswish_s8_64x16x32_64x16x32_2_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_u4_i8816fprop_1x1_roc_hswish_s8_64x16x32_64x16x32_2_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_f32_i8816fprop_id_s8_128x32x32_64x32x32_1_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_f32_i8816fprop_1x1_id_s8_128x32x32_64x32x32_1_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_f32_i8816fprop_relu_s8_128x32x32_64x32x32_1_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_f32_i8816fprop_1x1_relu_s8_128x32x32_64x32x32_1_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_f32_i8816fprop_hswish_s8_128x32x32_64x32x32_1_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_f32_i8816fprop_1x1_hswish_s8_128x32x32_64x32x32_1_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_f32_i8816fprop_id_s8_64x16x32_64x16x32_2_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_f32_i8816fprop_1x1_id_s8_64x16x32_64x16x32_2_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_f32_i8816fprop_relu_s8_64x16x32_64x16x32_2_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_f32_i8816fprop_1x1_relu_s8_64x16x32_64x16x32_2_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_f32_i8816fprop_hswish_s8_64x16x32_64x16x32_2_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_f32_i8816fprop_1x1_hswish_s8_64x16x32_64x16x32_2_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_f32_i8816fprop_roc_id_s8_64x16x32_64x16x32_2_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_f32_i8816fprop_1x1_roc_id_s8_64x16x32_64x16x32_2_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_f32_i8816fprop_roc_relu_s8_64x16x32_64x16x32_2_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_f32_i8816fprop_1x1_roc_relu_s8_64x16x32_64x16x32_2_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_f32_i8816fprop_roc_hswish_s8_64x16x32_64x16x32_2_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_f32_i8816fprop_1x1_roc_hswish_s8_64x16x32_64x16x32_2_nhwc_nc4hw4_align4x4.cu",
"cutlass_tensorop_s4_i8816fprop_id_s8_128x32x32_64x32x32_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s4_i8816fprop_1x1_id_s8_128x32x32_64x32x32_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s4_i8816fprop_relu_s8_128x32x32_64x32x32_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s4_i8816fprop_1x1_relu_s8_128x32x32_64x32x32_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s4_i8816fprop_hswish_s8_128x32x32_64x32x32_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s4_i8816fprop_1x1_hswish_s8_128x32x32_64x32x32_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s4_i8816fprop_roc_id_s8_128x32x32_64x32x32_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s4_i8816fprop_1x1_roc_id_s8_128x32x32_64x32x32_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s4_i8816fprop_roc_relu_s8_128x32x32_64x32x32_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s4_i8816fprop_1x1_roc_relu_s8_128x32x32_64x32x32_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s4_i8816fprop_roc_hswish_s8_128x32x32_64x32x32_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s4_i8816fprop_1x1_roc_hswish_s8_128x32x32_64x32x32_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s4_i8816fprop_id_s8_64x16x32_64x16x32_2_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s4_i8816fprop_1x1_id_s8_64x16x32_64x16x32_2_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s4_i8816fprop_relu_s8_64x16x32_64x16x32_2_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s4_i8816fprop_1x1_relu_s8_64x16x32_64x16x32_2_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s4_i8816fprop_hswish_s8_64x16x32_64x16x32_2_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s4_i8816fprop_1x1_hswish_s8_64x16x32_64x16x32_2_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s4_i8816fprop_roc_id_s8_64x16x32_64x16x32_2_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s4_i8816fprop_1x1_roc_id_s8_64x16x32_64x16x32_2_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s4_i8816fprop_roc_relu_s8_64x16x32_64x16x32_2_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s4_i8816fprop_1x1_roc_relu_s8_64x16x32_64x16x32_2_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s4_i8816fprop_roc_hswish_s8_64x16x32_64x16x32_2_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s4_i8816fprop_1x1_roc_hswish_s8_64x16x32_64x16x32_2_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_u4_i8816fprop_id_s8_128x32x32_64x32x32_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_u4_i8816fprop_1x1_id_s8_128x32x32_64x32x32_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_u4_i8816fprop_relu_s8_128x32x32_64x32x32_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_u4_i8816fprop_1x1_relu_s8_128x32x32_64x32x32_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_u4_i8816fprop_hswish_s8_128x32x32_64x32x32_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_u4_i8816fprop_1x1_hswish_s8_128x32x32_64x32x32_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_u4_i8816fprop_roc_id_s8_128x32x32_64x32x32_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_u4_i8816fprop_1x1_roc_id_s8_128x32x32_64x32x32_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_u4_i8816fprop_roc_relu_s8_128x32x32_64x32x32_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_u4_i8816fprop_1x1_roc_relu_s8_128x32x32_64x32x32_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_u4_i8816fprop_roc_hswish_s8_128x32x32_64x32x32_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_u4_i8816fprop_1x1_roc_hswish_s8_128x32x32_64x32x32_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_u4_i8816fprop_id_s8_64x16x32_64x16x32_2_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_u4_i8816fprop_1x1_id_s8_64x16x32_64x16x32_2_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_u4_i8816fprop_relu_s8_64x16x32_64x16x32_2_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_u4_i8816fprop_1x1_relu_s8_64x16x32_64x16x32_2_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_u4_i8816fprop_hswish_s8_64x16x32_64x16x32_2_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_u4_i8816fprop_1x1_hswish_s8_64x16x32_64x16x32_2_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_u4_i8816fprop_roc_id_s8_64x16x32_64x16x32_2_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_u4_i8816fprop_1x1_roc_id_s8_64x16x32_64x16x32_2_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_u4_i8816fprop_roc_relu_s8_64x16x32_64x16x32_2_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_u4_i8816fprop_1x1_roc_relu_s8_64x16x32_64x16x32_2_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_u4_i8816fprop_roc_hswish_s8_64x16x32_64x16x32_2_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_u4_i8816fprop_1x1_roc_hswish_s8_64x16x32_64x16x32_2_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_f32_i8816fprop_id_s8_128x32x32_64x32x32_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_f32_i8816fprop_1x1_id_s8_128x32x32_64x32x32_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_f32_i8816fprop_relu_s8_128x32x32_64x32x32_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_f32_i8816fprop_1x1_relu_s8_128x32x32_64x32x32_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_f32_i8816fprop_hswish_s8_128x32x32_64x32x32_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_f32_i8816fprop_1x1_hswish_s8_128x32x32_64x32x32_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_f32_i8816fprop_id_s8_64x16x32_64x16x32_2_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_f32_i8816fprop_1x1_id_s8_64x16x32_64x16x32_2_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_f32_i8816fprop_relu_s8_64x16x32_64x16x32_2_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_f32_i8816fprop_1x1_relu_s8_64x16x32_64x16x32_2_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_f32_i8816fprop_hswish_s8_64x16x32_64x16x32_2_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_f32_i8816fprop_1x1_hswish_s8_64x16x32_64x16x32_2_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_f32_i8816fprop_roc_id_s8_64x16x32_64x16x32_2_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_f32_i8816fprop_1x1_roc_id_s8_64x16x32_64x16x32_2_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_f32_i8816fprop_roc_relu_s8_64x16x32_64x16x32_2_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_f32_i8816fprop_1x1_roc_relu_s8_64x16x32_64x16x32_2_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_f32_i8816fprop_roc_hswish_s8_64x16x32_64x16x32_2_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_f32_i8816fprop_1x1_roc_hswish_s8_64x16x32_64x16x32_2_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s4_i8816fprop_id_s8_128x32x32_64x32x32_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s4_i8816fprop_1x1_id_s8_128x32x32_64x32x32_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s4_i8816fprop_relu_s8_128x32x32_64x32x32_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s4_i8816fprop_1x1_relu_s8_128x32x32_64x32x32_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s4_i8816fprop_hswish_s8_128x32x32_64x32x32_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s4_i8816fprop_1x1_hswish_s8_128x32x32_64x32x32_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s4_i8816fprop_roc_id_s8_128x32x32_64x32x32_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s4_i8816fprop_1x1_roc_id_s8_128x32x32_64x32x32_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s4_i8816fprop_roc_relu_s8_128x32x32_64x32x32_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s4_i8816fprop_1x1_roc_relu_s8_128x32x32_64x32x32_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s4_i8816fprop_roc_hswish_s8_128x32x32_64x32x32_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s4_i8816fprop_1x1_roc_hswish_s8_128x32x32_64x32x32_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s4_i8816fprop_id_s8_64x16x32_64x16x32_2_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s4_i8816fprop_1x1_id_s8_64x16x32_64x16x32_2_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s4_i8816fprop_relu_s8_64x16x32_64x16x32_2_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s4_i8816fprop_1x1_relu_s8_64x16x32_64x16x32_2_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s4_i8816fprop_hswish_s8_64x16x32_64x16x32_2_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s4_i8816fprop_1x1_hswish_s8_64x16x32_64x16x32_2_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s4_i8816fprop_roc_id_s8_64x16x32_64x16x32_2_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s4_i8816fprop_1x1_roc_id_s8_64x16x32_64x16x32_2_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s4_i8816fprop_roc_relu_s8_64x16x32_64x16x32_2_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s4_i8816fprop_1x1_roc_relu_s8_64x16x32_64x16x32_2_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s4_i8816fprop_roc_hswish_s8_64x16x32_64x16x32_2_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s4_i8816fprop_1x1_roc_hswish_s8_64x16x32_64x16x32_2_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_u4_i8816fprop_id_s8_128x32x32_64x32x32_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_u4_i8816fprop_1x1_id_s8_128x32x32_64x32x32_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_u4_i8816fprop_relu_s8_128x32x32_64x32x32_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_u4_i8816fprop_1x1_relu_s8_128x32x32_64x32x32_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_u4_i8816fprop_hswish_s8_128x32x32_64x32x32_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_u4_i8816fprop_1x1_hswish_s8_128x32x32_64x32x32_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_u4_i8816fprop_roc_id_s8_128x32x32_64x32x32_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_u4_i8816fprop_1x1_roc_id_s8_128x32x32_64x32x32_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_u4_i8816fprop_roc_relu_s8_128x32x32_64x32x32_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_u4_i8816fprop_1x1_roc_relu_s8_128x32x32_64x32x32_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_u4_i8816fprop_roc_hswish_s8_128x32x32_64x32x32_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_u4_i8816fprop_1x1_roc_hswish_s8_128x32x32_64x32x32_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_u4_i8816fprop_id_s8_64x16x32_64x16x32_2_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_u4_i8816fprop_1x1_id_s8_64x16x32_64x16x32_2_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_u4_i8816fprop_relu_s8_64x16x32_64x16x32_2_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_u4_i8816fprop_1x1_relu_s8_64x16x32_64x16x32_2_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_u4_i8816fprop_hswish_s8_64x16x32_64x16x32_2_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_u4_i8816fprop_1x1_hswish_s8_64x16x32_64x16x32_2_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_u4_i8816fprop_roc_id_s8_64x16x32_64x16x32_2_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_u4_i8816fprop_1x1_roc_id_s8_64x16x32_64x16x32_2_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_u4_i8816fprop_roc_relu_s8_64x16x32_64x16x32_2_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_u4_i8816fprop_1x1_roc_relu_s8_64x16x32_64x16x32_2_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_u4_i8816fprop_roc_hswish_s8_64x16x32_64x16x32_2_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_u4_i8816fprop_1x1_roc_hswish_s8_64x16x32_64x16x32_2_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_f32_i8816fprop_id_s8_128x32x32_64x32x32_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_f32_i8816fprop_1x1_id_s8_128x32x32_64x32x32_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_f32_i8816fprop_relu_s8_128x32x32_64x32x32_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_f32_i8816fprop_1x1_relu_s8_128x32x32_64x32x32_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_f32_i8816fprop_hswish_s8_128x32x32_64x32x32_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_f32_i8816fprop_1x1_hswish_s8_128x32x32_64x32x32_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_f32_i8816fprop_id_s8_64x16x32_64x16x32_2_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_f32_i8816fprop_1x1_id_s8_64x16x32_64x16x32_2_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_f32_i8816fprop_relu_s8_64x16x32_64x16x32_2_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_f32_i8816fprop_1x1_relu_s8_64x16x32_64x16x32_2_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_f32_i8816fprop_hswish_s8_64x16x32_64x16x32_2_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_f32_i8816fprop_1x1_hswish_s8_64x16x32_64x16x32_2_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_f32_i8816fprop_roc_id_s8_64x16x32_64x16x32_2_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_f32_i8816fprop_1x1_roc_id_s8_64x16x32_64x16x32_2_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_f32_i8816fprop_roc_relu_s8_64x16x32_64x16x32_2_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_f32_i8816fprop_1x1_roc_relu_s8_64x16x32_64x16x32_2_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_f32_i8816fprop_roc_hswish_s8_64x16x32_64x16x32_2_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_f32_i8816fprop_1x1_roc_hswish_s8_64x16x32_64x16x32_2_nhwc_nc16hw16_align16x16.cu",
"all_conv2d_tensorop8816_operations.cu",
"cutlass_tensorop_s4_i8832fprop_roc_id_s4_128x256x128_64x64x128_2_nc64hw64_c64rsk64_align32x32.cu",
"cutlass_tensorop_s4_i8832fprop_1x1_roc_id_s4_128x256x128_64x64x128_2_nc64hw64_c64rsk64_align32x32.cu",
"cutlass_tensorop_s4_i8832fprop_roc_relu_s4_128x256x128_64x64x128_2_nc64hw64_c64rsk64_align32x32.cu",
"cutlass_tensorop_s4_i8832fprop_1x1_roc_relu_s4_128x256x128_64x64x128_2_nc64hw64_c64rsk64_align32x32.cu",
"cutlass_tensorop_s4_i8832fprop_roc_hswish_s4_128x256x128_64x64x128_2_nc64hw64_c64rsk64_align32x32.cu",
"cutlass_tensorop_s4_i8832fprop_1x1_roc_hswish_s4_128x256x128_64x64x128_2_nc64hw64_c64rsk64_align32x32.cu",
"cutlass_tensorop_s4_i8832fprop_roc_id_s4_128x128x128_64x64x128_2_nc64hw64_c64rsk64_align32x32.cu",
"cutlass_tensorop_s4_i8832fprop_1x1_roc_id_s4_128x128x128_64x64x128_2_nc64hw64_c64rsk64_align32x32.cu",
"cutlass_tensorop_s4_i8832fprop_roc_relu_s4_128x128x128_64x64x128_2_nc64hw64_c64rsk64_align32x32.cu",
"cutlass_tensorop_s4_i8832fprop_1x1_roc_relu_s4_128x128x128_64x64x128_2_nc64hw64_c64rsk64_align32x32.cu",
"cutlass_tensorop_s4_i8832fprop_roc_hswish_s4_128x128x128_64x64x128_2_nc64hw64_c64rsk64_align32x32.cu",
"cutlass_tensorop_s4_i8832fprop_1x1_roc_hswish_s4_128x128x128_64x64x128_2_nc64hw64_c64rsk64_align32x32.cu",
"cutlass_tensorop_s4_i8832fprop_roc_id_s4_128x64x128_64x64x128_2_nc64hw64_c64rsk64_align32x32.cu",
"cutlass_tensorop_s4_i8832fprop_1x1_roc_id_s4_128x64x128_64x64x128_2_nc64hw64_c64rsk64_align32x32.cu",
"cutlass_tensorop_s4_i8832fprop_roc_relu_s4_128x64x128_64x64x128_2_nc64hw64_c64rsk64_align32x32.cu",
"cutlass_tensorop_s4_i8832fprop_1x1_roc_relu_s4_128x64x128_64x64x128_2_nc64hw64_c64rsk64_align32x32.cu",
"cutlass_tensorop_s4_i8832fprop_roc_hswish_s4_128x64x128_64x64x128_2_nc64hw64_c64rsk64_align32x32.cu",
"cutlass_tensorop_s4_i8832fprop_1x1_roc_hswish_s4_128x64x128_64x64x128_2_nc64hw64_c64rsk64_align32x32.cu",
"cutlass_tensorop_s4_i8832fprop_roc_id_s4_128x64x64_64x64x64_1_nc64hw64_c64rsk64_align32x32.cu",
"cutlass_tensorop_s4_i8832fprop_1x1_roc_id_s4_128x64x64_64x64x64_1_nc64hw64_c64rsk64_align32x32.cu",
"cutlass_tensorop_s4_i8832fprop_roc_relu_s4_128x64x64_64x64x64_1_nc64hw64_c64rsk64_align32x32.cu",
"cutlass_tensorop_s4_i8832fprop_1x1_roc_relu_s4_128x64x64_64x64x64_1_nc64hw64_c64rsk64_align32x32.cu",
"cutlass_tensorop_s4_i8832fprop_roc_hswish_s4_128x64x64_64x64x64_1_nc64hw64_c64rsk64_align32x32.cu",
"cutlass_tensorop_s4_i8832fprop_1x1_roc_hswish_s4_128x64x64_64x64x64_1_nc64hw64_c64rsk64_align32x32.cu",
"cutlass_tensorop_u4_i8832fprop_roc_id_u4_s4_128x256x128_64x64x128_2_nc64hw64_c64rsk64_align32x32.cu",
"cutlass_tensorop_u4_i8832fprop_1x1_roc_id_u4_s4_128x256x128_64x64x128_2_nc64hw64_c64rsk64_align32x32.cu",
"cutlass_tensorop_u4_i8832fprop_roc_relu_u4_s4_128x256x128_64x64x128_2_nc64hw64_c64rsk64_align32x32.cu",
"cutlass_tensorop_u4_i8832fprop_1x1_roc_relu_u4_s4_128x256x128_64x64x128_2_nc64hw64_c64rsk64_align32x32.cu",
"cutlass_tensorop_u4_i8832fprop_roc_id_u4_s4_128x128x128_64x64x128_2_nc64hw64_c64rsk64_align32x32.cu",
"cutlass_tensorop_u4_i8832fprop_1x1_roc_id_u4_s4_128x128x128_64x64x128_2_nc64hw64_c64rsk64_align32x32.cu",
"cutlass_tensorop_u4_i8832fprop_roc_relu_u4_s4_128x128x128_64x64x128_2_nc64hw64_c64rsk64_align32x32.cu",
"cutlass_tensorop_u4_i8832fprop_1x1_roc_relu_u4_s4_128x128x128_64x64x128_2_nc64hw64_c64rsk64_align32x32.cu",
"cutlass_tensorop_u4_i8832fprop_roc_id_u4_s4_128x64x128_64x64x128_2_nc64hw64_c64rsk64_align32x32.cu",
"cutlass_tensorop_u4_i8832fprop_1x1_roc_id_u4_s4_128x64x128_64x64x128_2_nc64hw64_c64rsk64_align32x32.cu",
"cutlass_tensorop_u4_i8832fprop_roc_relu_u4_s4_128x64x128_64x64x128_2_nc64hw64_c64rsk64_align32x32.cu",
"cutlass_tensorop_u4_i8832fprop_1x1_roc_relu_u4_s4_128x64x128_64x64x128_2_nc64hw64_c64rsk64_align32x32.cu",
"cutlass_tensorop_u4_i8832fprop_roc_id_u4_s4_128x64x64_64x64x64_1_nc64hw64_c64rsk64_align32x32.cu",
"cutlass_tensorop_u4_i8832fprop_1x1_roc_id_u4_s4_128x64x64_64x64x64_1_nc64hw64_c64rsk64_align32x32.cu",
"cutlass_tensorop_u4_i8832fprop_roc_relu_u4_s4_128x64x64_64x64x64_1_nc64hw64_c64rsk64_align32x32.cu",
"cutlass_tensorop_u4_i8832fprop_1x1_roc_relu_u4_s4_128x64x64_64x64x64_1_nc64hw64_c64rsk64_align32x32.cu",
"cutlass_tensorop_s4_i8832fprop_id_s4_128x16x64_128x16x64_2_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s4_i8832fprop_1x1_id_s4_128x16x64_128x16x64_2_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s4_i8832fprop_relu_s4_128x16x64_128x16x64_2_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s4_i8832fprop_1x1_relu_s4_128x16x64_128x16x64_2_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s4_i8832fprop_hswish_s4_128x16x64_128x16x64_2_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s4_i8832fprop_1x1_hswish_s4_128x16x64_128x16x64_2_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s4_i8832fprop_id_s4_128x32x64_64x32x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s4_i8832fprop_1x1_id_s4_128x32x64_64x32x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s4_i8832fprop_relu_s4_128x32x64_64x32x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s4_i8832fprop_1x1_relu_s4_128x32x64_64x32x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s4_i8832fprop_hswish_s4_128x32x64_64x32x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s4_i8832fprop_1x1_hswish_s4_128x32x64_64x32x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s4_i8832fprop_roc_id_s4_128x32x64_64x32x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s4_i8832fprop_1x1_roc_id_s4_128x32x64_64x32x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s4_i8832fprop_roc_relu_s4_128x32x64_64x32x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s4_i8832fprop_1x1_roc_relu_s4_128x32x64_64x32x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s4_i8832fprop_roc_hswish_s4_128x32x64_64x32x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s4_i8832fprop_1x1_roc_hswish_s4_128x32x64_64x32x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s4_i8832fprop_id_s4_128x64x64_64x64x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s4_i8832fprop_1x1_id_s4_128x64x64_64x64x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s4_i8832fprop_relu_s4_128x64x64_64x64x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s4_i8832fprop_1x1_relu_s4_128x64x64_64x64x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s4_i8832fprop_hswish_s4_128x64x64_64x64x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s4_i8832fprop_1x1_hswish_s4_128x64x64_64x64x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s4_i8832fprop_roc_id_s4_128x64x64_64x64x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s4_i8832fprop_1x1_roc_id_s4_128x64x64_64x64x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s4_i8832fprop_roc_relu_s4_128x64x64_64x64x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s4_i8832fprop_1x1_roc_relu_s4_128x64x64_64x64x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s4_i8832fprop_roc_hswish_s4_128x64x64_64x64x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s4_i8832fprop_1x1_roc_hswish_s4_128x64x64_64x64x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s4_i8832fprop_id_s4_128x16x64_128x16x64_2_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s4_i8832fprop_1x1_id_s4_128x16x64_128x16x64_2_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s4_i8832fprop_relu_s4_128x16x64_128x16x64_2_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s4_i8832fprop_1x1_relu_s4_128x16x64_128x16x64_2_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s4_i8832fprop_hswish_s4_128x16x64_128x16x64_2_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s4_i8832fprop_1x1_hswish_s4_128x16x64_128x16x64_2_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s4_i8832fprop_id_s4_128x32x64_64x32x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s4_i8832fprop_1x1_id_s4_128x32x64_64x32x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s4_i8832fprop_relu_s4_128x32x64_64x32x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s4_i8832fprop_1x1_relu_s4_128x32x64_64x32x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s4_i8832fprop_hswish_s4_128x32x64_64x32x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s4_i8832fprop_1x1_hswish_s4_128x32x64_64x32x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s4_i8832fprop_roc_id_s4_128x32x64_64x32x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s4_i8832fprop_1x1_roc_id_s4_128x32x64_64x32x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s4_i8832fprop_roc_relu_s4_128x32x64_64x32x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s4_i8832fprop_1x1_roc_relu_s4_128x32x64_64x32x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s4_i8832fprop_roc_hswish_s4_128x32x64_64x32x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s4_i8832fprop_1x1_roc_hswish_s4_128x32x64_64x32x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s4_i8832fprop_id_s4_128x64x64_64x64x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s4_i8832fprop_1x1_id_s4_128x64x64_64x64x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s4_i8832fprop_relu_s4_128x64x64_64x64x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s4_i8832fprop_1x1_relu_s4_128x64x64_64x64x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s4_i8832fprop_hswish_s4_128x64x64_64x64x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s4_i8832fprop_1x1_hswish_s4_128x64x64_64x64x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s4_i8832fprop_roc_id_s4_128x64x64_64x64x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s4_i8832fprop_1x1_roc_id_s4_128x64x64_64x64x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s4_i8832fprop_roc_relu_s4_128x64x64_64x64x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s4_i8832fprop_1x1_roc_relu_s4_128x64x64_64x64x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s4_i8832fprop_roc_hswish_s4_128x64x64_64x64x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s4_i8832fprop_1x1_roc_hswish_s4_128x64x64_64x64x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s4_i8832fprop_id_s4_128x16x64_128x16x64_2_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s4_i8832fprop_1x1_id_s4_128x16x64_128x16x64_2_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s4_i8832fprop_relu_s4_128x16x64_128x16x64_2_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s4_i8832fprop_1x1_relu_s4_128x16x64_128x16x64_2_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s4_i8832fprop_hswish_s4_128x16x64_128x16x64_2_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s4_i8832fprop_1x1_hswish_s4_128x16x64_128x16x64_2_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s4_i8832fprop_id_s4_128x32x64_64x32x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s4_i8832fprop_1x1_id_s4_128x32x64_64x32x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s4_i8832fprop_relu_s4_128x32x64_64x32x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s4_i8832fprop_1x1_relu_s4_128x32x64_64x32x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s4_i8832fprop_hswish_s4_128x32x64_64x32x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s4_i8832fprop_1x1_hswish_s4_128x32x64_64x32x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s4_i8832fprop_roc_id_s4_128x32x64_64x32x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s4_i8832fprop_1x1_roc_id_s4_128x32x64_64x32x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s4_i8832fprop_roc_relu_s4_128x32x64_64x32x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s4_i8832fprop_1x1_roc_relu_s4_128x32x64_64x32x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s4_i8832fprop_roc_hswish_s4_128x32x64_64x32x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s4_i8832fprop_1x1_roc_hswish_s4_128x32x64_64x32x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s4_i8832fprop_id_s4_128x64x64_64x64x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s4_i8832fprop_1x1_id_s4_128x64x64_64x64x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s4_i8832fprop_relu_s4_128x64x64_64x64x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s4_i8832fprop_1x1_relu_s4_128x64x64_64x64x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s4_i8832fprop_hswish_s4_128x64x64_64x64x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s4_i8832fprop_1x1_hswish_s4_128x64x64_64x64x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s4_i8832fprop_roc_id_s4_128x64x64_64x64x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s4_i8832fprop_1x1_roc_id_s4_128x64x64_64x64x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s4_i8832fprop_roc_relu_s4_128x64x64_64x64x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s4_i8832fprop_1x1_roc_relu_s4_128x64x64_64x64x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s4_i8832fprop_roc_hswish_s4_128x64x64_64x64x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s4_i8832fprop_1x1_roc_hswish_s4_128x64x64_64x64x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_u4_i8832fprop_id_u4_s4_128x16x64_128x16x64_2_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_u4_i8832fprop_1x1_id_u4_s4_128x16x64_128x16x64_2_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_u4_i8832fprop_relu_u4_s4_128x16x64_128x16x64_2_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_u4_i8832fprop_1x1_relu_u4_s4_128x16x64_128x16x64_2_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_u4_i8832fprop_id_u4_s4_128x32x64_64x32x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_u4_i8832fprop_1x1_id_u4_s4_128x32x64_64x32x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_u4_i8832fprop_relu_u4_s4_128x32x64_64x32x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_u4_i8832fprop_1x1_relu_u4_s4_128x32x64_64x32x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_u4_i8832fprop_roc_id_u4_s4_128x32x64_64x32x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_u4_i8832fprop_1x1_roc_id_u4_s4_128x32x64_64x32x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_u4_i8832fprop_roc_relu_u4_s4_128x32x64_64x32x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_u4_i8832fprop_1x1_roc_relu_u4_s4_128x32x64_64x32x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_u4_i8832fprop_id_u4_s4_128x64x64_64x64x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_u4_i8832fprop_1x1_id_u4_s4_128x64x64_64x64x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_u4_i8832fprop_relu_u4_s4_128x64x64_64x64x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_u4_i8832fprop_1x1_relu_u4_s4_128x64x64_64x64x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_u4_i8832fprop_roc_id_u4_s4_128x64x64_64x64x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_u4_i8832fprop_1x1_roc_id_u4_s4_128x64x64_64x64x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_u4_i8832fprop_roc_relu_u4_s4_128x64x64_64x64x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_u4_i8832fprop_1x1_roc_relu_u4_s4_128x64x64_64x64x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_u4_i8832fprop_id_u4_s4_128x16x64_128x16x64_2_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_u4_i8832fprop_1x1_id_u4_s4_128x16x64_128x16x64_2_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_u4_i8832fprop_relu_u4_s4_128x16x64_128x16x64_2_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_u4_i8832fprop_1x1_relu_u4_s4_128x16x64_128x16x64_2_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_u4_i8832fprop_id_u4_s4_128x32x64_64x32x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_u4_i8832fprop_1x1_id_u4_s4_128x32x64_64x32x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_u4_i8832fprop_relu_u4_s4_128x32x64_64x32x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_u4_i8832fprop_1x1_relu_u4_s4_128x32x64_64x32x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_u4_i8832fprop_roc_id_u4_s4_128x32x64_64x32x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_u4_i8832fprop_1x1_roc_id_u4_s4_128x32x64_64x32x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_u4_i8832fprop_roc_relu_u4_s4_128x32x64_64x32x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_u4_i8832fprop_1x1_roc_relu_u4_s4_128x32x64_64x32x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_u4_i8832fprop_id_u4_s4_128x64x64_64x64x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_u4_i8832fprop_1x1_id_u4_s4_128x64x64_64x64x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_u4_i8832fprop_relu_u4_s4_128x64x64_64x64x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_u4_i8832fprop_1x1_relu_u4_s4_128x64x64_64x64x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_u4_i8832fprop_roc_id_u4_s4_128x64x64_64x64x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_u4_i8832fprop_1x1_roc_id_u4_s4_128x64x64_64x64x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_u4_i8832fprop_roc_relu_u4_s4_128x64x64_64x64x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_u4_i8832fprop_1x1_roc_relu_u4_s4_128x64x64_64x64x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_u4_i8832fprop_id_u4_s4_128x16x64_128x16x64_2_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_u4_i8832fprop_1x1_id_u4_s4_128x16x64_128x16x64_2_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_u4_i8832fprop_relu_u4_s4_128x16x64_128x16x64_2_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_u4_i8832fprop_1x1_relu_u4_s4_128x16x64_128x16x64_2_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_u4_i8832fprop_id_u4_s4_128x32x64_64x32x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_u4_i8832fprop_1x1_id_u4_s4_128x32x64_64x32x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_u4_i8832fprop_relu_u4_s4_128x32x64_64x32x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_u4_i8832fprop_1x1_relu_u4_s4_128x32x64_64x32x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_u4_i8832fprop_roc_id_u4_s4_128x32x64_64x32x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_u4_i8832fprop_1x1_roc_id_u4_s4_128x32x64_64x32x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_u4_i8832fprop_roc_relu_u4_s4_128x32x64_64x32x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_u4_i8832fprop_1x1_roc_relu_u4_s4_128x32x64_64x32x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_u4_i8832fprop_id_u4_s4_128x64x64_64x64x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_u4_i8832fprop_1x1_id_u4_s4_128x64x64_64x64x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_u4_i8832fprop_relu_u4_s4_128x64x64_64x64x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_u4_i8832fprop_1x1_relu_u4_s4_128x64x64_64x64x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_u4_i8832fprop_roc_id_u4_s4_128x64x64_64x64x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_u4_i8832fprop_1x1_roc_id_u4_s4_128x64x64_64x64x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_u4_i8832fprop_roc_relu_u4_s4_128x64x64_64x64x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_u4_i8832fprop_1x1_roc_relu_u4_s4_128x64x64_64x64x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s8_i8832fprop_id_s4_128x16x64_128x16x64_2_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s8_i8832fprop_1x1_id_s4_128x16x64_128x16x64_2_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s8_i8832fprop_relu_s4_128x16x64_128x16x64_2_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s8_i8832fprop_1x1_relu_s4_128x16x64_128x16x64_2_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s8_i8832fprop_hswish_s4_128x16x64_128x16x64_2_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s8_i8832fprop_1x1_hswish_s4_128x16x64_128x16x64_2_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s8_i8832fprop_id_s4_128x32x64_64x32x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s8_i8832fprop_1x1_id_s4_128x32x64_64x32x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s8_i8832fprop_relu_s4_128x32x64_64x32x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s8_i8832fprop_1x1_relu_s4_128x32x64_64x32x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s8_i8832fprop_hswish_s4_128x32x64_64x32x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s8_i8832fprop_1x1_hswish_s4_128x32x64_64x32x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s8_i8832fprop_roc_id_s4_128x32x64_64x32x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s8_i8832fprop_1x1_roc_id_s4_128x32x64_64x32x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s8_i8832fprop_roc_relu_s4_128x32x64_64x32x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s8_i8832fprop_1x1_roc_relu_s4_128x32x64_64x32x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s8_i8832fprop_roc_hswish_s4_128x32x64_64x32x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s8_i8832fprop_1x1_roc_hswish_s4_128x32x64_64x32x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s8_i8832fprop_id_s4_128x64x64_64x64x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s8_i8832fprop_1x1_id_s4_128x64x64_64x64x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s8_i8832fprop_relu_s4_128x64x64_64x64x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s8_i8832fprop_1x1_relu_s4_128x64x64_64x64x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s8_i8832fprop_hswish_s4_128x64x64_64x64x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s8_i8832fprop_1x1_hswish_s4_128x64x64_64x64x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s8_i8832fprop_roc_id_s4_128x64x64_64x64x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s8_i8832fprop_1x1_roc_id_s4_128x64x64_64x64x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s8_i8832fprop_roc_relu_s4_128x64x64_64x64x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s8_i8832fprop_1x1_roc_relu_s4_128x64x64_64x64x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s8_i8832fprop_roc_hswish_s4_128x64x64_64x64x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s8_i8832fprop_1x1_roc_hswish_s4_128x64x64_64x64x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s8_i8832fprop_id_s4_128x16x64_128x16x64_2_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s8_i8832fprop_1x1_id_s4_128x16x64_128x16x64_2_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s8_i8832fprop_relu_s4_128x16x64_128x16x64_2_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s8_i8832fprop_1x1_relu_s4_128x16x64_128x16x64_2_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s8_i8832fprop_hswish_s4_128x16x64_128x16x64_2_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s8_i8832fprop_1x1_hswish_s4_128x16x64_128x16x64_2_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s8_i8832fprop_id_s4_128x32x64_64x32x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s8_i8832fprop_1x1_id_s4_128x32x64_64x32x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s8_i8832fprop_relu_s4_128x32x64_64x32x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s8_i8832fprop_1x1_relu_s4_128x32x64_64x32x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s8_i8832fprop_hswish_s4_128x32x64_64x32x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s8_i8832fprop_1x1_hswish_s4_128x32x64_64x32x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s8_i8832fprop_roc_id_s4_128x32x64_64x32x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s8_i8832fprop_1x1_roc_id_s4_128x32x64_64x32x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s8_i8832fprop_roc_relu_s4_128x32x64_64x32x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s8_i8832fprop_1x1_roc_relu_s4_128x32x64_64x32x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s8_i8832fprop_roc_hswish_s4_128x32x64_64x32x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s8_i8832fprop_1x1_roc_hswish_s4_128x32x64_64x32x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s8_i8832fprop_id_s4_128x64x64_64x64x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s8_i8832fprop_1x1_id_s4_128x64x64_64x64x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s8_i8832fprop_relu_s4_128x64x64_64x64x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s8_i8832fprop_1x1_relu_s4_128x64x64_64x64x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s8_i8832fprop_hswish_s4_128x64x64_64x64x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s8_i8832fprop_1x1_hswish_s4_128x64x64_64x64x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s8_i8832fprop_roc_id_s4_128x64x64_64x64x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s8_i8832fprop_1x1_roc_id_s4_128x64x64_64x64x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s8_i8832fprop_roc_relu_s4_128x64x64_64x64x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s8_i8832fprop_1x1_roc_relu_s4_128x64x64_64x64x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s8_i8832fprop_roc_hswish_s4_128x64x64_64x64x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s8_i8832fprop_1x1_roc_hswish_s4_128x64x64_64x64x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s8_i8832fprop_id_s4_128x16x64_128x16x64_2_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s8_i8832fprop_1x1_id_s4_128x16x64_128x16x64_2_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s8_i8832fprop_relu_s4_128x16x64_128x16x64_2_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s8_i8832fprop_1x1_relu_s4_128x16x64_128x16x64_2_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s8_i8832fprop_hswish_s4_128x16x64_128x16x64_2_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s8_i8832fprop_1x1_hswish_s4_128x16x64_128x16x64_2_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s8_i8832fprop_id_s4_128x32x64_64x32x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s8_i8832fprop_1x1_id_s4_128x32x64_64x32x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s8_i8832fprop_relu_s4_128x32x64_64x32x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s8_i8832fprop_1x1_relu_s4_128x32x64_64x32x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s8_i8832fprop_hswish_s4_128x32x64_64x32x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s8_i8832fprop_1x1_hswish_s4_128x32x64_64x32x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s8_i8832fprop_roc_id_s4_128x32x64_64x32x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s8_i8832fprop_1x1_roc_id_s4_128x32x64_64x32x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s8_i8832fprop_roc_relu_s4_128x32x64_64x32x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s8_i8832fprop_1x1_roc_relu_s4_128x32x64_64x32x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s8_i8832fprop_roc_hswish_s4_128x32x64_64x32x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s8_i8832fprop_1x1_roc_hswish_s4_128x32x64_64x32x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s8_i8832fprop_id_s4_128x64x64_64x64x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s8_i8832fprop_1x1_id_s4_128x64x64_64x64x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s8_i8832fprop_relu_s4_128x64x64_64x64x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s8_i8832fprop_1x1_relu_s4_128x64x64_64x64x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s8_i8832fprop_hswish_s4_128x64x64_64x64x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s8_i8832fprop_1x1_hswish_s4_128x64x64_64x64x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s8_i8832fprop_roc_id_s4_128x64x64_64x64x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s8_i8832fprop_1x1_roc_id_s4_128x64x64_64x64x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s8_i8832fprop_roc_relu_s4_128x64x64_64x64x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s8_i8832fprop_1x1_roc_relu_s4_128x64x64_64x64x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s8_i8832fprop_roc_hswish_s4_128x64x64_64x64x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s8_i8832fprop_1x1_roc_hswish_s4_128x64x64_64x64x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s8_i8832fprop_id_u4_s4_128x16x64_128x16x64_2_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s8_i8832fprop_1x1_id_u4_s4_128x16x64_128x16x64_2_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s8_i8832fprop_relu_u4_s4_128x16x64_128x16x64_2_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s8_i8832fprop_1x1_relu_u4_s4_128x16x64_128x16x64_2_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s8_i8832fprop_id_u4_s4_128x32x64_64x32x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s8_i8832fprop_1x1_id_u4_s4_128x32x64_64x32x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s8_i8832fprop_relu_u4_s4_128x32x64_64x32x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s8_i8832fprop_1x1_relu_u4_s4_128x32x64_64x32x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s8_i8832fprop_roc_id_u4_s4_128x32x64_64x32x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s8_i8832fprop_1x1_roc_id_u4_s4_128x32x64_64x32x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s8_i8832fprop_roc_relu_u4_s4_128x32x64_64x32x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s8_i8832fprop_1x1_roc_relu_u4_s4_128x32x64_64x32x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s8_i8832fprop_id_u4_s4_128x64x64_64x64x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s8_i8832fprop_1x1_id_u4_s4_128x64x64_64x64x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s8_i8832fprop_relu_u4_s4_128x64x64_64x64x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s8_i8832fprop_1x1_relu_u4_s4_128x64x64_64x64x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s8_i8832fprop_roc_id_u4_s4_128x64x64_64x64x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s8_i8832fprop_1x1_roc_id_u4_s4_128x64x64_64x64x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s8_i8832fprop_roc_relu_u4_s4_128x64x64_64x64x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s8_i8832fprop_1x1_roc_relu_u4_s4_128x64x64_64x64x64_1_nhwc_nc8hw8_align8x8.cu",
"cutlass_tensorop_s8_i8832fprop_id_u4_s4_128x16x64_128x16x64_2_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s8_i8832fprop_1x1_id_u4_s4_128x16x64_128x16x64_2_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s8_i8832fprop_relu_u4_s4_128x16x64_128x16x64_2_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s8_i8832fprop_1x1_relu_u4_s4_128x16x64_128x16x64_2_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s8_i8832fprop_id_u4_s4_128x32x64_64x32x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s8_i8832fprop_1x1_id_u4_s4_128x32x64_64x32x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s8_i8832fprop_relu_u4_s4_128x32x64_64x32x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s8_i8832fprop_1x1_relu_u4_s4_128x32x64_64x32x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s8_i8832fprop_roc_id_u4_s4_128x32x64_64x32x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s8_i8832fprop_1x1_roc_id_u4_s4_128x32x64_64x32x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s8_i8832fprop_roc_relu_u4_s4_128x32x64_64x32x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s8_i8832fprop_1x1_roc_relu_u4_s4_128x32x64_64x32x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s8_i8832fprop_id_u4_s4_128x64x64_64x64x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s8_i8832fprop_1x1_id_u4_s4_128x64x64_64x64x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s8_i8832fprop_relu_u4_s4_128x64x64_64x64x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s8_i8832fprop_1x1_relu_u4_s4_128x64x64_64x64x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s8_i8832fprop_roc_id_u4_s4_128x64x64_64x64x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s8_i8832fprop_1x1_roc_id_u4_s4_128x64x64_64x64x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s8_i8832fprop_roc_relu_u4_s4_128x64x64_64x64x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s8_i8832fprop_1x1_roc_relu_u4_s4_128x64x64_64x64x64_1_nhwc_nc16hw16_align16x16.cu",
"cutlass_tensorop_s8_i8832fprop_id_u4_s4_128x16x64_128x16x64_2_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s8_i8832fprop_1x1_id_u4_s4_128x16x64_128x16x64_2_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s8_i8832fprop_relu_u4_s4_128x16x64_128x16x64_2_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s8_i8832fprop_1x1_relu_u4_s4_128x16x64_128x16x64_2_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s8_i8832fprop_id_u4_s4_128x32x64_64x32x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s8_i8832fprop_1x1_id_u4_s4_128x32x64_64x32x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s8_i8832fprop_relu_u4_s4_128x32x64_64x32x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s8_i8832fprop_1x1_relu_u4_s4_128x32x64_64x32x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s8_i8832fprop_roc_id_u4_s4_128x32x64_64x32x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s8_i8832fprop_1x1_roc_id_u4_s4_128x32x64_64x32x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s8_i8832fprop_roc_relu_u4_s4_128x32x64_64x32x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s8_i8832fprop_1x1_roc_relu_u4_s4_128x32x64_64x32x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s8_i8832fprop_id_u4_s4_128x64x64_64x64x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s8_i8832fprop_1x1_id_u4_s4_128x64x64_64x64x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s8_i8832fprop_relu_u4_s4_128x64x64_64x64x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s8_i8832fprop_1x1_relu_u4_s4_128x64x64_64x64x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s8_i8832fprop_roc_id_u4_s4_128x64x64_64x64x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s8_i8832fprop_1x1_roc_id_u4_s4_128x64x64_64x64x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s8_i8832fprop_roc_relu_u4_s4_128x64x64_64x64x64_1_nhwc_nc32hw32_align32x32.cu",
"cutlass_tensorop_s8_i8832fprop_1x1_roc_relu_u4_s4_128x64x64_64x64x64_1_nhwc_nc32hw32_align32x32.cu",
"all_conv2d_tensorop8832_operations.cu",
"cutlass_simt_sdwfprop_id_f32_32x32x8_32x32x8_2_nchw_nchw_align4x1.cu",
"cutlass_simt_sdwfprop_relu_f32_32x32x8_32x32x8_2_nchw_nchw_align4x1.cu",
"cutlass_simt_sdwfprop_id_f32_32x32x8_32x32x8_2_nchw_nchw_align1x1.cu",
"cutlass_simt_sdwfprop_relu_f32_32x32x8_32x32x8_2_nchw_nchw_align1x1.cu",
"cutlass_simt_sdwfprop_id_f32_32x64x8_32x64x8_2_nchw_nchw_align4x1.cu",
"cutlass_simt_sdwfprop_relu_f32_32x64x8_32x64x8_2_nchw_nchw_align4x1.cu",
"cutlass_simt_sdwfprop_id_f32_32x64x8_32x64x8_2_nchw_nchw_align1x1.cu",
"cutlass_simt_sdwfprop_relu_f32_32x64x8_32x64x8_2_nchw_nchw_align1x1.cu",
"cutlass_simt_sdwfprop_id_f32_64x32x8_64x32x8_2_nchw_nchw_align4x1.cu",
"cutlass_simt_sdwfprop_relu_f32_64x32x8_64x32x8_2_nchw_nchw_align4x1.cu",
"cutlass_simt_sdwfprop_id_f32_64x32x8_64x32x8_2_nchw_nchw_align1x1.cu",
"cutlass_simt_sdwfprop_relu_f32_64x32x8_64x32x8_2_nchw_nchw_align1x1.cu",
"cutlass_simt_sdwfprop_id_f32_32x128x8_32x64x8_2_nchw_nchw_align4x1.cu",
"cutlass_simt_sdwfprop_relu_f32_32x128x8_32x64x8_2_nchw_nchw_align4x1.cu",
"cutlass_simt_sdwfprop_id_f32_32x128x8_32x64x8_2_nchw_nchw_align1x1.cu",
"cutlass_simt_sdwfprop_relu_f32_32x128x8_32x64x8_2_nchw_nchw_align1x1.cu",
"cutlass_simt_sdwfprop_id_f32_64x64x8_32x64x8_2_nchw_nchw_align4x1.cu",
"cutlass_simt_sdwfprop_relu_f32_64x64x8_32x64x8_2_nchw_nchw_align4x1.cu",
"cutlass_simt_sdwfprop_id_f32_64x64x8_32x64x8_2_nchw_nchw_align1x1.cu",
"cutlass_simt_sdwfprop_relu_f32_64x64x8_32x64x8_2_nchw_nchw_align1x1.cu",
"cutlass_simt_sdwfprop_id_f32_128x32x8_64x32x8_2_nchw_nchw_align4x1.cu",
"cutlass_simt_sdwfprop_relu_f32_128x32x8_64x32x8_2_nchw_nchw_align4x1.cu",
"cutlass_simt_sdwfprop_id_f32_128x32x8_64x32x8_2_nchw_nchw_align1x1.cu",
"cutlass_simt_sdwfprop_relu_f32_128x32x8_64x32x8_2_nchw_nchw_align1x1.cu",
"cutlass_simt_sdwfprop_id_f32_64x128x8_32x64x8_2_nchw_nchw_align4x1.cu",
"cutlass_simt_sdwfprop_relu_f32_64x128x8_32x64x8_2_nchw_nchw_align4x1.cu",
"cutlass_simt_sdwfprop_id_f32_64x128x8_32x64x8_2_nchw_nchw_align1x1.cu",
"cutlass_simt_sdwfprop_relu_f32_64x128x8_32x64x8_2_nchw_nchw_align1x1.cu",
"cutlass_simt_sdwfprop_id_f32_128x64x8_64x32x8_2_nchw_nchw_align4x1.cu",
"cutlass_simt_sdwfprop_relu_f32_128x64x8_64x32x8_2_nchw_nchw_align4x1.cu",
"cutlass_simt_sdwfprop_id_f32_128x64x8_64x32x8_2_nchw_nchw_align1x1.cu",
"cutlass_simt_sdwfprop_relu_f32_128x64x8_64x32x8_2_nchw_nchw_align1x1.cu",
"cutlass_simt_sdwfprop_id_f32_128x128x8_32x64x8_2_nchw_nchw_align4x1.cu",
"cutlass_simt_sdwfprop_relu_f32_128x128x8_32x64x8_2_nchw_nchw_align4x1.cu",
"cutlass_simt_sdwfprop_id_f32_128x128x8_32x64x8_2_nchw_nchw_align1x1.cu",
"cutlass_simt_sdwfprop_relu_f32_128x128x8_32x64x8_2_nchw_nchw_align1x1.cu",
"all_dwconv2d_fprop_simt_operations.cu",
"cutlass_tensorop_f16_s884dwfprop_id_f16_128x256x32_64x64x32_2_nchw_nchw_align8x1.cu",
"cutlass_tensorop_f16_s884dwfprop_relu_f16_128x256x32_64x64x32_2_nchw_nchw_align8x1.cu",
"cutlass_tensorop_f16_s884dwfprop_id_f16_128x128x32_32x32x32_2_nchw_nchw_align8x1.cu",
"cutlass_tensorop_f16_s884dwfprop_relu_f16_128x128x32_32x32x32_2_nchw_nchw_align8x1.cu",
"cutlass_tensorop_f16_s884dwfprop_id_f16_64x128x32_32x32x32_2_nchw_nchw_align8x1.cu",
"cutlass_tensorop_f16_s884dwfprop_relu_f16_64x128x32_32x32x32_2_nchw_nchw_align8x1.cu",
"cutlass_tensorop_f16_s884dwfprop_id_f16_128x64x32_32x32x32_2_nchw_nchw_align8x1.cu",
"cutlass_tensorop_f16_s884dwfprop_relu_f16_128x64x32_32x32x32_2_nchw_nchw_align8x1.cu",
"cutlass_tensorop_f16_s884dwfprop_id_f16_64x64x32_32x32x32_2_nchw_nchw_align8x1.cu",
"cutlass_tensorop_f16_s884dwfprop_relu_f16_64x64x32_32x32x32_2_nchw_nchw_align8x1.cu",
"cutlass_tensorop_f16_s884dwfprop_id_f16_128x256x32_64x64x32_2_nchw_nchw_align2x1.cu",
"cutlass_tensorop_f16_s884dwfprop_relu_f16_128x256x32_64x64x32_2_nchw_nchw_align2x1.cu",
"cutlass_tensorop_f16_s884dwfprop_id_f16_128x128x32_32x32x32_2_nchw_nchw_align2x1.cu",
"cutlass_tensorop_f16_s884dwfprop_relu_f16_128x128x32_32x32x32_2_nchw_nchw_align2x1.cu",
"cutlass_tensorop_f16_s884dwfprop_id_f16_64x128x32_32x32x32_2_nchw_nchw_align2x1.cu",
"cutlass_tensorop_f16_s884dwfprop_relu_f16_64x128x32_32x32x32_2_nchw_nchw_align2x1.cu",
"cutlass_tensorop_f16_s884dwfprop_id_f16_128x64x32_32x32x32_2_nchw_nchw_align2x1.cu",
"cutlass_tensorop_f16_s884dwfprop_relu_f16_128x64x32_32x32x32_2_nchw_nchw_align2x1.cu",
"cutlass_tensorop_f16_s884dwfprop_id_f16_64x64x32_32x32x32_2_nchw_nchw_align2x1.cu",
"cutlass_tensorop_f16_s884dwfprop_relu_f16_64x64x32_32x32x32_2_nchw_nchw_align2x1.cu",
"cutlass_tensorop_f16_s884dwfprop_id_f16_128x256x32_64x64x32_2_nchw_nchw_align1x1.cu",
"cutlass_tensorop_f16_s884dwfprop_relu_f16_128x256x32_64x64x32_2_nchw_nchw_align1x1.cu",
"cutlass_tensorop_f16_s884dwfprop_id_f16_128x128x32_32x32x32_2_nchw_nchw_align1x1.cu",
"cutlass_tensorop_f16_s884dwfprop_relu_f16_128x128x32_32x32x32_2_nchw_nchw_align1x1.cu",
"cutlass_tensorop_f16_s884dwfprop_id_f16_64x128x32_32x32x32_2_nchw_nchw_align1x1.cu",
"cutlass_tensorop_f16_s884dwfprop_relu_f16_64x128x32_32x32x32_2_nchw_nchw_align1x1.cu",
"cutlass_tensorop_f16_s884dwfprop_id_f16_128x64x32_32x32x32_2_nchw_nchw_align1x1.cu",
"cutlass_tensorop_f16_s884dwfprop_relu_f16_128x64x32_32x32x32_2_nchw_nchw_align1x1.cu",
"cutlass_tensorop_f16_s884dwfprop_id_f16_64x64x32_32x32x32_2_nchw_nchw_align1x1.cu",
"cutlass_tensorop_f16_s884dwfprop_relu_f16_64x64x32_32x32x32_2_nchw_nchw_align1x1.cu",
"cutlass_tensorop_h884dwfprop_id_f16_128x256x32_64x64x32_2_nchw_nchw_align8x1.cu",
"cutlass_tensorop_h884dwfprop_relu_f16_128x256x32_64x64x32_2_nchw_nchw_align8x1.cu",
"cutlass_tensorop_h884dwfprop_id_f16_128x128x32_32x32x32_2_nchw_nchw_align8x1.cu",
"cutlass_tensorop_h884dwfprop_relu_f16_128x128x32_32x32x32_2_nchw_nchw_align8x1.cu",
"cutlass_tensorop_h884dwfprop_id_f16_64x128x32_32x32x32_2_nchw_nchw_align8x1.cu",
"cutlass_tensorop_h884dwfprop_relu_f16_64x128x32_32x32x32_2_nchw_nchw_align8x1.cu",
"cutlass_tensorop_h884dwfprop_id_f16_128x64x32_32x32x32_2_nchw_nchw_align8x1.cu",
"cutlass_tensorop_h884dwfprop_relu_f16_128x64x32_32x32x32_2_nchw_nchw_align8x1.cu",
"cutlass_tensorop_h884dwfprop_id_f16_64x64x32_32x32x32_2_nchw_nchw_align8x1.cu",
"cutlass_tensorop_h884dwfprop_relu_f16_64x64x32_32x32x32_2_nchw_nchw_align8x1.cu",
"cutlass_tensorop_h884dwfprop_id_f16_128x256x32_64x64x32_2_nchw_nchw_align2x1.cu",
"cutlass_tensorop_h884dwfprop_relu_f16_128x256x32_64x64x32_2_nchw_nchw_align2x1.cu",
"cutlass_tensorop_h884dwfprop_id_f16_128x128x32_32x32x32_2_nchw_nchw_align2x1.cu",
"cutlass_tensorop_h884dwfprop_relu_f16_128x128x32_32x32x32_2_nchw_nchw_align2x1.cu",
"cutlass_tensorop_h884dwfprop_id_f16_64x128x32_32x32x32_2_nchw_nchw_align2x1.cu",
"cutlass_tensorop_h884dwfprop_relu_f16_64x128x32_32x32x32_2_nchw_nchw_align2x1.cu",
"cutlass_tensorop_h884dwfprop_id_f16_128x64x32_32x32x32_2_nchw_nchw_align2x1.cu",
"cutlass_tensorop_h884dwfprop_relu_f16_128x64x32_32x32x32_2_nchw_nchw_align2x1.cu",
"cutlass_tensorop_h884dwfprop_id_f16_64x64x32_32x32x32_2_nchw_nchw_align2x1.cu",
"cutlass_tensorop_h884dwfprop_relu_f16_64x64x32_32x32x32_2_nchw_nchw_align2x1.cu",
"cutlass_tensorop_h884dwfprop_id_f16_128x256x32_64x64x32_2_nchw_nchw_align1x1.cu",
"cutlass_tensorop_h884dwfprop_relu_f16_128x256x32_64x64x32_2_nchw_nchw_align1x1.cu",
"cutlass_tensorop_h884dwfprop_id_f16_128x128x32_32x32x32_2_nchw_nchw_align1x1.cu",
"cutlass_tensorop_h884dwfprop_relu_f16_128x128x32_32x32x32_2_nchw_nchw_align1x1.cu",
"cutlass_tensorop_h884dwfprop_id_f16_64x128x32_32x32x32_2_nchw_nchw_align1x1.cu",
"cutlass_tensorop_h884dwfprop_relu_f16_64x128x32_32x32x32_2_nchw_nchw_align1x1.cu",
"cutlass_tensorop_h884dwfprop_id_f16_128x64x32_32x32x32_2_nchw_nchw_align1x1.cu",
"cutlass_tensorop_h884dwfprop_relu_f16_128x64x32_32x32x32_2_nchw_nchw_align1x1.cu",
"cutlass_tensorop_h884dwfprop_id_f16_64x64x32_32x32x32_2_nchw_nchw_align1x1.cu",
"cutlass_tensorop_h884dwfprop_relu_f16_64x64x32_32x32x32_2_nchw_nchw_align1x1.cu",
"all_dwconv2d_fprop_tensorop884_operations.cu",
"cutlass_simt_sdwdgrad_id_f32_32x32x8_32x32x8_2_nchw_nchw_align4x1.cu",
"cutlass_simt_sdwdgrad_id_f32_32x32x8_32x32x8_2_nchw_nchw_align1x1.cu",
"cutlass_simt_sdwdgrad_id_f32_32x64x8_32x64x8_2_nchw_nchw_align4x1.cu",
"cutlass_simt_sdwdgrad_id_f32_32x64x8_32x64x8_2_nchw_nchw_align1x1.cu",
"cutlass_simt_sdwdgrad_id_f32_64x32x8_64x32x8_2_nchw_nchw_align4x1.cu",
"cutlass_simt_sdwdgrad_id_f32_64x32x8_64x32x8_2_nchw_nchw_align1x1.cu",
"cutlass_simt_sdwdgrad_id_f32_32x128x8_32x64x8_2_nchw_nchw_align4x1.cu",
"cutlass_simt_sdwdgrad_id_f32_32x128x8_32x64x8_2_nchw_nchw_align1x1.cu",
"cutlass_simt_sdwdgrad_id_f32_64x64x8_32x64x8_2_nchw_nchw_align4x1.cu",
"cutlass_simt_sdwdgrad_id_f32_64x64x8_32x64x8_2_nchw_nchw_align1x1.cu",
"cutlass_simt_sdwdgrad_id_f32_128x32x8_64x32x8_2_nchw_nchw_align4x1.cu",
"cutlass_simt_sdwdgrad_id_f32_128x32x8_64x32x8_2_nchw_nchw_align1x1.cu",
"cutlass_simt_sdwdgrad_id_f32_64x128x8_32x64x8_2_nchw_nchw_align4x1.cu",
"cutlass_simt_sdwdgrad_id_f32_64x128x8_32x64x8_2_nchw_nchw_align1x1.cu",
"cutlass_simt_sdwdgrad_id_f32_128x64x8_64x32x8_2_nchw_nchw_align4x1.cu",
"cutlass_simt_sdwdgrad_id_f32_128x64x8_64x32x8_2_nchw_nchw_align1x1.cu",
"cutlass_simt_sdwdgrad_id_f32_128x128x8_32x64x8_2_nchw_nchw_align4x1.cu",
"cutlass_simt_sdwdgrad_id_f32_128x128x8_32x64x8_2_nchw_nchw_align1x1.cu",
"all_dwconv2d_dgrad_simt_operations.cu",
"cutlass_tensorop_f16_s884dwdgrad_id_f16_128x256x32_64x64x32_2_nchw_nchw_align8x1.cu",
"cutlass_tensorop_f16_s884dwdgrad_id_f16_128x128x32_32x32x32_2_nchw_nchw_align8x1.cu",
"cutlass_tensorop_f16_s884dwdgrad_id_f16_64x128x32_32x32x32_2_nchw_nchw_align8x1.cu",
"cutlass_tensorop_f16_s884dwdgrad_id_f16_128x64x32_32x32x32_2_nchw_nchw_align8x1.cu",
"cutlass_tensorop_f16_s884dwdgrad_id_f16_64x64x32_32x32x32_2_nchw_nchw_align8x1.cu",
"cutlass_tensorop_f16_s884dwdgrad_id_f16_128x256x32_64x64x32_2_nchw_nchw_align2x1.cu",
"cutlass_tensorop_f16_s884dwdgrad_id_f16_128x128x32_32x32x32_2_nchw_nchw_align2x1.cu",
"cutlass_tensorop_f16_s884dwdgrad_id_f16_64x128x32_32x32x32_2_nchw_nchw_align2x1.cu",
"cutlass_tensorop_f16_s884dwdgrad_id_f16_128x64x32_32x32x32_2_nchw_nchw_align2x1.cu",
"cutlass_tensorop_f16_s884dwdgrad_id_f16_64x64x32_32x32x32_2_nchw_nchw_align2x1.cu",
"cutlass_tensorop_f16_s884dwdgrad_id_f16_128x256x32_64x64x32_2_nchw_nchw_align1x1.cu",
"cutlass_tensorop_f16_s884dwdgrad_id_f16_128x128x32_32x32x32_2_nchw_nchw_align1x1.cu",
"cutlass_tensorop_f16_s884dwdgrad_id_f16_64x128x32_32x32x32_2_nchw_nchw_align1x1.cu",
"cutlass_tensorop_f16_s884dwdgrad_id_f16_128x64x32_32x32x32_2_nchw_nchw_align1x1.cu",
"cutlass_tensorop_f16_s884dwdgrad_id_f16_64x64x32_32x32x32_2_nchw_nchw_align1x1.cu",
"cutlass_tensorop_h884dwdgrad_id_f16_128x256x32_64x64x32_2_nchw_nchw_align8x1.cu",
"cutlass_tensorop_h884dwdgrad_id_f16_128x128x32_32x32x32_2_nchw_nchw_align8x1.cu",
"cutlass_tensorop_h884dwdgrad_id_f16_64x128x32_32x32x32_2_nchw_nchw_align8x1.cu",
"cutlass_tensorop_h884dwdgrad_id_f16_128x64x32_32x32x32_2_nchw_nchw_align8x1.cu",
"cutlass_tensorop_h884dwdgrad_id_f16_64x64x32_32x32x32_2_nchw_nchw_align8x1.cu",
"cutlass_tensorop_h884dwdgrad_id_f16_128x256x32_64x64x32_2_nchw_nchw_align2x1.cu",
"cutlass_tensorop_h884dwdgrad_id_f16_128x128x32_32x32x32_2_nchw_nchw_align2x1.cu",
"cutlass_tensorop_h884dwdgrad_id_f16_64x128x32_32x32x32_2_nchw_nchw_align2x1.cu",
"cutlass_tensorop_h884dwdgrad_id_f16_128x64x32_32x32x32_2_nchw_nchw_align2x1.cu",
"cutlass_tensorop_h884dwdgrad_id_f16_64x64x32_32x32x32_2_nchw_nchw_align2x1.cu",
"cutlass_tensorop_h884dwdgrad_id_f16_128x256x32_64x64x32_2_nchw_nchw_align1x1.cu",
"cutlass_tensorop_h884dwdgrad_id_f16_128x128x32_32x32x32_2_nchw_nchw_align1x1.cu",
"cutlass_tensorop_h884dwdgrad_id_f16_64x128x32_32x32x32_2_nchw_nchw_align1x1.cu",
"cutlass_tensorop_h884dwdgrad_id_f16_128x64x32_32x32x32_2_nchw_nchw_align1x1.cu",
"cutlass_tensorop_h884dwdgrad_id_f16_64x64x32_32x32x32_2_nchw_nchw_align1x1.cu",
"all_dwconv2d_dgrad_tensorop884_operations.cu",
"cutlass_simt_sdwwgrad_id_f32_32x32x8_32x32x8_2_nchw_nchw_align1x1.cu",
"cutlass_simt_sdwwgrad_id_f32_32x64x8_32x64x8_2_nchw_nchw_align1x1.cu",
"cutlass_simt_sdwwgrad_id_f32_64x32x8_64x32x8_2_nchw_nchw_align1x1.cu",
"cutlass_simt_sdwwgrad_id_f32_32x128x8_32x64x8_2_nchw_nchw_align1x1.cu",
"cutlass_simt_sdwwgrad_id_f32_64x64x8_32x64x8_2_nchw_nchw_align1x1.cu",
"cutlass_simt_sdwwgrad_id_f32_128x32x8_64x32x8_2_nchw_nchw_align1x1.cu",
"cutlass_simt_sdwwgrad_id_f32_64x128x8_32x64x8_2_nchw_nchw_align1x1.cu",
"cutlass_simt_sdwwgrad_id_f32_128x64x8_64x32x8_2_nchw_nchw_align1x1.cu",
"cutlass_simt_sdwwgrad_id_f32_128x128x8_32x64x8_2_nchw_nchw_align1x1.cu",
"all_dwconv2d_wgrad_simt_operations.cu",
"cutlass_tensorop_s884dwwgrad_id_f16_128x256x32_64x64x32_2_nchw_nchw_align8x8.cu",
"cutlass_tensorop_s884dwwgrad_id_f16_128x128x32_32x32x32_2_nchw_nchw_align8x8.cu",
"cutlass_tensorop_s884dwwgrad_id_f16_64x128x32_32x32x32_2_nchw_nchw_align8x8.cu",
"cutlass_tensorop_s884dwwgrad_id_f16_128x64x32_32x32x32_2_nchw_nchw_align8x8.cu",
"cutlass_tensorop_s884dwwgrad_id_f16_64x64x32_32x32x32_2_nchw_nchw_align8x8.cu",
"cutlass_tensorop_s884dwwgrad_id_f16_128x256x32_64x64x32_2_nchw_nchw_align8x2.cu",
"cutlass_tensorop_s884dwwgrad_id_f16_128x128x32_32x32x32_2_nchw_nchw_align8x2.cu",
"cutlass_tensorop_s884dwwgrad_id_f16_64x128x32_32x32x32_2_nchw_nchw_align8x2.cu",
"cutlass_tensorop_s884dwwgrad_id_f16_128x64x32_32x32x32_2_nchw_nchw_align8x2.cu",
"cutlass_tensorop_s884dwwgrad_id_f16_64x64x32_32x32x32_2_nchw_nchw_align8x2.cu",
"cutlass_tensorop_s884dwwgrad_id_f16_128x256x32_64x64x32_2_nchw_nchw_align8x1.cu",
"cutlass_tensorop_s884dwwgrad_id_f16_128x128x32_32x32x32_2_nchw_nchw_align8x1.cu",
"cutlass_tensorop_s884dwwgrad_id_f16_64x128x32_32x32x32_2_nchw_nchw_align8x1.cu",
"cutlass_tensorop_s884dwwgrad_id_f16_128x64x32_32x32x32_2_nchw_nchw_align8x1.cu",
"cutlass_tensorop_s884dwwgrad_id_f16_64x64x32_32x32x32_2_nchw_nchw_align8x1.cu",
"cutlass_tensorop_s884dwwgrad_id_f16_128x256x32_64x64x32_2_nchw_nchw_align2x8.cu",
"cutlass_tensorop_s884dwwgrad_id_f16_128x128x32_32x32x32_2_nchw_nchw_align2x8.cu",
"cutlass_tensorop_s884dwwgrad_id_f16_64x128x32_32x32x32_2_nchw_nchw_align2x8.cu",
"cutlass_tensorop_s884dwwgrad_id_f16_128x64x32_32x32x32_2_nchw_nchw_align2x8.cu",
"cutlass_tensorop_s884dwwgrad_id_f16_64x64x32_32x32x32_2_nchw_nchw_align2x8.cu",
"cutlass_tensorop_s884dwwgrad_id_f16_128x256x32_64x64x32_2_nchw_nchw_align2x2.cu",
"cutlass_tensorop_s884dwwgrad_id_f16_128x128x32_32x32x32_2_nchw_nchw_align2x2.cu",
"cutlass_tensorop_s884dwwgrad_id_f16_64x128x32_32x32x32_2_nchw_nchw_align2x2.cu",
"cutlass_tensorop_s884dwwgrad_id_f16_128x64x32_32x32x32_2_nchw_nchw_align2x2.cu",
"cutlass_tensorop_s884dwwgrad_id_f16_64x64x32_32x32x32_2_nchw_nchw_align2x2.cu",
"cutlass_tensorop_s884dwwgrad_id_f16_128x256x32_64x64x32_2_nchw_nchw_align2x1.cu",
"cutlass_tensorop_s884dwwgrad_id_f16_128x128x32_32x32x32_2_nchw_nchw_align2x1.cu",
"cutlass_tensorop_s884dwwgrad_id_f16_64x128x32_32x32x32_2_nchw_nchw_align2x1.cu",
"cutlass_tensorop_s884dwwgrad_id_f16_128x64x32_32x32x32_2_nchw_nchw_align2x1.cu",
"cutlass_tensorop_s884dwwgrad_id_f16_64x64x32_32x32x32_2_nchw_nchw_align2x1.cu",
"cutlass_tensorop_s884dwwgrad_id_f16_128x256x32_64x64x32_2_nchw_nchw_align1x8.cu",
"cutlass_tensorop_s884dwwgrad_id_f16_128x128x32_32x32x32_2_nchw_nchw_align1x8.cu",
"cutlass_tensorop_s884dwwgrad_id_f16_64x128x32_32x32x32_2_nchw_nchw_align1x8.cu",
"cutlass_tensorop_s884dwwgrad_id_f16_128x64x32_32x32x32_2_nchw_nchw_align1x8.cu",
"cutlass_tensorop_s884dwwgrad_id_f16_64x64x32_32x32x32_2_nchw_nchw_align1x8.cu",
"cutlass_tensorop_s884dwwgrad_id_f16_128x256x32_64x64x32_2_nchw_nchw_align1x2.cu",
"cutlass_tensorop_s884dwwgrad_id_f16_128x128x32_32x32x32_2_nchw_nchw_align1x2.cu",
"cutlass_tensorop_s884dwwgrad_id_f16_64x128x32_32x32x32_2_nchw_nchw_align1x2.cu",
"cutlass_tensorop_s884dwwgrad_id_f16_128x64x32_32x32x32_2_nchw_nchw_align1x2.cu",
"cutlass_tensorop_s884dwwgrad_id_f16_64x64x32_32x32x32_2_nchw_nchw_align1x2.cu",
"cutlass_tensorop_s884dwwgrad_id_f16_128x256x32_64x64x32_2_nchw_nchw_align1x1.cu",
"cutlass_tensorop_s884dwwgrad_id_f16_128x128x32_32x32x32_2_nchw_nchw_align1x1.cu",
"cutlass_tensorop_s884dwwgrad_id_f16_64x128x32_32x32x32_2_nchw_nchw_align1x1.cu",
"cutlass_tensorop_s884dwwgrad_id_f16_128x64x32_32x32x32_2_nchw_nchw_align1x1.cu",
"cutlass_tensorop_s884dwwgrad_id_f16_64x64x32_32x32x32_2_nchw_nchw_align1x1.cu",
"all_dwconv2d_wgrad_tensorop884_operations.cu",
]
| 85.396253
| 112
| 0.894771
| 18,050
| 123,056
| 5.259778
| 0.009363
| 0.134802
| 0.19679
| 0.057637
| 0.997651
| 0.996356
| 0.995397
| 0.994228
| 0.955192
| 0.944722
| 0
| 0.267618
| 0.058453
| 123,056
| 1,441
| 113
| 85.396253
| 0.55179
| 0.000439
| 0
| 0
| 1
| 0
| 0.906334
| 0.906334
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 11
|
68440a5018c4b7319d90021ec4c087a4ac490da8
| 203
|
py
|
Python
|
example_project/__init__.py
|
ihumphrey/example_project
|
8d0d765560c8ce0fb65abb61fc8be9d532d7fd79
|
[
"MIT"
] | null | null | null |
example_project/__init__.py
|
ihumphrey/example_project
|
8d0d765560c8ce0fb65abb61fc8be9d532d7fd79
|
[
"MIT"
] | null | null | null |
example_project/__init__.py
|
ihumphrey/example_project
|
8d0d765560c8ce0fb65abb61fc8be9d532d7fd79
|
[
"MIT"
] | null | null | null |
"""Top-level package for Example Project."""
from . import _version
import sys
__version__ = _version.get_versions()['version']
from . import _version
__version__ = _version.get_versions()['version']
| 20.3
| 48
| 0.753695
| 24
| 203
| 5.791667
| 0.5
| 0.302158
| 0.244604
| 0.359712
| 0.460432
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.118227
| 203
| 9
| 49
| 22.555556
| 0.776536
| 0.187192
| 0
| 0.8
| 0
| 0
| 0.08805
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.6
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
68ab6e9b4c232f8c1742e5a9cd264d37347a634d
| 5,883
|
py
|
Python
|
src/genie/libs/parser/iosxr/tests/ShowMldGroupsDetail/cli/equal/golden_output_1_expected.py
|
balmasea/genieparser
|
d1e71a96dfb081e0a8591707b9d4872decd5d9d3
|
[
"Apache-2.0"
] | 204
|
2018-06-27T00:55:27.000Z
|
2022-03-06T21:12:18.000Z
|
src/genie/libs/parser/iosxr/tests/ShowMldGroupsDetail/cli/equal/golden_output_1_expected.py
|
balmasea/genieparser
|
d1e71a96dfb081e0a8591707b9d4872decd5d9d3
|
[
"Apache-2.0"
] | 468
|
2018-06-19T00:33:18.000Z
|
2022-03-31T23:23:35.000Z
|
src/genie/libs/parser/iosxr/tests/ShowMldGroupsDetail/cli/equal/golden_output_1_expected.py
|
balmasea/genieparser
|
d1e71a96dfb081e0a8591707b9d4872decd5d9d3
|
[
"Apache-2.0"
] | 309
|
2019-01-16T20:21:07.000Z
|
2022-03-30T12:56:41.000Z
|
expected_output = {
'vrf': {
'default': {
'interface': {
'GigabitEthernet0/0/0/0': {
'group': {
'ff02::16': {
'expire': 'never',
'router_mode': 'exclude',
'host_mode': 'exclude',
'last_reporter': 'fe80::5054:ff:fefa:9ad7',
'up_time': '1d06h'
},
'ff02::1:ff28:cd4b': {
'expire': '01:00:01',
'router_mode': 'exclude',
'host_mode': 'include',
'last_reporter': 'fe80::eca7:a4ff:fe28:cd4b',
'up_time': '1d06h'
},
'ff02::1:ff60:50aa': {
'expire': '01:00:01',
'router_mode': 'exclude',
'host_mode': 'include',
'last_reporter': 'fe80::eca7:a4ff:fe28:cd4b',
'up_time': '1d06h'
},
'ff02::1:ffae:4aba': {
'expire': '01:00:01',
'router_mode': 'exclude',
'host_mode': 'include',
'last_reporter': 'fe80::eca7:a4ff:fe28:cd4b',
'up_time': '1d06h'
},
'ff02::1:ffd7:c01f': {
'expire': '00:29:15',
'router_mode': 'exclude',
'host_mode': 'include',
'last_reporter': 'fe80::5054:ff:fed7:c01f',
'up_time': '00:33:19'
},
'ff02::1:ffda:f428': {
'expire': '01:00:01',
'router_mode': 'exclude',
'host_mode': 'include',
'last_reporter': 'fe80::eca7:a4ff:fe28:cd4b',
'up_time': '06:27:46'
},
'ff02::2': {
'expire': 'never',
'router_mode': 'exclude',
'host_mode': 'exclude',
'last_reporter': 'fe80::5054:ff:fefa:9ad7',
'up_time': '1d06h'
},
'ff02::d': {
'expire': 'never',
'router_mode': 'exclude',
'host_mode': 'exclude',
'last_reporter': 'fe80::5054:ff:fefa:9ad7',
'up_time': '1d06h'
},
'ff15:1::1': {
'router_mode': 'include',
'host_mode': 'include',
'last_reporter': 'fe80::5054:ff:fefa:9ad7',
'source': {
'2001:db8:2:2::2': {
'expire': '01:00:00',
'flags': 'Remote Local 2d',
'forward': True,
'up_time': '08:06:00'
}
},
'up_time': '08:06:00'
},
'ff25:2::1': {
'expire': 'never',
'router_mode': 'exclude',
'host_mode': 'exclude',
'last_reporter': 'fe80::5054:ff:fefa:9ad7',
'up_time': '08:06:00'
},
'ff35:1::1': {
'router_mode': 'include',
'host_mode': 'include',
'last_reporter': 'fe80::5054:ff:fefa:9ad7',
'source': {
'2001:db8:3:3::3': {
'expire': '01:00:00',
'flags': 'Remote Local e',
'forward': True,
'up_time': '00:33:28'
}
},
'up_time': '00:33:28'
},
'ff45:1::1': {
'expire': 'never',
'router_mode': 'exclude',
'host_mode': 'exclude',
'last_reporter': 'fe80::5054:ff:fefa:9ad7',
'up_time': '00:33:28'
},
'fffe::1': {
'expire': '00:59:49',
'router_mode': 'exclude',
'host_mode': 'include',
'last_reporter': 'fe80::5054:ff:fed7:c01f',
'up_time': '07:59:31'
}
},
'join_group': {
'ff15:1::1 2001:db8:2:2::2': {
'group': 'ff15:1::1',
'source': '2001:db8:2:2::2'
}
},
'static_group': {
'ff35:1::1 2001:db8:3:3::3': {
'group': 'ff35:1::1',
'source': '2001:db8:3:3::3'
}
}
}
}
}
}
}
| 44.908397
| 73
| 0.264831
| 388
| 5,883
| 3.868557
| 0.198454
| 0.117255
| 0.138574
| 0.153897
| 0.830779
| 0.774151
| 0.748834
| 0.711526
| 0.711526
| 0.711526
| 0
| 0.149138
| 0.605643
| 5,883
| 130
| 74
| 45.253846
| 0.497845
| 0
| 0
| 0.511628
| 0
| 0
| 0.293268
| 0.055933
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
d7b15e0ff41af35d85ee96a91978868dba7ecba6
| 301
|
py
|
Python
|
tests/data/fmtskip5.py
|
BigNuoLi/black
|
71e71e5f52e5f6bdeae63cc8c11b1bee44d11c30
|
[
"MIT"
] | 16,110
|
2019-07-22T21:54:54.000Z
|
2022-03-31T22:52:39.000Z
|
tests/data/fmtskip5.py
|
marnixah/black-but-usable
|
83b83d3066d1d857983bfa1a666a409e7255d79d
|
[
"MIT"
] | 1,981
|
2019-07-22T21:26:16.000Z
|
2022-03-31T23:14:35.000Z
|
tests/data/fmtskip5.py
|
marnixah/black-but-usable
|
83b83d3066d1d857983bfa1a666a409e7255d79d
|
[
"MIT"
] | 1,762
|
2019-07-22T21:23:00.000Z
|
2022-03-31T06:10:22.000Z
|
a, b, c = 3, 4, 5
if (
a == 3
and b != 9 # fmt: skip
and c is not None
):
print("I'm good!")
else:
print("I'm bad")
# output
a, b, c = 3, 4, 5
if (
a == 3
and b != 9 # fmt: skip
and c is not None
):
print("I'm good!")
else:
print("I'm bad")
| 13.086957
| 30
| 0.418605
| 57
| 301
| 2.210526
| 0.368421
| 0.190476
| 0.222222
| 0.063492
| 0.952381
| 0.952381
| 0.952381
| 0.952381
| 0.952381
| 0.952381
| 0
| 0.055866
| 0.405316
| 301
| 22
| 31
| 13.681818
| 0.648045
| 0.086379
| 0
| 1
| 0
| 0
| 0.118081
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0.222222
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
d7ea7b0c541a7eae6337932663ca96ca35b0b3da
| 176
|
py
|
Python
|
ocrr.py
|
Arjitg450/Python-Programs
|
0630422c9002632a91b5ccf75f6cd02308c6e929
|
[
"MIT"
] | null | null | null |
ocrr.py
|
Arjitg450/Python-Programs
|
0630422c9002632a91b5ccf75f6cd02308c6e929
|
[
"MIT"
] | null | null | null |
ocrr.py
|
Arjitg450/Python-Programs
|
0630422c9002632a91b5ccf75f6cd02308c6e929
|
[
"MIT"
] | null | null | null |
from PIL import Image
import pytesseract
print(pytesseract.image_to_string(Image.open('E:\\aa.png')))
print(pytesseract.image_to_string(Image.open('E:\\aa.png'), lang='eng'))
| 29.333333
| 72
| 0.761364
| 28
| 176
| 4.642857
| 0.5
| 0.246154
| 0.323077
| 0.353846
| 0.676923
| 0.676923
| 0.676923
| 0.676923
| 0.676923
| 0.676923
| 0
| 0
| 0.056818
| 176
| 5
| 73
| 35.2
| 0.783133
| 0
| 0
| 0
| 0
| 0
| 0.130682
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 9
|
cc2032061afb599f9c1abfbcba8193fb1728c5f8
| 134
|
py
|
Python
|
pyautofinance/common/plotting/__init__.py
|
webclinic017/PyAutoFinance
|
532cb1c5418dd9eeb07f2f08646170cde1fe0303
|
[
"MIT"
] | null | null | null |
pyautofinance/common/plotting/__init__.py
|
webclinic017/PyAutoFinance
|
532cb1c5418dd9eeb07f2f08646170cde1fe0303
|
[
"MIT"
] | null | null | null |
pyautofinance/common/plotting/__init__.py
|
webclinic017/PyAutoFinance
|
532cb1c5418dd9eeb07f2f08646170cde1fe0303
|
[
"MIT"
] | 1
|
2022-02-24T09:18:13.000Z
|
2022-02-24T09:18:13.000Z
|
from pyautofinance.common.plotting.live_plotter import LivePlotter
from pyautofinance.common.plotting.back_plotter import BackPlotter
| 44.666667
| 66
| 0.895522
| 16
| 134
| 7.375
| 0.625
| 0.288136
| 0.389831
| 0.525424
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.059701
| 134
| 2
| 67
| 67
| 0.936508
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
cc2fbf1668de8ddea5a97de5a3526a2ed4340909
| 470
|
py
|
Python
|
gameOfLife/test.py
|
Bertik23/spg
|
f6449f1ca8f3a869f0f493f3988b3d84901c1be0
|
[
"MIT"
] | null | null | null |
gameOfLife/test.py
|
Bertik23/spg
|
f6449f1ca8f3a869f0f493f3988b3d84901c1be0
|
[
"MIT"
] | null | null | null |
gameOfLife/test.py
|
Bertik23/spg
|
f6449f1ca8f3a869f0f493f3988b3d84901c1be0
|
[
"MIT"
] | null | null | null |
from itertools import product

# Print every 8-bit binary string, 00000000 .. 11111111.
# itertools.product(range(2), repeat=8) yields tuples in exactly the same
# lexicographic order as the original eight nested for-loops, so the
# printed output is byte-identical while the code stays flat.
for bits in product(range(2), repeat=8):
    print("".join(str(b) for b in bits))

# Same idea for every 4-bit binary string, 0000 .. 1111.
for bits in product(range(2), repeat=4):
    print("".join(str(b) for b in bits))
| 31.333333
| 66
| 0.357447
| 76
| 470
| 2.210526
| 0.171053
| 0.5
| 0.571429
| 0.654762
| 0.738095
| 0.738095
| 0.738095
| 0.738095
| 0.738095
| 0.571429
| 0
| 0.050847
| 0.497872
| 470
| 15
| 67
| 31.333333
| 0.661017
| 0
| 0
| 0.571429
| 0
| 0
| 0.076433
| 0.050955
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.142857
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0400059a157f4aa825beaef36545cbf62762aecf
| 18,237
|
py
|
Python
|
sdk/python/pulumi_aws/ec2/fleet.py
|
johnktims/pulumi-aws
|
c838bc79043f5376c66fc66275a1e012edd3ab7d
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/ec2/fleet.py
|
johnktims/pulumi-aws
|
c838bc79043f5376c66fc66275a1e012edd3ab7d
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/ec2/fleet.py
|
johnktims/pulumi-aws
|
c838bc79043f5376c66fc66275a1e012edd3ab7d
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class Fleet(pulumi.CustomResource):
    # NOTE: auto-generated by the Pulumi Terraform Bridge (tfgen) — see the
    # file header; do not edit by hand. Wraps the AWS EC2 Fleet resource
    # (`aws:ec2/fleet:Fleet`).
    excess_capacity_termination_policy: pulumi.Output[str]
    """
    Whether running instances should be terminated if the total target capacity of the EC2 Fleet is decreased below the current size of the EC2. Valid values: `no-termination`, `termination`. Defaults to `termination`.
    """
    launch_template_config: pulumi.Output[dict]
    """
    Nested argument containing EC2 Launch Template configurations. Defined below.

      * `launchTemplateSpecification` (`dict`) - Nested argument containing EC2 Launch Template to use. Defined below.
        * `launchTemplateId` (`str`) - ID of the launch template.
        * `launchTemplateName` (`str`) - Name of the launch template.
        * `version` (`str`) - Version number of the launch template.
      * `overrides` (`list`) - Nested argument(s) containing parameters to override the same parameters in the Launch Template. Defined below.
        * `availability_zone` (`str`) - Availability Zone in which to launch the instances.
        * `instance_type` (`str`) - Instance type.
        * `maxPrice` (`str`) - Maximum price per unit hour that you are willing to pay for a Spot Instance.
        * `priority` (`float`) - Priority for the launch template override. If `on_demand_options` `allocation_strategy` is set to `prioritized`, EC2 Fleet uses priority to determine which launch template override to use first in fulfilling On-Demand capacity. The highest priority is launched first. The lower the number, the higher the priority. If no number is set, the launch template override has the lowest priority. Valid values are whole numbers starting at 0.
        * `subnet_id` (`str`) - ID of the subnet in which to launch the instances.
        * `weightedCapacity` (`float`) - Number of units provided by the specified instance type.
    """
    on_demand_options: pulumi.Output[dict]
    """
    Nested argument containing On-Demand configurations. Defined below.

      * `allocation_strategy` (`str`) - How to allocate the target capacity across the Spot pools. Valid values: `diversified`, `lowestPrice`. Default: `lowestPrice`.
    """
    replace_unhealthy_instances: pulumi.Output[bool]
    """
    Whether EC2 Fleet should replace unhealthy instances. Defaults to `false`.
    """
    spot_options: pulumi.Output[dict]
    """
    Nested argument containing Spot configurations. Defined below.

      * `allocation_strategy` (`str`) - How to allocate the target capacity across the Spot pools. Valid values: `diversified`, `lowestPrice`. Default: `lowestPrice`.
      * `instanceInterruptionBehavior` (`str`) - Behavior when a Spot Instance is interrupted. Valid values: `hibernate`, `stop`, `terminate`. Default: `terminate`.
      * `instance_pools_to_use_count` (`float`) - Number of Spot pools across which to allocate your target Spot capacity. Valid only when Spot `allocation_strategy` is set to `lowestPrice`. Default: `1`.
    """
    tags: pulumi.Output[dict]
    """
    Map of Fleet tags. To tag instances at launch, specify the tags in the Launch Template.
    """
    target_capacity_specification: pulumi.Output[dict]
    """
    Nested argument containing target capacity configurations. Defined below.

      * `defaultTargetCapacityType` (`str`) - Default target capacity type. Valid values: `on-demand`, `spot`.
      * `onDemandTargetCapacity` (`float`) - The number of On-Demand units to request.
      * `spotTargetCapacity` (`float`) - The number of Spot units to request.
      * `totalTargetCapacity` (`float`) - The number of units to request, filled using `default_target_capacity_type`.
    """
    terminate_instances: pulumi.Output[bool]
    """
    Whether to terminate instances for an EC2 Fleet if it is deleted successfully. Defaults to `false`.
    """
    terminate_instances_with_expiration: pulumi.Output[bool]
    """
    Whether running instances should be terminated when the EC2 Fleet expires. Defaults to `false`.
    """
    type: pulumi.Output[str]
    """
    The type of request. Indicates whether the EC2 Fleet only requests the target capacity, or also attempts to maintain it. Valid values: `maintain`, `request`. Defaults to `maintain`.
    """
    def __init__(__self__, resource_name, opts=None, excess_capacity_termination_policy=None, launch_template_config=None, on_demand_options=None, replace_unhealthy_instances=None, spot_options=None, tags=None, target_capacity_specification=None, terminate_instances=None, terminate_instances_with_expiration=None, type=None, __props__=None, __name__=None, __opts__=None):
        """
        Provides a resource to manage EC2 Fleets.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] excess_capacity_termination_policy: Whether running instances should be terminated if the total target capacity of the EC2 Fleet is decreased below the current size of the EC2. Valid values: `no-termination`, `termination`. Defaults to `termination`.
        :param pulumi.Input[dict] launch_template_config: Nested argument containing EC2 Launch Template configurations. Defined below.
        :param pulumi.Input[dict] on_demand_options: Nested argument containing On-Demand configurations. Defined below.
        :param pulumi.Input[bool] replace_unhealthy_instances: Whether EC2 Fleet should replace unhealthy instances. Defaults to `false`.
        :param pulumi.Input[dict] spot_options: Nested argument containing Spot configurations. Defined below.
        :param pulumi.Input[dict] tags: Map of Fleet tags. To tag instances at launch, specify the tags in the Launch Template.
        :param pulumi.Input[dict] target_capacity_specification: Nested argument containing target capacity configurations. Defined below.
        :param pulumi.Input[bool] terminate_instances: Whether to terminate instances for an EC2 Fleet if it is deleted successfully. Defaults to `false`.
        :param pulumi.Input[bool] terminate_instances_with_expiration: Whether running instances should be terminated when the EC2 Fleet expires. Defaults to `false`.
        :param pulumi.Input[str] type: The type of request. Indicates whether the EC2 Fleet only requests the target capacity, or also attempts to maintain it. Valid values: `maintain`, `request`. Defaults to `maintain`.

        The **launch_template_config** object supports the following:

          * `launchTemplateSpecification` (`pulumi.Input[dict]`) - Nested argument containing EC2 Launch Template to use. Defined below.
            * `launchTemplateId` (`pulumi.Input[str]`) - ID of the launch template.
            * `launchTemplateName` (`pulumi.Input[str]`) - Name of the launch template.
            * `version` (`pulumi.Input[str]`) - Version number of the launch template.
          * `overrides` (`pulumi.Input[list]`) - Nested argument(s) containing parameters to override the same parameters in the Launch Template. Defined below.
            * `availability_zone` (`pulumi.Input[str]`) - Availability Zone in which to launch the instances.
            * `instance_type` (`pulumi.Input[str]`) - Instance type.
            * `maxPrice` (`pulumi.Input[str]`) - Maximum price per unit hour that you are willing to pay for a Spot Instance.
            * `priority` (`pulumi.Input[float]`) - Priority for the launch template override. If `on_demand_options` `allocation_strategy` is set to `prioritized`, EC2 Fleet uses priority to determine which launch template override to use first in fulfilling On-Demand capacity. The highest priority is launched first. The lower the number, the higher the priority. If no number is set, the launch template override has the lowest priority. Valid values are whole numbers starting at 0.
            * `subnet_id` (`pulumi.Input[str]`) - ID of the subnet in which to launch the instances.
            * `weightedCapacity` (`pulumi.Input[float]`) - Number of units provided by the specified instance type.

        The **on_demand_options** object supports the following:

          * `allocation_strategy` (`pulumi.Input[str]`) - How to allocate the target capacity across the Spot pools. Valid values: `diversified`, `lowestPrice`. Default: `lowestPrice`.

        The **spot_options** object supports the following:

          * `allocation_strategy` (`pulumi.Input[str]`) - How to allocate the target capacity across the Spot pools. Valid values: `diversified`, `lowestPrice`. Default: `lowestPrice`.
          * `instanceInterruptionBehavior` (`pulumi.Input[str]`) - Behavior when a Spot Instance is interrupted. Valid values: `hibernate`, `stop`, `terminate`. Default: `terminate`.
          * `instance_pools_to_use_count` (`pulumi.Input[float]`) - Number of Spot pools across which to allocate your target Spot capacity. Valid only when Spot `allocation_strategy` is set to `lowestPrice`. Default: `1`.

        The **target_capacity_specification** object supports the following:

          * `defaultTargetCapacityType` (`pulumi.Input[str]`) - Default target capacity type. Valid values: `on-demand`, `spot`.
          * `onDemandTargetCapacity` (`pulumi.Input[float]`) - The number of On-Demand units to request.
          * `spotTargetCapacity` (`pulumi.Input[float]`) - The number of Spot units to request.
          * `totalTargetCapacity` (`pulumi.Input[float]`) - The number of units to request, filled using `default_target_capacity_type`.
        """
        # Legacy keyword aliases: __name__/__opts__ predate resource_name/opts
        # and are still honored, with a deprecation warning.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = utilities.get_version()
        # opts.id set means "adopt an existing resource": the caller must then
        # supply state via __props__ (see get()); otherwise build the property
        # bag from the constructor arguments.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()

            __props__['excess_capacity_termination_policy'] = excess_capacity_termination_policy
            # launch_template_config is required by the provider schema.
            if launch_template_config is None:
                raise TypeError("Missing required property 'launch_template_config'")
            __props__['launch_template_config'] = launch_template_config
            __props__['on_demand_options'] = on_demand_options
            __props__['replace_unhealthy_instances'] = replace_unhealthy_instances
            __props__['spot_options'] = spot_options
            __props__['tags'] = tags
            # target_capacity_specification is required by the provider schema.
            if target_capacity_specification is None:
                raise TypeError("Missing required property 'target_capacity_specification'")
            __props__['target_capacity_specification'] = target_capacity_specification
            __props__['terminate_instances'] = terminate_instances
            __props__['terminate_instances_with_expiration'] = terminate_instances_with_expiration
            __props__['type'] = type
        super(Fleet, __self__).__init__(
            'aws:ec2/fleet:Fleet',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name, id, opts=None, excess_capacity_termination_policy=None, launch_template_config=None, on_demand_options=None, replace_unhealthy_instances=None, spot_options=None, tags=None, target_capacity_specification=None, terminate_instances=None, terminate_instances_with_expiration=None, type=None):
        """
        Get an existing Fleet resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param str id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] excess_capacity_termination_policy: Whether running instances should be terminated if the total target capacity of the EC2 Fleet is decreased below the current size of the EC2. Valid values: `no-termination`, `termination`. Defaults to `termination`.
        :param pulumi.Input[dict] launch_template_config: Nested argument containing EC2 Launch Template configurations. Defined below.
        :param pulumi.Input[dict] on_demand_options: Nested argument containing On-Demand configurations. Defined below.
        :param pulumi.Input[bool] replace_unhealthy_instances: Whether EC2 Fleet should replace unhealthy instances. Defaults to `false`.
        :param pulumi.Input[dict] spot_options: Nested argument containing Spot configurations. Defined below.
        :param pulumi.Input[dict] tags: Map of Fleet tags. To tag instances at launch, specify the tags in the Launch Template.
        :param pulumi.Input[dict] target_capacity_specification: Nested argument containing target capacity configurations. Defined below.
        :param pulumi.Input[bool] terminate_instances: Whether to terminate instances for an EC2 Fleet if it is deleted successfully. Defaults to `false`.
        :param pulumi.Input[bool] terminate_instances_with_expiration: Whether running instances should be terminated when the EC2 Fleet expires. Defaults to `false`.
        :param pulumi.Input[str] type: The type of request. Indicates whether the EC2 Fleet only requests the target capacity, or also attempts to maintain it. Valid values: `maintain`, `request`. Defaults to `maintain`.

        The **launch_template_config** object supports the following:

          * `launchTemplateSpecification` (`pulumi.Input[dict]`) - Nested argument containing EC2 Launch Template to use. Defined below.
            * `launchTemplateId` (`pulumi.Input[str]`) - ID of the launch template.
            * `launchTemplateName` (`pulumi.Input[str]`) - Name of the launch template.
            * `version` (`pulumi.Input[str]`) - Version number of the launch template.
          * `overrides` (`pulumi.Input[list]`) - Nested argument(s) containing parameters to override the same parameters in the Launch Template. Defined below.
            * `availability_zone` (`pulumi.Input[str]`) - Availability Zone in which to launch the instances.
            * `instance_type` (`pulumi.Input[str]`) - Instance type.
            * `maxPrice` (`pulumi.Input[str]`) - Maximum price per unit hour that you are willing to pay for a Spot Instance.
            * `priority` (`pulumi.Input[float]`) - Priority for the launch template override. If `on_demand_options` `allocation_strategy` is set to `prioritized`, EC2 Fleet uses priority to determine which launch template override to use first in fulfilling On-Demand capacity. The highest priority is launched first. The lower the number, the higher the priority. If no number is set, the launch template override has the lowest priority. Valid values are whole numbers starting at 0.
            * `subnet_id` (`pulumi.Input[str]`) - ID of the subnet in which to launch the instances.
            * `weightedCapacity` (`pulumi.Input[float]`) - Number of units provided by the specified instance type.

        The **on_demand_options** object supports the following:

          * `allocation_strategy` (`pulumi.Input[str]`) - How to allocate the target capacity across the Spot pools. Valid values: `diversified`, `lowestPrice`. Default: `lowestPrice`.

        The **spot_options** object supports the following:

          * `allocation_strategy` (`pulumi.Input[str]`) - How to allocate the target capacity across the Spot pools. Valid values: `diversified`, `lowestPrice`. Default: `lowestPrice`.
          * `instanceInterruptionBehavior` (`pulumi.Input[str]`) - Behavior when a Spot Instance is interrupted. Valid values: `hibernate`, `stop`, `terminate`. Default: `terminate`.
          * `instance_pools_to_use_count` (`pulumi.Input[float]`) - Number of Spot pools across which to allocate your target Spot capacity. Valid only when Spot `allocation_strategy` is set to `lowestPrice`. Default: `1`.

        The **target_capacity_specification** object supports the following:

          * `defaultTargetCapacityType` (`pulumi.Input[str]`) - Default target capacity type. Valid values: `on-demand`, `spot`.
          * `onDemandTargetCapacity` (`pulumi.Input[float]`) - The number of On-Demand units to request.
          * `spotTargetCapacity` (`pulumi.Input[float]`) - The number of Spot units to request.
          * `totalTargetCapacity` (`pulumi.Input[float]`) - The number of units to request, filled using `default_target_capacity_type`.
        """
        # Re-construct the resource against an existing provider ID; no
        # required-property checks here, since the state already exists.
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        __props__ = dict()

        __props__["excess_capacity_termination_policy"] = excess_capacity_termination_policy
        __props__["launch_template_config"] = launch_template_config
        __props__["on_demand_options"] = on_demand_options
        __props__["replace_unhealthy_instances"] = replace_unhealthy_instances
        __props__["spot_options"] = spot_options
        __props__["tags"] = tags
        __props__["target_capacity_specification"] = target_capacity_specification
        __props__["terminate_instances"] = terminate_instances
        __props__["terminate_instances_with_expiration"] = terminate_instances_with_expiration
        __props__["type"] = type
        return Fleet(resource_name, opts=opts, __props__=__props__)

    def translate_output_property(self, prop):
        # Map provider camelCase output names to Python snake_case.
        return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop

    def translate_input_property(self, prop):
        # Map Python snake_case input names to provider camelCase.
        return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 76.62605
| 486
| 0.719965
| 2,229
| 18,237
| 5.712876
| 0.106774
| 0.050102
| 0.028585
| 0.015706
| 0.874038
| 0.862101
| 0.856447
| 0.837679
| 0.821344
| 0.811921
| 0
| 0.002447
| 0.193398
| 18,237
| 237
| 487
| 76.949367
| 0.863222
| 0.521358
| 0
| 0.027778
| 1
| 0
| 0.176722
| 0.078568
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0.013889
| 0.083333
| 0.027778
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
042ae980ee8fddcecd775808d54d940227f36412
| 312,036
|
py
|
Python
|
msgraph-cli-extensions/beta/teams_beta/azext_teams_beta/generated/_params.py
|
thewahome/msgraph-cli
|
33127d9efa23a0e5f5303c93242fbdbb73348671
|
[
"MIT"
] | null | null | null |
msgraph-cli-extensions/beta/teams_beta/azext_teams_beta/generated/_params.py
|
thewahome/msgraph-cli
|
33127d9efa23a0e5f5303c93242fbdbb73348671
|
[
"MIT"
] | null | null | null |
msgraph-cli-extensions/beta/teams_beta/azext_teams_beta/generated/_params.py
|
thewahome/msgraph-cli
|
33127d9efa23a0e5f5303c93242fbdbb73348671
|
[
"MIT"
] | null | null | null |
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# pylint: disable=too-many-lines
# pylint: disable=too-many-statements
from msgraph.cli.core.commands.parameters import (
get_three_state_flag,
get_enum_type,
get_location_type
)
from msgraph.cli.core.commands.validators import validate_file_or_dict
from azext_teams_beta.action import (
AddApplication,
AddUsersMembers,
AddAttachments,
AddBody,
AddChannelIdentity,
AddHostedContents,
AddPolicyTip,
AddConfiguration,
AddTopic,
AddChatsTemplateParameters,
AddChatsMembersValues,
AddFunSettings,
AddGuestSettings,
AddMemberSettings,
AddMessagingSettings,
AddGroupsMembers,
AddGroupsPhoto,
AddOfferShiftRequests,
AddOpenShiftChangeRequests,
AddSchedulingGroups,
AddSwapShiftsChangeRequests,
AddTimeOffReasons,
AddTimeOffRequests,
AddApprovedLocation,
AddModerationSettings,
AddTeamsMembers,
AddError,
AddTeamsTemplateParameters,
AddSharepointIds,
AddAudio,
AddFileSystemInfo,
AddImage,
AddTeamsChannelsPhoto,
AddPublication,
AddVideo,
AddSubscriptions,
AddVersions,
AddMicrosoftGraphWorkbookApplication,
AddFunctions,
AddPackage,
AddSpecialFolder,
AddView,
AddHashes,
AddAlbum,
AddTeamsChannelsMembersValues,
AddTeamsMembersValues,
AddTeamsPrimarychannelMembersValues,
AddDraftOpenShift,
AddActivities,
AddDraftTimeOff,
AddEncryption
)
def load_arguments(self, _):
    """Register argument definitions for every `teams` command group.

    Auto-generated by AutoRest — do not hand-edit registrations; changes are
    lost on regeneration. For each command (e.g. 'teams chat create-message')
    an `argument_context` block declares its CLI parameters: `type=str` for
    scalar ids/strings, `get_enum_type([...])` for closed value sets,
    `validate_file_or_dict` for JSON payloads (inline json-string or
    @json-file), and `action=Add*`/`nargs='+'` for complex key=value objects.

    :param self: the command loader supplying `argument_context`.
    :param _: unused positional placeholder required by the loader contract.
    """
with self.argument_context('teams app-catalog create-team-app') as c:
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('display_name', type=str, help='The name of the catalog app provided by the app developer in the '
'Microsoft Teams zip app package.')
c.argument('distribution_method', arg_type=get_enum_type(['store', 'organization', 'sideloaded',
'unknownFutureValue']), help='')
c.argument('external_id', type=str, help='The ID of the catalog provided by the app developer in the Microsoft '
'Teams zip app package.')
c.argument('app_definitions', type=validate_file_or_dict, help='The details for each version of the app. '
'Expected value: json-string/@json-file.')
with self.argument_context('teams app-catalog delete-team-app') as c:
c.argument('teams_app_id', type=str, help='key: id of teamsApp')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('teams app-catalog list-team-app') as c:
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('teams app-catalog show-team-app') as c:
c.argument('teams_app_id', type=str, help='key: id of teamsApp')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('teams app-catalog update-team-app') as c:
c.argument('teams_app_id', type=str, help='key: id of teamsApp')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('display_name', type=str, help='The name of the catalog app provided by the app developer in the '
'Microsoft Teams zip app package.')
c.argument('distribution_method', arg_type=get_enum_type(['store', 'organization', 'sideloaded',
'unknownFutureValue']), help='')
c.argument('external_id', type=str, help='The ID of the catalog provided by the app developer in the Microsoft '
'Teams zip app package.')
c.argument('app_definitions', type=validate_file_or_dict, help='The details for each version of the app. '
'Expected value: json-string/@json-file.')
with self.argument_context('teams app-catalog-team-app create-app-definition') as c:
c.argument('teams_app_id', type=str, help='key: id of teamsApp')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('azure_ad_app_id', type=str, help='')
c.argument('description', type=str, help='')
c.argument('display_name', type=str, help='The name of the app provided by the app developer.')
c.argument('last_modified_date_time', help='')
c.argument('publishing_state', arg_type=get_enum_type(['submitted', 'rejected', 'published',
'unknownFutureValue']), help='')
c.argument('shortdescription', type=str, help='')
c.argument('microsoft_graph_teams_app_definition_teams_app_id_teams_app_id', type=str, help='The ID from the '
'Teams app manifest.')
c.argument('version', type=str, help='The version number of the application.')
c.argument('application', action=AddApplication, nargs='+', help='identity', arg_group='Created By')
c.argument('device', action=AddApplication, nargs='+', help='identity', arg_group='Created By')
c.argument('user', action=AddApplication, nargs='+', help='identity', arg_group='Created By')
with self.argument_context('teams app-catalog-team-app delete-app-definition') as c:
c.argument('teams_app_id', type=str, help='key: id of teamsApp')
c.argument('teams_app_definition_id', type=str, help='key: id of teamsAppDefinition')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('teams app-catalog-team-app list-app-definition') as c:
c.argument('teams_app_id', type=str, help='key: id of teamsApp')
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('teams app-catalog-team-app show-app-definition') as c:
c.argument('teams_app_id', type=str, help='key: id of teamsApp')
c.argument('teams_app_definition_id', type=str, help='key: id of teamsAppDefinition')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('teams app-catalog-team-app update-app-definition') as c:
c.argument('teams_app_id', type=str, help='key: id of teamsApp')
c.argument('teams_app_definition_id', type=str, help='key: id of teamsAppDefinition')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('azure_ad_app_id', type=str, help='')
c.argument('description', type=str, help='')
c.argument('display_name', type=str, help='The name of the app provided by the app developer.')
c.argument('last_modified_date_time', help='')
c.argument('publishing_state', arg_type=get_enum_type(['submitted', 'rejected', 'published',
'unknownFutureValue']), help='')
c.argument('shortdescription', type=str, help='')
c.argument('microsoft_graph_teams_app_definition_teams_app_id_teams_app_id', type=str, help='The ID from the '
'Teams app manifest.')
c.argument('version', type=str, help='The version number of the application.')
c.argument('application', action=AddApplication, nargs='+', help='identity', arg_group='Created By')
c.argument('device', action=AddApplication, nargs='+', help='identity', arg_group='Created By')
c.argument('user', action=AddApplication, nargs='+', help='identity', arg_group='Created By')
with self.argument_context('teams chat-chat create-chat') as c:
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('created_date_time', help='')
c.argument('last_updated_date_time', help='')
c.argument('topic', type=str, help='')
c.argument('installed_apps', type=validate_file_or_dict, help=' Expected value: json-string/@json-file.')
c.argument('members', action=AddUsersMembers, nargs='+', help='')
c.argument('messages', type=validate_file_or_dict, help=' Expected value: json-string/@json-file.')
c.argument('tabs', type=validate_file_or_dict, help=' Expected value: json-string/@json-file.')
with self.argument_context('teams chat-chat delete-chat') as c:
c.argument('chat_id', type=str, help='key: id of chat')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('teams chat-chat list-chat') as c:
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('teams chat-chat show-chat') as c:
c.argument('chat_id', type=str, help='key: id of chat')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('teams chat-chat update-chat') as c:
c.argument('chat_id', type=str, help='key: id of chat')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('created_date_time', help='')
c.argument('last_updated_date_time', help='')
c.argument('topic', type=str, help='')
c.argument('installed_apps', type=validate_file_or_dict, help=' Expected value: json-string/@json-file.')
c.argument('members', action=AddUsersMembers, nargs='+', help='')
c.argument('messages', type=validate_file_or_dict, help=' Expected value: json-string/@json-file.')
c.argument('tabs', type=validate_file_or_dict, help=' Expected value: json-string/@json-file.')
with self.argument_context('teams chat create-installed-app') as c:
c.argument('chat_id', type=str, help='key: id of chat')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('microsoft_graph_entity_id', type=str, help='Read-only.', arg_group='Teams App Definition')
c.argument('azure_ad_app_id', type=str, help='', arg_group='Teams App Definition')
c.argument('description', type=str, help='', arg_group='Teams App Definition')
c.argument('display_name', type=str, help='The name of the app provided by the app developer.',
arg_group='Teams App Definition')
c.argument('last_modified_date_time', help='', arg_group='Teams App Definition')
c.argument('publishing_state', arg_type=get_enum_type(['submitted', 'rejected', 'published',
'unknownFutureValue']), help='', arg_group='Teams App '
'Definition')
c.argument('shortdescription', type=str, help='', arg_group='Teams App Definition')
c.argument('teams_app_id', type=str, help='The ID from the Teams app manifest.', arg_group='Teams App '
'Definition')
c.argument('version', type=str, help='The version number of the application.',
arg_group='Teams App Definition')
c.argument('application', action=AddApplication, nargs='+', help='identity', arg_group='Teams App Definition '
'Created By')
c.argument('device', action=AddApplication, nargs='+', help='identity', arg_group='Teams App Definition '
'Created By')
c.argument('user', action=AddApplication, nargs='+', help='identity', arg_group='Teams App Definition Created '
'By')
c.argument('id1', type=str, help='Read-only.', arg_group='Teams App')
c.argument('microsoft_graph_teams_app_display_name', type=str, help='The name of the catalog app provided by '
'the app developer in the Microsoft Teams zip app package.', arg_group='Teams App')
c.argument('distribution_method', arg_type=get_enum_type(['store', 'organization', 'sideloaded',
'unknownFutureValue']), help='', arg_group='Teams '
'App')
c.argument('external_id', type=str, help='The ID of the catalog provided by the app developer in the Microsoft '
'Teams zip app package.', arg_group='Teams App')
c.argument('app_definitions', type=validate_file_or_dict, help='The details for each version of the app. '
'Expected value: json-string/@json-file.', arg_group='Teams App')
with self.argument_context('teams chat create-member') as c:
c.argument('chat_id', type=str, help='key: id of chat')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('display_name', type=str, help='The display name of the user.')
c.argument('roles', nargs='+', help='The roles for that user.')
with self.argument_context('teams chat create-message') as c:
c.argument('chat_id', type=str, help='key: id of chat')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('attachments', action=AddAttachments, nargs='+', help='Attached files. Attachments are currently '
'read-only – sending attachments is not supported.')
c.argument('body', action=AddBody, nargs='+', help='itemBody')
c.argument('channel_identity', action=AddChannelIdentity, nargs='+', help='channelIdentity')
c.argument('microsoft_graph_chat_message_chat_id', type=str, help='')
c.argument('created_date_time', help='Read only. Timestamp of when the chat message was created.')
c.argument('deleted_date_time', help='Read only. Timestamp at which the chat message was deleted, or null if '
'not deleted.')
c.argument('etag', type=str, help='Read-only. Version number of the chat message.')
c.argument('importance', arg_type=get_enum_type(['normal', 'high', 'urgent']), help='')
c.argument('last_edited_date_time', help='Read only. Timestamp when edits to the chat message were made. '
'Triggers an \'Edited\' flag in the Microsoft Teams UI. If no edits are made the value is null.')
c.argument('last_modified_date_time', help='Read only. Timestamp when the chat message is created (initial '
'setting) or edited, including when a reaction is added or removed.')
c.argument('locale', type=str, help='Locale of the chat message set by the client.')
c.argument('mentions', type=validate_file_or_dict, help='List of entities mentioned in the chat message. '
'Currently supports user, bot, team, channel. Expected value: json-string/@json-file.')
c.argument('message_type', arg_type=get_enum_type(['message', 'chatEvent', 'typing']), help='')
c.argument('reactions', type=validate_file_or_dict, help=' Expected value: json-string/@json-file.')
c.argument('reply_to_id', type=str, help='Read-only. Id of the parent chat message or root chat message of the '
'thread. (Only applies to chat messages in channels not chats)')
c.argument('subject', type=str, help='The subject of the chat message, in plaintext.')
c.argument('summary', type=str, help='Summary text of the chat message that could be used for push '
'notifications and summary views or fall back views. Only applies to channel chat messages, not '
'chat messages in a chat.')
c.argument('web_url', type=str, help='')
c.argument('hosted_contents', action=AddHostedContents, nargs='+', help='')
c.argument('replies', type=validate_file_or_dict, help=' Expected value: json-string/@json-file.')
c.argument('dlp_action', arg_type=get_enum_type(['none', 'notifySender', 'blockAccess',
'blockAccessExternal']), help='',
arg_group='Policy Violation')
c.argument('justification_text', type=str, help='Justification text provided by the sender of the message when '
'overriding a policy violation.', arg_group='Policy Violation')
c.argument('policy_tip', action=AddPolicyTip, nargs='+', help='chatMessagePolicyViolationPolicyTip',
arg_group='Policy Violation')
c.argument('user_action', arg_type=get_enum_type(['none', 'override', 'reportFalsePositive']), help='',
arg_group='Policy Violation')
c.argument('verdict_details', arg_type=get_enum_type(['none', 'allowFalsePositiveOverride',
'allowOverrideWithoutJustification',
'allowOverrideWithJustification']), help='',
arg_group='Policy Violation')
c.argument('application', action=AddApplication, nargs='+', help='identity', arg_group='From')
c.argument('device', action=AddApplication, nargs='+', help='identity', arg_group='From')
c.argument('user', action=AddApplication, nargs='+', help='identity', arg_group='From')
with self.argument_context('teams chat create-tab') as c:
c.argument('chat_id', type=str, help='key: id of chat')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('configuration', action=AddConfiguration, nargs='+', help='teamsTabConfiguration')
c.argument('display_name', type=str, help='Name of the tab.')
c.argument('message_id', type=str, help='')
c.argument('sort_order_index', type=str, help='')
c.argument('teams_app_id', type=str, help='')
c.argument('web_url', type=str, help='Deep link URL of the tab instance. Read only.')
c.argument('microsoft_graph_entity_id', type=str, help='Read-only.', arg_group='Teams App')
c.argument('microsoft_graph_teams_app_display_name', type=str, help='The name of the catalog app provided by '
'the app developer in the Microsoft Teams zip app package.', arg_group='Teams App')
c.argument('distribution_method', arg_type=get_enum_type(['store', 'organization', 'sideloaded',
'unknownFutureValue']), help='', arg_group='Teams '
'App')
c.argument('external_id', type=str, help='The ID of the catalog provided by the app developer in the Microsoft '
'Teams zip app package.', arg_group='Teams App')
c.argument('app_definitions', type=validate_file_or_dict, help='The details for each version of the app. '
'Expected value: json-string/@json-file.', arg_group='Teams App')
with self.argument_context('teams chat delete-installed-app') as c:
c.argument('chat_id', type=str, help='key: id of chat')
c.argument('teams_app_installation_id', type=str, help='key: id of teamsAppInstallation')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('teams chat delete-member') as c:
c.argument('chat_id', type=str, help='key: id of chat')
c.argument('conversation_member_id', type=str, help='key: id of conversationMember')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('teams chat delete-message') as c:
c.argument('chat_id', type=str, help='key: id of chat')
c.argument('chat_message_id', type=str, help='key: id of chatMessage')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('teams chat delete-tab') as c:
c.argument('chat_id', type=str, help='key: id of chat')
c.argument('teams_tab_id', type=str, help='key: id of teamsTab')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('teams chat list-installed-app') as c:
c.argument('chat_id', type=str, help='key: id of chat')
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('teams chat list-member') as c:
c.argument('chat_id', type=str, help='key: id of chat')
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('teams chat list-message') as c:
c.argument('chat_id', type=str, help='key: id of chat')
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('teams chat list-tab') as c:
c.argument('chat_id', type=str, help='key: id of chat')
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('teams chat send-activity-notification') as c:
c.argument('chat_id', type=str, help='key: id of chat')
c.argument('topic', action=AddTopic, nargs='+', help='teamworkActivityTopic')
c.argument('activity_type', type=str, help='')
c.argument('chain_id', type=int, help='')
c.argument('preview_text', action=AddBody, nargs='+', help='itemBody')
c.argument('template_parameters', action=AddChatsTemplateParameters, nargs='+', help='')
c.argument('recipient', type=validate_file_or_dict, help='teamworkNotificationRecipient Expected value: '
'json-string/@json-file.')
with self.argument_context('teams chat show-installed-app') as c:
c.argument('chat_id', type=str, help='key: id of chat')
c.argument('teams_app_installation_id', type=str, help='key: id of teamsAppInstallation')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('teams chat show-member') as c:
c.argument('chat_id', type=str, help='key: id of chat')
c.argument('conversation_member_id', type=str, help='key: id of conversationMember')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('teams chat show-message') as c:
c.argument('chat_id', type=str, help='key: id of chat')
c.argument('chat_message_id', type=str, help='key: id of chatMessage')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('teams chat show-tab') as c:
c.argument('chat_id', type=str, help='key: id of chat')
c.argument('teams_tab_id', type=str, help='key: id of teamsTab')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('teams chat update-installed-app') as c:
c.argument('chat_id', type=str, help='key: id of chat')
c.argument('teams_app_installation_id', type=str, help='key: id of teamsAppInstallation')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('microsoft_graph_entity_id', type=str, help='Read-only.', arg_group='Teams App Definition')
c.argument('azure_ad_app_id', type=str, help='', arg_group='Teams App Definition')
c.argument('description', type=str, help='', arg_group='Teams App Definition')
c.argument('display_name', type=str, help='The name of the app provided by the app developer.',
arg_group='Teams App Definition')
c.argument('last_modified_date_time', help='', arg_group='Teams App Definition')
c.argument('publishing_state', arg_type=get_enum_type(['submitted', 'rejected', 'published',
'unknownFutureValue']), help='', arg_group='Teams App '
'Definition')
c.argument('shortdescription', type=str, help='', arg_group='Teams App Definition')
c.argument('teams_app_id', type=str, help='The ID from the Teams app manifest.', arg_group='Teams App '
'Definition')
c.argument('version', type=str, help='The version number of the application.',
arg_group='Teams App Definition')
c.argument('application', action=AddApplication, nargs='+', help='identity', arg_group='Teams App Definition '
'Created By')
c.argument('device', action=AddApplication, nargs='+', help='identity', arg_group='Teams App Definition '
'Created By')
c.argument('user', action=AddApplication, nargs='+', help='identity', arg_group='Teams App Definition Created '
'By')
c.argument('id1', type=str, help='Read-only.', arg_group='Teams App')
c.argument('microsoft_graph_teams_app_display_name', type=str, help='The name of the catalog app provided by '
'the app developer in the Microsoft Teams zip app package.', arg_group='Teams App')
c.argument('distribution_method', arg_type=get_enum_type(['store', 'organization', 'sideloaded',
'unknownFutureValue']), help='', arg_group='Teams '
'App')
c.argument('external_id', type=str, help='The ID of the catalog provided by the app developer in the Microsoft '
'Teams zip app package.', arg_group='Teams App')
c.argument('app_definitions', type=validate_file_or_dict, help='The details for each version of the app. '
'Expected value: json-string/@json-file.', arg_group='Teams App')
with self.argument_context('teams chat update-member') as c:
c.argument('chat_id', type=str, help='key: id of chat')
c.argument('conversation_member_id', type=str, help='key: id of conversationMember')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('display_name', type=str, help='The display name of the user.')
c.argument('roles', nargs='+', help='The roles for that user.')
with self.argument_context('teams chat update-message') as c:
c.argument('chat_id', type=str, help='key: id of chat')
c.argument('chat_message_id', type=str, help='key: id of chatMessage')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('attachments', action=AddAttachments, nargs='+', help='Attached files. Attachments are currently '
'read-only – sending attachments is not supported.')
c.argument('body', action=AddBody, nargs='+', help='itemBody')
c.argument('channel_identity', action=AddChannelIdentity, nargs='+', help='channelIdentity')
c.argument('microsoft_graph_chat_message_chat_id', type=str, help='')
c.argument('created_date_time', help='Read only. Timestamp of when the chat message was created.')
c.argument('deleted_date_time', help='Read only. Timestamp at which the chat message was deleted, or null if '
'not deleted.')
c.argument('etag', type=str, help='Read-only. Version number of the chat message.')
c.argument('importance', arg_type=get_enum_type(['normal', 'high', 'urgent']), help='')
c.argument('last_edited_date_time', help='Read only. Timestamp when edits to the chat message were made. '
'Triggers an \'Edited\' flag in the Microsoft Teams UI. If no edits are made the value is null.')
c.argument('last_modified_date_time', help='Read only. Timestamp when the chat message is created (initial '
'setting) or edited, including when a reaction is added or removed.')
c.argument('locale', type=str, help='Locale of the chat message set by the client.')
c.argument('mentions', type=validate_file_or_dict, help='List of entities mentioned in the chat message. '
'Currently supports user, bot, team, channel. Expected value: json-string/@json-file.')
c.argument('message_type', arg_type=get_enum_type(['message', 'chatEvent', 'typing']), help='')
c.argument('reactions', type=validate_file_or_dict, help=' Expected value: json-string/@json-file.')
c.argument('reply_to_id', type=str, help='Read-only. Id of the parent chat message or root chat message of the '
'thread. (Only applies to chat messages in channels not chats)')
c.argument('subject', type=str, help='The subject of the chat message, in plaintext.')
c.argument('summary', type=str, help='Summary text of the chat message that could be used for push '
'notifications and summary views or fall back views. Only applies to channel chat messages, not '
'chat messages in a chat.')
c.argument('web_url', type=str, help='')
c.argument('hosted_contents', action=AddHostedContents, nargs='+', help='')
c.argument('replies', type=validate_file_or_dict, help=' Expected value: json-string/@json-file.')
c.argument('dlp_action', arg_type=get_enum_type(['none', 'notifySender', 'blockAccess',
'blockAccessExternal']), help='',
arg_group='Policy Violation')
c.argument('justification_text', type=str, help='Justification text provided by the sender of the message when '
'overriding a policy violation.', arg_group='Policy Violation')
c.argument('policy_tip', action=AddPolicyTip, nargs='+', help='chatMessagePolicyViolationPolicyTip',
arg_group='Policy Violation')
c.argument('user_action', arg_type=get_enum_type(['none', 'override', 'reportFalsePositive']), help='',
arg_group='Policy Violation')
c.argument('verdict_details', arg_type=get_enum_type(['none', 'allowFalsePositiveOverride',
'allowOverrideWithoutJustification',
'allowOverrideWithJustification']), help='',
arg_group='Policy Violation')
c.argument('application', action=AddApplication, nargs='+', help='identity', arg_group='From')
c.argument('device', action=AddApplication, nargs='+', help='identity', arg_group='From')
c.argument('user', action=AddApplication, nargs='+', help='identity', arg_group='From')
with self.argument_context('teams chat update-tab') as c:
c.argument('chat_id', type=str, help='key: id of chat')
c.argument('teams_tab_id', type=str, help='key: id of teamsTab')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('configuration', action=AddConfiguration, nargs='+', help='teamsTabConfiguration')
c.argument('display_name', type=str, help='Name of the tab.')
c.argument('message_id', type=str, help='')
c.argument('sort_order_index', type=str, help='')
c.argument('teams_app_id', type=str, help='')
c.argument('web_url', type=str, help='Deep link URL of the tab instance. Read only.')
c.argument('microsoft_graph_entity_id', type=str, help='Read-only.', arg_group='Teams App')
c.argument('microsoft_graph_teams_app_display_name', type=str, help='The name of the catalog app provided by '
'the app developer in the Microsoft Teams zip app package.', arg_group='Teams App')
c.argument('distribution_method', arg_type=get_enum_type(['store', 'organization', 'sideloaded',
'unknownFutureValue']), help='', arg_group='Teams '
'App')
c.argument('external_id', type=str, help='The ID of the catalog provided by the app developer in the Microsoft '
'Teams zip app package.', arg_group='Teams App')
c.argument('app_definitions', type=validate_file_or_dict, help='The details for each version of the app. '
'Expected value: json-string/@json-file.', arg_group='Teams App')
with self.argument_context('teams chat-installed-app delete-ref-team-app') as c:
c.argument('chat_id', type=str, help='key: id of chat')
c.argument('teams_app_installation_id', type=str, help='key: id of teamsAppInstallation')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('teams chat-installed-app delete-ref-team-app-definition') as c:
c.argument('chat_id', type=str, help='key: id of chat')
c.argument('teams_app_installation_id', type=str, help='key: id of teamsAppInstallation')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('teams chat-installed-app set-ref-team-app') as c:
c.argument('chat_id', type=str, help='key: id of chat')
c.argument('teams_app_installation_id', type=str, help='key: id of teamsAppInstallation')
c.argument('body', type=validate_file_or_dict, help='New navigation property ref values Expected value: '
'json-string/@json-file.')
with self.argument_context('teams chat-installed-app set-ref-team-app-definition') as c:
c.argument('chat_id', type=str, help='key: id of chat')
c.argument('teams_app_installation_id', type=str, help='key: id of teamsAppInstallation')
c.argument('body', type=validate_file_or_dict, help='New navigation property ref values Expected value: '
'json-string/@json-file.')
with self.argument_context('teams chat-installed-app show-ref-team-app') as c:
c.argument('chat_id', type=str, help='key: id of chat')
c.argument('teams_app_installation_id', type=str, help='key: id of teamsAppInstallation')
with self.argument_context('teams chat-installed-app show-ref-team-app-definition') as c:
c.argument('chat_id', type=str, help='key: id of chat')
c.argument('teams_app_installation_id', type=str, help='key: id of teamsAppInstallation')
with self.argument_context('teams chat-installed-app show-team-app') as c:
c.argument('chat_id', type=str, help='key: id of chat')
c.argument('teams_app_installation_id', type=str, help='key: id of teamsAppInstallation')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('teams chat-installed-app show-team-app-definition') as c:
c.argument('chat_id', type=str, help='key: id of chat')
c.argument('teams_app_installation_id', type=str, help='key: id of teamsAppInstallation')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('teams chat-installed-app upgrade') as c:
c.argument('chat_id', type=str, help='key: id of chat')
c.argument('teams_app_installation_id', type=str, help='key: id of teamsAppInstallation')
with self.argument_context('teams chat-member add') as c:
c.argument('chat_id', type=str, help='key: id of chat')
c.argument('values', action=AddChatsMembersValues, nargs='+', help='')
# Argument registration for `teams chat-message create-hosted-content`.
# Key arguments (`chat_id`, `chat_message_id`) address the parent Graph entities.
with self.argument_context('teams chat-message create-hosted-content') as c:
    c.argument('chat_id', type=str, help='key: id of chat')
    c.argument('chat_message_id', type=str, help='key: id of chatMessage')
    c.argument('id_', options_list=['--id'], type=str, help='Read-only.')  # maps reserved name id_ to --id
    c.argument('content_bytes', help='')
    c.argument('content_type', type=str, help='')
# Argument registration for `teams chat-message create-reply`.
# Complex Graph sub-objects use custom argparse actions (Add*) for key=value input;
# JSON-valued collections use validate_file_or_dict (inline JSON or @file).
with self.argument_context('teams chat-message create-reply') as c:
    c.argument('chat_id', type=str, help='key: id of chat')
    c.argument('chat_message_id', type=str, help='key: id of chatMessage')
    c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
    c.argument('attachments', action=AddAttachments, nargs='+', help='Attached files. Attachments are currently '
               'read-only – sending attachments is not supported.')
    c.argument('body', action=AddBody, nargs='+', help='itemBody')
    c.argument('channel_identity', action=AddChannelIdentity, nargs='+', help='channelIdentity')
    c.argument('microsoft_graph_chat_message_chat_id', type=str, help='')
    c.argument('created_date_time', help='Read only. Timestamp of when the chat message was created.')
    c.argument('deleted_date_time', help='Read only. Timestamp at which the chat message was deleted, or null if '
               'not deleted.')
    c.argument('etag', type=str, help='Read-only. Version number of the chat message.')
    c.argument('importance', arg_type=get_enum_type(['normal', 'high', 'urgent']), help='')
    c.argument('last_edited_date_time', help='Read only. Timestamp when edits to the chat message were made. '
               'Triggers an \'Edited\' flag in the Microsoft Teams UI. If no edits are made the value is null.')
    c.argument('last_modified_date_time', help='Read only. Timestamp when the chat message is created (initial '
               'setting) or edited, including when a reaction is added or removed.')
    c.argument('locale', type=str, help='Locale of the chat message set by the client.')
    c.argument('mentions', type=validate_file_or_dict, help='List of entities mentioned in the chat message. '
               'Currently supports user, bot, team, channel. Expected value: json-string/@json-file.')
    c.argument('message_type', arg_type=get_enum_type(['message', 'chatEvent', 'typing']), help='')
    c.argument('reactions', type=validate_file_or_dict, help=' Expected value: json-string/@json-file.')
    c.argument('reply_to_id', type=str, help='Read-only. Id of the parent chat message or root chat message of the '
               'thread. (Only applies to chat messages in channels not chats)')
    c.argument('subject', type=str, help='The subject of the chat message, in plaintext.')
    c.argument('summary', type=str, help='Summary text of the chat message that could be used for push '
               'notifications and summary views or fall back views. Only applies to channel chat messages, not '
               'chat messages in a chat.')
    c.argument('web_url', type=str, help='')
    c.argument('hosted_contents', action=AddHostedContents, nargs='+', help='')
    c.argument('replies', type=validate_file_or_dict, help=' Expected value: json-string/@json-file.')
    # Flattened chatMessagePolicyViolation properties, grouped under 'Policy Violation' in --help output.
    c.argument('dlp_action', arg_type=get_enum_type(['none', 'notifySender', 'blockAccess',
                                                     'blockAccessExternal']), help='',
               arg_group='Policy Violation')
    c.argument('justification_text', type=str, help='Justification text provided by the sender of the message when '
               'overriding a policy violation.', arg_group='Policy Violation')
    c.argument('policy_tip', action=AddPolicyTip, nargs='+', help='chatMessagePolicyViolationPolicyTip',
               arg_group='Policy Violation')
    c.argument('user_action', arg_type=get_enum_type(['none', 'override', 'reportFalsePositive']), help='',
               arg_group='Policy Violation')
    c.argument('verdict_details', arg_type=get_enum_type(['none', 'allowFalsePositiveOverride',
                                                          'allowOverrideWithoutJustification',
                                                          'allowOverrideWithJustification']), help='',
               arg_group='Policy Violation')
    # Flattened 'from' identitySet: application/device/user all share the generic identity action.
    c.argument('application', action=AddApplication, nargs='+', help='identity', arg_group='From')
    c.argument('device', action=AddApplication, nargs='+', help='identity', arg_group='From')
    c.argument('user', action=AddApplication, nargs='+', help='identity', arg_group='From')
# Argument registration for the chat-message delete/delta/list/set/show commands.
# Delete commands take --if-match for optimistic-concurrency (ETag) control;
# list/show commands expose OData query options (orderby/select/expand).
with self.argument_context('teams chat-message delete-hosted-content') as c:
    c.argument('chat_id', type=str, help='key: id of chat')
    c.argument('chat_message_id', type=str, help='key: id of chatMessage')
    c.argument('chat_message_hosted_content_id', type=str, help='key: id of chatMessageHostedContent')
    c.argument('if_match', type=str, help='ETag')
with self.argument_context('teams chat-message delete-reply') as c:
    c.argument('chat_id', type=str, help='key: id of chat')
    c.argument('chat_message_id', type=str, help='key: id of chatMessage')
    c.argument('chat_message_id1', type=str, help='key: id of chatMessage')  # id of the reply itself
    c.argument('if_match', type=str, help='ETag')
with self.argument_context('teams chat-message delta') as c:
    c.argument('chat_id', type=str, help='key: id of chat')
with self.argument_context('teams chat-message list-hosted-content') as c:
    c.argument('chat_id', type=str, help='key: id of chat')
    c.argument('chat_message_id', type=str, help='key: id of chatMessage')
    c.argument('orderby', nargs='+', help='Order items by property values')
    c.argument('select', nargs='+', help='Select properties to be returned')
    c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('teams chat-message list-reply') as c:
    c.argument('chat_id', type=str, help='key: id of chat')
    c.argument('chat_message_id', type=str, help='key: id of chatMessage')
    c.argument('orderby', nargs='+', help='Order items by property values')
    c.argument('select', nargs='+', help='Select properties to be returned')
    c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('teams chat-message set-hosted-content-content') as c:
    c.argument('chat_id', type=str, help='key: id of chat')
    c.argument('chat_message_id', type=str, help='key: id of chatMessage')
    c.argument('chat_message_hosted_content_id', type=str, help='key: id of chatMessageHostedContent')
    c.argument('data', help='New media content.')
with self.argument_context('teams chat-message show-hosted-content') as c:
    c.argument('chat_id', type=str, help='key: id of chat')
    c.argument('chat_message_id', type=str, help='key: id of chatMessage')
    c.argument('chat_message_hosted_content_id', type=str, help='key: id of chatMessageHostedContent')
    c.argument('select', nargs='+', help='Select properties to be returned')
    c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('teams chat-message show-hosted-content-content') as c:
    c.argument('chat_id', type=str, help='key: id of chat')
    c.argument('chat_message_id', type=str, help='key: id of chatMessage')
    c.argument('chat_message_hosted_content_id', type=str, help='key: id of chatMessageHostedContent')
with self.argument_context('teams chat-message show-reply') as c:
    c.argument('chat_id', type=str, help='key: id of chat')
    c.argument('chat_message_id', type=str, help='key: id of chatMessage')
    c.argument('chat_message_id1', type=str, help='key: id of chatMessage')  # id of the reply itself
    c.argument('select', nargs='+', help='Select properties to be returned')
    c.argument('expand', nargs='+', help='Expand related entities')
# Argument registration for `teams chat-message update-hosted-content`.
with self.argument_context('teams chat-message update-hosted-content') as c:
    c.argument('chat_id', type=str, help='key: id of chat')
    c.argument('chat_message_id', type=str, help='key: id of chatMessage')
    c.argument('chat_message_hosted_content_id', type=str, help='key: id of chatMessageHostedContent')
    c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
    c.argument('content_bytes', help='')
    c.argument('content_type', type=str, help='')
# Argument registration for `teams chat-message update-reply`.
# Mirrors the create-reply argument set (same flattened chatMessage body),
# plus the extra chat_message_id1 key identifying the reply being updated.
with self.argument_context('teams chat-message update-reply') as c:
    c.argument('chat_id', type=str, help='key: id of chat')
    c.argument('chat_message_id', type=str, help='key: id of chatMessage')
    c.argument('chat_message_id1', type=str, help='key: id of chatMessage')
    c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
    c.argument('attachments', action=AddAttachments, nargs='+', help='Attached files. Attachments are currently '
               'read-only – sending attachments is not supported.')
    c.argument('body', action=AddBody, nargs='+', help='itemBody')
    c.argument('channel_identity', action=AddChannelIdentity, nargs='+', help='channelIdentity')
    c.argument('microsoft_graph_chat_message_chat_id', type=str, help='')
    c.argument('created_date_time', help='Read only. Timestamp of when the chat message was created.')
    c.argument('deleted_date_time', help='Read only. Timestamp at which the chat message was deleted, or null if '
               'not deleted.')
    c.argument('etag', type=str, help='Read-only. Version number of the chat message.')
    c.argument('importance', arg_type=get_enum_type(['normal', 'high', 'urgent']), help='')
    c.argument('last_edited_date_time', help='Read only. Timestamp when edits to the chat message were made. '
               'Triggers an \'Edited\' flag in the Microsoft Teams UI. If no edits are made the value is null.')
    c.argument('last_modified_date_time', help='Read only. Timestamp when the chat message is created (initial '
               'setting) or edited, including when a reaction is added or removed.')
    c.argument('locale', type=str, help='Locale of the chat message set by the client.')
    c.argument('mentions', type=validate_file_or_dict, help='List of entities mentioned in the chat message. '
               'Currently supports user, bot, team, channel. Expected value: json-string/@json-file.')
    c.argument('message_type', arg_type=get_enum_type(['message', 'chatEvent', 'typing']), help='')
    c.argument('reactions', type=validate_file_or_dict, help=' Expected value: json-string/@json-file.')
    c.argument('reply_to_id', type=str, help='Read-only. Id of the parent chat message or root chat message of the '
               'thread. (Only applies to chat messages in channels not chats)')
    c.argument('subject', type=str, help='The subject of the chat message, in plaintext.')
    c.argument('summary', type=str, help='Summary text of the chat message that could be used for push '
               'notifications and summary views or fall back views. Only applies to channel chat messages, not '
               'chat messages in a chat.')
    c.argument('web_url', type=str, help='')
    c.argument('hosted_contents', action=AddHostedContents, nargs='+', help='')
    c.argument('replies', type=validate_file_or_dict, help=' Expected value: json-string/@json-file.')
    # Flattened chatMessagePolicyViolation properties, grouped under 'Policy Violation' in --help output.
    c.argument('dlp_action', arg_type=get_enum_type(['none', 'notifySender', 'blockAccess',
                                                     'blockAccessExternal']), help='',
               arg_group='Policy Violation')
    c.argument('justification_text', type=str, help='Justification text provided by the sender of the message when '
               'overriding a policy violation.', arg_group='Policy Violation')
    c.argument('policy_tip', action=AddPolicyTip, nargs='+', help='chatMessagePolicyViolationPolicyTip',
               arg_group='Policy Violation')
    c.argument('user_action', arg_type=get_enum_type(['none', 'override', 'reportFalsePositive']), help='',
               arg_group='Policy Violation')
    c.argument('verdict_details', arg_type=get_enum_type(['none', 'allowFalsePositiveOverride',
                                                          'allowOverrideWithoutJustification',
                                                          'allowOverrideWithJustification']), help='',
               arg_group='Policy Violation')
    # Flattened 'from' identitySet: application/device/user all share the generic identity action.
    c.argument('application', action=AddApplication, nargs='+', help='identity', arg_group='From')
    c.argument('device', action=AddApplication, nargs='+', help='identity', arg_group='From')
    c.argument('user', action=AddApplication, nargs='+', help='identity', arg_group='From')
# Argument registration for chat-message-reply delta, the chat-tab
# navigation-property-reference commands, and group delete/show-team.
with self.argument_context('teams chat-message-reply delta') as c:
    c.argument('chat_id', type=str, help='key: id of chat')
    c.argument('chat_message_id', type=str, help='key: id of chatMessage')
with self.argument_context('teams chat-tab delete-ref-team-app') as c:
    c.argument('chat_id', type=str, help='key: id of chat')
    c.argument('teams_tab_id', type=str, help='key: id of teamsTab')
    c.argument('if_match', type=str, help='ETag')  # optimistic-concurrency precondition
with self.argument_context('teams chat-tab set-ref-team-app') as c:
    c.argument('chat_id', type=str, help='key: id of chat')
    c.argument('teams_tab_id', type=str, help='key: id of teamsTab')
    c.argument('body', type=validate_file_or_dict, help='New navigation property ref values Expected value: '
               'json-string/@json-file.')
with self.argument_context('teams chat-tab show-ref-team-app') as c:
    c.argument('chat_id', type=str, help='key: id of chat')
    c.argument('teams_tab_id', type=str, help='key: id of teamsTab')
with self.argument_context('teams chat-tab show-team-app') as c:
    c.argument('chat_id', type=str, help='key: id of chat')
    c.argument('teams_tab_id', type=str, help='key: id of teamsTab')
    c.argument('select', nargs='+', help='Select properties to be returned')
    c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('teams group delete-team') as c:
    c.argument('group_id', type=str, help='key: id of group')
    c.argument('if_match', type=str, help='ETag')
with self.argument_context('teams group show-team') as c:
    c.argument('group_id', type=str, help='key: id of group')
    c.argument('select', nargs='+', help='Select properties to be returned')
    c.argument('expand', nargs='+', help='Expand related entities')
# Argument registration for `teams group update-team`: the full flattened
# Graph `team` resource keyed by group_id. Flattened sub-resources are grouped
# in --help output via arg_group ('Template', 'Schedule', 'Discovery Settings', ...).
with self.argument_context('teams group update-team') as c:
    c.argument('group_id', type=str, help='key: id of group')
    c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
    c.argument('classification', type=str, help='An optional label. Typically describes the data or business '
               'sensitivity of the team. Must match one of a pre-configured set in the tenant\'s directory.')
    c.argument('created_date_time', help='')
    c.argument('description', type=str, help='An optional description for the team.')
    c.argument('display_name', type=str, help='The name of the team.')
    c.argument('fun_settings', action=AddFunSettings, nargs='+', help='teamFunSettings')
    c.argument('guest_settings', action=AddGuestSettings, nargs='+', help='teamGuestSettings')
    c.argument('internal_id', type=str, help='A unique ID for the team that has been used in a few places such as '
               'the audit log/Office 365 Management Activity API.')
    c.argument('is_archived', arg_type=get_three_state_flag(), help='Whether this team is in read-only mode.')
    c.argument('is_membership_limited_to_owners', arg_type=get_three_state_flag(), help='')
    c.argument('member_settings', action=AddMemberSettings, nargs='+', help='teamMemberSettings')
    c.argument('messaging_settings', action=AddMessagingSettings, nargs='+', help='teamMessagingSettings')
    c.argument('specialization', arg_type=get_enum_type(['none', 'educationStandard', 'educationClass',
                                                         'educationProfessionalLearningCommunity',
                                                         'educationStaff', 'healthcareStandard',
                                                         'healthcareCareCoordination', 'unknownFutureValue']),
               help='')
    c.argument('visibility', arg_type=get_enum_type(['private', 'public', 'hiddenMembership',
                                                     'unknownFutureValue']), help='')
    c.argument('web_url', type=str, help='A hyperlink that will go to the team in the Microsoft Teams client. This '
               'is the URL that you get when you right-click a team in the Microsoft Teams client and select Get '
               'link to team. This URL should be treated as an opaque blob, and not parsed.')
    c.argument('channels', type=validate_file_or_dict, help='The collection of channels & messages associated with '
               'the team. Expected value: json-string/@json-file.')
    c.argument('group', type=validate_file_or_dict, help='Represents an Azure Active Directory object. The '
               'directoryObject type is the base type for many other directory entity types. Expected value: '
               'json-string/@json-file.')
    c.argument('installed_apps', type=validate_file_or_dict, help='The apps installed in this team. Expected '
               'value: json-string/@json-file.')
    c.argument('members', action=AddGroupsMembers, nargs='+', help='Members and owners of the team.')
    c.argument('operations', type=validate_file_or_dict, help='The async operations that ran or are running on '
               'this team. Expected value: json-string/@json-file.')
    c.argument('owners', type=validate_file_or_dict, help=' Expected value: json-string/@json-file.')
    c.argument('photo', action=AddGroupsPhoto, nargs='+', help='profilePhoto')
    c.argument('primary_channel', type=validate_file_or_dict, help='channel Expected value: '
               'json-string/@json-file.')
    c.argument('microsoft_graph_entity_id', type=str, help='Read-only.', arg_group='Template')
    # Flattened `schedule` sub-resource.
    c.argument('id1', type=str, help='Read-only.', arg_group='Schedule')
    c.argument('enabled', arg_type=get_three_state_flag(), help='Indicates whether the schedule is enabled for the '
               'team. Required.', arg_group='Schedule')
    c.argument('offer_shift_requests_enabled', arg_type=get_three_state_flag(), help='Indicates whether offer '
               'shift requests are enabled for the schedule.', arg_group='Schedule')
    c.argument('open_shifts_enabled', arg_type=get_three_state_flag(), help='Indicates whether open shifts are '
               'enabled for the schedule.', arg_group='Schedule')
    c.argument('provision_status', arg_type=get_enum_type(['NotStarted', 'Running', 'Completed', 'Failed']),
               help='', arg_group='Schedule')
    c.argument('provision_status_code', type=str, help='Additional information about why schedule provisioning '
               'failed.', arg_group='Schedule')
    c.argument('swap_shifts_requests_enabled', arg_type=get_three_state_flag(), help='Indicates whether swap '
               'shifts requests are enabled for the schedule.', arg_group='Schedule')
    c.argument('time_clock_enabled', arg_type=get_three_state_flag(), help='Indicates whether time clock is '
               'enabled for the schedule.', arg_group='Schedule')
    c.argument('time_off_requests_enabled', arg_type=get_three_state_flag(), help='Indicates whether time off '
               'requests are enabled for the schedule.', arg_group='Schedule')
    c.argument('time_zone', type=str, help='Indicates the time zone of the schedule team using tz database format. '
               'Required.', arg_group='Schedule')
    c.argument('workforce_integration_ids', nargs='+', help='', arg_group='Schedule')
    c.argument('offer_shift_requests', action=AddOfferShiftRequests, nargs='+', help='', arg_group='Schedule')
    c.argument('open_shift_change_requests', action=AddOpenShiftChangeRequests, nargs='+', help='',
               arg_group='Schedule')
    c.argument('open_shifts', type=validate_file_or_dict, help=' Expected value: json-string/@json-file.',
               arg_group='Schedule')
    c.argument('scheduling_groups', action=AddSchedulingGroups, nargs='+', help='The logical grouping of users in '
               'the schedule (usually by role).', arg_group='Schedule')
    c.argument('shifts', type=validate_file_or_dict, help='The shifts in the schedule. Expected value: '
               'json-string/@json-file.', arg_group='Schedule')
    c.argument('swap_shifts_change_requests', action=AddSwapShiftsChangeRequests, nargs='+', help='',
               arg_group='Schedule')
    c.argument('time_cards', type=validate_file_or_dict, help=' Expected value: json-string/@json-file.',
               arg_group='Schedule')
    c.argument('time_off_reasons', action=AddTimeOffReasons, nargs='+', help='The set of reasons for a time off in '
               'the schedule.', arg_group='Schedule')
    c.argument('time_off_requests', action=AddTimeOffRequests, nargs='+', help='', arg_group='Schedule')
    c.argument('times_off', type=validate_file_or_dict, help='The instances of times off in the schedule. Expected '
               'value: json-string/@json-file.', arg_group='Schedule')
    c.argument('approved_location', action=AddApprovedLocation, nargs='+', help='geoCoordinates',
               arg_group='Schedule Time Clock Settings')
    c.argument('show_in_teams_search_and_suggestions', arg_type=get_three_state_flag(), help='',
               arg_group='Discovery Settings')
# Argument registration for `teams team list` (OData query options only).
with self.argument_context('teams team list') as c:
    c.argument('orderby', nargs='+', help='Order items by property values')
    c.argument('select', nargs='+', help='Select properties to be returned')
    c.argument('expand', nargs='+', help='Expand related entities')
# Argument registration for `teams team create`: same flattened `team`
# resource body as `teams group update-team`, without a parent key argument.
with self.argument_context('teams team create') as c:
    c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
    c.argument('classification', type=str, help='An optional label. Typically describes the data or business '
               'sensitivity of the team. Must match one of a pre-configured set in the tenant\'s directory.')
    c.argument('created_date_time', help='')
    c.argument('description', type=str, help='An optional description for the team.')
    c.argument('display_name', type=str, help='The name of the team.')
    c.argument('fun_settings', action=AddFunSettings, nargs='+', help='teamFunSettings')
    c.argument('guest_settings', action=AddGuestSettings, nargs='+', help='teamGuestSettings')
    c.argument('internal_id', type=str, help='A unique ID for the team that has been used in a few places such as '
               'the audit log/Office 365 Management Activity API.')
    c.argument('is_archived', arg_type=get_three_state_flag(), help='Whether this team is in read-only mode.')
    c.argument('is_membership_limited_to_owners', arg_type=get_three_state_flag(), help='')
    c.argument('member_settings', action=AddMemberSettings, nargs='+', help='teamMemberSettings')
    c.argument('messaging_settings', action=AddMessagingSettings, nargs='+', help='teamMessagingSettings')
    c.argument('specialization', arg_type=get_enum_type(['none', 'educationStandard', 'educationClass',
                                                         'educationProfessionalLearningCommunity',
                                                         'educationStaff', 'healthcareStandard',
                                                         'healthcareCareCoordination', 'unknownFutureValue']),
               help='')
    c.argument('visibility', arg_type=get_enum_type(['private', 'public', 'hiddenMembership',
                                                     'unknownFutureValue']), help='')
    c.argument('web_url', type=str, help='A hyperlink that will go to the team in the Microsoft Teams client. This '
               'is the URL that you get when you right-click a team in the Microsoft Teams client and select Get '
               'link to team. This URL should be treated as an opaque blob, and not parsed.')
    c.argument('channels', type=validate_file_or_dict, help='The collection of channels & messages associated with '
               'the team. Expected value: json-string/@json-file.')
    c.argument('group', type=validate_file_or_dict, help='Represents an Azure Active Directory object. The '
               'directoryObject type is the base type for many other directory entity types. Expected value: '
               'json-string/@json-file.')
    c.argument('installed_apps', type=validate_file_or_dict, help='The apps installed in this team. Expected '
               'value: json-string/@json-file.')
    c.argument('members', action=AddGroupsMembers, nargs='+', help='Members and owners of the team.')
    c.argument('operations', type=validate_file_or_dict, help='The async operations that ran or are running on '
               'this team. Expected value: json-string/@json-file.')
    c.argument('owners', type=validate_file_or_dict, help=' Expected value: json-string/@json-file.')
    c.argument('photo', action=AddGroupsPhoto, nargs='+', help='profilePhoto')
    c.argument('primary_channel', type=validate_file_or_dict, help='channel Expected value: '
               'json-string/@json-file.')
    c.argument('microsoft_graph_entity_id', type=str, help='Read-only.', arg_group='Template')
    # Flattened `schedule` sub-resource.
    c.argument('id1', type=str, help='Read-only.', arg_group='Schedule')
    c.argument('enabled', arg_type=get_three_state_flag(), help='Indicates whether the schedule is enabled for the '
               'team. Required.', arg_group='Schedule')
    c.argument('offer_shift_requests_enabled', arg_type=get_three_state_flag(), help='Indicates whether offer '
               'shift requests are enabled for the schedule.', arg_group='Schedule')
    c.argument('open_shifts_enabled', arg_type=get_three_state_flag(), help='Indicates whether open shifts are '
               'enabled for the schedule.', arg_group='Schedule')
    c.argument('provision_status', arg_type=get_enum_type(['NotStarted', 'Running', 'Completed', 'Failed']),
               help='', arg_group='Schedule')
    c.argument('provision_status_code', type=str, help='Additional information about why schedule provisioning '
               'failed.', arg_group='Schedule')
    c.argument('swap_shifts_requests_enabled', arg_type=get_three_state_flag(), help='Indicates whether swap '
               'shifts requests are enabled for the schedule.', arg_group='Schedule')
    c.argument('time_clock_enabled', arg_type=get_three_state_flag(), help='Indicates whether time clock is '
               'enabled for the schedule.', arg_group='Schedule')
    c.argument('time_off_requests_enabled', arg_type=get_three_state_flag(), help='Indicates whether time off '
               'requests are enabled for the schedule.', arg_group='Schedule')
    c.argument('time_zone', type=str, help='Indicates the time zone of the schedule team using tz database format. '
               'Required.', arg_group='Schedule')
    c.argument('workforce_integration_ids', nargs='+', help='', arg_group='Schedule')
    c.argument('offer_shift_requests', action=AddOfferShiftRequests, nargs='+', help='', arg_group='Schedule')
    c.argument('open_shift_change_requests', action=AddOpenShiftChangeRequests, nargs='+', help='',
               arg_group='Schedule')
    c.argument('open_shifts', type=validate_file_or_dict, help=' Expected value: json-string/@json-file.',
               arg_group='Schedule')
    c.argument('scheduling_groups', action=AddSchedulingGroups, nargs='+', help='The logical grouping of users in '
               'the schedule (usually by role).', arg_group='Schedule')
    c.argument('shifts', type=validate_file_or_dict, help='The shifts in the schedule. Expected value: '
               'json-string/@json-file.', arg_group='Schedule')
    c.argument('swap_shifts_change_requests', action=AddSwapShiftsChangeRequests, nargs='+', help='',
               arg_group='Schedule')
    c.argument('time_cards', type=validate_file_or_dict, help=' Expected value: json-string/@json-file.',
               arg_group='Schedule')
    c.argument('time_off_reasons', action=AddTimeOffReasons, nargs='+', help='The set of reasons for a time off in '
               'the schedule.', arg_group='Schedule')
    c.argument('time_off_requests', action=AddTimeOffRequests, nargs='+', help='', arg_group='Schedule')
    c.argument('times_off', type=validate_file_or_dict, help='The instances of times off in the schedule. Expected '
               'value: json-string/@json-file.', arg_group='Schedule')
    c.argument('approved_location', action=AddApprovedLocation, nargs='+', help='geoCoordinates',
               arg_group='Schedule Time Clock Settings')
    c.argument('show_in_teams_search_and_suggestions', arg_type=get_three_state_flag(), help='',
               arg_group='Discovery Settings')
# Argument registration for `teams team update`: identical flattened `team`
# body as `teams team create`, keyed by team_id.
with self.argument_context('teams team update') as c:
    c.argument('team_id', type=str, help='key: id of team')
    c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
    c.argument('classification', type=str, help='An optional label. Typically describes the data or business '
               'sensitivity of the team. Must match one of a pre-configured set in the tenant\'s directory.')
    c.argument('created_date_time', help='')
    c.argument('description', type=str, help='An optional description for the team.')
    c.argument('display_name', type=str, help='The name of the team.')
    c.argument('fun_settings', action=AddFunSettings, nargs='+', help='teamFunSettings')
    c.argument('guest_settings', action=AddGuestSettings, nargs='+', help='teamGuestSettings')
    c.argument('internal_id', type=str, help='A unique ID for the team that has been used in a few places such as '
               'the audit log/Office 365 Management Activity API.')
    c.argument('is_archived', arg_type=get_three_state_flag(), help='Whether this team is in read-only mode.')
    c.argument('is_membership_limited_to_owners', arg_type=get_three_state_flag(), help='')
    c.argument('member_settings', action=AddMemberSettings, nargs='+', help='teamMemberSettings')
    c.argument('messaging_settings', action=AddMessagingSettings, nargs='+', help='teamMessagingSettings')
    c.argument('specialization', arg_type=get_enum_type(['none', 'educationStandard', 'educationClass',
                                                         'educationProfessionalLearningCommunity',
                                                         'educationStaff', 'healthcareStandard',
                                                         'healthcareCareCoordination', 'unknownFutureValue']),
               help='')
    c.argument('visibility', arg_type=get_enum_type(['private', 'public', 'hiddenMembership',
                                                     'unknownFutureValue']), help='')
    c.argument('web_url', type=str, help='A hyperlink that will go to the team in the Microsoft Teams client. This '
               'is the URL that you get when you right-click a team in the Microsoft Teams client and select Get '
               'link to team. This URL should be treated as an opaque blob, and not parsed.')
    c.argument('channels', type=validate_file_or_dict, help='The collection of channels & messages associated with '
               'the team. Expected value: json-string/@json-file.')
    c.argument('group', type=validate_file_or_dict, help='Represents an Azure Active Directory object. The '
               'directoryObject type is the base type for many other directory entity types. Expected value: '
               'json-string/@json-file.')
    c.argument('installed_apps', type=validate_file_or_dict, help='The apps installed in this team. Expected '
               'value: json-string/@json-file.')
    c.argument('members', action=AddGroupsMembers, nargs='+', help='Members and owners of the team.')
    c.argument('operations', type=validate_file_or_dict, help='The async operations that ran or are running on '
               'this team. Expected value: json-string/@json-file.')
    c.argument('owners', type=validate_file_or_dict, help=' Expected value: json-string/@json-file.')
    c.argument('photo', action=AddGroupsPhoto, nargs='+', help='profilePhoto')
    c.argument('primary_channel', type=validate_file_or_dict, help='channel Expected value: '
               'json-string/@json-file.')
    c.argument('microsoft_graph_entity_id', type=str, help='Read-only.', arg_group='Template')
    # Flattened `schedule` sub-resource.
    c.argument('id1', type=str, help='Read-only.', arg_group='Schedule')
    c.argument('enabled', arg_type=get_three_state_flag(), help='Indicates whether the schedule is enabled for the '
               'team. Required.', arg_group='Schedule')
    c.argument('offer_shift_requests_enabled', arg_type=get_three_state_flag(), help='Indicates whether offer '
               'shift requests are enabled for the schedule.', arg_group='Schedule')
    c.argument('open_shifts_enabled', arg_type=get_three_state_flag(), help='Indicates whether open shifts are '
               'enabled for the schedule.', arg_group='Schedule')
    c.argument('provision_status', arg_type=get_enum_type(['NotStarted', 'Running', 'Completed', 'Failed']),
               help='', arg_group='Schedule')
    c.argument('provision_status_code', type=str, help='Additional information about why schedule provisioning '
               'failed.', arg_group='Schedule')
    c.argument('swap_shifts_requests_enabled', arg_type=get_three_state_flag(), help='Indicates whether swap '
               'shifts requests are enabled for the schedule.', arg_group='Schedule')
    c.argument('time_clock_enabled', arg_type=get_three_state_flag(), help='Indicates whether time clock is '
               'enabled for the schedule.', arg_group='Schedule')
    c.argument('time_off_requests_enabled', arg_type=get_three_state_flag(), help='Indicates whether time off '
               'requests are enabled for the schedule.', arg_group='Schedule')
    c.argument('time_zone', type=str, help='Indicates the time zone of the schedule team using tz database format. '
               'Required.', arg_group='Schedule')
    c.argument('workforce_integration_ids', nargs='+', help='', arg_group='Schedule')
    c.argument('offer_shift_requests', action=AddOfferShiftRequests, nargs='+', help='', arg_group='Schedule')
    c.argument('open_shift_change_requests', action=AddOpenShiftChangeRequests, nargs='+', help='',
               arg_group='Schedule')
    c.argument('open_shifts', type=validate_file_or_dict, help=' Expected value: json-string/@json-file.',
               arg_group='Schedule')
    c.argument('scheduling_groups', action=AddSchedulingGroups, nargs='+', help='The logical grouping of users in '
               'the schedule (usually by role).', arg_group='Schedule')
    c.argument('shifts', type=validate_file_or_dict, help='The shifts in the schedule. Expected value: '
               'json-string/@json-file.', arg_group='Schedule')
    c.argument('swap_shifts_change_requests', action=AddSwapShiftsChangeRequests, nargs='+', help='',
               arg_group='Schedule')
    c.argument('time_cards', type=validate_file_or_dict, help=' Expected value: json-string/@json-file.',
               arg_group='Schedule')
    c.argument('time_off_reasons', action=AddTimeOffReasons, nargs='+', help='The set of reasons for a time off in '
               'the schedule.', arg_group='Schedule')
    c.argument('time_off_requests', action=AddTimeOffRequests, nargs='+', help='', arg_group='Schedule')
    c.argument('times_off', type=validate_file_or_dict, help='The instances of times off in the schedule. Expected '
               'value: json-string/@json-file.', arg_group='Schedule')
    c.argument('approved_location', action=AddApprovedLocation, nargs='+', help='geoCoordinates',
               arg_group='Schedule Time Clock Settings')
    c.argument('show_in_teams_search_and_suggestions', arg_type=get_three_state_flag(), help='',
               arg_group='Discovery Settings')
# Argument registration for team-level actions (delete/show/archive/clone/
# complete-migration) and for `teams team create-channel`.
with self.argument_context('teams team delete-team') as c:
    c.argument('team_id', type=str, help='key: id of team')
    c.argument('if_match', type=str, help='ETag')
with self.argument_context('teams team show-team') as c:
    c.argument('team_id', type=str, help='key: id of team')
    c.argument('select', nargs='+', help='Select properties to be returned')
    c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('teams team archive') as c:
    c.argument('team_id', type=str, help='key: id of team')
    c.argument('should_set_spo_site_read_only_for_members', arg_type=get_three_state_flag(), help='')
with self.argument_context('teams team clone') as c:
    c.argument('team_id', type=str, help='key: id of team')
    c.argument('display_name', type=str, help='')
    c.argument('description', type=str, help='')
    c.argument('mail_nickname', type=str, help='')
    c.argument('classification', type=str, help='')
    c.argument('visibility', arg_type=get_enum_type(['private', 'public', 'hiddenMembership',
                                                     'unknownFutureValue']), help='')
    c.argument('parts_to_clone', arg_type=get_enum_type(['apps', 'tabs', 'settings', 'channels', 'members']),
               help='')
with self.argument_context('teams team complete-migration') as c:
    c.argument('team_id', type=str, help='key: id of team')
with self.argument_context('teams team create-channel') as c:
    c.argument('team_id', type=str, help='key: id of team')
    c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
    c.argument('created_date_time', help='Read only. Timestamp at which the channel was created.')
    c.argument('description', type=str, help='Optional textual description for the channel.')
    c.argument('display_name', type=str, help='Channel name as it will appear to the user in Microsoft Teams.')
    c.argument('email', type=str, help='The email address for sending messages to the channel. Read-only.')
    c.argument('is_favorite_by_default', arg_type=get_three_state_flag(), help='Indicates whether the channel '
               'should automatically be marked \'favorite\' for all members of the team. Can only be set '
               'programmatically with Create team. Default: false.')
    c.argument('membership_type', arg_type=get_enum_type(['standard', 'private', 'unknownFutureValue']), help='')
    c.argument('moderation_settings', action=AddModerationSettings, nargs='+', help='channelModerationSettings')
    c.argument('web_url', type=str, help='A hyperlink that will go to the channel in Microsoft Teams. This is the '
               'URL that you get when you right-click a channel in Microsoft Teams and select Get link to channel. '
               'This URL should be treated as an opaque blob, and not parsed. Read-only.')
    c.argument('files_folder', type=validate_file_or_dict,
               help='driveItem Expected value: json-string/@json-file.')
    c.argument('members', action=AddTeamsMembers, nargs='+', help='')
    c.argument('messages', type=validate_file_or_dict, help='A collection of all the messages in the channel. A '
               'navigation property. Nullable. Expected value: json-string/@json-file.')
    c.argument('tabs', type=validate_file_or_dict, help='A collection of all the tabs in the channel. A navigation '
               'property. Expected value: json-string/@json-file.')
    # Arguments for `teams team create-installed-app`. Nested teamsApp /
    # teamsAppDefinition properties are flattened into top-level flags and kept
    # apart with arg_group labels; id1 / microsoft_graph_* names disambiguate
    # flattened fields that would otherwise collide (e.g. two display_name's).
    with self.argument_context('teams team create-installed-app') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
        c.argument('microsoft_graph_entity_id', type=str, help='Read-only.', arg_group='Teams App Definition')
        c.argument('azure_ad_app_id', type=str, help='', arg_group='Teams App Definition')
        c.argument('description', type=str, help='', arg_group='Teams App Definition')
        c.argument('display_name', type=str, help='The name of the app provided by the app developer.',
                   arg_group='Teams App Definition')
        c.argument('last_modified_date_time', help='', arg_group='Teams App Definition')
        c.argument('publishing_state', arg_type=get_enum_type(['submitted', 'rejected', 'published',
                                                               'unknownFutureValue']), help='', arg_group='Teams App '
                   'Definition')
        c.argument('shortdescription', type=str, help='', arg_group='Teams App Definition')
        c.argument('teams_app_id', type=str, help='The ID from the Teams app manifest.', arg_group='Teams App '
                   'Definition')
        c.argument('version', type=str, help='The version number of the application.',
                   arg_group='Teams App Definition')
        c.argument('application', action=AddApplication, nargs='+', help='identity', arg_group='Teams App Definition '
                   'Created By')
        c.argument('device', action=AddApplication, nargs='+', help='identity', arg_group='Teams App Definition '
                   'Created By')
        c.argument('user', action=AddApplication, nargs='+', help='identity', arg_group='Teams App Definition Created '
                   'By')
        c.argument('id1', type=str, help='Read-only.', arg_group='Teams App')
        c.argument('microsoft_graph_teams_app_display_name', type=str, help='The name of the catalog app provided by '
                   'the app developer in the Microsoft Teams zip app package.', arg_group='Teams App')
        c.argument('distribution_method', arg_type=get_enum_type(['store', 'organization', 'sideloaded',
                                                                  'unknownFutureValue']), help='', arg_group='Teams '
                   'App')
        c.argument('external_id', type=str, help='The ID of the catalog provided by the app developer in the Microsoft '
                   'Teams zip app package.', arg_group='Teams App')
        c.argument('app_definitions', type=validate_file_or_dict, help='The details for each version of the app. '
                   'Expected value: json-string/@json-file.', arg_group='Teams App')
    # Arguments for the remaining `teams team create-*` commands.
    with self.argument_context('teams team create-member') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
        c.argument('display_name', type=str, help='The display name of the user.')
        c.argument('roles', nargs='+', help='The roles for that user.')
    with self.argument_context('teams team create-operation') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
        c.argument('attempts_count', type=int, help='Number of times the operation was attempted before being marked '
                   'successful or failed.')
        c.argument('created_date_time', help='Time when the operation was created.')
        c.argument('error', action=AddError, nargs='+', help='operationError')
        c.argument('last_action_date_time', help='Time when the async operation was last updated.')
        c.argument('operation_type', arg_type=get_enum_type(['invalid', 'cloneTeam', 'archiveTeam', 'unarchiveTeam',
                                                             'createTeam', 'unknownFutureValue']), help='')
        c.argument('status', arg_type=get_enum_type(['invalid', 'notStarted', 'inProgress', 'succeeded', 'failed',
                                                     'unknownFutureValue']), help='')
        c.argument('target_resource_id', type=str, help='The ID of the object that\'s created or modified as result of '
                   'this async operation, typically a team.')
        c.argument('target_resource_location', type=str, help='The location of the object that\'s created or modified '
                   'as result of this async operation. This URL should be treated as an opaque value and not parsed '
                   'into its component paths.')
    with self.argument_context('teams team create-ref-owner') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('body', type=validate_file_or_dict, help='New navigation property ref value Expected value: '
                   'json-string/@json-file.')
    # Arguments for the `teams team delete-*` commands: each takes the team key,
    # the key of the child entity (where applicable), and an optional ETag.
    with self.argument_context('teams team delete-channel') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('channel_id', type=str, help='key: id of channel')
        c.argument('if_match', type=str, help='ETag')
    with self.argument_context('teams team delete-installed-app') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('teams_app_installation_id', type=str, help='key: id of teamsAppInstallation')
        c.argument('if_match', type=str, help='ETag')
    with self.argument_context('teams team delete-member') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('conversation_member_id', type=str, help='key: id of conversationMember')
        c.argument('if_match', type=str, help='ETag')
    with self.argument_context('teams team delete-operation') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('teams_async_operation_id', type=str, help='key: id of teamsAsyncOperation')
        c.argument('if_match', type=str, help='ETag')
    with self.argument_context('teams team delete-photo') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('if_match', type=str, help='ETag')
    with self.argument_context('teams team delete-primary-channel') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('if_match', type=str, help='ETag')
    with self.argument_context('teams team delete-ref-group') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('if_match', type=str, help='ETag')
    with self.argument_context('teams team delete-ref-template') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('if_match', type=str, help='ETag')
    with self.argument_context('teams team delete-schedule') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('if_match', type=str, help='ETag')
    # Arguments for the `teams team list-*` commands: OData-style query options
    # (orderby/select/expand); the ref-owner listing supports ordering only.
    with self.argument_context('teams team list-channel') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('orderby', nargs='+', help='Order items by property values')
        c.argument('select', nargs='+', help='Select properties to be returned')
        c.argument('expand', nargs='+', help='Expand related entities')
    with self.argument_context('teams team list-installed-app') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('orderby', nargs='+', help='Order items by property values')
        c.argument('select', nargs='+', help='Select properties to be returned')
        c.argument('expand', nargs='+', help='Expand related entities')
    with self.argument_context('teams team list-member') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('orderby', nargs='+', help='Order items by property values')
        c.argument('select', nargs='+', help='Select properties to be returned')
        c.argument('expand', nargs='+', help='Expand related entities')
    with self.argument_context('teams team list-operation') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('orderby', nargs='+', help='Order items by property values')
        c.argument('select', nargs='+', help='Select properties to be returned')
        c.argument('expand', nargs='+', help='Expand related entities')
    with self.argument_context('teams team list-owner') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('orderby', nargs='+', help='Order items by property values')
        c.argument('select', nargs='+', help='Select properties to be returned')
        c.argument('expand', nargs='+', help='Expand related entities')
    with self.argument_context('teams team list-ref-owner') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('orderby', nargs='+', help='Order items by property values')
    # Arguments for the notification / content-upload / ref-setter commands.
    with self.argument_context('teams team send-activity-notification') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('topic', action=AddTopic, nargs='+', help='teamworkActivityTopic')
        c.argument('activity_type', type=str, help='')
        c.argument('chain_id', type=int, help='')
        c.argument('preview_text', action=AddBody, nargs='+', help='itemBody')
        c.argument('template_parameters', action=AddTeamsTemplateParameters, nargs='+', help='')
        c.argument('recipient', type=validate_file_or_dict, help='teamworkNotificationRecipient Expected value: '
                   'json-string/@json-file.')
    with self.argument_context('teams team set-photo-content') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('data', help='New media content.')  # raw bytes payload; no type= so argparse keeps the string
    with self.argument_context('teams team set-ref-group') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('body', type=validate_file_or_dict, help='New navigation property ref values Expected value: '
                   'json-string/@json-file.')
    with self.argument_context('teams team set-ref-template') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('body', type=validate_file_or_dict, help='New navigation property ref values Expected value: '
                   'json-string/@json-file.')
    # Arguments for the `teams team show-*` commands and `unarchive`: entity keys
    # plus OData select/expand where the underlying GET supports them.
    with self.argument_context('teams team show-channel') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('channel_id', type=str, help='key: id of channel')
        c.argument('select', nargs='+', help='Select properties to be returned')
        c.argument('expand', nargs='+', help='Expand related entities')
    with self.argument_context('teams team show-group') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('select', nargs='+', help='Select properties to be returned')
        c.argument('expand', nargs='+', help='Expand related entities')
    with self.argument_context('teams team show-installed-app') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('teams_app_installation_id', type=str, help='key: id of teamsAppInstallation')
        c.argument('select', nargs='+', help='Select properties to be returned')
        c.argument('expand', nargs='+', help='Expand related entities')
    with self.argument_context('teams team show-member') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('conversation_member_id', type=str, help='key: id of conversationMember')
        c.argument('select', nargs='+', help='Select properties to be returned')
        c.argument('expand', nargs='+', help='Expand related entities')
    with self.argument_context('teams team show-operation') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('teams_async_operation_id', type=str, help='key: id of teamsAsyncOperation')
        c.argument('select', nargs='+', help='Select properties to be returned')
        c.argument('expand', nargs='+', help='Expand related entities')
    with self.argument_context('teams team show-photo') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('select', nargs='+', help='Select properties to be returned')
        c.argument('expand', nargs='+', help='Expand related entities')
    with self.argument_context('teams team show-photo-content') as c:
        c.argument('team_id', type=str, help='key: id of team')
    with self.argument_context('teams team show-primary-channel') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('select', nargs='+', help='Select properties to be returned')
        c.argument('expand', nargs='+', help='Expand related entities')
    with self.argument_context('teams team show-ref-group') as c:
        c.argument('team_id', type=str, help='key: id of team')
    with self.argument_context('teams team show-ref-template') as c:
        c.argument('team_id', type=str, help='key: id of team')
    with self.argument_context('teams team show-schedule') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('select', nargs='+', help='Select properties to be returned')
        c.argument('expand', nargs='+', help='Expand related entities')
    with self.argument_context('teams team show-template') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('select', nargs='+', help='Select properties to be returned')
        c.argument('expand', nargs='+', help='Expand related entities')
    with self.argument_context('teams team unarchive') as c:
        c.argument('team_id', type=str, help='key: id of team')
    # Arguments for `teams team update-channel` — same flattened channel shape
    # as create-channel, plus the channel key being updated.
    with self.argument_context('teams team update-channel') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('channel_id', type=str, help='key: id of channel')
        c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
        c.argument('created_date_time', help='Read only. Timestamp at which the channel was created.')
        c.argument('description', type=str, help='Optional textual description for the channel.')
        c.argument('display_name', type=str, help='Channel name as it will appear to the user in Microsoft Teams.')
        c.argument('email', type=str, help='The email address for sending messages to the channel. Read-only.')
        c.argument('is_favorite_by_default', arg_type=get_three_state_flag(), help='Indicates whether the channel '
                   'should automatically be marked \'favorite\' for all members of the team. Can only be set '
                   'programmatically with Create team. Default: false.')
        c.argument('membership_type', arg_type=get_enum_type(['standard', 'private', 'unknownFutureValue']), help='')
        c.argument('moderation_settings', action=AddModerationSettings, nargs='+', help='channelModerationSettings')
        c.argument('web_url', type=str, help='A hyperlink that will go to the channel in Microsoft Teams. This is the '
                   'URL that you get when you right-click a channel in Microsoft Teams and select Get link to channel. '
                   'This URL should be treated as an opaque blob, and not parsed. Read-only.')
        c.argument('files_folder', type=validate_file_or_dict,
                   help='driveItem Expected value: json-string/@json-file.')
        c.argument('members', action=AddTeamsMembers, nargs='+', help='')
        c.argument('messages', type=validate_file_or_dict, help='A collection of all the messages in the channel. A '
                   'navigation property. Nullable. Expected value: json-string/@json-file.')
        c.argument('tabs', type=validate_file_or_dict, help='A collection of all the tabs in the channel. A navigation '
                   'property. Expected value: json-string/@json-file.')
    # Arguments for `teams team update-installed-app` — mirrors
    # create-installed-app with the installation key added.
    with self.argument_context('teams team update-installed-app') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('teams_app_installation_id', type=str, help='key: id of teamsAppInstallation')
        c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
        c.argument('microsoft_graph_entity_id', type=str, help='Read-only.', arg_group='Teams App Definition')
        c.argument('azure_ad_app_id', type=str, help='', arg_group='Teams App Definition')
        c.argument('description', type=str, help='', arg_group='Teams App Definition')
        c.argument('display_name', type=str, help='The name of the app provided by the app developer.',
                   arg_group='Teams App Definition')
        c.argument('last_modified_date_time', help='', arg_group='Teams App Definition')
        c.argument('publishing_state', arg_type=get_enum_type(['submitted', 'rejected', 'published',
                                                               'unknownFutureValue']), help='', arg_group='Teams App '
                   'Definition')
        c.argument('shortdescription', type=str, help='', arg_group='Teams App Definition')
        c.argument('teams_app_id', type=str, help='The ID from the Teams app manifest.', arg_group='Teams App '
                   'Definition')
        c.argument('version', type=str, help='The version number of the application.',
                   arg_group='Teams App Definition')
        c.argument('application', action=AddApplication, nargs='+', help='identity', arg_group='Teams App Definition '
                   'Created By')
        c.argument('device', action=AddApplication, nargs='+', help='identity', arg_group='Teams App Definition '
                   'Created By')
        c.argument('user', action=AddApplication, nargs='+', help='identity', arg_group='Teams App Definition Created '
                   'By')
        c.argument('id1', type=str, help='Read-only.', arg_group='Teams App')
        c.argument('microsoft_graph_teams_app_display_name', type=str, help='The name of the catalog app provided by '
                   'the app developer in the Microsoft Teams zip app package.', arg_group='Teams App')
        c.argument('distribution_method', arg_type=get_enum_type(['store', 'organization', 'sideloaded',
                                                                  'unknownFutureValue']), help='', arg_group='Teams '
                   'App')
        c.argument('external_id', type=str, help='The ID of the catalog provided by the app developer in the Microsoft '
                   'Teams zip app package.', arg_group='Teams App')
        c.argument('app_definitions', type=validate_file_or_dict, help='The details for each version of the app. '
                   'Expected value: json-string/@json-file.', arg_group='Teams App')
    # Arguments for `teams team update-member`, `update-operation`, `update-photo`.
    with self.argument_context('teams team update-member') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('conversation_member_id', type=str, help='key: id of conversationMember')
        c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
        c.argument('display_name', type=str, help='The display name of the user.')
        c.argument('roles', nargs='+', help='The roles for that user.')
    with self.argument_context('teams team update-operation') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('teams_async_operation_id', type=str, help='key: id of teamsAsyncOperation')
        c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
        c.argument('attempts_count', type=int, help='Number of times the operation was attempted before being marked '
                   'successful or failed.')
        c.argument('created_date_time', help='Time when the operation was created.')
        c.argument('error', action=AddError, nargs='+', help='operationError')
        c.argument('last_action_date_time', help='Time when the async operation was last updated.')
        c.argument('operation_type', arg_type=get_enum_type(['invalid', 'cloneTeam', 'archiveTeam', 'unarchiveTeam',
                                                             'createTeam', 'unknownFutureValue']), help='')
        c.argument('status', arg_type=get_enum_type(['invalid', 'notStarted', 'inProgress', 'succeeded', 'failed',
                                                     'unknownFutureValue']), help='')
        c.argument('target_resource_id', type=str, help='The ID of the object that\'s created or modified as result of '
                   'this async operation, typically a team.')
        c.argument('target_resource_location', type=str, help='The location of the object that\'s created or modified '
                   'as result of this async operation. This URL should be treated as an opaque value and not parsed '
                   'into its component paths.')
    with self.argument_context('teams team update-photo') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
        c.argument('height', type=int, help='The height of the photo. Read-only.')
        c.argument('width', type=int, help='The width of the photo. Read-only.')
    # Arguments for `teams team update-primary-channel` — same channel shape as
    # update-channel, but targets the team's primary (General) channel so no
    # channel_id key is taken.
    with self.argument_context('teams team update-primary-channel') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
        c.argument('created_date_time', help='Read only. Timestamp at which the channel was created.')
        c.argument('description', type=str, help='Optional textual description for the channel.')
        c.argument('display_name', type=str, help='Channel name as it will appear to the user in Microsoft Teams.')
        c.argument('email', type=str, help='The email address for sending messages to the channel. Read-only.')
        c.argument('is_favorite_by_default', arg_type=get_three_state_flag(), help='Indicates whether the channel '
                   'should automatically be marked \'favorite\' for all members of the team. Can only be set '
                   'programmatically with Create team. Default: false.')
        c.argument('membership_type', arg_type=get_enum_type(['standard', 'private', 'unknownFutureValue']), help='')
        c.argument('moderation_settings', action=AddModerationSettings, nargs='+', help='channelModerationSettings')
        c.argument('web_url', type=str, help='A hyperlink that will go to the channel in Microsoft Teams. This is the '
                   'URL that you get when you right-click a channel in Microsoft Teams and select Get link to channel. '
                   'This URL should be treated as an opaque blob, and not parsed. Read-only.')
        c.argument('files_folder', type=validate_file_or_dict,
                   help='driveItem Expected value: json-string/@json-file.')
        c.argument('members', action=AddTeamsMembers, nargs='+', help='')
        c.argument('messages', type=validate_file_or_dict, help='A collection of all the messages in the channel. A '
                   'navigation property. Nullable. Expected value: json-string/@json-file.')
        c.argument('tabs', type=validate_file_or_dict, help='A collection of all the tabs in the channel. A navigation '
                   'property. Expected value: json-string/@json-file.')
    # Arguments for `teams team update-schedule`: scalar schedule toggles plus
    # child collections supplied via Add* actions or raw JSON.
    with self.argument_context('teams team update-schedule') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
        c.argument('enabled', arg_type=get_three_state_flag(), help='Indicates whether the schedule is enabled for the '
                   'team. Required.')
        c.argument('offer_shift_requests_enabled', arg_type=get_three_state_flag(), help='Indicates whether offer '
                   'shift requests are enabled for the schedule.')
        c.argument('open_shifts_enabled', arg_type=get_three_state_flag(), help='Indicates whether open shifts are '
                   'enabled for the schedule.')
        c.argument('provision_status', arg_type=get_enum_type(['NotStarted', 'Running', 'Completed', 'Failed']),
                   help='')
        c.argument('provision_status_code', type=str, help='Additional information about why schedule provisioning '
                   'failed.')
        c.argument('swap_shifts_requests_enabled', arg_type=get_three_state_flag(), help='Indicates whether swap '
                   'shifts requests are enabled for the schedule.')
        c.argument('time_clock_enabled', arg_type=get_three_state_flag(), help='Indicates whether time clock is '
                   'enabled for the schedule.')
        c.argument('time_off_requests_enabled', arg_type=get_three_state_flag(), help='Indicates whether time off '
                   'requests are enabled for the schedule.')
        c.argument('time_zone', type=str, help='Indicates the time zone of the schedule team using tz database format. '
                   'Required.')
        c.argument('workforce_integration_ids', nargs='+', help='')
        c.argument('offer_shift_requests', action=AddOfferShiftRequests, nargs='+', help='')
        c.argument('open_shift_change_requests', action=AddOpenShiftChangeRequests, nargs='+', help='')
        c.argument('open_shifts', type=validate_file_or_dict, help=' Expected value: json-string/@json-file.')
        c.argument('scheduling_groups', action=AddSchedulingGroups, nargs='+', help='The logical grouping of users in '
                   'the schedule (usually by role).')
        c.argument('shifts', type=validate_file_or_dict, help='The shifts in the schedule. Expected value: '
                   'json-string/@json-file.')
        c.argument('swap_shifts_change_requests', action=AddSwapShiftsChangeRequests, nargs='+', help='')
        c.argument('time_cards', type=validate_file_or_dict, help=' Expected value: json-string/@json-file.')
        c.argument('time_off_reasons', action=AddTimeOffReasons, nargs='+', help='The set of reasons for a time off in '
                   'the schedule.')
        c.argument('time_off_requests', action=AddTimeOffRequests, nargs='+', help='')
        c.argument('times_off', type=validate_file_or_dict, help='The instances of times off in the schedule. Expected '
                   'value: json-string/@json-file.')
        c.argument('approved_location', action=AddApprovedLocation, nargs='+', help='geoCoordinates', arg_group='Time '
                   'Clock Settings')
    # Argument metadata for the `teams team-channel` command group.
    with self.argument_context('teams team-channel all-message') as c:
        c.argument('team_id', type=str, help='key: id of team')
    with self.argument_context('teams team-channel complete-migration') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('channel_id', type=str, help='key: id of channel')
    with self.argument_context('teams team-channel create-member') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('channel_id', type=str, help='key: id of channel')
        c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
        c.argument('display_name', type=str, help='The display name of the user.')
        c.argument('roles', nargs='+', help='The roles for that user.')
    # Arguments for `teams team-channel create-message`: chatMessage properties,
    # with the nested policyViolation and from-identity objects flattened into
    # the 'Policy Violation' and 'From' arg groups.
    with self.argument_context('teams team-channel create-message') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('channel_id', type=str, help='key: id of channel')
        c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
        c.argument('attachments', action=AddAttachments, nargs='+', help='Attached files. Attachments are currently '
                   'read-only – sending attachments is not supported.')
        c.argument('body', action=AddBody, nargs='+', help='itemBody')
        c.argument('channel_identity', action=AddChannelIdentity, nargs='+', help='channelIdentity')
        c.argument('chat_id', type=str, help='')
        c.argument('created_date_time', help='Read only. Timestamp of when the chat message was created.')
        c.argument('deleted_date_time', help='Read only. Timestamp at which the chat message was deleted, or null if '
                   'not deleted.')
        c.argument('etag', type=str, help='Read-only. Version number of the chat message.')
        c.argument('importance', arg_type=get_enum_type(['normal', 'high', 'urgent']), help='')
        c.argument('last_edited_date_time', help='Read only. Timestamp when edits to the chat message were made. '
                   'Triggers an \'Edited\' flag in the Microsoft Teams UI. If no edits are made the value is null.')
        c.argument('last_modified_date_time', help='Read only. Timestamp when the chat message is created (initial '
                   'setting) or edited, including when a reaction is added or removed.')
        c.argument('locale', type=str, help='Locale of the chat message set by the client.')
        c.argument('mentions', type=validate_file_or_dict, help='List of entities mentioned in the chat message. '
                   'Currently supports user, bot, team, channel. Expected value: json-string/@json-file.')
        c.argument('message_type', arg_type=get_enum_type(['message', 'chatEvent', 'typing']), help='')
        c.argument('reactions', type=validate_file_or_dict, help=' Expected value: json-string/@json-file.')
        c.argument('reply_to_id', type=str, help='Read-only. Id of the parent chat message or root chat message of the '
                   'thread. (Only applies to chat messages in channels not chats)')
        c.argument('subject', type=str, help='The subject of the chat message, in plaintext.')
        c.argument('summary', type=str, help='Summary text of the chat message that could be used for push '
                   'notifications and summary views or fall back views. Only applies to channel chat messages, not '
                   'chat messages in a chat.')
        c.argument('web_url', type=str, help='')
        c.argument('hosted_contents', action=AddHostedContents, nargs='+', help='')
        c.argument('replies', type=validate_file_or_dict, help=' Expected value: json-string/@json-file.')
        c.argument('dlp_action', arg_type=get_enum_type(['none', 'notifySender', 'blockAccess',
                                                         'blockAccessExternal']), help='',
                   arg_group='Policy Violation')
        c.argument('justification_text', type=str, help='Justification text provided by the sender of the message when '
                   'overriding a policy violation.', arg_group='Policy Violation')
        c.argument('policy_tip', action=AddPolicyTip, nargs='+', help='chatMessagePolicyViolationPolicyTip',
                   arg_group='Policy Violation')
        c.argument('user_action', arg_type=get_enum_type(['none', 'override', 'reportFalsePositive']), help='',
                   arg_group='Policy Violation')
        c.argument('verdict_details', arg_type=get_enum_type(['none', 'allowFalsePositiveOverride',
                                                              'allowOverrideWithoutJustification',
                                                              'allowOverrideWithJustification']), help='',
                   arg_group='Policy Violation')
        c.argument('application', action=AddApplication, nargs='+', help='identity', arg_group='From')
        c.argument('device', action=AddApplication, nargs='+', help='identity', arg_group='From')
        c.argument('user', action=AddApplication, nargs='+', help='identity', arg_group='From')
    # Arguments for `teams team-channel create-tab`; the nested teamsApp object
    # is flattened into the 'Teams App' arg group.
    with self.argument_context('teams team-channel create-tab') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('channel_id', type=str, help='key: id of channel')
        c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
        c.argument('configuration', action=AddConfiguration, nargs='+', help='teamsTabConfiguration')
        c.argument('display_name', type=str, help='Name of the tab.')
        c.argument('message_id', type=str, help='')
        c.argument('sort_order_index', type=str, help='')
        c.argument('teams_app_id', type=str, help='')
        c.argument('web_url', type=str, help='Deep link URL of the tab instance. Read only.')
        c.argument('microsoft_graph_entity_id', type=str, help='Read-only.', arg_group='Teams App')
        c.argument('microsoft_graph_teams_app_display_name', type=str, help='The name of the catalog app provided by '
                   'the app developer in the Microsoft Teams zip app package.', arg_group='Teams App')
        c.argument('distribution_method', arg_type=get_enum_type(['store', 'organization', 'sideloaded',
                                                                  'unknownFutureValue']), help='', arg_group='Teams '
                   'App')
        c.argument('external_id', type=str, help='The ID of the catalog provided by the app developer in the Microsoft '
                   'Teams zip app package.', arg_group='Teams App')
        c.argument('app_definitions', type=validate_file_or_dict, help='The details for each version of the app. '
                   'Expected value: json-string/@json-file.', arg_group='Teams App')
    # Arguments for the `teams team-channel delete-*` (keys + ETag) and
    # `list-*` (keys + OData query options) commands.
    with self.argument_context('teams team-channel delete-file-folder') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('channel_id', type=str, help='key: id of channel')
        c.argument('if_match', type=str, help='ETag')
    with self.argument_context('teams team-channel delete-member') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('channel_id', type=str, help='key: id of channel')
        c.argument('conversation_member_id', type=str, help='key: id of conversationMember')
        c.argument('if_match', type=str, help='ETag')
    with self.argument_context('teams team-channel delete-message') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('channel_id', type=str, help='key: id of channel')
        c.argument('chat_message_id', type=str, help='key: id of chatMessage')
        c.argument('if_match', type=str, help='ETag')
    with self.argument_context('teams team-channel delete-tab') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('channel_id', type=str, help='key: id of channel')
        c.argument('teams_tab_id', type=str, help='key: id of teamsTab')
        c.argument('if_match', type=str, help='ETag')
    with self.argument_context('teams team-channel list-member') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('channel_id', type=str, help='key: id of channel')
        c.argument('orderby', nargs='+', help='Order items by property values')
        c.argument('select', nargs='+', help='Select properties to be returned')
        c.argument('expand', nargs='+', help='Expand related entities')
    with self.argument_context('teams team-channel list-message') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('channel_id', type=str, help='key: id of channel')
        c.argument('orderby', nargs='+', help='Order items by property values')
        c.argument('select', nargs='+', help='Select properties to be returned')
        c.argument('expand', nargs='+', help='Expand related entities')
    with self.argument_context('teams team-channel list-tab') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('channel_id', type=str, help='key: id of channel')
        c.argument('orderby', nargs='+', help='Order items by property values')
        c.argument('select', nargs='+', help='Select properties to be returned')
        c.argument('expand', nargs='+', help='Expand related entities')
    # Argument registrations for the `teams team-channel` set-content and show-*
    # commands. `show-*` commands take --select/--expand OData options; the
    # content upload takes raw media via --data.
    with self.argument_context('teams team-channel set-file-folder-content') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('channel_id', type=str, help='key: id of channel')
        c.argument('data', help='New media content.')
    with self.argument_context('teams team-channel show-file-folder') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('channel_id', type=str, help='key: id of channel')
        c.argument('select', nargs='+', help='Select properties to be returned')
        c.argument('expand', nargs='+', help='Expand related entities')
    with self.argument_context('teams team-channel show-file-folder-content') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('channel_id', type=str, help='key: id of channel')
    with self.argument_context('teams team-channel show-member') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('channel_id', type=str, help='key: id of channel')
        c.argument('conversation_member_id', type=str, help='key: id of conversationMember')
        c.argument('select', nargs='+', help='Select properties to be returned')
        c.argument('expand', nargs='+', help='Expand related entities')
    with self.argument_context('teams team-channel show-message') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('channel_id', type=str, help='key: id of channel')
        c.argument('chat_message_id', type=str, help='key: id of chatMessage')
        c.argument('select', nargs='+', help='Select properties to be returned')
        c.argument('expand', nargs='+', help='Expand related entities')
    with self.argument_context('teams team-channel show-tab') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('channel_id', type=str, help='key: id of channel')
        c.argument('teams_tab_id', type=str, help='key: id of teamsTab')
        c.argument('select', nargs='+', help='Select properties to be returned')
        c.argument('expand', nargs='+', help='Expand related entities')
    # Argument registrations for `teams team-channel update-file-folder`.
    # The Graph driveItem resource is flattened onto the command line: top-level
    # properties become plain options, and each nested facet (parentReference,
    # createdBy/lastModifiedBy identity sets, workbook, remoteItem, file, folder,
    # package, shared, analytics, ...) becomes an arg_group. Complex values are
    # passed either as JSON (validate_file_or_dict) or via the generated
    # Add* argparse actions with nargs='+'.
    with self.argument_context('teams team-channel update-file-folder') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('channel_id', type=str, help='key: id of channel')
        # 'id' shadows a builtin-ish name, so the Python dest is id_ with an
        # explicit --id option.
        c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
        c.argument('created_date_time', help='Date and time of item creation. Read-only.')
        c.argument('description', type=str, help='Provides a user-visible description of the item. Optional.')
        c.argument('e_tag', type=str, help='ETag for the item. Read-only.')
        c.argument('last_modified_date_time', help='Date and time the item was last modified. Read-only.')
        c.argument('name', type=str, help='The name of the item. Read-write.')
        c.argument('web_url', type=str, help='URL that displays the resource in the browser. Read-only.')
        c.argument('created_by_user', type=validate_file_or_dict, help='Represents an Azure Active Directory user '
                   'object. Expected value: json-string/@json-file.')
        c.argument('last_modified_by_user', type=validate_file_or_dict, help='Represents an Azure Active Directory '
                   'user object. Expected value: json-string/@json-file.')
        # parentReference facet.
        c.argument('drive_id', type=str, help='Unique identifier of the drive instance that contains the item. '
                   'Read-only.', arg_group='Parent Reference')
        c.argument('drive_type', type=str, help='Identifies the type of drive. See [drive][] resource for values.',
                   arg_group='Parent Reference')
        c.argument('microsoft_graph_item_reference_id', type=str, help='Unique identifier of the item in the drive. '
                   'Read-only.', arg_group='Parent Reference')
        c.argument('microsoft_graph_item_reference_name', type=str, help='The name of the item being referenced. '
                   'Read-only.', arg_group='Parent Reference')
        c.argument('path', type=str, help='Path that can be used to navigate to the item. Read-only.',
                   arg_group='Parent Reference')
        c.argument('share_id', type=str, help='A unique identifier for a shared resource that can be accessed via the '
                   '[Shares][] API.', arg_group='Parent Reference')
        c.argument('sharepoint_ids', action=AddSharepointIds, nargs='+', help='sharepointIds', arg_group='Parent '
                   'Reference')
        c.argument('site_id', type=str, help='', arg_group='Parent Reference')
        # lastModifiedBy / createdBy identitySet facets.
        c.argument('application', action=AddApplication, nargs='+', help='identity', arg_group='Last Modified By')
        c.argument('device', action=AddApplication, nargs='+', help='identity', arg_group='Last Modified By')
        c.argument('user', action=AddApplication, nargs='+', help='identity', arg_group='Last Modified By')
        c.argument('microsoft_graph_identity_application', action=AddApplication, nargs='+', help='identity',
                   arg_group='Created By')
        c.argument('microsoft_graph_identity_device', action=AddApplication, nargs='+', help='identity',
                   arg_group='Created By')
        c.argument('microsoft_graph_identity_user', action=AddApplication, nargs='+', help='identity',
                   arg_group='Created By')
        # Ungrouped driveItem facets and properties.
        c.argument('audio', action=AddAudio, nargs='+', help='audio')
        c.argument('content', help='The content stream, if the item represents a file.')
        c.argument('c_tag', type=str, help='An eTag for the content of the item. This eTag is not changed if only the '
                   'metadata is changed. Note This property is not returned if the item is a folder. Read-only.')
        c.argument('file_system_info', action=AddFileSystemInfo, nargs='+', help='fileSystemInfo')
        c.argument('image', action=AddImage, nargs='+', help='image')
        c.argument('location', arg_type=get_location_type(self.cli_ctx))
        c.argument('photo', action=AddTeamsChannelsPhoto, nargs='+', help='photo')
        c.argument('publication', action=AddPublication, nargs='+', help='publicationFacet')
        c.argument('root', type=validate_file_or_dict, help='root Expected value: json-string/@json-file.')
        c.argument('microsoft_graph_sharepoint_ids', action=AddSharepointIds, nargs='+', help='sharepointIds')
        c.argument('size', type=int, help='Size of the item in bytes. Read-only.')
        c.argument('video', action=AddVideo, nargs='+', help='video')
        c.argument('web_dav_url', type=str, help='WebDAV compatible URL for the item.')
        c.argument('activities', type=validate_file_or_dict, help='The list of recent activities that took place on '
                   'this item. Expected value: json-string/@json-file.')
        c.argument('children', type=validate_file_or_dict, help='Collection containing Item objects for the immediate '
                   'children of Item. Only items representing folders have children. Read-only. Nullable. Expected '
                   'value: json-string/@json-file.')
        c.argument('list_item', type=validate_file_or_dict, help='listItem Expected value: json-string/@json-file.')
        c.argument('permissions', type=validate_file_or_dict, help='The set of permissions for the item. Read-only. '
                   'Nullable. Expected value: json-string/@json-file.')
        c.argument('subscriptions', action=AddSubscriptions, nargs='+', help='The set of subscriptions on the item. '
                   'Only supported on the root of a drive.')
        c.argument('thumbnails', type=validate_file_or_dict, help='Collection containing [ThumbnailSet][] objects '
                   'associated with the item. For more info, see [getting thumbnails][]. Read-only. Nullable. Expected '
                   'value: json-string/@json-file.')
        c.argument('versions', action=AddVersions, nargs='+', help='The list of previous versions of the item. For '
                   'more info, see [getting previous versions][]. Read-only. Nullable.')
        # analytics facet.
        c.argument('microsoft_graph_entity_id', type=str, help='Read-only.', arg_group='Analytics')
        c.argument('all_time', type=validate_file_or_dict, help='itemActivityStat Expected value: '
                   'json-string/@json-file.', arg_group='Analytics')
        c.argument('item_activity_stats', type=validate_file_or_dict, help=' Expected value: json-string/@json-file.',
                   arg_group='Analytics')
        c.argument('last_seven_days', type=validate_file_or_dict, help='itemActivityStat Expected value: '
                   'json-string/@json-file.', arg_group='Analytics')
        # workbook facet.
        c.argument('id1', type=str, help='Read-only.', arg_group='Workbook')
        c.argument('microsoft_graph_workbook_application', action=AddMicrosoftGraphWorkbookApplication, nargs='+',
                   help='workbookApplication', arg_group='Workbook')
        c.argument('comments', type=validate_file_or_dict, help=' Expected value: json-string/@json-file.',
                   arg_group='Workbook')
        c.argument('functions', action=AddFunctions, nargs='+', help='workbookFunctions', arg_group='Workbook')
        c.argument('names', type=validate_file_or_dict, help='Represents a collection of workbook scoped named items '
                   '(named ranges and constants). Read-only. Expected value: json-string/@json-file.',
                   arg_group='Workbook')
        c.argument('operations', type=validate_file_or_dict, help='The status of workbook operations. Getting an '
                   'operation collection is not supported, but you can get the status of a long-running operation if '
                   'the Location header is returned in the response. Read-only. Expected value: '
                   'json-string/@json-file.', arg_group='Workbook')
        c.argument('tables', type=validate_file_or_dict, help='Represents a collection of tables associated with the '
                   'workbook. Read-only. Expected value: json-string/@json-file.', arg_group='Workbook')
        c.argument('worksheets', type=validate_file_or_dict, help='Represents a collection of worksheets associated '
                   'with the workbook. Read-only. Expected value: json-string/@json-file.', arg_group='Workbook')
        c.argument('microsoft_graph_special_folder_name', type=str, help='The unique identifier for this item in the '
                   '/drive/special collection', arg_group='Special Folder')
        # shared facet.
        c.argument('owner', type=validate_file_or_dict, help='identitySet Expected value: json-string/@json-file.',
                   arg_group='Shared')
        c.argument('scope', type=str, help='Indicates the scope of how the item is shared: anonymous, organization, or '
                   'users. Read-only.', arg_group='Shared')
        c.argument('shared_by', type=validate_file_or_dict, help='identitySet Expected value: json-string/@json-file.',
                   arg_group='Shared')
        c.argument('shared_date_time', help='The UTC date and time when the item was shared. Read-only.',
                   arg_group='Shared')
        c.argument('on_click_telemetry_url', type=str, help='A callback URL that can be used to record telemetry '
                   'information. The application should issue a GET on this URL if the user interacts with this item '
                   'to improve the quality of results.', arg_group='Search Result')
        # remoteItem facet — dest names are prefixed to avoid colliding with the
        # same-named top-level driveItem properties above.
        c.argument('created_by', type=validate_file_or_dict,
                   help='identitySet Expected value: json-string/@json-file.', arg_group='Remote Item')
        c.argument('microsoft_graph_remote_item_created_date_time_created_date_time', help='Date and time of item '
                   'creation. Read-only.', arg_group='Remote Item')
        c.argument('file', type=validate_file_or_dict, help='file Expected value: json-string/@json-file.',
                   arg_group='Remote Item')
        c.argument('microsoft_graph_file_system_info_file_system_info', action=AddFileSystemInfo, nargs='+',
                   help='fileSystemInfo', arg_group='Remote Item')
        c.argument('folder', type=validate_file_or_dict, help='folder Expected value: json-string/@json-file.',
                   arg_group='Remote Item')
        c.argument('microsoft_graph_remote_item_id', type=str, help='Unique identifier for the remote item in its '
                   'drive. Read-only.', arg_group='Remote Item')
        c.argument('microsoft_graph_image', action=AddImage, nargs='+', help='image', arg_group='Remote Item')
        c.argument('last_modified_by', type=validate_file_or_dict, help='identitySet Expected value: '
                   'json-string/@json-file.', arg_group='Remote Item')
        c.argument('microsoft_graph_remote_item_last_modified_date_time_last_modified_date_time', help='Date and time '
                   'the item was last modified. Read-only.', arg_group='Remote Item')
        c.argument('microsoft_graph_remote_item_name', type=str, help='Optional. Filename of the remote item. '
                   'Read-only.', arg_group='Remote Item')
        c.argument('package', action=AddPackage, nargs='+', help='package', arg_group='Remote Item')
        c.argument('parent_reference', type=validate_file_or_dict, help='itemReference Expected value: '
                   'json-string/@json-file.', arg_group='Remote Item')
        c.argument('shared', type=validate_file_or_dict, help='shared Expected value: json-string/@json-file.',
                   arg_group='Remote Item')
        c.argument('sharepoint_ids1', action=AddSharepointIds, nargs='+', help='sharepointIds',
                   arg_group='Remote Item')
        c.argument('integer_size', type=int, help='Size of the remote item. Read-only.', arg_group='Remote Item')
        c.argument('special_folder', action=AddSpecialFolder, nargs='+', help='specialFolder',
                   arg_group='Remote Item')
        c.argument('microsoft_graph_video', action=AddVideo, nargs='+', help='video', arg_group='Remote Item')
        c.argument('microsoft_graph_remote_item_web_dav_url_web_dav_url', type=str, help='DAV compatible URL for the '
                   'item.', arg_group='Remote Item')
        c.argument('microsoft_graph_remote_item_web_url', type=str, help='URL that displays the resource in the '
                   'browser. Read-only.', arg_group='Remote Item')
        c.argument('queued_date_time', help='Date and time the pending binary operation was queued in UTC time. '
                   'Read-only.', arg_group='Pending Operations Pending Content Update')
        # package / folder / file / deleted / bundle facets.
        c.argument('type_', options_list=['--type'], type=str, help='A string indicating the type of package. While '
                   'oneNote is the only currently defined value, you should expect other package types to be returned '
                   'and handle them accordingly.', arg_group='Package')
        c.argument('child_count', type=int, help='Number of children contained immediately within this container.',
                   arg_group='Folder')
        c.argument('view', action=AddView, nargs='+', help='folderView', arg_group='Folder')
        c.argument('hashes', action=AddHashes, nargs='+', help='hashes', arg_group='File')
        c.argument('mime_type', type=str, help='The MIME type for the file. This is determined by logic on the server '
                   'and might not be the value provided when the file was uploaded. Read-only.', arg_group='File')
        c.argument('processing_metadata', arg_type=get_three_state_flag(), help='', arg_group='File')
        c.argument('state', type=str, help='Represents the state of the deleted item.', arg_group='Deleted')
        c.argument('album', action=AddAlbum, nargs='+', help='album', arg_group='Bundle')
        c.argument('integer_child_count', type=int, help='', arg_group='Bundle')
    # Argument registrations for `teams team-channel update-member`
    # (conversationMember: display name and roles).
    with self.argument_context('teams team-channel update-member') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('channel_id', type=str, help='key: id of channel')
        c.argument('conversation_member_id', type=str, help='key: id of conversationMember')
        c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
        c.argument('display_name', type=str, help='The display name of the user.')
        c.argument('roles', nargs='+', help='The roles for that user.')
    # Argument registrations for `teams team-channel update-message`.
    # Flattened Graph chatMessage properties; the policyViolation facet and the
    # `from` identitySet are split out into their own arg groups.
    with self.argument_context('teams team-channel update-message') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('channel_id', type=str, help='key: id of channel')
        c.argument('chat_message_id', type=str, help='key: id of chatMessage')
        c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
        c.argument('attachments', action=AddAttachments, nargs='+', help='Attached files. Attachments are currently '
                   'read-only – sending attachments is not supported.')
        c.argument('body', action=AddBody, nargs='+', help='itemBody')
        c.argument('channel_identity', action=AddChannelIdentity, nargs='+', help='channelIdentity')
        c.argument('chat_id', type=str, help='')
        c.argument('created_date_time', help='Read only. Timestamp of when the chat message was created.')
        c.argument('deleted_date_time', help='Read only. Timestamp at which the chat message was deleted, or null if '
                   'not deleted.')
        c.argument('etag', type=str, help='Read-only. Version number of the chat message.')
        c.argument('importance', arg_type=get_enum_type(['normal', 'high', 'urgent']), help='')
        c.argument('last_edited_date_time', help='Read only. Timestamp when edits to the chat message were made. '
                   'Triggers an \'Edited\' flag in the Microsoft Teams UI. If no edits are made the value is null.')
        c.argument('last_modified_date_time', help='Read only. Timestamp when the chat message is created (initial '
                   'setting) or edited, including when a reaction is added or removed.')
        c.argument('locale', type=str, help='Locale of the chat message set by the client.')
        c.argument('mentions', type=validate_file_or_dict, help='List of entities mentioned in the chat message. '
                   'Currently supports user, bot, team, channel. Expected value: json-string/@json-file.')
        c.argument('message_type', arg_type=get_enum_type(['message', 'chatEvent', 'typing']), help='')
        c.argument('reactions', type=validate_file_or_dict, help=' Expected value: json-string/@json-file.')
        c.argument('reply_to_id', type=str, help='Read-only. Id of the parent chat message or root chat message of the '
                   'thread. (Only applies to chat messages in channels not chats)')
        c.argument('subject', type=str, help='The subject of the chat message, in plaintext.')
        c.argument('summary', type=str, help='Summary text of the chat message that could be used for push '
                   'notifications and summary views or fall back views. Only applies to channel chat messages, not '
                   'chat messages in a chat.')
        c.argument('web_url', type=str, help='')
        c.argument('hosted_contents', action=AddHostedContents, nargs='+', help='')
        c.argument('replies', type=validate_file_or_dict, help=' Expected value: json-string/@json-file.')
        # policyViolation facet.
        c.argument('dlp_action', arg_type=get_enum_type(['none', 'notifySender', 'blockAccess',
                                                         'blockAccessExternal']), help='',
                   arg_group='Policy Violation')
        c.argument('justification_text', type=str, help='Justification text provided by the sender of the message when '
                   'overriding a policy violation.', arg_group='Policy Violation')
        c.argument('policy_tip', action=AddPolicyTip, nargs='+', help='chatMessagePolicyViolationPolicyTip',
                   arg_group='Policy Violation')
        c.argument('user_action', arg_type=get_enum_type(['none', 'override', 'reportFalsePositive']), help='',
                   arg_group='Policy Violation')
        c.argument('verdict_details', arg_type=get_enum_type(['none', 'allowFalsePositiveOverride',
                                                              'allowOverrideWithoutJustification',
                                                              'allowOverrideWithJustification']), help='',
                   arg_group='Policy Violation')
        # `from` identitySet facet.
        c.argument('application', action=AddApplication, nargs='+', help='identity', arg_group='From')
        c.argument('device', action=AddApplication, nargs='+', help='identity', arg_group='From')
        c.argument('user', action=AddApplication, nargs='+', help='identity', arg_group='From')
    # Argument registrations for `teams team-channel update-tab` (teamsTab with
    # its associated teamsApp flattened into the 'Teams App' arg group).
    with self.argument_context('teams team-channel update-tab') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('channel_id', type=str, help='key: id of channel')
        c.argument('teams_tab_id', type=str, help='key: id of teamsTab')
        c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
        c.argument('configuration', action=AddConfiguration, nargs='+', help='teamsTabConfiguration')
        c.argument('display_name', type=str, help='Name of the tab.')
        c.argument('message_id', type=str, help='')
        c.argument('sort_order_index', type=str, help='')
        c.argument('teams_app_id', type=str, help='')
        c.argument('web_url', type=str, help='Deep link URL of the tab instance. Read only.')
        c.argument('microsoft_graph_entity_id', type=str, help='Read-only.', arg_group='Teams App')
        c.argument('microsoft_graph_teams_app_display_name', type=str, help='The name of the catalog app provided by '
                   'the app developer in the Microsoft Teams zip app package.', arg_group='Teams App')
        c.argument('distribution_method', arg_type=get_enum_type(['store', 'organization', 'sideloaded',
                                                                  'unknownFutureValue']), help='', arg_group='Teams '
                                                                                                             'App')
        c.argument('external_id', type=str, help='The ID of the catalog provided by the app developer in the Microsoft '
                   'Teams zip app package.', arg_group='Teams App')
        c.argument('app_definitions', type=validate_file_or_dict, help='The details for each version of the app. '
                   'Expected value: json-string/@json-file.', arg_group='Teams App')
    # Argument registrations for `teams team-channel-member add` (bulk member
    # values) and `teams team-channel-message create-hosted-content`.
    with self.argument_context('teams team-channel-member add') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('channel_id', type=str, help='key: id of channel')
        c.argument('values', action=AddTeamsChannelsMembersValues, nargs='+', help='')
    with self.argument_context('teams team-channel-message create-hosted-content') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('channel_id', type=str, help='key: id of channel')
        c.argument('chat_message_id', type=str, help='key: id of chatMessage')
        c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
        c.argument('content_bytes', help='')
        c.argument('content_type', type=str, help='')
    # Argument registrations for `teams team-channel-message create-reply`.
    # Same flattened chatMessage surface as update-message, keyed by the parent
    # chat message id.
    with self.argument_context('teams team-channel-message create-reply') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('channel_id', type=str, help='key: id of channel')
        c.argument('chat_message_id', type=str, help='key: id of chatMessage')
        c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
        c.argument('attachments', action=AddAttachments, nargs='+', help='Attached files. Attachments are currently '
                   'read-only – sending attachments is not supported.')
        c.argument('body', action=AddBody, nargs='+', help='itemBody')
        c.argument('channel_identity', action=AddChannelIdentity, nargs='+', help='channelIdentity')
        c.argument('chat_id', type=str, help='')
        c.argument('created_date_time', help='Read only. Timestamp of when the chat message was created.')
        c.argument('deleted_date_time', help='Read only. Timestamp at which the chat message was deleted, or null if '
                   'not deleted.')
        c.argument('etag', type=str, help='Read-only. Version number of the chat message.')
        c.argument('importance', arg_type=get_enum_type(['normal', 'high', 'urgent']), help='')
        c.argument('last_edited_date_time', help='Read only. Timestamp when edits to the chat message were made. '
                   'Triggers an \'Edited\' flag in the Microsoft Teams UI. If no edits are made the value is null.')
        c.argument('last_modified_date_time', help='Read only. Timestamp when the chat message is created (initial '
                   'setting) or edited, including when a reaction is added or removed.')
        c.argument('locale', type=str, help='Locale of the chat message set by the client.')
        c.argument('mentions', type=validate_file_or_dict, help='List of entities mentioned in the chat message. '
                   'Currently supports user, bot, team, channel. Expected value: json-string/@json-file.')
        c.argument('message_type', arg_type=get_enum_type(['message', 'chatEvent', 'typing']), help='')
        c.argument('reactions', type=validate_file_or_dict, help=' Expected value: json-string/@json-file.')
        c.argument('reply_to_id', type=str, help='Read-only. Id of the parent chat message or root chat message of the '
                   'thread. (Only applies to chat messages in channels not chats)')
        c.argument('subject', type=str, help='The subject of the chat message, in plaintext.')
        c.argument('summary', type=str, help='Summary text of the chat message that could be used for push '
                   'notifications and summary views or fall back views. Only applies to channel chat messages, not '
                   'chat messages in a chat.')
        c.argument('web_url', type=str, help='')
        c.argument('hosted_contents', action=AddHostedContents, nargs='+', help='')
        c.argument('replies', type=validate_file_or_dict, help=' Expected value: json-string/@json-file.')
        # policyViolation facet.
        c.argument('dlp_action', arg_type=get_enum_type(['none', 'notifySender', 'blockAccess',
                                                         'blockAccessExternal']), help='',
                   arg_group='Policy Violation')
        c.argument('justification_text', type=str, help='Justification text provided by the sender of the message when '
                   'overriding a policy violation.', arg_group='Policy Violation')
        c.argument('policy_tip', action=AddPolicyTip, nargs='+', help='chatMessagePolicyViolationPolicyTip',
                   arg_group='Policy Violation')
        c.argument('user_action', arg_type=get_enum_type(['none', 'override', 'reportFalsePositive']), help='',
                   arg_group='Policy Violation')
        c.argument('verdict_details', arg_type=get_enum_type(['none', 'allowFalsePositiveOverride',
                                                              'allowOverrideWithoutJustification',
                                                              'allowOverrideWithJustification']), help='',
                   arg_group='Policy Violation')
        # `from` identitySet facet.
        c.argument('application', action=AddApplication, nargs='+', help='identity', arg_group='From')
        c.argument('device', action=AddApplication, nargs='+', help='identity', arg_group='From')
        c.argument('user', action=AddApplication, nargs='+', help='identity', arg_group='From')
    # Argument registrations for the `teams team-channel-message` delete, delta,
    # and list commands. delete-reply addresses the reply with a second
    # chatMessage key (chat_message_id1).
    with self.argument_context('teams team-channel-message delete-hosted-content') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('channel_id', type=str, help='key: id of channel')
        c.argument('chat_message_id', type=str, help='key: id of chatMessage')
        c.argument('chat_message_hosted_content_id', type=str, help='key: id of chatMessageHostedContent')
        c.argument('if_match', type=str, help='ETag')
    with self.argument_context('teams team-channel-message delete-reply') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('channel_id', type=str, help='key: id of channel')
        c.argument('chat_message_id', type=str, help='key: id of chatMessage')
        c.argument('chat_message_id1', type=str, help='key: id of chatMessage')
        c.argument('if_match', type=str, help='ETag')
    with self.argument_context('teams team-channel-message delta') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('channel_id', type=str, help='key: id of channel')
    with self.argument_context('teams team-channel-message list-hosted-content') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('channel_id', type=str, help='key: id of channel')
        c.argument('chat_message_id', type=str, help='key: id of chatMessage')
        c.argument('orderby', nargs='+', help='Order items by property values')
        c.argument('select', nargs='+', help='Select properties to be returned')
        c.argument('expand', nargs='+', help='Expand related entities')
    with self.argument_context('teams team-channel-message list-reply') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('channel_id', type=str, help='key: id of channel')
        c.argument('chat_message_id', type=str, help='key: id of chatMessage')
        c.argument('orderby', nargs='+', help='Order items by property values')
        c.argument('select', nargs='+', help='Select properties to be returned')
        c.argument('expand', nargs='+', help='Expand related entities')
    # Argument registrations for the `teams team-channel-message` hosted-content
    # set/show/update commands and show-reply.
    with self.argument_context('teams team-channel-message set-hosted-content-content') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('channel_id', type=str, help='key: id of channel')
        c.argument('chat_message_id', type=str, help='key: id of chatMessage')
        c.argument('chat_message_hosted_content_id', type=str, help='key: id of chatMessageHostedContent')
        c.argument('data', help='New media content.')
    with self.argument_context('teams team-channel-message show-hosted-content') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('channel_id', type=str, help='key: id of channel')
        c.argument('chat_message_id', type=str, help='key: id of chatMessage')
        c.argument('chat_message_hosted_content_id', type=str, help='key: id of chatMessageHostedContent')
        c.argument('select', nargs='+', help='Select properties to be returned')
        c.argument('expand', nargs='+', help='Expand related entities')
    with self.argument_context('teams team-channel-message show-hosted-content-content') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('channel_id', type=str, help='key: id of channel')
        c.argument('chat_message_id', type=str, help='key: id of chatMessage')
        c.argument('chat_message_hosted_content_id', type=str, help='key: id of chatMessageHostedContent')
    with self.argument_context('teams team-channel-message show-reply') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('channel_id', type=str, help='key: id of channel')
        c.argument('chat_message_id', type=str, help='key: id of chatMessage')
        c.argument('chat_message_id1', type=str, help='key: id of chatMessage')
        c.argument('select', nargs='+', help='Select properties to be returned')
        c.argument('expand', nargs='+', help='Expand related entities')
    with self.argument_context('teams team-channel-message update-hosted-content') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('channel_id', type=str, help='key: id of channel')
        c.argument('chat_message_id', type=str, help='key: id of chatMessage')
        c.argument('chat_message_hosted_content_id', type=str, help='key: id of chatMessageHostedContent')
        c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
        c.argument('content_bytes', help='')
        c.argument('content_type', type=str, help='')
    # Argument registrations for `teams team-channel-message update-reply`.
    # Same flattened chatMessage surface as update-message; the reply is
    # addressed by a second chatMessage key (chat_message_id1).
    with self.argument_context('teams team-channel-message update-reply') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('channel_id', type=str, help='key: id of channel')
        c.argument('chat_message_id', type=str, help='key: id of chatMessage')
        c.argument('chat_message_id1', type=str, help='key: id of chatMessage')
        c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
        c.argument('attachments', action=AddAttachments, nargs='+', help='Attached files. Attachments are currently '
                   'read-only – sending attachments is not supported.')
        c.argument('body', action=AddBody, nargs='+', help='itemBody')
        c.argument('channel_identity', action=AddChannelIdentity, nargs='+', help='channelIdentity')
        c.argument('chat_id', type=str, help='')
        c.argument('created_date_time', help='Read only. Timestamp of when the chat message was created.')
        c.argument('deleted_date_time', help='Read only. Timestamp at which the chat message was deleted, or null if '
                   'not deleted.')
        c.argument('etag', type=str, help='Read-only. Version number of the chat message.')
        c.argument('importance', arg_type=get_enum_type(['normal', 'high', 'urgent']), help='')
        c.argument('last_edited_date_time', help='Read only. Timestamp when edits to the chat message were made. '
                   'Triggers an \'Edited\' flag in the Microsoft Teams UI. If no edits are made the value is null.')
        c.argument('last_modified_date_time', help='Read only. Timestamp when the chat message is created (initial '
                   'setting) or edited, including when a reaction is added or removed.')
        c.argument('locale', type=str, help='Locale of the chat message set by the client.')
        c.argument('mentions', type=validate_file_or_dict, help='List of entities mentioned in the chat message. '
                   'Currently supports user, bot, team, channel. Expected value: json-string/@json-file.')
        c.argument('message_type', arg_type=get_enum_type(['message', 'chatEvent', 'typing']), help='')
        c.argument('reactions', type=validate_file_or_dict, help=' Expected value: json-string/@json-file.')
        c.argument('reply_to_id', type=str, help='Read-only. Id of the parent chat message or root chat message of the '
                   'thread. (Only applies to chat messages in channels not chats)')
        c.argument('subject', type=str, help='The subject of the chat message, in plaintext.')
        c.argument('summary', type=str, help='Summary text of the chat message that could be used for push '
                   'notifications and summary views or fall back views. Only applies to channel chat messages, not '
                   'chat messages in a chat.')
        c.argument('web_url', type=str, help='')
        c.argument('hosted_contents', action=AddHostedContents, nargs='+', help='')
        c.argument('replies', type=validate_file_or_dict, help=' Expected value: json-string/@json-file.')
        # policyViolation facet.
        c.argument('dlp_action', arg_type=get_enum_type(['none', 'notifySender', 'blockAccess',
                                                         'blockAccessExternal']), help='',
                   arg_group='Policy Violation')
        c.argument('justification_text', type=str, help='Justification text provided by the sender of the message when '
                   'overriding a policy violation.', arg_group='Policy Violation')
        c.argument('policy_tip', action=AddPolicyTip, nargs='+', help='chatMessagePolicyViolationPolicyTip',
                   arg_group='Policy Violation')
        c.argument('user_action', arg_type=get_enum_type(['none', 'override', 'reportFalsePositive']), help='',
                   arg_group='Policy Violation')
        c.argument('verdict_details', arg_type=get_enum_type(['none', 'allowFalsePositiveOverride',
                                                              'allowOverrideWithoutJustification',
                                                              'allowOverrideWithJustification']), help='',
                   arg_group='Policy Violation')
        # `from` identitySet facet.
        c.argument('application', action=AddApplication, nargs='+', help='identity', arg_group='From')
        c.argument('device', action=AddApplication, nargs='+', help='identity', arg_group='From')
        c.argument('user', action=AddApplication, nargs='+', help='identity', arg_group='From')
with self.argument_context('teams team-channel-message-reply delta') as c:
c.argument('team_id', type=str, help='key: id of team')
c.argument('channel_id', type=str, help='key: id of channel')
c.argument('chat_message_id', type=str, help='key: id of chatMessage')
with self.argument_context('teams team-channel-tab delete-ref-team-app') as c:
c.argument('team_id', type=str, help='key: id of team')
c.argument('channel_id', type=str, help='key: id of channel')
c.argument('teams_tab_id', type=str, help='key: id of teamsTab')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('teams team-channel-tab set-ref-team-app') as c:
c.argument('team_id', type=str, help='key: id of team')
c.argument('channel_id', type=str, help='key: id of channel')
c.argument('teams_tab_id', type=str, help='key: id of teamsTab')
c.argument('body', type=validate_file_or_dict, help='New navigation property ref values Expected value: '
'json-string/@json-file.')
with self.argument_context('teams team-channel-tab show-ref-team-app') as c:
c.argument('team_id', type=str, help='key: id of team')
c.argument('channel_id', type=str, help='key: id of channel')
c.argument('teams_tab_id', type=str, help='key: id of teamsTab')
with self.argument_context('teams team-channel-tab show-team-app') as c:
c.argument('team_id', type=str, help='key: id of team')
c.argument('channel_id', type=str, help='key: id of channel')
c.argument('teams_tab_id', type=str, help='key: id of teamsTab')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('teams team-installed-app delete-ref-team-app') as c:
c.argument('team_id', type=str, help='key: id of team')
c.argument('teams_app_installation_id', type=str, help='key: id of teamsAppInstallation')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('teams team-installed-app delete-ref-team-app-definition') as c:
c.argument('team_id', type=str, help='key: id of team')
c.argument('teams_app_installation_id', type=str, help='key: id of teamsAppInstallation')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('teams team-installed-app set-ref-team-app') as c:
c.argument('team_id', type=str, help='key: id of team')
c.argument('teams_app_installation_id', type=str, help='key: id of teamsAppInstallation')
c.argument('body', type=validate_file_or_dict, help='New navigation property ref values Expected value: '
'json-string/@json-file.')
with self.argument_context('teams team-installed-app set-ref-team-app-definition') as c:
c.argument('team_id', type=str, help='key: id of team')
c.argument('teams_app_installation_id', type=str, help='key: id of teamsAppInstallation')
c.argument('body', type=validate_file_or_dict, help='New navigation property ref values Expected value: '
'json-string/@json-file.')
with self.argument_context('teams team-installed-app show-ref-team-app') as c:
c.argument('team_id', type=str, help='key: id of team')
c.argument('teams_app_installation_id', type=str, help='key: id of teamsAppInstallation')
with self.argument_context('teams team-installed-app show-ref-team-app-definition') as c:
c.argument('team_id', type=str, help='key: id of team')
c.argument('teams_app_installation_id', type=str, help='key: id of teamsAppInstallation')
with self.argument_context('teams team-installed-app show-team-app') as c:
c.argument('team_id', type=str, help='key: id of team')
c.argument('teams_app_installation_id', type=str, help='key: id of teamsAppInstallation')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('teams team-installed-app show-team-app-definition') as c:
c.argument('team_id', type=str, help='key: id of team')
c.argument('teams_app_installation_id', type=str, help='key: id of teamsAppInstallation')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('teams team-installed-app upgrade') as c:
c.argument('team_id', type=str, help='key: id of team')
c.argument('teams_app_installation_id', type=str, help='key: id of teamsAppInstallation')
with self.argument_context('teams team-member add') as c:
c.argument('team_id', type=str, help='key: id of team')
c.argument('values', action=AddTeamsMembersValues, nargs='+', help='')
with self.argument_context('teams team-primary-channel complete-migration') as c:
c.argument('team_id', type=str, help='key: id of team')
with self.argument_context('teams team-primary-channel create-member') as c:
c.argument('team_id', type=str, help='key: id of team')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('display_name', type=str, help='The display name of the user.')
c.argument('roles', nargs='+', help='The roles for that user.')
    # teams team-primary-channel create-message: arguments for creating a
    # chatMessage in the team's primary channel. The generated command mirrors
    # the full chatMessage schema, so several fields whose help text says
    # 'Read only' are still registered here.
    with self.argument_context('teams team-primary-channel create-message') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
        c.argument('attachments', action=AddAttachments, nargs='+', help='Attached files. Attachments are currently '
                   'read-only – sending attachments is not supported.')
        c.argument('body', action=AddBody, nargs='+', help='itemBody')
        c.argument('channel_identity', action=AddChannelIdentity, nargs='+', help='channelIdentity')
        c.argument('chat_id', type=str, help='')
        c.argument('created_date_time', help='Read only. Timestamp of when the chat message was created.')
        c.argument('deleted_date_time', help='Read only. Timestamp at which the chat message was deleted, or null if '
                   'not deleted.')
        c.argument('etag', type=str, help='Read-only. Version number of the chat message.')
        c.argument('importance', arg_type=get_enum_type(['normal', 'high', 'urgent']), help='')
        c.argument('last_edited_date_time', help='Read only. Timestamp when edits to the chat message were made. '
                   'Triggers an \'Edited\' flag in the Microsoft Teams UI. If no edits are made the value is null.')
        c.argument('last_modified_date_time', help='Read only. Timestamp when the chat message is created (initial '
                   'setting) or edited, including when a reaction is added or removed.')
        c.argument('locale', type=str, help='Locale of the chat message set by the client.')
        c.argument('mentions', type=validate_file_or_dict, help='List of entities mentioned in the chat message. '
                   'Currently supports user, bot, team, channel. Expected value: json-string/@json-file.')
        c.argument('message_type', arg_type=get_enum_type(['message', 'chatEvent', 'typing']), help='')
        c.argument('reactions', type=validate_file_or_dict, help=' Expected value: json-string/@json-file.')
        c.argument('reply_to_id', type=str, help='Read-only. Id of the parent chat message or root chat message of the '
                   'thread. (Only applies to chat messages in channels not chats)')
        c.argument('subject', type=str, help='The subject of the chat message, in plaintext.')
        c.argument('summary', type=str, help='Summary text of the chat message that could be used for push '
                   'notifications and summary views or fall back views. Only applies to channel chat messages, not '
                   'chat messages in a chat.')
        c.argument('web_url', type=str, help='')
        c.argument('hosted_contents', action=AddHostedContents, nargs='+', help='')
        c.argument('replies', type=validate_file_or_dict, help=' Expected value: json-string/@json-file.')
        # Flattened chatMessagePolicyViolation properties.
        c.argument('dlp_action', arg_type=get_enum_type(['none', 'notifySender', 'blockAccess',
                                                         'blockAccessExternal']), help='',
                   arg_group='Policy Violation')
        c.argument('justification_text', type=str, help='Justification text provided by the sender of the message when '
                   'overriding a policy violation.', arg_group='Policy Violation')
        c.argument('policy_tip', action=AddPolicyTip, nargs='+', help='chatMessagePolicyViolationPolicyTip',
                   arg_group='Policy Violation')
        c.argument('user_action', arg_type=get_enum_type(['none', 'override', 'reportFalsePositive']), help='',
                   arg_group='Policy Violation')
        c.argument('verdict_details', arg_type=get_enum_type(['none', 'allowFalsePositiveOverride',
                                                              'allowOverrideWithoutJustification',
                                                              'allowOverrideWithJustification']), help='',
                   arg_group='Policy Violation')
        # Flattened identitySet of the message sender ('from' property).
        c.argument('application', action=AddApplication, nargs='+', help='identity', arg_group='From')
        c.argument('device', action=AddApplication, nargs='+', help='identity', arg_group='From')
        c.argument('user', action=AddApplication, nargs='+', help='identity', arg_group='From')
    # teams team-primary-channel create-tab: teamsTab properties plus the
    # nested teamsApp fields, flattened into the 'Teams App' argument group.
    with self.argument_context('teams team-primary-channel create-tab') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
        c.argument('configuration', action=AddConfiguration, nargs='+', help='teamsTabConfiguration')
        c.argument('display_name', type=str, help='Name of the tab.')
        c.argument('message_id', type=str, help='')
        c.argument('sort_order_index', type=str, help='')
        c.argument('teams_app_id', type=str, help='')
        c.argument('web_url', type=str, help='Deep link URL of the tab instance. Read only.')
        c.argument('microsoft_graph_entity_id', type=str, help='Read-only.', arg_group='Teams App')
        c.argument('microsoft_graph_teams_app_display_name', type=str, help='The name of the catalog app provided by '
                   'the app developer in the Microsoft Teams zip app package.', arg_group='Teams App')
        c.argument('distribution_method', arg_type=get_enum_type(['store', 'organization', 'sideloaded',
                                                                  'unknownFutureValue']), help='', arg_group='Teams '
                   'App')
        c.argument('external_id', type=str, help='The ID of the catalog provided by the app developer in the Microsoft '
                   'Teams zip app package.', arg_group='Teams App')
        c.argument('app_definitions', type=validate_file_or_dict, help='The details for each version of the app. '
                   'Expected value: json-string/@json-file.', arg_group='Teams App')
with self.argument_context('teams team-primary-channel delete-file-folder') as c:
c.argument('team_id', type=str, help='key: id of team')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('teams team-primary-channel delete-member') as c:
c.argument('team_id', type=str, help='key: id of team')
c.argument('conversation_member_id', type=str, help='key: id of conversationMember')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('teams team-primary-channel delete-message') as c:
c.argument('team_id', type=str, help='key: id of team')
c.argument('chat_message_id', type=str, help='key: id of chatMessage')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('teams team-primary-channel delete-tab') as c:
c.argument('team_id', type=str, help='key: id of team')
c.argument('teams_tab_id', type=str, help='key: id of teamsTab')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('teams team-primary-channel list-member') as c:
c.argument('team_id', type=str, help='key: id of team')
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('teams team-primary-channel list-message') as c:
c.argument('team_id', type=str, help='key: id of team')
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('teams team-primary-channel list-tab') as c:
c.argument('team_id', type=str, help='key: id of team')
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('teams team-primary-channel set-file-folder-content') as c:
c.argument('team_id', type=str, help='key: id of team')
c.argument('data', help='New media content.')
with self.argument_context('teams team-primary-channel show-file-folder') as c:
c.argument('team_id', type=str, help='key: id of team')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('teams team-primary-channel show-file-folder-content') as c:
c.argument('team_id', type=str, help='key: id of team')
with self.argument_context('teams team-primary-channel show-member') as c:
c.argument('team_id', type=str, help='key: id of team')
c.argument('conversation_member_id', type=str, help='key: id of conversationMember')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('teams team-primary-channel show-message') as c:
c.argument('team_id', type=str, help='key: id of team')
c.argument('chat_message_id', type=str, help='key: id of chatMessage')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('teams team-primary-channel show-tab') as c:
c.argument('team_id', type=str, help='key: id of team')
c.argument('teams_tab_id', type=str, help='key: id of teamsTab')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
    # teams team-primary-channel update-file-folder: patch the driveItem that
    # backs the primary channel's file folder. The generated command flattens
    # the full driveItem schema, so nested facets surface as flat arguments
    # grouped by arg_group (Parent Reference, Workbook, Remote Item, ...).
    with self.argument_context('teams team-primary-channel update-file-folder') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
        c.argument('created_date_time', help='Date and time of item creation. Read-only.')
        c.argument('description', type=str, help='Provides a user-visible description of the item. Optional.')
        c.argument('e_tag', type=str, help='ETag for the item. Read-only.')
        c.argument('last_modified_date_time', help='Date and time the item was last modified. Read-only.')
        c.argument('name', type=str, help='The name of the item. Read-write.')
        c.argument('web_url', type=str, help='URL that displays the resource in the browser. Read-only.')
        c.argument('created_by_user', type=validate_file_or_dict, help='Represents an Azure Active Directory user '
                   'object. Expected value: json-string/@json-file.')
        c.argument('last_modified_by_user', type=validate_file_or_dict, help='Represents an Azure Active Directory '
                   'user object. Expected value: json-string/@json-file.')
        # Flattened itemReference facet of the parent.
        c.argument('drive_id', type=str, help='Unique identifier of the drive instance that contains the item. '
                   'Read-only.', arg_group='Parent Reference')
        c.argument('drive_type', type=str, help='Identifies the type of drive. See [drive][] resource for values.',
                   arg_group='Parent Reference')
        c.argument('microsoft_graph_item_reference_id', type=str, help='Unique identifier of the item in the drive. '
                   'Read-only.', arg_group='Parent Reference')
        c.argument('microsoft_graph_item_reference_name', type=str, help='The name of the item being referenced. '
                   'Read-only.', arg_group='Parent Reference')
        c.argument('path', type=str, help='Path that can be used to navigate to the item. Read-only.',
                   arg_group='Parent Reference')
        c.argument('share_id', type=str, help='A unique identifier for a shared resource that can be accessed via the '
                   '[Shares][] API.', arg_group='Parent Reference')
        c.argument('sharepoint_ids', action=AddSharepointIds, nargs='+', help='sharepointIds', arg_group='Parent '
                   'Reference')
        c.argument('site_id', type=str, help='', arg_group='Parent Reference')
        # Flattened identitySet facets for lastModifiedBy / createdBy.
        c.argument('application', action=AddApplication, nargs='+', help='identity', arg_group='Last Modified By')
        c.argument('device', action=AddApplication, nargs='+', help='identity', arg_group='Last Modified By')
        c.argument('user', action=AddApplication, nargs='+', help='identity', arg_group='Last Modified By')
        c.argument('microsoft_graph_identity_application', action=AddApplication, nargs='+', help='identity',
                   arg_group='Created By')
        c.argument('microsoft_graph_identity_device', action=AddApplication, nargs='+', help='identity',
                   arg_group='Created By')
        c.argument('microsoft_graph_identity_user', action=AddApplication, nargs='+', help='identity',
                   arg_group='Created By')
        # driveItem facets and media/content properties.
        c.argument('audio', action=AddAudio, nargs='+', help='audio')
        c.argument('content', help='The content stream, if the item represents a file.')
        c.argument('c_tag', type=str, help='An eTag for the content of the item. This eTag is not changed if only the '
                   'metadata is changed. Note This property is not returned if the item is a folder. Read-only.')
        c.argument('file_system_info', action=AddFileSystemInfo, nargs='+', help='fileSystemInfo')
        c.argument('image', action=AddImage, nargs='+', help='image')
        c.argument('location', arg_type=get_location_type(self.cli_ctx))
        c.argument('photo', action=AddTeamsChannelsPhoto, nargs='+', help='photo')
        c.argument('publication', action=AddPublication, nargs='+', help='publicationFacet')
        c.argument('root', type=validate_file_or_dict, help='root Expected value: json-string/@json-file.')
        c.argument('microsoft_graph_sharepoint_ids', action=AddSharepointIds, nargs='+', help='sharepointIds')
        c.argument('size', type=int, help='Size of the item in bytes. Read-only.')
        c.argument('video', action=AddVideo, nargs='+', help='video')
        c.argument('web_dav_url', type=str, help='WebDAV compatible URL for the item.')
        # Navigation properties, passed as JSON blobs or list actions.
        c.argument('activities', type=validate_file_or_dict, help='The list of recent activities that took place on '
                   'this item. Expected value: json-string/@json-file.')
        c.argument('children', type=validate_file_or_dict, help='Collection containing Item objects for the immediate '
                   'children of Item. Only items representing folders have children. Read-only. Nullable. Expected '
                   'value: json-string/@json-file.')
        c.argument('list_item', type=validate_file_or_dict, help='listItem Expected value: json-string/@json-file.')
        c.argument('permissions', type=validate_file_or_dict, help='The set of permissions for the item. Read-only. '
                   'Nullable. Expected value: json-string/@json-file.')
        c.argument('subscriptions', action=AddSubscriptions, nargs='+', help='The set of subscriptions on the item. '
                   'Only supported on the root of a drive.')
        c.argument('thumbnails', type=validate_file_or_dict, help='Collection containing [ThumbnailSet][] objects '
                   'associated with the item. For more info, see [getting thumbnails][]. Read-only. Nullable. Expected '
                   'value: json-string/@json-file.')
        c.argument('versions', action=AddVersions, nargs='+', help='The list of previous versions of the item. For '
                   'more info, see [getting previous versions][]. Read-only. Nullable.')
        # Flattened itemAnalytics facet.
        c.argument('microsoft_graph_entity_id', type=str, help='Read-only.', arg_group='Analytics')
        c.argument('all_time', type=validate_file_or_dict, help='itemActivityStat Expected value: '
                   'json-string/@json-file.', arg_group='Analytics')
        c.argument('item_activity_stats', type=validate_file_or_dict, help=' Expected value: json-string/@json-file.',
                   arg_group='Analytics')
        c.argument('last_seven_days', type=validate_file_or_dict, help='itemActivityStat Expected value: '
                   'json-string/@json-file.', arg_group='Analytics')
        # Flattened workbook facet (Excel-backed items).
        c.argument('id1', type=str, help='Read-only.', arg_group='Workbook')
        c.argument('microsoft_graph_workbook_application', action=AddMicrosoftGraphWorkbookApplication, nargs='+',
                   help='workbookApplication', arg_group='Workbook')
        c.argument('comments', type=validate_file_or_dict, help=' Expected value: json-string/@json-file.',
                   arg_group='Workbook')
        c.argument('functions', action=AddFunctions, nargs='+', help='workbookFunctions', arg_group='Workbook')
        c.argument('names', type=validate_file_or_dict, help='Represents a collection of workbook scoped named items '
                   '(named ranges and constants). Read-only. Expected value: json-string/@json-file.',
                   arg_group='Workbook')
        c.argument('operations', type=validate_file_or_dict, help='The status of workbook operations. Getting an '
                   'operation collection is not supported, but you can get the status of a long-running operation if '
                   'the Location header is returned in the response. Read-only. Expected value: '
                   'json-string/@json-file.', arg_group='Workbook')
        c.argument('tables', type=validate_file_or_dict, help='Represents a collection of tables associated with the '
                   'workbook. Read-only. Expected value: json-string/@json-file.', arg_group='Workbook')
        c.argument('worksheets', type=validate_file_or_dict, help='Represents a collection of worksheets associated '
                   'with the workbook. Read-only. Expected value: json-string/@json-file.', arg_group='Workbook')
        c.argument('microsoft_graph_special_folder_name', type=str, help='The unique identifier for this item in the '
                   '/drive/special collection', arg_group='Special Folder')
        # Flattened shared facet.
        c.argument('owner', type=validate_file_or_dict, help='identitySet Expected value: json-string/@json-file.',
                   arg_group='Shared')
        c.argument('scope', type=str, help='Indicates the scope of how the item is shared: anonymous, organization, or '
                   'users. Read-only.', arg_group='Shared')
        c.argument('shared_by', type=validate_file_or_dict, help='identitySet Expected value: json-string/@json-file.',
                   arg_group='Shared')
        c.argument('shared_date_time', help='The UTC date and time when the item was shared. Read-only.',
                   arg_group='Shared')
        c.argument('on_click_telemetry_url', type=str, help='A callback URL that can be used to record telemetry '
                   'information. The application should issue a GET on this URL if the user interacts with this item '
                   'to improve the quality of results.', arg_group='Search Result')
        # Flattened remoteItem facet (items shared from another drive).
        c.argument('created_by', type=validate_file_or_dict,
                   help='identitySet Expected value: json-string/@json-file.', arg_group='Remote Item')
        c.argument('microsoft_graph_remote_item_created_date_time_created_date_time', help='Date and time of item '
                   'creation. Read-only.', arg_group='Remote Item')
        c.argument('file', type=validate_file_or_dict, help='file Expected value: json-string/@json-file.',
                   arg_group='Remote Item')
        c.argument('microsoft_graph_file_system_info_file_system_info', action=AddFileSystemInfo, nargs='+',
                   help='fileSystemInfo', arg_group='Remote Item')
        c.argument('folder', type=validate_file_or_dict, help='folder Expected value: json-string/@json-file.',
                   arg_group='Remote Item')
        c.argument('microsoft_graph_remote_item_id', type=str, help='Unique identifier for the remote item in its '
                   'drive. Read-only.', arg_group='Remote Item')
        c.argument('microsoft_graph_image', action=AddImage, nargs='+', help='image', arg_group='Remote Item')
        c.argument('last_modified_by', type=validate_file_or_dict, help='identitySet Expected value: '
                   'json-string/@json-file.', arg_group='Remote Item')
        c.argument('microsoft_graph_remote_item_last_modified_date_time_last_modified_date_time', help='Date and time '
                   'the item was last modified. Read-only.', arg_group='Remote Item')
        c.argument('microsoft_graph_remote_item_name', type=str, help='Optional. Filename of the remote item. '
                   'Read-only.', arg_group='Remote Item')
        c.argument('package', action=AddPackage, nargs='+', help='package', arg_group='Remote Item')
        c.argument('parent_reference', type=validate_file_or_dict, help='itemReference Expected value: '
                   'json-string/@json-file.', arg_group='Remote Item')
        c.argument('shared', type=validate_file_or_dict, help='shared Expected value: json-string/@json-file.',
                   arg_group='Remote Item')
        c.argument('sharepoint_ids1', action=AddSharepointIds, nargs='+', help='sharepointIds',
                   arg_group='Remote Item')
        c.argument('integer_size', type=int, help='Size of the remote item. Read-only.', arg_group='Remote Item')
        c.argument('special_folder', action=AddSpecialFolder, nargs='+', help='specialFolder',
                   arg_group='Remote Item')
        c.argument('microsoft_graph_video', action=AddVideo, nargs='+', help='video', arg_group='Remote Item')
        c.argument('microsoft_graph_remote_item_web_dav_url_web_dav_url', type=str, help='DAV compatible URL for the '
                   'item.', arg_group='Remote Item')
        c.argument('microsoft_graph_remote_item_web_url', type=str, help='URL that displays the resource in the '
                   'browser. Read-only.', arg_group='Remote Item')
        # Remaining flattened facets (pendingOperations, package, folder, file,
        # deleted, bundle).
        c.argument('queued_date_time', help='Date and time the pending binary operation was queued in UTC time. '
                   'Read-only.', arg_group='Pending Operations Pending Content Update')
        c.argument('type_', options_list=['--type'], type=str, help='A string indicating the type of package. While '
                   'oneNote is the only currently defined value, you should expect other package types to be returned '
                   'and handle them accordingly.', arg_group='Package')
        c.argument('child_count', type=int, help='Number of children contained immediately within this container.',
                   arg_group='Folder')
        c.argument('view', action=AddView, nargs='+', help='folderView', arg_group='Folder')
        c.argument('hashes', action=AddHashes, nargs='+', help='hashes', arg_group='File')
        c.argument('mime_type', type=str, help='The MIME type for the file. This is determined by logic on the server '
                   'and might not be the value provided when the file was uploaded. Read-only.', arg_group='File')
        c.argument('processing_metadata', arg_type=get_three_state_flag(), help='', arg_group='File')
        c.argument('state', type=str, help='Represents the state of the deleted item.', arg_group='Deleted')
        c.argument('album', action=AddAlbum, nargs='+', help='album', arg_group='Bundle')
        c.argument('integer_child_count', type=int, help='', arg_group='Bundle')
with self.argument_context('teams team-primary-channel update-member') as c:
c.argument('team_id', type=str, help='key: id of team')
c.argument('conversation_member_id', type=str, help='key: id of conversationMember')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('display_name', type=str, help='The display name of the user.')
c.argument('roles', nargs='+', help='The roles for that user.')
    # teams team-primary-channel update-message: patch an existing chatMessage
    # in the primary channel. Mirrors the full chatMessage schema, so several
    # fields whose help text says 'Read only' are still registered.
    with self.argument_context('teams team-primary-channel update-message') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('chat_message_id', type=str, help='key: id of chatMessage')
        c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
        c.argument('attachments', action=AddAttachments, nargs='+', help='Attached files. Attachments are currently '
                   'read-only – sending attachments is not supported.')
        c.argument('body', action=AddBody, nargs='+', help='itemBody')
        c.argument('channel_identity', action=AddChannelIdentity, nargs='+', help='channelIdentity')
        c.argument('chat_id', type=str, help='')
        c.argument('created_date_time', help='Read only. Timestamp of when the chat message was created.')
        c.argument('deleted_date_time', help='Read only. Timestamp at which the chat message was deleted, or null if '
                   'not deleted.')
        c.argument('etag', type=str, help='Read-only. Version number of the chat message.')
        c.argument('importance', arg_type=get_enum_type(['normal', 'high', 'urgent']), help='')
        c.argument('last_edited_date_time', help='Read only. Timestamp when edits to the chat message were made. '
                   'Triggers an \'Edited\' flag in the Microsoft Teams UI. If no edits are made the value is null.')
        c.argument('last_modified_date_time', help='Read only. Timestamp when the chat message is created (initial '
                   'setting) or edited, including when a reaction is added or removed.')
        c.argument('locale', type=str, help='Locale of the chat message set by the client.')
        c.argument('mentions', type=validate_file_or_dict, help='List of entities mentioned in the chat message. '
                   'Currently supports user, bot, team, channel. Expected value: json-string/@json-file.')
        c.argument('message_type', arg_type=get_enum_type(['message', 'chatEvent', 'typing']), help='')
        c.argument('reactions', type=validate_file_or_dict, help=' Expected value: json-string/@json-file.')
        c.argument('reply_to_id', type=str, help='Read-only. Id of the parent chat message or root chat message of the '
                   'thread. (Only applies to chat messages in channels not chats)')
        c.argument('subject', type=str, help='The subject of the chat message, in plaintext.')
        c.argument('summary', type=str, help='Summary text of the chat message that could be used for push '
                   'notifications and summary views or fall back views. Only applies to channel chat messages, not '
                   'chat messages in a chat.')
        c.argument('web_url', type=str, help='')
        c.argument('hosted_contents', action=AddHostedContents, nargs='+', help='')
        c.argument('replies', type=validate_file_or_dict, help=' Expected value: json-string/@json-file.')
        # Flattened chatMessagePolicyViolation properties.
        c.argument('dlp_action', arg_type=get_enum_type(['none', 'notifySender', 'blockAccess',
                                                         'blockAccessExternal']), help='',
                   arg_group='Policy Violation')
        c.argument('justification_text', type=str, help='Justification text provided by the sender of the message when '
                   'overriding a policy violation.', arg_group='Policy Violation')
        c.argument('policy_tip', action=AddPolicyTip, nargs='+', help='chatMessagePolicyViolationPolicyTip',
                   arg_group='Policy Violation')
        c.argument('user_action', arg_type=get_enum_type(['none', 'override', 'reportFalsePositive']), help='',
                   arg_group='Policy Violation')
        c.argument('verdict_details', arg_type=get_enum_type(['none', 'allowFalsePositiveOverride',
                                                              'allowOverrideWithoutJustification',
                                                              'allowOverrideWithJustification']), help='',
                   arg_group='Policy Violation')
        # Flattened identitySet of the message sender ('from' property).
        c.argument('application', action=AddApplication, nargs='+', help='identity', arg_group='From')
        c.argument('device', action=AddApplication, nargs='+', help='identity', arg_group='From')
        c.argument('user', action=AddApplication, nargs='+', help='identity', arg_group='From')
    # teams team-primary-channel update-tab: patch a teamsTab in the primary
    # channel; nested teamsApp fields are flattened into the 'Teams App' group.
    with self.argument_context('teams team-primary-channel update-tab') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('teams_tab_id', type=str, help='key: id of teamsTab')
        c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
        c.argument('configuration', action=AddConfiguration, nargs='+', help='teamsTabConfiguration')
        c.argument('display_name', type=str, help='Name of the tab.')
        c.argument('message_id', type=str, help='')
        c.argument('sort_order_index', type=str, help='')
        c.argument('teams_app_id', type=str, help='')
        c.argument('web_url', type=str, help='Deep link URL of the tab instance. Read only.')
        c.argument('microsoft_graph_entity_id', type=str, help='Read-only.', arg_group='Teams App')
        c.argument('microsoft_graph_teams_app_display_name', type=str, help='The name of the catalog app provided by '
                   'the app developer in the Microsoft Teams zip app package.', arg_group='Teams App')
        c.argument('distribution_method', arg_type=get_enum_type(['store', 'organization', 'sideloaded',
                                                                  'unknownFutureValue']), help='', arg_group='Teams '
                   'App')
        c.argument('external_id', type=str, help='The ID of the catalog provided by the app developer in the Microsoft '
                   'Teams zip app package.', arg_group='Teams App')
        c.argument('app_definitions', type=validate_file_or_dict, help='The details for each version of the app. '
                   'Expected value: json-string/@json-file.', arg_group='Teams App')
# `teams team-primary-channel-member add`: bulk-add of conversation members via
# a repeatable --values action.
with self.argument_context('teams team-primary-channel-member add') as c:
    c.argument('team_id', type=str, help='key: id of team')
    c.argument('values', action=AddTeamsPrimarychannelMembersValues, nargs='+', help='')
# `teams team-primary-channel-message create-hosted-content`: raw content bytes
# plus MIME type for a new chatMessageHostedContent on a channel message.
with self.argument_context('teams team-primary-channel-message create-hosted-content') as c:
    c.argument('team_id', type=str, help='key: id of team')
    c.argument('chat_message_id', type=str, help='key: id of chatMessage')
    c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
    c.argument('content_bytes', help='')
    c.argument('content_type', type=str, help='')
# `teams team-primary-channel-message create-reply`: the full chatMessage
# surface — message body/attachments, read-only timestamps, policy-violation
# fields ('Policy Violation' group) and sender identity parts ('From' group).
with self.argument_context('teams team-primary-channel-message create-reply') as c:
    c.argument('team_id', type=str, help='key: id of team')
    c.argument('chat_message_id', type=str, help='key: id of chatMessage')
    c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
    c.argument('attachments', action=AddAttachments, nargs='+', help='Attached files. Attachments are currently '
               'read-only – sending attachments is not supported.')
    c.argument('body', action=AddBody, nargs='+', help='itemBody')
    c.argument('channel_identity', action=AddChannelIdentity, nargs='+', help='channelIdentity')
    c.argument('chat_id', type=str, help='')
    c.argument('created_date_time', help='Read only. Timestamp of when the chat message was created.')
    c.argument('deleted_date_time', help='Read only. Timestamp at which the chat message was deleted, or null if '
               'not deleted.')
    c.argument('etag', type=str, help='Read-only. Version number of the chat message.')
    c.argument('importance', arg_type=get_enum_type(['normal', 'high', 'urgent']), help='')
    c.argument('last_edited_date_time', help='Read only. Timestamp when edits to the chat message were made. '
               'Triggers an \'Edited\' flag in the Microsoft Teams UI. If no edits are made the value is null.')
    c.argument('last_modified_date_time', help='Read only. Timestamp when the chat message is created (initial '
               'setting) or edited, including when a reaction is added or removed.')
    c.argument('locale', type=str, help='Locale of the chat message set by the client.')
    c.argument('mentions', type=validate_file_or_dict, help='List of entities mentioned in the chat message. '
               'Currently supports user, bot, team, channel. Expected value: json-string/@json-file.')
    c.argument('message_type', arg_type=get_enum_type(['message', 'chatEvent', 'typing']), help='')
    c.argument('reactions', type=validate_file_or_dict, help=' Expected value: json-string/@json-file.')
    c.argument('reply_to_id', type=str, help='Read-only. Id of the parent chat message or root chat message of the '
               'thread. (Only applies to chat messages in channels not chats)')
    c.argument('subject', type=str, help='The subject of the chat message, in plaintext.')
    c.argument('summary', type=str, help='Summary text of the chat message that could be used for push '
               'notifications and summary views or fall back views. Only applies to channel chat messages, not '
               'chat messages in a chat.')
    c.argument('web_url', type=str, help='')
    c.argument('hosted_contents', action=AddHostedContents, nargs='+', help='')
    c.argument('replies', type=validate_file_or_dict, help=' Expected value: json-string/@json-file.')
    c.argument('dlp_action', arg_type=get_enum_type(['none', 'notifySender', 'blockAccess',
               'blockAccessExternal']), help='',
               arg_group='Policy Violation')
    c.argument('justification_text', type=str, help='Justification text provided by the sender of the message when '
               'overriding a policy violation.', arg_group='Policy Violation')
    c.argument('policy_tip', action=AddPolicyTip, nargs='+', help='chatMessagePolicyViolationPolicyTip',
               arg_group='Policy Violation')
    c.argument('user_action', arg_type=get_enum_type(['none', 'override', 'reportFalsePositive']), help='',
               arg_group='Policy Violation')
    c.argument('verdict_details', arg_type=get_enum_type(['none', 'allowFalsePositiveOverride',
               'allowOverrideWithoutJustification',
               'allowOverrideWithJustification']), help='',
               arg_group='Policy Violation')
    # NOTE(review): the generator reuses AddApplication for the application,
    # device and user identity parts — presumably they share one identity
    # schema; confirm against the generated _params action definitions.
    c.argument('application', action=AddApplication, nargs='+', help='identity', arg_group='From')
    c.argument('device', action=AddApplication, nargs='+', help='identity', arg_group='From')
    c.argument('user', action=AddApplication, nargs='+', help='identity', arg_group='From')
# Delete / delta / list contexts for channel messages: only entity keys,
# optional ETag concurrency header (--if-match), and OData query options.
with self.argument_context('teams team-primary-channel-message delete-hosted-content') as c:
    c.argument('team_id', type=str, help='key: id of team')
    c.argument('chat_message_id', type=str, help='key: id of chatMessage')
    c.argument('chat_message_hosted_content_id', type=str, help='key: id of chatMessageHostedContent')
    c.argument('if_match', type=str, help='ETag')
with self.argument_context('teams team-primary-channel-message delete-reply') as c:
    c.argument('team_id', type=str, help='key: id of team')
    c.argument('chat_message_id', type=str, help='key: id of chatMessage')
    # chat_message_id1 is the second chatMessage key on the path (the reply).
    c.argument('chat_message_id1', type=str, help='key: id of chatMessage')
    c.argument('if_match', type=str, help='ETag')
with self.argument_context('teams team-primary-channel-message delta') as c:
    c.argument('team_id', type=str, help='key: id of team')
with self.argument_context('teams team-primary-channel-message list-hosted-content') as c:
    c.argument('team_id', type=str, help='key: id of team')
    c.argument('chat_message_id', type=str, help='key: id of chatMessage')
    c.argument('orderby', nargs='+', help='Order items by property values')
    c.argument('select', nargs='+', help='Select properties to be returned')
    c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('teams team-primary-channel-message list-reply') as c:
    c.argument('team_id', type=str, help='key: id of team')
    c.argument('chat_message_id', type=str, help='key: id of chatMessage')
    c.argument('orderby', nargs='+', help='Order items by property values')
    c.argument('select', nargs='+', help='Select properties to be returned')
    c.argument('expand', nargs='+', help='Expand related entities')
# Hosted-content set/show/update and show-reply contexts: path keys plus, where
# applicable, OData projection options or the writable content fields.
with self.argument_context('teams team-primary-channel-message set-hosted-content-content') as c:
    c.argument('team_id', type=str, help='key: id of team')
    c.argument('chat_message_id', type=str, help='key: id of chatMessage')
    c.argument('chat_message_hosted_content_id', type=str, help='key: id of chatMessageHostedContent')
    c.argument('data', help='New media content.')
with self.argument_context('teams team-primary-channel-message show-hosted-content') as c:
    c.argument('team_id', type=str, help='key: id of team')
    c.argument('chat_message_id', type=str, help='key: id of chatMessage')
    c.argument('chat_message_hosted_content_id', type=str, help='key: id of chatMessageHostedContent')
    c.argument('select', nargs='+', help='Select properties to be returned')
    c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('teams team-primary-channel-message show-hosted-content-content') as c:
    c.argument('team_id', type=str, help='key: id of team')
    c.argument('chat_message_id', type=str, help='key: id of chatMessage')
    c.argument('chat_message_hosted_content_id', type=str, help='key: id of chatMessageHostedContent')
with self.argument_context('teams team-primary-channel-message show-reply') as c:
    c.argument('team_id', type=str, help='key: id of team')
    c.argument('chat_message_id', type=str, help='key: id of chatMessage')
    c.argument('chat_message_id1', type=str, help='key: id of chatMessage')
    c.argument('select', nargs='+', help='Select properties to be returned')
    c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('teams team-primary-channel-message update-hosted-content') as c:
    c.argument('team_id', type=str, help='key: id of team')
    c.argument('chat_message_id', type=str, help='key: id of chatMessage')
    c.argument('chat_message_hosted_content_id', type=str, help='key: id of chatMessageHostedContent')
    c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
    c.argument('content_bytes', help='')
    c.argument('content_type', type=str, help='')
# `teams team-primary-channel-message update-reply`: same chatMessage argument
# surface as create-reply (see that context) plus the reply's own key,
# chat_message_id1 (second chatMessage segment on the URL path).
with self.argument_context('teams team-primary-channel-message update-reply') as c:
    c.argument('team_id', type=str, help='key: id of team')
    c.argument('chat_message_id', type=str, help='key: id of chatMessage')
    c.argument('chat_message_id1', type=str, help='key: id of chatMessage')
    c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
    c.argument('attachments', action=AddAttachments, nargs='+', help='Attached files. Attachments are currently '
               'read-only – sending attachments is not supported.')
    c.argument('body', action=AddBody, nargs='+', help='itemBody')
    c.argument('channel_identity', action=AddChannelIdentity, nargs='+', help='channelIdentity')
    c.argument('chat_id', type=str, help='')
    c.argument('created_date_time', help='Read only. Timestamp of when the chat message was created.')
    c.argument('deleted_date_time', help='Read only. Timestamp at which the chat message was deleted, or null if '
               'not deleted.')
    c.argument('etag', type=str, help='Read-only. Version number of the chat message.')
    c.argument('importance', arg_type=get_enum_type(['normal', 'high', 'urgent']), help='')
    c.argument('last_edited_date_time', help='Read only. Timestamp when edits to the chat message were made. '
               'Triggers an \'Edited\' flag in the Microsoft Teams UI. If no edits are made the value is null.')
    c.argument('last_modified_date_time', help='Read only. Timestamp when the chat message is created (initial '
               'setting) or edited, including when a reaction is added or removed.')
    c.argument('locale', type=str, help='Locale of the chat message set by the client.')
    c.argument('mentions', type=validate_file_or_dict, help='List of entities mentioned in the chat message. '
               'Currently supports user, bot, team, channel. Expected value: json-string/@json-file.')
    c.argument('message_type', arg_type=get_enum_type(['message', 'chatEvent', 'typing']), help='')
    c.argument('reactions', type=validate_file_or_dict, help=' Expected value: json-string/@json-file.')
    c.argument('reply_to_id', type=str, help='Read-only. Id of the parent chat message or root chat message of the '
               'thread. (Only applies to chat messages in channels not chats)')
    c.argument('subject', type=str, help='The subject of the chat message, in plaintext.')
    c.argument('summary', type=str, help='Summary text of the chat message that could be used for push '
               'notifications and summary views or fall back views. Only applies to channel chat messages, not '
               'chat messages in a chat.')
    c.argument('web_url', type=str, help='')
    c.argument('hosted_contents', action=AddHostedContents, nargs='+', help='')
    c.argument('replies', type=validate_file_or_dict, help=' Expected value: json-string/@json-file.')
    c.argument('dlp_action', arg_type=get_enum_type(['none', 'notifySender', 'blockAccess',
               'blockAccessExternal']), help='',
               arg_group='Policy Violation')
    c.argument('justification_text', type=str, help='Justification text provided by the sender of the message when '
               'overriding a policy violation.', arg_group='Policy Violation')
    c.argument('policy_tip', action=AddPolicyTip, nargs='+', help='chatMessagePolicyViolationPolicyTip',
               arg_group='Policy Violation')
    c.argument('user_action', arg_type=get_enum_type(['none', 'override', 'reportFalsePositive']), help='',
               arg_group='Policy Violation')
    c.argument('verdict_details', arg_type=get_enum_type(['none', 'allowFalsePositiveOverride',
               'allowOverrideWithoutJustification',
               'allowOverrideWithJustification']), help='',
               arg_group='Policy Violation')
    c.argument('application', action=AddApplication, nargs='+', help='identity', arg_group='From')
    c.argument('device', action=AddApplication, nargs='+', help='identity', arg_group='From')
    c.argument('user', action=AddApplication, nargs='+', help='identity', arg_group='From')
# Reply delta plus the tab<->teamsApp navigation-property ref operations:
# keys only, an ETag for the ref delete, and a JSON ref body for the set.
with self.argument_context('teams team-primary-channel-message-reply delta') as c:
    c.argument('team_id', type=str, help='key: id of team')
    c.argument('chat_message_id', type=str, help='key: id of chatMessage')
with self.argument_context('teams team-primary-channel-tab delete-ref-team-app') as c:
    c.argument('team_id', type=str, help='key: id of team')
    c.argument('teams_tab_id', type=str, help='key: id of teamsTab')
    c.argument('if_match', type=str, help='ETag')
with self.argument_context('teams team-primary-channel-tab set-ref-team-app') as c:
    c.argument('team_id', type=str, help='key: id of team')
    c.argument('teams_tab_id', type=str, help='key: id of teamsTab')
    c.argument('body', type=validate_file_or_dict, help='New navigation property ref values Expected value: '
               'json-string/@json-file.')
with self.argument_context('teams team-primary-channel-tab show-ref-team-app') as c:
    c.argument('team_id', type=str, help='key: id of team')
    c.argument('teams_tab_id', type=str, help='key: id of teamsTab')
with self.argument_context('teams team-primary-channel-tab show-team-app') as c:
    c.argument('team_id', type=str, help='key: id of team')
    c.argument('teams_tab_id', type=str, help='key: id of teamsTab')
    c.argument('select', nargs='+', help='Select properties to be returned')
    c.argument('expand', nargs='+', help='Expand related entities')
# `teams team-schedule create-offer-shift-request`: shared scheduleChangeRequest
# fields (timestamps, 'Created By'/'Last Modified By' identity groups, manager/
# sender state) plus the offerShiftRequest-specific recipient fields.
with self.argument_context('teams team-schedule create-offer-shift-request') as c:
    c.argument('team_id', type=str, help='key: id of team')
    c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
    c.argument('created_date_time', help='The Timestamp type represents date and time information using ISO 8601 '
               'format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like this: '
               '\'2014-01-01T00:00:00Z\'')
    c.argument('last_modified_date_time', help='The Timestamp type represents date and time information using ISO '
               '8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like '
               'this: \'2014-01-01T00:00:00Z\'')
    c.argument('application', action=AddApplication, nargs='+', help='identity', arg_group='Last Modified By')
    c.argument('device', action=AddApplication, nargs='+', help='identity', arg_group='Last Modified By')
    c.argument('user', action=AddApplication, nargs='+', help='identity', arg_group='Last Modified By')
    c.argument('microsoft_graph_identity_application', action=AddApplication, nargs='+', help='identity',
               arg_group='Created By')
    c.argument('microsoft_graph_identity_device', action=AddApplication, nargs='+', help='identity',
               arg_group='Created By')
    c.argument('microsoft_graph_identity_user', action=AddApplication, nargs='+', help='identity',
               arg_group='Created By')
    c.argument('assigned_to', arg_type=get_enum_type(['sender', 'recipient', 'manager', 'system',
               'unknownFutureValue']), help='')
    c.argument('manager_action_date_time', help='')
    c.argument('manager_action_message', type=str, help='')
    c.argument('manager_user_id', type=str, help='')
    c.argument('sender_date_time', help='')
    c.argument('sender_message', type=str, help='')
    c.argument('sender_user_id', type=str, help='')
    c.argument('state', arg_type=get_enum_type(['pending', 'approved', 'declined', 'unknownFutureValue']),
               help='')
    c.argument('recipient_action_date_time', help='The Timestamp type represents date and time information using '
               'ISO 8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look '
               'like this: \'2014-01-01T00:00:00Z\'')
    c.argument('recipient_action_message', type=str, help='Custom message sent by recipient of the offer shift '
               'request.')
    c.argument('recipient_user_id', type=str, help='User ID of the recipient of the offer shift request.')
    # NOTE(review): help says 'User ID' but the parameter is sender_shift_id —
    # this mirrors the upstream Graph schema description; verify before editing,
    # since help text is user-visible output of the generated CLI.
    c.argument('sender_shift_id', type=str, help='User ID of the sender of the offer shift request.')
# `teams team-schedule create-open-shift`: shared schedule-entity metadata plus
# draft/shared openShiftItem payloads and the owning scheduling group.
with self.argument_context('teams team-schedule create-open-shift') as c:
    c.argument('team_id', type=str, help='key: id of team')
    c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
    c.argument('created_date_time', help='The Timestamp type represents date and time information using ISO 8601 '
               'format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like this: '
               '\'2014-01-01T00:00:00Z\'')
    c.argument('last_modified_date_time', help='The Timestamp type represents date and time information using ISO '
               '8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like '
               'this: \'2014-01-01T00:00:00Z\'')
    c.argument('application', action=AddApplication, nargs='+', help='identity', arg_group='Last Modified By')
    c.argument('device', action=AddApplication, nargs='+', help='identity', arg_group='Last Modified By')
    c.argument('user', action=AddApplication, nargs='+', help='identity', arg_group='Last Modified By')
    c.argument('microsoft_graph_identity_application', action=AddApplication, nargs='+', help='identity',
               arg_group='Created By')
    c.argument('microsoft_graph_identity_device', action=AddApplication, nargs='+', help='identity',
               arg_group='Created By')
    c.argument('microsoft_graph_identity_user', action=AddApplication, nargs='+', help='identity',
               arg_group='Created By')
    # Draft and shared variants share the AddDraftOpenShift action (same
    # openShiftItem schema).
    c.argument('draft_open_shift', action=AddDraftOpenShift, nargs='+', help='openShiftItem')
    c.argument('is_staged_for_deletion', arg_type=get_three_state_flag(), help='')
    c.argument('scheduling_group_id', type=str,
               help='ID for the scheduling group that the open shift belongs to.')
    c.argument('shared_open_shift', action=AddDraftOpenShift, nargs='+', help='openShiftItem')
# `teams team-schedule create-open-shift-change-request`: shared change-request
# fields plus the target open shift's ID.
with self.argument_context('teams team-schedule create-open-shift-change-request') as c:
    c.argument('team_id', type=str, help='key: id of team')
    c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
    c.argument('created_date_time', help='The Timestamp type represents date and time information using ISO 8601 '
               'format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like this: '
               '\'2014-01-01T00:00:00Z\'')
    c.argument('last_modified_date_time', help='The Timestamp type represents date and time information using ISO '
               '8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like '
               'this: \'2014-01-01T00:00:00Z\'')
    c.argument('application', action=AddApplication, nargs='+', help='identity', arg_group='Last Modified By')
    c.argument('device', action=AddApplication, nargs='+', help='identity', arg_group='Last Modified By')
    c.argument('user', action=AddApplication, nargs='+', help='identity', arg_group='Last Modified By')
    c.argument('microsoft_graph_identity_application', action=AddApplication, nargs='+', help='identity',
               arg_group='Created By')
    c.argument('microsoft_graph_identity_device', action=AddApplication, nargs='+', help='identity',
               arg_group='Created By')
    c.argument('microsoft_graph_identity_user', action=AddApplication, nargs='+', help='identity',
               arg_group='Created By')
    c.argument('assigned_to', arg_type=get_enum_type(['sender', 'recipient', 'manager', 'system',
               'unknownFutureValue']), help='')
    c.argument('manager_action_date_time', help='')
    c.argument('manager_action_message', type=str, help='')
    c.argument('manager_user_id', type=str, help='')
    c.argument('sender_date_time', help='')
    c.argument('sender_message', type=str, help='')
    c.argument('sender_user_id', type=str, help='')
    c.argument('state', arg_type=get_enum_type(['pending', 'approved', 'declined', 'unknownFutureValue']),
               help='')
    c.argument('open_shift_id', type=str, help='ID for the open shift.')
# `teams team-schedule create-scheduling-group`: shared metadata plus the
# schedulingGroup's display name, active flag and member user IDs.
with self.argument_context('teams team-schedule create-scheduling-group') as c:
    c.argument('team_id', type=str, help='key: id of team')
    c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
    c.argument('created_date_time', help='The Timestamp type represents date and time information using ISO 8601 '
               'format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like this: '
               '\'2014-01-01T00:00:00Z\'')
    c.argument('last_modified_date_time', help='The Timestamp type represents date and time information using ISO '
               '8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like '
               'this: \'2014-01-01T00:00:00Z\'')
    c.argument('application', action=AddApplication, nargs='+', help='identity', arg_group='Last Modified By')
    c.argument('device', action=AddApplication, nargs='+', help='identity', arg_group='Last Modified By')
    c.argument('user', action=AddApplication, nargs='+', help='identity', arg_group='Last Modified By')
    c.argument('microsoft_graph_identity_application', action=AddApplication, nargs='+', help='identity',
               arg_group='Created By')
    c.argument('microsoft_graph_identity_device', action=AddApplication, nargs='+', help='identity',
               arg_group='Created By')
    c.argument('microsoft_graph_identity_user', action=AddApplication, nargs='+', help='identity',
               arg_group='Created By')
    c.argument('display_name', type=str, help='The display name for the schedulingGroup. Required.')
    c.argument('is_active', arg_type=get_three_state_flag(), help='Indicates whether the schedulingGroup can be '
               'used when creating new entities or updating existing ones. Required.')
    c.argument('user_ids', nargs='+', help='The list of user IDs that are a member of the schedulingGroup. '
               'Required.')
# `teams team-schedule create-shift`: shared metadata plus two full shiftItem
# payloads — 'Shared Shift' (plain names) and 'Draft Shift' (the long
# microsoft_graph_* disambiguated names for the same shiftItem fields).
with self.argument_context('teams team-schedule create-shift') as c:
    c.argument('team_id', type=str, help='key: id of team')
    c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
    c.argument('created_date_time', help='The Timestamp type represents date and time information using ISO 8601 '
               'format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like this: '
               '\'2014-01-01T00:00:00Z\'')
    c.argument('last_modified_date_time', help='The Timestamp type represents date and time information using ISO '
               '8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like '
               'this: \'2014-01-01T00:00:00Z\'')
    c.argument('application', action=AddApplication, nargs='+', help='identity', arg_group='Last Modified By')
    c.argument('device', action=AddApplication, nargs='+', help='identity', arg_group='Last Modified By')
    c.argument('user', action=AddApplication, nargs='+', help='identity', arg_group='Last Modified By')
    c.argument('microsoft_graph_identity_application', action=AddApplication, nargs='+', help='identity',
               arg_group='Created By')
    c.argument('microsoft_graph_identity_device', action=AddApplication, nargs='+', help='identity',
               arg_group='Created By')
    c.argument('microsoft_graph_identity_user', action=AddApplication, nargs='+', help='identity',
               arg_group='Created By')
    c.argument('is_staged_for_deletion', arg_type=get_three_state_flag(), help='')
    c.argument('scheduling_group_id', type=str, help='ID of the scheduling group the shift is part of. Required.')
    c.argument('user_id', type=str, help='ID of the user assigned to the shift. Required.')
    c.argument('end_date_time', help='', arg_group='Shared Shift')
    c.argument('start_date_time', help='', arg_group='Shared Shift')
    c.argument('theme', arg_type=get_enum_type(['white', 'blue', 'green', 'purple', 'pink', 'yellow', 'gray',
               'darkBlue', 'darkGreen', 'darkPurple', 'darkPink', 'darkYellow',
               'unknownFutureValue']), help='', arg_group='Shared Shift')
    c.argument('activities', action=AddActivities, nargs='+', help='An incremental part of a shift which can cover '
               'details of when and where an employee is during their shift. For example, an assignment or a '
               'scheduled break or lunch. Required.', arg_group='Shared Shift')
    c.argument('display_name', type=str, help='The shift label of the shiftItem.', arg_group='Shared Shift')
    c.argument('notes', type=str, help='The shift notes for the shiftItem.', arg_group='Shared Shift')
    c.argument('microsoft_graph_schedule_entity_end_date_time_end_date_time', help='', arg_group='Draft Shift')
    c.argument('microsoft_graph_schedule_entity_start_date_time_start_date_time', help='',
               arg_group='Draft Shift')
    c.argument('microsoft_graph_schedule_entity_theme', arg_type=get_enum_type(['white', 'blue', 'green', 'purple',
               'pink', 'yellow', 'gray',
               'darkBlue', 'darkGreen',
               'darkPurple', 'darkPink',
               'darkYellow',
               'unknownFutureValue']), help='',
               arg_group='Draft Shift')
    c.argument('microsoft_graph_shift_item_activities', action=AddActivities, nargs='+', help='An incremental part '
               'of a shift which can cover details of when and where an employee is during their shift. For '
               'example, an assignment or a scheduled break or lunch. Required.', arg_group='Draft Shift')
    c.argument('microsoft_graph_shift_item_display_name', type=str, help='The shift label of the shiftItem.',
               arg_group='Draft Shift')
    c.argument('microsoft_graph_shift_item_notes', type=str, help='The shift notes for the shiftItem.',
               arg_group='Draft Shift')
# `teams team-schedule create-swap-shift-change-request`: same field set as
# create-offer-shift-request plus the recipient's shift ID to swap with.
with self.argument_context('teams team-schedule create-swap-shift-change-request') as c:
    c.argument('team_id', type=str, help='key: id of team')
    c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
    c.argument('created_date_time', help='The Timestamp type represents date and time information using ISO 8601 '
               'format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like this: '
               '\'2014-01-01T00:00:00Z\'')
    c.argument('last_modified_date_time', help='The Timestamp type represents date and time information using ISO '
               '8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like '
               'this: \'2014-01-01T00:00:00Z\'')
    c.argument('application', action=AddApplication, nargs='+', help='identity', arg_group='Last Modified By')
    c.argument('device', action=AddApplication, nargs='+', help='identity', arg_group='Last Modified By')
    c.argument('user', action=AddApplication, nargs='+', help='identity', arg_group='Last Modified By')
    c.argument('microsoft_graph_identity_application', action=AddApplication, nargs='+', help='identity',
               arg_group='Created By')
    c.argument('microsoft_graph_identity_device', action=AddApplication, nargs='+', help='identity',
               arg_group='Created By')
    c.argument('microsoft_graph_identity_user', action=AddApplication, nargs='+', help='identity',
               arg_group='Created By')
    c.argument('assigned_to', arg_type=get_enum_type(['sender', 'recipient', 'manager', 'system',
               'unknownFutureValue']), help='')
    c.argument('manager_action_date_time', help='')
    c.argument('manager_action_message', type=str, help='')
    c.argument('manager_user_id', type=str, help='')
    c.argument('sender_date_time', help='')
    c.argument('sender_message', type=str, help='')
    c.argument('sender_user_id', type=str, help='')
    c.argument('state', arg_type=get_enum_type(['pending', 'approved', 'declined', 'unknownFutureValue']),
               help='')
    c.argument('recipient_action_date_time', help='The Timestamp type represents date and time information using '
               'ISO 8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look '
               'like this: \'2014-01-01T00:00:00Z\'')
    c.argument('recipient_action_message', type=str, help='Custom message sent by recipient of the offer shift '
               'request.')
    c.argument('recipient_user_id', type=str, help='User ID of the recipient of the offer shift request.')
    # NOTE(review): help text mirrors upstream Graph schema wording even though
    # the parameter carries a shift ID — left as generated.
    c.argument('sender_shift_id', type=str, help='User ID of the sender of the offer shift request.')
    c.argument('recipient_shift_id', type=str, help='ShiftId for the recipient user with whom the request is to '
               'swap.')
# `teams team-schedule create-time-card`: shared metadata, timeCard state/break
# fields, and the clock-in/clock-out event groups (the boolean_/microsoft_graph_
# prefixed names disambiguate the two events' identically-named fields).
with self.argument_context('teams team-schedule create-time-card') as c:
    c.argument('team_id', type=str, help='key: id of team')
    c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
    c.argument('created_date_time', help='The Timestamp type represents date and time information using ISO 8601 '
               'format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like this: '
               '\'2014-01-01T00:00:00Z\'')
    c.argument('last_modified_date_time', help='The Timestamp type represents date and time information using ISO '
               '8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like '
               'this: \'2014-01-01T00:00:00Z\'')
    c.argument('application', action=AddApplication, nargs='+', help='identity', arg_group='Last Modified By')
    c.argument('device', action=AddApplication, nargs='+', help='identity', arg_group='Last Modified By')
    c.argument('user', action=AddApplication, nargs='+', help='identity', arg_group='Last Modified By')
    c.argument('microsoft_graph_identity_application', action=AddApplication, nargs='+', help='identity',
               arg_group='Created By')
    c.argument('microsoft_graph_identity_device', action=AddApplication, nargs='+', help='identity',
               arg_group='Created By')
    c.argument('microsoft_graph_identity_user', action=AddApplication, nargs='+', help='identity',
               arg_group='Created By')
    c.argument('breaks', type=validate_file_or_dict, help=' Expected value: json-string/@json-file.')
    c.argument('confirmed_by', arg_type=get_enum_type(['none', 'user', 'manager', 'unknownFutureValue']), help='')
    c.argument('notes', action=AddBody, nargs='+', help='itemBody')
    c.argument('state', arg_type=get_enum_type(['clockedIn', 'onBreak', 'clockedOut', 'unknownFutureValue']),
               help='')
    c.argument('user_id', type=str, help='')
    c.argument('microsoft_graph_time_card_entry_breaks', type=validate_file_or_dict, help=' Expected value: '
               'json-string/@json-file.', arg_group='Original Entry')
    c.argument('clock_in_event', type=validate_file_or_dict, help='timeCardEvent Expected value: '
               'json-string/@json-file.', arg_group='Original Entry')
    c.argument('clock_out_event', type=validate_file_or_dict, help='timeCardEvent Expected value: '
               'json-string/@json-file.', arg_group='Original Entry')
    c.argument('at_approved_location', arg_type=get_three_state_flag(), help='', arg_group='Clock Out Event')
    c.argument('date_time', help='', arg_group='Clock Out Event')
    # arg_group below is two adjacent literals ('Clock Out ' 'Event') that
    # concatenate to 'Clock Out Event'.
    c.argument('microsoft_graph_item_body_notes', action=AddBody, nargs='+', help='itemBody', arg_group='Clock Out '
               'Event')
    c.argument('boolean_at_approved_location', arg_type=get_three_state_flag(), help='',
               arg_group='Clock In Event')
    c.argument('microsoft_graph_time_card_event_date_time', help='', arg_group='Clock In Event')
    c.argument('notes1', action=AddBody, nargs='+', help='itemBody', arg_group='Clock In Event')
# `teams team-schedule create-time-off`: shared metadata plus draft/shared
# timeOffItem payloads and the assigned user.
with self.argument_context('teams team-schedule create-time-off') as c:
    c.argument('team_id', type=str, help='key: id of team')
    c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
    c.argument('created_date_time', help='The Timestamp type represents date and time information using ISO 8601 '
               'format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like this: '
               '\'2014-01-01T00:00:00Z\'')
    c.argument('last_modified_date_time', help='The Timestamp type represents date and time information using ISO '
               '8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like '
               'this: \'2014-01-01T00:00:00Z\'')
    c.argument('application', action=AddApplication, nargs='+', help='identity', arg_group='Last Modified By')
    c.argument('device', action=AddApplication, nargs='+', help='identity', arg_group='Last Modified By')
    c.argument('user', action=AddApplication, nargs='+', help='identity', arg_group='Last Modified By')
    c.argument('microsoft_graph_identity_application', action=AddApplication, nargs='+', help='identity',
               arg_group='Created By')
    c.argument('microsoft_graph_identity_device', action=AddApplication, nargs='+', help='identity',
               arg_group='Created By')
    c.argument('microsoft_graph_identity_user', action=AddApplication, nargs='+', help='identity',
               arg_group='Created By')
    # Draft and shared variants share the AddDraftTimeOff action (same
    # timeOffItem schema).
    c.argument('draft_time_off', action=AddDraftTimeOff, nargs='+', help='timeOffItem')
    c.argument('is_staged_for_deletion', arg_type=get_three_state_flag(), help='')
    c.argument('shared_time_off', action=AddDraftTimeOff, nargs='+', help='timeOffItem')
    c.argument('user_id', type=str, help='ID of the user assigned to the timeOff. Required.')
# `teams team-schedule create-time-off-reason`: shared metadata plus the
# timeOffReason name, icon enum and active flag.
with self.argument_context('teams team-schedule create-time-off-reason') as c:
    c.argument('team_id', type=str, help='key: id of team')
    c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
    c.argument('created_date_time', help='The Timestamp type represents date and time information using ISO 8601 '
               'format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like this: '
               '\'2014-01-01T00:00:00Z\'')
    c.argument('last_modified_date_time', help='The Timestamp type represents date and time information using ISO '
               '8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like '
               'this: \'2014-01-01T00:00:00Z\'')
    c.argument('application', action=AddApplication, nargs='+', help='identity', arg_group='Last Modified By')
    c.argument('device', action=AddApplication, nargs='+', help='identity', arg_group='Last Modified By')
    c.argument('user', action=AddApplication, nargs='+', help='identity', arg_group='Last Modified By')
    c.argument('microsoft_graph_identity_application', action=AddApplication, nargs='+', help='identity',
               arg_group='Created By')
    c.argument('microsoft_graph_identity_device', action=AddApplication, nargs='+', help='identity',
               arg_group='Created By')
    c.argument('microsoft_graph_identity_user', action=AddApplication, nargs='+', help='identity',
               arg_group='Created By')
    c.argument('display_name', type=str, help='The name of the timeOffReason. Required.')
    c.argument('icon_type', arg_type=get_enum_type(['none', 'car', 'calendar', 'running', 'plane', 'firstAid',
               'doctor', 'notWorking', 'clock', 'juryDuty', 'globe', 'cup',
               'phone', 'weather', 'umbrella', 'piggyBank', 'dog', 'cake',
               'trafficCone', 'pin', 'sunny', 'unknownFutureValue']),
               help='')
    c.argument('is_active', arg_type=get_three_state_flag(), help='Indicates whether the timeOffReason can be used '
               'when creating new entities or updating existing ones. Required.')
    # `teams team-schedule create-time-off-request`: arguments for creating a
    # timeOffRequest on a team's schedule.
    with self.argument_context('teams team-schedule create-time-off-request') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
        c.argument('created_date_time', help='The Timestamp type represents date and time information using ISO 8601 '
                   'format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like this: '
                   '\'2014-01-01T00:00:00Z\'')
        c.argument('last_modified_date_time', help='The Timestamp type represents date and time information using ISO '
                   '8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like '
                   'this: \'2014-01-01T00:00:00Z\'')
        # Flattened identitySet for lastModifiedBy.
        c.argument('application', action=AddApplication, nargs='+', help='identity', arg_group='Last Modified By')
        c.argument('device', action=AddApplication, nargs='+', help='identity', arg_group='Last Modified By')
        c.argument('user', action=AddApplication, nargs='+', help='identity', arg_group='Last Modified By')
        # Flattened identitySet for createdBy.
        c.argument('microsoft_graph_identity_application', action=AddApplication, nargs='+', help='identity',
                   arg_group='Created By')
        c.argument('microsoft_graph_identity_device', action=AddApplication, nargs='+', help='identity',
                   arg_group='Created By')
        c.argument('microsoft_graph_identity_user', action=AddApplication, nargs='+', help='identity',
                   arg_group='Created By')
        # Approval-workflow fields shared by scheduleChangeRequest-derived entities.
        c.argument('assigned_to', arg_type=get_enum_type(['sender', 'recipient', 'manager', 'system',
                                                          'unknownFutureValue']), help='')
        c.argument('manager_action_date_time', help='')
        c.argument('manager_action_message', type=str, help='')
        c.argument('manager_user_id', type=str, help='')
        c.argument('sender_date_time', help='')
        c.argument('sender_message', type=str, help='')
        c.argument('sender_user_id', type=str, help='')
        c.argument('state', arg_type=get_enum_type(['pending', 'approved', 'declined', 'unknownFutureValue']),
                   help='')
        c.argument('end_date_time', help='The Timestamp type represents date and time information using ISO 8601 '
                   'format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like this: '
                   '\'2014-01-01T00:00:00Z\'')
        c.argument('start_date_time', help='The Timestamp type represents date and time information using ISO 8601 '
                   'format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like this: '
                   '\'2014-01-01T00:00:00Z\'')
        c.argument('time_off_reason_id', type=str, help='The reason for the time off.')
    # delete-* commands: each registers the team key, the key of the schedule
    # sub-entity to delete, and an ETag (--if-match) for conditional deletion.
    with self.argument_context('teams team-schedule delete-offer-shift-request') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('offer_shift_request_id', type=str, help='key: id of offerShiftRequest')
        c.argument('if_match', type=str, help='ETag')
    with self.argument_context('teams team-schedule delete-open-shift') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('open_shift_id', type=str, help='key: id of openShift')
        c.argument('if_match', type=str, help='ETag')
    with self.argument_context('teams team-schedule delete-open-shift-change-request') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('open_shift_change_request_id', type=str, help='key: id of openShiftChangeRequest')
        c.argument('if_match', type=str, help='ETag')
    with self.argument_context('teams team-schedule delete-scheduling-group') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('scheduling_group_id', type=str, help='key: id of schedulingGroup')
        c.argument('if_match', type=str, help='ETag')
    with self.argument_context('teams team-schedule delete-shift') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('shift_id', type=str, help='key: id of shift')
        c.argument('if_match', type=str, help='ETag')
    with self.argument_context('teams team-schedule delete-swap-shift-change-request') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('swap_shifts_change_request_id', type=str, help='key: id of swapShiftsChangeRequest')
        c.argument('if_match', type=str, help='ETag')
    with self.argument_context('teams team-schedule delete-time-card') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('time_card_id', type=str, help='key: id of timeCard')
        c.argument('if_match', type=str, help='ETag')
    with self.argument_context('teams team-schedule delete-time-off') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('time_off_id', type=str, help='key: id of timeOff')
        c.argument('if_match', type=str, help='ETag')
    with self.argument_context('teams team-schedule delete-time-off-reason') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('time_off_reason_id', type=str, help='key: id of timeOffReason')
        c.argument('if_match', type=str, help='ETag')
    with self.argument_context('teams team-schedule delete-time-off-request') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('time_off_request_id', type=str, help='key: id of timeOffRequest')
        c.argument('if_match', type=str, help='ETag')
    # list-* commands: each registers the team key plus the OData query options
    # ($orderby/$select/$expand) for the collection being listed.
    with self.argument_context('teams team-schedule list-offer-shift-request') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('orderby', nargs='+', help='Order items by property values')
        c.argument('select', nargs='+', help='Select properties to be returned')
        c.argument('expand', nargs='+', help='Expand related entities')
    with self.argument_context('teams team-schedule list-open-shift') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('orderby', nargs='+', help='Order items by property values')
        c.argument('select', nargs='+', help='Select properties to be returned')
        c.argument('expand', nargs='+', help='Expand related entities')
    with self.argument_context('teams team-schedule list-open-shift-change-request') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('orderby', nargs='+', help='Order items by property values')
        c.argument('select', nargs='+', help='Select properties to be returned')
        c.argument('expand', nargs='+', help='Expand related entities')
    with self.argument_context('teams team-schedule list-scheduling-group') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('orderby', nargs='+', help='Order items by property values')
        c.argument('select', nargs='+', help='Select properties to be returned')
        c.argument('expand', nargs='+', help='Expand related entities')
    with self.argument_context('teams team-schedule list-shift') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('orderby', nargs='+', help='Order items by property values')
        c.argument('select', nargs='+', help='Select properties to be returned')
        c.argument('expand', nargs='+', help='Expand related entities')
    with self.argument_context('teams team-schedule list-swap-shift-change-request') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('orderby', nargs='+', help='Order items by property values')
        c.argument('select', nargs='+', help='Select properties to be returned')
        c.argument('expand', nargs='+', help='Expand related entities')
    with self.argument_context('teams team-schedule list-time-card') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('orderby', nargs='+', help='Order items by property values')
        c.argument('select', nargs='+', help='Select properties to be returned')
        c.argument('expand', nargs='+', help='Expand related entities')
    with self.argument_context('teams team-schedule list-time-off') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('orderby', nargs='+', help='Order items by property values')
        c.argument('select', nargs='+', help='Select properties to be returned')
        c.argument('expand', nargs='+', help='Expand related entities')
    with self.argument_context('teams team-schedule list-time-off-reason') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('orderby', nargs='+', help='Order items by property values')
        c.argument('select', nargs='+', help='Select properties to be returned')
        c.argument('expand', nargs='+', help='Expand related entities')
    with self.argument_context('teams team-schedule list-time-off-request') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('orderby', nargs='+', help='Order items by property values')
        c.argument('select', nargs='+', help='Select properties to be returned')
        c.argument('expand', nargs='+', help='Expand related entities')
    # `teams team-schedule share`: presumably maps to the Graph schedule `share`
    # action over the given start/end window — confirm against the command handler.
    with self.argument_context('teams team-schedule share') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('notify_team', arg_type=get_three_state_flag(), help='')
        c.argument('start_date_time', help='')
        c.argument('end_date_time', help='')
    # show-* commands: each registers the team key, the entity key, and the
    # OData $select/$expand query options for a single-entity GET.
    with self.argument_context('teams team-schedule show-offer-shift-request') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('offer_shift_request_id', type=str, help='key: id of offerShiftRequest')
        c.argument('select', nargs='+', help='Select properties to be returned')
        c.argument('expand', nargs='+', help='Expand related entities')
    with self.argument_context('teams team-schedule show-open-shift') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('open_shift_id', type=str, help='key: id of openShift')
        c.argument('select', nargs='+', help='Select properties to be returned')
        c.argument('expand', nargs='+', help='Expand related entities')
    with self.argument_context('teams team-schedule show-open-shift-change-request') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('open_shift_change_request_id', type=str, help='key: id of openShiftChangeRequest')
        c.argument('select', nargs='+', help='Select properties to be returned')
        c.argument('expand', nargs='+', help='Expand related entities')
    with self.argument_context('teams team-schedule show-scheduling-group') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('scheduling_group_id', type=str, help='key: id of schedulingGroup')
        c.argument('select', nargs='+', help='Select properties to be returned')
        c.argument('expand', nargs='+', help='Expand related entities')
    with self.argument_context('teams team-schedule show-shift') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('shift_id', type=str, help='key: id of shift')
        c.argument('select', nargs='+', help='Select properties to be returned')
        c.argument('expand', nargs='+', help='Expand related entities')
    with self.argument_context('teams team-schedule show-swap-shift-change-request') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('swap_shifts_change_request_id', type=str, help='key: id of swapShiftsChangeRequest')
        c.argument('select', nargs='+', help='Select properties to be returned')
        c.argument('expand', nargs='+', help='Expand related entities')
    with self.argument_context('teams team-schedule show-time-card') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('time_card_id', type=str, help='key: id of timeCard')
        c.argument('select', nargs='+', help='Select properties to be returned')
        c.argument('expand', nargs='+', help='Expand related entities')
    with self.argument_context('teams team-schedule show-time-off') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('time_off_id', type=str, help='key: id of timeOff')
        c.argument('select', nargs='+', help='Select properties to be returned')
        c.argument('expand', nargs='+', help='Expand related entities')
    with self.argument_context('teams team-schedule show-time-off-reason') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('time_off_reason_id', type=str, help='key: id of timeOffReason')
        c.argument('select', nargs='+', help='Select properties to be returned')
        c.argument('expand', nargs='+', help='Expand related entities')
    with self.argument_context('teams team-schedule show-time-off-request') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('time_off_request_id', type=str, help='key: id of timeOffRequest')
        c.argument('select', nargs='+', help='Select properties to be returned')
        c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('teams team-schedule update-offer-shift-request') as c:
c.argument('team_id', type=str, help='key: id of team')
c.argument('offer_shift_request_id', type=str, help='key: id of offerShiftRequest')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('created_date_time', help='The Timestamp type represents date and time information using ISO 8601 '
'format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like this: '
'\'2014-01-01T00:00:00Z\'')
c.argument('last_modified_date_time', help='The Timestamp type represents date and time information using ISO '
'8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like '
'this: \'2014-01-01T00:00:00Z\'')
c.argument('application', action=AddApplication, nargs='+', help='identity', arg_group='Last Modified By')
c.argument('device', action=AddApplication, nargs='+', help='identity', arg_group='Last Modified By')
c.argument('user', action=AddApplication, nargs='+', help='identity', arg_group='Last Modified By')
c.argument('microsoft_graph_identity_application', action=AddApplication, nargs='+', help='identity',
arg_group='Created By')
c.argument('microsoft_graph_identity_device', action=AddApplication, nargs='+', help='identity',
arg_group='Created By')
c.argument('microsoft_graph_identity_user', action=AddApplication, nargs='+', help='identity',
arg_group='Created By')
c.argument('assigned_to', arg_type=get_enum_type(['sender', 'recipient', 'manager', 'system',
'unknownFutureValue']), help='')
c.argument('manager_action_date_time', help='')
c.argument('manager_action_message', type=str, help='')
c.argument('manager_user_id', type=str, help='')
c.argument('sender_date_time', help='')
c.argument('sender_message', type=str, help='')
c.argument('sender_user_id', type=str, help='')
c.argument('state', arg_type=get_enum_type(['pending', 'approved', 'declined', 'unknownFutureValue']),
help='')
c.argument('recipient_action_date_time', help='The Timestamp type represents date and time information using '
'ISO 8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look '
'like this: \'2014-01-01T00:00:00Z\'')
c.argument('recipient_action_message', type=str, help='Custom message sent by recipient of the offer shift '
'request.')
c.argument('recipient_user_id', type=str, help='User ID of the recipient of the offer shift request.')
c.argument('sender_shift_id', type=str, help='User ID of the sender of the offer shift request.')
    # `teams team-schedule update-open-shift`: arguments for replacing an
    # openShift on a team's schedule.
    with self.argument_context('teams team-schedule update-open-shift') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('open_shift_id', type=str, help='key: id of openShift')
        c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
        c.argument('created_date_time', help='The Timestamp type represents date and time information using ISO 8601 '
                   'format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like this: '
                   '\'2014-01-01T00:00:00Z\'')
        c.argument('last_modified_date_time', help='The Timestamp type represents date and time information using ISO '
                   '8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like '
                   'this: \'2014-01-01T00:00:00Z\'')
        # Flattened identitySet for lastModifiedBy.
        c.argument('application', action=AddApplication, nargs='+', help='identity', arg_group='Last Modified By')
        c.argument('device', action=AddApplication, nargs='+', help='identity', arg_group='Last Modified By')
        c.argument('user', action=AddApplication, nargs='+', help='identity', arg_group='Last Modified By')
        # Flattened identitySet for createdBy.
        c.argument('microsoft_graph_identity_application', action=AddApplication, nargs='+', help='identity',
                   arg_group='Created By')
        c.argument('microsoft_graph_identity_device', action=AddApplication, nargs='+', help='identity',
                   arg_group='Created By')
        c.argument('microsoft_graph_identity_user', action=AddApplication, nargs='+', help='identity',
                   arg_group='Created By')
        # Draft vs shared variants both use the same openShiftItem parser action.
        c.argument('draft_open_shift', action=AddDraftOpenShift, nargs='+', help='openShiftItem')
        c.argument('is_staged_for_deletion', arg_type=get_three_state_flag(), help='')
        c.argument('scheduling_group_id', type=str,
                   help='ID for the scheduling group that the open shift belongs to.')
        c.argument('shared_open_shift', action=AddDraftOpenShift, nargs='+', help='openShiftItem')
    # `teams team-schedule update-open-shift-change-request`: arguments for
    # replacing an openShiftChangeRequest on a team's schedule.
    with self.argument_context('teams team-schedule update-open-shift-change-request') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('open_shift_change_request_id', type=str, help='key: id of openShiftChangeRequest')
        c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
        c.argument('created_date_time', help='The Timestamp type represents date and time information using ISO 8601 '
                   'format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like this: '
                   '\'2014-01-01T00:00:00Z\'')
        c.argument('last_modified_date_time', help='The Timestamp type represents date and time information using ISO '
                   '8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like '
                   'this: \'2014-01-01T00:00:00Z\'')
        # Flattened identitySet for lastModifiedBy.
        c.argument('application', action=AddApplication, nargs='+', help='identity', arg_group='Last Modified By')
        c.argument('device', action=AddApplication, nargs='+', help='identity', arg_group='Last Modified By')
        c.argument('user', action=AddApplication, nargs='+', help='identity', arg_group='Last Modified By')
        # Flattened identitySet for createdBy.
        c.argument('microsoft_graph_identity_application', action=AddApplication, nargs='+', help='identity',
                   arg_group='Created By')
        c.argument('microsoft_graph_identity_device', action=AddApplication, nargs='+', help='identity',
                   arg_group='Created By')
        c.argument('microsoft_graph_identity_user', action=AddApplication, nargs='+', help='identity',
                   arg_group='Created By')
        # Approval-workflow fields shared by scheduleChangeRequest-derived entities.
        c.argument('assigned_to', arg_type=get_enum_type(['sender', 'recipient', 'manager', 'system',
                                                          'unknownFutureValue']), help='')
        c.argument('manager_action_date_time', help='')
        c.argument('manager_action_message', type=str, help='')
        c.argument('manager_user_id', type=str, help='')
        c.argument('sender_date_time', help='')
        c.argument('sender_message', type=str, help='')
        c.argument('sender_user_id', type=str, help='')
        c.argument('state', arg_type=get_enum_type(['pending', 'approved', 'declined', 'unknownFutureValue']),
                   help='')
        c.argument('open_shift_id', type=str, help='ID for the open shift.')
    # `teams team-schedule update-scheduling-group`: arguments for replacing a
    # schedulingGroup on a team's schedule.
    with self.argument_context('teams team-schedule update-scheduling-group') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('scheduling_group_id', type=str, help='key: id of schedulingGroup')
        c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
        c.argument('created_date_time', help='The Timestamp type represents date and time information using ISO 8601 '
                   'format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like this: '
                   '\'2014-01-01T00:00:00Z\'')
        c.argument('last_modified_date_time', help='The Timestamp type represents date and time information using ISO '
                   '8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like '
                   'this: \'2014-01-01T00:00:00Z\'')
        # Flattened identitySet for lastModifiedBy.
        c.argument('application', action=AddApplication, nargs='+', help='identity', arg_group='Last Modified By')
        c.argument('device', action=AddApplication, nargs='+', help='identity', arg_group='Last Modified By')
        c.argument('user', action=AddApplication, nargs='+', help='identity', arg_group='Last Modified By')
        # Flattened identitySet for createdBy.
        c.argument('microsoft_graph_identity_application', action=AddApplication, nargs='+', help='identity',
                   arg_group='Created By')
        c.argument('microsoft_graph_identity_device', action=AddApplication, nargs='+', help='identity',
                   arg_group='Created By')
        c.argument('microsoft_graph_identity_user', action=AddApplication, nargs='+', help='identity',
                   arg_group='Created By')
        c.argument('display_name', type=str, help='The display name for the schedulingGroup. Required.')
        c.argument('is_active', arg_type=get_three_state_flag(), help='Indicates whether the schedulingGroup can be '
                   'used when creating new entities or updating existing ones. Required.')
        c.argument('user_ids', nargs='+', help='The list of user IDs that are a member of the schedulingGroup. '
                   'Required.')
    # `teams team-schedule update-shift`: arguments for replacing a shift. The
    # shiftItem payload is registered twice: once under arg_group 'Shared Shift'
    # and once (with `microsoft_graph_...`-prefixed names) under 'Draft Shift'.
    with self.argument_context('teams team-schedule update-shift') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('shift_id', type=str, help='key: id of shift')
        c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
        c.argument('created_date_time', help='The Timestamp type represents date and time information using ISO 8601 '
                   'format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like this: '
                   '\'2014-01-01T00:00:00Z\'')
        c.argument('last_modified_date_time', help='The Timestamp type represents date and time information using ISO '
                   '8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like '
                   'this: \'2014-01-01T00:00:00Z\'')
        # Flattened identitySet for lastModifiedBy.
        c.argument('application', action=AddApplication, nargs='+', help='identity', arg_group='Last Modified By')
        c.argument('device', action=AddApplication, nargs='+', help='identity', arg_group='Last Modified By')
        c.argument('user', action=AddApplication, nargs='+', help='identity', arg_group='Last Modified By')
        # Flattened identitySet for createdBy.
        c.argument('microsoft_graph_identity_application', action=AddApplication, nargs='+', help='identity',
                   arg_group='Created By')
        c.argument('microsoft_graph_identity_device', action=AddApplication, nargs='+', help='identity',
                   arg_group='Created By')
        c.argument('microsoft_graph_identity_user', action=AddApplication, nargs='+', help='identity',
                   arg_group='Created By')
        c.argument('is_staged_for_deletion', arg_type=get_three_state_flag(), help='')
        c.argument('scheduling_group_id', type=str, help='ID of the scheduling group the shift is part of. Required.')
        c.argument('user_id', type=str, help='ID of the user assigned to the shift. Required.')
        # shiftItem fields for the sharedShift payload.
        c.argument('end_date_time', help='', arg_group='Shared Shift')
        c.argument('start_date_time', help='', arg_group='Shared Shift')
        c.argument('theme', arg_type=get_enum_type(['white', 'blue', 'green', 'purple', 'pink', 'yellow', 'gray',
                                                    'darkBlue', 'darkGreen', 'darkPurple', 'darkPink', 'darkYellow',
                                                    'unknownFutureValue']), help='', arg_group='Shared Shift')
        c.argument('activities', action=AddActivities, nargs='+', help='An incremental part of a shift which can cover '
                   'details of when and where an employee is during their shift. For example, an assignment or a '
                   'scheduled break or lunch. Required.', arg_group='Shared Shift')
        c.argument('display_name', type=str, help='The shift label of the shiftItem.', arg_group='Shared Shift')
        c.argument('notes', type=str, help='The shift notes for the shiftItem.', arg_group='Shared Shift')
        # Same shiftItem fields for the draftShift payload; names carry the flattener's
        # disambiguation prefixes.
        c.argument('microsoft_graph_schedule_entity_end_date_time_end_date_time', help='', arg_group='Draft Shift')
        c.argument('microsoft_graph_schedule_entity_start_date_time_start_date_time', help='',
                   arg_group='Draft Shift')
        c.argument('microsoft_graph_schedule_entity_theme', arg_type=get_enum_type(['white', 'blue', 'green', 'purple',
                                                                                    'pink', 'yellow', 'gray',
                                                                                    'darkBlue', 'darkGreen',
                                                                                    'darkPurple', 'darkPink',
                                                                                    'darkYellow',
                                                                                    'unknownFutureValue']), help='',
                   arg_group='Draft Shift')
        c.argument('microsoft_graph_shift_item_activities', action=AddActivities, nargs='+', help='An incremental part '
                   'of a shift which can cover details of when and where an employee is during their shift. For '
                   'example, an assignment or a scheduled break or lunch. Required.', arg_group='Draft Shift')
        c.argument('microsoft_graph_shift_item_display_name', type=str, help='The shift label of the shiftItem.',
                   arg_group='Draft Shift')
        c.argument('microsoft_graph_shift_item_notes', type=str, help='The shift notes for the shiftItem.',
                   arg_group='Draft Shift')
with self.argument_context('teams team-schedule update-swap-shift-change-request') as c:
c.argument('team_id', type=str, help='key: id of team')
c.argument('swap_shifts_change_request_id', type=str, help='key: id of swapShiftsChangeRequest')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('created_date_time', help='The Timestamp type represents date and time information using ISO 8601 '
'format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like this: '
'\'2014-01-01T00:00:00Z\'')
c.argument('last_modified_date_time', help='The Timestamp type represents date and time information using ISO '
'8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like '
'this: \'2014-01-01T00:00:00Z\'')
c.argument('application', action=AddApplication, nargs='+', help='identity', arg_group='Last Modified By')
c.argument('device', action=AddApplication, nargs='+', help='identity', arg_group='Last Modified By')
c.argument('user', action=AddApplication, nargs='+', help='identity', arg_group='Last Modified By')
c.argument('microsoft_graph_identity_application', action=AddApplication, nargs='+', help='identity',
arg_group='Created By')
c.argument('microsoft_graph_identity_device', action=AddApplication, nargs='+', help='identity',
arg_group='Created By')
c.argument('microsoft_graph_identity_user', action=AddApplication, nargs='+', help='identity',
arg_group='Created By')
c.argument('assigned_to', arg_type=get_enum_type(['sender', 'recipient', 'manager', 'system',
'unknownFutureValue']), help='')
c.argument('manager_action_date_time', help='')
c.argument('manager_action_message', type=str, help='')
c.argument('manager_user_id', type=str, help='')
c.argument('sender_date_time', help='')
c.argument('sender_message', type=str, help='')
c.argument('sender_user_id', type=str, help='')
c.argument('state', arg_type=get_enum_type(['pending', 'approved', 'declined', 'unknownFutureValue']),
help='')
c.argument('recipient_action_date_time', help='The Timestamp type represents date and time information using '
'ISO 8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look '
'like this: \'2014-01-01T00:00:00Z\'')
c.argument('recipient_action_message', type=str, help='Custom message sent by recipient of the offer shift '
'request.')
c.argument('recipient_user_id', type=str, help='User ID of the recipient of the offer shift request.')
c.argument('sender_shift_id', type=str, help='User ID of the sender of the offer shift request.')
c.argument('recipient_shift_id', type=str, help='ShiftId for the recipient user with whom the request is to '
'swap.')
    # `teams team-schedule update-time-card`: arguments for replacing a timeCard.
    # Complex nested payloads (breaks, clock-in/out events) are taken as raw JSON
    # via validate_file_or_dict instead of flattened options.
    with self.argument_context('teams team-schedule update-time-card') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('time_card_id', type=str, help='key: id of timeCard')
        c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
        c.argument('created_date_time', help='The Timestamp type represents date and time information using ISO 8601 '
                   'format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like this: '
                   '\'2014-01-01T00:00:00Z\'')
        c.argument('last_modified_date_time', help='The Timestamp type represents date and time information using ISO '
                   '8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like '
                   'this: \'2014-01-01T00:00:00Z\'')
        # Flattened identitySet for lastModifiedBy.
        c.argument('application', action=AddApplication, nargs='+', help='identity', arg_group='Last Modified By')
        c.argument('device', action=AddApplication, nargs='+', help='identity', arg_group='Last Modified By')
        c.argument('user', action=AddApplication, nargs='+', help='identity', arg_group='Last Modified By')
        # Flattened identitySet for createdBy.
        c.argument('microsoft_graph_identity_application', action=AddApplication, nargs='+', help='identity',
                   arg_group='Created By')
        c.argument('microsoft_graph_identity_device', action=AddApplication, nargs='+', help='identity',
                   arg_group='Created By')
        c.argument('microsoft_graph_identity_user', action=AddApplication, nargs='+', help='identity',
                   arg_group='Created By')
        c.argument('breaks', type=validate_file_or_dict, help=' Expected value: json-string/@json-file.')
        c.argument('confirmed_by', arg_type=get_enum_type(['none', 'user', 'manager', 'unknownFutureValue']), help='')
        c.argument('notes', action=AddBody, nargs='+', help='itemBody')
        c.argument('state', arg_type=get_enum_type(['clockedIn', 'onBreak', 'clockedOut', 'unknownFutureValue']),
                   help='')
        c.argument('user_id', type=str, help='')
        # originalEntry sub-object, passed through as JSON.
        c.argument('microsoft_graph_time_card_entry_breaks', type=validate_file_or_dict, help=' Expected value: '
                   'json-string/@json-file.', arg_group='Original Entry')
        c.argument('clock_in_event', type=validate_file_or_dict, help='timeCardEvent Expected value: '
                   'json-string/@json-file.', arg_group='Original Entry')
        c.argument('clock_out_event', type=validate_file_or_dict, help='timeCardEvent Expected value: '
                   'json-string/@json-file.', arg_group='Original Entry')
        # Flattened timeCardEvent fields for clock-out / clock-in; suffixed names
        # (boolean_..., notes1) come from the generator's collision avoidance.
        c.argument('at_approved_location', arg_type=get_three_state_flag(), help='', arg_group='Clock Out Event')
        c.argument('date_time', help='', arg_group='Clock Out Event')
        c.argument('microsoft_graph_item_body_notes', action=AddBody, nargs='+', help='itemBody', arg_group='Clock Out '
                   'Event')
        c.argument('boolean_at_approved_location', arg_type=get_three_state_flag(), help='',
                   arg_group='Clock In Event')
        c.argument('microsoft_graph_time_card_event_date_time', help='', arg_group='Clock In Event')
        c.argument('notes1', action=AddBody, nargs='+', help='itemBody', arg_group='Clock In Event')
    # `teams team-schedule update-time-off`: arguments for replacing a timeOff
    # entity on a team's schedule.
    with self.argument_context('teams team-schedule update-time-off') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('time_off_id', type=str, help='key: id of timeOff')
        c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
        c.argument('created_date_time', help='The Timestamp type represents date and time information using ISO 8601 '
                   'format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like this: '
                   '\'2014-01-01T00:00:00Z\'')
        c.argument('last_modified_date_time', help='The Timestamp type represents date and time information using ISO '
                   '8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like '
                   'this: \'2014-01-01T00:00:00Z\'')
        # Flattened identitySet for lastModifiedBy.
        c.argument('application', action=AddApplication, nargs='+', help='identity', arg_group='Last Modified By')
        c.argument('device', action=AddApplication, nargs='+', help='identity', arg_group='Last Modified By')
        c.argument('user', action=AddApplication, nargs='+', help='identity', arg_group='Last Modified By')
        # Flattened identitySet for createdBy.
        c.argument('microsoft_graph_identity_application', action=AddApplication, nargs='+', help='identity',
                   arg_group='Created By')
        c.argument('microsoft_graph_identity_device', action=AddApplication, nargs='+', help='identity',
                   arg_group='Created By')
        c.argument('microsoft_graph_identity_user', action=AddApplication, nargs='+', help='identity',
                   arg_group='Created By')
        # Draft vs shared variants both use the same timeOffItem parser action.
        c.argument('draft_time_off', action=AddDraftTimeOff, nargs='+', help='timeOffItem')
        c.argument('is_staged_for_deletion', arg_type=get_three_state_flag(), help='')
        c.argument('shared_time_off', action=AddDraftTimeOff, nargs='+', help='timeOffItem')
        c.argument('user_id', type=str, help='ID of the user assigned to the timeOff. Required.')
    # Arguments for `teams team-schedule update-time-off-reason`: team/timeOffReason
    # keys, audit identities, and the reason's display name, icon and active flag.
    with self.argument_context('teams team-schedule update-time-off-reason') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('time_off_reason_id', type=str, help='key: id of timeOffReason')
        c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
        c.argument('created_date_time', help='The Timestamp type represents date and time information using ISO 8601 '
                   'format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like this: '
                   '\'2014-01-01T00:00:00Z\'')
        c.argument('last_modified_date_time', help='The Timestamp type represents date and time information using ISO '
                   '8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like '
                   'this: \'2014-01-01T00:00:00Z\'')
        c.argument('application', action=AddApplication, nargs='+', help='identity', arg_group='Last Modified By')
        c.argument('device', action=AddApplication, nargs='+', help='identity', arg_group='Last Modified By')
        c.argument('user', action=AddApplication, nargs='+', help='identity', arg_group='Last Modified By')
        c.argument('microsoft_graph_identity_application', action=AddApplication, nargs='+', help='identity',
                   arg_group='Created By')
        c.argument('microsoft_graph_identity_device', action=AddApplication, nargs='+', help='identity',
                   arg_group='Created By')
        c.argument('microsoft_graph_identity_user', action=AddApplication, nargs='+', help='identity',
                   arg_group='Created By')
        c.argument('display_name', type=str, help='The name of the timeOffReason. Required.')
        # Icon choices mirror the service-side timeOffReasonIconType enum values.
        c.argument('icon_type', arg_type=get_enum_type(['none', 'car', 'calendar', 'running', 'plane', 'firstAid',
                                                        'doctor', 'notWorking', 'clock', 'juryDuty', 'globe', 'cup',
                                                        'phone', 'weather', 'umbrella', 'piggyBank', 'dog', 'cake',
                                                        'trafficCone', 'pin', 'sunny', 'unknownFutureValue']),
                   help='')
        c.argument('is_active', arg_type=get_three_state_flag(), help='Indicates whether the timeOffReason can be used '
                   'when creating new entities or updating existing ones. Required.')
    # Arguments for `teams team-schedule update-time-off-request`: team/timeOffRequest
    # keys, audit identities, manager/sender action fields, request state and the
    # requested time span.
    with self.argument_context('teams team-schedule update-time-off-request') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('time_off_request_id', type=str, help='key: id of timeOffRequest')
        c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
        c.argument('created_date_time', help='The Timestamp type represents date and time information using ISO 8601 '
                   'format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like this: '
                   '\'2014-01-01T00:00:00Z\'')
        c.argument('last_modified_date_time', help='The Timestamp type represents date and time information using ISO '
                   '8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like '
                   'this: \'2014-01-01T00:00:00Z\'')
        c.argument('application', action=AddApplication, nargs='+', help='identity', arg_group='Last Modified By')
        c.argument('device', action=AddApplication, nargs='+', help='identity', arg_group='Last Modified By')
        c.argument('user', action=AddApplication, nargs='+', help='identity', arg_group='Last Modified By')
        c.argument('microsoft_graph_identity_application', action=AddApplication, nargs='+', help='identity',
                   arg_group='Created By')
        c.argument('microsoft_graph_identity_device', action=AddApplication, nargs='+', help='identity',
                   arg_group='Created By')
        c.argument('microsoft_graph_identity_user', action=AddApplication, nargs='+', help='identity',
                   arg_group='Created By')
        c.argument('assigned_to', arg_type=get_enum_type(['sender', 'recipient', 'manager', 'system',
                                                          'unknownFutureValue']), help='')
        c.argument('manager_action_date_time', help='')
        c.argument('manager_action_message', type=str, help='')
        c.argument('manager_user_id', type=str, help='')
        c.argument('sender_date_time', help='')
        c.argument('sender_message', type=str, help='')
        c.argument('sender_user_id', type=str, help='')
        c.argument('state', arg_type=get_enum_type(['pending', 'approved', 'declined', 'unknownFutureValue']),
                   help='')
        c.argument('end_date_time', help='The Timestamp type represents date and time information using ISO 8601 '
                   'format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like this: '
                   '\'2014-01-01T00:00:00Z\'')
        c.argument('start_date_time', help='The Timestamp type represents date and time information using ISO 8601 '
                   'format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like this: '
                   '\'2014-01-01T00:00:00Z\'')
        c.argument('time_off_reason_id', type=str, help='The reason for the time off.')
    # Arguments for the `teams team-schedule-time-card` action commands. Note that
    # clock-in takes no time-card id (it creates the card) while the other actions
    # operate on an existing card via --time-card-id.
    with self.argument_context('teams team-schedule-time-card clock-in') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('at_approved_location', arg_type=get_three_state_flag(), help='')
        c.argument('on_behalf_of_user_id', type=str, help='')
        c.argument('notes', action=AddBody, nargs='+', help='itemBody')
    with self.argument_context('teams team-schedule-time-card clock-out') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('time_card_id', type=str, help='key: id of timeCard')
        c.argument('at_approved_location', arg_type=get_three_state_flag(), help='')
        c.argument('notes', action=AddBody, nargs='+', help='itemBody')
    with self.argument_context('teams team-schedule-time-card confirm') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('time_card_id', type=str, help='key: id of timeCard')
    with self.argument_context('teams team-schedule-time-card end-break') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('time_card_id', type=str, help='key: id of timeCard')
        c.argument('at_approved_location', arg_type=get_three_state_flag(), help='')
        c.argument('notes', action=AddBody, nargs='+', help='itemBody')
    with self.argument_context('teams team-schedule-time-card start-break') as c:
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('time_card_id', type=str, help='key: id of timeCard')
        c.argument('at_approved_location', arg_type=get_three_state_flag(), help='')
        c.argument('notes', action=AddBody, nargs='+', help='itemBody')
    # Arguments for the singleton teamwork resource: OData projection options for
    # show, and a JSON blob of workforce integrations for update.
    with self.argument_context('teams teamwork-teamwork show-teamwork') as c:
        c.argument('select', nargs='+', help='Select properties to be returned')
        c.argument('expand', nargs='+', help='Expand related entities')
    with self.argument_context('teams teamwork-teamwork update-teamwork') as c:
        c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
        c.argument('workforce_integrations', type=validate_file_or_dict, help=' Expected value: '
                   'json-string/@json-file.')
    # Arguments for workforceIntegration create/delete/list/show. Create takes the
    # full set of writable properties (audit identities, callback URL/version,
    # encryption, and the supported-entity enums); the read/delete commands take
    # only keys and OData options.
    with self.argument_context('teams teamwork create-workforce-integration') as c:
        c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
        c.argument('created_date_time', help='The Timestamp type represents date and time information using ISO 8601 '
                   'format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like this: '
                   '\'2014-01-01T00:00:00Z\'')
        c.argument('last_modified_date_time', help='The Timestamp type represents date and time information using ISO '
                   '8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like '
                   'this: \'2014-01-01T00:00:00Z\'')
        c.argument('application', action=AddApplication, nargs='+', help='identity', arg_group='Last Modified By')
        c.argument('device', action=AddApplication, nargs='+', help='identity', arg_group='Last Modified By')
        c.argument('user', action=AddApplication, nargs='+', help='identity', arg_group='Last Modified By')
        c.argument('microsoft_graph_identity_application', action=AddApplication, nargs='+', help='identity',
                   arg_group='Created By')
        c.argument('microsoft_graph_identity_device', action=AddApplication, nargs='+', help='identity',
                   arg_group='Created By')
        c.argument('microsoft_graph_identity_user', action=AddApplication, nargs='+', help='identity',
                   arg_group='Created By')
        c.argument('api_version', type=int, help='API version for the call back URL. Start with 1.')
        c.argument('display_name', type=str, help='Name of the workforce integration.')
        c.argument('eligibility_filtering_enabled_entities', arg_type=get_enum_type(['none', 'swapRequest',
                                                                                     'offerShiftRequest',
                                                                                     'unknownFutureValue']), help='')
        c.argument('encryption', action=AddEncryption, nargs='+', help='workforceIntegrationEncryption')
        c.argument('is_active', arg_type=get_three_state_flag(), help='Indicates whether this workforce integration is '
                   'currently active and available.')
        c.argument('supported_entities', arg_type=get_enum_type(['none', 'shift', 'swapRequest',
                                                                 'userShiftPreferences', 'openShift',
                                                                 'openShiftRequest', 'offerShiftRequest',
                                                                 'unknownFutureValue']), help='')
        c.argument('supports', arg_type=get_enum_type(['none', 'shift', 'swapRequest', 'userShiftPreferences',
                                                       'openShift', 'openShiftRequest', 'offerShiftRequest',
                                                       'unknownFutureValue']), help='')
        c.argument('url', type=str, help='Workforce Integration URL for callbacks from the Shifts service.')
    with self.argument_context('teams teamwork delete-workforce-integration') as c:
        c.argument('workforce_integration_id', type=str, help='key: id of workforceIntegration')
        c.argument('if_match', type=str, help='ETag')
    with self.argument_context('teams teamwork list-workforce-integration') as c:
        c.argument('orderby', nargs='+', help='Order items by property values')
        c.argument('select', nargs='+', help='Select properties to be returned')
        c.argument('expand', nargs='+', help='Expand related entities')
    with self.argument_context('teams teamwork show-workforce-integration') as c:
        c.argument('workforce_integration_id', type=str, help='key: id of workforceIntegration')
        c.argument('select', nargs='+', help='Select properties to be returned')
        c.argument('expand', nargs='+', help='Expand related entities')
    # Arguments for `teams teamwork update-workforce-integration`: same writable
    # property set as the create command, plus the workforceIntegration key.
    with self.argument_context('teams teamwork update-workforce-integration') as c:
        c.argument('workforce_integration_id', type=str, help='key: id of workforceIntegration')
        c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
        c.argument('created_date_time', help='The Timestamp type represents date and time information using ISO 8601 '
                   'format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like this: '
                   '\'2014-01-01T00:00:00Z\'')
        c.argument('last_modified_date_time', help='The Timestamp type represents date and time information using ISO '
                   '8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like '
                   'this: \'2014-01-01T00:00:00Z\'')
        c.argument('application', action=AddApplication, nargs='+', help='identity', arg_group='Last Modified By')
        c.argument('device', action=AddApplication, nargs='+', help='identity', arg_group='Last Modified By')
        c.argument('user', action=AddApplication, nargs='+', help='identity', arg_group='Last Modified By')
        c.argument('microsoft_graph_identity_application', action=AddApplication, nargs='+', help='identity',
                   arg_group='Created By')
        c.argument('microsoft_graph_identity_device', action=AddApplication, nargs='+', help='identity',
                   arg_group='Created By')
        c.argument('microsoft_graph_identity_user', action=AddApplication, nargs='+', help='identity',
                   arg_group='Created By')
        c.argument('api_version', type=int, help='API version for the call back URL. Start with 1.')
        c.argument('display_name', type=str, help='Name of the workforce integration.')
        c.argument('eligibility_filtering_enabled_entities', arg_type=get_enum_type(['none', 'swapRequest',
                                                                                     'offerShiftRequest',
                                                                                     'unknownFutureValue']), help='')
        c.argument('encryption', action=AddEncryption, nargs='+', help='workforceIntegrationEncryption')
        c.argument('is_active', arg_type=get_three_state_flag(), help='Indicates whether this workforce integration is '
                   'currently active and available.')
        c.argument('supported_entities', arg_type=get_enum_type(['none', 'shift', 'swapRequest',
                                                                 'userShiftPreferences', 'openShift',
                                                                 'openShiftRequest', 'offerShiftRequest',
                                                                 'unknownFutureValue']), help='')
        c.argument('supports', arg_type=get_enum_type(['none', 'shift', 'swapRequest', 'userShiftPreferences',
                                                       'openShift', 'openShiftRequest', 'offerShiftRequest',
                                                       'unknownFutureValue']), help='')
        c.argument('url', type=str, help='Workforce Integration URL for callbacks from the Shifts service.')
    # Arguments for `teams user create-chat`: the user key plus the chat
    # properties; navigation collections are passed as JSON blobs.
    with self.argument_context('teams user create-chat') as c:
        c.argument('user_id', type=str, help='key: id of user')
        c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
        c.argument('created_date_time', help='')
        c.argument('last_updated_date_time', help='')
        c.argument('topic', type=str, help='')
        c.argument('installed_apps', type=validate_file_or_dict, help=' Expected value: json-string/@json-file.')
        c.argument('members', action=AddUsersMembers, nargs='+', help='')
        c.argument('messages', type=validate_file_or_dict, help=' Expected value: json-string/@json-file.')
        c.argument('tabs', type=validate_file_or_dict, help=' Expected value: json-string/@json-file.')
    # Arguments for `teams user create-joined-team`: the full team entity. Flat
    # team properties come first; nested schedule, template, time-clock and
    # discovery settings are flattened into arg_groups ('Schedule', 'Template',
    # 'Schedule Time Clock Settings', 'Discovery Settings'); id1 /
    # microsoft_graph_entity_id disambiguate nested read-only ids.
    with self.argument_context('teams user create-joined-team') as c:
        c.argument('user_id', type=str, help='key: id of user')
        c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
        c.argument('classification', type=str, help='An optional label. Typically describes the data or business '
                   'sensitivity of the team. Must match one of a pre-configured set in the tenant\'s directory.')
        c.argument('created_date_time', help='')
        c.argument('description', type=str, help='An optional description for the team.')
        c.argument('display_name', type=str, help='The name of the team.')
        c.argument('fun_settings', action=AddFunSettings, nargs='+', help='teamFunSettings')
        c.argument('guest_settings', action=AddGuestSettings, nargs='+', help='teamGuestSettings')
        c.argument('internal_id', type=str, help='A unique ID for the team that has been used in a few places such as '
                   'the audit log/Office 365 Management Activity API.')
        c.argument('is_archived', arg_type=get_three_state_flag(), help='Whether this team is in read-only mode.')
        c.argument('is_membership_limited_to_owners', arg_type=get_three_state_flag(), help='')
        c.argument('member_settings', action=AddMemberSettings, nargs='+', help='teamMemberSettings')
        c.argument('messaging_settings', action=AddMessagingSettings, nargs='+', help='teamMessagingSettings')
        c.argument('specialization', arg_type=get_enum_type(['none', 'educationStandard', 'educationClass',
                                                             'educationProfessionalLearningCommunity',
                                                             'educationStaff', 'healthcareStandard',
                                                             'healthcareCareCoordination', 'unknownFutureValue']),
                   help='')
        c.argument('visibility', arg_type=get_enum_type(['private', 'public', 'hiddenMembership',
                                                         'unknownFutureValue']), help='')
        c.argument('web_url', type=str, help='A hyperlink that will go to the team in the Microsoft Teams client. This '
                   'is the URL that you get when you right-click a team in the Microsoft Teams client and select Get '
                   'link to team. This URL should be treated as an opaque blob, and not parsed.')
        c.argument('channels', type=validate_file_or_dict, help='The collection of channels & messages associated with '
                   'the team. Expected value: json-string/@json-file.')
        c.argument('group', type=validate_file_or_dict, help='Represents an Azure Active Directory object. The '
                   'directoryObject type is the base type for many other directory entity types. Expected value: '
                   'json-string/@json-file.')
        c.argument('installed_apps', type=validate_file_or_dict, help='The apps installed in this team. Expected '
                   'value: json-string/@json-file.')
        c.argument('members', action=AddGroupsMembers, nargs='+', help='Members and owners of the team.')
        c.argument('operations', type=validate_file_or_dict, help='The async operations that ran or are running on '
                   'this team. Expected value: json-string/@json-file.')
        c.argument('owners', type=validate_file_or_dict, help=' Expected value: json-string/@json-file.')
        c.argument('photo', action=AddGroupsPhoto, nargs='+', help='profilePhoto')
        c.argument('primary_channel', type=validate_file_or_dict, help='channel Expected value: '
                   'json-string/@json-file.')
        c.argument('microsoft_graph_entity_id', type=str, help='Read-only.', arg_group='Template')
        c.argument('id1', type=str, help='Read-only.', arg_group='Schedule')
        c.argument('enabled', arg_type=get_three_state_flag(), help='Indicates whether the schedule is enabled for the '
                   'team. Required.', arg_group='Schedule')
        c.argument('offer_shift_requests_enabled', arg_type=get_three_state_flag(), help='Indicates whether offer '
                   'shift requests are enabled for the schedule.', arg_group='Schedule')
        c.argument('open_shifts_enabled', arg_type=get_three_state_flag(), help='Indicates whether open shifts are '
                   'enabled for the schedule.', arg_group='Schedule')
        c.argument('provision_status', arg_type=get_enum_type(['NotStarted', 'Running', 'Completed', 'Failed']),
                   help='', arg_group='Schedule')
        c.argument('provision_status_code', type=str, help='Additional information about why schedule provisioning '
                   'failed.', arg_group='Schedule')
        c.argument('swap_shifts_requests_enabled', arg_type=get_three_state_flag(), help='Indicates whether swap '
                   'shifts requests are enabled for the schedule.', arg_group='Schedule')
        c.argument('time_clock_enabled', arg_type=get_three_state_flag(), help='Indicates whether time clock is '
                   'enabled for the schedule.', arg_group='Schedule')
        c.argument('time_off_requests_enabled', arg_type=get_three_state_flag(), help='Indicates whether time off '
                   'requests are enabled for the schedule.', arg_group='Schedule')
        c.argument('time_zone', type=str, help='Indicates the time zone of the schedule team using tz database format. '
                   'Required.', arg_group='Schedule')
        c.argument('workforce_integration_ids', nargs='+', help='', arg_group='Schedule')
        c.argument('offer_shift_requests', action=AddOfferShiftRequests, nargs='+', help='', arg_group='Schedule')
        c.argument('open_shift_change_requests', action=AddOpenShiftChangeRequests, nargs='+', help='',
                   arg_group='Schedule')
        c.argument('open_shifts', type=validate_file_or_dict, help=' Expected value: json-string/@json-file.',
                   arg_group='Schedule')
        c.argument('scheduling_groups', action=AddSchedulingGroups, nargs='+', help='The logical grouping of users in '
                   'the schedule (usually by role).', arg_group='Schedule')
        c.argument('shifts', type=validate_file_or_dict, help='The shifts in the schedule. Expected value: '
                   'json-string/@json-file.', arg_group='Schedule')
        c.argument('swap_shifts_change_requests', action=AddSwapShiftsChangeRequests, nargs='+', help='',
                   arg_group='Schedule')
        c.argument('time_cards', type=validate_file_or_dict, help=' Expected value: json-string/@json-file.',
                   arg_group='Schedule')
        c.argument('time_off_reasons', action=AddTimeOffReasons, nargs='+', help='The set of reasons for a time off in '
                   'the schedule.', arg_group='Schedule')
        c.argument('time_off_requests', action=AddTimeOffRequests, nargs='+', help='', arg_group='Schedule')
        c.argument('times_off', type=validate_file_or_dict, help='The instances of times off in the schedule. Expected '
                   'value: json-string/@json-file.', arg_group='Schedule')
        c.argument('approved_location', action=AddApprovedLocation, nargs='+', help='geoCoordinates',
                   arg_group='Schedule Time Clock Settings')
        c.argument('show_in_teams_search_and_suggestions', arg_type=get_three_state_flag(), help='',
                   arg_group='Discovery Settings')
    # Arguments for the read/delete user navigation commands: entity keys, an
    # ETag (--if-match) for deletes, and OData orderby/select/expand for reads.
    with self.argument_context('teams user delete-chat') as c:
        c.argument('user_id', type=str, help='key: id of user')
        c.argument('chat_id', type=str, help='key: id of chat')
        c.argument('if_match', type=str, help='ETag')
    with self.argument_context('teams user delete-joined-team') as c:
        c.argument('user_id', type=str, help='key: id of user')
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('if_match', type=str, help='ETag')
    with self.argument_context('teams user delete-teamwork') as c:
        c.argument('user_id', type=str, help='key: id of user')
        c.argument('if_match', type=str, help='ETag')
    with self.argument_context('teams user list-chat') as c:
        c.argument('user_id', type=str, help='key: id of user')
        c.argument('orderby', nargs='+', help='Order items by property values')
        c.argument('select', nargs='+', help='Select properties to be returned')
        c.argument('expand', nargs='+', help='Expand related entities')
    with self.argument_context('teams user list-joined-team') as c:
        c.argument('user_id', type=str, help='key: id of user')
        c.argument('orderby', nargs='+', help='Order items by property values')
        c.argument('select', nargs='+', help='Select properties to be returned')
        c.argument('expand', nargs='+', help='Expand related entities')
    with self.argument_context('teams user show-chat') as c:
        c.argument('user_id', type=str, help='key: id of user')
        c.argument('chat_id', type=str, help='key: id of chat')
        c.argument('select', nargs='+', help='Select properties to be returned')
        c.argument('expand', nargs='+', help='Expand related entities')
    with self.argument_context('teams user show-joined-team') as c:
        c.argument('user_id', type=str, help='key: id of user')
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('select', nargs='+', help='Select properties to be returned')
        c.argument('expand', nargs='+', help='Expand related entities')
    with self.argument_context('teams user show-teamwork') as c:
        c.argument('user_id', type=str, help='key: id of user')
        c.argument('select', nargs='+', help='Select properties to be returned')
        c.argument('expand', nargs='+', help='Expand related entities')
    # Arguments for `teams user update-chat`: same chat properties as create-chat
    # plus the chat key.
    with self.argument_context('teams user update-chat') as c:
        c.argument('user_id', type=str, help='key: id of user')
        c.argument('chat_id', type=str, help='key: id of chat')
        c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
        c.argument('created_date_time', help='')
        c.argument('last_updated_date_time', help='')
        c.argument('topic', type=str, help='')
        c.argument('installed_apps', type=validate_file_or_dict, help=' Expected value: json-string/@json-file.')
        c.argument('members', action=AddUsersMembers, nargs='+', help='')
        c.argument('messages', type=validate_file_or_dict, help=' Expected value: json-string/@json-file.')
        c.argument('tabs', type=validate_file_or_dict, help=' Expected value: json-string/@json-file.')
    # Arguments for `teams user update-joined-team`: mirrors the create-joined-team
    # property set, plus the team key. Nested schedule/template/time-clock and
    # discovery settings are flattened into arg_groups; id1 /
    # microsoft_graph_entity_id disambiguate nested read-only ids.
    with self.argument_context('teams user update-joined-team') as c:
        c.argument('user_id', type=str, help='key: id of user')
        c.argument('team_id', type=str, help='key: id of team')
        c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
        c.argument('classification', type=str, help='An optional label. Typically describes the data or business '
                   'sensitivity of the team. Must match one of a pre-configured set in the tenant\'s directory.')
        c.argument('created_date_time', help='')
        c.argument('description', type=str, help='An optional description for the team.')
        c.argument('display_name', type=str, help='The name of the team.')
        c.argument('fun_settings', action=AddFunSettings, nargs='+', help='teamFunSettings')
        c.argument('guest_settings', action=AddGuestSettings, nargs='+', help='teamGuestSettings')
        c.argument('internal_id', type=str, help='A unique ID for the team that has been used in a few places such as '
                   'the audit log/Office 365 Management Activity API.')
        c.argument('is_archived', arg_type=get_three_state_flag(), help='Whether this team is in read-only mode.')
        c.argument('is_membership_limited_to_owners', arg_type=get_three_state_flag(), help='')
        c.argument('member_settings', action=AddMemberSettings, nargs='+', help='teamMemberSettings')
        c.argument('messaging_settings', action=AddMessagingSettings, nargs='+', help='teamMessagingSettings')
        c.argument('specialization', arg_type=get_enum_type(['none', 'educationStandard', 'educationClass',
                                                             'educationProfessionalLearningCommunity',
                                                             'educationStaff', 'healthcareStandard',
                                                             'healthcareCareCoordination', 'unknownFutureValue']),
                   help='')
        c.argument('visibility', arg_type=get_enum_type(['private', 'public', 'hiddenMembership',
                                                         'unknownFutureValue']), help='')
        c.argument('web_url', type=str, help='A hyperlink that will go to the team in the Microsoft Teams client. This '
                   'is the URL that you get when you right-click a team in the Microsoft Teams client and select Get '
                   'link to team. This URL should be treated as an opaque blob, and not parsed.')
        c.argument('channels', type=validate_file_or_dict, help='The collection of channels & messages associated with '
                   'the team. Expected value: json-string/@json-file.')
        c.argument('group', type=validate_file_or_dict, help='Represents an Azure Active Directory object. The '
                   'directoryObject type is the base type for many other directory entity types. Expected value: '
                   'json-string/@json-file.')
        c.argument('installed_apps', type=validate_file_or_dict, help='The apps installed in this team. Expected '
                   'value: json-string/@json-file.')
        c.argument('members', action=AddGroupsMembers, nargs='+', help='Members and owners of the team.')
        c.argument('operations', type=validate_file_or_dict, help='The async operations that ran or are running on '
                   'this team. Expected value: json-string/@json-file.')
        c.argument('owners', type=validate_file_or_dict, help=' Expected value: json-string/@json-file.')
        c.argument('photo', action=AddGroupsPhoto, nargs='+', help='profilePhoto')
        c.argument('primary_channel', type=validate_file_or_dict, help='channel Expected value: '
                   'json-string/@json-file.')
        c.argument('microsoft_graph_entity_id', type=str, help='Read-only.', arg_group='Template')
        c.argument('id1', type=str, help='Read-only.', arg_group='Schedule')
        c.argument('enabled', arg_type=get_three_state_flag(), help='Indicates whether the schedule is enabled for the '
                   'team. Required.', arg_group='Schedule')
        c.argument('offer_shift_requests_enabled', arg_type=get_three_state_flag(), help='Indicates whether offer '
                   'shift requests are enabled for the schedule.', arg_group='Schedule')
        c.argument('open_shifts_enabled', arg_type=get_three_state_flag(), help='Indicates whether open shifts are '
                   'enabled for the schedule.', arg_group='Schedule')
        c.argument('provision_status', arg_type=get_enum_type(['NotStarted', 'Running', 'Completed', 'Failed']),
                   help='', arg_group='Schedule')
        c.argument('provision_status_code', type=str, help='Additional information about why schedule provisioning '
                   'failed.', arg_group='Schedule')
        c.argument('swap_shifts_requests_enabled', arg_type=get_three_state_flag(), help='Indicates whether swap '
                   'shifts requests are enabled for the schedule.', arg_group='Schedule')
        c.argument('time_clock_enabled', arg_type=get_three_state_flag(), help='Indicates whether time clock is '
                   'enabled for the schedule.', arg_group='Schedule')
        c.argument('time_off_requests_enabled', arg_type=get_three_state_flag(), help='Indicates whether time off '
                   'requests are enabled for the schedule.', arg_group='Schedule')
        c.argument('time_zone', type=str, help='Indicates the time zone of the schedule team using tz database format. '
                   'Required.', arg_group='Schedule')
        c.argument('workforce_integration_ids', nargs='+', help='', arg_group='Schedule')
        c.argument('offer_shift_requests', action=AddOfferShiftRequests, nargs='+', help='', arg_group='Schedule')
        c.argument('open_shift_change_requests', action=AddOpenShiftChangeRequests, nargs='+', help='',
                   arg_group='Schedule')
        c.argument('open_shifts', type=validate_file_or_dict, help=' Expected value: json-string/@json-file.',
                   arg_group='Schedule')
        c.argument('scheduling_groups', action=AddSchedulingGroups, nargs='+', help='The logical grouping of users in '
                   'the schedule (usually by role).', arg_group='Schedule')
        c.argument('shifts', type=validate_file_or_dict, help='The shifts in the schedule. Expected value: '
                   'json-string/@json-file.', arg_group='Schedule')
        c.argument('swap_shifts_change_requests', action=AddSwapShiftsChangeRequests, nargs='+', help='',
                   arg_group='Schedule')
        c.argument('time_cards', type=validate_file_or_dict, help=' Expected value: json-string/@json-file.',
                   arg_group='Schedule')
        c.argument('time_off_reasons', action=AddTimeOffReasons, nargs='+', help='The set of reasons for a time off in '
                   'the schedule.', arg_group='Schedule')
        c.argument('time_off_requests', action=AddTimeOffRequests, nargs='+', help='', arg_group='Schedule')
        c.argument('times_off', type=validate_file_or_dict, help='The instances of times off in the schedule. Expected '
                   'value: json-string/@json-file.', arg_group='Schedule')
        c.argument('approved_location', action=AddApprovedLocation, nargs='+', help='geoCoordinates',
                   arg_group='Schedule Time Clock Settings')
        c.argument('show_in_teams_search_and_suggestions', arg_type=get_three_state_flag(), help='',
                   arg_group='Discovery Settings')
    # Arguments for `teams user update-teamwork`: user key, entity id, and the
    # personal-scope installed apps as a JSON blob.
    with self.argument_context('teams user update-teamwork') as c:
        c.argument('user_id', type=str, help='key: id of user')
        c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
        c.argument('installed_apps', type=validate_file_or_dict, help='The apps installed in the personal scope of '
                   'this user. Expected value: json-string/@json-file.')
    # Arguments for `teams user-teamwork create-installed-app`: the installation
    # entity with its nested teamsAppDefinition, teamsApp and chat flattened into
    # arg_groups; id1/id2 and the *_display_name aliases disambiguate nested
    # properties that would otherwise collide.
    with self.argument_context('teams user-teamwork create-installed-app') as c:
        c.argument('user_id', type=str, help='key: id of user')
        c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
        c.argument('microsoft_graph_entity_id', type=str, help='Read-only.', arg_group='Teams App Definition')
        c.argument('azure_ad_app_id', type=str, help='', arg_group='Teams App Definition')
        c.argument('description', type=str, help='', arg_group='Teams App Definition')
        c.argument('display_name', type=str, help='The name of the app provided by the app developer.',
                   arg_group='Teams App Definition')
        c.argument('last_modified_date_time', help='', arg_group='Teams App Definition')
        c.argument('publishing_state', arg_type=get_enum_type(['submitted', 'rejected', 'published',
                                                               'unknownFutureValue']), help='', arg_group='Teams App '
                   'Definition')
        c.argument('shortdescription', type=str, help='', arg_group='Teams App Definition')
        c.argument('teams_app_id', type=str, help='The ID from the Teams app manifest.', arg_group='Teams App '
                   'Definition')
        c.argument('version', type=str, help='The version number of the application.',
                   arg_group='Teams App Definition')
        c.argument('application', action=AddApplication, nargs='+', help='identity', arg_group='Teams App Definition '
                   'Created By')
        c.argument('device', action=AddApplication, nargs='+', help='identity', arg_group='Teams App Definition '
                   'Created By')
        c.argument('user', action=AddApplication, nargs='+', help='identity', arg_group='Teams App Definition Created '
                   'By')
        c.argument('id1', type=str, help='Read-only.', arg_group='Teams App')
        c.argument('microsoft_graph_teams_app_display_name', type=str, help='The name of the catalog app provided by '
                   'the app developer in the Microsoft Teams zip app package.', arg_group='Teams App')
        c.argument('distribution_method', arg_type=get_enum_type(['store', 'organization', 'sideloaded',
                                                                  'unknownFutureValue']), help='', arg_group='Teams '
                   'App')
        c.argument('external_id', type=str, help='The ID of the catalog provided by the app developer in the Microsoft '
                   'Teams zip app package.', arg_group='Teams App')
        c.argument('app_definitions', type=validate_file_or_dict, help='The details for each version of the app. '
                   'Expected value: json-string/@json-file.', arg_group='Teams App')
        c.argument('id2', type=str, help='Read-only.', arg_group='Chat')
        c.argument('created_date_time', help='', arg_group='Chat')
        c.argument('last_updated_date_time', help='', arg_group='Chat')
        c.argument('topic', type=str, help='', arg_group='Chat')
        c.argument('installed_apps', type=validate_file_or_dict, help=' Expected value: json-string/@json-file.',
                   arg_group='Chat')
        c.argument('members', action=AddUsersMembers, nargs='+', help='', arg_group='Chat')
        c.argument('messages', type=validate_file_or_dict, help=' Expected value: json-string/@json-file.',
                   arg_group='Chat')
        c.argument('tabs', type=validate_file_or_dict, help=' Expected value: json-string/@json-file.',
                   arg_group='Chat')
    # Arguments for the user-teamwork installed-app read/delete commands: keys,
    # an ETag (--if-match) for delete, and OData options for list/show.
    with self.argument_context('teams user-teamwork delete-installed-app') as c:
        c.argument('user_id', type=str, help='key: id of user')
        c.argument('user_scope_teams_app_installation_id', type=str, help='key: id of userScopeTeamsAppInstallation')
        c.argument('if_match', type=str, help='ETag')
    with self.argument_context('teams user-teamwork list-installed-app') as c:
        c.argument('user_id', type=str, help='key: id of user')
        c.argument('orderby', nargs='+', help='Order items by property values')
        c.argument('select', nargs='+', help='Select properties to be returned')
        c.argument('expand', nargs='+', help='Expand related entities')
    with self.argument_context('teams user-teamwork show-installed-app') as c:
        c.argument('user_id', type=str, help='key: id of user')
        c.argument('user_scope_teams_app_installation_id', type=str, help='key: id of userScopeTeamsAppInstallation')
        c.argument('select', nargs='+', help='Select properties to be returned')
        c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('teams user-teamwork update-installed-app') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('user_scope_teams_app_installation_id', type=str, help='key: id of userScopeTeamsAppInstallation')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('microsoft_graph_entity_id', type=str, help='Read-only.', arg_group='Teams App Definition')
c.argument('azure_ad_app_id', type=str, help='', arg_group='Teams App Definition')
c.argument('description', type=str, help='', arg_group='Teams App Definition')
c.argument('display_name', type=str, help='The name of the app provided by the app developer.',
arg_group='Teams App Definition')
c.argument('last_modified_date_time', help='', arg_group='Teams App Definition')
c.argument('publishing_state', arg_type=get_enum_type(['submitted', 'rejected', 'published',
'unknownFutureValue']), help='', arg_group='Teams App '
'Definition')
c.argument('shortdescription', type=str, help='', arg_group='Teams App Definition')
c.argument('teams_app_id', type=str, help='The ID from the Teams app manifest.', arg_group='Teams App '
'Definition')
c.argument('version', type=str, help='The version number of the application.',
arg_group='Teams App Definition')
c.argument('application', action=AddApplication, nargs='+', help='identity', arg_group='Teams App Definition '
'Created By')
c.argument('device', action=AddApplication, nargs='+', help='identity', arg_group='Teams App Definition '
'Created By')
c.argument('user', action=AddApplication, nargs='+', help='identity', arg_group='Teams App Definition Created '
'By')
c.argument('id1', type=str, help='Read-only.', arg_group='Teams App')
c.argument('microsoft_graph_teams_app_display_name', type=str, help='The name of the catalog app provided by '
'the app developer in the Microsoft Teams zip app package.', arg_group='Teams App')
c.argument('distribution_method', arg_type=get_enum_type(['store', 'organization', 'sideloaded',
'unknownFutureValue']), help='', arg_group='Teams '
'App')
c.argument('external_id', type=str, help='The ID of the catalog provided by the app developer in the Microsoft '
'Teams zip app package.', arg_group='Teams App')
c.argument('app_definitions', type=validate_file_or_dict, help='The details for each version of the app. '
'Expected value: json-string/@json-file.', arg_group='Teams App')
c.argument('id2', type=str, help='Read-only.', arg_group='Chat')
c.argument('created_date_time', help='', arg_group='Chat')
c.argument('last_updated_date_time', help='', arg_group='Chat')
c.argument('topic', type=str, help='', arg_group='Chat')
c.argument('installed_apps', type=validate_file_or_dict, help=' Expected value: json-string/@json-file.',
arg_group='Chat')
c.argument('members', action=AddUsersMembers, nargs='+', help='', arg_group='Chat')
c.argument('messages', type=validate_file_or_dict, help=' Expected value: json-string/@json-file.',
arg_group='Chat')
c.argument('tabs', type=validate_file_or_dict, help=' Expected value: json-string/@json-file.',
arg_group='Chat')
with self.argument_context('teams user-teamwork-installed-app delete-ref-chat') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('user_scope_teams_app_installation_id', type=str, help='key: id of userScopeTeamsAppInstallation')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('teams user-teamwork-installed-app set-ref-chat') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('user_scope_teams_app_installation_id', type=str, help='key: id of userScopeTeamsAppInstallation')
c.argument('body', type=validate_file_or_dict, help='New navigation property ref values Expected value: '
'json-string/@json-file.')
with self.argument_context('teams user-teamwork-installed-app show-chat') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('user_scope_teams_app_installation_id', type=str, help='key: id of userScopeTeamsAppInstallation')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('teams user-teamwork-installed-app show-ref-chat') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('user_scope_teams_app_installation_id', type=str, help='key: id of userScopeTeamsAppInstallation')
| 78.145755
| 121
| 0.634702
| 39,453
| 312,036
| 4.885357
| 0.019644
| 0.1116
| 0.062036
| 0.049102
| 0.990531
| 0.989924
| 0.988679
| 0.985364
| 0.98444
| 0.982718
| 0
| 0.005158
| 0.228877
| 312,036
| 3,992
| 122
| 78.165331
| 0.795822
| 0.001622
| 0
| 0.893565
| 0
| 0.001629
| 0.444262
| 0.069529
| 0
| 0
| 0
| 0
| 0
| 1
| 0.000272
| false
| 0
| 0.004073
| 0
| 0.004344
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f0c090de85dddfbd20c171ef4711b116d67361f2
| 1,405
|
py
|
Python
|
problems/8.py
|
christofferaakre/project-euler
|
4b42802233be10e4a592798205171fb5156dae6b
|
[
"MIT"
] | null | null | null |
problems/8.py
|
christofferaakre/project-euler
|
4b42802233be10e4a592798205171fb5156dae6b
|
[
"MIT"
] | null | null | null |
problems/8.py
|
christofferaakre/project-euler
|
4b42802233be10e4a592798205171fb5156dae6b
|
[
"MIT"
] | null | null | null |
from main import Solver
solver = Solver()
input = "7316717653133062491922511967442657474235534919493496983520312774506326239578318016984801869478851843858615607891129494954595017379583319528532088055111254069874715852386305071569329096329522744304355766896648950445244523161731856403098711121722383113622298934233803081353362766142828064444866452387493035890729629049156044077239071381051585930796086670172427121883998797908792274921901699720888093776657273330010533678812202354218097512545405947522435258490771167055601360483958644670632441572215539753697817977846174064955149290862569321978468622482839722413756570560574902614079729686524145351004748216637048440319989000889524345065854122758866688116427171479924442928230863465674813919123162824586178664583591245665294765456828489128831426076900422421902267105562632111110937054421750694165896040807198403850962455444362981230987879927244284909188845801561660979191338754992005240636899125607176060588611646710940507754100225698315520005593572972571636269561882670428252483600823257530420752963450"
def biggest_product_of_adjacents(n):
biggest_product = 1
for i in range(0, len(input) - n - 1):
product = 1
for j in range(i, i + n):
product *= int(input[j])
if product > biggest_product:
biggest_product = product
return biggest_product
# Submit the answer for Problem 8: largest product of 13 adjacent digits.
solver.solve(8, biggest_product_of_adjacents(13))
| 78.055556
| 1,010
| 0.892527
| 57
| 1,405
| 21.824561
| 0.45614
| 0.067524
| 0.025723
| 0.040193
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.778809
| 0.079715
| 1,405
| 17
| 1,011
| 82.647059
| 0.183295
| 0
| 0
| 0
| 0
| 0
| 0.711744
| 0.711744
| 0
| 1
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.076923
| 0
| 0.230769
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
9bf4f1a9d6c6af5fe92f6425bc9d18a9d42a8c72
| 37,472
|
py
|
Python
|
tests/test_control.py
|
madkote/fastapi-plugins
|
04d251c4c88317e1c8f35dad66771020dcb35112
|
[
"MIT"
] | 211
|
2019-11-20T11:19:44.000Z
|
2022-03-28T08:43:27.000Z
|
tests/test_control.py
|
madkote/fastapi-plugins
|
04d251c4c88317e1c8f35dad66771020dcb35112
|
[
"MIT"
] | 16
|
2020-01-24T14:31:30.000Z
|
2021-09-23T10:27:39.000Z
|
tests/test_control.py
|
madkote/fastapi-plugins
|
04d251c4c88317e1c8f35dad66771020dcb35112
|
[
"MIT"
] | 12
|
2020-07-25T14:33:46.000Z
|
2022-01-11T06:42:32.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# tests.test_control
'''
:author: madkote
:contact: madkote(at)bluewin.ch
:copyright: Copyright 2021, madkote RES
tests.test_control
------------------
Control plugin tests
'''
from __future__ import absolute_import
import asyncio
import typing
import unittest
import fastapi
import pydantic
import pytest
import starlette.testclient
import fastapi_plugins
from fastapi_plugins.memcached import memcached_plugin
from fastapi_plugins.memcached import MemcachedSettings
from . import VERSION
from . import d2json
__all__ = []
__author__ = 'madkote <madkote(at)bluewin.ch>'
__version__ = '.'.join(str(x) for x in VERSION)
__copyright__ = 'Copyright 2021, madkote RES'
class DummyPluginHealthOK(
        fastapi_plugins.Plugin,
        fastapi_plugins.ControlHealthMixin):
    """Test double: a plugin whose health check always succeeds."""

    async def init_app(
        self,
        app: fastapi.FastAPI,
        config: pydantic.BaseSettings = None,  # @UnusedVariable
        *args,  # @UnusedVariable
        **kwargs  # @UnusedVariable
    ) -> None:
        """Expose this plugin instance on the application state."""
        app.state.DUMMY_PLUGIN_HEALTH_OK = self

    async def health(self) -> typing.Dict:
        """Report a healthy status unconditionally."""
        return {'dummy': 'OK'}
class DummyPluginHealthOKOnce(
        fastapi_plugins.Plugin,
        fastapi_plugins.ControlHealthMixin):
    """Test double: healthy on the first check, failing on every later one."""

    async def init_app(
        self,
        app: fastapi.FastAPI,
        config: pydantic.BaseSettings = None,  # @UnusedVariable
        *args,  # @UnusedVariable
        **kwargs  # @UnusedVariable
    ) -> None:
        """Reset the call counter and expose the plugin on the app state."""
        self.counter = 0
        app.state.DUMMY_PLUGIN_HEALTH_OK_ONCE = self

    async def health(self) -> typing.Dict:
        """Succeed exactly once; raise on all subsequent calls."""
        if self.counter > 0:
            raise Exception('Health check failed')
        self.counter += 1
        return {'dummy': 'OK'}
class DummyPluginHealthFail(
        fastapi_plugins.Plugin,
        fastapi_plugins.ControlHealthMixin):
    """Test double: a plugin whose health check always fails."""

    async def init_app(
        self,
        app: fastapi.FastAPI,
        config: pydantic.BaseSettings = None,  # @UnusedVariable
        *args,  # @UnusedVariable
        **kwargs  # @UnusedVariable
    ) -> None:
        """Expose this plugin instance on the application state."""
        app.state.DUMMY_PLUGIN_HEALTH_FAIL = self

    async def health(self) -> typing.Dict:
        """Always raise to simulate an unhealthy plugin."""
        raise Exception('Health check failed')
class DummyPluginHealthNotDefined(
        fastapi_plugins.Plugin,
        fastapi_plugins.ControlHealthMixin):
    """Test double: a plugin that does not override ``health`` at all."""

    async def init_app(
        self,
        app: fastapi.FastAPI,
        config: pydantic.BaseSettings = None,  # @UnusedVariable
        *args,  # @UnusedVariable
        **kwargs  # @UnusedVariable
    ) -> None:
        """Expose this plugin instance on the application state."""
        app.state.DUMMY_PLUGIN_NOT_DEFINED = self
@pytest.mark.control
class ControlTest(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def make_app(self, config=None, version=None, environ=None, plugins=None):
if plugins is None:
plugins = []
app = fastapi_plugins.register_middleware(fastapi.FastAPI())
if config is None:
config = fastapi_plugins.ControlSettings()
@app.on_event('startup')
async def on_startup() -> None:
for p in plugins:
await p.init_app(app, config)
await p.init()
kwargs = {}
if version:
kwargs.update(**dict(version=version))
if environ:
kwargs.update(**dict(environ=environ))
await fastapi_plugins.control_plugin.init_app(app, config, **kwargs) # noqa E501
await fastapi_plugins.control_plugin.init()
@app.on_event('shutdown')
async def on_shutdown() -> None:
await fastapi_plugins.control_plugin.terminate()
for p in plugins:
await p.terminate()
return app
# =========================================================================
# CONTROLLER
# =========================================================================
def test_controller_environ(self):
async def _test():
c = fastapi_plugins.Controller()
res = await c.get_environ()
exp = {}
self.assertTrue(d2json(exp) == d2json(res), 'environ failed')
event_loop = asyncio.new_event_loop()
asyncio.set_event_loop(event_loop)
coro = asyncio.coroutine(_test)
event_loop.run_until_complete(coro())
event_loop.close()
def test_controller_environ_custom(self):
async def _test():
exp = dict(ping='pong')
c = fastapi_plugins.Controller(environ=exp)
res = await c.get_environ()
self.assertTrue(d2json(exp) == d2json(res), 'environ failed')
event_loop = asyncio.new_event_loop()
asyncio.set_event_loop(event_loop)
coro = asyncio.coroutine(_test)
event_loop.run_until_complete(coro())
event_loop.close()
def test_controller_health(self):
async def _test():
c = fastapi_plugins.Controller()
exp = dict(status=True, checks=[])
res = (await c.get_health()).dict()
self.assertTrue(
d2json(exp) == d2json(res),
'health failed: %s != %s' % (exp, res)
)
event_loop = asyncio.new_event_loop()
asyncio.set_event_loop(event_loop)
coro = asyncio.coroutine(_test)
event_loop.run_until_complete(coro())
event_loop.close()
def test_controller_health_plugin_ok(self):
async def _test():
app = fastapi_plugins.register_middleware(fastapi.FastAPI())
config = fastapi_plugins.ControlSettings()
dummy = DummyPluginHealthOK()
await dummy.init_app(app, config)
await dummy.init()
try:
c = fastapi_plugins.Controller()
c.plugins.append(('DUMMY_PLUGIN_OK', dummy))
exp = dict(
status=True,
checks=[
dict(
name='DUMMY_PLUGIN_OK',
status=True,
details=dict(dummy='OK')
)
]
)
res = (await c.get_health()).dict()
self.assertTrue(
d2json(exp) == d2json(res),
'health failed: %s != %s' % (exp, res)
)
finally:
await dummy.terminate()
event_loop = asyncio.new_event_loop()
asyncio.set_event_loop(event_loop)
coro = asyncio.coroutine(_test)
event_loop.run_until_complete(coro())
event_loop.close()
def test_controller_health_plugin_notdefined(self):
async def _test():
app = fastapi_plugins.register_middleware(fastapi.FastAPI())
config = fastapi_plugins.ControlSettings()
dummy = DummyPluginHealthNotDefined()
await dummy.init_app(app, config)
await dummy.init()
try:
c = fastapi_plugins.Controller()
c.plugins.append(('DUMMY_PLUGIN_NOT_DEFINED', dummy))
exp = dict(
status=True,
checks=[
dict(
name='DUMMY_PLUGIN_NOT_DEFINED',
status=True,
details={}
)
]
)
res = (await c.get_health()).dict()
self.assertTrue(
d2json(exp) == d2json(res),
'health failed: %s != %s' % (exp, res)
)
finally:
await dummy.terminate()
event_loop = asyncio.new_event_loop()
asyncio.set_event_loop(event_loop)
coro = asyncio.coroutine(_test)
event_loop.run_until_complete(coro())
event_loop.close()
def test_controller_health_plugin_failing(self):
async def _test():
app = fastapi_plugins.register_middleware(fastapi.FastAPI())
config = fastapi_plugins.ControlSettings()
dummy = DummyPluginHealthFail()
await dummy.init_app(app, config)
await dummy.init()
try:
c = fastapi_plugins.Controller()
c.plugins.append(('DUMMY_PLUGIN_HEALTH_FAIL', dummy))
exp = dict(
status=False,
checks=[
dict(
name='DUMMY_PLUGIN_HEALTH_FAIL',
status=False,
details=dict(error='Health check failed')
)
]
)
res = (await c.get_health()).dict()
self.assertTrue(
d2json(exp) == d2json(res),
'health failed: %s != %s' % (exp, res)
)
finally:
await dummy.terminate()
event_loop = asyncio.new_event_loop()
asyncio.set_event_loop(event_loop)
coro = asyncio.coroutine(_test)
event_loop.run_until_complete(coro())
event_loop.close()
def test_controller_heartbeat(self):
async def _test():
c = fastapi_plugins.Controller()
exp = True
res = await c.get_heart_beat()
self.assertTrue(
exp == res,
'heart beat failed: %s != %s' % (exp, res)
)
event_loop = asyncio.new_event_loop()
asyncio.set_event_loop(event_loop)
coro = asyncio.coroutine(_test)
event_loop.run_until_complete(coro())
event_loop.close()
def test_controller_version(self):
async def _test():
from fastapi_plugins.control import DEFAULT_CONTROL_VERSION
c = fastapi_plugins.Controller()
r = await c.get_version()
self.assertTrue(r == DEFAULT_CONTROL_VERSION, 'version failed')
event_loop = asyncio.new_event_loop()
asyncio.set_event_loop(event_loop)
coro = asyncio.coroutine(_test)
event_loop.run_until_complete(coro())
event_loop.close()
def test_controller_version_custom(self):
async def _test():
v = '1.2.3'
c = fastapi_plugins.Controller(version=v)
r = await c.get_version()
self.assertTrue(r == v, 'version failed')
event_loop = asyncio.new_event_loop()
asyncio.set_event_loop(event_loop)
coro = asyncio.coroutine(_test)
event_loop.run_until_complete(coro())
event_loop.close()
# =========================================================================
# ROUTER
# =========================================================================
def test_router_environ(self):
event_loop = asyncio.new_event_loop()
asyncio.set_event_loop(event_loop)
try:
client = starlette.testclient.TestClient(self.make_app())
with client as c:
endpoint = '/control/environ'
response = c.get(endpoint)
exp = 200
res = response.status_code
self.assertTrue(exp == res, '[%s] status code : %s != %s' % (endpoint, exp, res)) # noqa E501
exp = dict(environ={})
res = response.json()
self.assertTrue(d2json(exp) == d2json(res), '[%s] json : %s != %s' % (endpoint, exp, res)) # noqa E501
finally:
event_loop.close()
def test_router_environ_custom(self):
myenviron = dict(ping='pong')
event_loop = asyncio.new_event_loop()
asyncio.set_event_loop(event_loop)
try:
client = starlette.testclient.TestClient(
self.make_app(environ=myenviron)
)
with client as c:
endpoint = '/control/environ'
response = c.get(endpoint)
exp = 200
res = response.status_code
self.assertTrue(exp == res, '[%s] status code : %s != %s' % (endpoint, exp, res)) # noqa E501
exp = dict(environ=myenviron)
res = response.json()
self.assertTrue(d2json(exp) == d2json(res), '[%s] json : %s != %s' % (endpoint, exp, res)) # noqa E501
finally:
event_loop.close()
def test_router_version(self):
from fastapi_plugins.control import DEFAULT_CONTROL_VERSION
event_loop = asyncio.new_event_loop()
asyncio.set_event_loop(event_loop)
try:
client = starlette.testclient.TestClient(self.make_app())
with client as c:
endpoint = '/control/version'
response = c.get(endpoint)
exp = 200
res = response.status_code
self.assertTrue(exp == res, '[%s] status code : %s != %s' % (endpoint, exp, res)) # noqa E501
exp = {'version': DEFAULT_CONTROL_VERSION}
res = response.json()
self.assertTrue(d2json(exp) == d2json(res), '[%s] json : %s != %s' % (endpoint, exp, res)) # noqa E501
finally:
event_loop.close()
def test_router_version_custom(self):
myversion = '1.2.3'
event_loop = asyncio.new_event_loop()
asyncio.set_event_loop(event_loop)
try:
client = starlette.testclient.TestClient(
self.make_app(version=myversion)
)
with client as c:
endpoint = '/control/version'
response = c.get(endpoint)
exp = 200
res = response.status_code
self.assertTrue(exp == res, '[%s] status code : %s != %s' % (endpoint, exp, res)) # noqa E501
exp = {'version': myversion}
res = response.json()
self.assertTrue(d2json(exp) == d2json(res), '[%s] json : %s != %s' % (endpoint, exp, res)) # noqa E501
finally:
event_loop.close()
def test_router_heartbeat(self):
event_loop = asyncio.new_event_loop()
asyncio.set_event_loop(event_loop)
try:
client = starlette.testclient.TestClient(
self.make_app()
)
with client as c:
endpoint = '/control/heartbeat'
response = c.get(endpoint)
exp = 200
res = response.status_code
self.assertTrue(exp == res, '[%s] status code : %s != %s' % (endpoint, exp, res)) # noqa E501
exp = dict(is_alive=True)
res = response.json()
self.assertTrue(d2json(exp) == d2json(res), '[%s] json : %s != %s' % (endpoint, exp, res)) # noqa E501
finally:
event_loop.close()
def test_router_health(self):
event_loop = asyncio.new_event_loop()
asyncio.set_event_loop(event_loop)
try:
client = starlette.testclient.TestClient(
self.make_app()
)
with client as c:
endpoint = '/control/health'
response = c.get(endpoint)
exp = 200
res = response.status_code
self.assertTrue(exp == res, '[%s] status code : %s != %s' % (endpoint, exp, res)) # noqa E501
exp = dict(status=True, checks=[])
res = response.json()
self.assertTrue(d2json(exp) == d2json(res), '[%s] json : %s != %s' % (endpoint, exp, res)) # noqa E501
finally:
event_loop.close()
def test_router_health_with_plugins(self):
event_loop = asyncio.new_event_loop()
asyncio.set_event_loop(event_loop)
try:
client = starlette.testclient.TestClient(
self.make_app(
plugins=[
DummyPluginHealthNotDefined(),
DummyPluginHealthOK()
]
)
)
with client as c:
endpoint = '/control/health'
response = c.get(endpoint)
exp = 200
res = response.status_code
self.assertTrue(exp == res, '[%s] status code : %s != %s' % (endpoint, exp, res)) # noqa E501
exp = dict(
status=True,
checks=[
dict(
name='DUMMY_PLUGIN_NOT_DEFINED',
status=True,
details={}
),
dict(
name='DUMMY_PLUGIN_HEALTH_OK',
status=True,
details=dict(dummy='OK')
)
]
)
res = response.json()
self.assertTrue(d2json(exp) == d2json(res), '[%s] json : %s != %s' % (endpoint, exp, res)) # noqa E501
finally:
event_loop.close()
def test_router_health_with_plugins_full(self):
event_loop = asyncio.new_event_loop()
asyncio.set_event_loop(event_loop)
try:
class MyConfig(
fastapi_plugins.RedisSettings,
fastapi_plugins.SchedulerSettings,
fastapi_plugins.ControlSettings,
MemcachedSettings
):
pass
client = starlette.testclient.TestClient(
self.make_app(
config=MyConfig(),
plugins=[
fastapi_plugins.redis_plugin,
fastapi_plugins.scheduler_plugin,
memcached_plugin
]
)
)
with client as c:
endpoint = '/control/health'
response = c.get(endpoint)
exp = 200
res = response.status_code
self.assertTrue(exp == res, '[%s] status code : %s != %s' % (endpoint, exp, res)) # noqa E501
exp = dict(
status=True,
checks=[
{
'name': 'REDIS',
'status': True,
'details': {
'redis_type': 'redis',
'redis_address': 'redis://localhost:6379/0',
'redis_pong': 'PONG'
}
},
{
'name': 'AIOJOBS_SCHEDULER',
'status': True,
'details': {
'jobs': 0,
'active': 0,
'pending': 0,
'limit': 100,
'closed': False
}
},
{
'name': 'MEMCACHED',
'status': True,
'details': {
'host': 'localhost',
'port': 11211,
'version': '1.6.9'
}
}
]
)
res = response.json()
self.assertTrue(d2json(exp) == d2json(res), '[%s] json : %s != %s' % (endpoint, exp, res)) # noqa E501
finally:
event_loop.close()
def test_router_health_with_plugins_broken(self):
event_loop = asyncio.new_event_loop()
asyncio.set_event_loop(event_loop)
try:
client = starlette.testclient.TestClient(
self.make_app(
plugins=[
DummyPluginHealthNotDefined(),
DummyPluginHealthOK(),
DummyPluginHealthOKOnce()
]
)
)
with client as c:
endpoint = '/control/health'
response = c.get(endpoint)
exp = 417
res = response.status_code
self.assertTrue(exp == res, '[%s] status code : %s != %s' % (endpoint, exp, res)) # noqa E501
exp = {
'detail': {
'status': False,
'checks': [
{
'name': 'DUMMY_PLUGIN_NOT_DEFINED',
'status': True,
'details': {}
},
{
'name': 'DUMMY_PLUGIN_HEALTH_OK',
'status': True,
'details': {'dummy': 'OK'}
},
{
'name': 'DUMMY_PLUGIN_HEALTH_OK_ONCE',
'status': False,
'details': {'error': 'Health check failed'}
}
]
}
}
res = response.json()
self.assertTrue(d2json(exp) == d2json(res), '[%s] json : %s != %s' % (endpoint, exp, res)) # noqa E501
finally:
event_loop.close()
def test_router_health_with_plugins_broken_init(self):
event_loop = asyncio.new_event_loop()
asyncio.set_event_loop(event_loop)
try:
client = starlette.testclient.TestClient(
self.make_app(
plugins=[
DummyPluginHealthFail(),
DummyPluginHealthOK()
]
)
)
try:
with client as c:
endpoint = '/control/version'
c.get(endpoint)
except fastapi_plugins.ControlError:
pass
else:
self.fail('health on app initialization should fail')
finally:
event_loop.close()
def test_router_prefix_custom(self):
from fastapi_plugins.control import DEFAULT_CONTROL_VERSION
event_loop = asyncio.new_event_loop()
asyncio.set_event_loop(event_loop)
try:
config = fastapi_plugins.ControlSettings(
control_router_prefix='outofcontrol'
)
client = starlette.testclient.TestClient(
self.make_app(config=config)
)
with client as c:
#
endpoint = '/%s/health' % config.control_router_prefix
response = c.get(endpoint)
exp = 200
res = response.status_code
self.assertTrue(exp == res, '[%s] status code : %s != %s' % (endpoint, exp, res)) # noqa E501
exp = dict(status=True, checks=[])
res = response.json()
self.assertTrue(d2json(exp) == d2json(res), '[%s] json : %s != %s' % (endpoint, exp, res)) # noqa E501
#
endpoint = '/%s/version' % config.control_router_prefix
response = c.get(endpoint)
exp = 200
res = response.status_code
self.assertTrue(exp == res, '[%s] status code : %s != %s' % (endpoint, exp, res)) # noqa E501
exp = {'version': DEFAULT_CONTROL_VERSION}
res = response.json()
self.assertTrue(d2json(exp) == d2json(res), '[%s] json : %s != %s' % (endpoint, exp, res)) # noqa E501
finally:
event_loop.close()
def test_router_prefix_version_custom(self):
event_loop = asyncio.new_event_loop()
asyncio.set_event_loop(event_loop)
try:
myversion = '3.2.1'
config = fastapi_plugins.ControlSettings(
control_router_prefix='outofcontrol'
)
client = starlette.testclient.TestClient(
self.make_app(config=config, version=myversion)
)
with client as c:
#
endpoint = '/%s/health' % config.control_router_prefix
response = c.get(endpoint)
exp = 200
res = response.status_code
self.assertTrue(exp == res, '[%s] status code : %s != %s' % (endpoint, exp, res)) # noqa E501
exp = dict(status=True, checks=[])
res = response.json()
self.assertTrue(d2json(exp) == d2json(res), '[%s] json : %s != %s' % (endpoint, exp, res)) # noqa E501
#
endpoint = '/%s/version' % config.control_router_prefix
response = c.get(endpoint)
exp = 200
res = response.status_code
self.assertTrue(exp == res, '[%s] status code : %s != %s' % (endpoint, exp, res)) # noqa E501
exp = {'version': myversion}
res = response.json()
self.assertTrue(d2json(exp) == d2json(res), '[%s] json : %s != %s' % (endpoint, exp, res)) # noqa E501
finally:
event_loop.close()
def test_router_disable(self):
event_loop = asyncio.new_event_loop()
asyncio.set_event_loop(event_loop)
try:
config = fastapi_plugins.ControlSettings(
control_enable_environ=False,
control_enable_health=False,
control_enable_heartbeat=False,
control_enable_version=False
)
client = starlette.testclient.TestClient(
self.make_app(config=config)
)
with client as c:
#
endpoint = '/control/environ'
response = c.get(endpoint)
exp = 404
res = response.status_code
self.assertTrue(exp == res, '[%s] status code : %s != %s' % (endpoint, exp, res)) # noqa E501
#
endpoint = '/control/version'
response = c.get(endpoint)
exp = 404
res = response.status_code
self.assertTrue(exp == res, '[%s] status code : %s != %s' % (endpoint, exp, res)) # noqa E501
#
endpoint = '/control/heartbeat'
response = c.get(endpoint)
exp = 404
res = response.status_code
self.assertTrue(exp == res, '[%s] status code : %s != %s' % (endpoint, exp, res)) # noqa E501
#
endpoint = '/control/health'
response = c.get(endpoint)
exp = 404
res = response.status_code
self.assertTrue(exp == res, '[%s] status code : %s != %s' % (endpoint, exp, res)) # noqa E501
finally:
event_loop.close()
def test_router_disable_environ(self):
event_loop = asyncio.new_event_loop()
asyncio.set_event_loop(event_loop)
try:
from fastapi_plugins.control import DEFAULT_CONTROL_VERSION
config = fastapi_plugins.ControlSettings(
control_enable_environ=False,
control_enable_health=True,
control_enable_heartbeat=True,
control_enable_version=True
)
client = starlette.testclient.TestClient(
self.make_app(config=config)
)
with client as c:
#
endpoint = '/control/environ'
response = c.get(endpoint)
exp = 404
res = response.status_code
self.assertTrue(exp == res, '[%s] status code : %s != %s' % (endpoint, exp, res)) # noqa E501
#
endpoint = '/control/version'
response = c.get(endpoint)
exp = 200
res = response.status_code
self.assertTrue(exp == res, '[%s] status code : %s != %s' % (endpoint, exp, res)) # noqa E501
exp = {'version': DEFAULT_CONTROL_VERSION}
res = response.json()
self.assertTrue(d2json(exp) == d2json(res), '[%s] json : %s != %s' % (endpoint, exp, res)) # noqa E501
#
endpoint = '/control/heartbeat'
response = c.get(endpoint)
exp = 200
res = response.status_code
self.assertTrue(exp == res, '[%s] status code : %s != %s' % (endpoint, exp, res)) # noqa E501
exp = {'is_alive': True}
res = response.json()
self.assertTrue(d2json(exp) == d2json(res), '[%s] json : %s != %s' % (endpoint, exp, res)) # noqa E501
#
endpoint = '/control/health'
response = c.get(endpoint)
exp = 200
res = response.status_code
self.assertTrue(exp == res, '[%s] status code : %s != %s' % (endpoint, exp, res)) # noqa E501
exp = dict(status=True, checks=[])
res = response.json()
self.assertTrue(d2json(exp) == d2json(res), '[%s] json : %s != %s' % (endpoint, exp, res)) # noqa E501
finally:
event_loop.close()
def test_router_disable_version(self):
event_loop = asyncio.new_event_loop()
asyncio.set_event_loop(event_loop)
try:
config = fastapi_plugins.ControlSettings(
control_enable_environ=True,
control_enable_heartbeat=True,
control_enable_health=True,
control_enable_version=False
)
client = starlette.testclient.TestClient(
self.make_app(config=config)
)
with client as c:
#
endpoint = '/control/environ'
response = c.get(endpoint)
exp = 200
res = response.status_code
self.assertTrue(exp == res, '[%s] status code : %s != %s' % (endpoint, exp, res)) # noqa E501
exp = dict(environ={})
res = response.json()
self.assertTrue(d2json(exp) == d2json(res), '[%s] json : %s != %s' % (endpoint, exp, res)) # noqa E501
#
endpoint = '/control/version'
response = c.get(endpoint)
exp = 404
res = response.status_code
self.assertTrue(exp == res, '[%s] status code : %s != %s' % (endpoint, exp, res)) # noqa E501
#
endpoint = '/control/heartbeat'
response = c.get(endpoint)
exp = 200
res = response.status_code
self.assertTrue(exp == res, '[%s] status code : %s != %s' % (endpoint, exp, res)) # noqa E501
exp = {'is_alive': True}
res = response.json()
self.assertTrue(d2json(exp) == d2json(res), '[%s] json : %s != %s' % (endpoint, exp, res)) # noqa E501
#
endpoint = '/control/health'
response = c.get(endpoint)
exp = 200
res = response.status_code
self.assertTrue(exp == res, '[%s] status code : %s != %s' % (endpoint, exp, res)) # noqa E501
exp = dict(status=True, checks=[])
res = response.json()
self.assertTrue(d2json(exp) == d2json(res), '[%s] json : %s != %s' % (endpoint, exp, res)) # noqa E501
finally:
event_loop.close()
def test_router_disable_health(self):
event_loop = asyncio.new_event_loop()
asyncio.set_event_loop(event_loop)
try:
from fastapi_plugins.control import DEFAULT_CONTROL_VERSION
config = fastapi_plugins.ControlSettings(
control_enable_environ=True,
control_enable_health=False,
control_enable_heartbeat=True,
control_enable_version=True
)
client = starlette.testclient.TestClient(
self.make_app(config=config, plugins=[DummyPluginHealthFail()])
)
with client as c:
#
endpoint = '/control/environ'
response = c.get(endpoint)
exp = 200
res = response.status_code
self.assertTrue(exp == res, '[%s] status code : %s != %s' % (endpoint, exp, res)) # noqa E501
exp = dict(environ={})
res = response.json()
self.assertTrue(d2json(exp) == d2json(res), '[%s] json : %s != %s' % (endpoint, exp, res)) # noqa E501
#
endpoint = '/control/version'
response = c.get(endpoint)
exp = 200
res = response.status_code
self.assertTrue(exp == res, '[%s] status code : %s != %s' % (endpoint, exp, res)) # noqa E501
exp = {'version': DEFAULT_CONTROL_VERSION}
res = response.json()
self.assertTrue(d2json(exp) == d2json(res), '[%s] json : %s != %s' % (endpoint, exp, res)) # noqa E501
#
endpoint = '/control/heartbeat'
response = c.get(endpoint)
exp = 200
res = response.status_code
self.assertTrue(exp == res, '[%s] status code : %s != %s' % (endpoint, exp, res)) # noqa E501
exp = {'is_alive': True}
res = response.json()
self.assertTrue(d2json(exp) == d2json(res), '[%s] json : %s != %s' % (endpoint, exp, res)) # noqa E501
#
endpoint = '/control/health'
response = c.get(endpoint)
exp = 404
res = response.status_code
self.assertTrue(exp == res, '[%s] status code : %s != %s' % (endpoint, exp, res)) # noqa E501
finally:
event_loop.close()
def test_router_disable_heartbeat(self):
    """With control_enable_heartbeat=False the /control/heartbeat route must
    not be mounted (404) while environ/version/health remain available."""
    event_loop = asyncio.new_event_loop()
    asyncio.set_event_loop(event_loop)
    try:
        from fastapi_plugins.control import DEFAULT_CONTROL_VERSION

        def check(c, endpoint, exp_status, exp_json=None):
            # One GET plus the status/body assertions; the failure
            # messages are byte-identical to the former inline asserts.
            response = c.get(endpoint)
            exp = exp_status
            res = response.status_code
            self.assertTrue(exp == res, '[%s] status code : %s != %s' % (endpoint, exp, res))
            if exp_json is not None:
                exp = exp_json
                res = response.json()
                self.assertTrue(d2json(exp) == d2json(res), '[%s] json : %s != %s' % (endpoint, exp, res))

        config = fastapi_plugins.ControlSettings(
            control_enable_environ=True,
            control_enable_health=True,
            control_enable_heartbeat=False,
            control_enable_version=True
        )
        client = starlette.testclient.TestClient(
            self.make_app(config=config)
        )
        with client as c:
            check(c, '/control/environ', 200, dict(environ={}))
            check(c, '/control/version', 200, {'version': DEFAULT_CONTROL_VERSION})
            # Disabled endpoint: route absent entirely, no JSON expected.
            check(c, '/control/heartbeat', 404)
            check(c, '/control/health', 200, dict(status=True, checks=[]))
    finally:
        event_loop.close()
if __name__ == "__main__":
    # import sys;sys.argv = ['', 'Test.testName']
    # Run the whole unittest suite when executed as a script.
    unittest.main()
| 40.335845
| 119
| 0.483027
| 3,463
| 37,472
| 5.052556
| 0.054577
| 0.071498
| 0.033149
| 0.043093
| 0.870835
| 0.843345
| 0.838372
| 0.815626
| 0.791736
| 0.786706
| 0
| 0.016999
| 0.405022
| 37,472
| 928
| 120
| 40.37931
| 0.767795
| 0.03616
| 0
| 0.738869
| 0
| 0
| 0.081878
| 0.007277
| 0
| 0
| 0
| 0
| 0.080626
| 1
| 0.034898
| false
| 0.004813
| 0.022864
| 0
| 0.068592
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
50513c49e9384ee8374c93257bda92f350682b4b
| 317
|
py
|
Python
|
SimCalorimetry/HGCalSimProducers/python/hgcHitAssociation_cfi.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 852
|
2015-01-11T21:03:51.000Z
|
2022-03-25T21:14:00.000Z
|
SimCalorimetry/HGCalSimProducers/python/hgcHitAssociation_cfi.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 30,371
|
2015-01-02T00:14:40.000Z
|
2022-03-31T23:26:05.000Z
|
SimCalorimetry/HGCalSimProducers/python/hgcHitAssociation_cfi.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 3,240
|
2015-01-02T05:53:18.000Z
|
2022-03-31T17:24:21.000Z
|
from SimCalorimetry.HGCalAssociatorProducers.layerClusterAssociatorByEnergyScore_cfi import layerClusterAssociatorByEnergyScore as lcAssocByEnergyScoreProducer
from SimCalorimetry.HGCalAssociatorProducers.simClusterAssociatorByEnergyScore_cfi import simClusterAssociatorByEnergyScore as scAssocByEnergyScoreProducer
| 79.25
| 159
| 0.946372
| 18
| 317
| 16.555556
| 0.555556
| 0.120805
| 0.281879
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.041009
| 317
| 3
| 160
| 105.666667
| 0.980263
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
505aa41c67cd2ed4b63a6e36f75de2f82d28a41f
| 1,337
|
py
|
Python
|
accounts/urls.py
|
Alwin1847207/Hackathon
|
473adea12822cbe2be9a7525ac29391659f0ab6b
|
[
"bzip2-1.0.6"
] | null | null | null |
accounts/urls.py
|
Alwin1847207/Hackathon
|
473adea12822cbe2be9a7525ac29391659f0ab6b
|
[
"bzip2-1.0.6"
] | null | null | null |
accounts/urls.py
|
Alwin1847207/Hackathon
|
473adea12822cbe2be9a7525ac29391659f0ab6b
|
[
"bzip2-1.0.6"
] | null | null | null |
from django.urls import path, include
from accounts import views

# Merge conflict resolved: the two branches differed only in a leading '/'
# on the route strings. Django URL patterns are matched WITHOUT a leading
# slash, so routes like '/signup' would never match; the slash-free
# variants (the incoming 00486ef side) are kept.
urlpatterns = [
    path('', views.indx, name='accounts'),
    path('signup', views.signup, name='signup'),
    path('logout', views.logout, name='logout'),
    path('login', views.loginn, name='login'),
    path('usertpe/<int:pk>/', views.usertype, name='usertpe/'),
    path('userSelection/<int:id>', views.UserSelection, name='userSelection'),
    path('dashboard', views.dashboard, name='dashboard'),
    path('organiser', include('Organizer.urls'), name='organiserIndex'),
    path('sponsor', include('Sponsor.urls'), name='sponsorIndex'),
    path('participant/', include('Participant.urls'), name='participantIndex'),
]
| 46.103448
| 80
| 0.670157
| 140
| 1,337
| 6.4
| 0.221429
| 0.053571
| 0.033482
| 0.046875
| 0.850446
| 0.850446
| 0.850446
| 0.850446
| 0.850446
| 0.850446
| 0
| 0.018581
| 0.114435
| 1,337
| 28
| 81
| 47.75
| 0.738176
| 0
| 0
| 0.153846
| 0
| 0
| 0.346298
| 0.033657
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.076923
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
aca13ddd1f522190f254afaf3832df6b06bbc2a3
| 452
|
py
|
Python
|
webdjango/signals/WebDjangoSignals.py
|
myog-io/WebDjangular
|
73d3c40aa449eec5acc59d4493ee94059bddabbd
|
[
"MIT"
] | 1
|
2018-09-14T15:17:19.000Z
|
2018-09-14T15:17:19.000Z
|
webdjango/signals/WebDjangoSignals.py
|
MyOwnGamesLLC/WebDjangular
|
73d3c40aa449eec5acc59d4493ee94059bddabbd
|
[
"MIT"
] | 41
|
2018-12-16T16:58:54.000Z
|
2019-02-22T20:08:58.000Z
|
webdjango/signals/WebDjangoSignals.py
|
myog-io/WebDjangular
|
73d3c40aa449eec5acc59d4493ee94059bddabbd
|
[
"MIT"
] | 1
|
2019-12-10T09:32:49.000Z
|
2019-12-10T09:32:49.000Z
|
from django.dispatch import Signal


def _make_lifecycle_signal():
    """Build one serializer/filter lifecycle signal.

    Each call returns a fresh Signal so no argument list is shared
    between signals. NOTE(review): ``providing_args`` is deprecated in
    Django 3.0 and removed in 4.0 — confirm the project's Django
    version before upgrading.
    """
    return Signal(
        providing_args=["serializer", "args", "kwargs"], use_caching=True
    )


# Fired around serializer construction.
pre_init_serializer = _make_lifecycle_signal()
post_init_serializer = _make_lifecycle_signal()
# Fired around filter construction.
pre_init_filter = _make_lifecycle_signal()
post_init_filter = _make_lifecycle_signal()
| 30.133333
| 70
| 0.707965
| 53
| 452
| 5.735849
| 0.301887
| 0.197368
| 0.25
| 0.381579
| 0.881579
| 0.881579
| 0.881579
| 0.881579
| 0.881579
| 0.881579
| 0
| 0
| 0.14823
| 452
| 14
| 71
| 32.285714
| 0.78961
| 0
| 0
| 0.307692
| 0
| 0
| 0.182648
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.076923
| 0
| 0.076923
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
acae396920d61d3600aee2e70255b3f5aee64288
| 52,628
|
py
|
Python
|
custompackage/.ipynb_checkpoints/load_architecture-checkpoint.py
|
ilennaj/ktree_constraints
|
2a25e93c9b4f113caf633b08abb3e48e1c566c59
|
[
"CC0-1.0"
] | 4
|
2021-03-11T21:46:41.000Z
|
2021-12-01T06:32:42.000Z
|
custompackage/.ipynb_checkpoints/load_architecture-checkpoint.py
|
ilennaj/ktree_constraints
|
2a25e93c9b4f113caf633b08abb3e48e1c566c59
|
[
"CC0-1.0"
] | null | null | null |
custompackage/.ipynb_checkpoints/load_architecture-checkpoint.py
|
ilennaj/ktree_constraints
|
2a25e93c9b4f113caf633b08abb3e48e1c566c59
|
[
"CC0-1.0"
] | 1
|
2021-08-12T19:32:37.000Z
|
2021-08-12T19:32:37.000Z
|
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
import custompackage.sl_custom as slc
from torch import Tensor
from torch.nn.init import xavier_uniform_
from torch.nn.init import constant_
from torch.nn.init import xavier_normal_
from torch.nn.parameter import Parameter
from torch.nn import Module
from torch import functional as F
import numpy as np
import math
def kronecker(matrix1, matrix2):
    """Kronecker product of two 2-D tensors.

    Returns a tensor of shape (m1_rows * m2_rows, m1_cols * m2_cols)
    built from the outer product of the flattened inputs.
    """
    rows1, cols1 = matrix1.size()
    rows2, cols2 = matrix2.size()
    # Outer product of the flattened matrices holds every pairwise product.
    outer = torch.ger(matrix1.view(-1), matrix2.view(-1))
    # Rearrange so matrix2-blocks are laid out inside matrix1's grid.
    blocks = outer.reshape(rows1, cols1, rows2, cols2)
    interleaved = blocks.permute([0, 2, 1, 3])
    return interleaved.reshape(rows1 * rows2, cols1 * cols2)
class NCK(nn.Module):
    """Weighted sum of Na/Ca/K channel-inspired nonlinearities.

    output = alpha * f_Na(x) + beta * f_Ca(x) + gamma * f_K(x)
    When ``learn`` is True the three channel weights are trainable
    Parameters; otherwise they are plain numbers. ``scale`` rescales
    the input inside each channel function.
    """

    def __init__(self, alpha=1, beta=0.6, gamma=1, learn=True, scale=1):
        super(NCK, self).__init__()
        if learn:
            # Register each coefficient as a trainable 1-element Parameter.
            for coeff_name, coeff in (('alpha', alpha), ('beta', beta), ('gamma', gamma)):
                setattr(self, coeff_name,
                        Parameter(torch.tensor([float(coeff)]).requires_grad_()))
        else:
            self.alpha = alpha
            self.beta = beta
            self.gamma = gamma
        self.scale = scale

    def forward(self, x):
        na_term = self.alpha * self.f_Na(x)
        ca_term = self.beta * self.f_Ca(x)
        k_term = self.gamma * self.f_K(x)
        return na_term + ca_term + k_term

    def f_Na(self, x, a=0.0878, b=113.68, c=6.39, d=8.98):
        # Sodium channel: sigmoid-gated linear ramp.
        xs = x * self.scale
        return a * (xs - b) / (1 + torch.exp((c - xs / d)))

    def f_Ca(self, x, a=0.129, b=69.62, c=-4.40, d=4.25):
        # Calcium channel: same form, different constants.
        xs = x * self.scale
        return a * (xs - b) / (1 + torch.exp((c - xs / d)))

    def f_K(self, x, a=2.23, b=0.132, c=16.74, d=0.436):
        # Potassium channel: pure sigmoid shape.
        xs = x * self.scale
        return a / (d + torch.exp(-b * (xs - c)))
class SQGL(nn.Module):
    """Attenuated linear-plus-ionic-current nonlinearity.

    output = (x * linscale + I_ion) * atten, where I_ion is the
    alpha/beta/gamma-weighted sum of negated, clamped Na/Ca/K channel
    functions. Learned coefficients are forced non-negative on every
    forward pass.
    """

    def __init__(self, alpha=1, beta=0.6, gamma=1, learn=True, scale=1, atten=1, linscale=1):
        super(SQGL, self).__init__()
        self.learn = learn
        if learn:
            # Register each coefficient as a trainable 1-element Parameter.
            for coeff_name, coeff in (('alpha', alpha), ('beta', beta), ('gamma', gamma)):
                setattr(self, coeff_name,
                        Parameter(torch.tensor([float(coeff)]).requires_grad_()))
        else:
            self.alpha = alpha
            self.beta = beta
            self.gamma = gamma
        self.scale = scale
        self.atten = atten
        self.linscale = linscale

    def forward(self, x):
        if self.learn:
            # Keep the learned channel weights non-negative (in-place on .data).
            self.alpha.data = torch.abs(self.alpha.data)
            self.beta.data = torch.abs(self.beta.data)
            self.gamma.data = torch.abs(self.gamma.data)
        I_ion = self.alpha * self.f_Na(x) + self.beta * self.f_Ca(x) + self.gamma * self.f_K(x)
        # Attenuation factor (e.g. 0.5) applied to linear + ionic parts.
        return (x * self.linscale + I_ion) * self.atten

    def f_Na(self, x, a=0.0878, b=113.68, c=6.39, d=8.98):
        # Exponent clamped to [-60, 60] to keep exp() finite.
        xs = x * self.scale
        return -a * (xs - b) / (1 + torch.exp((c - xs / d).clamp(-60, 60)))

    def f_Ca(self, x, a=0.129, b=69.62, c=-4.40, d=4.25):
        xs = x * self.scale
        return -a * (xs - b) / (1 + torch.exp((c - xs / d).clamp(-60, 60)))

    def f_K(self, x, a=2.23, b=0.132, c=16.74, d=0.436):
        xs = x * self.scale
        return -a / (d + torch.exp((-b * (xs - c)).clamp(-60, 60)))
class Synapse(nn.Module):
    """Conductance-weighted synapse layer.

    Combines excitatory (Ep), inhibitory (Em) and leak (E0) reversal
    potentials with input-dependent conductances g_n, returning the
    conductance-weighted average potential per input feature.
    """

    # Names of the five per-feature parameter rows; creation and init
    # order matches the original so RNG draws line up exactly.
    _PARAM_NAMES = ('ap1', 'ap2', 'am1', 'am2', 'g0')

    def __init__(self, in_features: int, Ep=50, Em=-70, E0=-65, learn: bool = True):
        super(Synapse, self).__init__()
        self.in_features = in_features
        # Note: Find reversal potential sources or choose new reversal potentials
        self.Ep = Ep
        self.Em = Em
        self.E0 = E0
        self.learn = learn
        # Presynaptic activation dynamics (a1) and synapse size (a2, weights),
        # each drawn from N(0, sqrt(2/in_features)).
        std = math.sqrt(2 / in_features)
        for pname in self._PARAM_NAMES:
            self.register_parameter(pname, Parameter(torch.Tensor(1, in_features)))
            torch.nn.init.normal_(getattr(self, pname), mean=0.0, std=std)

    def forward(self, x):
        g_plus = self.g_n(x, self.ap1, self.ap2)
        g_minus = self.g_n(x, self.am1, self.am2)
        top = g_plus * self.Ep + g_minus * self.Em + self.g0 * self.E0
        bottom = g_plus + g_minus + self.g0  # Should I assume 1/R ~= 0?
        return top / bottom

    def g_n(self, x, a1, a2):
        # Exponential conductance; exponent clamped to keep exp() finite.
        return torch.exp((a1 * x + a2).clamp(-60, 60))
class F_Na(nn.Module):
    """Standalone sodium-channel nonlinearity (negated, clamped form)."""

    def __init__(self, scale=1):
        super(F_Na, self).__init__()
        self.scale = scale

    def forward(self, x, a=0.0878, b=113.68, c=6.39, d=8.98):
        xs = x * self.scale
        # Clamp the exponent to [-60, 60] so exp() cannot overflow.
        gate = 1 + torch.exp((c - xs / d).clamp(-60, 60))
        return -a * (xs - b) / gate
class F_Ca(nn.Module):
    """Standalone calcium-channel nonlinearity (negated, clamped form)."""

    def __init__(self, scale=1):
        super(F_Ca, self).__init__()
        self.scale = scale

    def forward(self, x, a=0.129, b=69.62, c=-4.40, d=4.25):
        xs = x * self.scale
        # Clamp the exponent to [-60, 60] so exp() cannot overflow.
        gate = 1 + torch.exp((c - xs / d).clamp(-60, 60))
        return -a * (xs - b) / gate
class F_K(nn.Module):
    """Standalone potassium-channel nonlinearity (negated, clamped form)."""

    def __init__(self, scale=1):
        super(F_K, self).__init__()
        self.scale = scale

    def forward(self, x, a=2.23, b=0.132, c=16.74, d=0.436):
        xs = x * self.scale
        # Clamp the exponent to [-60, 60] so exp() cannot overflow.
        gate = d + torch.exp((-b * (xs - c)).clamp(-60, 60))
        return -a / gate
class Hinge_loss(nn.Module):
    """Hinge loss: max(0, margin - y * target), with optional reduction.

    Fix: the original computed the elementwise max against
    ``torch.Tensor([0]).cuda()``, which hard-codes CUDA (crashes on
    CPU-only machines and ignores the inputs' device). ``clamp(min=0)``
    is mathematically identical and runs on whatever device ``y`` and
    ``target`` live on.

    Args:
        margin: hinge margin (default 1).
        reduction: 'mean' (default), 'sum', or anything else for the
            raw elementwise losses.
    """

    def __init__(self, margin=1, reduction='mean'):
        super(Hinge_loss, self).__init__()
        self.margin = margin
        self.reduction = reduction

    def forward(self, y, target):
        # Elementwise hinge on the inputs' own device.
        losses = (self.margin - y * target).clamp(min=0)
        if self.reduction == 'sum':
            return losses.sum()
        elif self.reduction == 'mean':
            return losses.mean()
        else:
            return losses
class simple_fcnn(nn.Module):
    '''
    2 layer feed forward neural network.
    Will use leaky ReLU activation functions.
    Activation = {'relu', 'linear','nck','sqgl'}

    Any other Activation value falls through to a purely linear hidden
    layer (see forward()).
    '''
    def __init__(self, Input_size=3072, Hidden_size=3072, Output_size=1, Activation="relu",
                 alpha=1, beta=0.6, gamma=1, learn=True, scale=1, atten=1, leak=0.01):
        super(simple_fcnn, self).__init__()
        '''
        Inputs: Input_size, Hidden_size, Output_size, Activation
        '''
        # Initialize architecture parameters
        self.Input_size = Input_size
        self.Hidden_size = Hidden_size
        self.Output_size = Output_size
        self.Activation = Activation
        self.learn = learn
        self.scale = scale
        self.atten = atten
        self.leak = leak
        # Initialize weights through He initialization (by default in nn.Linear)
        # Bias is zeroed and the weight is re-drawn with kaiming_normal_
        # (a=0.01 matches the LeakyReLU slope used below).
        self.i2h = nn.Linear(Input_size, Hidden_size, bias=True)
        self.i2h.bias = torch.nn.Parameter(torch.zeros_like(self.i2h.bias))
        # self.i2h.weight = torch.nn.init.normal_(self.i2h.weight, mean=0.0, std=math.sqrt(2/(Input_size)))
        self.i2h.weight = torch.nn.init.kaiming_normal_(self.i2h.weight, a=0.01)
        # Initialize densly connected output layer
        self.h2o = nn.Linear(Hidden_size, Output_size)
        self.h2o.bias = torch.nn.Parameter(torch.zeros_like(self.h2o.bias))
        self.h2o.weight = torch.nn.init.kaiming_normal_(self.h2o.weight, a=0.01)
        # Initialize nonlinearities
        # NOTE(review): self.sigmoid is only referenced by the
        # commented-out output line in forward(); kept for parity.
        self.relu = nn.LeakyReLU(negative_slope=self.leak)
        self.sigmoid = nn.Sigmoid()
        if Activation=='nck':
            self.nck = NCK(alpha, beta, gamma, learn=self.learn, scale=self.scale)
        if Activation=='sqgl':
            self.sqgl = SQGL(alpha, beta, gamma, learn=self.learn, scale=self.scale, atten=self.atten)
    def forward(self, x):
        '''
        Forward step for network. Establishes Architecture.
        Inputs: Input
        Outputs: Output
        '''
        # Prepare input for appropriate architecture
        # Set Activation function to calculate hidden layer
        if self.Activation == 'relu':
            Hidden = self.relu(self.i2h(x))
        elif self.Activation == 'nck':
            Hidden = self.nck(self.i2h(x))
        elif self.Activation == 'sqgl':
            Hidden = self.sqgl(self.i2h(x))
        else:
            # Unknown/'linear' activation: hidden layer stays linear.
            Hidden = self.i2h(x)
        # Calculate Output layer
        # Output = self.sigmoid(self.h2o(Hidden))
        Output = self.h2o(Hidden)
        return(Output)
class ktree_gen(nn.Module):
    '''
    k-Tree neural network

    Builds `Repeats` sparse subtrees whose layer widths self.k shrink
    toward a single node, then joins them with one linear root node.
    '''
    def __init__(self, ds='mnist', Activation="relu", Sparse=True,
                 Input_order=None, Repeats=1, Padded=False,
                 alpha=1, beta=0.6, gamma=1, learn=True, scale=1, atten=1):
        super(ktree_gen, self).__init__()
        '''
        Inputs: ds (dataset), activation, sparse, input_order, repeats, padded
        '''
        # Initialize architecture parameters
        self.ds = ds
        self.Activation = Activation
        self.Sparse = Sparse
        self.Input_order = Input_order
        self.Repeats = Repeats
        self.learn = learn
        self.scale = scale
        self.atten = atten
        # Initialize weights
        # Set biases to 0
        # Set kaiming initialize weights with gain to correct for sparsity
        # Set freeze masks
        #Specify tree dimensions
        # If using 28x28 datasets...
        if (ds == 'mnist') or (ds == 'fmnist') or (ds == 'kmnist') or (ds == 'emnist'):
            # If padded, use 1024 sized tree, completely binary tree
            if Padded:
                self.k = [1024, 512, 256, 128, 64, 32, 16, 8, 4, 2, 1]
            # If not padded, use 784 sized tree,
            # 7:1 between layers 1 and 2, and layers 2 and 3
            else:
                self.k = [784, 112, 16, 8, 4, 2, 1]
        # If using 3x32x32 datasets...
        elif (ds == 'svhn') or (ds == 'cifar10'):
            # Use 3072 sized tree
            # 3:1 between layers 1 and 2, otherwise binary
            self.k = [3072, 1024, 512, 256, 128, 64, 32, 16, 8, 4, 2, 1]
        # If using 16x16 datasets...
        elif ds == 'usps':
            # Use 256 sized tree
            self.k = [256, 128, 64, 32, 16, 8, 4, 2, 1]
        else:
            # NOTE(review): bailing out here leaves the module without
            # self.k / layers — forward() would fail on such an instance.
            print('Select a dataset')
            return(None)
        # Make layers of tree architecture
        # Name each layer in each subtree for reference later
        self.names = np.empty((self.Repeats, len(self.k)-1),dtype=object)
        # Initialize freeze mask for use in training loop
        self.freeze_mask_set = []
        # For each repeat or subtree, make a sparse layer that is initialized correctly
        for j in range(self.Repeats):
            # For each layer within each subtree
            for i in range(len(self.k)-1):
                # Assign name of the layer, indexed by layer (i) and subtree (j)
                name = ''.join(['w',str(j),'_',str(i)])
                # Initialize the layer with the appropriate name
                self.add_module(name, nn.Linear(self.k[i],self.k[i+1]))
                # Set bias of layer to zeros
                self._modules[name].bias = nn.Parameter(torch.zeros_like(self._modules[name].bias))
                # Use custom method to re-initialize the layer weights and create freeze mask for that layer
                self._modules[name].weight.data, freeze_mask = self.initialize(self._modules[name])
                # Add the layer name to the list of names
                self.names[j,i] = name
                # Set the freeze mask for the first subtree, which should be the same for all subtrees
                if j < 1:
                    self.freeze_mask_set.append(freeze_mask)
        # Initialize root node, aka soma node aka output node
        self.root = nn.Linear(Repeats, 1)
        # Initialize nonlinearities
        self.relu = nn.LeakyReLU()
        self.sigmoid = nn.Sigmoid()
        self.nck = NCK(alpha, beta, gamma, learn=self.learn, scale=self.scale)
        self.sqgl = SQGL(alpha, beta, gamma, learn=self.learn, scale=self.scale, atten=self.atten)
    def forward(self, x):
        '''
        Forward step for network. Establishes Architecture.
        Inputs: Input
        Outputs: Output
        '''
        y_out = []
        # Step through every layer in each subtree of model, applying nonlinearities
        for j in range(self.Repeats):
            y = x
            for i in range(len(self.k)-1):
                if self.Activation == 'relu':
                    y = self.relu(self._modules[self.names[j,i]](y))
                elif self.Activation == 'nck':
                    y = self.nck(self._modules[self.names[j,i]](y))
                elif self.Activation == 'sqgl':
                    y = self.sqgl(self._modules[self.names[j,i]](y))
                else:
                    y = self._modules[self.names[j,i]](y)
            # keep track of pen-ultimate layer outputs
            y_out.append(y)
        # Calculate final output, joining the outputs of each subtree together
        # output = self.sigmoid(self.root(torch.cat((y_out), dim=1)))
        output = self.root(torch.cat((y_out), dim=1))
        return(output)
    def initialize(self, layer):
        # Kaiming initialize weights accounting for sparsity
        # Extract weights from layer we are reinitializing
        weights = layer.weight.data
        # If sparse, change the initializations based on density (sparsity)
        if self.Sparse:
            if weights.shape[1] == 3072: # first layer of 3x32x32 image datasets
                inp_block = torch.ones((1,3))
            elif (weights.shape[1] == 784) or (weights.shape[1] == 112): # first or second layer of 28x28 datasets
                inp_block = torch.ones((1,7))
            else:
                inp_block = torch.ones((1,2)) # all other layers (or 32x32)
            # Set up mask for where each node receives a set of inputs of equal size to the input block
            # NOTE(review): this assumes in_features == out_features * block
            # width, which holds for the self.k layer ratios above.
            inp_mask = kronecker(torch.eye(weights.shape[0]), inp_block)
            # Calculate density
            density = len(np.where(inp_mask)[0])/len(inp_mask.reshape(-1))
            # Generate Kaiming initialization with gain = 1/density
            weights = torch.nn.init.normal_(weights, mean=0.0, std=math.sqrt(2/(weights.shape[1]*density)))
            # Where no inputs will be received, set weights to zero
            weights[inp_mask == 0] = 0
        else: # If not sparse, use typical kaiming normalization
            weights = torch.nn.init.normal_(weights, mean=0.0, std=math.sqrt(2/(weights.shape[1])))
        # Generate freeze mask for use in training to keep weights initialized to zero at zero
        mask_gen = torch.zeros_like(weights)
        # Indicate where weights are equal to zero
        freeze_mask = mask_gen == weights
        return(weights, freeze_mask)
class synapse_fcnn(nn.Module):
    '''
    2 layer feed forward neural network.
    Will use leaky ReLU activation functions.
    Activation = {'relu', 'linear','nck','sqgl'}

    Same as simple_fcnn, but inputs first pass through a Synapse layer,
    and a 'swish' (Hardswish) activation branch is also available.
    '''
    def __init__(self, Input_size=3072, Hidden_size=3072, Output_size=1, Activation="relu",
                 alpha=1, beta=0.6, gamma=1, learn=True, scale=1, atten=1, leak=0.01):
        super(synapse_fcnn, self).__init__()
        '''
        Inputs: Input_size, Hidden_size, Output_size, Activation
        '''
        # Initialize architecture parameters
        self.Input_size = Input_size
        self.Hidden_size = Hidden_size
        self.Output_size = Output_size
        self.Activation = Activation
        self.learn = learn
        self.scale = scale
        self.atten = atten
        self.leak = leak
        # Initialize Synapse layer
        self.syn = Synapse(Input_size)
        # Initialize weights through He initialization (by default in nn.Linear)
        # Bias is zeroed and the weight re-drawn with kaiming_normal_
        # (a=0.01 matches the LeakyReLU slope below).
        self.i2h = nn.Linear(Input_size, Hidden_size, bias=True)
        self.i2h.bias = torch.nn.Parameter(torch.zeros_like(self.i2h.bias))
        # self.i2h.weight = torch.nn.init.normal_(self.i2h.weight, mean=0.0, std=math.sqrt(2/(Input_size)))
        self.i2h.weight = torch.nn.init.kaiming_normal_(self.i2h.weight, a=0.01)
        # Initialize densly connected output layer
        self.h2o = nn.Linear(Hidden_size, Output_size)
        self.h2o.bias = torch.nn.Parameter(torch.zeros_like(self.h2o.bias))
        self.h2o.weight = torch.nn.init.kaiming_normal_(self.h2o.weight, a=0.01)
        # Initialize nonlinearities
        self.relu = nn.LeakyReLU(negative_slope=self.leak)
        self.sigmoid = nn.Sigmoid()
        self.swish = nn.Hardswish()
        if Activation=='nck':
            self.nck = NCK(alpha, beta, gamma, learn=self.learn, scale=self.scale)
        if Activation=='sqgl':
            self.sqgl = SQGL(alpha, beta, gamma, learn=self.learn, scale=self.scale, atten=self.atten)
    def forward(self, x):
        '''
        Forward step for network. Establishes Architecture.
        Inputs: Input
        Outputs: Output
        '''
        # Receive inputs into synapse layer
        x = self.syn(x)
        # Set Activation function to calculate hidden layer
        if self.Activation == 'relu':
            Hidden = self.relu(self.i2h(x))
        elif self.Activation == 'nck':
            Hidden = self.nck(self.i2h(x))
        elif self.Activation == 'sqgl':
            Hidden = self.sqgl(self.i2h(x))
        elif self.Activation == 'swish':
            Hidden = self.swish(self.i2h(x))
        else:
            # Unknown/'linear' activation: hidden layer stays linear.
            Hidden = self.i2h(x)
        # Calculate Output layer
        # Output = self.sigmoid(self.h2o(Hidden))
        Output = self.h2o(Hidden)
        return(Output)
class synapse_ktree_gen(nn.Module):
    '''
    k-Tree neural network

    Same as ktree_gen, but every subtree receives its inputs through its
    own Synapse layer before the tree layers.
    '''
    def __init__(self, ds='mnist', Activation="relu", Sparse=True,
                 Input_order=None, Repeats=1, Padded=False,
                 alpha=1, beta=0.6, gamma=1, learn=True, scale=1, atten=1):
        super(synapse_ktree_gen, self).__init__()
        '''
        Inputs: ds (dataset), activation, sparse, input_order, repeats, padded
        '''
        # Initialize architecture parameters
        self.ds = ds
        self.Activation = Activation
        self.Sparse = Sparse
        self.Input_order = Input_order
        self.Repeats = Repeats
        self.learn = learn
        self.scale = scale
        self.atten = atten
        # Initialize weights
        # Set biases to 0
        # Set kaiming initialize weights with gain to correct for sparsity
        # Set freeze masks
        #Specify tree dimensions
        # If using 28x28 datasets...
        if (ds == 'mnist') or (ds == 'fmnist') or (ds == 'kmnist') or (ds == 'emnist'):
            # If padded, use 1024 sized tree, completely binary tree
            if Padded:
                self.k = [1024, 512, 256, 128, 64, 32, 16, 8, 4, 2, 1]
            # If not padded, use 784 sized tree,
            # 7:1 between layers 1 and 2, and layers 2 and 3
            else:
                self.k = [784, 112, 16, 8, 4, 2, 1]
        # If using 3x32x32 datasets...
        elif (ds == 'svhn') or (ds == 'cifar10'):
            # Use 3072 sized tree
            # 3:1 between layers 1 and 2, otherwise binary
            self.k = [3072, 1024, 512, 256, 128, 64, 32, 16, 8, 4, 2, 1]
        # If using 16x16 datasets...
        elif ds == 'usps':
            # Use 256 sized tree
            self.k = [256, 128, 64, 32, 16, 8, 4, 2, 1]
        else:
            # NOTE(review): bailing out here leaves the module without
            # self.k / layers — forward() would fail on such an instance.
            print('Select a dataset')
            return(None)
        # Make layers of tree architecture
        # Name each layer in each subtree for reference later
        self.names = np.empty((self.Repeats, len(self.k)-1),dtype=object)
        self.syn_names = np.empty((self.Repeats), dtype=object)
        # Initialize freeze mask for use in training loop
        self.freeze_mask_set = []
        # For each repeat or subtree, make a sparse layer that is initialized correctly
        for j in range(self.Repeats):
            #Initialize synapse layer for each subtree
            syn_name = ''.join(['s',str(j)])
            self.add_module(syn_name, Synapse(self.k[0]))
            self.syn_names[j] = syn_name
            # For each layer within each subtree
            for i in range(len(self.k)-1):
                # Assign name of the layer, indexed by layer (i) and subtree (j)
                name = ''.join(['w',str(j),'_',str(i)])
                # Initialize the layer with the appropriate name
                self.add_module(name, nn.Linear(self.k[i],self.k[i+1]))
                # Set bias of layer to zeros
                self._modules[name].bias = nn.Parameter(torch.zeros_like(self._modules[name].bias))
                # Use custom method to re-initialize the layer weights and create freeze mask for that layer
                self._modules[name].weight.data, freeze_mask = self.initialize(self._modules[name])
                # Add the layer name to the list of names
                self.names[j,i] = name
                # Set the freeze mask for the first subtree, which should be the same for all subtrees
                if j < 1:
                    self.freeze_mask_set.append(freeze_mask)
        # Initialize root node, aka soma node aka output node
        self.root = nn.Linear(Repeats, 1)
        # Initialize nonlinearities
        self.relu = nn.LeakyReLU()
        self.sigmoid = nn.Sigmoid()
        self.nck = NCK(alpha, beta, gamma, learn=self.learn, scale=self.scale)
        self.sqgl = SQGL(alpha, beta, gamma, learn=self.learn, scale=self.scale, atten=self.atten)
    def forward(self, x):
        '''
        Forward step for network. Establishes Architecture.
        Inputs: Input
        Outputs: Output
        '''
        y_out = []
        # Step through every layer in each subtree of model, applying nonlinearities
        for j in range(self.Repeats):
            y = self._modules[self.syn_names[j]](x) # Synapse layer for each subtree
            for i in range(len(self.k)-1):
                if self.Activation == 'relu':
                    y = self.relu(self._modules[self.names[j,i]](y))
                elif self.Activation == 'nck':
                    y = self.nck(self._modules[self.names[j,i]](y))
                elif self.Activation == 'sqgl':
                    y = self.sqgl(self._modules[self.names[j,i]](y))
                else:
                    y = self._modules[self.names[j,i]](y)
            # keep track of pen-ultimate layer outputs
            y_out.append(y)
        # Calculate final output, joining the outputs of each subtree together
        # output = self.sigmoid(self.root(torch.cat((y_out), dim=1)))
        output = self.root(torch.cat((y_out), dim=1))
        return(output)
    def initialize(self, layer):
        # Kaiming initialize weights accounting for sparsity
        # Extract weights from layer we are reinitializing
        weights = layer.weight.data
        # If sparse, change the initializations based on density (sparsity)
        if self.Sparse:
            if weights.shape[1] == 3072: # first layer of 3x32x32 image datasets
                inp_block = torch.ones((1,3))
            elif (weights.shape[1] == 784) or (weights.shape[1] == 112): # first or second layer of 28x28 datasets
                inp_block = torch.ones((1,7))
            else:
                inp_block = torch.ones((1,2)) # all other layers (or 32x32)
            # Set up mask for where each node receives a set of inputs of equal size to the input block
            # NOTE(review): this assumes in_features == out_features * block
            # width, which holds for the self.k layer ratios above.
            inp_mask = kronecker(torch.eye(weights.shape[0]), inp_block)
            # Calculate density
            density = len(np.where(inp_mask)[0])/len(inp_mask.reshape(-1))
            # Generate Kaiming initialization with gain = 1/density
            weights = torch.nn.init.normal_(weights, mean=0.0, std=math.sqrt(2/(weights.shape[1]*density)))
            # Where no inputs will be received, set weights to zero
            weights[inp_mask == 0] = 0
        else: # If not sparse, use typical kaiming normalization
            weights = torch.nn.init.normal_(weights, mean=0.0, std=math.sqrt(2/(weights.shape[1])))
        # Generate freeze mask for use in training to keep weights initialized to zero at zero
        mask_gen = torch.zeros_like(weights)
        # Indicate where weights are equal to zero
        freeze_mask = mask_gen == weights
        return(weights, freeze_mask)
class ktree_sparse(nn.Module):
'''
k-Tree neural network
'''
def __init__(self, ds='mnist', Activation="relu",
Input_order=None, Repeats=1, Padded=True,
alpha=1, beta=1, gamma=1, learn=True, scale=1, atten=1,
synapse=True, leak=0.01, Node_vary=True, positive=False):
super(ktree_sparse, self).__init__()
'''
Inputs: ds (dataset), activation, sparse, input_order, repeats, padded
'''
# Initialize architecture parameters
self.ds = ds
self.Activation = Activation
self.Input_order = Input_order
self.Repeats = Repeats
self.learn = learn
self.scale = scale
self.atten = atten
self.synapse = synapse
self.leak = leak
self.Node_vary = Node_vary
self.positive = positive
# Initialize weights
# Set biases to 0
# Set kaiming initialize weights with gain to correct for sparsity
# Set freeze masks
#Specify tree dimensions
# If using 28x28 datasets...
if (ds == 'mnist') or (ds == 'fmnist') or (ds == 'kmnist') or (ds == 'emnist'):
# If padded, use 1024 sized tree, completely binary tree
if Padded:
self.k = [1024, 512, 256, 128, 64, 32, 16, 8, 4, 2, 1]
# If not padded, use 784 sized tree,
# 7:1 between layers 1 and 2, and layers 2 and 3
else:
self.k = [784, 112, 16, 8, 4, 2, 1]
# If using 3x32x32 datasets...
elif (ds == 'svhn') or (ds == 'cifar10'):
# Use 3072 sized tree
# 3:1 between layers 1 and 2, otherwise binary
self.k = [3072, 1024, 512, 256, 128, 64, 32, 16, 8, 4, 2, 1]
# If using 16x16 datasets...
elif ds == 'usps':
# Use 256 sized tree
self.k = [256, 128, 64, 32, 16, 8, 4, 2, 1]
else:
print('Select a dataset')
return(None)
# Make layers of tree architecture
# Name each layer in each subtree for reference later
self.names = np.empty((self.Repeats, len(self.k)-1),dtype=object)
if self.synapse:
self.syn_names = np.empty((self.Repeats), dtype=object)
if self.Node_vary:
self.sqgl_names = np.empty((self.Repeats, len(self.k)-1),dtype=object)
# Initialize freeze mask for use in training loop
self.freeze_mask_set = []
# For each repeat or subtree, make a sparse layer that is initialized correctly
for j in range(self.Repeats):
if self.synapse:
#Initialize synapse layer for each subtree
syn_name = ''.join(['s',str(j)])
self.add_module(syn_name, Synapse(self.k[0]))
self.syn_names[j] = syn_name
if self.Node_vary == True:
for i in range(len(self.k)-1):
# Assign name of each sqgl layer, indexed by layer (i) and subtree (j)
sqgl_name = ''.join(['sq',str(j),'_',str(i)])
# Initialize the layer with the appropriate name
self.add_module(sqgl_name, slc.SparseLinear(3*self.k[i+1],
self.k[i+1],
connectivity=self.sqgl_connectivity(self.k[i+1]),
bias=False,
sqgl_true=True))
# Add activation layer name to list of names
self.sqgl_names[j,i] = sqgl_name
# For each layer within each subtree
for i in range(len(self.k)-1):
# Assign name of the layer, indexed by layer (i) and subtree (j)
name = ''.join(['w',str(j),'_',str(i)])
# Initialize the layer with the appropriate name
self.add_module(name, slc.SparseLinear(self.k[i],
self.k[i+1],
connectivity=self.layer_connectivity(self.k[i]),
bias=True,
positive=self.positive))
# Add the layer name to the list of names
self.names[j,i] = name
self._modules[self.names[j,i]].bias.data.zero_()
# Initialize root node, aka soma node aka output node
self.root = nn.Linear(Repeats, 1, bias=True)
self.root.bias.data.zero_()
# Initialize nonlinearities
self.relu = nn.LeakyReLU(negative_slope=leak)
self.sigmoid = nn.Sigmoid()
self.swish = nn.Hardswish()
self.nck = NCK(alpha, beta, gamma, learn=self.learn, scale=self.scale)
self.sqgl = SQGL(alpha, beta, gamma, learn=self.learn, scale=self.scale, atten=self.atten)
if self.Node_vary == True:
self.f_na = F_Na(self.scale)
self.f_ca = F_Ca(self.scale)
self.f_k = F_K(self.scale)
def forward(self, x):
    '''
    Forward pass through all subtrees, applying the configured nonlinearity
    after each sparse layer, then joining the subtree outputs at the root.
    Inputs: x — batch of flattened inputs (one column per leaf).
    Outputs: root-node activation, shape (batch, 1).
    '''
    y_out = []
    # Step through every layer in each subtree of model, applying nonlinearities
    for j in range(self.Repeats):
        if self.synapse:
            y = self._modules[self.syn_names[j]](x) # Synapse layer for each subtree
        else:
            # No synapse layer configured: feed the raw input to every subtree.
            y = x
        for i in range(len(self.k)-1):
            if self.Activation == 'relu':
                y = self.relu(self._modules[self.names[j,i]](y))
            elif self.Activation == 'nck':
                y = self.nck(self._modules[self.names[j,i]](y))
            elif self.Activation == 'sqgl':
                if self.Node_vary == True: # If varying sqgl by node
                    # First put through linear layer
                    y = self._modules[self.names[j,i]](y)
                    # Then do step one of sqgl nonlinearity
                    interm_act = torch.cat((self.f_na(y), self.f_ca(y), self.f_k(y)), axis=1)
                    # Weighted sum of nonlinearities, added to linear component
                    # Then multiplied by an attenuation factor
                    y = self.atten*(y + self._modules[self.sqgl_names[j,i]](interm_act))
                else:
                    y = self.sqgl(self._modules[self.names[j,i]](y))
            elif self.Activation == 'sigmoid':
                y = self.sigmoid(self._modules[self.names[j,i]](y))
            elif self.Activation == 'silu':
                # NOTE(review): self.silu is never assigned in the visible __init__
                # (only relu/sigmoid/swish/nck/sqgl are) — this branch likely raises
                # AttributeError; confirm before selecting Activation='silu'.
                y = self.silu(self._modules[self.names[j,i]](y))
            elif self.Activation == 'swish':
                y = self.swish(self._modules[self.names[j,i]](y))
            else:
                # Unrecognized activation name: layer output is left linear.
                y = self._modules[self.names[j,i]](y)
        # keep track of pen-ultimate layer outputs
        y_out.append(y)
    # Calculate final output, joining the outputs of each subtree together
    # output = self.sigmoid(self.root(torch.cat((y_out), dim=1)))
    output = self.root(torch.cat((y_out), dim=1))
    return(output)
def layer_connectivity(self, in_features):
    """Build the sparse connectivity pattern for one tree layer.

    Each output unit connects to one contiguous group of `fan_in` inputs,
    i.e. the mask is eye(out_features) ⊗ ones(1, fan_in).

    Parameters:
        in_features — width of the layer's input.
    Returns:
        (2, nnz) LongTensor of (row, col) indices, as expected by
        slc.SparseLinear's `connectivity` argument.
    """
    # Branching factor per layer: 3:1 for the 3072-wide CIFAR/SVHN input
    # layer, 7:1 for the first/second unpadded 28x28 layers, else binary.
    if in_features == 3072:
        fan_in = 3
    elif in_features in (784, 112):
        fan_in = 7
    else:
        fan_in = 2
    # torch.kron replaces the hand-rolled file-local `kronecker` helper.
    inp_mask = torch.kron(torch.eye(in_features // fan_in),
                          torch.ones((1, fan_in)))
    ix = inp_mask.nonzero(as_tuple=False)
    return ix.t()
def sqgl_connectivity(self, in_features):
    """Connectivity for the per-node SQGL mixing layer.

    Maps a 3*in_features input (three stacked channel activations) onto
    in_features outputs, each output reading the matching unit from every
    one of the three channels. Returns a (2, nnz) LongTensor of
    (row, col) indices.
    """
    # Only usable as an activation-function layer when this layer's input
    # is 1x1x3N, N being the original input size.
    identity = torch.eye(in_features)
    # Tiling the identity along the column axis equals stacking it along
    # the row axis and transposing, so the mask matches the original.
    mask = torch.cat((identity, identity, identity), dim=1)
    indices = mask.nonzero(as_tuple=False)
    return indices.t()
class asym_tree_gen(nn.Module):
    '''
    asym-Tree neural network.

    Builds `Repeats` sparse subtrees whose wiring follows an arbitrary
    (asymmetric) tree given as a parent-id list (e.g. [-1,0,0,1,1,2,2]:
    index = node id, value = parent id, root marked -1), then joins the
    subtree outputs with a single linear root ("soma") node.
    '''
    def __init__(self, ds='mnist', Activation="relu",
                 Input_order=None, Repeats=1, Padded=True,
                 alpha=1, beta=0.6, gamma=1, learn=True, scale=1, atten=1,
                 synapse=False, tree=None, leak=0.01):
        super(asym_tree_gen, self).__init__()
        '''
        Inputs: ds (dataset), activation, sparse, input_order, repeats, padded
        '''
        # Initialize architecture parameters
        self.ds = ds
        self.Activation = Activation
        self.Input_order = Input_order
        self.Repeats = Repeats
        self.learn = learn
        self.scale = scale
        self.atten = atten
        self.Synapse = synapse
        self.tree = tree
        # NOTE(review): self.tree is dereferenced here, BEFORE the None check
        # below — calling with tree=None fails with TypeError on this line
        # rather than the intended TypeError('Did not specify tree').
        self.num_leaves = len(self.find_all_leaf_ids(self.tree))
        self.leak = leak
        if self.tree is None:
            raise TypeError('Did not specify tree')
        # Initialize weights
        # Set biases to 0
        # Set kaiming initialize weights with gain to correct for sparsity
        # Set freeze masks
        #Specify tree dimensions
        # If using 28x28 datasets...
        if (ds == 'mnist') or (ds == 'fmnist') or (ds == 'kmnist') or (ds == 'emnist'):
            # If padded, use 1024 sized tree, completely binary tree
            if Padded:
                self.input_size = 1024
            # If not padded, use 784 sized tree,
            else:
                self.input_size = 784
        # If using 3x32x32 datasets...
        elif (ds == 'svhn') or (ds == 'cifar10'):
            # Use 3072 sized tree
            self.input_size = 3072
        # If using 16x16 datasets...
        elif ds == 'usps':
            # Use 256 sized tree
            self.input_size = 256
        else:
            # NOTE(review): returning from __init__ leaves a half-constructed
            # object (no layers); later attribute access will fail.
            print('Select a dataset')
            return(None)
        # Make layers of tree architecture
        # Make list of connectivity matrices for purpose of making sparse layers
        self.connectivity_matrices = self.seq_adj_mat(self.tree)
        # Name each layer in each subtree for reference later
        self.names = np.empty((self.Repeats, len(self.connectivity_matrices)),dtype=object)
        if self.Synapse:
            self.syn_names = np.empty((self.Repeats), dtype=object)
        # For each repeat or subtree, make a sparse layer that is initialized correctly
        for j in range(self.Repeats):
            if self.Synapse:
                #Initialize synapse layer for each subtree
                syn_name = ''.join(['s',str(j)])
                self.add_module(syn_name, Synapse(self.input_size))
                self.syn_names[j] = syn_name
            # For each layer within each subtree
            for i, connectivity_matrix in enumerate(self.connectivity_matrices):
                # Assign name of the layer, indexed by layer (i) and subtree (j)
                name = ''.join(['w',str(j),'_',str(i)])
                # Initialize the layer with the appropriate name.
                # Input is the leaf inputs concatenated with all tree-node
                # slots; output is one value per tree node.
                self.add_module(name, slc.SparseLinear(self.num_leaves + len(self.tree),
                                                       len(self.tree),
                                                       connectivity=connectivity_matrix,
                                                       bias=True))
                # print(i,connectivity_matrix)
                # print(self._modules[name])
                # Add the layer name to the list of names
                self.names[j,i] = name
                self._modules[self.names[j,i]].bias.data.zero_()
        # Initialize root node, aka soma node aka output node
        self.root = nn.Linear(Repeats, 1, bias=True)
        self.root.bias.data.zero_()
        # Initialize nonlinearities
        self.relu = nn.LeakyReLU(negative_slope=self.leak)
        self.sigmoid = nn.Sigmoid()
        self.nck = NCK(alpha, beta, gamma, learn=self.learn, scale=self.scale)
        self.sqgl = SQGL(alpha, beta, gamma, learn=self.learn, scale=self.scale, atten=self.atten)
    def find_all_leaf_ids(self,tree):
        """
        Return ids of leaf nodes: nodes that appear nowhere as a parent.
        E.g. in tree = [-1,0,0,1,1,2,2] the leaves are [3, 4, 5, 6].
        """
        all_leaf_ids = [i for i in range(len(tree)) if i not in tree and tree[i] is not None]
        return(np.array(all_leaf_ids))
    def find_all_branch_ids(self,tree):
        '''
        Return ids of branch (internal) nodes: nodes that appear as some
        other node's parent. E.g. in tree = [-1,0,0,1,1,2,2] the branches
        are [0, 1, 2].
        '''
        all_branch_ids = [i for i in range(len(tree)) if i in tree and tree[i] is not None]
        return(np.array(all_branch_ids))
    def path_lengths(self, tree):
        """Return, for every node, the number of hops up to the root (node 0)."""
        paths = np.zeros(len(tree))
        for i in range(len(tree)):
            node = i
            branch = node
            path_length = 0
            # Walk parent pointers until node 0 is reached; the root itself
            # gets path length 0 because the loop body never runs.
            while branch != 0:
                branch = tree[node]
                node = branch
                path_length += 1
            paths[i] = path_length
        return(paths.astype(int))
    def seq_adj_mat(self, tree):
        """Build one sparse connectivity index tensor per tree depth level.

        Levels are processed deepest-first; `i`/`j` are running column
        counters shared across levels, so leaves (resp. branches) are
        assigned consecutive input columns in discovery order.
        """
        paths = self.path_lengths(tree)
        leaves = self.find_all_leaf_ids(tree)
        branches = self.find_all_branch_ids(tree)
        ids = np.arange(len(tree))
        i = 1
        j = 1
        adj_mats = []
        for path_len in reversed(range(max(paths)+1)):
            adj_mat = np.zeros((len(tree), len(tree) + self.num_leaves))
            # Select the nodes sitting at this depth.
            idx = np.where(path_len == paths, True, False)
            nodes = ids[idx]
            leaf_nodes = list(filter(lambda x: x in leaves, nodes))
            branch_nodes = list(filter(lambda x: x in branches, nodes))
            for leaf in leaf_nodes:
                # Leaves read from the raw-input section of the layer input.
                adj_mat[leaf, len(leaves) - i] = 1
                i += 1
            for branch in branch_nodes:
                # Branches read two consecutive slots from the tree-node
                # section (presumably their two children — confirm ordering).
                adj_mat[branch, len(tree) + len(leaves) - j] = 1
                j += 1
                adj_mat[branch, len(tree) + len(leaves) - j] = 1
                j += 1
            # Change adj_mats into sparse format
            sparse_idx = []
            idxs = np.where(adj_mat)
            for idx in idxs:
                sparse_idx.append(list(idx))
            sparse_idx = torch.LongTensor(sparse_idx)
            adj_mats.append(sparse_idx)
        return(adj_mats)
    def forward(self, x):
        '''
        Forward step for network. Establishes Architecture.
        Inputs: x — batch of flattened inputs, shape (batch, features).
        Outputs: root-node activation, shape (batch, 1).
        NOTE(review): uses hard-coded .cuda() tensors — CPU-only execution
        will fail; confirm a GPU is always available here.
        '''
        y_out = []
        # Step through every layer in each subtree of model, applying nonlinearities
        for j in range(self.Repeats):
            y = x.clone()
            # print(self.num_leaves)
            # Zero-pad or truncate the input so it matches the leaf count.
            if y.shape[1] < self.num_leaves:
                filler = torch.zeros((y.shape[0], self.num_leaves)).cuda()
                filler[:,:y.shape[1]] = y
                y = filler
            elif y.shape[1] > self.num_leaves:
                y = y[:, :self.num_leaves]
            # Layer input = [leaf values | tree-node slots (initially zero)].
            hidden = torch.cat((y, torch.zeros(y.shape[0], len(self.tree)).cuda()), dim=1)
            for i in range(len(self.connectivity_matrices)):
                # print(i,j)
                if self.Activation == 'relu':
                    hidden = self.relu(self._modules[self.names[j,i]](hidden))
                    hidden = torch.cat((y, hidden), dim=1)
                elif self.Activation == 'nck':
                    hidden = self.nck(self._modules[self.names[j,i]](hidden))
                    hidden = torch.cat((y, hidden), dim=1)
                elif self.Activation == 'sqgl':
                    hidden = self.sqgl(self._modules[self.names[j,i]](hidden))
                    hidden = torch.cat((y, hidden), dim=1)
                else:
                    hidden = self._modules[self.names[j,i]](hidden)
                    hidden = torch.cat((y, hidden), dim=1)
            # # keep track of pen-ultimate layer outputs
            # Column self.num_leaves is the first tree-node slot (presumably
            # the subtree's root output — confirm against seq_adj_mat layout).
            y_out.append(hidden[:,self.num_leaves].reshape(y.shape[0],1))
            # print('yout',y_out[0].shape)
        # Calculate final output, joining the outputs of each subtree together
        # output = self.sigmoid(self.root(torch.cat((y_out), dim=1)))
        output = self.root(torch.cat((y_out), dim=1))
        # output = y
        return(output)
def prepare_tree(n):
    """Extract a zero-based parent-id list from a loaded tree record.

    Takes `n` where n[0] is a 2-D array whose last column holds 1-based
    parent ids; returns those ids as a 0-based int array.
    """
    last_column = np.array(n[0][:, -1])
    return last_column.astype(int) - 1
def find_all_leaf_ids(tree):
    """Return the ids of all leaf nodes in a parent-id tree list.

    A node is a leaf when no other node lists it as its parent.
    E.g. for tree = [-1, 0, 0, 1, 1, 2, 2] the leaves are [3, 4, 5, 6].

    Parameters:
        tree — sequence where tree[i] is node i's parent id (root = -1).
    Returns:
        np.ndarray of leaf node ids, ascending.
    """
    # Hoist membership testing into a set: O(n) total instead of the
    # original O(n^2) per-node scan of the list.
    parents = set(tree)
    all_leaf_ids = [i for i in range(len(tree))
                    if i not in parents and tree[i] is not None]
    return np.array(all_leaf_ids)
def find_target_tree(mcn_trees):
    """Pick the tree whose leaf count is closest to a target input size.

    The target (256 / 1024 / 3072) is chosen by bucketing the FIRST tree's
    leaf count; the index of the tree with minimal |leaves - target| is
    returned.
    """
    # Column 0: tree index; column 1: its number of leaves.
    leaves = np.zeros((len(mcn_trees),2))
    for i in range(len(mcn_trees)):
        n = mcn_trees[i]
        # Last column of the record holds 1-based parent ids; make 0-based.
        tree = (n[0][:,-1]).astype(int) -1
        leaves[i,1] = len(find_all_leaf_ids(tree))
        leaves[i,0] = i
    # NOTE(review): the target bucket is decided from the first tree only,
    # and if its leaf count falls outside all three bands below, targ_diff
    # is never bound and the concatenate line raises NameError — confirm.
    num_leafs = leaves[0,1]
    if num_leafs > 14**2 and num_leafs < 18**2 :
        targ_diff = 256
    if num_leafs > 26**2 and num_leafs < 34**2 :
        targ_diff = 1024
    if num_leafs > 3*30**2 and num_leafs < 3*34**2 :
        targ_diff = 3072
    # Column 2: absolute distance of each tree's leaf count from the target.
    leaves = np.concatenate((leaves,abs(leaves[:,1]-targ_diff).reshape(-1,1)),1)
    # print(leaves)
    target = np.argmin(leaves[:,2])
    print('Target Tree:', leaves[target,:])
    return(target)
class ktree_synapse(nn.Module):
    '''
    k-Tree neural network with a mandatory synapse layer per subtree.

    Like the plain k-tree, but every subtree input passes through a Synapse
    module, and forward() additionally accumulates a hinge penalty on every
    layer's activations, returning (output, loss).
    '''
    def __init__(self, ds='mnist', Activation="relu",
                 Input_order=None, Repeats=1, Padded=True,
                 alpha=1, beta=1, gamma=1, learn=True, scale=1, atten=1,
                 leak=0.01, Node_vary=True, positive=False):
        super(ktree_synapse, self).__init__()
        '''
        Inputs: ds (dataset), activation, sparse, input_order, repeats, padded
        '''
        # Initialize architecture parameters
        self.ds = ds
        self.Activation = Activation
        self.Input_order = Input_order
        self.Repeats = Repeats
        self.learn = learn
        self.scale = scale
        self.atten = atten
        self.leak = leak
        self.Node_vary = Node_vary
        self.positive = positive
        # Initialize weights
        # Set biases to 0
        # Set kaiming initialize weights with gain to correct for sparsity
        # Set freeze masks
        #Specify tree dimensions
        # If using 28x28 datasets...
        if (ds == 'mnist') or (ds == 'fmnist') or (ds == 'kmnist') or (ds == 'emnist'):
            # If padded, use 1024 sized tree, completely binary tree
            if Padded:
                self.k = [1024, 512, 256, 128, 64, 32, 16, 8, 4, 2, 1]
            # If not padded, use 784 sized tree,
            # 7:1 between layers 1 and 2, and layers 2 and 3
            else:
                self.k = [784, 112, 16, 8, 4, 2, 1]
        # If using 3x32x32 datasets...
        elif (ds == 'svhn') or (ds == 'cifar10'):
            # Use 3072 sized tree
            # 3:1 between layers 1 and 2, otherwise binary
            self.k = [3072, 1024, 512, 256, 128, 64, 32, 16, 8, 4, 2, 1]
        # If using 16x16 datasets...
        elif ds == 'usps':
            # Use 256 sized tree
            self.k = [256, 128, 64, 32, 16, 8, 4, 2, 1]
        else:
            # NOTE(review): returning from __init__ leaves a half-constructed
            # object (self.k missing); later attribute access will fail.
            print('Select a dataset')
            return(None)
        # Make layers of tree architecture
        # Name each layer in each subtree for reference later
        self.names = np.empty((self.Repeats, len(self.k)-1),dtype=object)
        self.syn_names = np.empty((self.Repeats), dtype=object)
        if self.Node_vary:
            self.sqgl_names = np.empty((self.Repeats, len(self.k)-1),dtype=object)
        # Initialize freeze mask for use in training loop
        self.freeze_mask_set = []
        # For each repeat or subtree, make a sparse layer that is initialized correctly
        for j in range(self.Repeats):
            #Initialize synapse layer for each subtree
            syn_name = ''.join(['s',str(j)])
            self.add_module(syn_name, Synapse(self.k[0]))
            self.syn_names[j] = syn_name
            if self.Node_vary == True:
                for i in range(len(self.k)-1):
                    # Assign name of each sqgl layer, indexed by layer (i) and subtree (j)
                    sqgl_name = ''.join(['sq',str(j),'_',str(i)])
                    # Initialize the layer with the appropriate name
                    self.add_module(sqgl_name, slc.SparseLinear(3*self.k[i+1],
                                                                self.k[i+1],
                                                                connectivity=self.sqgl_connectivity(self.k[i+1]),
                                                                bias=False,
                                                                sqgl_true=True))
                    # Add activation layer name to list of names
                    self.sqgl_names[j,i] = sqgl_name
            # For each layer within each subtree
            for i in range(len(self.k)-1):
                # Assign name of the layer, indexed by layer (i) and subtree (j)
                name = ''.join(['w',str(j),'_',str(i)])
                # Initialize the layer with the appropriate name
                self.add_module(name, slc.SparseLinear(self.k[i],
                                                       self.k[i+1],
                                                       connectivity=self.layer_connectivity(self.k[i]),
                                                       bias=True,
                                                       positive=self.positive))
                # Add the layer name to the list of names
                self.names[j,i] = name
                self._modules[self.names[j,i]].bias.data.zero_()
        # Initialize root node, aka soma node aka output node
        self.root = nn.Linear(Repeats, 1, bias=True)
        self.root.bias.data.zero_()
        # Initialize nonlinearities
        self.relu = nn.LeakyReLU(negative_slope=leak)
        self.sigmoid = nn.Sigmoid()
        self.swish = nn.Hardswish()
        self.nck = NCK(alpha, beta, gamma, learn=self.learn, scale=self.scale)
        # Node_vary=True uses per-node channel nonlinearities; otherwise a
        # single shared SQGL module is used.
        if self.Node_vary == True:
            self.f_na = F_Na(self.scale)
            self.f_ca = F_Ca(self.scale)
            self.f_k = F_K(self.scale)
        else:
            self.sqgl = SQGL(alpha, beta, gamma, learn=self.learn, scale=self.scale, atten=self.atten)
    def forward(self, x):
        '''
        Forward step for network. Establishes Architecture.
        Inputs: x — batch of flattened inputs.
        Outputs: (root activation of shape (batch, 1),
                  accumulated hinge penalty over all layer activations)
        '''
        y_out = []
        loss = 0
        # NOTE(review): this local is never used — hinge_criterion() builds
        # its own Hinge_loss instance; candidate for removal.
        hinge_loss = Hinge_loss(margin=0.5)
        # Step through every layer in each subtree of model, applying nonlinearities
        for j in range(self.Repeats):
            y = self._modules[self.syn_names[j]](x) # Synapse layer for each subtree
            for i in range(len(self.k)-1):
                if self.Activation == 'relu':
                    y = self.relu(self._modules[self.names[j,i]](y))
                    loss += self.hinge_criterion(y)
                elif self.Activation == 'nck':
                    y = self.nck(self._modules[self.names[j,i]](y))
                    loss += self.hinge_criterion(y)
                elif self.Activation == 'sqgl':
                    if self.Node_vary == True: # If varying sqgl by node
                        # First put through linear layer
                        y = self._modules[self.names[j,i]](y)
                        # Then do step one of sqgl nonlinearity
                        interm_act = torch.cat((self.f_na(y), self.f_ca(y), self.f_k(y)), axis=1)
                        # Weighted sum of nonlinearities, added to linear component
                        # Then multiplied by an attenuation factor
                        y = self.atten*(y + self._modules[self.sqgl_names[j,i]](interm_act))
                        #Calculate loss
                        loss += self.hinge_criterion(y)
                    else:
                        y = self.sqgl(self._modules[self.names[j,i]](y))
                        loss += self.hinge_criterion(y)
                elif self.Activation == 'sigmoid':
                    y = self.sigmoid(self._modules[self.names[j,i]](y))
                    loss += self.hinge_criterion(y)
                elif self.Activation == 'silu':
                    # NOTE(review): self.silu is never assigned in __init__ —
                    # this branch likely raises AttributeError; confirm.
                    y = self.silu(self._modules[self.names[j,i]](y))
                    loss += self.hinge_criterion(y)
                elif self.Activation == 'swish':
                    y = self.swish(self._modules[self.names[j,i]](y))
                    loss += self.hinge_criterion(y)
                else:
                    # Unrecognized activation name: layer output is left linear.
                    y = self._modules[self.names[j,i]](y)
                    loss += self.hinge_criterion(y)
            # keep track of pen-ultimate layer outputs
            y_out.append(y)
        # Calculate final output, joining the outputs of each subtree together
        # output = self.sigmoid(self.root(torch.cat((y_out), dim=1)))
        output = self.root(torch.cat((y_out), dim=1))
        return(output, loss)
    def layer_connectivity(self, in_features):
        """Sparse (2, nnz) index tensor: each output reads one contiguous
        group of inputs (3:1, 7:1 or 2:1 depending on layer width)."""
        if in_features == 3072:
            inp_block = torch.ones((1,3))
        elif (in_features == 784) or (in_features == 112): # first or second layer of 28x28 datasets
            inp_block = torch.ones((1,7))
        else:
            inp_block = torch.ones((1,2)) # all other layers (or 32x32)
        inp_mask = kronecker(torch.eye(int(in_features/inp_block.size()[1])), inp_block)
        ix = inp_mask.nonzero(as_tuple=False)
        return(ix.t())
    def sqgl_connectivity(self, in_features):
        """Sparse indices mapping 3*in_features stacked channel activations
        onto in_features outputs (one slot per channel per output)."""
        # Only works as an activation function layer if input to this layer is 1x1x3N where N is original input size
        inp_block = torch.eye(in_features)
        inp_mask = torch.cat((inp_block, inp_block, inp_block), axis=0)
        inp_mask = inp_mask.t()
        ix = inp_mask.nonzero(as_tuple=False)
        return(ix.t())
    def hinge_criterion(self, activity):
        """Penalize activations outside the [-71, 51] band.

        The bounds are presumably membrane-voltage limits in mV — confirm.
        """
        # Specify targets with same size as layer
        hinge_loss = Hinge_loss(margin=0.5)
        target = - torch.ones_like(activity)
        # Loss = hinge(v-vmax) + hinge(vmin-v)
        return(hinge_loss(activity-51, target) + hinge_loss(-71-activity, target))
| 42.1024
| 200
| 0.547674
| 6,871
| 52,628
| 4.100131
| 0.062582
| 0.009052
| 0.009939
| 0.014057
| 0.882756
| 0.872391
| 0.861458
| 0.85191
| 0.844775
| 0.828944
| 0
| 0.0356
| 0.336551
| 52,628
| 1,250
| 201
| 42.1024
| 0.771251
| 0.225336
| 0
| 0.744966
| 0
| 0
| 0.013401
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.067114
| false
| 0
| 0.01745
| 0.005369
| 0.104698
| 0.008054
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c57ec75da3006ec44756edcd1a07672efc8946f7
| 4,938
|
py
|
Python
|
tests/agents/network/test_BlockDNS.py
|
nafri-irfan96/ychaos
|
33542ef061b25f7a3770cb40c10c394dc123c475
|
[
"Apache-2.0"
] | 1
|
2021-09-27T16:18:33.000Z
|
2021-09-27T16:18:33.000Z
|
tests/agents/network/test_BlockDNS.py
|
nafri-irfan96/ychaos
|
33542ef061b25f7a3770cb40c10c394dc123c475
|
[
"Apache-2.0"
] | null | null | null |
tests/agents/network/test_BlockDNS.py
|
nafri-irfan96/ychaos
|
33542ef061b25f7a3770cb40c10c394dc123c475
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021, Yahoo
# Licensed under the terms of the Apache 2.0 license. See the LICENSE file in the project root for terms
import os
import subprocess
from unittest import TestCase
from mockito import any, unstub, verify, when
from ychaos.agents.agent import AgentState
from ychaos.agents.exceptions import AgentError
from ychaos.agents.network.iptables import DNSBlock, DNSBlockConfig
class TestBlockDNSConfig(TestCase):
    """Unit tests for the DNSBlock agent's lifecycle and iptables calls.

    `when`/`verify`/`any` come from mockito; `any` here is the mockito
    argument matcher, not the builtin.
    """

    def test_block_dns_setup(self):
        """setup() moves the agent into the SETUP state."""
        config = DNSBlockConfig()
        agent = DNSBlock(config)
        agent.setup()
        agent.monitor()  # coverage
        self.assertEqual(agent.current_state, AgentState.SETUP)

    def test_block_dns_teardown_does_not_modify_iptables_rule_when_in_setup(self):
        """teardown() before run() must not invoke any iptables command."""
        config = DNSBlockConfig()
        agent = DNSBlock(config)
        agent.setup()
        self.assertEqual(agent.current_state, AgentState.SETUP)
        when(subprocess).run(
            any,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        ).thenReturn(subprocess.CompletedProcess(args=[], returncode=0))
        agent.teardown()
        # No rules were inserted, so none should be deleted.
        verify(subprocess, times=0).run(
            any,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )

    def test_block_dns_run(self):
        """run() inserts UDP and TCP port-53 DROP rules and goes RUNNING."""
        config = DNSBlockConfig()
        agent = DNSBlock(config)
        agent.setup()
        self.assertEqual(agent.current_state, AgentState.SETUP)
        when(os).geteuid().thenReturn(0)  # pretend to be root
        when(subprocess).run(
            "sudo /sbin/iptables -I OUTPUT -p udp --dport 53 -j DROP -w 3".split(),
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        ).thenReturn(subprocess.CompletedProcess(args=[], returncode=0))
        when(subprocess).run(
            "sudo /sbin/iptables -I OUTPUT -p tcp --dport 53 -j DROP -w 3".split(),
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        ).thenReturn(subprocess.CompletedProcess(args=[], returncode=0))
        agent.run()
        self.assertEqual(agent.current_state, AgentState.RUNNING)

    def test_block_dns_run_raises_io_error(self):
        """A non-zero iptables exit status during run() surfaces as IOError."""
        config = DNSBlockConfig()
        agent = DNSBlock(config)
        agent.setup()
        self.assertEqual(agent.current_state, AgentState.SETUP)
        when(os).geteuid().thenReturn(0)
        when(subprocess).run(
            "sudo /sbin/iptables -I OUTPUT -p udp --dport 53 -j DROP -w 3".split(),
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        ).thenReturn(subprocess.CompletedProcess(args=[], returncode=0))
        # Second (TCP) rule fails -> agent should raise.
        when(subprocess).run(
            "sudo /sbin/iptables -I OUTPUT -p tcp --dport 53 -j DROP -w 3".split(),
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        ).thenReturn(subprocess.CompletedProcess(args=[], returncode=1))
        with self.assertRaises(IOError):
            agent.run()

    def test_block_dns_teardown_restores_after_running(self):
        """teardown() after RUNNING deletes both DROP rules (two run calls)."""
        config = DNSBlockConfig()
        agent = DNSBlock(config)
        agent.setup()
        self.assertEqual(agent.current_state, AgentState.SETUP)
        agent.advance_state(AgentState.RUNNING)
        when(subprocess).run(
            "sudo /sbin/iptables -D OUTPUT -p udp --dport 53 -j DROP -w 3".split(),
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        ).thenReturn(subprocess.CompletedProcess(args=[], returncode=0))
        when(subprocess).run(
            "sudo /sbin/iptables -D OUTPUT -p tcp --dport 53 -j DROP -w 3".split(),
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        ).thenReturn(subprocess.CompletedProcess(args=[], returncode=0))
        agent.teardown()
        verify(subprocess, times=2).run(
            any,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )

    def test_block_dns_teardown_raises_error_when_failed(self):
        """A non-zero delete status during teardown() surfaces as AgentError."""
        config = DNSBlockConfig()
        agent = DNSBlock(config)
        agent.setup()
        self.assertEqual(agent.current_state, AgentState.SETUP)
        agent.advance_state(AgentState.RUNNING)
        when(subprocess).run(
            "sudo /sbin/iptables -D OUTPUT -p udp --dport 53 -j DROP -w 3".split(),
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        ).thenReturn(subprocess.CompletedProcess(args=[], returncode=0))
        when(subprocess).run(
            "sudo /sbin/iptables -D OUTPUT -p tcp --dport 53 -j DROP -w 3".split(),
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        ).thenReturn(subprocess.CompletedProcess(args=[], returncode=1))
        with self.assertRaises(AgentError):
            agent.teardown()
        verify(subprocess, times=2).run(
            any,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )

    def tearDown(self) -> None:
        # Remove all mockito stubs between tests.
        unstub()
| 32.064935
| 105
| 0.624949
| 538
| 4,938
| 5.652416
| 0.180297
| 0.11049
| 0.078921
| 0.102598
| 0.82144
| 0.796777
| 0.782966
| 0.767511
| 0.765538
| 0.744821
| 0
| 0.012121
| 0.264885
| 4,938
| 153
| 106
| 32.27451
| 0.82562
| 0.027136
| 0
| 0.787611
| 0
| 0
| 0.100021
| 0
| 0
| 0
| 0
| 0
| 0.079646
| 1
| 0.061947
| false
| 0
| 0.061947
| 0
| 0.132743
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c5a0b6eddf51d7b9b140e0450f5565c3eaa05aa5
| 213
|
py
|
Python
|
src/cms/views/bed_target_groups/__init__.py
|
digitalfabrik/coldaid-backend
|
b769510570d5921e30876565263813c0362994e2
|
[
"Apache-2.0"
] | 4
|
2019-12-05T16:45:17.000Z
|
2020-05-09T07:26:34.000Z
|
src/cms/views/bed_target_groups/__init__.py
|
digitalfabrik/coldaid-backend
|
b769510570d5921e30876565263813c0362994e2
|
[
"Apache-2.0"
] | 56
|
2019-12-05T12:31:37.000Z
|
2021-01-07T15:47:45.000Z
|
src/cms/views/bed_target_groups/__init__.py
|
digitalfabrik/coldaid-backend
|
b769510570d5921e30876565263813c0362994e2
|
[
"Apache-2.0"
] | 2
|
2019-12-11T09:52:26.000Z
|
2020-05-09T07:26:38.000Z
|
"""
Python standard Init-File
"""
from .bed_target_group_actions import delete_bed_target_group
from .bed_target_group_view import BedTargetGroupView
from .bed_target_group_list_view import BedTargetGroupListView
| 30.428571
| 62
| 0.873239
| 29
| 213
| 5.965517
| 0.517241
| 0.208092
| 0.323699
| 0.312139
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.079812
| 213
| 6
| 63
| 35.5
| 0.882653
| 0.117371
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
c5b042f4b326db28b70c281fd1a227d11c88daaa
| 14,144
|
py
|
Python
|
DGBO_GN-batch/gen_synthetic_dataset.py
|
csjtx1021/Scalable_and_Parallel_Deep_Bayesian_Optimization_on_Attributed_Graphs
|
68c3d1119be6cafdb32d00dbc8a291047c1639a4
|
[
"MIT"
] | 6
|
2020-10-19T05:10:33.000Z
|
2022-01-17T04:33:45.000Z
|
DGBO_GN-batch/gen_synthetic_dataset.py
|
csjtx1021/Scalable_and_Parallel_Deep_Bayesian_Optimization_on_Attributed_Graphs
|
68c3d1119be6cafdb32d00dbc8a291047c1639a4
|
[
"MIT"
] | null | null | null |
DGBO_GN-batch/gen_synthetic_dataset.py
|
csjtx1021/Scalable_and_Parallel_Deep_Bayesian_Optimization_on_Attributed_Graphs
|
68c3d1119be6cafdb32d00dbc8a291047c1639a4
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 18 10:18:43 2017
@author: cuijiaxu
"""
import numpy as np
import multiprocessing as mp
import networkx as nx
#from gensim.models import Word2Vec
from itertools import chain, combinations
from collections import defaultdict
import os,sys, copy, time, math, pickle
import itertools
import scipy.io
#import pynauty
import random
from scipy.spatial.distance import pdist, squareform
#import pyGPs
import scipy.stats
import pylab as pl
#import GraphMeasure
def gen_syn_ds(OUTPUTDIR):
    """Generate the synthetic graph dataset (Python 2).

    Writes 1000 Erdos-Renyi and 1000 Barabasi-Albert graphs (10 sizes x
    10 density params x 10 seeds each) to OUTPUTDIR as edgelists named
    "<key>-<ER|BA>_<n>_<p|m>_<seed>.edgelist"; every 50th graph is also
    rendered to an SVG.
    """
    print "genrating synthetic dataset..."
    GraphSet=[]
    numgraphs=2000  # NOTE(review): unused; the loops below fix the count
    n_set=np.array((20,30,40,50,60,70,80,90,100,110))#for ER and BA
    p_set=np.array((0.075,0.1,0.125,0.15,0.175,0.2,0.225,0.25,0.275,0.3))#for ER
    m_set=np.array((1,2,3,4,5,6,7,8,9,10))#for BA
    rseed_set=np.array((314150,312213,434234,264852,231255,659956,435347,898232,675665,234690))
    # key enumerates graphs across BOTH families: ER gets 0..999, BA 1000..1999.
    key=-1
    ##ER
    for n in range(len(n_set)):
        for p in range(len(p_set)):
            for rseed in range(len(rseed_set)):
                key+=1
                G1=nx.fast_gnp_random_graph(n_set[n],p_set[p],rseed_set[rseed])
                nx.write_edgelist(G1, "%s/%s-ER_%s_%s_%s.edgelist"%(OUTPUTDIR,key,n_set[n],p_set[p],rseed_set[rseed]))
                GraphSet.append(G1)
                if np.mod(key,50)==0:
                    nx.draw_circular(G1, with_labels=True, font_weight='bold')
                    pl.savefig("%s/%s-ER_%s_%s_%s.svg"%(OUTPUTDIR,key,n_set[n],p_set[p],rseed_set[rseed]))
#                    pl.show()
    ##BA
    for n in range(len(n_set)):
        for m in range(len(m_set)):
            for rseed in range(len(rseed_set)):
                key+=1
                G1=nx.barabasi_albert_graph(n_set[n],m_set[m],rseed_set[rseed])
                nx.write_edgelist(G1, "%s/%s-BA_%s_%s_%s.edgelist"%(OUTPUTDIR,key,n_set[n],m_set[m],rseed_set[rseed]))
                GraphSet.append(G1)
                if np.mod(key,50)==0:
                    nx.draw_circular(G1, with_labels=True, font_weight='bold')
                    pl.savefig("%s/%s-BA_%s_%s_%s.svg"%(OUTPUTDIR,key,n_set[n],m_set[m],rseed_set[rseed]))
#                    pl.show()
def get_syn_ds_name_idx(idx):
    """Return the edgelist filename for graph index `idx` of the 2000-graph set.

    Reconstructs the filename produced by gen_syn_ds: the generator counts
    keys 0..999 over the ER grid (n outer, p middle, seed inner) and
    1000..1999 over the BA grid (n outer, m middle, seed inner), so
    key = family_offset + n*100 + param*10 + seed.

    Parameters:
        idx — integer graph key in [0, 2000).
    Returns:
        The ".edgelist" basename (no directory) for that graph.
    Raises:
        ValueError — if idx is outside [0, 2000).
    (Original body referenced undefined loop indices n/p/rseed and returned
    nothing; this version derives them from idx.)
    """
    n_set = np.array((20, 30, 40, 50, 60, 70, 80, 90, 100, 110))  # for ER and BA
    p_set = np.array((0.075, 0.1, 0.125, 0.15, 0.175, 0.2, 0.225, 0.25, 0.275, 0.3))  # for ER
    m_set = np.array((1, 2, 3, 4, 5, 6, 7, 8, 9, 10))  # for BA
    rseed_set = np.array((314150, 312213, 434234, 264852, 231255,
                          659956, 435347, 898232, 675665, 234690))
    if idx < 0 or idx >= 2000:
        raise ValueError("graph index out of range [0, 2000): %r" % (idx,))
    if idx < 1000:
        # Erdos-Renyi half of the dataset.
        rel = idx
        n, p, rseed = rel // 100, (rel // 10) % 10, rel % 10
        return "%s-ER_%s_%s_%s.edgelist" % (idx, n_set[n], p_set[p], rseed_set[rseed])
    # Barabasi-Albert half of the dataset.
    rel = idx - 1000
    n, m, rseed = rel // 100, (rel // 10) % 10, rel % 10
    return "%s-BA_%s_%s_%s.edgelist" % (idx, n_set[n], m_set[m], rseed_set[rseed])
def read_syn_ds(OUTPUTDIR):
    """Load the 500-graph synthetic dataset from OUTPUTDIR (Python 2).

    Reads 250 ER + 250 BA edgelists (5 sizes x 5 density params x 10 seeds)
    named with the same key scheme gen_syn_ds uses, and returns them as a
    list of networkx graphs. The triple-quoted blocks below are dead code
    kept from earlier statistics gathering.
    """
    print "loading synthetic dataset..."
    GraphSet=[]
    numgraphs=500
    n_set=np.array((20,30,40,50,60))#for ER and BA
    p_set=np.array((0.1,0.15,0.2,0.25,0.3))#for ER
    m_set=np.array((1,2,3,4,5))#for BA
    rseed_set=np.array((314150,312213,434234,264852,231255,659956,435347,898232,675665,234690))
    #
    # The min/max accumulators below are only used by the commented-out
    # statistics code and are otherwise dead.
    nodenum_max=0
    nodenum_min=1000000000
    edgenum_max=0
    edgenum_min=1000000000
    avgdeg_max=0
    avgdeg_min=1000000000
    avgbet_max=0
    avgbet_min=1000000000
    avgclo_max=0
    avgclo_min=1000000000
    avgclu_max=0
    avgclu_min=1000000000
    num_cliques_max=0
    num_cliques_min=100000000
    num_con_max=0
    num_con_min=100000000
    #    test=[]
    key=-1
    ##ER
    for n in range(len(n_set)):
        for p in range(len(p_set)):
            for rseed in range(len(rseed_set)):
                key+=1
                G1=nx.read_edgelist("%s/%s-ER_%s_%s_%s.edgelist"%(OUTPUTDIR,key,n_set[n],p_set[p],rseed_set[rseed]))
                GraphSet.append(G1)
                """
                nodenum_max=max(nodenum_max,G1.number_of_nodes())
                nodenum_min=min(nodenum_min,G1.number_of_nodes())
                edgenum_max=max(edgenum_max,G1.number_of_edges())
                edgenum_min=min(edgenum_min,G1.number_of_edges())
                avgdeg_max=max(avgdeg_max,np.mean(nx.degree_centrality(G1).values()))
                avgdeg_min=min(avgdeg_min,np.mean(nx.degree_centrality(G1).values()))
                avgbet_max=max(avgbet_max,np.mean(nx.betweenness_centrality(G1).values()))
                avgbet_min=min(avgbet_min,np.mean(nx.betweenness_centrality(G1).values()))
                #avgclo_max=max(avgclo_max,np.mean(nx.closeness_centrality(G1).values()))
                #avgclo_min=min(avgclo_min,np.mean(nx.closeness_centrality(G1).values()))
                avgclu_max=max(avgclu_max,nx.average_clustering(G1))
                avgclu_min=min(avgclu_min,nx.average_clustering(G1))
                #                num_cliques_max=max(num_cliques_max,nx.graph_number_of_cliques(G1))
                #                num_cliques_min=min(num_cliques_min,nx.graph_number_of_cliques(G1))
                #                num_con_max=max(num_con_max,nx.number_connected_components(G1))
                #                num_con_min=min(num_con_min,nx.number_connected_components(G1))
                #                c=nx.degree_histogram(G1)
                #                idxc=c.index(max(c))
                #                num_con_max=max(num_con_max,idxc+1)
                #                num_con_min=min(num_con_min,idxc+1)
                #                print idxc+1
                #                test.append(idxc+1)
                """
    ##BA
    for n in range(len(n_set)):
        for m in range(len(m_set)):
            for rseed in range(len(rseed_set)):
                key+=1
                G1=nx.read_edgelist("%s/%s-BA_%s_%s_%s.edgelist"%(OUTPUTDIR,key,n_set[n],m_set[m],rseed_set[rseed]))
                GraphSet.append(G1)
                """
                nodenum_max=max(nodenum_max,G1.number_of_nodes())
                nodenum_min=min(nodenum_min,G1.number_of_nodes())
                edgenum_max=max(edgenum_max,G1.number_of_edges())
                edgenum_min=min(edgenum_min,G1.number_of_edges())
                avgdeg_max=max(avgdeg_max,np.mean(nx.degree_centrality(G1).values()))
                avgdeg_min=min(avgdeg_min,np.mean(nx.degree_centrality(G1).values()))
                avgbet_max=max(avgbet_max,np.mean(nx.betweenness_centrality(G1).values()))
                avgbet_min=min(avgbet_min,np.mean(nx.betweenness_centrality(G1).values()))
                #                avgclo_max=max(avgclo_max,np.mean(nx.closeness_centrality(G1).values()))
                #                avgclo_min=min(avgclo_min,np.mean(nx.closeness_centrality(G1).values()))
                avgclu_max=max(avgclu_max,nx.average_clustering(G1))
                avgclu_min=min(avgclu_min,nx.average_clustering(G1))
                #                num_cliques_max=max(num_cliques_max,nx.graph_number_of_cliques(G1))
                #                num_cliques_min=min(num_cliques_min,nx.graph_number_of_cliques(G1))
                #                num_con_max=max(num_con_max,nx.number_connected_components(G1))
                #                num_con_min=min(num_con_min,nx.number_connected_components(G1))
                #                c=nx.degree_histogram(G1)
                #                idxc=c.index(max(c))
                #                num_con_max=max(num_con_max,idxc+1)
                #                num_con_min=min(num_con_min,idxc+1)
                #                print idxc+1
                #                test.append(idxc+1)
                """
    #    print num_con_max,num_con_min
    #    print num_cliques_max,num_cliques_min ,num_con_max,num_con_min
    #    print nodenum_max,nodenum_min,edgenum_max,edgenum_min,avgdeg_max,avgdeg_min,avgbet_max,avgbet_min,avgclo_max,avgclo_min,avgclu_max,avgclu_min
    #    print nodenum_max,nodenum_min,edgenum_max,edgenum_min,avgdeg_max,avgdeg_min,avgbet_max,avgbet_min,avgclu_max,avgclu_min
    #    pl.plot(range(500),test)
    return GraphSet
def read_syn_ds_2000(OUTPUTDIR):
    """Load the 2000-graph synthetic dataset from OUTPUTDIR (Python 2).

    Near-duplicate of read_syn_ds but with the full 10x10x10 parameter grid
    (1000 ER + 1000 BA graphs), matching gen_syn_ds's output. Returns a
    list of networkx graphs. The triple-quoted blocks are dead statistics
    code retained from earlier experiments.
    """
    print "loading synthetic dataset..."
    GraphSet=[]
    numgraphs=2000
    n_set=np.array((20,30,40,50,60,70,80,90,100,110))#for ER and BA
    p_set=np.array((0.075,0.1,0.125,0.15,0.175,0.2,0.225,0.25,0.275,0.3))#for ER
    m_set=np.array((1,2,3,4,5,6,7,8,9,10))#for BA
    rseed_set=np.array((314150,312213,434234,264852,231255,659956,435347,898232,675665,234690))
    #
    # Dead accumulators — only referenced by the commented-out statistics code.
    nodenum_max=0
    nodenum_min=1000000000
    edgenum_max=0
    edgenum_min=1000000000
    avgdeg_max=0
    avgdeg_min=1000000000
    avgbet_max=0
    avgbet_min=1000000000
    avgclo_max=0
    avgclo_min=1000000000
    avgclu_max=0
    avgclu_min=1000000000
    num_cliques_max=0
    num_cliques_min=100000000
    num_con_max=0
    num_con_min=100000000
    #    test=[]
    key=-1
    ##ER
    for n in range(len(n_set)):
        for p in range(len(p_set)):
            for rseed in range(len(rseed_set)):
                key+=1
                G1=nx.read_edgelist("%s/%s-ER_%s_%s_%s.edgelist"%(OUTPUTDIR,key,n_set[n],p_set[p],rseed_set[rseed]))
                GraphSet.append(G1)
                """
                nodenum_max=max(nodenum_max,G1.number_of_nodes())
                nodenum_min=min(nodenum_min,G1.number_of_nodes())
                edgenum_max=max(edgenum_max,G1.number_of_edges())
                edgenum_min=min(edgenum_min,G1.number_of_edges())
                avgdeg_max=max(avgdeg_max,np.mean(nx.degree_centrality(G1).values()))
                avgdeg_min=min(avgdeg_min,np.mean(nx.degree_centrality(G1).values()))
                avgbet_max=max(avgbet_max,np.mean(nx.betweenness_centrality(G1).values()))
                avgbet_min=min(avgbet_min,np.mean(nx.betweenness_centrality(G1).values()))
                #avgclo_max=max(avgclo_max,np.mean(nx.closeness_centrality(G1).values()))
                #avgclo_min=min(avgclo_min,np.mean(nx.closeness_centrality(G1).values()))
                avgclu_max=max(avgclu_max,nx.average_clustering(G1))
                avgclu_min=min(avgclu_min,nx.average_clustering(G1))
                #                num_cliques_max=max(num_cliques_max,nx.graph_number_of_cliques(G1))
                #                num_cliques_min=min(num_cliques_min,nx.graph_number_of_cliques(G1))
                #                num_con_max=max(num_con_max,nx.number_connected_components(G1))
                #                num_con_min=min(num_con_min,nx.number_connected_components(G1))
                #                c=nx.degree_histogram(G1)
                #                idxc=c.index(max(c))
                #                num_con_max=max(num_con_max,idxc+1)
                #                num_con_min=min(num_con_min,idxc+1)
                #                print idxc+1
                #                test.append(idxc+1)
                """
    ##BA
    for n in range(len(n_set)):
        for m in range(len(m_set)):
            for rseed in range(len(rseed_set)):
                key+=1
                G1=nx.read_edgelist("%s/%s-BA_%s_%s_%s.edgelist"%(OUTPUTDIR,key,n_set[n],m_set[m],rseed_set[rseed]))
                GraphSet.append(G1)
                """
                nodenum_max=max(nodenum_max,G1.number_of_nodes())
                nodenum_min=min(nodenum_min,G1.number_of_nodes())
                edgenum_max=max(edgenum_max,G1.number_of_edges())
                edgenum_min=min(edgenum_min,G1.number_of_edges())
                avgdeg_max=max(avgdeg_max,np.mean(nx.degree_centrality(G1).values()))
                avgdeg_min=min(avgdeg_min,np.mean(nx.degree_centrality(G1).values()))
                avgbet_max=max(avgbet_max,np.mean(nx.betweenness_centrality(G1).values()))
                avgbet_min=min(avgbet_min,np.mean(nx.betweenness_centrality(G1).values()))
                #                avgclo_max=max(avgclo_max,np.mean(nx.closeness_centrality(G1).values()))
                #                avgclo_min=min(avgclo_min,np.mean(nx.closeness_centrality(G1).values()))
                avgclu_max=max(avgclu_max,nx.average_clustering(G1))
                avgclu_min=min(avgclu_min,nx.average_clustering(G1))
                #                num_cliques_max=max(num_cliques_max,nx.graph_number_of_cliques(G1))
                #                num_cliques_min=min(num_cliques_min,nx.graph_number_of_cliques(G1))
                #                num_con_max=max(num_con_max,nx.number_connected_components(G1))
                #                num_con_min=min(num_con_min,nx.number_connected_components(G1))
                #                c=nx.degree_histogram(G1)
                #                idxc=c.index(max(c))
                #                num_con_max=max(num_con_max,idxc+1)
                #                num_con_min=min(num_con_min,idxc+1)
                #                print idxc+1
                #                test.append(idxc+1)
                """
    #    print num_con_max,num_con_min
    #    print num_cliques_max,num_cliques_min ,num_con_max,num_con_min
    #    print nodenum_max,nodenum_min,edgenum_max,edgenum_min,avgdeg_max,avgdeg_min,avgbet_max,avgbet_min,avgclo_max,avgclo_min,avgclu_max,avgclu_min
    #    print nodenum_max,nodenum_min,edgenum_max,edgenum_min,avgdeg_max,avgdeg_min,avgbet_max,avgbet_min,avgclu_max,avgclu_min
    #    pl.plot(range(500),test)
    return GraphSet
def statistics_info(GraphSet):
    """Plot histograms of four min-max-normalized statistics over GraphSet.

    For every graph, the node count (x1), edge count (x2), mean degree
    centrality (x3) and mean betweenness centrality (x4) are normalized
    with fixed bounds and shown as four histogram figures.

    Parameters:
        GraphSet: iterable of networkx graphs (as returned by read_syn_ds).
    """
    x1set = []
    x2set = []
    x3set = []
    x4set = []
    # Iterate the graphs directly instead of indexing via range(len(...)).
    for G in GraphSet:
        nodenum = G.number_of_nodes()
        edgenum = G.number_of_edges()
        # BUG FIX: on Python 3, dict.values() is a view and np.mean on a
        # view does not compute the mean of its elements; materialize it
        # as a list first (also works unchanged on Python 2).
        avgdeg = np.mean(list(nx.degree_centrality(G).values()))
        avgbet = np.mean(list(nx.betweenness_centrality(G).values()))
        # Min-max normalization; the bounds presumably are the observed
        # min/max of the synthetic dataset (see the commented-out
        # statistics collection in read_syn_ds) — TODO confirm.
        x1set.append((nodenum - 12.0) / (60.0 - 12.0))
        x2set.append((edgenum - 11.0) / (579.0 - 11.0))
        x3set.append((avgdeg - 0.0333) / (0.3948 - 0.0333))
        x4set.append((avgbet - 0.0116) / (0.1683 - 0.0116))
    # One histogram figure per normalized statistic.
    for fig_idx, xset in enumerate([x1set, x2set, x3set, x4set], start=1):
        pl.figure(fig_idx)
        pl.hist(np.array(xset))
        pl.title('x%d' % fig_idx)
    pl.show()
if __name__ == "__main__":
    # Location of the pre-generated synthetic graph datasets.
    output_dir = "datasets/synthetic_datasets_2000"
    # gen_syn_ds(output_dir)  # uncomment to regenerate the datasets
    read_syn_ds(output_dir)
    # statistics_info(read_syn_ds(output_dir))  # uncomment to plot histograms
| 46.990033
| 146
| 0.602022
| 2,073
| 14,144
| 3.840328
| 0.097443
| 0.033162
| 0.026127
| 0.016581
| 0.862329
| 0.854164
| 0.851903
| 0.842482
| 0.842482
| 0.822635
| 0
| 0.083936
| 0.26718
| 14,144
| 300
| 147
| 47.146667
| 0.684129
| 0.081731
| 0
| 0.603774
| 0
| 0
| 0.059304
| 0.041333
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.075472
| null | null | 0.018868
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c5ecaee4addc619678819d205e71153d4f372c1b
| 620
|
py
|
Python
|
eval_covid20cases_timm-regnetx_002_RandomBrightnessContrast.py
|
BrunoKrinski/segtool
|
cb604b5f38104c43a76450136e37c3d1c4b6d275
|
[
"MIT"
] | null | null | null |
eval_covid20cases_timm-regnetx_002_RandomBrightnessContrast.py
|
BrunoKrinski/segtool
|
cb604b5f38104c43a76450136e37c3d1c4b6d275
|
[
"MIT"
] | null | null | null |
eval_covid20cases_timm-regnetx_002_RandomBrightnessContrast.py
|
BrunoKrinski/segtool
|
cb604b5f38104c43a76450136e37c3d1c4b6d275
|
[
"MIT"
] | null | null | null |
import os

# Run the covid20cases unetplusplus/timm-regnetx_002 evaluation with the
# RandomBrightnessContrast augmentation for each of the five folds (0-4).
# The config file names differ only in the fold index.
for fold in range(5):
    command = ('python main.py --configs configs/'
               'eval_covid20cases_unetplusplus_timm-regnetx_002_%d'
               '_RandomBrightnessContrast.yml' % fold)
    os.system(command)
| 56.363636
| 118
| 0.866129
| 80
| 620
| 6.3375
| 0.3
| 0.098619
| 0.118343
| 0.187377
| 0.883629
| 0.883629
| 0.883629
| 0.883629
| 0.883629
| 0.883629
| 0
| 0.050934
| 0.05
| 620
| 11
| 119
| 56.363636
| 0.809847
| 0
| 0
| 0
| 0
| 0
| 0.89372
| 0.692432
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.111111
| 0
| 0.111111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
c5ef2e8c59a8186345c55bf98c78d7d1353e1d73
| 4,232
|
py
|
Python
|
test/test_modify_contact.py
|
AndreyTracevsky/AddressBook
|
8f27fb333ee18956c9a272c04b948aaec929e60e
|
[
"Apache-2.0"
] | null | null | null |
test/test_modify_contact.py
|
AndreyTracevsky/AddressBook
|
8f27fb333ee18956c9a272c04b948aaec929e60e
|
[
"Apache-2.0"
] | null | null | null |
test/test_modify_contact.py
|
AndreyTracevsky/AddressBook
|
8f27fb333ee18956c9a272c04b948aaec929e60e
|
[
"Apache-2.0"
] | null | null | null |
from model.contact import Contact
def _check_first_contact_modification(app, contact):
    # Shared scenario for every field-modification test below: guarantee at
    # least one contact exists, modify the first contact with *contact*'s
    # fields, and verify the total contact count is unchanged.
    if app.contact.count() == 0:
        app.contact.create_contact(Contact(firstname="add contact", email="test"))
    old_contacts = app.contact.get_contact_list()
    app.contact.modify_first_contact(contact)
    new_contacts = app.contact.get_contact_list()
    assert len(old_contacts) == len(new_contacts)

def test_modify_contact_firstname(app):
    """Modifying the first contact's firstname keeps the contact count."""
    _check_first_contact_modification(app, Contact(firstname="Vasilii"))

def test_modify_contact_middlename(app):
    """Modifying the first contact's middlename keeps the contact count."""
    _check_first_contact_modification(app, Contact(middlename="Vasilievich"))

def test_modify_contact_lastname(app):
    """Modifying the first contact's lastname keeps the contact count."""
    _check_first_contact_modification(app, Contact(lastname="Vasilkov"))

def test_modify_contact_nickname(app):
    """Modifying the first contact's nickname keeps the contact count."""
    _check_first_contact_modification(app, Contact(nickname="ZverOK"))

def test_modify_contact_title(app):
    """Modifying the first contact's title keeps the contact count."""
    _check_first_contact_modification(app, Contact(title="Automated"))

def test_modify_contact_company(app):
    """Modifying the first contact's company keeps the contact count."""
    _check_first_contact_modification(app, Contact(company="Nike"))

def test_modify_contact_address(app):
    """Modifying the first contact's address keeps the contact count."""
    _check_first_contact_modification(app, Contact(address="M.Tanka 34/1"))

def test_modify_contact_phone_home(app):
    """Modifying the first contact's home phone keeps the contact count."""
    _check_first_contact_modification(app, Contact(phone_home="80171111111"))

def test_modify_contact_phone_mobile(app):
    """Modifying the first contact's mobile phone keeps the contact count."""
    _check_first_contact_modification(app, Contact(phone_mobile="80442222222"))

def test_modify_contact_phone_work(app):
    """Modifying the first contact's work phone keeps the contact count."""
    _check_first_contact_modification(app, Contact(phone_work="80443333333"))

def test_modify_contact_email(app):
    """Modifying the first contact's email keeps the contact count."""
    _check_first_contact_modification(app, Contact(email="tester@ya.ru"))
| 41.90099
| 86
| 0.727788
| 566
| 4,232
| 5.159011
| 0.086572
| 0.188356
| 0.135616
| 0.158219
| 0.9
| 0.9
| 0.9
| 0.9
| 0.9
| 0.9
| 0
| 0.013009
| 0.146267
| 4,232
| 100
| 87
| 42.32
| 0.795184
| 0
| 0
| 0.705128
| 0
| 0
| 0.063091
| 0
| 0
| 0
| 0
| 0
| 0.141026
| 1
| 0.141026
| false
| 0
| 0.012821
| 0
| 0.153846
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c5fe0ac02cb8345f7a4aca525c25645d941801b5
| 14,267
|
py
|
Python
|
test/test_function/test_lambda_expression.py
|
takahish/lispy
|
8a6eaf209d1564d20b457cbac7428b78dc529241
|
[
"Apache-2.0"
] | 4
|
2018-04-07T09:11:29.000Z
|
2021-11-20T03:02:07.000Z
|
test/test_function/test_lambda_expression.py
|
takahish/clispy
|
8a6eaf209d1564d20b457cbac7428b78dc529241
|
[
"Apache-2.0"
] | null | null | null |
test/test_function/test_lambda_expression.py
|
takahish/clispy
|
8a6eaf209d1564d20b457cbac7428b78dc529241
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Takahiro Ishikawa. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import unittest
from clispy.evaluator import Evaluator
from clispy.expander import Expander
from clispy.function.lambda_expression import Lambda
from clispy.package import PackageManager
from clispy.parser import Parser
from clispy.type import Integer
class LambdaUnitTestCase(unittest.TestCase):
    """This test is to check clispy.function_.lambda_expression.Lambda.

    Lambda is base of user defined function and macro.
    """

    @staticmethod
    def _env():
        # The (variable, function, macro) lexical environments of the
        # current package, in the positional order Lambda and its call
        # protocol expect. Extracted because the triple was repeated
        # verbatim in every test.
        return (
            PackageManager.current_package.env['VARIABLE'],
            PackageManager.current_package.env['FUNCTION'],
            PackageManager.current_package.env['MACRO'],
        )

    def _make_lambda(self, source):
        # Parse *source* and wrap it in a Lambda bound to the current
        # package environments.
        return Lambda(Parser.parse(source), *self._env())

    def _call(self, lambda_func, args_source):
        # Call *lambda_func* with the parsed *args_source* in the current
        # package environments and return the result.
        return lambda_func(Parser.parse(args_source), *self._env())

    def testLambda(self):
        """Checks an instance of Lambda and object official representation.
        """
        lambda_func = self._make_lambda('((x) (* x x x))')
        # Checks lambda function.
        self.assertTrue(callable(lambda_func))
        # Checks official representation.
        self.assertRegex(str(lambda_func), r"<FUNCTION LAMBDA \{[0-9A-Z]+\}")

    def testLambda_properties(self):
        """Checks object properties. Properties are as follow,
        self.params: Parameters.
        self.forms: Body (forms).
        self.var_env: Lexical variable environment.
        self.func_env: Lexical function environment.
        self.macro_env: Lexical macro environment.
        """
        lambda_func = self._make_lambda('((x) (* x x x))')
        var_env, func_env, macro_env = self._env()
        # Checks lambda_func.params.
        self.assertEqual(lambda_func.params, ['X'])
        # Checks lambda_func.forms.
        self.assertEqual(str(lambda_func.forms), '(* X X X)')
        # Checks lambda_func lexical scope.
        self.assertTrue(lambda_func.var_env is var_env)
        self.assertTrue(lambda_func.func_env is func_env)
        self.assertTrue(lambda_func.macro_env is macro_env)

    def testLambda_call(self):
        """Checks call method of Lambda. The body of Lambda is expanded
        and executed when the method is called.
        """
        lambda_func = self._make_lambda('((x) (* x x x))')
        retval = self._call(lambda_func, '(2)')
        # BUG FIX: the original used assertTrue(retval, Integer(8)), which
        # passes Integer(8) as the failure *message* and only checks
        # truthiness. Assert identity as the other tests in this class do.
        self.assertTrue(retval is Integer(8))

    def testLambda_call_evaluate_argument(self):
        """Arguments are evaluated before the call method is executed.
        """
        lambda_func = self._make_lambda('((x) (* x x x))')
        # The argument is an expression: (* 2 2 2) => 8, then 8**3 = 512.
        retval = self._call(lambda_func, '((* 2 2 2))')
        # BUG FIX: assertTrue(retval, Integer(512)) only checked truthiness;
        # compare via the printed representation as done elsewhere in this
        # class for non-trivial values.
        self.assertEqual(str(retval), '512')

    def testLambda_call_expand_argument(self):
        """Arguments are expanded before the call method is executed.
        """
        lambda_func = self._make_lambda('((x) (* x x x))')
        # Define macro cube: (cube x) => (* x x x).
        exp = Expander.expand(
            Parser.parse('(defmacro cube (x) `(* ,x ,x ,x))'),
            *self._env()
        )
        Evaluator.eval(exp, *self._env())
        # The argument is a macro form: (cube 2) => 8, then 8**3 = 512.
        retval = self._call(lambda_func, '((cube 2))')
        # BUG FIX: assertTrue(retval, Integer(512)) only checked truthiness.
        self.assertEqual(str(retval), '512')

    def testLambda_properties_optional_accessor(self):
        """Checks a property of optional accessor for arguments.
        """
        lambda_func = self._make_lambda('((x &optional y) (* x x x))')
        # Checks lambda_func.params.
        self.assertEqual(lambda_func.params, ['X', 'Y'])
        # Checks lambda_func.accessor_index.
        self.assertEqual(lambda_func.accessor_index['&OPTIONAL'], 1)

    def testLambda_properties_rest_accessor(self):
        """Checks a property of rest accessor for arguments.
        """
        lambda_func = self._make_lambda('((x &rest y) (* x x x))')
        # Checks lambda_func.params.
        self.assertEqual(lambda_func.params, ['X', 'Y'])
        # Checks lambda_func.accessor_index.
        self.assertEqual(lambda_func.accessor_index['&REST'], 1)

    def testLambda_properties_keyword_accessor(self):
        """Checks a property of keyword accessor for arguments.
        """
        lambda_func = self._make_lambda('((x &key y) (* x x x))')
        # Checks lambda_func.params.
        self.assertEqual(lambda_func.params, ['X', 'Y'])
        # Checks lambda_func.accessor_index.
        self.assertEqual(lambda_func.accessor_index['&KEY'], 1)

    def testLambda_call_optional_argument(self):
        """Checks assigning optional arguments.
        """
        lambda_func = self._make_lambda('((x &optional y) (if y (* x x) (* x x x))))')
        # When the optional argument is not given, Null() is bound to it,
        # so the result is (* x x x).
        self.assertTrue(self._call(lambda_func, '(2)') is Integer(8))
        # When the optional argument is given, the result is (* x x).
        self.assertTrue(self._call(lambda_func, '(2 t)') is Integer(4))

    def testLambda_call_optional_argument_with_default_value(self):
        """Checks assigning optional arguments with a default value.
        """
        lambda_func = self._make_lambda('((x &optional (y t)) (if y (* x x) (* x x x))))')
        # The default y = t applies, so the result is (* x x).
        self.assertTrue(self._call(lambda_func, '(2)') is Integer(4))
        # An explicit nil overrides the default, giving (* x x x).
        self.assertTrue(self._call(lambda_func, '(2 nil)') is Integer(8))

    def testLambda_call_rest_argument(self):
        """Checks assigning rest arguments.
        """
        lambda_func = self._make_lambda('((x &rest y) (cons x y))')
        retval = self._call(lambda_func, '(1 2 3 4 5)')
        # retval is result of being given &rest parameter.
        self.assertEqual(str(retval), '(1 2 3 4 5)')

    def testLambda_call_keyword_argument(self):
        """Checks assigning keyword arguments.
        """
        lambda_func = self._make_lambda('((x &key y) (if y (* x x) (* x x x))))')
        # When the keyword argument is not given, Null() is bound to it,
        # so the result is (* x x x).
        self.assertTrue(self._call(lambda_func, '(2)') is Integer(8))
        # When the keyword argument is given, the result is (* x x).
        self.assertTrue(self._call(lambda_func, '(2 :y t)') is Integer(4))

    def testLambda_call_keyword_argument_with_default_value(self):
        """Checks assigning keyword arguments with a default value.
        """
        lambda_func = self._make_lambda('((x &key (y t)) (if y (* x x) (* x x x))))')
        # The default y = t applies, so the result is (* x x).
        self.assertTrue(self._call(lambda_func, '(2)') is Integer(4))
        # An explicit :y nil overrides the default, giving (* x x x).
        self.assertTrue(self._call(lambda_func, '(2 :y nil)') is Integer(8))
| 36.488491
| 95
| 0.606294
| 1,534
| 14,267
| 5.518905
| 0.112125
| 0.208363
| 0.277817
| 0.307583
| 0.780061
| 0.762816
| 0.742618
| 0.736948
| 0.736948
| 0.729506
| 0
| 0.004964
| 0.279947
| 14,267
| 390
| 96
| 36.582051
| 0.819138
| 0.249737
| 0
| 0.644144
| 0
| 0
| 0.106399
| 0
| 0
| 0
| 0
| 0
| 0.112613
| 1
| 0.058559
| false
| 0
| 0.031532
| 0
| 0.094595
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
a8638ada9ca0da9ca27f9b77d1e3ea4c20a522da
| 18,138
|
py
|
Python
|
scripts/main_12_binary_classification_00.py
|
AshivDhondea/ENSC813_Project
|
3606abff4b9e42282b5f7a6971f0554704bb037d
|
[
"MIT"
] | 2
|
2020-05-01T17:00:37.000Z
|
2020-05-14T09:03:16.000Z
|
scripts/main_12_binary_classification_00.py
|
AshivDhondea/ENSC813_Project
|
3606abff4b9e42282b5f7a6971f0554704bb037d
|
[
"MIT"
] | null | null | null |
scripts/main_12_binary_classification_00.py
|
AshivDhondea/ENSC813_Project
|
3606abff4b9e42282b5f7a6971f0554704bb037d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 13 03:57:32 2020
Binary classification
Ensembling the classifiers
using normalized correlation combination
Saving the results in tex files for easy importing in the report
@author: Ashiv Hans Dhondea
"""
# --------------------------------------------------------------------------- #
# Import the necessary packages
# numpy for linear algebra, cv2 for image processing
# glob and os to navigate directories
import numpy as np
import pandas as pd
import os
import sys
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
# --------------------------------------------------------------------------- #
# Sort out utilities for file naming
# get the name of this script
file_name = os.path.basename(sys.argv[0]);
if file_name[-3:] == '.py':
script_name = file_name[:-3];
elif file_name[-3:] == '.ipynb':
script_name = file_name[:-6];
else:
script_name = 'main_xx';
full_name = script_name+'_';
# --------------------------------------------------------------------------- #
# Classification task number 3 is Honda v. Toyota -> task = 2
names_1 = ['Audi', 'Lexus', 'Honda']
names_2 = ['BMW', 'Mercedes-Benz', 'Toyota']
"""
Model 1 - Simple CNN implemented in
'main_08_binary_classification_00.py'
Model 2 - implemented in
'main_09_binary_classification_00.py'
Model 3 - implemented in
'main_10_binary_classification_00.py'
Model 4 - implemented in
'main_11_binary_classification_00.py'
"""
# The four classifier scripts (main_08 .. main_11) and their model tags are
# generated programmatically rather than spelled out one by one.
script_names = ['main_%02d_binary_classification_00' % k for k in range(8, 12)]
model_names = ['__model_%d' % k for k in range(1, 5)]
num_comparisons = len(names_2)
num_methods = len(model_names)
# --------------------------------------------------------------------------- #
# Audi vs BMW classification
comparison = 0
print('Classifying %s vs. %s' % (names_1[comparison], names_2[comparison]))
model = 0
# Common filename fragment shared by every .npy file of this comparison pair.
pair_tag = '_' + names_1[comparison] + '_' + names_2[comparison]
base_name = script_names[0] + pair_tag + model_names[model]
y_test_1 = np.load(base_name + 'y_test.npy')
y_pred_test_continuous_arr_1 = np.zeros([num_methods, len(y_test_1)], dtype=np.float64)
y_pred_train_continuous_1 = np.load(base_name + 'y_pred_train_continuous.npy')
y_pred_train_continuous_arr_1 = np.zeros([num_methods, len(y_pred_train_continuous_1)], dtype=np.float64)
y_train_1 = np.load(base_name + 'y_train.npy')
# Collect the continuous test/train predictions produced by each model.
for script in range(len(script_names)):
    npy_file_name = script_names[script] + pair_tag + model_names[script]
    y_pred_test_continuous_arr_1[script, :] = np.ravel(np.load(npy_file_name + 'y_pred_test_continuous.npy'))
    y_pred_train_continuous_arr_1[script, :] = np.ravel(np.load(npy_file_name + 'y_pred_train_continuous.npy'))
# Correlation of each model's train-set predictions with the true labels.
y_train_f = y_train_1.astype(np.float32)
r_y_pred_train_1 = np.dot(y_train_f, y_pred_train_continuous_arr_1[0, :]) / len(y_pred_train_continuous_arr_1[0, :])
r_y_pred_train_2 = np.dot(y_train_f, y_pred_train_continuous_arr_1[1, :]) / len(y_pred_train_continuous_arr_1[1, :])
r_y_pred_train_3 = np.dot(y_train_f, y_pred_train_continuous_arr_1[2, :]) / len(y_pred_train_continuous_arr_1[2, :])
r_y_pred_train_4 = np.dot(y_train_f, y_pred_train_continuous_arr_1[3, :]) / len(y_pred_train_continuous_arr_1[3, :])
# Correlation vector
r_mat = np.array([[r_y_pred_train_1], [r_y_pred_train_2], [r_y_pred_train_3], [r_y_pred_train_4]])
# normalized correlation vector
a_weights = r_mat / np.sum(r_mat)
# binarize predictions at the 0.5 decision threshold
y_pred_test_1_b = (y_pred_test_continuous_arr_1[0, :] > 0.5).astype(int)
y_pred_test_2_b = (y_pred_test_continuous_arr_1[1, :] > 0.5).astype(int)
y_pred_test_3_b = (y_pred_test_continuous_arr_1[2, :] > 0.5).astype(int)
y_pred_test_4_b = (y_pred_test_continuous_arr_1[3, :] > 0.5).astype(int)
# ensemble: correlation-weighted average of the model probabilities
prob_arr = np.transpose(y_pred_test_continuous_arr_1)
y_pred_test_continuous_mean = np.dot(prob_arr, a_weights)
y_pred_test_mean_b = (y_pred_test_continuous_mean > 0.5).astype(int)
# compute accuracies
y_pred_test_1_b_acc = np.mean(y_pred_test_1_b.ravel() == y_test_1)
y_pred_test_2_b_acc = np.mean(y_pred_test_2_b.ravel() == y_test_1)
y_pred_test_3_b_acc = np.mean(y_pred_test_3_b.ravel() == y_test_1)
y_pred_test_4_b_acc = np.mean(y_pred_test_4_b.ravel() == y_test_1)
y_pred_test_mean_acc = np.mean(y_pred_test_mean_b.ravel() == y_test_1)
columns_names = ['Accuracy']
method_names = ['Model 1', 'Model 2', 'Model 3', 'Model 4', 'Ensemble']
data_results = np.array([y_pred_test_1_b_acc, y_pred_test_2_b_acc, y_pred_test_3_b_acc, y_pred_test_4_b_acc, y_pred_test_mean_acc])
df_table_acc = pd.DataFrame(data=data_results, index=method_names, columns=columns_names)
print('Binary classification task %d' % (comparison + 1))
print(df_table_acc)
results_table_name = full_name + '_' + names_1[comparison] + '_' + names_2[comparison]
with open(results_table_name + '_accuracy.tex', 'w') as texfile:
    texfile.write(df_table_acc.to_latex())
# One classification report per method, written to its own .tex file.
for method_idx, y_pred_b in enumerate([y_pred_test_1_b, y_pred_test_2_b, y_pred_test_3_b, y_pred_test_4_b, y_pred_test_mean_b]):
    report = classification_report(y_test_1, y_pred_b, target_names=[names_1[comparison], names_2[comparison]], output_dict=True)
    classification_report_df = pd.DataFrame(report).transpose()
    with open(results_table_name + method_names[method_idx] + '.tex', 'w') as texfile:
        texfile.write(classification_report_df.to_latex())
# Confusion matrix of the ensemble predictions.
confusion_matrix_test = confusion_matrix(y_test_1, y_pred_test_mean_b)
confusion_matrix_test_df = pd.DataFrame(confusion_matrix_test).transpose()
with open(results_table_name + method_names[4] + '_ensemble_confusion_matrix.tex', 'w') as texfile:
    texfile.write(confusion_matrix_test_df.to_latex())
# --------------------------------------------------------------------------- #
# Lexus vs Mercedes-Benz classification
comparison = 1
print('Classifying %s vs. %s' % (names_1[comparison], names_2[comparison]))
model = 0
# Common filename fragment shared by every .npy file of this comparison pair.
pair_tag = '_' + names_1[comparison] + '_' + names_2[comparison]
base_name = script_names[0] + pair_tag + model_names[model]
y_test_1 = np.load(base_name + 'y_test.npy')
y_pred_test_continuous_arr_1 = np.zeros([num_methods, len(y_test_1)], dtype=np.float64)
y_pred_train_continuous_1 = np.load(base_name + 'y_pred_train_continuous.npy')
y_pred_train_continuous_arr_1 = np.zeros([num_methods, len(y_pred_train_continuous_1)], dtype=np.float64)
y_train_1 = np.load(base_name + 'y_train.npy')
# Collect the continuous test/train predictions produced by each model.
for script in range(len(script_names)):
    npy_file_name = script_names[script] + pair_tag + model_names[script]
    y_pred_test_continuous_arr_1[script, :] = np.ravel(np.load(npy_file_name + 'y_pred_test_continuous.npy'))
    y_pred_train_continuous_arr_1[script, :] = np.ravel(np.load(npy_file_name + 'y_pred_train_continuous.npy'))
# Correlation of each model's train-set predictions with the true labels.
y_train_f = y_train_1.astype(np.float32)
r_y_pred_train_1 = np.dot(y_train_f, y_pred_train_continuous_arr_1[0, :]) / len(y_pred_train_continuous_arr_1[0, :])
r_y_pred_train_2 = np.dot(y_train_f, y_pred_train_continuous_arr_1[1, :]) / len(y_pred_train_continuous_arr_1[1, :])
r_y_pred_train_3 = np.dot(y_train_f, y_pred_train_continuous_arr_1[2, :]) / len(y_pred_train_continuous_arr_1[2, :])
r_y_pred_train_4 = np.dot(y_train_f, y_pred_train_continuous_arr_1[3, :]) / len(y_pred_train_continuous_arr_1[3, :])
# Correlation vector
r_mat = np.array([[r_y_pred_train_1], [r_y_pred_train_2], [r_y_pred_train_3], [r_y_pred_train_4]])
# normalized correlation vector
a_weights = r_mat / np.sum(r_mat)
# binarize predictions at the 0.5 decision threshold
y_pred_test_1_b = (y_pred_test_continuous_arr_1[0, :] > 0.5).astype(int)
y_pred_test_2_b = (y_pred_test_continuous_arr_1[1, :] > 0.5).astype(int)
y_pred_test_3_b = (y_pred_test_continuous_arr_1[2, :] > 0.5).astype(int)
y_pred_test_4_b = (y_pred_test_continuous_arr_1[3, :] > 0.5).astype(int)
# ensemble: correlation-weighted average of the model probabilities
prob_arr = np.transpose(y_pred_test_continuous_arr_1)
y_pred_test_continuous_mean = np.dot(prob_arr, a_weights)
y_pred_test_mean_b = (y_pred_test_continuous_mean > 0.5).astype(int)
# compute accuracies
y_pred_test_1_b_acc = np.mean(y_pred_test_1_b.ravel() == y_test_1)
y_pred_test_2_b_acc = np.mean(y_pred_test_2_b.ravel() == y_test_1)
y_pred_test_3_b_acc = np.mean(y_pred_test_3_b.ravel() == y_test_1)
y_pred_test_4_b_acc = np.mean(y_pred_test_4_b.ravel() == y_test_1)
y_pred_test_mean_acc = np.mean(y_pred_test_mean_b.ravel() == y_test_1)
columns_names = ['Accuracy']
method_names = ['Model 1', 'Model 2', 'Model 3', 'Model 4', 'Ensemble']
data_results = np.array([y_pred_test_1_b_acc, y_pred_test_2_b_acc, y_pred_test_3_b_acc, y_pred_test_4_b_acc, y_pred_test_mean_acc])
df_table_acc = pd.DataFrame(data=data_results, index=method_names, columns=columns_names)
print('Binary classification task %d' % (comparison + 1))
print(df_table_acc)
results_table_name = full_name + '_' + names_1[comparison] + '_' + names_2[comparison]
with open(results_table_name + '_accuracy.tex', 'w') as texfile:
    texfile.write(df_table_acc.to_latex())
# One classification report per method, written to its own .tex file.
for method_idx, y_pred_b in enumerate([y_pred_test_1_b, y_pred_test_2_b, y_pred_test_3_b, y_pred_test_4_b, y_pred_test_mean_b]):
    report = classification_report(y_test_1, y_pred_b, target_names=[names_1[comparison], names_2[comparison]], output_dict=True)
    classification_report_df = pd.DataFrame(report).transpose()
    with open(results_table_name + method_names[method_idx] + '.tex', 'w') as texfile:
        texfile.write(classification_report_df.to_latex())
# Confusion matrix of the ensemble predictions.
confusion_matrix_test = confusion_matrix(y_test_1, y_pred_test_mean_b)
confusion_matrix_test_df = pd.DataFrame(confusion_matrix_test).transpose()
with open(results_table_name + method_names[4] + '_ensemble_confusion_matrix.tex', 'w') as texfile:
    texfile.write(confusion_matrix_test_df.to_latex())
# --------------------------------------------------------------------------- #
# Honda vs Toyota classification
comparison = 2
model = 0
print('Classifying %s vs. %s' % (names_1[comparison], names_2[comparison]))
# Common filename fragment shared by every .npy file of this comparison pair.
pair_tag = '_' + names_1[comparison] + '_' + names_2[comparison]
base_name = script_names[0] + pair_tag + model_names[model]
y_test_1 = np.load(base_name + 'y_test.npy')
y_pred_test_continuous_arr_1 = np.zeros([num_methods, len(y_test_1)], dtype=np.float64)
y_pred_train_continuous_1 = np.load(base_name + 'y_pred_train_continuous.npy')
y_pred_train_continuous_arr_1 = np.zeros([num_methods, len(y_pred_train_continuous_1)], dtype=np.float64)
y_train_1 = np.load(base_name + 'y_train.npy')
# Collect the continuous test/train predictions produced by each model.
for script in range(len(script_names)):
    npy_file_name = script_names[script] + pair_tag + model_names[script]
    y_pred_test_continuous_arr_1[script, :] = np.ravel(np.load(npy_file_name + 'y_pred_test_continuous.npy'))
    y_pred_train_continuous_arr_1[script, :] = np.ravel(np.load(npy_file_name + 'y_pred_train_continuous.npy'))
# Correlation of each model's train-set predictions with the true labels.
y_train_f = y_train_1.astype(np.float32)
r_y_pred_train_1 = np.dot(y_train_f, y_pred_train_continuous_arr_1[0, :]) / len(y_pred_train_continuous_arr_1[0, :])
r_y_pred_train_2 = np.dot(y_train_f, y_pred_train_continuous_arr_1[1, :]) / len(y_pred_train_continuous_arr_1[1, :])
r_y_pred_train_3 = np.dot(y_train_f, y_pred_train_continuous_arr_1[2, :]) / len(y_pred_train_continuous_arr_1[2, :])
r_y_pred_train_4 = np.dot(y_train_f, y_pred_train_continuous_arr_1[3, :]) / len(y_pred_train_continuous_arr_1[3, :])
# Correlation vector
r_mat = np.array([[r_y_pred_train_1], [r_y_pred_train_2], [r_y_pred_train_3], [r_y_pred_train_4]])
# normalized correlation vector
a_weights = r_mat / np.sum(r_mat)
# binarize predictions at the 0.5 decision threshold
y_pred_test_1_b = (y_pred_test_continuous_arr_1[0, :] > 0.5).astype(int)
y_pred_test_2_b = (y_pred_test_continuous_arr_1[1, :] > 0.5).astype(int)
y_pred_test_3_b = (y_pred_test_continuous_arr_1[2, :] > 0.5).astype(int)
y_pred_test_4_b = (y_pred_test_continuous_arr_1[3, :] > 0.5).astype(int)
# ensemble: correlation-weighted average of the model probabilities
prob_arr = np.transpose(y_pred_test_continuous_arr_1)
y_pred_test_continuous_mean = np.dot(prob_arr, a_weights)
y_pred_test_mean_b = (y_pred_test_continuous_mean > 0.5).astype(int)
# compute accuracies
y_pred_test_1_b_acc = np.mean(y_pred_test_1_b.ravel() == y_test_1)
y_pred_test_2_b_acc = np.mean(y_pred_test_2_b.ravel() == y_test_1)
y_pred_test_3_b_acc = np.mean(y_pred_test_3_b.ravel() == y_test_1)
y_pred_test_4_b_acc = np.mean(y_pred_test_4_b.ravel() == y_test_1)
y_pred_test_mean_acc = np.mean(y_pred_test_mean_b.ravel() == y_test_1)
columns_names = ['Accuracy']
method_names = ['Model 1', 'Model 2', 'Model 3', 'Model 4', 'Ensemble']
data_results = np.array([y_pred_test_1_b_acc, y_pred_test_2_b_acc, y_pred_test_3_b_acc, y_pred_test_4_b_acc, y_pred_test_mean_acc])
df_table_acc = pd.DataFrame(data=data_results, index=method_names, columns=columns_names)
print('Binary classification task %d' % (comparison + 1))
print(df_table_acc)
results_table_name = full_name + '_' + names_1[comparison] + '_' + names_2[comparison]
# BUG FIX: the original opened and wrote the accuracy table file twice
# back-to-back; a single write produces the identical file.
with open(results_table_name + '_accuracy.tex', 'w') as texfile:
    texfile.write(df_table_acc.to_latex())
# One classification report per method, written to its own .tex file.
for method_idx, y_pred_b in enumerate([y_pred_test_1_b, y_pred_test_2_b, y_pred_test_3_b, y_pred_test_4_b, y_pred_test_mean_b]):
    report = classification_report(y_test_1, y_pred_b, target_names=[names_1[comparison], names_2[comparison]], output_dict=True)
    classification_report_df = pd.DataFrame(report).transpose()
    with open(results_table_name + method_names[method_idx] + '.tex', 'w') as texfile:
        texfile.write(classification_report_df.to_latex())
# Confusion matrix of the ensemble predictions.
confusion_matrix_test = confusion_matrix(y_test_1, y_pred_test_mean_b)
confusion_matrix_test_df = pd.DataFrame(confusion_matrix_test).transpose()
with open(results_table_name + method_names[4] + '_ensemble_confusion_matrix.tex', 'w') as texfile:
    texfile.write(confusion_matrix_test_df.to_latex())
# --------------------------------------------------------------------------- #
| 51.675214
| 158
| 0.748429
| 2,963
| 18,138
| 4.111711
| 0.057712
| 0.071411
| 0.079783
| 0.068949
| 0.921612
| 0.901502
| 0.901502
| 0.901502
| 0.901502
| 0.899122
| 0
| 0.028963
| 0.090087
| 18,138
| 350
| 159
| 51.822857
| 0.709222
| 0.078895
| 0
| 0.890995
| 0
| 0
| 0.065856
| 0.028563
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.028436
| 0
| 0.028436
| 0.042654
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
764685fc5badc7c4028fc5b27bd1eb512f5a4a88
| 99,894
|
py
|
Python
|
gym/common/protobuf/vnf_bd_pb2.py
|
raphaelvrosa/gym
|
f1d20b444050ab0f445681ae39e93ffd44610f21
|
[
"Apache-2.0"
] | 3
|
2020-03-13T20:18:22.000Z
|
2021-03-21T20:23:00.000Z
|
gym/common/protobuf/vnf_bd_pb2.py
|
raphaelvrosa/gym
|
f1d20b444050ab0f445681ae39e93ffd44610f21
|
[
"Apache-2.0"
] | null | null | null |
gym/common/protobuf/vnf_bd_pb2.py
|
raphaelvrosa/gym
|
f1d20b444050ab0f445681ae39e93ffd44610f21
|
[
"Apache-2.0"
] | 1
|
2020-12-02T18:06:47.000Z
|
2020-12-02T18:06:47.000Z
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: vnf_bd.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
# Default symbol database; all descriptors below are registered against it so
# the generated messages can be looked up by name at runtime.
_sym_db = _symbol_database.Default()
# File-level descriptor for vnf_bd.proto (package 'gym', proto3 syntax).
# serialized_pb is the compiled FileDescriptorProto emitted by protoc; it is
# the single source of truth for all message layouts -- never hand-edit it,
# regenerate this module from the .proto instead.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='vnf_bd.proto',
  package='gym',
  syntax='proto3',
  serialized_options=None,
  create_key=_descriptor._internal_create_key,
  serialized_pb=b'\n\x0cvnf_bd.proto\x12\x03gym\"\xa7\x0f\n\x08Scenario\x12\'\n\x05links\x18\x01 \x03(\x0b\x32\x18.gym.Scenario.LinksEntry\x12\'\n\x05nodes\x18\x02 \x03(\x0b\x32\x18.gym.Scenario.NodesEntry\x12-\n\x08policies\x18\x03 \x03(\x0b\x32\x1b.gym.Scenario.PoliciesEntry\x1aZ\n\x04Link\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x0f\n\x07network\x18\x03 \x01(\t\x12\x0c\n\x04type\x18\x04 \x01(\t\x12\x19\n\x11\x63onnection_points\x18\x05 \x03(\t\x1a\xab\x0b\n\x04Node\x12\n\n\x02id\x18\x01 \x01(\t\x12\r\n\x05image\x18\x02 \x01(\t\x12\x0e\n\x06\x66ormat\x18\x03 \x01(\t\x12\x0c\n\x04type\x18\x04 \x01(\t\x12\x0c\n\x04role\x18\x05 \x01(\t\x12/\n\tresources\x18\x06 \x01(\x0b\x32\x1c.gym.Scenario.Node.Resources\x12\x43\n\x11\x63onnection_points\x18\x07 \x03(\x0b\x32(.gym.Scenario.Node.ConnectionPointsEntry\x12\x34\n\tlifecycle\x18\x08 \x03(\x0b\x32!.gym.Scenario.Node.LifecycleEntry\x12<\n\rrelationships\x18\t \x03(\x0b\x32%.gym.Scenario.Node.RelationshipsEntry\x1aO\n\x0f\x43onnectionPoint\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0f\n\x07\x61\x64\x64ress\x18\x02 \x01(\t\x12\x11\n\tinterface\x18\x03 \x01(\t\x12\x0c\n\x04type\x18\x04 \x01(\t\x1a\xa2\x03\n\tLifecycle\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x16\n\x0eimplementation\x18\x02 \x03(\t\x12@\n\nparameters\x18\x03 \x03(\x0b\x32,.gym.Scenario.Node.Lifecycle.ParametersEntry\x12\x38\n\x08workflow\x18\x04 \x01(\x0e\x32&.gym.Scenario.Node.Lifecycle.Workflows\x1a)\n\tParameter\x12\r\n\x05input\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\x1aY\n\x0fParametersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x35\n\x05value\x18\x02 \x01(\x0b\x32&.gym.Scenario.Node.Lifecycle.Parameter:\x02\x38\x01\"m\n\tWorkflows\x12\x18\n\x14VNFBDWORKFLOWS_UNSET\x10\x00\x12\n\n\x06\x63reate\x10\x01\x12\r\n\tconfigure\x10\x02\x12\t\n\x05start\x10\x03\x12\x08\n\x04stop\x10\x04\x12\n\n\x06\x64\x65lete\x10\x05\x12\n\n\x06\x63ustom\x10\x06\x1a:\n\x0cRelationship\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06target\x18\x02 \x01(\t\x12\x0c\n\x04type\x18\x03 \x01(\t\x1a\xbb\x02\n\tResources\x12-\n\x03\x63pu\x18\x01 \x01(\x0b\x32 .gym.Scenario.Node.Resources.Cpu\x12\x33\n\x06memory\x18\x02 \x01(\x0b\x32#.gym.Scenario.Node.Resources.Memory\x12\x35\n\x07storage\x18\x03 \x01(\x0b\x32$.gym.Scenario.Node.Resources.Storage\x1a\x35\n\x03\x43pu\x12\x0e\n\x06\x63pu_bw\x18\x01 \x01(\t\x12\x0f\n\x07pinning\x18\x02 \x01(\t\x12\r\n\x05vcpus\x18\x03 \x01(\x04\x1a$\n\x06Memory\x12\x0c\n\x04size\x18\x01 \x01(\x04\x12\x0c\n\x04unit\x18\x02 \x01(\t\x1a\x36\n\x07Storage\x12\x0c\n\x04size\x18\x01 \x01(\x04\x12\x0c\n\x04unit\x18\x02 \x01(\t\x12\x0f\n\x07volumes\x18\x03 \x01(\t\x1a[\n\x15\x43onnectionPointsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x31\n\x05value\x18\x02 \x01(\x0b\x32\".gym.Scenario.Node.ConnectionPoint:\x02\x38\x01\x1aN\n\x0eLifecycleEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12+\n\x05value\x18\x02 \x01(\x0b\x32\x1c.gym.Scenario.Node.Lifecycle:\x02\x38\x01\x1aU\n\x12RelationshipsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12.\n\x05value\x18\x02 \x01(\x0b\x32\x1f.gym.Scenario.Node.Relationship:\x02\x38\x01\x1a\x45\n\x06Policy\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06\x61\x63tion\x18\x02 \x01(\t\x12\x0f\n\x07targets\x18\x03 \x01(\t\x12\x0c\n\x04type\x18\x04 \x01(\t\x1a@\n\nLinksEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.gym.Scenario.Link:\x02\x38\x01\x1a@\n\nNodesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.gym.Scenario.Node:\x02\x38\x01\x1a\x45\n\rPoliciesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12#\n\x05value\x18\x02 \x01(\x0b\x32\x14.gym.Scenario.Policy:\x02\x38\x01\"\xca\x0f\n\x05VnfBd\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x0f\n\x07version\x18\x03 \x01(\t\x12\x0e\n\x06\x61uthor\x18\x04 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x05 \x01(\t\x12+\n\x0b\x65xperiments\x18\x06 \x01(\x0b\x32\x16.gym.VnfBd.Experiments\x12\x1f\n\x08scenario\x18\x07 \x01(\x0b\x32\r.gym.Scenario\x12+\n\x0bproceedings\x18\x08 \x01(\x0b\x32\x16.gym.VnfBd.Proceedings\x1a,\n\x0b\x45xperiments\x12\r\n\x05tests\x18\x01 \x01(\r\x12\x0e\n\x06trials\x18\x02 \x01(\r\x1a\xc7\r\n\x0bProceedings\x12:\n\nattributes\x18\x01 \x03(\x0b\x32&.gym.VnfBd.Proceedings.AttributesEntry\x12\x32\n\x06\x61gents\x18\x02 \x03(\x0b\x32\".gym.VnfBd.Proceedings.AgentsEntry\x12\x36\n\x08monitors\x18\x03 \x03(\x0b\x32$.gym.VnfBd.Proceedings.MonitorsEntry\x1a(\n\tAttribute\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\x1a\xd6\x04\n\x05\x41gent\x12\x0c\n\x04uuid\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12:\n\x07probers\x18\x03 \x03(\x0b\x32).gym.VnfBd.Proceedings.Agent.ProbersEntry\x1a\x9f\x03\n\x06Prober\x12\n\n\x02id\x18\x01 \x01(\r\x12\x11\n\tinstances\x18\x02 \x01(\x04\x12\x0c\n\x04name\x18\x03 \x01(\t\x12G\n\nparameters\x18\x04 \x03(\x0b\x32\x33.gym.VnfBd.Proceedings.Agent.Prober.ParametersEntry\x12\x38\n\x05sched\x18\x05 \x01(\x0b\x32).gym.VnfBd.Proceedings.Agent.Prober.Sched\x1a)\n\tParameter\x12\r\n\x05input\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\x1aX\n\x05Sched\x12\x0c\n\x04\x66rom\x18\x01 \x01(\r\x12\r\n\x05until\x18\x02 \x01(\r\x12\x10\n\x08\x64uration\x18\x03 \x01(\r\x12\x10\n\x08interval\x18\x04 \x01(\r\x12\x0e\n\x06repeat\x18\x05 \x01(\r\x1a`\n\x0fParametersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12<\n\x05value\x18\x02 \x01(\x0b\x32-.gym.VnfBd.Proceedings.Agent.Prober.Parameter:\x02\x38\x01\x1aS\n\x0cProbersEntry\x12\x0b\n\x03key\x18\x01 \x01(\r\x12\x32\n\x05value\x18\x02 \x01(\x0b\x32#.gym.VnfBd.Proceedings.Agent.Prober:\x02\x38\x01\x1a\x99\x05\n\x07Monitor\x12\x0c\n\x04uuid\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12@\n\tlisteners\x18\x03 \x03(\x0b\x32-.gym.VnfBd.Proceedings.Monitor.ListenersEntry\x1a%\n\x04Host\x12\x0c\n\x04node\x18\x01 \x01(\t\x12\x0f\n\x07setting\x18\x02 \x01(\t\x1a\xad\x03\n\x08Listener\x12\n\n\x02id\x18\x01 \x01(\r\x12\x11\n\tinstances\x18\x02 \x01(\x04\x12\x0c\n\x04name\x18\x03 \x01(\t\x12K\n\nparameters\x18\x04 \x03(\x0b\x32\x37.gym.VnfBd.Proceedings.Monitor.Listener.ParametersEntry\x12<\n\x05sched\x18\x05 \x01(\x0b\x32-.gym.VnfBd.Proceedings.Monitor.Listener.Sched\x1a)\n\tParameter\x12\r\n\x05input\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\x1aX\n\x05Sched\x12\x0c\n\x04\x66rom\x18\x01 \x01(\r\x12\r\n\x05until\x18\x02 \x01(\r\x12\x10\n\x08\x64uration\x18\x03 \x01(\r\x12\x10\n\x08interval\x18\x04 \x01(\r\x12\x0e\n\x06repeat\x18\x05 \x01(\r\x1a\x64\n\x0fParametersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12@\n\x05value\x18\x02 \x01(\x0b\x32\x31.gym.VnfBd.Proceedings.Monitor.Listener.Parameter:\x02\x38\x01\x1aY\n\x0eListenersEntry\x12\x0b\n\x03key\x18\x01 \x01(\r\x12\x36\n\x05value\x18\x02 \x01(\x0b\x32\'.gym.VnfBd.Proceedings.Monitor.Listener:\x02\x38\x01\x1aS\n\x0f\x41ttributesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12/\n\x05value\x18\x02 \x01(\x0b\x32 .gym.VnfBd.Proceedings.Attribute:\x02\x38\x01\x1aK\n\x0b\x41gentsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12+\n\x05value\x18\x02 \x01(\x0b\x32\x1c.gym.VnfBd.Proceedings.Agent:\x02\x38\x01\x1aO\n\rMonitorsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12-\n\x05value\x18\x02 \x01(\x0b\x32\x1e.gym.VnfBd.Proceedings.Monitor:\x02\x38\x01\x62\x06proto3'
)
# Generated enum descriptor for gym.Scenario.Node.Lifecycle.Workflows.
# The serialized_start/end offsets index into DESCRIPTOR's serialized_pb and
# must not be changed by hand.
_SCENARIO_NODE_LIFECYCLE_WORKFLOWS = _descriptor.EnumDescriptor(
  name='Workflows',
  full_name='gym.Scenario.Node.Lifecycle.Workflows',
  filename=None,
  file=DESCRIPTOR,
  create_key=_descriptor._internal_create_key,
  values=[
    _descriptor.EnumValueDescriptor(
      name='VNFBDWORKFLOWS_UNSET', index=0, number=0,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='create', index=1, number=1,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='configure', index=2, number=2,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='start', index=3, number=3,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='stop', index=4, number=4,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='delete', index=5, number=5,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='custom', index=6, number=6,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
  ],
  containing_type=None,
  serialized_options=None,
  serialized_start=960,
  serialized_end=1069,
)
# Register the enum with the default symbol database so it is resolvable by name.
_sym_db.RegisterEnumDescriptor(_SCENARIO_NODE_LIFECYCLE_WORKFLOWS)
# Generated descriptor for the gym.Scenario.Link message
# (fields: id, name, network, type, repeated connection_points).
_SCENARIO_LINK = _descriptor.Descriptor(
  name='Link',
  full_name='gym.Scenario.Link',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='id', full_name='gym.Scenario.Link.id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='name', full_name='gym.Scenario.Link.name', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='network', full_name='gym.Scenario.Link.network', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='type', full_name='gym.Scenario.Link.type', index=3,
      number=4, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='connection_points', full_name='gym.Scenario.Link.connection_points', index=4,
      number=5, type=9, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=163,
  serialized_end=253,
)
# Generated descriptor for the nested gym.Scenario.Node.ConnectionPoint
# message (fields: id, address, interface, type).
_SCENARIO_NODE_CONNECTIONPOINT = _descriptor.Descriptor(
  name='ConnectionPoint',
  full_name='gym.Scenario.Node.ConnectionPoint',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='id', full_name='gym.Scenario.Node.ConnectionPoint.id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='address', full_name='gym.Scenario.Node.ConnectionPoint.address', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='interface', full_name='gym.Scenario.Node.ConnectionPoint.interface', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='type', full_name='gym.Scenario.Node.ConnectionPoint.type', index=3,
      number=4, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=569,
  serialized_end=648,
)
# Generated descriptor for the nested gym.Scenario.Node.Lifecycle.Parameter
# message (fields: input, value).
_SCENARIO_NODE_LIFECYCLE_PARAMETER = _descriptor.Descriptor(
  name='Parameter',
  full_name='gym.Scenario.Node.Lifecycle.Parameter',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='input', full_name='gym.Scenario.Node.Lifecycle.Parameter.input', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='value', full_name='gym.Scenario.Node.Lifecycle.Parameter.value', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=826,
  serialized_end=867,
)
# Generated descriptor for the auto-synthesized map-entry message backing
# Lifecycle.parameters (serialized_options b'8\001' is the map_entry option).
_SCENARIO_NODE_LIFECYCLE_PARAMETERSENTRY = _descriptor.Descriptor(
  name='ParametersEntry',
  full_name='gym.Scenario.Node.Lifecycle.ParametersEntry',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='key', full_name='gym.Scenario.Node.Lifecycle.ParametersEntry.key', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='value', full_name='gym.Scenario.Node.Lifecycle.ParametersEntry.value', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=b'8\001',
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=869,
  serialized_end=958,
)
# Generated descriptor for gym.Scenario.Node.Lifecycle
# (fields: name, repeated implementation, parameters map, workflow enum).
_SCENARIO_NODE_LIFECYCLE = _descriptor.Descriptor(
  name='Lifecycle',
  full_name='gym.Scenario.Node.Lifecycle',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='name', full_name='gym.Scenario.Node.Lifecycle.name', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='implementation', full_name='gym.Scenario.Node.Lifecycle.implementation', index=1,
      number=2, type=9, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='parameters', full_name='gym.Scenario.Node.Lifecycle.parameters', index=2,
      number=3, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='workflow', full_name='gym.Scenario.Node.Lifecycle.workflow', index=3,
      number=4, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[_SCENARIO_NODE_LIFECYCLE_PARAMETER, _SCENARIO_NODE_LIFECYCLE_PARAMETERSENTRY, ],
  enum_types=[
    _SCENARIO_NODE_LIFECYCLE_WORKFLOWS,
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=651,
  serialized_end=1069,
)
# Generated descriptor for gym.Scenario.Node.Relationship
# (fields: name, target, type).
_SCENARIO_NODE_RELATIONSHIP = _descriptor.Descriptor(
  name='Relationship',
  full_name='gym.Scenario.Node.Relationship',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='name', full_name='gym.Scenario.Node.Relationship.name', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='target', full_name='gym.Scenario.Node.Relationship.target', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='type', full_name='gym.Scenario.Node.Relationship.type', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1071,
  serialized_end=1129,
)
# Generated descriptor for gym.Scenario.Node.Resources.Cpu
# (fields: cpu_bw, pinning, vcpus).
_SCENARIO_NODE_RESOURCES_CPU = _descriptor.Descriptor(
  name='Cpu',
  full_name='gym.Scenario.Node.Resources.Cpu',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='cpu_bw', full_name='gym.Scenario.Node.Resources.Cpu.cpu_bw', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='pinning', full_name='gym.Scenario.Node.Resources.Cpu.pinning', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='vcpus', full_name='gym.Scenario.Node.Resources.Cpu.vcpus', index=2,
      number=3, type=4, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1300,
  serialized_end=1353,
)
# Generated descriptor for gym.Scenario.Node.Resources.Memory
# (fields: size, unit).
_SCENARIO_NODE_RESOURCES_MEMORY = _descriptor.Descriptor(
  name='Memory',
  full_name='gym.Scenario.Node.Resources.Memory',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='size', full_name='gym.Scenario.Node.Resources.Memory.size', index=0,
      number=1, type=4, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='unit', full_name='gym.Scenario.Node.Resources.Memory.unit', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1355,
  serialized_end=1391,
)
# Generated descriptor for gym.Scenario.Node.Resources.Storage
# (fields: size, unit, volumes).
_SCENARIO_NODE_RESOURCES_STORAGE = _descriptor.Descriptor(
  name='Storage',
  full_name='gym.Scenario.Node.Resources.Storage',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='size', full_name='gym.Scenario.Node.Resources.Storage.size', index=0,
      number=1, type=4, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='unit', full_name='gym.Scenario.Node.Resources.Storage.unit', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='volumes', full_name='gym.Scenario.Node.Resources.Storage.volumes', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1393,
  serialized_end=1447,
)
# Generated descriptor for gym.Scenario.Node.Resources, which aggregates the
# nested Cpu, Memory and Storage message descriptors declared above.
_SCENARIO_NODE_RESOURCES = _descriptor.Descriptor(
  name='Resources',
  full_name='gym.Scenario.Node.Resources',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='cpu', full_name='gym.Scenario.Node.Resources.cpu', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='memory', full_name='gym.Scenario.Node.Resources.memory', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='storage', full_name='gym.Scenario.Node.Resources.storage', index=2,
      number=3, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[_SCENARIO_NODE_RESOURCES_CPU, _SCENARIO_NODE_RESOURCES_MEMORY, _SCENARIO_NODE_RESOURCES_STORAGE, ],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1132,
  serialized_end=1447,
)
# Generated descriptor for the map-entry message backing
# Node.connection_points (serialized_options b'8\001' is the map_entry option).
_SCENARIO_NODE_CONNECTIONPOINTSENTRY = _descriptor.Descriptor(
  name='ConnectionPointsEntry',
  full_name='gym.Scenario.Node.ConnectionPointsEntry',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='key', full_name='gym.Scenario.Node.ConnectionPointsEntry.key', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='value', full_name='gym.Scenario.Node.ConnectionPointsEntry.value', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=b'8\001',
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1449,
  serialized_end=1540,
)
# Generated descriptor for the map-entry message backing Node.lifecycle
# (serialized_options b'8\001' is the map_entry option).
_SCENARIO_NODE_LIFECYCLEENTRY = _descriptor.Descriptor(
  name='LifecycleEntry',
  full_name='gym.Scenario.Node.LifecycleEntry',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='key', full_name='gym.Scenario.Node.LifecycleEntry.key', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='value', full_name='gym.Scenario.Node.LifecycleEntry.value', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=b'8\001',
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1542,
  serialized_end=1620,
)
# Generated descriptor for the map-entry message backing Node.relationships
# (serialized_options b'8\001' is the map_entry option).
_SCENARIO_NODE_RELATIONSHIPSENTRY = _descriptor.Descriptor(
  name='RelationshipsEntry',
  full_name='gym.Scenario.Node.RelationshipsEntry',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='key', full_name='gym.Scenario.Node.RelationshipsEntry.key', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='value', full_name='gym.Scenario.Node.RelationshipsEntry.value', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=b'8\001',
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1622,
  serialized_end=1707,
)
# Generated descriptor for the gym.Scenario.Node message: scalar metadata
# fields plus the Resources sub-message and three map fields whose entry
# descriptors are declared above.
_SCENARIO_NODE = _descriptor.Descriptor(
  name='Node',
  full_name='gym.Scenario.Node',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='id', full_name='gym.Scenario.Node.id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='image', full_name='gym.Scenario.Node.image', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='format', full_name='gym.Scenario.Node.format', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='type', full_name='gym.Scenario.Node.type', index=3,
      number=4, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='role', full_name='gym.Scenario.Node.role', index=4,
      number=5, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='resources', full_name='gym.Scenario.Node.resources', index=5,
      number=6, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='connection_points', full_name='gym.Scenario.Node.connection_points', index=6,
      number=7, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='lifecycle', full_name='gym.Scenario.Node.lifecycle', index=7,
      number=8, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='relationships', full_name='gym.Scenario.Node.relationships', index=8,
      number=9, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[_SCENARIO_NODE_CONNECTIONPOINT, _SCENARIO_NODE_LIFECYCLE, _SCENARIO_NODE_RELATIONSHIP, _SCENARIO_NODE_RESOURCES, _SCENARIO_NODE_CONNECTIONPOINTSENTRY, _SCENARIO_NODE_LIFECYCLEENTRY, _SCENARIO_NODE_RELATIONSHIPSENTRY, ],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=256,
  serialized_end=1707,
)
# ---------------------------------------------------------------------------
# Generated protobuf descriptors (protoc output) for the gym.Scenario message
# and its nested types.  NOTE(review): this file appears machine-generated;
# manual edits are normally lost on regeneration from the .proto source.
#
# Field type codes follow google.protobuf.descriptor.FieldDescriptor:
#   type=9 / cpp_type=9  -> string
#   type=11 / cpp_type=10 -> embedded message
#   label=1 -> optional (proto3 singular), label=3 -> repeated
# ---------------------------------------------------------------------------

# Descriptor for nested message gym.Scenario.Policy: four singular string
# fields -- name(1), action(2), targets(3), type(4).
_SCENARIO_POLICY = _descriptor.Descriptor(
  name='Policy',
  full_name='gym.Scenario.Policy',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='name', full_name='gym.Scenario.Policy.name', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='action', full_name='gym.Scenario.Policy.action', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='targets', full_name='gym.Scenario.Policy.targets', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='type', full_name='gym.Scenario.Policy.type', index=3,
      number=4, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  # Byte offsets of this message definition inside the file's serialized_pb.
  serialized_start=1709,
  serialized_end=1778,
)

# Synthesized map-entry type for the Scenario.links map field
# (string key -> message value).
_SCENARIO_LINKSENTRY = _descriptor.Descriptor(
  name='LinksEntry',
  full_name='gym.Scenario.LinksEntry',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='key', full_name='gym.Scenario.LinksEntry.key', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='value', full_name='gym.Scenario.LinksEntry.value', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  # b'8\001' encodes the map_entry=true message option.
  serialized_options=b'8\001',
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1780,
  serialized_end=1844,
)

# Synthesized map-entry type for the Scenario.nodes map field
# (string key -> message value).
_SCENARIO_NODESENTRY = _descriptor.Descriptor(
  name='NodesEntry',
  full_name='gym.Scenario.NodesEntry',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='key', full_name='gym.Scenario.NodesEntry.key', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='value', full_name='gym.Scenario.NodesEntry.value', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  # b'8\001' encodes the map_entry=true message option.
  serialized_options=b'8\001',
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1846,
  serialized_end=1910,
)

# Synthesized map-entry type for the Scenario.policies map field
# (string key -> message value).
_SCENARIO_POLICIESENTRY = _descriptor.Descriptor(
  name='PoliciesEntry',
  full_name='gym.Scenario.PoliciesEntry',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='key', full_name='gym.Scenario.PoliciesEntry.key', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='value', full_name='gym.Scenario.PoliciesEntry.value', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  # b'8\001' encodes the map_entry=true message option.
  serialized_options=b'8\001',
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1912,
  serialized_end=1981,
)

# Top-level descriptor for gym.Scenario.  Its links(1)/nodes(2)/policies(3)
# fields are repeated map-entry messages (protoc represents maps as repeated
# entry messages), and it owns the nested types declared above plus
# _SCENARIO_LINK / _SCENARIO_NODE defined earlier in the file.
_SCENARIO = _descriptor.Descriptor(
  name='Scenario',
  full_name='gym.Scenario',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='links', full_name='gym.Scenario.links', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='nodes', full_name='gym.Scenario.nodes', index=1,
      number=2, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='policies', full_name='gym.Scenario.policies', index=2,
      number=3, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[_SCENARIO_LINK, _SCENARIO_NODE, _SCENARIO_POLICY, _SCENARIO_LINKSENTRY, _SCENARIO_NODESENTRY, _SCENARIO_POLICIESENTRY, ],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=22,
  serialized_end=1981,
)
# Descriptor for nested message gym.VnfBd.Experiments: two singular uint32
# fields (type=13/cpp_type=3) -- tests(1) and trials(2).
_VNFBD_EXPERIMENTS = _descriptor.Descriptor(
  name='Experiments',
  full_name='gym.VnfBd.Experiments',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='tests', full_name='gym.VnfBd.Experiments.tests', index=0,
      number=1, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='trials', full_name='gym.VnfBd.Experiments.trials', index=1,
      number=2, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  # Byte offsets of this message definition inside the file's serialized_pb.
  serialized_start=2196,
  serialized_end=2240,
)

# Descriptor for nested message gym.VnfBd.Proceedings.Attribute: two singular
# string fields (type=9/cpp_type=9) -- name(1) and value(2).
_VNFBD_PROCEEDINGS_ATTRIBUTE = _descriptor.Descriptor(
  name='Attribute',
  full_name='gym.VnfBd.Proceedings.Attribute',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='name', full_name='gym.VnfBd.Proceedings.Attribute.name', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='value', full_name='gym.VnfBd.Proceedings.Attribute.value', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2426,
  serialized_end=2466,
)
# Descriptor for nested message gym.VnfBd.Proceedings.Agent.Prober.Parameter:
# two singular string fields -- input(1) and value(2).
_VNFBD_PROCEEDINGS_AGENT_PROBER_PARAMETER = _descriptor.Descriptor(
  name='Parameter',
  full_name='gym.VnfBd.Proceedings.Agent.Prober.Parameter',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='input', full_name='gym.VnfBd.Proceedings.Agent.Prober.Parameter.input', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='value', full_name='gym.VnfBd.Proceedings.Agent.Prober.Parameter.value', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  # NOTE(review): these offsets (826-867) fall outside the enclosing Prober
  # span (2567-2982) -- possibly a stale offset; confirm against the
  # file's serialized_pb / regenerate with protoc.
  serialized_start=826,
  serialized_end=867,
)

# Descriptor for nested message gym.VnfBd.Proceedings.Agent.Prober.Sched:
# five singular uint32 fields -- from(1), until(2), duration(3), interval(4),
# repeat(5).  ('from' is only a string here, so the Python keyword is fine.)
_VNFBD_PROCEEDINGS_AGENT_PROBER_SCHED = _descriptor.Descriptor(
  name='Sched',
  full_name='gym.VnfBd.Proceedings.Agent.Prober.Sched',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='from', full_name='gym.VnfBd.Proceedings.Agent.Prober.Sched.from', index=0,
      number=1, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='until', full_name='gym.VnfBd.Proceedings.Agent.Prober.Sched.until', index=1,
      number=2, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='duration', full_name='gym.VnfBd.Proceedings.Agent.Prober.Sched.duration', index=2,
      number=3, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='interval', full_name='gym.VnfBd.Proceedings.Agent.Prober.Sched.interval', index=3,
      number=4, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='repeat', full_name='gym.VnfBd.Proceedings.Agent.Prober.Sched.repeat', index=4,
      number=5, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2796,
  serialized_end=2884,
)

# Synthesized map-entry type for Prober.parameters (string key -> message
# value); b'8\001' encodes the map_entry=true option.
_VNFBD_PROCEEDINGS_AGENT_PROBER_PARAMETERSENTRY = _descriptor.Descriptor(
  name='ParametersEntry',
  full_name='gym.VnfBd.Proceedings.Agent.Prober.ParametersEntry',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='key', full_name='gym.VnfBd.Proceedings.Agent.Prober.ParametersEntry.key', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='value', full_name='gym.VnfBd.Proceedings.Agent.Prober.ParametersEntry.value', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=b'8\001',
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2886,
  serialized_end=2982,
)

# Descriptor for nested message gym.VnfBd.Proceedings.Agent.Prober:
# id(1) uint32, instances(2) uint64 (type=4/cpp_type=4), name(3) string,
# parameters(4) repeated map entries, sched(5) embedded Sched message.
_VNFBD_PROCEEDINGS_AGENT_PROBER = _descriptor.Descriptor(
  name='Prober',
  full_name='gym.VnfBd.Proceedings.Agent.Prober',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='id', full_name='gym.VnfBd.Proceedings.Agent.Prober.id', index=0,
      number=1, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='instances', full_name='gym.VnfBd.Proceedings.Agent.Prober.instances', index=1,
      number=2, type=4, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='name', full_name='gym.VnfBd.Proceedings.Agent.Prober.name', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='parameters', full_name='gym.VnfBd.Proceedings.Agent.Prober.parameters', index=3,
      number=4, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='sched', full_name='gym.VnfBd.Proceedings.Agent.Prober.sched', index=4,
      number=5, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[_VNFBD_PROCEEDINGS_AGENT_PROBER_PARAMETER, _VNFBD_PROCEEDINGS_AGENT_PROBER_SCHED, _VNFBD_PROCEEDINGS_AGENT_PROBER_PARAMETERSENTRY, ],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2567,
  serialized_end=2982,
)

# Synthesized map-entry type for Agent.probers (uint32 key -> message value);
# b'8\001' encodes the map_entry=true option.
_VNFBD_PROCEEDINGS_AGENT_PROBERSENTRY = _descriptor.Descriptor(
  name='ProbersEntry',
  full_name='gym.VnfBd.Proceedings.Agent.ProbersEntry',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='key', full_name='gym.VnfBd.Proceedings.Agent.ProbersEntry.key', index=0,
      number=1, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='value', full_name='gym.VnfBd.Proceedings.Agent.ProbersEntry.value', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=b'8\001',
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2984,
  serialized_end=3067,
)

# Descriptor for nested message gym.VnfBd.Proceedings.Agent:
# uuid(1) string, name(2) string, probers(3) repeated map entries.
_VNFBD_PROCEEDINGS_AGENT = _descriptor.Descriptor(
  name='Agent',
  full_name='gym.VnfBd.Proceedings.Agent',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='uuid', full_name='gym.VnfBd.Proceedings.Agent.uuid', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='name', full_name='gym.VnfBd.Proceedings.Agent.name', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='probers', full_name='gym.VnfBd.Proceedings.Agent.probers', index=2,
      number=3, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[_VNFBD_PROCEEDINGS_AGENT_PROBER, _VNFBD_PROCEEDINGS_AGENT_PROBERSENTRY, ],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2469,
  serialized_end=3067,
)
# Descriptor for nested message gym.VnfBd.Proceedings.Monitor.Host:
# two singular string fields -- node(1) and setting(2).
_VNFBD_PROCEEDINGS_MONITOR_HOST = _descriptor.Descriptor(
  name='Host',
  full_name='gym.VnfBd.Proceedings.Monitor.Host',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='node', full_name='gym.VnfBd.Proceedings.Monitor.Host.node', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='setting', full_name='gym.VnfBd.Proceedings.Monitor.Host.setting', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=3175,
  serialized_end=3212,
)

# Descriptor for gym.VnfBd.Proceedings.Monitor.Listener.Parameter:
# two singular string fields -- input(1) and value(2).
_VNFBD_PROCEEDINGS_MONITOR_LISTENER_PARAMETER = _descriptor.Descriptor(
  name='Parameter',
  full_name='gym.VnfBd.Proceedings.Monitor.Listener.Parameter',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='input', full_name='gym.VnfBd.Proceedings.Monitor.Listener.Parameter.input', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='value', full_name='gym.VnfBd.Proceedings.Monitor.Listener.Parameter.value', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  # NOTE(review): these offsets (826-867) fall outside the enclosing Listener
  # span (3215-3644) -- possibly a stale offset; confirm against the
  # file's serialized_pb / regenerate with protoc.
  serialized_start=826,
  serialized_end=867,
)

# Descriptor for gym.VnfBd.Proceedings.Monitor.Listener.Sched:
# five singular uint32 fields -- from(1), until(2), duration(3), interval(4),
# repeat(5).
_VNFBD_PROCEEDINGS_MONITOR_LISTENER_SCHED = _descriptor.Descriptor(
  name='Sched',
  full_name='gym.VnfBd.Proceedings.Monitor.Listener.Sched',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='from', full_name='gym.VnfBd.Proceedings.Monitor.Listener.Sched.from', index=0,
      number=1, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='until', full_name='gym.VnfBd.Proceedings.Monitor.Listener.Sched.until', index=1,
      number=2, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='duration', full_name='gym.VnfBd.Proceedings.Monitor.Listener.Sched.duration', index=2,
      number=3, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='interval', full_name='gym.VnfBd.Proceedings.Monitor.Listener.Sched.interval', index=3,
      number=4, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='repeat', full_name='gym.VnfBd.Proceedings.Monitor.Listener.Sched.repeat', index=4,
      number=5, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  # NOTE(review): offsets 2796-2884 also fall outside the enclosing Listener
  # span (3215-3644); they match the Agent.Prober.Sched offsets -- confirm
  # against the serialized_pb.
  serialized_start=2796,
  serialized_end=2884,
)

# Synthesized map-entry type for Listener.parameters (string key -> message
# value); b'8\001' encodes the map_entry=true option.
_VNFBD_PROCEEDINGS_MONITOR_LISTENER_PARAMETERSENTRY = _descriptor.Descriptor(
  name='ParametersEntry',
  full_name='gym.VnfBd.Proceedings.Monitor.Listener.ParametersEntry',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='key', full_name='gym.VnfBd.Proceedings.Monitor.Listener.ParametersEntry.key', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='value', full_name='gym.VnfBd.Proceedings.Monitor.Listener.ParametersEntry.value', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=b'8\001',
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=3544,
  serialized_end=3644,
)

# Descriptor for nested message gym.VnfBd.Proceedings.Monitor.Listener:
# id(1) uint32, instances(2) uint64, name(3) string, parameters(4) repeated
# map entries, sched(5) embedded Sched message.  Mirrors Agent.Prober.
_VNFBD_PROCEEDINGS_MONITOR_LISTENER = _descriptor.Descriptor(
  name='Listener',
  full_name='gym.VnfBd.Proceedings.Monitor.Listener',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='id', full_name='gym.VnfBd.Proceedings.Monitor.Listener.id', index=0,
      number=1, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='instances', full_name='gym.VnfBd.Proceedings.Monitor.Listener.instances', index=1,
      number=2, type=4, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='name', full_name='gym.VnfBd.Proceedings.Monitor.Listener.name', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='parameters', full_name='gym.VnfBd.Proceedings.Monitor.Listener.parameters', index=3,
      number=4, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='sched', full_name='gym.VnfBd.Proceedings.Monitor.Listener.sched', index=4,
      number=5, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[_VNFBD_PROCEEDINGS_MONITOR_LISTENER_PARAMETER, _VNFBD_PROCEEDINGS_MONITOR_LISTENER_SCHED, _VNFBD_PROCEEDINGS_MONITOR_LISTENER_PARAMETERSENTRY, ],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=3215,
  serialized_end=3644,
)

# Synthesized map-entry type for Monitor.listeners (uint32 key -> message
# value); b'8\001' encodes the map_entry=true option.
_VNFBD_PROCEEDINGS_MONITOR_LISTENERSENTRY = _descriptor.Descriptor(
  name='ListenersEntry',
  full_name='gym.VnfBd.Proceedings.Monitor.ListenersEntry',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='key', full_name='gym.VnfBd.Proceedings.Monitor.ListenersEntry.key', index=0,
      number=1, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='value', full_name='gym.VnfBd.Proceedings.Monitor.ListenersEntry.value', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=b'8\001',
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=3646,
  serialized_end=3735,
)

# Descriptor for nested message gym.VnfBd.Proceedings.Monitor:
# uuid(1) string, name(2) string, listeners(3) repeated map entries.
# Mirrors gym.VnfBd.Proceedings.Agent.
_VNFBD_PROCEEDINGS_MONITOR = _descriptor.Descriptor(
  name='Monitor',
  full_name='gym.VnfBd.Proceedings.Monitor',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='uuid', full_name='gym.VnfBd.Proceedings.Monitor.uuid', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='name', full_name='gym.VnfBd.Proceedings.Monitor.name', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='listeners', full_name='gym.VnfBd.Proceedings.Monitor.listeners', index=2,
      number=3, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[_VNFBD_PROCEEDINGS_MONITOR_HOST, _VNFBD_PROCEEDINGS_MONITOR_LISTENER, _VNFBD_PROCEEDINGS_MONITOR_LISTENERSENTRY, ],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=3070,
  serialized_end=3735,
)
# Synthesized map-entry type for Proceedings.attributes (string key ->
# message value); b'8\001' encodes the map_entry=true option.
_VNFBD_PROCEEDINGS_ATTRIBUTESENTRY = _descriptor.Descriptor(
  name='AttributesEntry',
  full_name='gym.VnfBd.Proceedings.AttributesEntry',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='key', full_name='gym.VnfBd.Proceedings.AttributesEntry.key', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='value', full_name='gym.VnfBd.Proceedings.AttributesEntry.value', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=b'8\001',
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=3737,
  serialized_end=3820,
)
_VNFBD_PROCEEDINGS_AGENTSENTRY = _descriptor.Descriptor(
name='AgentsEntry',
full_name='gym.VnfBd.Proceedings.AgentsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='gym.VnfBd.Proceedings.AgentsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='gym.VnfBd.Proceedings.AgentsEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3822,
serialized_end=3897,
)
_VNFBD_PROCEEDINGS_MONITORSENTRY = _descriptor.Descriptor(
name='MonitorsEntry',
full_name='gym.VnfBd.Proceedings.MonitorsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='gym.VnfBd.Proceedings.MonitorsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='gym.VnfBd.Proceedings.MonitorsEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3899,
serialized_end=3978,
)
_VNFBD_PROCEEDINGS = _descriptor.Descriptor(
name='Proceedings',
full_name='gym.VnfBd.Proceedings',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='attributes', full_name='gym.VnfBd.Proceedings.attributes', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='agents', full_name='gym.VnfBd.Proceedings.agents', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='monitors', full_name='gym.VnfBd.Proceedings.monitors', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_VNFBD_PROCEEDINGS_ATTRIBUTE, _VNFBD_PROCEEDINGS_AGENT, _VNFBD_PROCEEDINGS_MONITOR, _VNFBD_PROCEEDINGS_ATTRIBUTESENTRY, _VNFBD_PROCEEDINGS_AGENTSENTRY, _VNFBD_PROCEEDINGS_MONITORSENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2243,
serialized_end=3978,
)
_VNFBD = _descriptor.Descriptor(
name='VnfBd',
full_name='gym.VnfBd',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='gym.VnfBd.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='gym.VnfBd.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='version', full_name='gym.VnfBd.version', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='author', full_name='gym.VnfBd.author', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='description', full_name='gym.VnfBd.description', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='experiments', full_name='gym.VnfBd.experiments', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='scenario', full_name='gym.VnfBd.scenario', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='proceedings', full_name='gym.VnfBd.proceedings', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_VNFBD_EXPERIMENTS, _VNFBD_PROCEEDINGS, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1984,
serialized_end=3978,
)
_SCENARIO_LINK.containing_type = _SCENARIO
_SCENARIO_NODE_CONNECTIONPOINT.containing_type = _SCENARIO_NODE
_SCENARIO_NODE_LIFECYCLE_PARAMETER.containing_type = _SCENARIO_NODE_LIFECYCLE
_SCENARIO_NODE_LIFECYCLE_PARAMETERSENTRY.fields_by_name['value'].message_type = _SCENARIO_NODE_LIFECYCLE_PARAMETER
_SCENARIO_NODE_LIFECYCLE_PARAMETERSENTRY.containing_type = _SCENARIO_NODE_LIFECYCLE
_SCENARIO_NODE_LIFECYCLE.fields_by_name['parameters'].message_type = _SCENARIO_NODE_LIFECYCLE_PARAMETERSENTRY
_SCENARIO_NODE_LIFECYCLE.fields_by_name['workflow'].enum_type = _SCENARIO_NODE_LIFECYCLE_WORKFLOWS
_SCENARIO_NODE_LIFECYCLE.containing_type = _SCENARIO_NODE
_SCENARIO_NODE_LIFECYCLE_WORKFLOWS.containing_type = _SCENARIO_NODE_LIFECYCLE
_SCENARIO_NODE_RELATIONSHIP.containing_type = _SCENARIO_NODE
_SCENARIO_NODE_RESOURCES_CPU.containing_type = _SCENARIO_NODE_RESOURCES
_SCENARIO_NODE_RESOURCES_MEMORY.containing_type = _SCENARIO_NODE_RESOURCES
_SCENARIO_NODE_RESOURCES_STORAGE.containing_type = _SCENARIO_NODE_RESOURCES
_SCENARIO_NODE_RESOURCES.fields_by_name['cpu'].message_type = _SCENARIO_NODE_RESOURCES_CPU
_SCENARIO_NODE_RESOURCES.fields_by_name['memory'].message_type = _SCENARIO_NODE_RESOURCES_MEMORY
_SCENARIO_NODE_RESOURCES.fields_by_name['storage'].message_type = _SCENARIO_NODE_RESOURCES_STORAGE
_SCENARIO_NODE_RESOURCES.containing_type = _SCENARIO_NODE
_SCENARIO_NODE_CONNECTIONPOINTSENTRY.fields_by_name['value'].message_type = _SCENARIO_NODE_CONNECTIONPOINT
_SCENARIO_NODE_CONNECTIONPOINTSENTRY.containing_type = _SCENARIO_NODE
_SCENARIO_NODE_LIFECYCLEENTRY.fields_by_name['value'].message_type = _SCENARIO_NODE_LIFECYCLE
_SCENARIO_NODE_LIFECYCLEENTRY.containing_type = _SCENARIO_NODE
_SCENARIO_NODE_RELATIONSHIPSENTRY.fields_by_name['value'].message_type = _SCENARIO_NODE_RELATIONSHIP
_SCENARIO_NODE_RELATIONSHIPSENTRY.containing_type = _SCENARIO_NODE
_SCENARIO_NODE.fields_by_name['resources'].message_type = _SCENARIO_NODE_RESOURCES
_SCENARIO_NODE.fields_by_name['connection_points'].message_type = _SCENARIO_NODE_CONNECTIONPOINTSENTRY
_SCENARIO_NODE.fields_by_name['lifecycle'].message_type = _SCENARIO_NODE_LIFECYCLEENTRY
_SCENARIO_NODE.fields_by_name['relationships'].message_type = _SCENARIO_NODE_RELATIONSHIPSENTRY
_SCENARIO_NODE.containing_type = _SCENARIO
_SCENARIO_POLICY.containing_type = _SCENARIO
_SCENARIO_LINKSENTRY.fields_by_name['value'].message_type = _SCENARIO_LINK
_SCENARIO_LINKSENTRY.containing_type = _SCENARIO
_SCENARIO_NODESENTRY.fields_by_name['value'].message_type = _SCENARIO_NODE
_SCENARIO_NODESENTRY.containing_type = _SCENARIO
_SCENARIO_POLICIESENTRY.fields_by_name['value'].message_type = _SCENARIO_POLICY
_SCENARIO_POLICIESENTRY.containing_type = _SCENARIO
_SCENARIO.fields_by_name['links'].message_type = _SCENARIO_LINKSENTRY
_SCENARIO.fields_by_name['nodes'].message_type = _SCENARIO_NODESENTRY
_SCENARIO.fields_by_name['policies'].message_type = _SCENARIO_POLICIESENTRY
_VNFBD_EXPERIMENTS.containing_type = _VNFBD
_VNFBD_PROCEEDINGS_ATTRIBUTE.containing_type = _VNFBD_PROCEEDINGS
_VNFBD_PROCEEDINGS_AGENT_PROBER_PARAMETER.containing_type = _VNFBD_PROCEEDINGS_AGENT_PROBER
_VNFBD_PROCEEDINGS_AGENT_PROBER_SCHED.containing_type = _VNFBD_PROCEEDINGS_AGENT_PROBER
_VNFBD_PROCEEDINGS_AGENT_PROBER_PARAMETERSENTRY.fields_by_name['value'].message_type = _VNFBD_PROCEEDINGS_AGENT_PROBER_PARAMETER
_VNFBD_PROCEEDINGS_AGENT_PROBER_PARAMETERSENTRY.containing_type = _VNFBD_PROCEEDINGS_AGENT_PROBER
_VNFBD_PROCEEDINGS_AGENT_PROBER.fields_by_name['parameters'].message_type = _VNFBD_PROCEEDINGS_AGENT_PROBER_PARAMETERSENTRY
_VNFBD_PROCEEDINGS_AGENT_PROBER.fields_by_name['sched'].message_type = _VNFBD_PROCEEDINGS_AGENT_PROBER_SCHED
_VNFBD_PROCEEDINGS_AGENT_PROBER.containing_type = _VNFBD_PROCEEDINGS_AGENT
_VNFBD_PROCEEDINGS_AGENT_PROBERSENTRY.fields_by_name['value'].message_type = _VNFBD_PROCEEDINGS_AGENT_PROBER
_VNFBD_PROCEEDINGS_AGENT_PROBERSENTRY.containing_type = _VNFBD_PROCEEDINGS_AGENT
_VNFBD_PROCEEDINGS_AGENT.fields_by_name['probers'].message_type = _VNFBD_PROCEEDINGS_AGENT_PROBERSENTRY
_VNFBD_PROCEEDINGS_AGENT.containing_type = _VNFBD_PROCEEDINGS
_VNFBD_PROCEEDINGS_MONITOR_HOST.containing_type = _VNFBD_PROCEEDINGS_MONITOR
_VNFBD_PROCEEDINGS_MONITOR_LISTENER_PARAMETER.containing_type = _VNFBD_PROCEEDINGS_MONITOR_LISTENER
_VNFBD_PROCEEDINGS_MONITOR_LISTENER_SCHED.containing_type = _VNFBD_PROCEEDINGS_MONITOR_LISTENER
_VNFBD_PROCEEDINGS_MONITOR_LISTENER_PARAMETERSENTRY.fields_by_name['value'].message_type = _VNFBD_PROCEEDINGS_MONITOR_LISTENER_PARAMETER
_VNFBD_PROCEEDINGS_MONITOR_LISTENER_PARAMETERSENTRY.containing_type = _VNFBD_PROCEEDINGS_MONITOR_LISTENER
_VNFBD_PROCEEDINGS_MONITOR_LISTENER.fields_by_name['parameters'].message_type = _VNFBD_PROCEEDINGS_MONITOR_LISTENER_PARAMETERSENTRY
_VNFBD_PROCEEDINGS_MONITOR_LISTENER.fields_by_name['sched'].message_type = _VNFBD_PROCEEDINGS_MONITOR_LISTENER_SCHED
_VNFBD_PROCEEDINGS_MONITOR_LISTENER.containing_type = _VNFBD_PROCEEDINGS_MONITOR
_VNFBD_PROCEEDINGS_MONITOR_LISTENERSENTRY.fields_by_name['value'].message_type = _VNFBD_PROCEEDINGS_MONITOR_LISTENER
_VNFBD_PROCEEDINGS_MONITOR_LISTENERSENTRY.containing_type = _VNFBD_PROCEEDINGS_MONITOR
_VNFBD_PROCEEDINGS_MONITOR.fields_by_name['listeners'].message_type = _VNFBD_PROCEEDINGS_MONITOR_LISTENERSENTRY
_VNFBD_PROCEEDINGS_MONITOR.containing_type = _VNFBD_PROCEEDINGS
_VNFBD_PROCEEDINGS_ATTRIBUTESENTRY.fields_by_name['value'].message_type = _VNFBD_PROCEEDINGS_ATTRIBUTE
_VNFBD_PROCEEDINGS_ATTRIBUTESENTRY.containing_type = _VNFBD_PROCEEDINGS
_VNFBD_PROCEEDINGS_AGENTSENTRY.fields_by_name['value'].message_type = _VNFBD_PROCEEDINGS_AGENT
_VNFBD_PROCEEDINGS_AGENTSENTRY.containing_type = _VNFBD_PROCEEDINGS
_VNFBD_PROCEEDINGS_MONITORSENTRY.fields_by_name['value'].message_type = _VNFBD_PROCEEDINGS_MONITOR
_VNFBD_PROCEEDINGS_MONITORSENTRY.containing_type = _VNFBD_PROCEEDINGS
_VNFBD_PROCEEDINGS.fields_by_name['attributes'].message_type = _VNFBD_PROCEEDINGS_ATTRIBUTESENTRY
_VNFBD_PROCEEDINGS.fields_by_name['agents'].message_type = _VNFBD_PROCEEDINGS_AGENTSENTRY
_VNFBD_PROCEEDINGS.fields_by_name['monitors'].message_type = _VNFBD_PROCEEDINGS_MONITORSENTRY
_VNFBD_PROCEEDINGS.containing_type = _VNFBD
_VNFBD.fields_by_name['experiments'].message_type = _VNFBD_EXPERIMENTS
_VNFBD.fields_by_name['scenario'].message_type = _SCENARIO
_VNFBD.fields_by_name['proceedings'].message_type = _VNFBD_PROCEEDINGS
DESCRIPTOR.message_types_by_name['Scenario'] = _SCENARIO
DESCRIPTOR.message_types_by_name['VnfBd'] = _VNFBD
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Scenario = _reflection.GeneratedProtocolMessageType('Scenario', (_message.Message,), {
'Link' : _reflection.GeneratedProtocolMessageType('Link', (_message.Message,), {
'DESCRIPTOR' : _SCENARIO_LINK,
'__module__' : 'vnf_bd_pb2'
# @@protoc_insertion_point(class_scope:gym.Scenario.Link)
})
,
'Node' : _reflection.GeneratedProtocolMessageType('Node', (_message.Message,), {
'ConnectionPoint' : _reflection.GeneratedProtocolMessageType('ConnectionPoint', (_message.Message,), {
'DESCRIPTOR' : _SCENARIO_NODE_CONNECTIONPOINT,
'__module__' : 'vnf_bd_pb2'
# @@protoc_insertion_point(class_scope:gym.Scenario.Node.ConnectionPoint)
})
,
'Lifecycle' : _reflection.GeneratedProtocolMessageType('Lifecycle', (_message.Message,), {
'Parameter' : _reflection.GeneratedProtocolMessageType('Parameter', (_message.Message,), {
'DESCRIPTOR' : _SCENARIO_NODE_LIFECYCLE_PARAMETER,
'__module__' : 'vnf_bd_pb2'
# @@protoc_insertion_point(class_scope:gym.Scenario.Node.Lifecycle.Parameter)
})
,
'ParametersEntry' : _reflection.GeneratedProtocolMessageType('ParametersEntry', (_message.Message,), {
'DESCRIPTOR' : _SCENARIO_NODE_LIFECYCLE_PARAMETERSENTRY,
'__module__' : 'vnf_bd_pb2'
# @@protoc_insertion_point(class_scope:gym.Scenario.Node.Lifecycle.ParametersEntry)
})
,
'DESCRIPTOR' : _SCENARIO_NODE_LIFECYCLE,
'__module__' : 'vnf_bd_pb2'
# @@protoc_insertion_point(class_scope:gym.Scenario.Node.Lifecycle)
})
,
'Relationship' : _reflection.GeneratedProtocolMessageType('Relationship', (_message.Message,), {
'DESCRIPTOR' : _SCENARIO_NODE_RELATIONSHIP,
'__module__' : 'vnf_bd_pb2'
# @@protoc_insertion_point(class_scope:gym.Scenario.Node.Relationship)
})
,
'Resources' : _reflection.GeneratedProtocolMessageType('Resources', (_message.Message,), {
'Cpu' : _reflection.GeneratedProtocolMessageType('Cpu', (_message.Message,), {
'DESCRIPTOR' : _SCENARIO_NODE_RESOURCES_CPU,
'__module__' : 'vnf_bd_pb2'
# @@protoc_insertion_point(class_scope:gym.Scenario.Node.Resources.Cpu)
})
,
'Memory' : _reflection.GeneratedProtocolMessageType('Memory', (_message.Message,), {
'DESCRIPTOR' : _SCENARIO_NODE_RESOURCES_MEMORY,
'__module__' : 'vnf_bd_pb2'
# @@protoc_insertion_point(class_scope:gym.Scenario.Node.Resources.Memory)
})
,
'Storage' : _reflection.GeneratedProtocolMessageType('Storage', (_message.Message,), {
'DESCRIPTOR' : _SCENARIO_NODE_RESOURCES_STORAGE,
'__module__' : 'vnf_bd_pb2'
# @@protoc_insertion_point(class_scope:gym.Scenario.Node.Resources.Storage)
})
,
'DESCRIPTOR' : _SCENARIO_NODE_RESOURCES,
'__module__' : 'vnf_bd_pb2'
# @@protoc_insertion_point(class_scope:gym.Scenario.Node.Resources)
})
,
'ConnectionPointsEntry' : _reflection.GeneratedProtocolMessageType('ConnectionPointsEntry', (_message.Message,), {
'DESCRIPTOR' : _SCENARIO_NODE_CONNECTIONPOINTSENTRY,
'__module__' : 'vnf_bd_pb2'
# @@protoc_insertion_point(class_scope:gym.Scenario.Node.ConnectionPointsEntry)
})
,
'LifecycleEntry' : _reflection.GeneratedProtocolMessageType('LifecycleEntry', (_message.Message,), {
'DESCRIPTOR' : _SCENARIO_NODE_LIFECYCLEENTRY,
'__module__' : 'vnf_bd_pb2'
# @@protoc_insertion_point(class_scope:gym.Scenario.Node.LifecycleEntry)
})
,
'RelationshipsEntry' : _reflection.GeneratedProtocolMessageType('RelationshipsEntry', (_message.Message,), {
'DESCRIPTOR' : _SCENARIO_NODE_RELATIONSHIPSENTRY,
'__module__' : 'vnf_bd_pb2'
# @@protoc_insertion_point(class_scope:gym.Scenario.Node.RelationshipsEntry)
})
,
'DESCRIPTOR' : _SCENARIO_NODE,
'__module__' : 'vnf_bd_pb2'
# @@protoc_insertion_point(class_scope:gym.Scenario.Node)
})
,
'Policy' : _reflection.GeneratedProtocolMessageType('Policy', (_message.Message,), {
'DESCRIPTOR' : _SCENARIO_POLICY,
'__module__' : 'vnf_bd_pb2'
# @@protoc_insertion_point(class_scope:gym.Scenario.Policy)
})
,
'LinksEntry' : _reflection.GeneratedProtocolMessageType('LinksEntry', (_message.Message,), {
'DESCRIPTOR' : _SCENARIO_LINKSENTRY,
'__module__' : 'vnf_bd_pb2'
# @@protoc_insertion_point(class_scope:gym.Scenario.LinksEntry)
})
,
'NodesEntry' : _reflection.GeneratedProtocolMessageType('NodesEntry', (_message.Message,), {
'DESCRIPTOR' : _SCENARIO_NODESENTRY,
'__module__' : 'vnf_bd_pb2'
# @@protoc_insertion_point(class_scope:gym.Scenario.NodesEntry)
})
,
'PoliciesEntry' : _reflection.GeneratedProtocolMessageType('PoliciesEntry', (_message.Message,), {
'DESCRIPTOR' : _SCENARIO_POLICIESENTRY,
'__module__' : 'vnf_bd_pb2'
# @@protoc_insertion_point(class_scope:gym.Scenario.PoliciesEntry)
})
,
'DESCRIPTOR' : _SCENARIO,
'__module__' : 'vnf_bd_pb2'
# @@protoc_insertion_point(class_scope:gym.Scenario)
})
_sym_db.RegisterMessage(Scenario)
_sym_db.RegisterMessage(Scenario.Link)
_sym_db.RegisterMessage(Scenario.Node)
_sym_db.RegisterMessage(Scenario.Node.ConnectionPoint)
_sym_db.RegisterMessage(Scenario.Node.Lifecycle)
_sym_db.RegisterMessage(Scenario.Node.Lifecycle.Parameter)
_sym_db.RegisterMessage(Scenario.Node.Lifecycle.ParametersEntry)
_sym_db.RegisterMessage(Scenario.Node.Relationship)
_sym_db.RegisterMessage(Scenario.Node.Resources)
_sym_db.RegisterMessage(Scenario.Node.Resources.Cpu)
_sym_db.RegisterMessage(Scenario.Node.Resources.Memory)
_sym_db.RegisterMessage(Scenario.Node.Resources.Storage)
_sym_db.RegisterMessage(Scenario.Node.ConnectionPointsEntry)
_sym_db.RegisterMessage(Scenario.Node.LifecycleEntry)
_sym_db.RegisterMessage(Scenario.Node.RelationshipsEntry)
_sym_db.RegisterMessage(Scenario.Policy)
_sym_db.RegisterMessage(Scenario.LinksEntry)
_sym_db.RegisterMessage(Scenario.NodesEntry)
_sym_db.RegisterMessage(Scenario.PoliciesEntry)
VnfBd = _reflection.GeneratedProtocolMessageType('VnfBd', (_message.Message,), {
'Experiments' : _reflection.GeneratedProtocolMessageType('Experiments', (_message.Message,), {
'DESCRIPTOR' : _VNFBD_EXPERIMENTS,
'__module__' : 'vnf_bd_pb2'
# @@protoc_insertion_point(class_scope:gym.VnfBd.Experiments)
})
,
'Proceedings' : _reflection.GeneratedProtocolMessageType('Proceedings', (_message.Message,), {
'Attribute' : _reflection.GeneratedProtocolMessageType('Attribute', (_message.Message,), {
'DESCRIPTOR' : _VNFBD_PROCEEDINGS_ATTRIBUTE,
'__module__' : 'vnf_bd_pb2'
# @@protoc_insertion_point(class_scope:gym.VnfBd.Proceedings.Attribute)
})
,
'Agent' : _reflection.GeneratedProtocolMessageType('Agent', (_message.Message,), {
'Prober' : _reflection.GeneratedProtocolMessageType('Prober', (_message.Message,), {
'Parameter' : _reflection.GeneratedProtocolMessageType('Parameter', (_message.Message,), {
'DESCRIPTOR' : _VNFBD_PROCEEDINGS_AGENT_PROBER_PARAMETER,
'__module__' : 'vnf_bd_pb2'
# @@protoc_insertion_point(class_scope:gym.VnfBd.Proceedings.Agent.Prober.Parameter)
})
,
'Sched' : _reflection.GeneratedProtocolMessageType('Sched', (_message.Message,), {
'DESCRIPTOR' : _VNFBD_PROCEEDINGS_AGENT_PROBER_SCHED,
'__module__' : 'vnf_bd_pb2'
# @@protoc_insertion_point(class_scope:gym.VnfBd.Proceedings.Agent.Prober.Sched)
})
,
'ParametersEntry' : _reflection.GeneratedProtocolMessageType('ParametersEntry', (_message.Message,), {
'DESCRIPTOR' : _VNFBD_PROCEEDINGS_AGENT_PROBER_PARAMETERSENTRY,
'__module__' : 'vnf_bd_pb2'
# @@protoc_insertion_point(class_scope:gym.VnfBd.Proceedings.Agent.Prober.ParametersEntry)
})
,
'DESCRIPTOR' : _VNFBD_PROCEEDINGS_AGENT_PROBER,
'__module__' : 'vnf_bd_pb2'
# @@protoc_insertion_point(class_scope:gym.VnfBd.Proceedings.Agent.Prober)
})
,
'ProbersEntry' : _reflection.GeneratedProtocolMessageType('ProbersEntry', (_message.Message,), {
'DESCRIPTOR' : _VNFBD_PROCEEDINGS_AGENT_PROBERSENTRY,
'__module__' : 'vnf_bd_pb2'
# @@protoc_insertion_point(class_scope:gym.VnfBd.Proceedings.Agent.ProbersEntry)
})
,
'DESCRIPTOR' : _VNFBD_PROCEEDINGS_AGENT,
'__module__' : 'vnf_bd_pb2'
# @@protoc_insertion_point(class_scope:gym.VnfBd.Proceedings.Agent)
})
,
'Monitor' : _reflection.GeneratedProtocolMessageType('Monitor', (_message.Message,), {
'Host' : _reflection.GeneratedProtocolMessageType('Host', (_message.Message,), {
'DESCRIPTOR' : _VNFBD_PROCEEDINGS_MONITOR_HOST,
'__module__' : 'vnf_bd_pb2'
# @@protoc_insertion_point(class_scope:gym.VnfBd.Proceedings.Monitor.Host)
})
,
'Listener' : _reflection.GeneratedProtocolMessageType('Listener', (_message.Message,), {
'Parameter' : _reflection.GeneratedProtocolMessageType('Parameter', (_message.Message,), {
'DESCRIPTOR' : _VNFBD_PROCEEDINGS_MONITOR_LISTENER_PARAMETER,
'__module__' : 'vnf_bd_pb2'
# @@protoc_insertion_point(class_scope:gym.VnfBd.Proceedings.Monitor.Listener.Parameter)
})
,
'Sched' : _reflection.GeneratedProtocolMessageType('Sched', (_message.Message,), {
'DESCRIPTOR' : _VNFBD_PROCEEDINGS_MONITOR_LISTENER_SCHED,
'__module__' : 'vnf_bd_pb2'
# @@protoc_insertion_point(class_scope:gym.VnfBd.Proceedings.Monitor.Listener.Sched)
})
,
'ParametersEntry' : _reflection.GeneratedProtocolMessageType('ParametersEntry', (_message.Message,), {
'DESCRIPTOR' : _VNFBD_PROCEEDINGS_MONITOR_LISTENER_PARAMETERSENTRY,
'__module__' : 'vnf_bd_pb2'
# @@protoc_insertion_point(class_scope:gym.VnfBd.Proceedings.Monitor.Listener.ParametersEntry)
})
,
'DESCRIPTOR' : _VNFBD_PROCEEDINGS_MONITOR_LISTENER,
'__module__' : 'vnf_bd_pb2'
# @@protoc_insertion_point(class_scope:gym.VnfBd.Proceedings.Monitor.Listener)
})
,
'ListenersEntry' : _reflection.GeneratedProtocolMessageType('ListenersEntry', (_message.Message,), {
'DESCRIPTOR' : _VNFBD_PROCEEDINGS_MONITOR_LISTENERSENTRY,
'__module__' : 'vnf_bd_pb2'
# @@protoc_insertion_point(class_scope:gym.VnfBd.Proceedings.Monitor.ListenersEntry)
})
,
'DESCRIPTOR' : _VNFBD_PROCEEDINGS_MONITOR,
'__module__' : 'vnf_bd_pb2'
# @@protoc_insertion_point(class_scope:gym.VnfBd.Proceedings.Monitor)
})
,
'AttributesEntry' : _reflection.GeneratedProtocolMessageType('AttributesEntry', (_message.Message,), {
'DESCRIPTOR' : _VNFBD_PROCEEDINGS_ATTRIBUTESENTRY,
'__module__' : 'vnf_bd_pb2'
# @@protoc_insertion_point(class_scope:gym.VnfBd.Proceedings.AttributesEntry)
})
,
'AgentsEntry' : _reflection.GeneratedProtocolMessageType('AgentsEntry', (_message.Message,), {
'DESCRIPTOR' : _VNFBD_PROCEEDINGS_AGENTSENTRY,
'__module__' : 'vnf_bd_pb2'
# @@protoc_insertion_point(class_scope:gym.VnfBd.Proceedings.AgentsEntry)
})
,
'MonitorsEntry' : _reflection.GeneratedProtocolMessageType('MonitorsEntry', (_message.Message,), {
'DESCRIPTOR' : _VNFBD_PROCEEDINGS_MONITORSENTRY,
'__module__' : 'vnf_bd_pb2'
# @@protoc_insertion_point(class_scope:gym.VnfBd.Proceedings.MonitorsEntry)
})
,
'DESCRIPTOR' : _VNFBD_PROCEEDINGS,
'__module__' : 'vnf_bd_pb2'
# @@protoc_insertion_point(class_scope:gym.VnfBd.Proceedings)
})
,
'DESCRIPTOR' : _VNFBD,
'__module__' : 'vnf_bd_pb2'
# @@protoc_insertion_point(class_scope:gym.VnfBd)
})
_sym_db.RegisterMessage(VnfBd)
_sym_db.RegisterMessage(VnfBd.Experiments)
_sym_db.RegisterMessage(VnfBd.Proceedings)
_sym_db.RegisterMessage(VnfBd.Proceedings.Attribute)
_sym_db.RegisterMessage(VnfBd.Proceedings.Agent)
_sym_db.RegisterMessage(VnfBd.Proceedings.Agent.Prober)
_sym_db.RegisterMessage(VnfBd.Proceedings.Agent.Prober.Parameter)
_sym_db.RegisterMessage(VnfBd.Proceedings.Agent.Prober.Sched)
_sym_db.RegisterMessage(VnfBd.Proceedings.Agent.Prober.ParametersEntry)
_sym_db.RegisterMessage(VnfBd.Proceedings.Agent.ProbersEntry)
_sym_db.RegisterMessage(VnfBd.Proceedings.Monitor)
_sym_db.RegisterMessage(VnfBd.Proceedings.Monitor.Host)
_sym_db.RegisterMessage(VnfBd.Proceedings.Monitor.Listener)
_sym_db.RegisterMessage(VnfBd.Proceedings.Monitor.Listener.Parameter)
_sym_db.RegisterMessage(VnfBd.Proceedings.Monitor.Listener.Sched)
_sym_db.RegisterMessage(VnfBd.Proceedings.Monitor.Listener.ParametersEntry)
_sym_db.RegisterMessage(VnfBd.Proceedings.Monitor.ListenersEntry)
_sym_db.RegisterMessage(VnfBd.Proceedings.AttributesEntry)
_sym_db.RegisterMessage(VnfBd.Proceedings.AgentsEntry)
_sym_db.RegisterMessage(VnfBd.Proceedings.MonitorsEntry)
_SCENARIO_NODE_LIFECYCLE_PARAMETERSENTRY._options = None
_SCENARIO_NODE_CONNECTIONPOINTSENTRY._options = None
_SCENARIO_NODE_LIFECYCLEENTRY._options = None
_SCENARIO_NODE_RELATIONSHIPSENTRY._options = None
_SCENARIO_LINKSENTRY._options = None
_SCENARIO_NODESENTRY._options = None
_SCENARIO_POLICIESENTRY._options = None
_VNFBD_PROCEEDINGS_AGENT_PROBER_PARAMETERSENTRY._options = None
_VNFBD_PROCEEDINGS_AGENT_PROBERSENTRY._options = None
_VNFBD_PROCEEDINGS_MONITOR_LISTENER_PARAMETERSENTRY._options = None
_VNFBD_PROCEEDINGS_MONITOR_LISTENERSENTRY._options = None
_VNFBD_PROCEEDINGS_ATTRIBUTESENTRY._options = None
_VNFBD_PROCEEDINGS_AGENTSENTRY._options = None
_VNFBD_PROCEEDINGS_MONITORSENTRY._options = None
# @@protoc_insertion_point(module_scope)
| 44.161804
| 7,181
| 0.751386
| 12,560
| 99,894
| 5.634156
| 0.032086
| 0.046011
| 0.068466
| 0.064099
| 0.854476
| 0.805017
| 0.757889
| 0.727846
| 0.70166
| 0.685042
| 0
| 0.034046
| 0.125843
| 99,894
| 2,261
| 7,182
| 44.181336
| 0.776336
| 0.029501
| 0
| 0.716235
| 1
| 0.000941
| 0.167344
| 0.123207
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.001882
| 0
| 0.001882
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
76772a7b638b0274112781a14a649b9fa820e40b
| 6,583
|
py
|
Python
|
tests/sortedsets_tests.py
|
gmr/tredis
|
2e91c6a58a35460be0525c51ac6a98fde3b506ad
|
[
"BSD-3-Clause"
] | 22
|
2015-11-16T18:24:23.000Z
|
2019-01-22T06:41:51.000Z
|
tests/sortedsets_tests.py
|
gmr/tredis
|
2e91c6a58a35460be0525c51ac6a98fde3b506ad
|
[
"BSD-3-Clause"
] | 8
|
2016-01-26T21:55:15.000Z
|
2020-11-17T18:00:13.000Z
|
tests/sortedsets_tests.py
|
gmr/tredis
|
2e91c6a58a35460be0525c51ac6a98fde3b506ad
|
[
"BSD-3-Clause"
] | 9
|
2015-11-28T19:32:14.000Z
|
2020-10-19T06:47:26.000Z
|
import mock
from tornado import testing
from tredis import exceptions
from . import base
class SortedSetTests(base.AsyncTestCase):
@testing.gen_test
def test_zadd_single(self):
key, value = self.uuid4(2)
result = yield self.client.zadd(key, '1', value)
self.assertEqual(result, 1)
@testing.gen_test
def test_zadd_multiple(self):
key, value1, value2, value3 = self.uuid4(4)
result = yield self.client.zadd(key, '1', value1, '2', value2,
'3', value3)
self.assertEqual(result, 3)
@testing.gen_test
def test_zadd_dict(self):
key, value1, value2, value3 = self.uuid4(4)
result = yield self.client.zadd(key, {'1': value1, '2': value2,
'3': value3})
self.assertEqual(result, 3)
@testing.gen_test
def test_zadd_multiple_dupe(self):
key, value1, value2, value3 = self.uuid4(4)
result = yield self.client.zadd(key, '1', value1, '2', value2,
'3', value3, '4', value3)
self.assertEqual(result, 3)
@testing.gen_test
def test_zadd_ch(self):
key, value1, value2, value3 = self.uuid4(4)
result = yield self.client.zadd(key, '1', value1, '2', value2)
self.assertEqual(result, 2)
result = yield self.client.zadd(key, '2', value1, '3', value2,
'4', value3, ch=True)
self.assertEqual(result, 3)
@testing.gen_test
def test_zadd_xx(self):
key, value1, value2, value3 = self.uuid4(4)
result = yield self.client.zadd(key, '1', value1, '2', value2)
self.assertEqual(result, 2)
result = yield self.client.zadd(key, '2', value1, '3', value2,
'4', value3, xx=True)
self.assertEqual(result, 0)
@testing.gen_test
def test_zadd_nx(self):
key, value1, value2, value3 = self.uuid4(4)
result = yield self.client.zadd(key, '1', value1, '2', value2)
self.assertEqual(result, 2)
result = yield self.client.zadd(key, '2', value1, '3', value2,
'4', value3, nx=True, ch=True)
self.assertEqual(result, 1)
@testing.gen_test
def test_zadd_incr(self):
key, value = self.uuid4(2)
result = yield self.client.zadd(key, '1', value)
self.assertEqual(result, 1)
result = yield self.client.zadd(key, '10', value, incr=True)
self.assertEqual(result, b'11')
@testing.gen_test
def test_zadd_with_error(self):
key, score, value = self.uuid4(3)
self._execute_result = exceptions.RedisError('Test Exception')
with mock.patch.object(self.client, '_execute', self._execute):
with self.assertRaises(exceptions.RedisError):
yield self.client.zadd(key, score, value)
@testing.gen_test
def test_zcard_with_extant_set(self):
key, value1, value2, value3 = self.uuid4(4)
result = yield self.client.zadd(key, '1', value1, '2', value2,
'3', value3)
self.assertEqual(result, 3)
result = yield self.client.zcard(key)
self.assertEqual(result, 3)
@testing.gen_test
def test_zcard_with_nonextant_set(self):
key = self.uuid4()
result = yield self.client.zcard(key)
self.assertEqual(result, 0)
@testing.gen_test
def test_zrangebyscore(self):
key, value1, value2, value3 = self.uuid4(4)
result = yield self.client.zadd(key, '1', value1, '2', value2,
'3', value3)
self.assertEqual(result, 3)
result = yield self.client.zrangebyscore(key, '1', '2')
self.assertListEqual(result, [value1, value2])
@testing.gen_test
def test_zrangebyscore_withitems(self):
key, value1, value2, value3 = self.uuid4(4)
result = yield self.client.zadd(key, '1', value1, '2', value2,
'3', value3)
self.assertEqual(result, 3)
result = yield self.client.zrangebyscore(key, '1', '2',
with_scores=True)
self.assertListEqual(result, [value1, b'1', value2, b'2'])
@testing.gen_test
def test_zrangebyscore_offset(self):
key, value1, value2, value3 = self.uuid4(4)
result = yield self.client.zadd(key, '1', value1, '2', value2,
'3', value3)
self.assertEqual(result, 3)
result = yield self.client.zrangebyscore(key, '1', '2',
offset=1, count=20)
self.assertListEqual(result, [value2])
@testing.gen_test
def test_zrangebyscore_count(self):
key, value1, value2, value3 = self.uuid4(4)
result = yield self.client.zadd(key, '1', value1, '2', value2,
'3', value3)
self.assertEqual(result, 3)
result = yield self.client.zrangebyscore(key, '1', '3',
offset=0, count=1)
self.assertListEqual(result, [value1])
@testing.gen_test
def test_zremrangebyscore(self):
key, value1, value2, value3 = self.uuid4(4)
result = yield self.client.zadd(key, '1', value1, '2', value2,
'3', value3)
self.assertEqual(result, 3)
result = yield self.client.zremrangebyscore(key, '1', '2')
self.assertEqual(result, 2)
@testing.gen_test
def test_zremrangebyscore_inf(self):
key, value1, value2, value3 = self.uuid4(4)
result = yield self.client.zadd(key, '1', value1, '2', value2,
'3', value3)
self.assertEqual(result, 3)
result = yield self.client.zremrangebyscore(key, '(1', 'inf')
self.assertEqual(result, 2)
@testing.gen_test
def test_zscore_with_member_of_set(self):
key, value1, value2, value3 = self.uuid4(4)
result = yield self.client.zadd(key, '1', value1, '2', value2,
'3', value3)
self.assertEqual(result, 3)
result = yield self.client.zscore(key, value1)
self.assertEqual(result, b'1')
@testing.gen_test
def test_zscore_with_nonmember_of_set(self):
key, value1 = self.uuid4(2)
result = yield self.client.zscore(key, value1)
self.assertEqual(result, None)
| 39.419162
| 71
| 0.563725
| 776
| 6,583
| 4.689433
| 0.090206
| 0.087936
| 0.127782
| 0.173124
| 0.829898
| 0.815059
| 0.764496
| 0.713383
| 0.713383
| 0.644133
| 0
| 0.051747
| 0.313079
| 6,583
| 166
| 72
| 39.656627
| 0.752985
| 0
| 0
| 0.636364
| 0
| 0
| 0.014431
| 0
| 0
| 0
| 0
| 0
| 0.216783
| 1
| 0.132867
| false
| 0
| 0.027972
| 0
| 0.167832
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7688fc92c897ede515ed83c65f35a5b94f7d4d17
| 1,418
|
py
|
Python
|
logger.py
|
misc77/wordDSEGenerator
|
f879a8140f322b1f59cd45d0f512327cd9a648ed
|
[
"MIT"
] | null | null | null |
logger.py
|
misc77/wordDSEGenerator
|
f879a8140f322b1f59cd45d0f512327cd9a648ed
|
[
"MIT"
] | 1
|
2020-11-10T14:18:33.000Z
|
2020-11-10T14:18:33.000Z
|
logger.py
|
misc77/wordDSEGenerator
|
f879a8140f322b1f59cd45d0f512327cd9a648ed
|
[
"MIT"
] | null | null | null |
import logging
import configProvider
import configparser
from resources import Resources
def getLogger():
    """Create and return the application logger named "DSEGenerator".

    The level is read from the Logging/APPLICATION_LOG_LEVEL config
    entry (defaulting to logging.DEBUG) and records are written both to
    the application log file and to the console.

    :return: the configured logging.Logger instance
    """
    logLevel = configProvider.getConfigEntryOrDefault('Logging', 'APPLICATION_LOG_LEVEL', logging.DEBUG)
    logger = logging.getLogger("DSEGenerator")
    logger.setLevel(logLevel)
    # logging.getLogger returns the same cached object on every call, so
    # unconditionally re-adding handlers (as the previous version did)
    # duplicates every log line on the second and later calls.
    if not logger.handlers:
        formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
        for handler in (logging.FileHandler(Resources.getLogFile()),
                        logging.StreamHandler()):
            handler.setLevel(logLevel)
            handler.setFormatter(formatter)
            logger.addHandler(handler)
    return logger
def getLoggerCtx(context):
    """Create and return a logger named after *context*.

    Identical configuration to getLogger(): the level comes from the
    Logging/APPLICATION_LOG_LEVEL config entry (default logging.DEBUG)
    and records go to both the application log file and the console.

    :param context: logger name, shown in the %(name)s field of records
    :return: the configured logging.Logger instance
    """
    logLevel = configProvider.getConfigEntryOrDefault('Logging', 'APPLICATION_LOG_LEVEL', logging.DEBUG)
    logger = logging.getLogger(context)
    logger.setLevel(logLevel)
    # Loggers are cached by name; a second call with the same context
    # previously stacked another file+console handler pair, emitting
    # every record multiple times. Attach handlers only once.
    if not logger.handlers:
        formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
        for handler in (logging.FileHandler(Resources.getLogFile()),
                        logging.StreamHandler()):
            handler.setLevel(logLevel)
            handler.setFormatter(formatter)
            logger.addHandler(handler)
    return logger
| 38.324324
| 105
| 0.74189
| 125
| 1,418
| 8.384
| 0.256
| 0.091603
| 0.085878
| 0.099237
| 0.874046
| 0.874046
| 0.874046
| 0.874046
| 0.874046
| 0.874046
| 0
| 0
| 0.156559
| 1,418
| 37
| 106
| 38.324324
| 0.876254
| 0
| 0
| 0.75
| 0
| 0
| 0.124367
| 0.030369
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0.125
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
768fd3657a7863da03e95ea6d670825f79ad7212
| 3,250
|
py
|
Python
|
migrations/0045_auto_20190225_1821.py
|
audaciouscode/PassiveDataKit-Django
|
ed1e00c436801b9f49a3e0e6657c2adb6b2ba3d4
|
[
"Apache-2.0"
] | 5
|
2016-01-26T19:19:44.000Z
|
2018-12-12T18:04:04.000Z
|
migrations/0045_auto_20190225_1821.py
|
audacious-software/PassiveDataKit-Django
|
da91a375c075ceec938f2c9bb6b011f9f019b024
|
[
"Apache-2.0"
] | 6
|
2020-02-17T20:16:28.000Z
|
2021-12-13T21:51:20.000Z
|
migrations/0045_auto_20190225_1821.py
|
audacious-software/PassiveDataKit-Django
|
da91a375c075ceec938f2c9bb6b011f9f019b024
|
[
"Apache-2.0"
] | 4
|
2020-01-29T15:36:58.000Z
|
2021-06-01T18:55:26.000Z
|
# pylint: skip-file
# -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-02-25 18:21
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
from ..models import install_supports_jsonfield
class Migration(migrations.Migration):
    """Adds the AppConfiguration model and relaxes DataFile foreign keys.

    The two branches of the original migration were identical except for
    the storage type of ``configuration_json`` (native JSONField when the
    database supports it, large TextField otherwise), so the field is
    chosen once and the operations list is written a single time.
    """

    dependencies = [
        ('passive_data_kit', '0044_dataserverapitoken'),
    ]

    # Pick the configuration_json column type once instead of duplicating
    # the entire operations list per backend capability.
    if install_supports_jsonfield():
        _configuration_json_field = django.contrib.postgres.fields.jsonb.JSONField()
    else:
        _configuration_json_field = models.TextField(max_length=34359738368)

    operations = [
        migrations.CreateModel(
            name='AppConfiguration',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=1024)),
                ('id_pattern', models.CharField(max_length=1024)),
                ('context_pattern', models.CharField(default='.*', max_length=1024)),
                ('configuration_json', _configuration_json_field),
                ('evaluate_order', models.IntegerField(default=1)),
                ('is_valid', models.BooleanField(default=False)),
                ('is_enabled', models.BooleanField(default=True)),
            ],
        ),
        migrations.AlterField(
            model_name='datafile',
            name='data_bundle',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='data_files', to='passive_data_kit.DataBundle'),
        ),
        migrations.AlterField(
            model_name='datafile',
            name='data_point',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='data_files', to='passive_data_kit.DataPoint'),
        ),
    ]
| 46.428571
| 170
| 0.582462
| 317
| 3,250
| 5.769716
| 0.280757
| 0.034992
| 0.042646
| 0.060142
| 0.823401
| 0.788409
| 0.788409
| 0.788409
| 0.788409
| 0.788409
| 0
| 0.02563
| 0.291692
| 3,250
| 69
| 171
| 47.101449
| 0.768897
| 0.026769
| 0
| 0.766667
| 1
| 0
| 0.145932
| 0.040836
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.083333
| 0.066667
| 0
| 0.1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 9
|
4f24614860941426487d1c898cb9c8d2e5316e0c
| 5,079
|
py
|
Python
|
tests/test_deque.py
|
duruyi/fastrq
|
39b64f278a234acf231b62d684494bad13d8feb7
|
[
"MIT"
] | 17
|
2018-11-24T08:02:25.000Z
|
2022-02-25T15:43:23.000Z
|
tests/test_deque.py
|
duruyi/fastrq
|
39b64f278a234acf231b62d684494bad13d8feb7
|
[
"MIT"
] | null | null | null |
tests/test_deque.py
|
duruyi/fastrq
|
39b64f278a234acf231b62d684494bad13d8feb7
|
[
"MIT"
] | 1
|
2019-03-14T04:55:04.000Z
|
2019-03-14T04:55:04.000Z
|
import time
import unittest
from fastrq.deque import Deque, CappedDeque, OfCappedDeque
class TestDeque(unittest.TestCase):
    """Integration tests for the plain double-ended queue."""

    def setUp(self):
        # Destroy the key up front so leftovers from a crashed run don't
        # leak into this one.
        self.queue = Deque("fastrq_deque")
        self.queue.destruct()

    def tearDown(self):
        self.queue.destruct()

    def test_push_pop(self):
        """Values pushed at either end come back in the expected order."""
        ql = self.queue.push_back([1, 2])
        self.assertEqual(ql, 2)
        self.assertEqual(self.queue.length(), 2)
        head = self.queue.pop_front()
        self.assertEqual(head, '1')
        self.assertEqual(self.queue.length(), 1)
        self.assertEqual(len(self.queue), 1)
        self.queue.push_front([3, 4, 5, 6, 7])
        head3 = self.queue.pop_front(3)
        self.assertEqual(head3, ['7', '6', '5'])
        self.assertEqual(self.queue.pop_back(3), ['2', '3', '4'])
        self.assertEqual(self.queue.pop_back(), None)

    def test_push_e(self):
        """push_*_ne / push_*_ae respect existence preconditions."""
        self.assertEqual(self.queue.push_front_ne(1), 1)
        self.assertFalse(self.queue.push_back_ne(1))
        self.queue.destruct()
        self.assertFalse(self.queue.push_front_ae(1))
        self.queue.push_front(1)
        self.assertEqual(self.queue.push_front_ae(1), 2)

    def test_push_ni(self):
        """push_back_ni reports (length, inserted) and skips duplicates."""
        self.assertEqual(self.queue.push_back_ni(1), (1, True))
        self.assertEqual(self.queue.push_back_ni(2), (2, True))
        self.assertEqual(self.queue.push_back_ni(4), (3, True))
        self.assertEqual(self.queue.push_back_ni(4), (3, False))
        self.assertEqual(self.queue.push_back_ni('apple'), (4, True))
        self.assertEqual(self.queue.pop_front(), '1')
        self.assertEqual(self.queue.pop_back(), 'apple')

    def test_range(self):
        """range() honours inclusive start/end indexes, -1 meaning the tail."""
        self.queue.push_back([1, 2, 3, 4])
        self.assertEqual(self.queue.range(0, -1), ['1', '2', '3', '4'])
        self.assertEqual(self.queue.range(0, 2), ['1', '2', '3'])
        self.assertEqual(self.queue.range(0, 0), ['1'])
        self.queue.destruct()
        self.assertEqual(self.queue.range(0, -1), [])

    def test_expire(self):
        """ttl() tracks expiry: -1 without one, the timeout once set, -2 after."""
        self.queue.push_back([1, 2])
        self.assertEqual(self.queue.ttl(), -1)
        # Fix: the original used expire(10) + sleep(11), stalling the
        # whole suite for 11 seconds; a 1-second expiry tests the same
        # contract.
        self.queue.expire(1)
        self.assertEqual(self.queue.ttl(), 1)
        time.sleep(1.5)
        self.assertEqual(self.queue.ttl(), -2)
class TestCappedDeque(unittest.TestCase):
    """Integration tests for the size-capped deque (capacity 3)."""

    def setUp(self):
        self.queue = CappedDeque("fastrq_capped_deque", 3)
        self.queue.destruct()

    def tearDown(self):
        self.queue.destruct()

    def test_push(self):
        """Pushing past the cap is rejected with 'err_qf'."""
        self.queue.push_back([1, 2])
        self.queue.push_front(3)
        expected_order = ['3', '1', '2']
        self.assertEqual(self.queue.range(0, -1), expected_order)
        self.assertEqual(self.queue.push_back(4), 'err_qf')

    def test_push_e(self):
        """Existence-conditional pushes behave like the uncapped deque."""
        self.assertEqual(self.queue.push_front_ne(1), 1)
        self.assertFalse(self.queue.push_back_ne(1))
        self.queue.destruct()
        self.assertFalse(self.queue.push_front_ae(1))
        self.queue.push_front(1)
        self.assertEqual(self.queue.push_front_ae(1), 2)

    def test_push_ni(self):
        """Non-duplicating pushes stop inserting once the cap is hit."""
        steps = [
            (self.queue.push_back_ni, 1, (1, True)),
            (self.queue.push_back_ni, 1, (1, False)),
            (self.queue.push_front_ni, 2, (2, True)),
            (self.queue.push_front_ni, 2, (2, False)),
            (self.queue.push_back_ni, 3, (3, True)),
            (self.queue.push_back_ni, 3, 'err_qf'),
            (self.queue.push_back_ni, 4, 'err_qf'),
        ]
        for push, value, expected in steps:
            self.assertEqual(push(value), expected)
        self.assertEqual(self.queue.pop_front(), '2')
        self.assertEqual(self.queue.pop_back(), '3')
class TestOfCappedDeque(unittest.TestCase):
    """Integration tests for the overflow-capped deque (capacity 3).

    Unlike CappedDeque, a push past the cap succeeds and reports which
    members were displaced from the opposite end.
    """

    def setUp(self):
        self.queue = OfCappedDeque("fastrq_of_capped_deque", 3)
        self.queue.destruct()

    def tearDown(self):
        self.queue.destruct()

    def test_push(self):
        """Pushes report (length, evicted); overflow displaces a member."""
        self.assertEqual(self.queue.push_back([1, 2]), (2, []))
        self.assertEqual(self.queue.push_front(3), (3, []))
        self.assertEqual(self.queue.push_back(4), (3, ['3']))
        self.assertEqual(self.queue.pop_front(), '1')
        self.assertEqual(self.queue.pop_back(), '4')

    def test_push_e(self):
        """Existence-conditional pushes report (length, evicted) too."""
        self.assertEqual(self.queue.push_front_ne(1), (1, []))
        self.assertFalse(self.queue.push_back_ne(1))
        self.queue.destruct()
        self.assertFalse(self.queue.push_front_ae(1))
        self.queue.push_front(1)
        self.assertEqual(self.queue.push_front_ae(1), (2, []))

    def test_push_ni(self):
        """Non-duplicating pushes report (length, evicted, inserted)."""
        steps = [
            (self.queue.push_back_ni, 'apple', (1, [], True)),
            (self.queue.push_back_ni, 'banana', (2, [], True)),
            (self.queue.push_back_ni, 'banana', (2, [], False)),
            (self.queue.push_front_ni, 'pear', (3, [], True)),
            (self.queue.push_front_ni, 'pear', (3, [], False)),
            (self.queue.push_front_ni, 'grape', (3, ['banana'], True)),
        ]
        for push, value, expected in steps:
            self.assertEqual(push(value), expected)
        self.assertEqual(self.queue.pop_front(), 'grape')
        self.assertEqual(self.queue.pop_back(), 'apple')
| 38.477273
| 82
| 0.62276
| 703
| 5,079
| 4.339972
| 0.085349
| 0.235988
| 0.298918
| 0.377581
| 0.867584
| 0.811209
| 0.778433
| 0.616847
| 0.528351
| 0.462144
| 0
| 0.0309
| 0.209884
| 5,079
| 131
| 83
| 38.770992
| 0.72938
| 0
| 0
| 0.420561
| 0
| 0
| 0.029736
| 0.004332
| 0
| 0
| 0
| 0
| 0.542056
| 1
| 0.158879
| false
| 0
| 0.028037
| 0
| 0.214953
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4f339e21a690e3ce557adcf61b159a4c40e4f406
| 2,607
|
py
|
Python
|
imlib/transform.py
|
potekang/shallow-fake
|
24b67ee4249d90f53db1516cdf262644a21a56be
|
[
"MIT"
] | null | null | null |
imlib/transform.py
|
potekang/shallow-fake
|
24b67ee4249d90f53db1516cdf262644a21a56be
|
[
"MIT"
] | null | null | null |
imlib/transform.py
|
potekang/shallow-fake
|
24b67ee4249d90f53db1516cdf262644a21a56be
|
[
"MIT"
] | null | null | null |
import numpy as np
import skimage.color as color
import skimage.transform as transform
# Convenience aliases: expose the skimage colour/geometry routines under
# image-centric names used throughout this package.
rgb2gray = color.rgb2gray
gray2rgb = color.gray2rgb
imresize = transform.resize
imrescale = transform.rescale
def immerge(images, n_rows=None, n_cols=None, padding=0, pad_value=0):
    """Merge N images into a single grid image of n_rows * n_cols cells.

    Parameters
    ----------
    images : numpy.array or object which can be converted to numpy.array
        Images in shape of N * H * W(* C=1 or 3).
    n_rows, n_cols : int, optional
        Grid dimensions; whichever is omitted is derived from the other
        (or from sqrt(N) when both are omitted).
    padding : int
        Pixels of spacing inserted between grid cells.
    pad_value : scalar
        Fill value used for the spacing and any unused cells.
    """
    images = np.array(images)
    n = images.shape[0]

    def _cells_needed(per_axis):
        # Number of cells on the other axis so the grid holds all n images.
        return int(n - 0.5) // per_axis + 1

    if n_rows:
        n_rows = max(min(n_rows, n), 1)
        n_cols = _cells_needed(n_rows)
    elif n_cols:
        n_cols = max(min(n_cols, n), 1)
        n_rows = _cells_needed(n_cols)
    else:
        n_rows = int(n ** 0.5)
        n_cols = _cells_needed(n_rows)

    h, w = images.shape[1], images.shape[2]
    cell_h, cell_w = h + padding, w + padding
    out_shape = (n_rows * h + (n_rows - 1) * padding,
                 n_cols * w + (n_cols - 1) * padding)
    if images.ndim == 4:
        out_shape += (images.shape[3],)

    canvas = np.full(out_shape, pad_value, dtype=images.dtype)
    for idx, image in enumerate(images):
        row, col = divmod(idx, n_cols)
        top, left = row * cell_h, col * cell_w
        canvas[top:top + h, left:left + w, ...] = image
    return canvas
def immerge_(images, n_rows=None, n_cols=None, padding=0, pad_value=0):
    """Return the last image of the batch as a fresh pad-initialized array.

    NOTE(review): the original body computed a grid layout exactly like
    immerge(), then hard-coded n_rows = n_cols = 1 and padding = 0 before
    using them, so every image was written into the same h*w slot and only
    the final one survived. That dead layout code and the commented-out
    prints are removed here; the observable behavior (a (H, W[, C]) array
    holding images[-1], dtype preserved) is unchanged. This looks like a
    debugging hack of immerge() — confirm whether a real merge was intended.

    Parameters
    ----------
    images : numpy.array or object which can be converted to numpy.array
        Images in shape of N * H * W(* C=1 or 3).
    n_rows, n_cols, padding : ignored
        Kept for signature compatibility with immerge().
    pad_value : scalar
        Initial fill value (fully overwritten when N >= 1).
    """
    images = np.array(images)
    h, w = images.shape[1], images.shape[2]
    shape = (h, w)
    if images.ndim == 4:
        shape += (images.shape[3],)
    img = np.full(shape, pad_value, dtype=images.dtype)
    # Each successive image overwrites the whole canvas; the last one wins,
    # exactly as in the original loop with its 1x1 grid.
    for image in images:
        img[...] = image
    return img
| 28.648352
| 72
| 0.540084
| 421
| 2,607
| 3.213777
| 0.154394
| 0.096083
| 0.029564
| 0.035477
| 0.818921
| 0.818921
| 0.818921
| 0.818921
| 0.818921
| 0.818921
| 0
| 0.030387
| 0.305715
| 2,607
| 90
| 73
| 28.966667
| 0.717127
| 0.20023
| 0
| 0.736842
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035088
| false
| 0
| 0.052632
| 0
| 0.122807
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
96d6e84d11dc4119e4ef50096a79bc6c48617039
| 5,140
|
py
|
Python
|
entangld/test/test_gets.py
|
DaxBot/python-entangld
|
24aafad6c0235fb02ac94fe28ae46c3eb6a158e0
|
[
"MIT"
] | null | null | null |
entangld/test/test_gets.py
|
DaxBot/python-entangld
|
24aafad6c0235fb02ac94fe28ae46c3eb6a158e0
|
[
"MIT"
] | null | null | null |
entangld/test/test_gets.py
|
DaxBot/python-entangld
|
24aafad6c0235fb02ac94fe28ae46c3eb6a158e0
|
[
"MIT"
] | null | null | null |
import unittest
import asyncio
from .helpers import block_until
from .. import entangld
class LocalGetExamples(unittest.TestCase):
    """Local store: get() resolves plain values and getter callables."""

    def setUp(self):
        self.store = entangld.Entangld()
        self.store.set("some_data", 0.0)

        async def get_later():
            await asyncio.sleep(0.01)
            return 1

        async def get_async():
            return 1

        def get_now():
            return 1

        # Register one getter of each flavour under a fixed path.
        for path, getter in (("later", get_later),
                             ("async", get_async),
                             ("now", get_now)):
            self.store.set(path, getter)

    def tearDown(self):
        self.store.subscriptions = []
        del self.store

    def test_simple_get(self):
        """Plain stored values come back unchanged."""
        value = block_until(self.store.get("some_data"))
        self.assertEqual(0.0, value)

    def test_get_function(self):
        """Plain-function getters are invoked on get()."""
        value = block_until(self.store.get("now"))
        self.assertEqual(1, value)

    def test_get_async_function(self):
        """Async getters are awaited on get()."""
        value = block_until(self.store.get("async"))
        self.assertEqual(1, value)

    def test_get_async_function_delay(self):
        """Async getters that sleep still resolve."""
        value = block_until(self.store.get("later"))
        self.assertEqual(1, value)
class LocalSynchronousGetExamples(unittest.TestCase):
    """Local store: get_sync() resolves the same getters synchronously."""

    def setUp(self):
        self.store = entangld.Entangld()
        self.store.set("some_data", 0.0)

        async def get_later():
            await asyncio.sleep(0.01)
            return 1

        async def get_async():
            return 1

        def get_now():
            return 1

        # Register one getter of each flavour under a fixed path.
        for path, getter in (("later", get_later),
                             ("async", get_async),
                             ("now", get_now)):
            self.store.set(path, getter)

    def tearDown(self):
        self.store.subscriptions = []
        del self.store

    def test_simple_get(self):
        """Plain stored values come back unchanged."""
        self.assertEqual(0.0, self.store.get_sync("some_data"))

    def test_get_function(self):
        """Plain-function getters are invoked by get_sync()."""
        self.assertEqual(1, self.store.get_sync("now"))

    def test_get_async_function(self):
        """Async getters are resolved by get_sync()."""
        self.assertEqual(1, self.store.get_sync("async"))

    def test_get_async_function_delay(self):
        """Async getters that sleep are still resolved synchronously."""
        self.assertEqual(1, self.store.get_sync("later"))
# NOTE(review): this class is an exact duplicate of the LocalGetExamples
# defined earlier in this module.  Because it reuses the same name, this
# later definition shadows the earlier one, so unittest only ever
# collects and runs this copy.  Confirm intent — one of the two should
# likely be removed or renamed.
class LocalGetExamples(unittest.TestCase):
    def setUp(self):
        # Store under test: one plain value plus three getter styles
        # (delayed async, immediate async, plain function).
        self.store = entangld.Entangld()
        self.store.set("some_data",0.0)
        async def get_later():
            await asyncio.sleep(0.01)
            return 1
        async def get_async():
            return 1
        def get_now():
            return 1
        self.store.set("later",get_later)
        self.store.set("async",get_async)
        self.store.set("now",get_now)
    def tearDown(self):
        # Drop subscriptions so no callbacks outlive the test.
        self.store.subscriptions = []
        del self.store
    def test_simple_get(self):
        """Get data locally
        """
        self.assertEqual(0.0,block_until(self.store.get("some_data")))
    def test_get_function(self):
        """Get data from a function
        """
        self.assertEqual(1,block_until(self.store.get("now")))
    def test_get_async_function(self):
        """Get data from an async function
        """
        self.assertEqual(1,block_until(self.store.get("async")))
    def test_get_async_function_delay(self):
        """Get data from a delaying async function
        """
        self.assertEqual(1,block_until(self.store.get("later")))
class RemoteGetExamples(unittest.TestCase):
    """Two attached stores: get() on 'other.*' paths resolves remotely."""

    def setUp(self):
        self.store = entangld.Entangld()
        self.remote = entangld.Entangld()
        # Wire the two stores together with synchronous message delivery,
        # then mount the remote store under the "other" path.
        self.store.transmit(lambda msg, obj: obj.receive_sync(msg, self.store))
        self.remote.transmit(lambda msg, obj: obj.receive_sync(msg, self.remote))
        self.store.attach("other", self.remote)
        self.remote.set("some_data", 0.0)

        async def get_later():
            await asyncio.sleep(0.01)
            return 1

        async def get_async():
            return 1

        def get_now():
            return 1

        # Register one getter of each flavour on the remote side.
        for path, getter in (("later", get_later),
                             ("async", get_async),
                             ("now", get_now)):
            self.remote.set(path, getter)

    def tearDown(self):
        self.store.subscriptions = []
        self.remote.subscriptions = []
        del self.store
        del self.remote

    def test_simple_get(self):
        """Plain remote values come back unchanged."""
        value = block_until(self.store.get("other.some_data"))
        self.assertEqual(0.0, value)

    def test_get_function(self):
        """Remote plain-function getters are invoked."""
        value = block_until(self.store.get("other.now"))
        self.assertEqual(1, value)

    def test_get_async_function(self):
        """Remote async getters are awaited."""
        value = block_until(self.store.get("other.async"))
        self.assertEqual(1, value)

    def test_get_async_function_delay(self):
        """Remote async getters that sleep still resolve."""
        value = block_until(self.store.get("other.later"))
        self.assertEqual(1, value)
# Allow running this test module directly (python test_gets.py).
if __name__ == "__main__":
    unittest.main()
| 27.634409
| 80
| 0.601946
| 656
| 5,140
| 4.559451
| 0.082317
| 0.129388
| 0.064193
| 0.076229
| 0.890338
| 0.876964
| 0.869274
| 0.860247
| 0.832832
| 0.768305
| 0
| 0.013926
| 0.273541
| 5,140
| 185
| 81
| 27.783784
| 0.787092
| 0.118677
| 0
| 0.767857
| 0
| 0
| 0.047973
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 1
| 0.25
| false
| 0
| 0.035714
| 0.035714
| 0.428571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
8c573ea0957a787f6bc98546a09a25835b1809f9
| 87,501
|
py
|
Python
|
test/scenarios/msgraphuser/output/users_v1_0/azext_users_v1_0/generated/custom.py
|
necusjz/autorest.az
|
08dd5d35bd7f54f306917d0f1dfa1be4520c4059
|
[
"MIT"
] | null | null | null |
test/scenarios/msgraphuser/output/users_v1_0/azext_users_v1_0/generated/custom.py
|
necusjz/autorest.az
|
08dd5d35bd7f54f306917d0f1dfa1be4520c4059
|
[
"MIT"
] | null | null | null |
test/scenarios/msgraphuser/output/users_v1_0/azext_users_v1_0/generated/custom.py
|
necusjz/autorest.az
|
08dd5d35bd7f54f306917d0f1dfa1be4520c4059
|
[
"MIT"
] | null | null | null |
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# pylint: disable=line-too-long
# pylint: disable=too-many-lines
def users_user_list(client,
orderby=None,
select=None,
expand=None):
return client.list_user(orderby=orderby,
select=select,
expand=expand)
def users_user_show(client,
user_id,
select=None,
expand=None):
return client.get_user(user_id=user_id,
select=select,
expand=expand)
def users_user_create(client,
id_=None,
deleted_date_time=None,
account_enabled=None,
age_group=None,
assigned_licenses=None,
assigned_plans=None,
business_phones=None,
city=None,
company_name=None,
consent_provided_for_minor=None,
country=None,
created_date_time=None,
creation_type=None,
department=None,
display_name=None,
employee_id=None,
external_user_state=None,
external_user_state_change_date_time=None,
fax_number=None,
given_name=None,
identities=None,
im_addresses=None,
is_resource_account=None,
job_title=None,
last_password_change_date_time=None,
legal_age_group_classification=None,
license_assignment_states=None,
mail=None,
mail_nickname=None,
mobile_phone=None,
office_location=None,
on_premises_distinguished_name=None,
on_premises_domain_name=None,
on_premises_extension_attributes=None,
on_premises_immutable_id=None,
on_premises_last_sync_date_time=None,
on_premises_provisioning_errors=None,
on_premises_sam_account_name=None,
on_premises_security_identifier=None,
on_premises_sync_enabled=None,
on_premises_user_principal_name=None,
other_mails=None,
password_policies=None,
password_profile=None,
postal_code=None,
preferred_language=None,
provisioned_plans=None,
proxy_addresses=None,
show_in_address_list=None,
sign_in_sessions_valid_from_date_time=None,
state=None,
street_address=None,
surname=None,
usage_location=None,
user_principal_name=None,
user_type=None,
device_enrollment_limit=None,
about_me=None,
birthday=None,
hire_date=None,
interests=None,
my_site=None,
past_projects=None,
preferred_name=None,
responsibilities=None,
schools=None,
skills=None,
app_role_assignments=None,
created_objects=None,
direct_reports=None,
license_details=None,
manager=None,
member_of=None,
oauth2_permission_grants=None,
owned_devices=None,
owned_objects=None,
registered_devices=None,
scoped_role_member_of=None,
transitive_member_of=None,
calendar=None,
calendar_groups=None,
calendars=None,
calendar_view=None,
contact_folders=None,
contacts=None,
events=None,
mail_folders=None,
messages=None,
people=None,
photo=None,
photos=None,
drive=None,
drives=None,
followed_sites=None,
extensions=None,
managed_devices=None,
managed_app_registrations=None,
device_management_troubleshooting_events=None,
activities=None,
online_meetings=None,
joined_teams=None,
body_contains=None,
body_or_subject_contains=None,
categories=None,
from_addresses=None,
has_attachments=None,
header_contains=None,
importance=None,
exceptions_is_approval_request=None,
exceptions_is_automatic_forward=None,
exceptions_is_automatic_reply=None,
exceptions_is_encrypted=None,
exceptions_is_meeting_request=None,
exceptions_is_meeting_response=None,
exceptions_is_non_delivery_report=None,
exceptions_is_permission_controlled=None,
exceptions_is_read_receipt=None,
exceptions_is_signed=None,
exceptions_is_voicemail=None,
message_action_flag=None,
not_sent_to_me=None,
recipient_contains=None,
sender_contains=None,
sensitivity=None,
sent_cc_me=None,
sent_only_to_me=None,
sent_to_addresses=None,
sent_to_me=None,
sent_to_or_cc_me=None,
subject_contains=None,
within_size_range=None,
microsoft_graph_message_rule_predicates_body_contains=None,
microsoft_graph_message_rule_predicates_body_or_subject_contains_body_or_subject_contains=None,
microsoft_graph_message_rule_predicates_categories=None,
microsoft_graph_message_rule_predicates_from_addresses=None,
boolean_has_attachments=None,
microsoft_graph_message_rule_predicates_header_contains=None,
microsoft_graph_importance=None,
is_approval_request=None,
is_automatic_forward=None,
is_automatic_reply=None,
is_encrypted=None,
is_meeting_request=None,
is_meeting_response=None,
is_non_delivery_report=None,
is_permission_controlled=None,
is_read_receipt=None,
is_signed=None,
is_voicemail=None,
microsoft_graph_message_action_flag_message_action_flag=None,
boolean_not_sent_to_me=None,
microsoft_graph_message_rule_predicates_recipient_contains=None,
microsoft_graph_message_rule_predicates_sender_contains=None,
microsoft_graph_sensitivity=None,
boolean_sent_cc_me=None,
boolean_sent_only_to_me=None,
microsoft_graph_message_rule_predicates_sent_to_addresses_sent_to_addresses=None,
boolean_sent_to_me=None,
boolean_sent_to_or_cc_me=None,
microsoft_graph_message_rule_predicates_subject_contains=None,
microsoft_graph_size_range_within_size_range=None,
microsoft_graph_entity_id=None,
notebooks=None,
operations=None,
pages=None,
resources=None,
section_groups=None,
sections=None,
id1=None,
contribution_to_content_discovery_as_organization_disabled=None,
contribution_to_content_discovery_disabled=None,
id2=None,
microsoft_graph_change_tracked_entity_created_date_time_created_date_time=None,
last_modified_date_time=None,
application=None,
device=None,
user=None,
availability=None,
id3=None,
shared=None,
trending=None,
used=None,
id4=None,
plans=None,
tasks=None,
id5=None,
master_categories=None,
id6=None,
overrides=None,
archive_folder=None,
automatic_replies_setting=None,
date_format=None,
delegate_meeting_message_delivery_options=None,
language=None,
time_format=None,
time_zone=None,
working_hours=None):
body = {}
if id_ is not None:
body['id'] = id_
if deleted_date_time is not None:
body['deleted_date_time'] = deleted_date_time
if account_enabled is not None:
body['account_enabled'] = account_enabled
if age_group is not None:
body['age_group'] = age_group
if assigned_licenses is not None:
body['assigned_licenses'] = assigned_licenses
if assigned_plans is not None:
body['assigned_plans'] = assigned_plans
if business_phones is not None:
body['business_phones'] = business_phones
if city is not None:
body['city'] = city
if company_name is not None:
body['company_name'] = company_name
if consent_provided_for_minor is not None:
body['consent_provided_for_minor'] = consent_provided_for_minor
if country is not None:
body['country'] = country
if created_date_time is not None:
body['created_date_time'] = created_date_time
if creation_type is not None:
body['creation_type'] = creation_type
if department is not None:
body['department'] = department
if display_name is not None:
body['display_name'] = display_name
if employee_id is not None:
body['employee_id'] = employee_id
if external_user_state is not None:
body['external_user_state'] = external_user_state
if external_user_state_change_date_time is not None:
body['external_user_state_change_date_time'] = external_user_state_change_date_time
if fax_number is not None:
body['fax_number'] = fax_number
if given_name is not None:
body['given_name'] = given_name
if identities is not None:
body['identities'] = identities
if im_addresses is not None:
body['im_addresses'] = im_addresses
if is_resource_account is not None:
body['is_resource_account'] = is_resource_account
if job_title is not None:
body['job_title'] = job_title
if last_password_change_date_time is not None:
body['last_password_change_date_time'] = last_password_change_date_time
if legal_age_group_classification is not None:
body['legal_age_group_classification'] = legal_age_group_classification
if license_assignment_states is not None:
body['license_assignment_states'] = license_assignment_states
if mail is not None:
body['mail'] = mail
if mail_nickname is not None:
body['mail_nickname'] = mail_nickname
if mobile_phone is not None:
body['mobile_phone'] = mobile_phone
if office_location is not None:
body['office_location'] = office_location
if on_premises_distinguished_name is not None:
body['on_premises_distinguished_name'] = on_premises_distinguished_name
if on_premises_domain_name is not None:
body['on_premises_domain_name'] = on_premises_domain_name
if on_premises_extension_attributes is not None:
body['on_premises_extension_attributes'] = on_premises_extension_attributes
if on_premises_immutable_id is not None:
body['on_premises_immutable_id'] = on_premises_immutable_id
if on_premises_last_sync_date_time is not None:
body['on_premises_last_sync_date_time'] = on_premises_last_sync_date_time
if on_premises_provisioning_errors is not None:
body['on_premises_provisioning_errors'] = on_premises_provisioning_errors
if on_premises_sam_account_name is not None:
body['on_premises_sam_account_name'] = on_premises_sam_account_name
if on_premises_security_identifier is not None:
body['on_premises_security_identifier'] = on_premises_security_identifier
if on_premises_sync_enabled is not None:
body['on_premises_sync_enabled'] = on_premises_sync_enabled
if on_premises_user_principal_name is not None:
body['on_premises_user_principal_name'] = on_premises_user_principal_name
if other_mails is not None:
body['other_mails'] = other_mails
if password_policies is not None:
body['password_policies'] = password_policies
if password_profile is not None:
body['password_profile'] = password_profile
if postal_code is not None:
body['postal_code'] = postal_code
if preferred_language is not None:
body['preferred_language'] = preferred_language
if provisioned_plans is not None:
body['provisioned_plans'] = provisioned_plans
if proxy_addresses is not None:
body['proxy_addresses'] = proxy_addresses
if show_in_address_list is not None:
body['show_in_address_list'] = show_in_address_list
if sign_in_sessions_valid_from_date_time is not None:
body['sign_in_sessions_valid_from_date_time'] = sign_in_sessions_valid_from_date_time
if state is not None:
body['state'] = state
if street_address is not None:
body['street_address'] = street_address
if surname is not None:
body['surname'] = surname
if usage_location is not None:
body['usage_location'] = usage_location
if user_principal_name is not None:
body['user_principal_name'] = user_principal_name
if user_type is not None:
body['user_type'] = user_type
if device_enrollment_limit is not None:
body['device_enrollment_limit'] = device_enrollment_limit
if about_me is not None:
body['about_me'] = about_me
if birthday is not None:
body['birthday'] = birthday
if hire_date is not None:
body['hire_date'] = hire_date
if interests is not None:
body['interests'] = interests
if my_site is not None:
body['my_site'] = my_site
if past_projects is not None:
body['past_projects'] = past_projects
if preferred_name is not None:
body['preferred_name'] = preferred_name
if responsibilities is not None:
body['responsibilities'] = responsibilities
if schools is not None:
body['schools'] = schools
if skills is not None:
body['skills'] = skills
if app_role_assignments is not None:
body['app_role_assignments'] = app_role_assignments
if created_objects is not None:
body['created_objects'] = created_objects
if direct_reports is not None:
body['direct_reports'] = direct_reports
if license_details is not None:
body['license_details'] = license_details
if manager is not None:
body['manager'] = manager
if member_of is not None:
body['member_of'] = member_of
if oauth2_permission_grants is not None:
body['oauth2_permission_grants'] = oauth2_permission_grants
if owned_devices is not None:
body['owned_devices'] = owned_devices
if owned_objects is not None:
body['owned_objects'] = owned_objects
if registered_devices is not None:
body['registered_devices'] = registered_devices
if scoped_role_member_of is not None:
body['scoped_role_member_of'] = scoped_role_member_of
if transitive_member_of is not None:
body['transitive_member_of'] = transitive_member_of
if calendar is not None:
body['calendar'] = calendar
if calendar_groups is not None:
body['calendar_groups'] = calendar_groups
if calendars is not None:
body['calendars'] = calendars
if calendar_view is not None:
body['calendar_view'] = calendar_view
if contact_folders is not None:
body['contact_folders'] = contact_folders
if contacts is not None:
body['contacts'] = contacts
if events is not None:
body['events'] = events
if mail_folders is not None:
body['mail_folders'] = mail_folders
if messages is not None:
body['messages'] = messages
if people is not None:
body['people'] = people
if photo is not None:
body['photo'] = photo
if photos is not None:
body['photos'] = photos
if drive is not None:
body['drive'] = drive
if drives is not None:
body['drives'] = drives
if followed_sites is not None:
body['followed_sites'] = followed_sites
if extensions is not None:
body['extensions'] = extensions
if managed_devices is not None:
body['managed_devices'] = managed_devices
if managed_app_registrations is not None:
body['managed_app_registrations'] = managed_app_registrations
if device_management_troubleshooting_events is not None:
body['device_management_troubleshooting_events'] = device_management_troubleshooting_events
if activities is not None:
body['activities'] = activities
if online_meetings is not None:
body['online_meetings'] = online_meetings
if joined_teams is not None:
body['joined_teams'] = joined_teams
body['exceptions'] = {}
if body_contains is not None:
body['exceptions']['body_contains'] = body_contains
if body_or_subject_contains is not None:
body['exceptions']['body_or_subject_contains'] = body_or_subject_contains
if categories is not None:
body['exceptions']['categories'] = categories
if from_addresses is not None:
body['exceptions']['from_addresses'] = from_addresses
if has_attachments is not None:
body['exceptions']['has_attachments'] = has_attachments
if header_contains is not None:
body['exceptions']['header_contains'] = header_contains
if importance is not None:
body['exceptions']['importance'] = importance
if exceptions_is_approval_request is not None:
body['exceptions']['is_approval_request'] = exceptions_is_approval_request
if exceptions_is_automatic_forward is not None:
body['exceptions']['is_automatic_forward'] = exceptions_is_automatic_forward
if exceptions_is_automatic_reply is not None:
body['exceptions']['is_automatic_reply'] = exceptions_is_automatic_reply
if exceptions_is_encrypted is not None:
body['exceptions']['is_encrypted'] = exceptions_is_encrypted
if exceptions_is_meeting_request is not None:
body['exceptions']['is_meeting_request'] = exceptions_is_meeting_request
if exceptions_is_meeting_response is not None:
body['exceptions']['is_meeting_response'] = exceptions_is_meeting_response
if exceptions_is_non_delivery_report is not None:
body['exceptions']['is_non_delivery_report'] = exceptions_is_non_delivery_report
if exceptions_is_permission_controlled is not None:
body['exceptions']['is_permission_controlled'] = exceptions_is_permission_controlled
if exceptions_is_read_receipt is not None:
body['exceptions']['is_read_receipt'] = exceptions_is_read_receipt
if exceptions_is_signed is not None:
body['exceptions']['is_signed'] = exceptions_is_signed
if exceptions_is_voicemail is not None:
body['exceptions']['is_voicemail'] = exceptions_is_voicemail
if message_action_flag is not None:
body['exceptions']['message_action_flag'] = message_action_flag
if not_sent_to_me is not None:
body['exceptions']['not_sent_to_me'] = not_sent_to_me
if recipient_contains is not None:
body['exceptions']['recipient_contains'] = recipient_contains
if sender_contains is not None:
body['exceptions']['sender_contains'] = sender_contains
if sensitivity is not None:
body['exceptions']['sensitivity'] = sensitivity
if sent_cc_me is not None:
body['exceptions']['sent_cc_me'] = sent_cc_me
if sent_only_to_me is not None:
body['exceptions']['sent_only_to_me'] = sent_only_to_me
if sent_to_addresses is not None:
body['exceptions']['sent_to_addresses'] = sent_to_addresses
if sent_to_me is not None:
body['exceptions']['sent_to_me'] = sent_to_me
if sent_to_or_cc_me is not None:
body['exceptions']['sent_to_or_cc_me'] = sent_to_or_cc_me
if subject_contains is not None:
body['exceptions']['subject_contains'] = subject_contains
if within_size_range is not None:
body['exceptions']['within_size_range'] = within_size_range
if len(body['exceptions']) == 0:
del body['exceptions']
body['conditions'] = {}
if microsoft_graph_message_rule_predicates_body_contains is not None:
body['conditions']['body_contains'] = microsoft_graph_message_rule_predicates_body_contains
if microsoft_graph_message_rule_predicates_body_or_subject_contains_body_or_subject_contains is not None:
body['conditions']['body_or_subject_contains'] = microsoft_graph_message_rule_predicates_body_or_subject_contains_body_or_subject_contains
if microsoft_graph_message_rule_predicates_categories is not None:
body['conditions']['categories'] = microsoft_graph_message_rule_predicates_categories
if microsoft_graph_message_rule_predicates_from_addresses is not None:
body['conditions']['from_addresses'] = microsoft_graph_message_rule_predicates_from_addresses
if boolean_has_attachments is not None:
body['conditions']['has_attachments'] = boolean_has_attachments
if microsoft_graph_message_rule_predicates_header_contains is not None:
body['conditions']['header_contains'] = microsoft_graph_message_rule_predicates_header_contains
if microsoft_graph_importance is not None:
body['conditions']['importance'] = microsoft_graph_importance
if is_approval_request is not None:
body['conditions']['is_approval_request'] = is_approval_request
if is_automatic_forward is not None:
body['conditions']['is_automatic_forward'] = is_automatic_forward
if is_automatic_reply is not None:
body['conditions']['is_automatic_reply'] = is_automatic_reply
if is_encrypted is not None:
body['conditions']['is_encrypted'] = is_encrypted
if is_meeting_request is not None:
body['conditions']['is_meeting_request'] = is_meeting_request
if is_meeting_response is not None:
body['conditions']['is_meeting_response'] = is_meeting_response
if is_non_delivery_report is not None:
body['conditions']['is_non_delivery_report'] = is_non_delivery_report
if is_permission_controlled is not None:
body['conditions']['is_permission_controlled'] = is_permission_controlled
if is_read_receipt is not None:
body['conditions']['is_read_receipt'] = is_read_receipt
if is_signed is not None:
body['conditions']['is_signed'] = is_signed
if is_voicemail is not None:
body['conditions']['is_voicemail'] = is_voicemail
if microsoft_graph_message_action_flag_message_action_flag is not None:
body['conditions']['message_action_flag'] = microsoft_graph_message_action_flag_message_action_flag
if boolean_not_sent_to_me is not None:
body['conditions']['not_sent_to_me'] = boolean_not_sent_to_me
if microsoft_graph_message_rule_predicates_recipient_contains is not None:
body['conditions']['recipient_contains'] = microsoft_graph_message_rule_predicates_recipient_contains
if microsoft_graph_message_rule_predicates_sender_contains is not None:
body['conditions']['sender_contains'] = microsoft_graph_message_rule_predicates_sender_contains
if microsoft_graph_sensitivity is not None:
body['conditions']['sensitivity'] = microsoft_graph_sensitivity
if boolean_sent_cc_me is not None:
body['conditions']['sent_cc_me'] = boolean_sent_cc_me
if boolean_sent_only_to_me is not None:
body['conditions']['sent_only_to_me'] = boolean_sent_only_to_me
if microsoft_graph_message_rule_predicates_sent_to_addresses_sent_to_addresses is not None:
body['conditions']['sent_to_addresses'] = microsoft_graph_message_rule_predicates_sent_to_addresses_sent_to_addresses
if boolean_sent_to_me is not None:
body['conditions']['sent_to_me'] = boolean_sent_to_me
if boolean_sent_to_or_cc_me is not None:
body['conditions']['sent_to_or_cc_me'] = boolean_sent_to_or_cc_me
if microsoft_graph_message_rule_predicates_subject_contains is not None:
body['conditions']['subject_contains'] = microsoft_graph_message_rule_predicates_subject_contains
if microsoft_graph_size_range_within_size_range is not None:
body['conditions']['within_size_range'] = microsoft_graph_size_range_within_size_range
if len(body['conditions']) == 0:
del body['conditions']
body['onenote'] = {}
if microsoft_graph_entity_id is not None:
body['onenote']['id'] = microsoft_graph_entity_id
if notebooks is not None:
body['onenote']['notebooks'] = notebooks
if operations is not None:
body['onenote']['operations'] = operations
if pages is not None:
body['onenote']['pages'] = pages
if resources is not None:
body['onenote']['resources'] = resources
if section_groups is not None:
body['onenote']['section_groups'] = section_groups
if sections is not None:
body['onenote']['sections'] = sections
if len(body['onenote']) == 0:
del body['onenote']
body['settings'] = {}
if id1 is not None:
body['settings']['id'] = id1
if contribution_to_content_discovery_as_organization_disabled is not None:
body['settings']['contribution_to_content_discovery_as_organization_disabled'] = contribution_to_content_discovery_as_organization_disabled
if contribution_to_content_discovery_disabled is not None:
body['settings']['contribution_to_content_discovery_disabled'] = contribution_to_content_discovery_disabled
body['settings']['shift_preferences'] = {}
if id2 is not None:
body['settings']['shift_preferences']['id'] = id2
if microsoft_graph_change_tracked_entity_created_date_time_created_date_time is not None:
body['settings']['shift_preferences']['created_date_time'] = microsoft_graph_change_tracked_entity_created_date_time_created_date_time
if last_modified_date_time is not None:
body['settings']['shift_preferences']['last_modified_date_time'] = last_modified_date_time
body['settings']['shift_preferences']['last_modified_by'] = {}
if application is not None:
body['settings']['shift_preferences']['last_modified_by']['application'] = application
if device is not None:
body['settings']['shift_preferences']['last_modified_by']['device'] = device
if user is not None:
body['settings']['shift_preferences']['last_modified_by']['user'] = user
if len(body['settings']['shift_preferences']['last_modified_by']) == 0:
del body['settings']['shift_preferences']['last_modified_by']
if availability is not None:
body['settings']['shift_preferences']['availability'] = availability
if len(body['settings']['shift_preferences']) == 0:
del body['settings']['shift_preferences']
if len(body['settings']) == 0:
del body['settings']
body['insights'] = {}
if id3 is not None:
body['insights']['id'] = id3
if shared is not None:
body['insights']['shared'] = shared
if trending is not None:
body['insights']['trending'] = trending
if used is not None:
body['insights']['used'] = used
if len(body['insights']) == 0:
del body['insights']
body['planner'] = {}
if id4 is not None:
body['planner']['id'] = id4
if plans is not None:
body['planner']['plans'] = plans
if tasks is not None:
body['planner']['tasks'] = tasks
if len(body['planner']) == 0:
del body['planner']
body['outlook'] = {}
if id5 is not None:
body['outlook']['id'] = id5
if master_categories is not None:
body['outlook']['master_categories'] = master_categories
if len(body['outlook']) == 0:
del body['outlook']
body['inference_classification'] = {}
if id6 is not None:
body['inference_classification']['id'] = id6
if overrides is not None:
body['inference_classification']['overrides'] = overrides
if len(body['inference_classification']) == 0:
del body['inference_classification']
body['mailbox_settings'] = {}
if archive_folder is not None:
body['mailbox_settings']['archive_folder'] = archive_folder
if automatic_replies_setting is not None:
body['mailbox_settings']['automatic_replies_setting'] = automatic_replies_setting
if date_format is not None:
body['mailbox_settings']['date_format'] = date_format
if delegate_meeting_message_delivery_options is not None:
body['mailbox_settings']['delegate_meeting_message_delivery_options'] = delegate_meeting_message_delivery_options
if language is not None:
body['mailbox_settings']['language'] = language
if time_format is not None:
body['mailbox_settings']['time_format'] = time_format
if time_zone is not None:
body['mailbox_settings']['time_zone'] = time_zone
if working_hours is not None:
body['mailbox_settings']['working_hours'] = working_hours
if len(body['mailbox_settings']) == 0:
del body['mailbox_settings']
return client.create_user(body=body)
def users_user_update(client,
user_id,
id_=None,
deleted_date_time=None,
account_enabled=None,
age_group=None,
assigned_licenses=None,
assigned_plans=None,
business_phones=None,
city=None,
company_name=None,
consent_provided_for_minor=None,
country=None,
created_date_time=None,
creation_type=None,
department=None,
display_name=None,
employee_id=None,
external_user_state=None,
external_user_state_change_date_time=None,
fax_number=None,
given_name=None,
identities=None,
im_addresses=None,
is_resource_account=None,
job_title=None,
last_password_change_date_time=None,
legal_age_group_classification=None,
license_assignment_states=None,
mail=None,
mail_nickname=None,
mobile_phone=None,
office_location=None,
on_premises_distinguished_name=None,
on_premises_domain_name=None,
on_premises_extension_attributes=None,
on_premises_immutable_id=None,
on_premises_last_sync_date_time=None,
on_premises_provisioning_errors=None,
on_premises_sam_account_name=None,
on_premises_security_identifier=None,
on_premises_sync_enabled=None,
on_premises_user_principal_name=None,
other_mails=None,
password_policies=None,
password_profile=None,
postal_code=None,
preferred_language=None,
provisioned_plans=None,
proxy_addresses=None,
show_in_address_list=None,
sign_in_sessions_valid_from_date_time=None,
state=None,
street_address=None,
surname=None,
usage_location=None,
user_principal_name=None,
user_type=None,
device_enrollment_limit=None,
about_me=None,
birthday=None,
hire_date=None,
interests=None,
my_site=None,
past_projects=None,
preferred_name=None,
responsibilities=None,
schools=None,
skills=None,
app_role_assignments=None,
created_objects=None,
direct_reports=None,
license_details=None,
manager=None,
member_of=None,
oauth2_permission_grants=None,
owned_devices=None,
owned_objects=None,
registered_devices=None,
scoped_role_member_of=None,
transitive_member_of=None,
calendar=None,
calendar_groups=None,
calendars=None,
calendar_view=None,
contact_folders=None,
contacts=None,
events=None,
mail_folders=None,
messages=None,
people=None,
photo=None,
photos=None,
drive=None,
drives=None,
followed_sites=None,
extensions=None,
managed_devices=None,
managed_app_registrations=None,
device_management_troubleshooting_events=None,
activities=None,
online_meetings=None,
joined_teams=None,
body_contains=None,
body_or_subject_contains=None,
categories=None,
from_addresses=None,
has_attachments=None,
header_contains=None,
importance=None,
exceptions_is_approval_request=None,
exceptions_is_automatic_forward=None,
exceptions_is_automatic_reply=None,
exceptions_is_encrypted=None,
exceptions_is_meeting_request=None,
exceptions_is_meeting_response=None,
exceptions_is_non_delivery_report=None,
exceptions_is_permission_controlled=None,
exceptions_is_read_receipt=None,
exceptions_is_signed=None,
exceptions_is_voicemail=None,
message_action_flag=None,
not_sent_to_me=None,
recipient_contains=None,
sender_contains=None,
sensitivity=None,
sent_cc_me=None,
sent_only_to_me=None,
sent_to_addresses=None,
sent_to_me=None,
sent_to_or_cc_me=None,
subject_contains=None,
within_size_range=None,
microsoft_graph_message_rule_predicates_body_contains=None,
microsoft_graph_message_rule_predicates_body_or_subject_contains_body_or_subject_contains=None,
microsoft_graph_message_rule_predicates_categories=None,
microsoft_graph_message_rule_predicates_from_addresses=None,
boolean_has_attachments=None,
microsoft_graph_message_rule_predicates_header_contains=None,
microsoft_graph_importance=None,
is_approval_request=None,
is_automatic_forward=None,
is_automatic_reply=None,
is_encrypted=None,
is_meeting_request=None,
is_meeting_response=None,
is_non_delivery_report=None,
is_permission_controlled=None,
is_read_receipt=None,
is_signed=None,
is_voicemail=None,
microsoft_graph_message_action_flag_message_action_flag=None,
boolean_not_sent_to_me=None,
microsoft_graph_message_rule_predicates_recipient_contains=None,
microsoft_graph_message_rule_predicates_sender_contains=None,
microsoft_graph_sensitivity=None,
boolean_sent_cc_me=None,
boolean_sent_only_to_me=None,
microsoft_graph_message_rule_predicates_sent_to_addresses_sent_to_addresses=None,
boolean_sent_to_me=None,
boolean_sent_to_or_cc_me=None,
microsoft_graph_message_rule_predicates_subject_contains=None,
microsoft_graph_size_range_within_size_range=None,
microsoft_graph_entity_id=None,
notebooks=None,
operations=None,
pages=None,
resources=None,
section_groups=None,
sections=None,
id1=None,
contribution_to_content_discovery_as_organization_disabled=None,
contribution_to_content_discovery_disabled=None,
id2=None,
microsoft_graph_change_tracked_entity_created_date_time_created_date_time=None,
last_modified_date_time=None,
application=None,
device=None,
user=None,
availability=None,
id3=None,
shared=None,
trending=None,
used=None,
id4=None,
plans=None,
tasks=None,
id5=None,
master_categories=None,
id6=None,
overrides=None,
archive_folder=None,
automatic_replies_setting=None,
date_format=None,
delegate_meeting_message_delivery_options=None,
language=None,
time_format=None,
time_zone=None,
working_hours=None):
body = {}
if id_ is not None:
body['id'] = id_
if deleted_date_time is not None:
body['deleted_date_time'] = deleted_date_time
if account_enabled is not None:
body['account_enabled'] = account_enabled
if age_group is not None:
body['age_group'] = age_group
if assigned_licenses is not None:
body['assigned_licenses'] = assigned_licenses
if assigned_plans is not None:
body['assigned_plans'] = assigned_plans
if business_phones is not None:
body['business_phones'] = business_phones
if city is not None:
body['city'] = city
if company_name is not None:
body['company_name'] = company_name
if consent_provided_for_minor is not None:
body['consent_provided_for_minor'] = consent_provided_for_minor
if country is not None:
body['country'] = country
if created_date_time is not None:
body['created_date_time'] = created_date_time
if creation_type is not None:
body['creation_type'] = creation_type
if department is not None:
body['department'] = department
if display_name is not None:
body['display_name'] = display_name
if employee_id is not None:
body['employee_id'] = employee_id
if external_user_state is not None:
body['external_user_state'] = external_user_state
if external_user_state_change_date_time is not None:
body['external_user_state_change_date_time'] = external_user_state_change_date_time
if fax_number is not None:
body['fax_number'] = fax_number
if given_name is not None:
body['given_name'] = given_name
if identities is not None:
body['identities'] = identities
if im_addresses is not None:
body['im_addresses'] = im_addresses
if is_resource_account is not None:
body['is_resource_account'] = is_resource_account
if job_title is not None:
body['job_title'] = job_title
if last_password_change_date_time is not None:
body['last_password_change_date_time'] = last_password_change_date_time
if legal_age_group_classification is not None:
body['legal_age_group_classification'] = legal_age_group_classification
if license_assignment_states is not None:
body['license_assignment_states'] = license_assignment_states
if mail is not None:
body['mail'] = mail
if mail_nickname is not None:
body['mail_nickname'] = mail_nickname
if mobile_phone is not None:
body['mobile_phone'] = mobile_phone
if office_location is not None:
body['office_location'] = office_location
if on_premises_distinguished_name is not None:
body['on_premises_distinguished_name'] = on_premises_distinguished_name
if on_premises_domain_name is not None:
body['on_premises_domain_name'] = on_premises_domain_name
if on_premises_extension_attributes is not None:
body['on_premises_extension_attributes'] = on_premises_extension_attributes
if on_premises_immutable_id is not None:
body['on_premises_immutable_id'] = on_premises_immutable_id
if on_premises_last_sync_date_time is not None:
body['on_premises_last_sync_date_time'] = on_premises_last_sync_date_time
if on_premises_provisioning_errors is not None:
body['on_premises_provisioning_errors'] = on_premises_provisioning_errors
if on_premises_sam_account_name is not None:
body['on_premises_sam_account_name'] = on_premises_sam_account_name
if on_premises_security_identifier is not None:
body['on_premises_security_identifier'] = on_premises_security_identifier
if on_premises_sync_enabled is not None:
body['on_premises_sync_enabled'] = on_premises_sync_enabled
if on_premises_user_principal_name is not None:
body['on_premises_user_principal_name'] = on_premises_user_principal_name
if other_mails is not None:
body['other_mails'] = other_mails
if password_policies is not None:
body['password_policies'] = password_policies
if password_profile is not None:
body['password_profile'] = password_profile
if postal_code is not None:
body['postal_code'] = postal_code
if preferred_language is not None:
body['preferred_language'] = preferred_language
if provisioned_plans is not None:
body['provisioned_plans'] = provisioned_plans
if proxy_addresses is not None:
body['proxy_addresses'] = proxy_addresses
if show_in_address_list is not None:
body['show_in_address_list'] = show_in_address_list
if sign_in_sessions_valid_from_date_time is not None:
body['sign_in_sessions_valid_from_date_time'] = sign_in_sessions_valid_from_date_time
if state is not None:
body['state'] = state
if street_address is not None:
body['street_address'] = street_address
if surname is not None:
body['surname'] = surname
if usage_location is not None:
body['usage_location'] = usage_location
if user_principal_name is not None:
body['user_principal_name'] = user_principal_name
if user_type is not None:
body['user_type'] = user_type
if device_enrollment_limit is not None:
body['device_enrollment_limit'] = device_enrollment_limit
if about_me is not None:
body['about_me'] = about_me
if birthday is not None:
body['birthday'] = birthday
if hire_date is not None:
body['hire_date'] = hire_date
if interests is not None:
body['interests'] = interests
if my_site is not None:
body['my_site'] = my_site
if past_projects is not None:
body['past_projects'] = past_projects
if preferred_name is not None:
body['preferred_name'] = preferred_name
if responsibilities is not None:
body['responsibilities'] = responsibilities
if schools is not None:
body['schools'] = schools
if skills is not None:
body['skills'] = skills
if app_role_assignments is not None:
body['app_role_assignments'] = app_role_assignments
if created_objects is not None:
body['created_objects'] = created_objects
if direct_reports is not None:
body['direct_reports'] = direct_reports
if license_details is not None:
body['license_details'] = license_details
if manager is not None:
body['manager'] = manager
if member_of is not None:
body['member_of'] = member_of
if oauth2_permission_grants is not None:
body['oauth2_permission_grants'] = oauth2_permission_grants
if owned_devices is not None:
body['owned_devices'] = owned_devices
if owned_objects is not None:
body['owned_objects'] = owned_objects
if registered_devices is not None:
body['registered_devices'] = registered_devices
if scoped_role_member_of is not None:
body['scoped_role_member_of'] = scoped_role_member_of
if transitive_member_of is not None:
body['transitive_member_of'] = transitive_member_of
if calendar is not None:
body['calendar'] = calendar
if calendar_groups is not None:
body['calendar_groups'] = calendar_groups
if calendars is not None:
body['calendars'] = calendars
if calendar_view is not None:
body['calendar_view'] = calendar_view
if contact_folders is not None:
body['contact_folders'] = contact_folders
if contacts is not None:
body['contacts'] = contacts
if events is not None:
body['events'] = events
if mail_folders is not None:
body['mail_folders'] = mail_folders
if messages is not None:
body['messages'] = messages
if people is not None:
body['people'] = people
if photo is not None:
body['photo'] = photo
if photos is not None:
body['photos'] = photos
if drive is not None:
body['drive'] = drive
if drives is not None:
body['drives'] = drives
if followed_sites is not None:
body['followed_sites'] = followed_sites
if extensions is not None:
body['extensions'] = extensions
if managed_devices is not None:
body['managed_devices'] = managed_devices
if managed_app_registrations is not None:
body['managed_app_registrations'] = managed_app_registrations
if device_management_troubleshooting_events is not None:
body['device_management_troubleshooting_events'] = device_management_troubleshooting_events
if activities is not None:
body['activities'] = activities
if online_meetings is not None:
body['online_meetings'] = online_meetings
if joined_teams is not None:
body['joined_teams'] = joined_teams
body['exceptions'] = {}
if body_contains is not None:
body['exceptions']['body_contains'] = body_contains
if body_or_subject_contains is not None:
body['exceptions']['body_or_subject_contains'] = body_or_subject_contains
if categories is not None:
body['exceptions']['categories'] = categories
if from_addresses is not None:
body['exceptions']['from_addresses'] = from_addresses
if has_attachments is not None:
body['exceptions']['has_attachments'] = has_attachments
if header_contains is not None:
body['exceptions']['header_contains'] = header_contains
if importance is not None:
body['exceptions']['importance'] = importance
if exceptions_is_approval_request is not None:
body['exceptions']['is_approval_request'] = exceptions_is_approval_request
if exceptions_is_automatic_forward is not None:
body['exceptions']['is_automatic_forward'] = exceptions_is_automatic_forward
if exceptions_is_automatic_reply is not None:
body['exceptions']['is_automatic_reply'] = exceptions_is_automatic_reply
if exceptions_is_encrypted is not None:
body['exceptions']['is_encrypted'] = exceptions_is_encrypted
if exceptions_is_meeting_request is not None:
body['exceptions']['is_meeting_request'] = exceptions_is_meeting_request
if exceptions_is_meeting_response is not None:
body['exceptions']['is_meeting_response'] = exceptions_is_meeting_response
if exceptions_is_non_delivery_report is not None:
body['exceptions']['is_non_delivery_report'] = exceptions_is_non_delivery_report
if exceptions_is_permission_controlled is not None:
body['exceptions']['is_permission_controlled'] = exceptions_is_permission_controlled
if exceptions_is_read_receipt is not None:
body['exceptions']['is_read_receipt'] = exceptions_is_read_receipt
if exceptions_is_signed is not None:
body['exceptions']['is_signed'] = exceptions_is_signed
if exceptions_is_voicemail is not None:
body['exceptions']['is_voicemail'] = exceptions_is_voicemail
if message_action_flag is not None:
body['exceptions']['message_action_flag'] = message_action_flag
if not_sent_to_me is not None:
body['exceptions']['not_sent_to_me'] = not_sent_to_me
if recipient_contains is not None:
body['exceptions']['recipient_contains'] = recipient_contains
if sender_contains is not None:
body['exceptions']['sender_contains'] = sender_contains
if sensitivity is not None:
body['exceptions']['sensitivity'] = sensitivity
if sent_cc_me is not None:
body['exceptions']['sent_cc_me'] = sent_cc_me
if sent_only_to_me is not None:
body['exceptions']['sent_only_to_me'] = sent_only_to_me
if sent_to_addresses is not None:
body['exceptions']['sent_to_addresses'] = sent_to_addresses
if sent_to_me is not None:
body['exceptions']['sent_to_me'] = sent_to_me
if sent_to_or_cc_me is not None:
body['exceptions']['sent_to_or_cc_me'] = sent_to_or_cc_me
if subject_contains is not None:
body['exceptions']['subject_contains'] = subject_contains
if within_size_range is not None:
body['exceptions']['within_size_range'] = within_size_range
if len(body['exceptions']) == 0:
del body['exceptions']
body['conditions'] = {}
if microsoft_graph_message_rule_predicates_body_contains is not None:
body['conditions']['body_contains'] = microsoft_graph_message_rule_predicates_body_contains
if microsoft_graph_message_rule_predicates_body_or_subject_contains_body_or_subject_contains is not None:
body['conditions']['body_or_subject_contains'] = microsoft_graph_message_rule_predicates_body_or_subject_contains_body_or_subject_contains
if microsoft_graph_message_rule_predicates_categories is not None:
body['conditions']['categories'] = microsoft_graph_message_rule_predicates_categories
if microsoft_graph_message_rule_predicates_from_addresses is not None:
body['conditions']['from_addresses'] = microsoft_graph_message_rule_predicates_from_addresses
if boolean_has_attachments is not None:
body['conditions']['has_attachments'] = boolean_has_attachments
if microsoft_graph_message_rule_predicates_header_contains is not None:
body['conditions']['header_contains'] = microsoft_graph_message_rule_predicates_header_contains
if microsoft_graph_importance is not None:
body['conditions']['importance'] = microsoft_graph_importance
if is_approval_request is not None:
body['conditions']['is_approval_request'] = is_approval_request
if is_automatic_forward is not None:
body['conditions']['is_automatic_forward'] = is_automatic_forward
if is_automatic_reply is not None:
body['conditions']['is_automatic_reply'] = is_automatic_reply
if is_encrypted is not None:
body['conditions']['is_encrypted'] = is_encrypted
if is_meeting_request is not None:
body['conditions']['is_meeting_request'] = is_meeting_request
if is_meeting_response is not None:
body['conditions']['is_meeting_response'] = is_meeting_response
if is_non_delivery_report is not None:
body['conditions']['is_non_delivery_report'] = is_non_delivery_report
if is_permission_controlled is not None:
body['conditions']['is_permission_controlled'] = is_permission_controlled
if is_read_receipt is not None:
body['conditions']['is_read_receipt'] = is_read_receipt
if is_signed is not None:
body['conditions']['is_signed'] = is_signed
if is_voicemail is not None:
body['conditions']['is_voicemail'] = is_voicemail
if microsoft_graph_message_action_flag_message_action_flag is not None:
body['conditions']['message_action_flag'] = microsoft_graph_message_action_flag_message_action_flag
if boolean_not_sent_to_me is not None:
body['conditions']['not_sent_to_me'] = boolean_not_sent_to_me
if microsoft_graph_message_rule_predicates_recipient_contains is not None:
body['conditions']['recipient_contains'] = microsoft_graph_message_rule_predicates_recipient_contains
if microsoft_graph_message_rule_predicates_sender_contains is not None:
body['conditions']['sender_contains'] = microsoft_graph_message_rule_predicates_sender_contains
if microsoft_graph_sensitivity is not None:
body['conditions']['sensitivity'] = microsoft_graph_sensitivity
if boolean_sent_cc_me is not None:
body['conditions']['sent_cc_me'] = boolean_sent_cc_me
if boolean_sent_only_to_me is not None:
body['conditions']['sent_only_to_me'] = boolean_sent_only_to_me
if microsoft_graph_message_rule_predicates_sent_to_addresses_sent_to_addresses is not None:
body['conditions']['sent_to_addresses'] = microsoft_graph_message_rule_predicates_sent_to_addresses_sent_to_addresses
if boolean_sent_to_me is not None:
body['conditions']['sent_to_me'] = boolean_sent_to_me
if boolean_sent_to_or_cc_me is not None:
body['conditions']['sent_to_or_cc_me'] = boolean_sent_to_or_cc_me
if microsoft_graph_message_rule_predicates_subject_contains is not None:
body['conditions']['subject_contains'] = microsoft_graph_message_rule_predicates_subject_contains
if microsoft_graph_size_range_within_size_range is not None:
body['conditions']['within_size_range'] = microsoft_graph_size_range_within_size_range
if len(body['conditions']) == 0:
del body['conditions']
body['onenote'] = {}
if microsoft_graph_entity_id is not None:
body['onenote']['id'] = microsoft_graph_entity_id
if notebooks is not None:
body['onenote']['notebooks'] = notebooks
if operations is not None:
body['onenote']['operations'] = operations
if pages is not None:
body['onenote']['pages'] = pages
if resources is not None:
body['onenote']['resources'] = resources
if section_groups is not None:
body['onenote']['section_groups'] = section_groups
if sections is not None:
body['onenote']['sections'] = sections
if len(body['onenote']) == 0:
del body['onenote']
body['settings'] = {}
if id1 is not None:
body['settings']['id'] = id1
if contribution_to_content_discovery_as_organization_disabled is not None:
body['settings']['contribution_to_content_discovery_as_organization_disabled'] = contribution_to_content_discovery_as_organization_disabled
if contribution_to_content_discovery_disabled is not None:
body['settings']['contribution_to_content_discovery_disabled'] = contribution_to_content_discovery_disabled
body['settings']['shift_preferences'] = {}
if id2 is not None:
body['settings']['shift_preferences']['id'] = id2
if microsoft_graph_change_tracked_entity_created_date_time_created_date_time is not None:
body['settings']['shift_preferences']['created_date_time'] = microsoft_graph_change_tracked_entity_created_date_time_created_date_time
if last_modified_date_time is not None:
body['settings']['shift_preferences']['last_modified_date_time'] = last_modified_date_time
body['settings']['shift_preferences']['last_modified_by'] = {}
if application is not None:
body['settings']['shift_preferences']['last_modified_by']['application'] = application
if device is not None:
body['settings']['shift_preferences']['last_modified_by']['device'] = device
if user is not None:
body['settings']['shift_preferences']['last_modified_by']['user'] = user
if len(body['settings']['shift_preferences']['last_modified_by']) == 0:
del body['settings']['shift_preferences']['last_modified_by']
if availability is not None:
body['settings']['shift_preferences']['availability'] = availability
if len(body['settings']['shift_preferences']) == 0:
del body['settings']['shift_preferences']
if len(body['settings']) == 0:
del body['settings']
body['insights'] = {}
if id3 is not None:
body['insights']['id'] = id3
if shared is not None:
body['insights']['shared'] = shared
if trending is not None:
body['insights']['trending'] = trending
if used is not None:
body['insights']['used'] = used
if len(body['insights']) == 0:
del body['insights']
body['planner'] = {}
if id4 is not None:
body['planner']['id'] = id4
if plans is not None:
body['planner']['plans'] = plans
if tasks is not None:
body['planner']['tasks'] = tasks
if len(body['planner']) == 0:
del body['planner']
body['outlook'] = {}
if id5 is not None:
body['outlook']['id'] = id5
if master_categories is not None:
body['outlook']['master_categories'] = master_categories
if len(body['outlook']) == 0:
del body['outlook']
body['inference_classification'] = {}
if id6 is not None:
body['inference_classification']['id'] = id6
if overrides is not None:
body['inference_classification']['overrides'] = overrides
if len(body['inference_classification']) == 0:
del body['inference_classification']
body['mailbox_settings'] = {}
if archive_folder is not None:
body['mailbox_settings']['archive_folder'] = archive_folder
if automatic_replies_setting is not None:
body['mailbox_settings']['automatic_replies_setting'] = automatic_replies_setting
if date_format is not None:
body['mailbox_settings']['date_format'] = date_format
if delegate_meeting_message_delivery_options is not None:
body['mailbox_settings']['delegate_meeting_message_delivery_options'] = delegate_meeting_message_delivery_options
if language is not None:
body['mailbox_settings']['language'] = language
if time_format is not None:
body['mailbox_settings']['time_format'] = time_format
if time_zone is not None:
body['mailbox_settings']['time_zone'] = time_zone
if working_hours is not None:
body['mailbox_settings']['working_hours'] = working_hours
if len(body['mailbox_settings']) == 0:
del body['mailbox_settings']
return client.update_user(user_id=user_id,
body=body)
def users_user_delete(client, user_id, if_match=None):
    """Delete a user, optionally guarded by an ETag via the If-Match header."""
    return client.delete_user(user_id=user_id, if_match=if_match)
def users_user_create_extension(client, user_id, id_=None):
    """Create an open extension on the user; only non-None fields are sent."""
    body = {} if id_ is None else {'id': id_}
    return client.create_extensions(user_id=user_id, body=body)
def users_user_create_license_detail(client, user_id, id_=None,
                                     service_plans=None, sku_id=None,
                                     sku_part_number=None):
    """Create a licenseDetails entry for the user; only non-None fields are sent."""
    candidates = (
        ('id', id_),
        ('service_plans', service_plans),
        ('sku_id', sku_id),
        ('sku_part_number', sku_part_number),
    )
    body = {name: value for name, value in candidates if value is not None}
    return client.create_license_details(user_id=user_id, body=body)
def users_user_create_photo(client, user_id, id_=None, height=None, width=None):
    """Create a profile photo entry for the user; only non-None fields are sent."""
    candidates = (('id', id_), ('height', height), ('width', width))
    body = {name: value for name, value in candidates if value is not None}
    return client.create_photos(user_id=user_id, body=body)
def users_user_create_ref_created_object(client, user_id, body):
    """Add a reference from the user's createdObjects collection."""
    return client.create_ref_created_objects(user_id=user_id, body=body)


def users_user_create_ref_direct_report(client, user_id, body):
    """Add a reference to the user's directReports collection."""
    return client.create_ref_direct_reports(user_id=user_id, body=body)


def users_user_create_ref_member_of(client, user_id, body):
    """Add a reference to the user's memberOf collection."""
    return client.create_ref_member_of(user_id=user_id, body=body)


def users_user_create_ref_oauth2_permission_grant(client, user_id, body):
    """Add a reference to the user's oauth2PermissionGrants collection."""
    return client.create_ref_oauth2_permission_grants(user_id=user_id, body=body)


def users_user_create_ref_owned_device(client, user_id, body):
    """Add a reference to the user's ownedDevices collection."""
    return client.create_ref_owned_devices(user_id=user_id, body=body)


def users_user_create_ref_owned_object(client, user_id, body):
    """Add a reference to the user's ownedObjects collection."""
    return client.create_ref_owned_objects(user_id=user_id, body=body)


def users_user_create_ref_registered_device(client, user_id, body):
    """Add a reference to the user's registeredDevices collection."""
    return client.create_ref_registered_devices(user_id=user_id, body=body)


def users_user_create_ref_transitive_member_of(client, user_id, body):
    """Add a reference to the user's transitiveMemberOf collection."""
    return client.create_ref_transitive_member_of(user_id=user_id, body=body)
def users_user_delete_extension(client, user_id, extension_id, if_match=None):
    """Delete an open extension on the user."""
    return client.delete_extensions(user_id=user_id, extension_id=extension_id,
                                    if_match=if_match)


def users_user_delete_license_detail(client, user_id, license_details_id,
                                     if_match=None):
    """Delete a licenseDetails entry for the user."""
    return client.delete_license_details(user_id=user_id,
                                         license_details_id=license_details_id,
                                         if_match=if_match)


def users_user_delete_outlook(client, user_id, if_match=None):
    """Delete the user's outlook navigation property."""
    return client.delete_outlook(user_id=user_id, if_match=if_match)


def users_user_delete_photo(client, user_id, profile_photo_id=None, if_match=None):
    """Delete the user's photo; targets a specific photo when profile_photo_id is given."""
    if user_id is not None and profile_photo_id is not None:
        return client.delete_photos(user_id=user_id,
                                    profile_photo_id=profile_photo_id,
                                    if_match=if_match)
    return client.delete_photo(user_id=user_id, if_match=if_match)


def users_user_delete_ref_manager(client, user_id, if_match=None):
    """Remove the user's manager reference."""
    return client.delete_ref_manager(user_id=user_id, if_match=if_match)


def users_user_delete_setting(client, user_id, if_match=None):
    """Delete the user's settings navigation property."""
    return client.delete_settings(user_id=user_id, if_match=if_match)
def users_user_list_created_object(client, user_id, orderby=None, select=None,
                                   expand=None):
    """List directory objects created by the user."""
    return client.list_created_objects(user_id=user_id, orderby=orderby,
                                       select=select, expand=expand)


def users_user_list_direct_report(client, user_id, orderby=None, select=None,
                                  expand=None):
    """List the user's direct reports."""
    return client.list_direct_reports(user_id=user_id, orderby=orderby,
                                      select=select, expand=expand)


def users_user_list_extension(client, user_id, orderby=None, select=None,
                              expand=None):
    """List open extensions on the user."""
    return client.list_extensions(user_id=user_id, orderby=orderby,
                                  select=select, expand=expand)


def users_user_list_license_detail(client, user_id, orderby=None, select=None,
                                   expand=None):
    """List licenseDetails entries for the user."""
    return client.list_license_details(user_id=user_id, orderby=orderby,
                                       select=select, expand=expand)


def users_user_list_member_of(client, user_id, orderby=None, select=None,
                              expand=None):
    """List groups and directory roles the user is a direct member of."""
    return client.list_member_of(user_id=user_id, orderby=orderby,
                                 select=select, expand=expand)


def users_user_list_oauth2_permission_grant(client, user_id, orderby=None,
                                            select=None, expand=None):
    """List OAuth2 permission grants for the user."""
    return client.list_oauth2_permission_grants(user_id=user_id, orderby=orderby,
                                                select=select, expand=expand)


def users_user_list_owned_device(client, user_id, orderby=None, select=None,
                                 expand=None):
    """List devices owned by the user."""
    return client.list_owned_devices(user_id=user_id, orderby=orderby,
                                     select=select, expand=expand)


def users_user_list_owned_object(client, user_id, orderby=None, select=None,
                                 expand=None):
    """List directory objects owned by the user."""
    return client.list_owned_objects(user_id=user_id, orderby=orderby,
                                     select=select, expand=expand)


def users_user_list_photo(client, user_id, orderby=None, select=None,
                          expand=None):
    """List the user's profile photos."""
    return client.list_photos(user_id=user_id, orderby=orderby,
                              select=select, expand=expand)
def users_user_list_ref_created_object(client, user_id, orderby=None):
    """List references in the user's createdObjects collection."""
    return client.list_ref_created_objects(user_id=user_id, orderby=orderby)


def users_user_list_ref_direct_report(client, user_id, orderby=None):
    """List references in the user's directReports collection."""
    return client.list_ref_direct_reports(user_id=user_id, orderby=orderby)


def users_user_list_ref_member_of(client, user_id, orderby=None):
    """List references in the user's memberOf collection."""
    return client.list_ref_member_of(user_id=user_id, orderby=orderby)


def users_user_list_ref_oauth2_permission_grant(client, user_id, orderby=None):
    """List references in the user's oauth2PermissionGrants collection."""
    return client.list_ref_oauth2_permission_grants(user_id=user_id, orderby=orderby)


def users_user_list_ref_owned_device(client, user_id, orderby=None):
    """List references in the user's ownedDevices collection."""
    return client.list_ref_owned_devices(user_id=user_id, orderby=orderby)


def users_user_list_ref_owned_object(client, user_id, orderby=None):
    """List references in the user's ownedObjects collection."""
    return client.list_ref_owned_objects(user_id=user_id, orderby=orderby)


def users_user_list_ref_registered_device(client, user_id, orderby=None):
    """List references in the user's registeredDevices collection."""
    return client.list_ref_registered_devices(user_id=user_id, orderby=orderby)


def users_user_list_ref_transitive_member_of(client, user_id, orderby=None):
    """List references in the user's transitiveMemberOf collection."""
    return client.list_ref_transitive_member_of(user_id=user_id, orderby=orderby)
def users_user_list_registered_device(client, user_id, orderby=None,
                                      select=None, expand=None):
    """List devices registered to the user."""
    return client.list_registered_devices(user_id=user_id, orderby=orderby,
                                          select=select, expand=expand)


def users_user_list_transitive_member_of(client, user_id, orderby=None,
                                         select=None, expand=None):
    """List groups and roles the user belongs to, directly or transitively."""
    return client.list_transitive_member_of(user_id=user_id, orderby=orderby,
                                            select=select, expand=expand)
def users_user_set_ref_manager(client, user_id, body):
    """Set the user's manager by reference."""
    return client.set_ref_manager(user_id=user_id, body=body)
def users_user_show_extension(client, user_id, extension_id, select=None,
                              expand=None):
    """Get an open extension on the user."""
    return client.get_extensions(user_id=user_id, extension_id=extension_id,
                                 select=select, expand=expand)


def users_user_show_license_detail(client, user_id, license_details_id,
                                   select=None, expand=None):
    """Get a licenseDetails entry for the user."""
    return client.get_license_details(user_id=user_id,
                                      license_details_id=license_details_id,
                                      select=select, expand=expand)


def users_user_show_manager(client, user_id, select=None, expand=None):
    """Get the user's manager."""
    return client.get_manager(user_id=user_id, select=select, expand=expand)


def users_user_show_outlook(client, user_id, select=None, expand=None):
    """Get the user's outlook navigation property."""
    return client.get_outlook(user_id=user_id, select=select, expand=expand)


def users_user_show_photo(client, user_id, profile_photo_id=None, select=None,
                          expand=None):
    """Get the user's photo; targets a specific photo when profile_photo_id is given."""
    if user_id is not None and profile_photo_id is not None:
        return client.get_photos(user_id=user_id,
                                 profile_photo_id=profile_photo_id,
                                 select=select, expand=expand)
    return client.get_photo(user_id=user_id, select=select, expand=expand)


def users_user_show_ref_manager(client, user_id):
    """Get the user's manager by reference."""
    return client.get_ref_manager(user_id=user_id)


def users_user_show_setting(client, user_id, select=None, expand=None):
    """Get the user's settings navigation property."""
    return client.get_settings(user_id=user_id, select=select, expand=expand)
def users_user_update_extension(client, user_id, extension_id, id_=None):
    """Update an open extension on the user; only non-None fields are sent."""
    body = {} if id_ is None else {'id': id_}
    return client.update_extensions(user_id=user_id, extension_id=extension_id,
                                    body=body)


def users_user_update_license_detail(client, user_id, license_details_id,
                                     id_=None, service_plans=None, sku_id=None,
                                     sku_part_number=None):
    """Update a licenseDetails entry for the user; only non-None fields are sent."""
    candidates = (
        ('id', id_),
        ('service_plans', service_plans),
        ('sku_id', sku_id),
        ('sku_part_number', sku_part_number),
    )
    body = {name: value for name, value in candidates if value is not None}
    return client.update_license_details(user_id=user_id,
                                         license_details_id=license_details_id,
                                         body=body)


def users_user_update_outlook(client, user_id, id_=None, master_categories=None):
    """Update the user's outlook navigation property; only non-None fields are sent."""
    candidates = (('id', id_), ('master_categories', master_categories))
    body = {name: value for name, value in candidates if value is not None}
    return client.update_outlook(user_id=user_id, body=body)


def users_user_update_photo(client, user_id, profile_photo_id=None, id_=None,
                            height=None, width=None):
    """Update the user's photo; targets a specific photo when profile_photo_id is given."""
    candidates = (('id', id_), ('height', height), ('width', width))
    body = {name: value for name, value in candidates if value is not None}
    if user_id is not None and profile_photo_id is not None:
        return client.update_photos(user_id=user_id,
                                    profile_photo_id=profile_photo_id,
                                    body=body)
    return client.update_photo(user_id=user_id, body=body)
def users_user_update_setting(client, user_id, id_=None,
                              contribution_to_content_discovery_as_organization_disabled=None,
                              contribution_to_content_discovery_disabled=None,
                              microsoft_graph_entity_id=None,
                              created_date_time=None,
                              last_modified_date_time=None,
                              application=None, device=None, user=None,
                              availability=None):
    """Update the user's settings, including nested shiftPreferences.

    Only non-None arguments are serialized; empty nested objects are omitted
    from the request body entirely.
    """
    def _compact(*pairs):
        # Keep only the fields the caller actually supplied.
        return {name: value for name, value in pairs if value is not None}

    body = _compact(
        ('id', id_),
        ('contribution_to_content_discovery_as_organization_disabled',
         contribution_to_content_discovery_as_organization_disabled),
        ('contribution_to_content_discovery_disabled',
         contribution_to_content_discovery_disabled),
    )
    shift_preferences = _compact(
        ('id', microsoft_graph_entity_id),
        ('created_date_time', created_date_time),
        ('last_modified_date_time', last_modified_date_time),
    )
    last_modified_by = _compact(
        ('application', application),
        ('device', device),
        ('user', user),
    )
    if last_modified_by:
        shift_preferences['last_modified_by'] = last_modified_by
    if availability is not None:
        shift_preferences['availability'] = availability
    if shift_preferences:
        body['shift_preferences'] = shift_preferences
    return client.update_settings(user_id=user_id, body=body)
def users_user_outlook_create_master_category(client, user_id, id_=None,
                                              color=None, display_name=None):
    """Create an Outlook master category for the user; only non-None fields are sent."""
    candidates = (('id', id_), ('color', color), ('display_name', display_name))
    body = {name: value for name, value in candidates if value is not None}
    return client.create_master_categories(user_id=user_id, body=body)


def users_user_outlook_delete_master_category(client, user_id,
                                              outlook_category_id,
                                              if_match=None):
    """Delete an Outlook master category for the user."""
    return client.delete_master_categories(user_id=user_id,
                                           outlook_category_id=outlook_category_id,
                                           if_match=if_match)


def users_user_outlook_list_master_category(client, user_id, orderby=None,
                                            select=None, expand=None):
    """List the user's Outlook master categories."""
    return client.list_master_categories(user_id=user_id, orderby=orderby,
                                         select=select, expand=expand)


def users_user_outlook_show_master_category(client, user_id,
                                            outlook_category_id, select=None,
                                            expand=None):
    """Get an Outlook master category for the user."""
    return client.get_master_categories(user_id=user_id,
                                        outlook_category_id=outlook_category_id,
                                        select=select, expand=expand)


def users_user_outlook_update_master_category(client, user_id,
                                              outlook_category_id, id_=None,
                                              color=None, display_name=None):
    """Update an Outlook master category for the user; only non-None fields are sent."""
    candidates = (('id', id_), ('color', color), ('display_name', display_name))
    body = {name: value for name, value in candidates if value is not None}
    return client.update_master_categories(user_id=user_id,
                                           outlook_category_id=outlook_category_id,
                                           body=body)
def users_user_setting_delete_shift_preference(client, user_id, if_match=None):
    """Delete the user's shiftPreferences settings."""
    return client.delete_shift_preferences(user_id=user_id, if_match=if_match)


def users_user_setting_show_shift_preference(client, user_id, select=None,
                                             expand=None):
    """Get the user's shiftPreferences settings."""
    return client.get_shift_preferences(user_id=user_id, select=select,
                                        expand=expand)


def users_user_setting_update_shift_preference(client, user_id, id_=None,
                                               created_date_time=None,
                                               last_modified_date_time=None,
                                               application=None, device=None,
                                               user=None, availability=None):
    """Update the user's shiftPreferences settings.

    Only non-None arguments are serialized; the nested last_modified_by
    identity set is omitted entirely when all of its parts are None.
    """
    body = {name: value for name, value in (
        ('id', id_),
        ('created_date_time', created_date_time),
        ('last_modified_date_time', last_modified_date_time),
    ) if value is not None}
    last_modified_by = {name: value for name, value in (
        ('application', application),
        ('device', device),
        ('user', user),
    ) if value is not None}
    if last_modified_by:
        body['last_modified_by'] = last_modified_by
    if availability is not None:
        body['availability'] = availability
    return client.update_shift_preferences(user_id=user_id, body=body)
| 45.47869
| 147
| 0.580348
| 9,461
| 87,501
| 4.987528
| 0.03266
| 0.076631
| 0.084112
| 0.119842
| 0.982622
| 0.973213
| 0.963761
| 0.956598
| 0.938521
| 0.914489
| 0
| 0.001319
| 0.350145
| 87,501
| 1,923
| 148
| 45.50234
| 0.828518
| 0.005714
| 0
| 0.91443
| 0
| 0
| 0.11367
| 0.025842
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034676
| false
| 0.010067
| 0.006711
| 0.026286
| 0.07774
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4febd304d82e50721c70b4bc213392eed9cad639
| 78,867
|
py
|
Python
|
savecode/threeyears/idownserver/config_outputstandard.py
|
Octoberr/swm0920
|
8f05a6b91fc205960edd57f9076facec04f49a1a
|
[
"Apache-2.0"
] | 2
|
2019-05-19T11:54:26.000Z
|
2019-05-19T12:03:49.000Z
|
savecode/threeyears/idownserver/config_outputstandard.py
|
Octoberr/swm0920
|
8f05a6b91fc205960edd57f9076facec04f49a1a
|
[
"Apache-2.0"
] | 1
|
2020-11-27T07:55:15.000Z
|
2020-11-27T07:55:15.000Z
|
savecode/threeyears/idownserver/config_outputstandard.py
|
Octoberr/swm0920
|
8f05a6b91fc205960edd57f9076facec04f49a1a
|
[
"Apache-2.0"
] | 2
|
2021-09-06T18:06:12.000Z
|
2021-12-31T07:44:43.000Z
|
"""配置输出标准"""
# -*- coding:utf-8 -*-
import os
from datacontract import EStandardDataType
from outputmanagement import (
ECrypto,
EDataName,
OutputDataConfig,
OutputFieldConfig,
OutputPlatformConfig,
OutputStandardConfig,
)
stdconfig = OutputStandardConfig(
platforms=[
OutputPlatformConfig(
"zplus",
enabled=True,
datas=[
OutputDataConfig(
datatype=EStandardDataType.Task,
suffix="idown_task",
dataname=EDataName.Guid,
fields=[
OutputFieldConfig(
# 控制端输出子任务到采集端时,不输出此字段
destfield="progress",
srcfield="progress",
isfiltered=True,
),
OutputFieldConfig(
destfield="platform",
srcfield="platform",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="taskid",
srcfield="taskid",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="parenttaskid",
srcfield="parenttaskid",
isrequired=False,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="batchid",
srcfield="batchid",
isrequired=False,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="tokenid",
srcfield="tokenid",
isrequired=False,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="parentbatchid",
srcfield="parentbatchid",
isrequired=False,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="tasktype",
srcfield="tasktype",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="tokentype",
srcfield="tokentype",
isrequired=False,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="apptype",
srcfield="apptype",
isrequired=False,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="forcedownload",
srcfield="forcedownload",
isrequired=False,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="casenode",
srcfield="casenode",
isrequired=False,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="input",
srcfield="input",
isrequired=False,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="preglobaltelcode",
srcfield="preglobaltelcode",
isrequired=False,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="preaccount",
srcfield="preaccount",
isrequired=False,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="globaltelcode",
srcfield="globaltelcode",
isrequired=False,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="phone",
srcfield="phone",
isrequired=False,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="account",
srcfield="account",
isrequired=False,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="password",
srcfield="password",
isrequired=False,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="url",
srcfield="url",
isrequired=False,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="host",
srcfield="host",
isrequired=False,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="cookie",
srcfield="cookie",
isrequired=False,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="source",
srcfield="source",
isrequired=False,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="cmdid",
srcfield="cmdid",
isrequired=False,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="cmd",
srcfield="cmd",
isrequired=False,
crypto=ECrypto.Base64,
dftval=None,
),
],
),
OutputDataConfig(
datatype=EStandardDataType.TaskBack,
suffix="idown_task_back",
dataname=EDataName.Guid,
fields=[
OutputFieldConfig(
destfield="taskid",
srcfield="taskid",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="state",
srcfield="state",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="batchcompletecount",
srcfield="batchcompletecount",
isrequired=False,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="recvmsg",
srcfield="recvmsg",
isrequired=False,
crypto=ECrypto.Base64,
dftval=None,
),
OutputFieldConfig(
destfield="progress",
srcfield="progress",
isrequired=False,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="time",
srcfield="time",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="result",
srcfield="result",
isrequired=False,
crypto=ECrypto.Base64,
dftval=None,
),
OutputFieldConfig(
destfield="sequence",
srcfield="sequence",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
],
),
OutputDataConfig(
datatype=EStandardDataType.TaskBatchBack,
suffix="idown_btask_back",
dataname=EDataName.Guid,
fields=[
OutputFieldConfig(
destfield="taskid",
srcfield="taskid",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="batchid",
srcfield="batchid",
isrequired=False,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="state",
srcfield="state",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="recvmsg",
srcfield="recvmsg",
isrequired=False,
crypto=ECrypto.Base64,
dftval=None,
),
OutputFieldConfig(
destfield="time",
srcfield="time",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="result",
srcfield="result",
isrequired=False,
crypto=ECrypto.Base64,
dftval=None,
),
OutputFieldConfig(
destfield="progress",
srcfield="progress",
isrequired=False,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="sequence",
srcfield="sequence",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
],
),
OutputDataConfig(
datatype=EStandardDataType.IDownCmd,
suffix="idown_cmd",
dataname=EDataName.Guid,
fields=[
OutputFieldConfig(
destfield="platform",
srcfield="platform",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="cmdid",
srcfield="cmdid",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="cmd",
srcfield="cmd",
isrequired=True,
crypto=ECrypto.Base64,
dftval=None,
),
OutputFieldConfig(
destfield="source",
srcfield="source",
isrequired=False,
crypto=ECrypto.Null,
dftval=None,
),
],
),
OutputDataConfig(
datatype=EStandardDataType.TaskCmdBack,
suffix="idown_cmd_back",
dataname=EDataName.Guid,
fields=[
OutputFieldConfig(
destfield="cmdid",
srcfield="cmdid",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="clientid",
srcfield="clientid",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="source",
srcfield="source",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="state",
srcfield="state",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="recvmsg",
srcfield="recvmsg",
isrequired=False,
crypto=ECrypto.Base64,
dftval=None,
),
OutputFieldConfig(
destfield="time",
srcfield="time",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="sequence",
srcfield="sequence",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="platform",
srcfield="platform",
isrequired=False,
crypto=ECrypto.Null,
dftval=None,
isfiltered=True,
),
],
),
OutputDataConfig(
datatype=EStandardDataType.IScanTask,
suffix="iscan_task",
dataname=EDataName.Guid,
fields=[
OutputFieldConfig(
destfield="periodnum",
srcfield="periodnum",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="createtime",
srcfield="createtime",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="taskid",
srcfield="taskid",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="source",
srcfield="source",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="scantype",
srcfield="scantype",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="cmdid",
srcfield="cmdid",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="cmd",
srcfield="cmd",
isrequired=True,
crypto=ECrypto.Base64,
dftval=None,
),
],
),
OutputDataConfig(
datatype=EStandardDataType.IscanTaskBack,
suffix="iscan_task_back",
dataname=EDataName.Guid,
fields=[
OutputFieldConfig(
destfield="periodnum",
srcfield="periodnum",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="taskid",
srcfield="taskid",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="state",
srcfield="state",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="recvmsg",
srcfield="recvmsg",
isrequired=False,
crypto=ECrypto.Base64,
dftval=None,
),
OutputFieldConfig(
destfield="time",
srcfield="time",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="sequence",
srcfield="sequence",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
],
),
OutputDataConfig(
datatype=EStandardDataType.IScoutTask,
suffix="iscout_task",
dataname=EDataName.Guid,
fields=[
OutputFieldConfig(
destfield="periodnum",
srcfield="periodnum",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="platform",
srcfield="platform",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="createtime",
srcfield="createtime",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="taskid",
srcfield="taskid",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="batchid",
srcfield="batchid",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="source",
srcfield="source",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="objecttype",
srcfield="objecttype",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="object",
srcfield="object",
isrequired=True,
crypto=ECrypto.Base64,
dftval=None,
),
OutputFieldConfig(
destfield="cmdid",
srcfield="cmdid",
isrequired=False,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="cmd",
srcfield="cmd",
isrequired=False,
crypto=ECrypto.Base64,
dftval=None,
),
],
),
OutputDataConfig(
datatype=EStandardDataType.IscoutTaskBack,
suffix="iscout_task_back",
dataname=EDataName.Guid,
fields=[
OutputFieldConfig(
destfield="periodnum",
srcfield="periodnum",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="taskid",
srcfield="taskid",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="state",
srcfield="state",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="progress",
srcfield="progress",
isrequired=False,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="elapsed",
srcfield="elapsed",
isrequired=False,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="recvmsg",
srcfield="recvmsg",
isrequired=False,
crypto=ECrypto.Base64,
dftval=None,
),
OutputFieldConfig(
destfield="time",
srcfield="time",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="sequence",
srcfield="sequence",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
],
),
OutputDataConfig(
datatype=EStandardDataType.IscoutBtaskBack,
suffix="iscout_btask_back",
dataname=EDataName.Guid,
fields=[
OutputFieldConfig(
destfield="periodnum",
srcfield="periodnum",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="taskid",
srcfield="taskid",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="batchid",
srcfield="batchid",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="state",
srcfield="state",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="progress",
srcfield="progress",
isrequired=False,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="elapsed",
srcfield="elapsed",
isrequired=False,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="recvmsg",
srcfield="recvmsg",
isrequired=False,
crypto=ECrypto.Base64,
dftval=None,
),
OutputFieldConfig(
destfield="time",
srcfield="time",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="sequence",
srcfield="sequence",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
],
),
OutputDataConfig(
datatype=EStandardDataType.Autotask,
suffix="automated_task",
dataname=EDataName.Guid,
fields=[
OutputFieldConfig(
destfield="platform",
srcfield="platform",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="source",
srcfield="source",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="taskid",
srcfield="taskid",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="batchid",
srcfield="batchid",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="autotasktype",
srcfield="autotasktype",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="createtime",
srcfield="createtime",
isrequired=False,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="cmdid",
srcfield="cmdid",
isrequired=False,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="cmd",
srcfield="cmd",
isrequired=False,
crypto=ECrypto.Base64,
dftval=None,
),
],
),
OutputDataConfig(
enable=False, # 暂不输出此回馈数据,不回传中心
datatype=EStandardDataType.Autotaskback,
suffix="automated_task_back",
dataname=EDataName.Guid,
fields=[
OutputFieldConfig(
destfield="taskid",
srcfield="taskid",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="periodnum",
srcfield="periodnum",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="state",
srcfield="state",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="recvmsg",
srcfield="recvmsg",
isrequired=False,
crypto=ECrypto.Base64,
dftval=None,
),
OutputFieldConfig(
destfield="progress",
srcfield="progress",
isrequired=False,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="endtime",
srcfield="endtime",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="source",
srcfield="source",
isrequired=False,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="sequence",
srcfield="sequence",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
],
),
OutputDataConfig(
enable=False, # 暂不输出此回馈数据,不回传中心
datatype=EStandardDataType.AutoBatchTaskBack,
suffix="automated_btask_back",
dataname=EDataName.Guid,
fields=[
OutputFieldConfig(
destfield="taskid",
srcfield="taskid",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="batchid",
srcfield="batchid",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="periodnum",
srcfield="periodnum",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="state",
srcfield="state",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="recvmsg",
srcfield="recvmsg",
isrequired=False,
crypto=ECrypto.Base64,
dftval=None,
),
OutputFieldConfig(
destfield="progress",
srcfield="progress",
isrequired=False,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="endtime",
srcfield="endtime",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="source",
srcfield="source",
isrequired=False,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="sequence",
srcfield="sequence",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
],
),
],
),
OutputPlatformConfig(
"zplan",
enabled=True,
datas=[
OutputDataConfig(
datatype=EStandardDataType.Task,
suffix="idown_task",
dataname=EDataName.Guid,
fields=[
OutputFieldConfig(
# 控制端输出子任务到采集端时,不输出此字段
destfield="progress",
srcfield="progress",
isfiltered=True,
),
OutputFieldConfig(
destfield="platform",
srcfield="platform",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="taskid",
srcfield="taskid",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="parenttaskid",
srcfield="parenttaskid",
isrequired=False,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="batchid",
srcfield="batchid",
isrequired=False,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="tokenid",
srcfield="tokenid",
isrequired=False,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="parentbatchid",
srcfield="parentbatchid",
isrequired=False,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="tasktype",
srcfield="tasktype",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="tokentype",
srcfield="tokentype",
isrequired=False,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="apptype",
srcfield="apptype",
isrequired=False,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="forcedownload",
srcfield="forcedownload",
isrequired=False,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="casenode",
srcfield="casenode",
isrequired=False,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="input",
srcfield="input",
isrequired=False,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="preglobaltelcode",
srcfield="preglobaltelcode",
isrequired=False,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="preaccount",
srcfield="preaccount",
isrequired=False,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="globaltelcode",
srcfield="globaltelcode",
isrequired=False,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="phone",
srcfield="phone",
isrequired=False,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="account",
srcfield="account",
isrequired=False,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="password",
srcfield="password",
isrequired=False,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="url",
srcfield="url",
isrequired=False,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="host",
srcfield="host",
isrequired=False,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="cookie",
srcfield="cookie",
isrequired=False,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="source",
srcfield="source",
isrequired=False,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="cmdid",
srcfield="cmdid",
isrequired=False,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="cmd",
srcfield="cmd",
isrequired=False,
crypto=ECrypto.Base64,
dftval=None,
),
],
),
OutputDataConfig(
datatype=EStandardDataType.TaskBack,
suffix="idown_task_back",
dataname=EDataName.Guid,
fields=[
OutputFieldConfig(
destfield="taskid",
srcfield="taskid",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="state",
srcfield="state",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="batchcompletecount",
srcfield="batchcompletecount",
isrequired=False,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="recvmsg",
srcfield="recvmsg",
isrequired=False,
crypto=ECrypto.Base64,
dftval=None,
),
OutputFieldConfig(
destfield="progress",
srcfield="progress",
isrequired=False,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="time",
srcfield="time",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="result",
srcfield="result",
isrequired=False,
crypto=ECrypto.Base64,
dftval=None,
),
OutputFieldConfig(
destfield="sequence",
srcfield="sequence",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
],
),
OutputDataConfig(
datatype=EStandardDataType.TaskBatchBack,
suffix="idown_btask_back",
dataname=EDataName.Guid,
fields=[
OutputFieldConfig(
destfield="taskid",
srcfield="taskid",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="batchid",
srcfield="batchid",
isrequired=False,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="state",
srcfield="state",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="recvmsg",
srcfield="recvmsg",
isrequired=False,
crypto=ECrypto.Base64,
dftval=None,
),
OutputFieldConfig(
destfield="time",
srcfield="time",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="result",
srcfield="result",
isrequired=False,
crypto=ECrypto.Base64,
dftval=None,
),
OutputFieldConfig(
destfield="progress",
srcfield="progress",
isrequired=False,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="sequence",
srcfield="sequence",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
],
),
OutputDataConfig(
datatype=EStandardDataType.IDownCmd,
suffix="idown_cmd",
dataname=EDataName.Guid,
fields=[
OutputFieldConfig(
destfield="platform",
srcfield="platform",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="cmdid",
srcfield="cmdid",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="cmd",
srcfield="cmd",
isrequired=True,
crypto=ECrypto.Base64,
dftval=None,
),
OutputFieldConfig(
destfield="source",
srcfield="source",
isrequired=False,
crypto=ECrypto.Null,
dftval=None,
),
],
),
OutputDataConfig(
datatype=EStandardDataType.TaskCmdBack,
suffix="idown_cmd_back",
dataname=EDataName.Guid,
fields=[
OutputFieldConfig(
destfield="cmdid",
srcfield="cmdid",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="clientid",
srcfield="clientid",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="source",
srcfield="source",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="state",
srcfield="state",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="recvmsg",
srcfield="recvmsg",
isrequired=False,
crypto=ECrypto.Base64,
dftval=None,
),
OutputFieldConfig(
destfield="time",
srcfield="time",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="sequence",
srcfield="sequence",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="platform",
srcfield="platform",
isrequired=False,
crypto=ECrypto.Null,
dftval=None,
isfiltered=True,
),
],
),
OutputDataConfig(
datatype=EStandardDataType.IScanTask,
suffix="iscan_task",
dataname=EDataName.Guid,
fields=[
OutputFieldConfig(
destfield="periodnum",
srcfield="periodnum",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="createtime",
srcfield="createtime",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="taskid",
srcfield="taskid",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="source",
srcfield="source",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="scantype",
srcfield="scantype",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="cmdid",
srcfield="cmdid",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="cmd",
srcfield="cmd",
isrequired=True,
crypto=ECrypto.Base64,
dftval=None,
),
],
),
OutputDataConfig(
datatype=EStandardDataType.IscanTaskBack,
suffix="iscan_task_back",
dataname=EDataName.Guid,
fields=[
OutputFieldConfig(
destfield="periodnum",
srcfield="periodnum",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="taskid",
srcfield="taskid",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="state",
srcfield="state",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="recvmsg",
srcfield="recvmsg",
isrequired=False,
crypto=ECrypto.Base64,
dftval=None,
),
OutputFieldConfig(
destfield="time",
srcfield="time",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="sequence",
srcfield="sequence",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
],
),
OutputDataConfig(
datatype=EStandardDataType.IScoutTask,
suffix="iscout_task",
dataname=EDataName.Guid,
fields=[
OutputFieldConfig(
destfield="periodnum",
srcfield="periodnum",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="platform",
srcfield="platform",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="createtime",
srcfield="createtime",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="taskid",
srcfield="taskid",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="batchid",
srcfield="batchid",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="source",
srcfield="source",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="objecttype",
srcfield="objecttype",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="object",
srcfield="object",
isrequired=True,
crypto=ECrypto.Base64,
dftval=None,
),
OutputFieldConfig(
destfield="cmdid",
srcfield="cmdid",
isrequired=False,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="cmd",
srcfield="cmd",
isrequired=False,
crypto=ECrypto.Base64,
dftval=None,
),
],
),
OutputDataConfig(
datatype=EStandardDataType.IscoutTaskBack,
suffix="iscout_task_back",
dataname=EDataName.Guid,
fields=[
OutputFieldConfig(
destfield="periodnum",
srcfield="periodnum",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="taskid",
srcfield="taskid",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="state",
srcfield="state",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="progress",
srcfield="progress",
isrequired=False,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="elapsed",
srcfield="elapsed",
isrequired=False,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="recvmsg",
srcfield="recvmsg",
isrequired=False,
crypto=ECrypto.Base64,
dftval=None,
),
OutputFieldConfig(
destfield="time",
srcfield="time",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="sequence",
srcfield="sequence",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
],
),
OutputDataConfig(
datatype=EStandardDataType.IscoutBtaskBack,
suffix="iscout_btask_back",
dataname=EDataName.Guid,
fields=[
OutputFieldConfig(
destfield="periodnum",
srcfield="periodnum",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="taskid",
srcfield="taskid",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="batchid",
srcfield="batchid",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="state",
srcfield="state",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="progress",
srcfield="progress",
isrequired=False,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="elapsed",
srcfield="elapsed",
isrequired=False,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="recvmsg",
srcfield="recvmsg",
isrequired=False,
crypto=ECrypto.Base64,
dftval=None,
),
OutputFieldConfig(
destfield="time",
srcfield="time",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="sequence",
srcfield="sequence",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
],
),
OutputDataConfig(
datatype=EStandardDataType.Autotask,
suffix="automated_task",
dataname=EDataName.Guid,
fields=[
OutputFieldConfig(
destfield="platform",
srcfield="platform",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="source",
srcfield="source",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="taskid",
srcfield="taskid",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="batchid",
srcfield="batchid",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="autotasktype",
srcfield="autotasktype",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="createtime",
srcfield="createtime",
isrequired=False,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="cmdid",
srcfield="cmdid",
isrequired=False,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="cmd",
srcfield="cmd",
isrequired=False,
crypto=ECrypto.Base64,
dftval=None,
),
],
),
OutputDataConfig(
enable=False, # 暂不输出此回馈数据,不回传中心
datatype=EStandardDataType.Autotaskback,
suffix="automated_task_back",
dataname=EDataName.Guid,
fields=[
OutputFieldConfig(
destfield="taskid",
srcfield="taskid",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="periodnum",
srcfield="periodnum",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="state",
srcfield="state",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="recvmsg",
srcfield="recvmsg",
isrequired=False,
crypto=ECrypto.Base64,
dftval=None,
),
OutputFieldConfig(
destfield="progress",
srcfield="progress",
isrequired=False,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="endtime",
srcfield="endtime",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="source",
srcfield="source",
isrequired=False,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="sequence",
srcfield="sequence",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
],
),
OutputDataConfig(
enable=False, # 暂不输出此回馈数据,不回传中心
datatype=EStandardDataType.AutoBatchTaskBack,
suffix="automated_btask_back",
dataname=EDataName.Guid,
fields=[
OutputFieldConfig(
destfield="taskid",
srcfield="taskid",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="batchid",
srcfield="batchid",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="periodnum",
srcfield="periodnum",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="state",
srcfield="state",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="recvmsg",
srcfield="recvmsg",
isrequired=False,
crypto=ECrypto.Base64,
dftval=None,
),
OutputFieldConfig(
destfield="progress",
srcfield="progress",
isrequired=False,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="endtime",
srcfield="endtime",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="source",
srcfield="source",
isrequired=False,
crypto=ECrypto.Null,
dftval=None,
),
OutputFieldConfig(
destfield="sequence",
srcfield="sequence",
isrequired=True,
crypto=ECrypto.Null,
dftval=None,
),
],
),
],
),
]
)
| 42.152325
| 65
| 0.306516
| 3,180
| 78,867
| 7.588679
| 0.032075
| 0.252113
| 0.23272
| 0.310293
| 0.988977
| 0.988977
| 0.988977
| 0.988977
| 0.988977
| 0.988977
| 0
| 0.002247
| 0.633167
| 78,867
| 1,870
| 66
| 42.174866
| 0.831876
| 0.001699
| 0
| 0.991407
| 0
| 0
| 0.047027
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.002148
| 0.001611
| 0
| 0.001611
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 11
|
8b08f69e6f0e5eb0e8ce4166041963dbce880767
| 395
|
py
|
Python
|
rcnn/lib/python3.6/site-packages/tensorflow/debugging/__init__.py
|
dreamingweaver/making_passportImage
|
68f23411780ff82abe934dfae5fc04acb80f2c49
|
[
"MIT"
] | 1
|
2019-01-12T13:17:32.000Z
|
2019-01-12T13:17:32.000Z
|
rcnn/lib/python3.6/site-packages/tensorflow/debugging/__init__.py
|
dreamingweaver/making_passportImage
|
68f23411780ff82abe934dfae5fc04acb80f2c49
|
[
"MIT"
] | null | null | null |
rcnn/lib/python3.6/site-packages/tensorflow/debugging/__init__.py
|
dreamingweaver/making_passportImage
|
68f23411780ff82abe934dfae5fc04acb80f2c49
|
[
"MIT"
] | null | null | null |
# This file is MACHINE GENERATED! Do not edit.
# Generated by: tensorflow/python/tools/api/generator/create_python_api.py script.
"""Public API for tf.debugging namespace.
"""
from __future__ import print_function
from tensorflow.python import check_numerics
from tensorflow.python import is_finite
from tensorflow.python import is_inf
from tensorflow.python import is_nan
del print_function
| 28.214286
| 82
| 0.825316
| 58
| 395
| 5.413793
| 0.568966
| 0.254777
| 0.254777
| 0.33121
| 0.267516
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.113924
| 395
| 13
| 83
| 30.384615
| 0.897143
| 0.417722
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.833333
| 0
| 0.833333
| 0.333333
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
8b1b406a21485113db8140abe1a1cad814f6187c
| 7,084
|
py
|
Python
|
tests/test_routes.py
|
trailrunnervolunteers/trv-website
|
cba66812d18caf3fa584219c6249302ea41b7cbc
|
[
"MIT"
] | null | null | null |
tests/test_routes.py
|
trailrunnervolunteers/trv-website
|
cba66812d18caf3fa584219c6249302ea41b7cbc
|
[
"MIT"
] | 10
|
2022-03-07T15:36:05.000Z
|
2022-03-22T19:34:40.000Z
|
tests/test_routes.py
|
trailrunnervolunteers/trv-website
|
cba66812d18caf3fa584219c6249302ea41b7cbc
|
[
"MIT"
] | 2
|
2022-03-06T01:11:33.000Z
|
2022-03-06T01:14:54.000Z
|
import pytest
@pytest.mark.parametrize("method", ("get", "put"))
def test_create_volunteer_wrong_method(client, method):
    """/volunteer only supports POST for creation"""
    # Creation endpoint must reject anything other than POST.
    handler = getattr(client, method)
    resp = handler("/api/volunteer")
    assert resp.status_code == 405  # Method Not Allowed
def test_create_volunteer(client):
    """/volunteer only supports POST for creation"""
    resp = client.post("/api/volunteer")
    assert resp.status_code == 200
    payload = resp.get_json()
    # Stub backend always hands out id 0 for a fresh volunteer.
    assert payload["volunteer_id"] == 0
def test_update_volunteer_wrong_method(client):
    """Update happens via PUT but try POST"""
    resp = client.post("/api/volunteer/1")
    assert resp.status_code == 405  # Method Not Allowed
def test_update_volunteer(client):
    """Update a volunteer"""
    volunteer_id = 123
    resp = client.put(f"/api/volunteer/{volunteer_id}")
    assert resp.status_code == 200
    # The route echoes back the id it was given.
    assert resp.get_json()["volunteer_id"] == volunteer_id
@pytest.mark.parametrize("method", ("get", "put"))
def test_get_volunteers_wrong_method(client, method):
    """/volunteers only supports POST, try it with others"""
    handler = getattr(client, method)
    resp = handler("/api/volunteers")
    assert resp.status_code == 405  # Method Not Allowed
def test_list_volunteers(client):
    """Listing volunteers returns the (currently empty) collection."""
    resp = client.post("/api/volunteers")
    assert resp.status_code == 200
    # No data exists yet; we only check the key is present.
    assert "volunteers" in resp.get_json()
def test_get_volunteer(client):
    """Get one volunteer"""
    volunteer_id = 123
    resp = client.get(f"/api/volunteer/{volunteer_id}")
    # TODO: test that a volunteer which doesn't exist returns 404
    assert resp.status_code == 200
    assert resp.get_json()["volunteer_id"] == volunteer_id
@pytest.mark.parametrize("method", ("get", "put"))
def test_create_event_wrong_method(client, method):
    """/event only supports POST for creation"""
    handler = getattr(client, method)
    resp = handler("/api/event")
    assert resp.status_code == 405  # Method Not Allowed
def test_create_event(client):
    """/event only supports POST for creation"""
    resp = client.post("/api/event")
    assert resp.status_code == 200
    payload = resp.get_json()
    # Stub backend always hands out id 0 for a fresh event.
    assert payload["event_id"] == 0
def test_update_event_wrong_method(client):
    """Update happens via PUT but try POST"""
    resp = client.post("/api/event/1")
    assert resp.status_code == 405  # Method Not Allowed
def test_update_event(client):
    """Update an event"""
    event_id = 123
    resp = client.put(f"/api/event/{event_id}")
    assert resp.status_code == 200
    # The route echoes back the id it was given.
    assert resp.get_json()["event_id"] == event_id
@pytest.mark.parametrize("method", ("get", "put"))
def test_get_event_wrong_method(client, method):
    """/events only supports POST, try it with others"""
    # Fix: the docstring (and the parallel volunteers test) target the
    # collection endpoint /api/events, but the original called /api/event,
    # duplicating test_create_event_wrong_method instead.
    call = getattr(client, method)
    response = call("/api/events")
    # Should return HTTP 405 "Method Not Allowed"
    assert response.status_code == 405
def test_list_event(client):
    """Listing events returns the (currently empty) collection."""
    resp = client.post("/api/events")
    assert resp.status_code == 200
    # No data exists yet; we only check the key is present.
    assert "events" in resp.get_json()
def test_get_event(client):
    """Get one event"""
    event_id = 123
    resp = client.get(f"/api/event/{event_id}")
    # TODO: test that an event which doesn't exist returns 404
    assert resp.status_code == 200
    assert resp.get_json()["event_id"] == event_id
def test_list_event_participants_post(client):
    """Listing participants only supports GET, no need to filter by POST"""
    # Fix: dropped the extraneous f-prefix — the string has no placeholders (F541).
    response = client.post("/api/event/789/participants")
    assert response.status_code == 405
def test_list_event_participants(client):
    """Participants of an event are listed via GET."""
    resp = client.get(f"/api/event/{456}/participants")
    assert resp.status_code == 200
    # No data exists yet; we only check the key is present.
    assert "participants" in resp.get_json()
def test_update_event_participant_post(client):
    """Updating participants happens via PUT"""
    # Fix: dropped the extraneous f-prefix — the string has no placeholders (F541).
    response = client.post("/api/event/789/participant/234")
    assert response.status_code == 405
def test_update_event_participant(client):
    """Updating a participant marks attendance."""
    # Fix: dropped the extraneous f-prefix (F541) and replaced the
    # `== True` comparison with `is True` (E712).
    response = client.put("/api/event/789/participant/234")
    assert response.status_code == 200
    payload = response.get_json()
    assert payload["attended"] is True
def test_list_event_pictures_post(client):
    """Listing pictures only supports GET, no need to filter by POST"""
    # Fix: docstring said "participants" (copy-paste) — this is the pictures endpoint.
    response = client.post("/api/event/789/pictures")
    assert response.status_code == 405
def test_list_event_pictures(client):
    """Pictures of an event are listed via GET."""
    resp = client.get("/api/event/345/pictures")
    assert resp.status_code == 200
    # No data exists yet; we only check the key is present.
    assert "pictures" in resp.get_json()
@pytest.mark.parametrize("method", ("get", "put"))
def test_create_group_wrong_method(client, method):
    """/group only supports POST for creation"""
    handler = getattr(client, method)
    resp = handler("/api/group")
    assert resp.status_code == 405  # Method Not Allowed
def test_create_group(client):
    """/group only supports POST for creation"""
    resp = client.post("/api/group")
    assert resp.status_code == 200
    payload = resp.get_json()
    # Stub backend always hands out id 0 for a fresh group.
    assert payload["group_id"] == 0
def test_update_group_wrong_method(client):
    """Update happens via PUT but try POST"""
    resp = client.post("/api/group/1")
    assert resp.status_code == 405  # Method Not Allowed
def test_update_group(client):
    """Update a group"""
    group_id = 123
    resp = client.put(f"/api/group/{group_id}")
    assert resp.status_code == 200
    # The route echoes back the id it was given.
    assert resp.get_json()["group_id"] == group_id
def test_list_groups(client):
    """Listing groups includes the built-in TRV group."""
    resp = client.get("/api/groups")
    assert resp.status_code == 200
    payload = resp.get_json()
    assert {"group_id": 0, "name": "TRV"} in payload["groups"]
def test_get_group(client):
    """Get one group"""
    # Fix: docstring and TODO said "volunteer" (copy-paste) — this is the
    # group endpoint. Also renamed the local `id`, which shadowed the builtin.
    group_id = 123
    response = client.get(f"/api/group/{group_id}")
    # TODO: test that a group which doesn't exist returns 404
    assert response.status_code == 200
    json = response.get_json()
    assert json["group_id"] == group_id
    assert json["name"] == "TRV"
def test_create_picture(client):
    """/picture only supports POST for creation"""
    resp = client.post("/api/event/123/picture")
    assert resp.status_code == 200
    payload = resp.get_json()
    # Stub backend always hands out id 0 for a fresh picture.
    assert payload["picture_id"] == 0
def test_update_picture_wrong_method(client):
    """Update happens via PUT but try POST"""
    resp = client.post("/api/event/456/picture/1")
    assert resp.status_code == 405  # Method Not Allowed
def test_update_picture(client):
"""Update a group"""
id = 123
response = client.put(f"/api/event/345/picture/{id}")
assert response.status_code == 200
json = response.get_json()
assert json["picture_id"] == id
| 26.935361
| 75
| 0.685488
| 960
| 7,084
| 4.907292
| 0.092708
| 0.043091
| 0.123116
| 0.147739
| 0.863935
| 0.77457
| 0.749522
| 0.720654
| 0.713012
| 0.612184
| 0
| 0.030198
| 0.186618
| 7,084
| 262
| 76
| 27.038168
| 0.7874
| 0.195652
| 0
| 0.489051
| 0
| 0
| 0.133549
| 0.04308
| 0
| 0
| 0
| 0.003817
| 0.343066
| 1
| 0.211679
| false
| 0
| 0.007299
| 0
| 0.218978
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
50910a45a48be7a0e40e18d6eae9a6ed7f58c4ea
| 26,482
|
gyp
|
Python
|
syzygy/test_data/test_data.gyp
|
nzeh/syzygy
|
3573e3d458dbb4285753c28a7cb42ced739f9f55
|
[
"Apache-2.0"
] | 343
|
2015-01-07T05:58:44.000Z
|
2022-03-15T14:55:21.000Z
|
syzygy/test_data/test_data.gyp
|
nzeh/syzygy-nzeh
|
3757e53f850644721284073de318e218224dd411
|
[
"Apache-2.0"
] | 61
|
2015-03-19T18:20:21.000Z
|
2019-10-23T12:58:23.000Z
|
syzygy/test_data/test_data.gyp
|
nzeh/syzygy-nzeh
|
3757e53f850644721284073de318e218224dd411
|
[
"Apache-2.0"
] | 66
|
2015-01-20T15:35:05.000Z
|
2021-11-25T16:49:41.000Z
|
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
{
'variables': {
'chromium_code': 1,
},
'targets': [
{
'target_name': 'copy_test_dll',
'type': 'none',
'msvs_cygwin_shell': 0,
'sources': [
],
'dependencies': [
'<(src)/syzygy/pe/pe.gyp:test_dll',
],
'copies': [
{
'destination': '<(PRODUCT_DIR)/test_data',
'files': [
'<(PRODUCT_DIR)/test_dll.dll',
'<(PRODUCT_DIR)/test_dll.dll.pdb',
],
},
],
},
{
'target_name': 'copy_test_dll_compilands',
'type': 'none',
'msvs_cygwin_shell': 0,
'sources': [
],
'dependencies': [
'<(src)/syzygy/pe/pe.gyp:test_dll',
],
'copies': [
{
'destination': '<(PRODUCT_DIR)/test_data',
'files': [
# We rely on pe.gyp:test_dll producing these
# intermediate/auxiliary output files.
'<(PRODUCT_DIR)/obj/syzygy/pe/test_dll.gen/'
'test_dll_label_test_func.obj',
'<(PRODUCT_DIR)/export_dll.dll.lib',
'<(PRODUCT_DIR)/obj/syzygy/pe/test_dll_no_private_symbols.lib',
],
},
],
},
{
'target_name': 'call_trace_instrumented_test_dll',
'type': 'none',
'msvs_cygwin_shell': 0,
'sources': [
],
'dependencies': [
'<(src)/syzygy/instrument/instrument.gyp:instrument',
'copy_test_dll',
],
'actions': [
{
'action_name': 'call_trace_instrument_test_data_test_dll',
'inputs': [
'<(PRODUCT_DIR)/instrument.exe',
'<(PRODUCT_DIR)/test_data/test_dll.dll',
'<(PRODUCT_DIR)/test_data/test_dll.dll.pdb',
],
'outputs': [
'<(PRODUCT_DIR)/test_data/call_trace_instrumented_test_dll.dll',
'<(PRODUCT_DIR)/test_data/call_trace_instrumented_test_dll.dll.pdb',
],
'action': [
'<(PRODUCT_DIR)/instrument.exe',
'--mode=calltrace',
'--input-image=<(PRODUCT_DIR)/test_data/test_dll.dll',
'--input-pdb=<(PRODUCT_DIR)/test_data/test_dll.dll.pdb',
'--output-image='
'<(PRODUCT_DIR)/test_data/call_trace_instrumented_test_dll.dll',
'--output-pdb=<(PRODUCT_DIR)/test_data/'
'call_trace_instrumented_test_dll.dll.pdb',
'--overwrite',
],
},
],
},
{
'target_name': 'profile_instrumented_test_dll',
'type': 'none',
'msvs_cygwin_shell': 0,
'sources': [
],
'dependencies': [
'<(src)/syzygy/instrument/instrument.gyp:instrument',
'copy_test_dll',
],
'actions': [
{
'action_name': 'profile_instrument_test_data_test_dll',
'inputs': [
'<(PRODUCT_DIR)/instrument.exe',
'<(PRODUCT_DIR)/test_data/test_dll.dll',
'<(PRODUCT_DIR)/test_data/test_dll.dll.pdb',
],
'outputs': [
'<(PRODUCT_DIR)/test_data/profile_instrumented_test_dll.dll',
'<(PRODUCT_DIR)/test_data/profile_instrumented_test_dll.dll.pdb',
],
'action': [
'<(PRODUCT_DIR)/instrument.exe',
'--mode=profile',
'--input-image=<(PRODUCT_DIR)/test_data/test_dll.dll',
'--input-pdb=<(PRODUCT_DIR)/test_data/test_dll.dll.pdb',
'--output-image=<(PRODUCT_DIR)/test_data/'
'profile_instrumented_test_dll.dll',
'--output-pdb=<(PRODUCT_DIR)/test_data/'
'profile_instrumented_test_dll.dll.pdb',
'--overwrite',
],
},
],
},
{
'target_name': 'basic_block_entry_instrumented_test_dll',
'type': 'none',
'msvs_cygwin_shell': 0,
'sources': [
],
'dependencies': [
'<(src)/syzygy/instrument/instrument.gyp:instrument',
'copy_test_dll',
],
'actions': [
{
'action_name': 'basic_block_entry_instrument_test_data_test_dll',
'inputs': [
'<(PRODUCT_DIR)/instrument.exe',
'<(PRODUCT_DIR)/test_data/test_dll.dll',
'<(PRODUCT_DIR)/test_data/test_dll.dll.pdb',
],
'outputs': [
'<(PRODUCT_DIR)/test_data/'
'basic_block_entry_instrumented_test_dll.dll',
'<(PRODUCT_DIR)/test_data/'
'basic_block_entry_instrumented_test_dll.dll.pdb',
],
'action': [
'<(PRODUCT_DIR)/instrument.exe',
'--mode=bbentry',
'--input-image=<(PRODUCT_DIR)/test_data/test_dll.dll',
'--input-pdb=<(PRODUCT_DIR)/test_data/test_dll.dll.pdb',
'--output-image=<(PRODUCT_DIR)/test_data/'
'basic_block_entry_instrumented_test_dll.dll',
'--output-pdb=<(PRODUCT_DIR)/test_data/'
'basic_block_entry_instrumented_test_dll.dll.pdb',
'--overwrite',
],
},
],
},
{
'target_name': 'branch_instrumented_test_dll',
'type': 'none',
'msvs_cygwin_shell': 0,
'sources': [
],
'dependencies': [
'<(src)/syzygy/instrument/instrument.gyp:instrument',
'copy_test_dll',
],
'actions': [
{
'action_name': 'branch_instrument_test_data_test_dll',
'inputs': [
'<(PRODUCT_DIR)/instrument.exe',
'<(PRODUCT_DIR)/test_data/test_dll.dll',
'<(PRODUCT_DIR)/test_data/test_dll.dll.pdb',
],
'outputs': [
'<(PRODUCT_DIR)/test_data/branch_instrumented_test_dll.dll',
'<(PRODUCT_DIR)/test_data/branch_instrumented_test_dll.dll.pdb',
],
'action': [
'<(PRODUCT_DIR)/instrument.exe',
'--mode=branch',
'--input-image=<(PRODUCT_DIR)/test_data/test_dll.dll',
'--input-pdb=<(PRODUCT_DIR)/test_data/test_dll.dll.pdb',
'--output-image=<(PRODUCT_DIR)/test_data/'
'branch_instrumented_test_dll.dll',
'--output-pdb=<(PRODUCT_DIR)/test_data/'
'branch_instrumented_test_dll.dll.pdb',
'--overwrite',
],
},
],
},
{
'target_name': 'coverage_instrumented_test_dll',
'type': 'none',
'msvs_cygwin_shell': 0,
'sources': [
],
'dependencies': [
'<(src)/syzygy/instrument/instrument.gyp:instrument',
'copy_test_dll',
],
'actions': [
{
'action_name': 'coverage_instrument_test_data_test_dll',
'inputs': [
'<(PRODUCT_DIR)/instrument.exe',
'<(PRODUCT_DIR)/test_data/test_dll.dll',
'<(PRODUCT_DIR)/test_data/test_dll.dll.pdb',
],
'outputs': [
'<(PRODUCT_DIR)/test_data/coverage_instrumented_test_dll.dll',
'<(PRODUCT_DIR)/test_data/coverage_instrumented_test_dll.dll.pdb',
],
'action': [
'<(PRODUCT_DIR)/instrument.exe',
'--mode=coverage',
'--input-image=<(PRODUCT_DIR)/test_data/test_dll.dll',
'--input-pdb=<(PRODUCT_DIR)/test_data/test_dll.dll.pdb',
'--output-image=<(PRODUCT_DIR)/test_data/'
'coverage_instrumented_test_dll.dll',
'--output-pdb=<(PRODUCT_DIR)/test_data/'
'coverage_instrumented_test_dll.dll.pdb',
'--overwrite',
],
},
],
},
{
'target_name': 'asan_instrumented_test_dll',
'type': 'none',
'msvs_cygwin_shell': 0,
'sources': [
],
'dependencies': [
'<(src)/syzygy/instrument/instrument.gyp:instrument',
'copy_test_dll',
],
'actions': [
{
'action_name': 'asan_instrument_test_data_test_dll',
'inputs': [
'<(PRODUCT_DIR)/instrument.exe',
'<(PRODUCT_DIR)/test_data/test_dll.dll',
'<(PRODUCT_DIR)/test_data/test_dll.dll.pdb',
],
'outputs': [
'<(PRODUCT_DIR)/test_data/asan_instrumented_test_dll.dll',
'<(PRODUCT_DIR)/test_data/asan_instrumented_test_dll.dll.pdb',
],
'action': [
'<(PRODUCT_DIR)/instrument.exe',
'--mode=asan',
'--input-image=<(PRODUCT_DIR)/test_data/test_dll.dll',
'--input-pdb=<(PRODUCT_DIR)/test_data/test_dll.dll.pdb',
'--output-image=<(PRODUCT_DIR)/test_data/'
'asan_instrumented_test_dll.dll',
'--output-pdb=<(PRODUCT_DIR)/test_data/'
'asan_instrumented_test_dll.dll.pdb',
'--overwrite',
],
},
],
},
{
'target_name': 'randomized_test_dll',
'type': 'none',
'msvs_cygwin_shell': 0,
'sources': [
],
'dependencies': [
'<(src)/syzygy/relink/relink.gyp:relink',
'copy_test_dll'
],
'actions': [
{
'action_name': 'randomize_test_data_test_dll',
'inputs': [
'<(PRODUCT_DIR)/relink.exe',
'<(PRODUCT_DIR)/test_data/test_dll.dll',
'<(PRODUCT_DIR)/test_data/test_dll.dll.pdb',
],
'outputs': [
'<(PRODUCT_DIR)/test_data/randomized_test_dll.dll',
'<(PRODUCT_DIR)/test_data/randomized_test_dll.dll.pdb',
],
'action': [
'<(PRODUCT_DIR)/relink.exe',
'--seed=0',
'--input-image=<(PRODUCT_DIR)/test_data/test_dll.dll',
'--input-pdb=<(PRODUCT_DIR)/test_data/test_dll.dll.pdb',
'--output-image=<(PRODUCT_DIR)/test_data/randomized_test_dll.dll',
'--output-pdb=<(PRODUCT_DIR)/test_data/randomized_test_dll.dll.pdb',
'--overwrite',
],
},
],
},
{
'target_name': 'signed_test_dll',
'type': 'none',
'msvs_cygwin_shell': 0,
'sources': [
],
'dependencies': [
'copy_test_dll',
],
'actions': [
{
'action_name': 'sign_test_dll',
'inputs': [
'<(src)/syzygy/test_data/syzygy.pfx',
'<(src)/syzygy/test_data/sign_image.bat',
'<(PRODUCT_DIR)/test_data/test_dll.dll',
],
'outputs': [
'<(PRODUCT_DIR)/test_data/signed_test_dll.dll',
],
'action': [
'<(src)/syzygy/test_data/sign_image.bat',
# This tool requires Windows-style paths, hence the use of
# backslashes.
'<(PRODUCT_DIR)\\test_data\\test_dll.dll',
'<(PRODUCT_DIR)\\test_data\\signed_test_dll.dll',
],
},
],
},
{
'target_name': 'memprof_instrumented_memprof_harness',
'type': 'none',
'msvs_cygwin_shell': 0,
'sources': [
],
'dependencies': [
'<(src)/syzygy/instrument/instrument.gyp:instrument',
'<(src)/syzygy/agent/memprof/memprof.gyp:memprof_harness',
],
'actions': [
{
'action_name': 'memprof_instrument_memprof_harness',
'inputs': [
'<(PRODUCT_DIR)/instrument.exe',
'<(PRODUCT_DIR)/memprof_harness.exe',
'<(PRODUCT_DIR)/memprof_harness.exe.pdb',
],
'outputs': [
'<(PRODUCT_DIR)/test_data/'
'memprof_instrumented_memprof_harness.exe',
'<(PRODUCT_DIR)/test_data/'
'memprof_instrumented_memprof_harness.exe.pdb',
],
'action': [
'<(PRODUCT_DIR)/instrument.exe',
'--mode=asan',
'--agent=memprof.dll',
'--input-image=<(PRODUCT_DIR)/memprof_harness.exe',
'--input-pdb=<(PRODUCT_DIR)/memprof_harness.exe.pdb',
'--output-image=<(PRODUCT_DIR)/test_data/'
'memprof_instrumented_memprof_harness.exe',
'--output-pdb=<(PRODUCT_DIR)/test_data/'
'memprof_instrumented_memprof_harness.exe.pdb',
'--overwrite',
],
},
],
},
# TODO(rogerm): The GYP snippets to generate the trace files are all
# pretty much identical to one other if parameterized by the mode,
# dll/pdb name, and output directory. Find a way to consolidate to
# a reusable rule or gypi.
{
'target_name': 'call_trace_traces',
'type': 'none',
'msvs_cygwin_shell': 0,
'sources': [
'generate_traces.py',
],
'dependencies': [
'<(src)/syzygy/agent/call_trace/call_trace.gyp:call_trace_client',
'<(src)/syzygy/trace/service/service.gyp:call_trace_service_exe',
'call_trace_instrumented_test_dll',
],
'actions': [
{
'action_name': 'generate_call_trace_traces',
'inputs': [
'<(PRODUCT_DIR)/call_trace_client.dll',
'<(PRODUCT_DIR)/call_trace_service.exe',
'<(PRODUCT_DIR)/test_data/call_trace_instrumented_test_dll.dll',
'<(PRODUCT_DIR)/test_data/call_trace_instrumented_test_dll.dll.pdb',
'<(src)/syzygy/test_data/generate_traces.py',
],
'outputs': [
'<(PRODUCT_DIR)/test_data/call_trace_traces/trace-1.bin',
'<(PRODUCT_DIR)/test_data/call_trace_traces/trace-2.bin',
'<(PRODUCT_DIR)/test_data/call_trace_traces/trace-3.bin',
'<(PRODUCT_DIR)/test_data/call_trace_traces/trace-4.bin',
],
'action': [
'<(python_exe)',
'<(src)/syzygy/test_data/generate_traces.py',
'--output-dir=<(PRODUCT_DIR)/test_data/call_trace_traces',
'--instrumented-image='
'<(PRODUCT_DIR)/test_data/call_trace_instrumented_test_dll.dll',
'--verbose',
# The build-dir arg must be last to work around a bug in the
# interaction between GYP and VS2010.
# See: http://code.google.com/p/gyp/issues/detail?id=272
'--build-dir=<(PRODUCT_DIR)',
],
},
],
},
{
'target_name': 'profile_traces',
'type': 'none',
'msvs_cygwin_shell': 0,
'sources': [
'generate_traces.py',
],
'dependencies': [
'<(src)/syzygy/agent/profiler/profiler.gyp:profile_client',
'<(src)/syzygy/trace/service/service.gyp:call_trace_service_exe',
'profile_instrumented_test_dll',
],
'actions': [
{
'action_name': 'generate_profile_traces',
'inputs': [
'<(PRODUCT_DIR)/profile_client.dll',
'<(PRODUCT_DIR)/call_trace_service.exe',
'<(PRODUCT_DIR)/test_data/profile_instrumented_test_dll.dll',
'<(PRODUCT_DIR)/test_data/profile_instrumented_test_dll.dll.pdb',
'<(src)/syzygy/test_data/generate_traces.py',
],
'outputs': [
'<(PRODUCT_DIR)/test_data/profile_traces/trace-1.bin',
'<(PRODUCT_DIR)/test_data/profile_traces/trace-2.bin',
'<(PRODUCT_DIR)/test_data/profile_traces/trace-3.bin',
'<(PRODUCT_DIR)/test_data/profile_traces/trace-4.bin',
],
'action': [
'<(python_exe)',
'<(src)/syzygy/test_data/generate_traces.py',
'--output-dir=<(PRODUCT_DIR)/test_data/profile_traces',
'--instrumented-image='
'<(PRODUCT_DIR)/test_data/profile_instrumented_test_dll.dll',
'--verbose',
# The build-dir arg must be last to work around a bug in the
# interaction between GYP and VS2010.
# See: http://code.google.com/p/gyp/issues/detail?id=272
'--build-dir=<(PRODUCT_DIR)',
],
},
],
},
{
'target_name': 'test_dll_order_json',
'type': 'none',
'msvs_cygwin_shell': 0,
'dependencies': [
'call_trace_traces',
'call_trace_instrumented_test_dll',
'<(src)/syzygy/reorder/reorder.gyp:reorder',
],
'actions': [
{
'action_name': 'generate_test_dll_order_file',
'inputs': [
'<(PRODUCT_DIR)/reorder.exe',
'<(PRODUCT_DIR)/test_data/call_trace_instrumented_test_dll.dll',
'<(PRODUCT_DIR)/test_data/call_trace_instrumented_test_dll.dll.pdb',
'<(PRODUCT_DIR)/test_data/call_trace_traces/trace-1.bin',
'<(PRODUCT_DIR)/test_data/call_trace_traces/trace-2.bin',
'<(PRODUCT_DIR)/test_data/call_trace_traces/trace-3.bin',
'<(PRODUCT_DIR)/test_data/call_trace_traces/trace-4.bin',
],
'outputs': [
'<(PRODUCT_DIR)/test_data/test_dll_order.json',
],
'action': [
'<(PRODUCT_DIR)/reorder.exe',
'--instrumented-image='
'<(PRODUCT_DIR)/test_data/call_trace_instrumented_test_dll.dll',
'--output-file=<(PRODUCT_DIR)/test_data/test_dll_order.json',
'<(PRODUCT_DIR)/test_data/call_trace_traces/trace-1.bin',
'<(PRODUCT_DIR)/test_data/call_trace_traces/trace-2.bin',
'<(PRODUCT_DIR)/test_data/call_trace_traces/trace-3.bin',
'<(PRODUCT_DIR)/test_data/call_trace_traces/trace-4.bin',
],
}
],
},
{
'target_name': 'coverage_traces',
'type': 'none',
'msvs_cygwin_shell': 0,
'sources': [
'generate_traces.py',
],
'dependencies': [
'<(src)/syzygy/agent/coverage/coverage.gyp:coverage_client',
'<(src)/syzygy/trace/service/service.gyp:call_trace_service_exe',
'coverage_instrumented_test_dll',
],
'actions': [
{
'action_name': 'generate_coverage_traces',
'inputs': [
'<(PRODUCT_DIR)/coverage_client.dll',
'<(PRODUCT_DIR)/call_trace_service.exe',
'<(PRODUCT_DIR)/test_data/coverage_instrumented_test_dll.dll',
'<(PRODUCT_DIR)/test_data/coverage_instrumented_test_dll.dll.pdb',
'<(src)/syzygy/test_data/generate_traces.py',
],
'outputs': [
'<(PRODUCT_DIR)/test_data/coverage_traces/trace-1.bin',
'<(PRODUCT_DIR)/test_data/coverage_traces/trace-2.bin',
'<(PRODUCT_DIR)/test_data/coverage_traces/trace-3.bin',
'<(PRODUCT_DIR)/test_data/coverage_traces/trace-4.bin',
],
'action': [
'<(python_exe)',
'<(src)/syzygy/test_data/generate_traces.py',
'--output-dir=<(PRODUCT_DIR)/test_data/coverage_traces',
'--instrumented-image='
'<(PRODUCT_DIR)/test_data/coverage_instrumented_test_dll.dll',
'--verbose',
# The build-dir arg must be last to work around a bug in the
# interaction between GYP and VS2010.
# See: http://code.google.com/p/gyp/issues/detail?id=272
'--build-dir=<(PRODUCT_DIR)',
],
},
],
},
{
'target_name': 'basic_block_entry_traces',
'type': 'none',
'msvs_cygwin_shell': 0,
'sources': [
'generate_traces.py',
],
'dependencies': [
'<(src)/syzygy/agent/basic_block_entry/basic_block_entry.gyp:'
'basic_block_entry_client',
'<(src)/syzygy/trace/service/service.gyp:call_trace_service_exe',
'basic_block_entry_instrumented_test_dll',
],
'actions': [
{
'action_name': 'generate_basic_block_entry_traces',
'inputs': [
'<(PRODUCT_DIR)/basic_block_entry_client.dll',
'<(PRODUCT_DIR)/call_trace_service.exe',
'<(PRODUCT_DIR)/test_data/'
'basic_block_entry_instrumented_test_dll.dll',
'<(PRODUCT_DIR)/test_data/'
'basic_block_entry_instrumented_test_dll.dll.pdb',
'<(src)/syzygy/test_data/generate_traces.py',
],
'outputs': [
'<(PRODUCT_DIR)/test_data/basic_block_entry_traces/trace-1.bin',
'<(PRODUCT_DIR)/test_data/basic_block_entry_traces/trace-2.bin',
'<(PRODUCT_DIR)/test_data/basic_block_entry_traces/trace-3.bin',
'<(PRODUCT_DIR)/test_data/basic_block_entry_traces/trace-4.bin',
],
'action': [
'<(python_exe)',
'<(src)/syzygy/test_data/generate_traces.py',
'--output-dir=<(PRODUCT_DIR)/test_data/basic_block_entry_traces',
'--instrumented-image=<(PRODUCT_DIR)/test_data/'
'basic_block_entry_instrumented_test_dll.dll',
'--verbose',
# The build-dir arg must be last to work around a bug in the
# interaction between GYP and VS2010.
# See: http://code.google.com/p/gyp/issues/detail?id=272
'--build-dir=<(PRODUCT_DIR)',
],
},
],
},
{
'target_name': 'basic_block_entry_counts',
'type': 'none',
'msvs_cygwin_shell': 0,
'sources': [
],
'dependencies': [
'basic_block_entry_traces',
'<(src)/syzygy/grinder/grinder.gyp:grinder',
],
'actions': [
{
'action_name': 'generate_basic_block_entry_counts',
'inputs': [
'<(PRODUCT_DIR)/grinder.exe',
'<(PRODUCT_DIR)/test_data/basic_block_entry_traces/trace-1.bin',
'<(PRODUCT_DIR)/test_data/basic_block_entry_traces/trace-2.bin',
'<(PRODUCT_DIR)/test_data/basic_block_entry_traces/trace-3.bin',
'<(PRODUCT_DIR)/test_data/basic_block_entry_traces/trace-4.bin',
],
'outputs': [
'<(PRODUCT_DIR)/test_data/basic_block_entry_traces/'
'entry_counts.json',
],
'action': [
'<(PRODUCT_DIR)/grinder.exe',
'--mode=bbentry',
'--output-file=<(PRODUCT_DIR)/test_data/basic_block_entry_traces/'
'entry_counts.json',
'<(PRODUCT_DIR)/test_data/basic_block_entry_traces/trace-1.bin',
'<(PRODUCT_DIR)/test_data/basic_block_entry_traces/trace-2.bin',
'<(PRODUCT_DIR)/test_data/basic_block_entry_traces/trace-3.bin',
'<(PRODUCT_DIR)/test_data/basic_block_entry_traces/trace-4.bin',
],
},
],
},
{
'target_name': 'branch_traces',
'type': 'none',
'msvs_cygwin_shell': 0,
'sources': [
'generate_traces.py',
],
'dependencies': [
'<(src)/syzygy/agent/basic_block_entry/basic_block_entry.gyp:'
'basic_block_entry_client',
'<(src)/syzygy/trace/service/service.gyp:call_trace_service_exe',
'branch_instrumented_test_dll',
],
'actions': [
{
'action_name': 'generate_branch_traces',
'inputs': [
'<(PRODUCT_DIR)/basic_block_entry_client.dll',
'<(PRODUCT_DIR)/call_trace_service.exe',
'<(PRODUCT_DIR)/test_data/'
'branch_instrumented_test_dll.dll',
'<(PRODUCT_DIR)/test_data/'
'branch_instrumented_test_dll.dll.pdb',
'<(src)/syzygy/test_data/generate_traces.py',
],
'outputs': [
'<(PRODUCT_DIR)/test_data/branch_traces/trace-1.bin',
'<(PRODUCT_DIR)/test_data/branch_traces/trace-2.bin',
'<(PRODUCT_DIR)/test_data/branch_traces/trace-3.bin',
'<(PRODUCT_DIR)/test_data/branch_traces/trace-4.bin',
],
'action': [
'<(python_exe)',
'<(src)/syzygy/test_data/generate_traces.py',
'--output-dir=<(PRODUCT_DIR)/test_data/branch_traces',
'--instrumented-image=<(PRODUCT_DIR)/test_data/'
'branch_instrumented_test_dll.dll',
'--verbose',
# The build-dir arg must be last to work around a bug in the
# interaction between GYP and VS2010.
# See: http://code.google.com/p/gyp/issues/detail?id=272
'--build-dir=<(PRODUCT_DIR)',
],
},
],
},
{
'target_name': 'memprof_traces',
'type': 'none',
'msvs_cygwin_shell': 0,
'sources': [
'generate_traces.py',
],
'dependencies': [
'<(src)/syzygy/agent/memprof/memprof.gyp:memprof',
'<(src)/syzygy/trace/service/service.gyp:call_trace_service_exe',
'memprof_instrumented_memprof_harness',
],
'actions': [
{
'action_name': 'generate_memprof_traces',
'inputs': [
'<(PRODUCT_DIR)/memprof.dll',
'<(PRODUCT_DIR)/call_trace_service.exe',
'<(PRODUCT_DIR)/test_data/'
'memprof_instrumented_memprof_harness.exe',
'<(PRODUCT_DIR)/test_data/'
'memprof_instrumented_memprof_harness.exe.pdb',
'<(src)/syzygy/test_data/generate_traces.py',
],
'outputs': [
'<(PRODUCT_DIR)/test_data/memprof_traces/trace-1.bin',
],
'action': [
'<(python_exe)',
'<(src)/syzygy/test_data/generate_traces.py',
'--env=SYZYGY_MEMPROF_OPTIONS=--stack-trace-tracking '
'--serialize-timestamps',
'--instrumented-image=<(PRODUCT_DIR)/test_data/'
'memprof_instrumented_memprof_harness.exe',
'--iterations=1',
'--output-dir=<(PRODUCT_DIR)/test_data/memprof_traces',
'--verbose',
# The build-dir arg must be last to work around a bug in the
# interaction between GYP and VS2010.
# See: http://code.google.com/p/gyp/issues/detail?id=272
'--build-dir=<(PRODUCT_DIR)',
],
},
],
},
],
}
| 36.326475
| 80
| 0.54977
| 2,781
| 26,482
| 4.883136
| 0.074793
| 0.133284
| 0.140206
| 0.177614
| 0.858542
| 0.844845
| 0.819514
| 0.768336
| 0.715906
| 0.703903
| 0
| 0.005862
| 0.297863
| 26,482
| 728
| 81
| 36.376374
| 0.724481
| 0.07001
| 0
| 0.709302
| 0
| 0
| 0.604441
| 0.501139
| 0
| 0
| 0
| 0.001374
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 11
|
0fed1a6bce9a3625a44ad4322d1c590a3e2234bd
| 140
|
py
|
Python
|
restler/authentication_settings.py
|
SmartSleepIoT/SmartSleepCoding
|
21c19489f0c477cbfbabd3a1d232f526f84a9e49
|
[
"BSD-3-Clause"
] | null | null | null |
restler/authentication_settings.py
|
SmartSleepIoT/SmartSleepCoding
|
21c19489f0c477cbfbabd3a1d232f526f84a9e49
|
[
"BSD-3-Clause"
] | 41
|
2021-10-20T17:54:59.000Z
|
2022-02-02T20:43:53.000Z
|
restler/authentication_settings.py
|
SmartSleepIoT/SmartSleepCoding
|
21c19489f0c477cbfbabd3a1d232f526f84a9e49
|
[
"BSD-3-Clause"
] | null | null | null |
print("{'user1':{}, 'user2':{}}")
print("Authorization: Bearer valid_unit_test_token")
print("Authorization: Bearer shadow_unit_test_token")
| 46.666667
| 53
| 0.757143
| 17
| 140
| 5.882353
| 0.588235
| 0.36
| 0.48
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015038
| 0.05
| 140
| 3
| 53
| 46.666667
| 0.736842
| 0
| 0
| 0
| 0
| 0
| 0.787234
| 0.304965
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
0ffe48ded8a28d4e99f7a8806ac5c3f9ab001d0b
| 21,365
|
py
|
Python
|
usersec/migrations/0003_link_version_objects_to_nonversion_objects.py
|
bihealth/hpc-access
|
ff606b18b18230af2876a791ca706d3b24addb59
|
[
"MIT"
] | null | null | null |
usersec/migrations/0003_link_version_objects_to_nonversion_objects.py
|
bihealth/hpc-access
|
ff606b18b18230af2876a791ca706d3b24addb59
|
[
"MIT"
] | 27
|
2022-02-11T15:51:24.000Z
|
2022-03-31T12:11:20.000Z
|
usersec/migrations/0003_link_version_objects_to_nonversion_objects.py
|
bihealth/hpc-access
|
ff606b18b18230af2876a791ca706d3b24addb59
|
[
"MIT"
] | null | null | null |
# Generated by Django 4.0.2 on 2022-03-21 13:37
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
("usersec", "0002_hpcgroup_adjustments"),
]
operations = [
migrations.RemoveField(
model_name="hpcgroupcreaterequest",
name="delegate",
),
migrations.RemoveField(
model_name="hpcuserchangerequest",
name="group",
),
migrations.RemoveField(
model_name="hpcusercreaterequest",
name="group",
),
migrations.RemoveField(
model_name="hpcusercreaterequestversion",
name="group",
),
migrations.RemoveField(
model_name="hpcuserdeleterequest",
name="group",
),
migrations.AddField(
model_name="hpcgroupchangerequestversion",
name="belongs_to",
field=models.ForeignKey(
help_text="Object this version belongs to",
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="version_history",
to="usersec.hpcgroupchangerequest",
),
),
migrations.AddField(
model_name="hpcgroupcreaterequest",
name="delegate_email",
field=models.CharField(
blank=True,
help_text="Email address of the delegate",
max_length=64,
null=True,
),
),
migrations.AddField(
model_name="hpcgroupcreaterequest",
name="member_emails",
field=models.TextField(
blank=True,
help_text="Email addresses of the group members, comma separated",
null=True,
),
),
migrations.AddField(
model_name="hpcgroupcreaterequestversion",
name="belongs_to",
field=models.ForeignKey(
help_text="Object this version belongs to",
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="version_history",
to="usersec.hpcgroupcreaterequest",
),
),
migrations.AddField(
model_name="hpcgroupcreaterequestversion",
name="delegate_email",
field=models.CharField(
blank=True,
help_text="Email address of the delegate",
max_length=64,
null=True,
),
),
migrations.AddField(
model_name="hpcgroupcreaterequestversion",
name="member_emails",
field=models.TextField(
blank=True,
help_text="Email addresses of the group members, comma separated",
null=True,
),
),
migrations.AddField(
model_name="hpcgroupdeleterequestversion",
name="belongs_to",
field=models.ForeignKey(
help_text="Object this version belongs to",
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="version_history",
to="usersec.hpcgroupdeleterequest",
),
),
migrations.AddField(
model_name="hpcgroupversion",
name="belongs_to",
field=models.ForeignKey(
help_text="Object this version belongs to",
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="version_history",
to="usersec.hpcgroup",
),
),
migrations.AddField(
model_name="hpcuserchangerequest",
name="user",
field=models.ForeignKey(
help_text="User the request belongs to",
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="%(class)s",
to="usersec.hpcuser",
),
),
migrations.AddField(
model_name="hpcuserchangerequestversion",
name="belongs_to",
field=models.ForeignKey(
help_text="Object this version belongs to",
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="version_history",
to="usersec.hpcuserchangerequest",
),
),
migrations.AddField(
model_name="hpcuserchangerequestversion",
name="user",
field=models.ForeignKey(
help_text="User the request belongs to",
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="%(class)s",
to="usersec.hpcuser",
),
),
migrations.AddField(
model_name="hpcusercreaterequest",
name="user",
field=models.ForeignKey(
help_text="User the request belongs to",
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="%(class)s",
to="usersec.hpcuser",
),
),
migrations.AddField(
model_name="hpcusercreaterequestversion",
name="belongs_to",
field=models.ForeignKey(
help_text="Object this version belongs to",
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="version_history",
to="usersec.hpcusercreaterequest",
),
),
migrations.AddField(
model_name="hpcusercreaterequestversion",
name="user",
field=models.ForeignKey(
help_text="User the request belongs to",
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="%(class)s",
to="usersec.hpcuser",
),
),
migrations.AddField(
model_name="hpcuserdeleterequest",
name="user",
field=models.ForeignKey(
help_text="User the request belongs to",
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="%(class)s",
to="usersec.hpcuser",
),
),
migrations.AddField(
model_name="hpcuserdeleterequestversion",
name="belongs_to",
field=models.ForeignKey(
help_text="Object this version belongs to",
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="version_history",
to="usersec.hpcuserdeleterequest",
),
),
migrations.AddField(
model_name="hpcuserdeleterequestversion",
name="user",
field=models.ForeignKey(
help_text="User the request belongs to",
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="%(class)s",
to="usersec.hpcuser",
),
),
migrations.AddField(
model_name="hpcuserversion",
name="belongs_to",
field=models.ForeignKey(
help_text="Object this version belongs to",
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="version_history",
to="usersec.hpcuser",
),
),
migrations.AlterField(
model_name="hpcgroup",
name="folder",
field=models.CharField(
help_text="Path to the group folder on the cluster",
max_length=64,
),
),
migrations.AlterField(
model_name="hpcgroup",
name="name",
field=models.CharField(help_text="Name of the group on the cluster", max_length=64),
),
migrations.AlterField(
model_name="hpcgroupchangerequest",
name="comment",
field=models.TextField(
blank=True,
help_text="Comment on request or revision",
null=True,
),
),
migrations.AlterField(
model_name="hpcgroupchangerequest",
name="group",
field=models.ForeignKey(
help_text="Group the request belongs to",
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="%(class)s",
to="usersec.hpcgroup",
),
),
migrations.AlterField(
model_name="hpcgroupchangerequest",
name="requester",
field=models.ForeignKey(
help_text="User creating the request",
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="%(class)s_requester",
to=settings.AUTH_USER_MODEL,
),
),
migrations.AlterField(
model_name="hpcgroupchangerequestversion",
name="comment",
field=models.TextField(
blank=True,
help_text="Comment on request or revision",
null=True,
),
),
migrations.AlterField(
model_name="hpcgroupchangerequestversion",
name="group",
field=models.ForeignKey(
help_text="Group the request belongs to",
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="%(class)s",
to="usersec.hpcgroup",
),
),
migrations.AlterField(
model_name="hpcgroupchangerequestversion",
name="requester",
field=models.ForeignKey(
help_text="User creating the request",
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="%(class)s_requester",
to=settings.AUTH_USER_MODEL,
),
),
migrations.AlterField(
model_name="hpcgroupcreaterequest",
name="comment",
field=models.TextField(
blank=True,
help_text="Comment on request or revision",
null=True,
),
),
migrations.AlterField(
model_name="hpcgroupcreaterequest",
name="group",
field=models.ForeignKey(
help_text="Group the request belongs to",
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="%(class)s",
to="usersec.hpcgroup",
),
),
migrations.AlterField(
model_name="hpcgroupcreaterequest",
name="requester",
field=models.ForeignKey(
help_text="User creating the request",
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="%(class)s_requester",
to=settings.AUTH_USER_MODEL,
),
),
migrations.AlterField(
model_name="hpcgroupcreaterequestversion",
name="comment",
field=models.TextField(
blank=True,
help_text="Comment on request or revision",
null=True,
),
),
migrations.AlterField(
model_name="hpcgroupcreaterequestversion",
name="group",
field=models.ForeignKey(
help_text="Group the request belongs to",
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="%(class)s",
to="usersec.hpcgroup",
),
),
migrations.AlterField(
model_name="hpcgroupcreaterequestversion",
name="requester",
field=models.ForeignKey(
help_text="User creating the request",
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="%(class)s_requester",
to=settings.AUTH_USER_MODEL,
),
),
migrations.AlterField(
model_name="hpcgroupdeleterequest",
name="comment",
field=models.TextField(
blank=True,
help_text="Comment on request or revision",
null=True,
),
),
migrations.AlterField(
model_name="hpcgroupdeleterequest",
name="group",
field=models.ForeignKey(
help_text="Group the request belongs to",
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="%(class)s",
to="usersec.hpcgroup",
),
),
migrations.AlterField(
model_name="hpcgroupdeleterequest",
name="requester",
field=models.ForeignKey(
help_text="User creating the request",
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="%(class)s_requester",
to=settings.AUTH_USER_MODEL,
),
),
migrations.AlterField(
model_name="hpcgroupdeleterequestversion",
name="comment",
field=models.TextField(
blank=True,
help_text="Comment on request or revision",
null=True,
),
),
migrations.AlterField(
model_name="hpcgroupdeleterequestversion",
name="group",
field=models.ForeignKey(
help_text="Group the request belongs to",
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="%(class)s",
to="usersec.hpcgroup",
),
),
migrations.AlterField(
model_name="hpcgroupdeleterequestversion",
name="requester",
field=models.ForeignKey(
help_text="User creating the request",
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="%(class)s_requester",
to=settings.AUTH_USER_MODEL,
),
),
migrations.AlterField(
model_name="hpcgroupversion",
name="folder",
field=models.CharField(
help_text="Path to the group folder on the cluster",
max_length=64,
),
),
migrations.AlterField(
model_name="hpcgroupversion",
name="name",
field=models.CharField(help_text="Name of the group on the cluster", max_length=64),
),
migrations.AlterField(
model_name="hpcuserchangerequest",
name="comment",
field=models.TextField(
blank=True,
help_text="Comment on request or revision",
null=True,
),
),
migrations.AlterField(
model_name="hpcuserchangerequest",
name="requester",
field=models.ForeignKey(
help_text="User creating the request",
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="%(class)s_requester",
to=settings.AUTH_USER_MODEL,
),
),
migrations.AlterField(
model_name="hpcuserchangerequestversion",
name="comment",
field=models.TextField(
blank=True,
help_text="Comment on request or revision",
null=True,
),
),
migrations.AlterField(
model_name="hpcuserchangerequestversion",
name="requester",
field=models.ForeignKey(
help_text="User creating the request",
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="%(class)s_requester",
to=settings.AUTH_USER_MODEL,
),
),
migrations.AlterField(
model_name="hpcusercreaterequest",
name="comment",
field=models.TextField(
blank=True,
help_text="Comment on request or revision",
null=True,
),
),
migrations.AlterField(
model_name="hpcusercreaterequest",
name="requester",
field=models.ForeignKey(
help_text="User creating the request",
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="%(class)s_requester",
to=settings.AUTH_USER_MODEL,
),
),
migrations.AlterField(
model_name="hpcusercreaterequestversion",
name="comment",
field=models.TextField(
blank=True,
help_text="Comment on request or revision",
null=True,
),
),
migrations.AlterField(
model_name="hpcusercreaterequestversion",
name="requester",
field=models.ForeignKey(
help_text="User creating the request",
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="%(class)s_requester",
to=settings.AUTH_USER_MODEL,
),
),
migrations.AlterField(
model_name="hpcuserdeleterequest",
name="comment",
field=models.TextField(
blank=True,
help_text="Comment on request or revision",
null=True,
),
),
migrations.AlterField(
model_name="hpcuserdeleterequest",
name="requester",
field=models.ForeignKey(
help_text="User creating the request",
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="%(class)s_requester",
to=settings.AUTH_USER_MODEL,
),
),
migrations.AlterField(
model_name="hpcuserdeleterequestversion",
name="comment",
field=models.TextField(
blank=True,
help_text="Comment on request or revision",
null=True,
),
),
migrations.AlterField(
model_name="hpcuserdeleterequestversion",
name="requester",
field=models.ForeignKey(
help_text="User creating the request",
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="%(class)s_requester",
to=settings.AUTH_USER_MODEL,
),
),
migrations.AlterUniqueTogether(
name="hpcgroup",
unique_together={("name",)},
),
migrations.AlterUniqueTogether(
name="hpcgroupchangerequestversion",
unique_together={("belongs_to", "version")},
),
migrations.AlterUniqueTogether(
name="hpcgroupcreaterequestversion",
unique_together={("belongs_to", "version")},
),
migrations.AlterUniqueTogether(
name="hpcgroupdeleterequestversion",
unique_together={("belongs_to", "version")},
),
migrations.AlterUniqueTogether(
name="hpcgroupversion",
unique_together={("name", "version")},
),
migrations.AlterUniqueTogether(
name="hpcuser",
unique_together={("username",)},
),
migrations.AlterUniqueTogether(
name="hpcuserchangerequestversion",
unique_together={("belongs_to", "version")},
),
migrations.AlterUniqueTogether(
name="hpcuserdeleterequestversion",
unique_together={("belongs_to", "version")},
),
migrations.AlterUniqueTogether(
name="hpcuserversion",
unique_together={("username", "version")},
),
migrations.RemoveField(
model_name="hpcgroupcreaterequestversion",
name="delegate",
),
migrations.RemoveField(
model_name="hpcuserchangerequestversion",
name="group",
),
migrations.RemoveField(
model_name="hpcuserdeleterequestversion",
name="group",
),
]
| 35.082102
| 96
| 0.515656
| 1,718
| 21,365
| 6.256112
| 0.058207
| 0.050242
| 0.079084
| 0.091738
| 0.886304
| 0.880071
| 0.749349
| 0.749349
| 0.714831
| 0.714831
| 0
| 0.002371
| 0.388018
| 21,365
| 608
| 97
| 35.139803
| 0.819656
| 0.002106
| 0
| 0.950166
| 1
| 0
| 0.213669
| 0.066188
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.004983
| 0
| 0.009967
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
e83bcdfe5a6f7dbf43a9b471fb6776d62de8c38d
| 10,271
|
py
|
Python
|
Facebook.py
|
Babuperumana/Fbvdo_downloader
|
bbcd9761181de082df4cecb20274af20b63c4f21
|
[
"MIT"
] | null | null | null |
Facebook.py
|
Babuperumana/Fbvdo_downloader
|
bbcd9761181de082df4cecb20274af20b63c4f21
|
[
"MIT"
] | null | null | null |
Facebook.py
|
Babuperumana/Fbvdo_downloader
|
bbcd9761181de082df4cecb20274af20b63c4f21
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import os
# Clear the terminal before printing the banner. NOTE(review): 'clear' is a
# Unix command; on Windows this would need 'cls' — confirm target platforms.
os.system('clear')
import signal
def keyboardInterruptHandler(signum, frame):
    """SIGINT (Ctrl-C) handler: announce shutdown and exit with status 0.

    Registered via ``signal.signal(signal.SIGINT, ...)``; receives the
    standard ``(signum, frame)`` pair, both unused.
    """
    # Fixes: first parameter was named "signal", shadowing the signal module,
    # and ``.format(signal)`` was called on a string with no placeholders
    # (a no-op). The printed message is unchanged.
    print("\nპროგრამა გაითიშა.")
    exit(0)
# Install the Ctrl-C handler so a download in progress can be aborted cleanly.
signal.signal(signal.SIGINT, keyboardInterruptHandler)
import pyfiglet
result = pyfiglet.figlet_format("Facebook.com")
print(result)
print("Facebook-Video-Downloader-HD ფეისბუქიდან ვიდეოების გადმომწერი")
print("--------------------------------------------------------------------")
print("https://github.com/AnonymousFromGeorgia/Facebook-Video-Downloader-HD")
print("--------------------------------------------------------------------")
from datetime import datetime
from tqdm import tqdm
import requests
import re

# Separator line used by every end-of-run banner.
_SEP = "--------------------------------------------------"


def _author_banner():
    """Print the shared author / social-links footer."""
    print("პროგრამის ავტორი: გიო რგი")
    print(_SEP)
    print("YouTube - https://youtube.com/AnonymousFromGeorgia")
    print(_SEP)
    print("Github - https://github.com/AnonymousFromGeorgia")
    print(_SEP)
    print("Facebook - https://facebook.com/anonimaluri")
    print(_SEP)
    print("Twitter - https://twitter.com/anonimaluri")
    print(_SEP)
    print("ანონიმუსი საქართველოდან - Anonymous From Georgia")
    print(_SEP)


def _success_banner():
    """Print the 'download finished' message followed by the author footer."""
    print(_SEP)
    print("ვიდეოს გადმოწერა წარმატებით დასრულდა.")
    print(_SEP)
    _author_banner()


def _download(src_key):
    """Stream the video named by ``src_key`` ('hd_src' or 'sd_src') from the
    fetched page ``html`` into a timestamped .mp4, with a tqdm progress bar.
    """
    # Same pattern as the original r'hd_src:"(.+?)"' / r'sd_src:"(.+?)"'.
    video_url = re.search(src_key + ':"(.+?)"', html).group(1)
    file_size_request = requests.get(video_url, stream=True)
    file_size = int(file_size_request.headers['Content-Length'])
    block_size = 1024
    filename = datetime.strftime(datetime.now(), '%Y-%m-%d-%H-%M-%S')
    progress = tqdm(total=file_size, unit='B', unit_scale=True, desc=filename, ascii=True)
    with open(filename + '.mp4', 'wb') as f:
        for data in file_size_request.iter_content(block_size):
            progress.update(len(data))
            f.write(data)
    progress.close()


url = input("შეიყვანეთ ვიდეოს ბმული (URL): ")
# NOTE(review): the '.' in "www." and "facebook.com" is unescaped and matches
# any character; kept as-is to preserve the original matching behavior.
if re.match(r'^(https:|)[/][/]www.([^/]+[.])*facebook.com', url):
    html = requests.get(url).content.decode('utf-8')
else:
    print(_SEP)
    print("მითითებული ვიდეო ვერ მოიძებნა.")
    print(_SEP)
    _author_banner()
    exit()

# Probe which sources the page offers. Index meaning:
#   0 = HD stream present, 1 = SD stream present,
#   2 = HD explicitly null,  3 = SD explicitly null.
_probes = [
    re.search('hd_src:"https', html),
    re.search('sd_src:"https', html),
    re.search('hd_src:null', html),
    re.search('sd_src:null', html),
]
# Renamed from the original "list", which shadowed the builtin.
found = [idx for idx, match in enumerate(_probes) if match is not None]

try:
    if len(found) == 2 and 0 in found and 1 in found:
        # Both qualities available: let the user choose.
        choice = str(input("\nდააჭირეთ კლავიშ 'A'-ს რათა გადმოწეროთ ვიდეო HD ხარისხში.\nდააჭირეთ კლავიშ 'B'-ს რათა გადმოწეროთ ვიდეო SD ხარისხში.\n: ")).upper()
        if choice == 'A':
            print("\nმიმდინარეობს ვიდეოს გადმოწერა HD ხარისხით.")
            _download('hd_src')
            _success_banner()
        if choice == 'B':
            print("\nმიმდინარეობს ვიდეოს გადმოწერა SD ხარისხით.")
            _download('sd_src')
            _success_banner()
    if len(found) == 2 and 1 in found and 2 in found:
        # Only SD available (HD is null): offer the SD download.
        choice = str(input("ბოდიში! სამწუხაროდ ვიდეო არაა ხელმისაწვდომი HD ხარისხში. გნებავთ, რომ მაინც გადმოწეროთ? ('Y' ან 'N'): ")).upper()
        if choice == 'Y':
            print("\nმიმდინარეობს ვიდეოს გადმოწერა SD ხარისხით.")
            _download('sd_src')
            _success_banner()
        if choice == 'N':
            exit()
    if len(found) == 2 and 0 in found and 3 in found:
        # Only HD available (SD is null): offer the HD download.
        choice = str(input("ბოდიში! სამწუხაროდ ვიდეო არაა ხელმისაწვდომი SD ხარისხში. გნებავთ, რომ მაინც გადმოწეროთ? ('Y' ან 'N'): \n")).upper()
        if choice == 'Y':
            print("\nმიმდინარეობს ვიდეოს გადმოწერა HD ხარისხით.")
            _download('hd_src')
            # This branch intentionally keeps the original's different end
            # screen (no leading separator, English author line).
            print("ვიდეოს გადმოწერა წარმატებით დასრულდა.")
            print(_SEP)
            print("Author of the program: Babu")
            print(_SEP)
            print("YouTube - https://youtube.com/AnonymousFromGeorgia")
            print(_SEP)
            print("Github - https://github.com/AnonymousFromGeorgia")
            print(_SEP)
            print("Facebook - https://facebook.com/anonimaluri")
            print(_SEP)
            print("Twitter - https://twitter.com/anonimaluri")
            print(_SEP)
            print("Anonymous from Georgia - Anonymous From Georgia")
            print(_SEP)
        if choice == 'N':
            exit()
except KeyboardInterrupt:
    print("\nThe program is off.")
| 53.217617
| 165
| 0.429559
| 867
| 10,271
| 4.981546
| 0.185698
| 0.081037
| 0.041676
| 0.076407
| 0.768233
| 0.765918
| 0.765918
| 0.755962
| 0.73906
| 0.718685
| 0
| 0.005767
| 0.257132
| 10,271
| 192
| 166
| 53.494792
| 0.560288
| 0.001947
| 0
| 0.748571
| 0
| 0.017143
| 0.445659
| 0.210439
| 0
| 0
| 0
| 0
| 0
| 1
| 0.005714
| false
| 0
| 0.04
| 0
| 0.045714
| 0.485714
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
fa0d93438adcee59e6b32cb1f2b3c21038b49431
| 5,626
|
py
|
Python
|
tests/test_actual.py
|
TalAmuyal/py-cpuinfo
|
6423f9a0ee46c8e4be21a70d38b6ebafb9b6d645
|
[
"MIT"
] | 211
|
2015-01-13T20:00:20.000Z
|
2022-03-24T19:01:43.000Z
|
tests/test_actual.py
|
TalAmuyal/py-cpuinfo
|
6423f9a0ee46c8e4be21a70d38b6ebafb9b6d645
|
[
"MIT"
] | 133
|
2015-01-20T01:27:31.000Z
|
2022-03-14T09:45:16.000Z
|
tests/test_actual.py
|
TalAmuyal/py-cpuinfo
|
6423f9a0ee46c8e4be21a70d38b6ebafb9b6d645
|
[
"MIT"
] | 50
|
2015-01-13T20:00:23.000Z
|
2021-09-24T15:24:06.000Z
|
import unittest
from cpuinfo import *
import helpers
class TestActual(unittest.TestCase):
    """Exercise every cpuinfo data source on the machine running the tests.

    Per platform, exactly one low-level getter is expected to return data;
    every other getter must return an empty dict, and the public
    ``cpuinfo.get_cpu_info()`` entry point must always return something.
    The original seven copy-pasted platform branches are replaced by a
    data-driven table, preserving each branch's exact set of assertions.
    """

    # Getter expected to return a NON-empty result, keyed by OS type.
    _EXPECTED_NONEMPTY = {
        'BeOS': '_get_cpu_info_from_sysinfo',
        'BSD': '_get_cpu_info_from_dmesg',
        'Cygwin': '_get_cpu_info_from_proc_cpuinfo',
        'MacOS': '_get_cpu_info_from_sysctl',
        'Linux': '_get_cpu_info_from_proc_cpuinfo',
        'Solaris': '_get_cpu_info_from_kstat',
        'Windows': '_get_cpu_info_from_registry',
    }

    # Getters deliberately NOT checked on a given platform (the original had
    # the Linux lscpu assertion commented out, as lscpu may return data there).
    _UNCHECKED = {
        'Linux': ('_get_cpu_info_from_lscpu',),
    }

    # Every low-level data-source getter. _get_cpu_info_from_cpuid is left out
    # entirely: the original skipped it everywhere because it can segfault
    # (FIXME upstream).
    _ALL_GETTERS = (
        '_get_cpu_info_from_registry',
        '_get_cpu_info_from_cpufreq_info',
        '_get_cpu_info_from_lscpu',
        '_get_cpu_info_from_proc_cpuinfo',
        '_get_cpu_info_from_sysctl',
        '_get_cpu_info_from_kstat',
        '_get_cpu_info_from_dmesg',
        '_get_cpu_info_from_cat_var_run_dmesg_boot',
        '_get_cpu_info_from_sysinfo',
    )

    def setUp(self):
        # Snapshot the module's data sources so the test leaves no residue.
        helpers.backup_data_source(cpuinfo)

    def tearDown(self):
        helpers.restore_data_source(cpuinfo)

    def test_all(self):
        os_type = helpers.get_os_type()
        if os_type not in self._EXPECTED_NONEMPTY:
            # Same exception type and message as the original's else branch.
            raise AssertionError('Unexpected OS type "{0}".'.format(os_type))
        expected = self._EXPECTED_NONEMPTY[os_type]
        unchecked = self._UNCHECKED.get(os_type, ())
        for name in self._ALL_GETTERS:
            if name in unchecked:
                continue
            result = getattr(cpuinfo, name)()
            if name == expected:
                self.assertTrue(len(result) > 0)
            else:
                self.assertEqual({}, result)
        # The public entry point must always produce data.
        self.assertTrue(len(cpuinfo.get_cpu_info()) > 0)
| 51.614679
| 76
| 0.764842
| 787
| 5,626
| 4.931385
| 0.07878
| 0.198402
| 0.257923
| 0.337284
| 0.923731
| 0.923731
| 0.914455
| 0.914455
| 0.85983
| 0.859315
| 0
| 0.003737
| 0.096338
| 5,626
| 108
| 77
| 52.092593
| 0.759638
| 0.117312
| 0
| 0.727273
| 0
| 0
| 0.01252
| 0
| 0
| 0
| 0
| 0.009259
| 0.795455
| 1
| 0.034091
| false
| 0
| 0.034091
| 0
| 0.079545
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
fa297e2470ee272c9cc96cb0be3c3e5a10b1d168
| 6,545
|
py
|
Python
|
loldib/getratings/models/NA/na_kennen/na_kennen_bot.py
|
koliupy/loldib
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
[
"Apache-2.0"
] | null | null | null |
loldib/getratings/models/NA/na_kennen/na_kennen_bot.py
|
koliupy/loldib
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
[
"Apache-2.0"
] | null | null | null |
loldib/getratings/models/NA/na_kennen/na_kennen_bot.py
|
koliupy/loldib
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
[
"Apache-2.0"
] | null | null | null |
from getratings.models.ratings import Ratings
# One empty Ratings subclass per champion for the NA / Kennen / Bot matchup.
# The original file spelled out 138 identical empty class statements; they are
# generated here from a name table instead. The public module-level class
# names (NA_Kennen_Bot_<Champion>) are identical to the originals, so both
# direct imports and star-imports keep working.
_CHAMPIONS = (
    "Aatrox", "Ahri", "Akali", "Alistar", "Amumu", "Anivia", "Annie", "Ashe",
    "AurelionSol", "Azir", "Bard", "Blitzcrank", "Brand", "Braum", "Caitlyn",
    "Camille", "Cassiopeia", "Chogath", "Corki", "Darius", "Diana", "Draven",
    "DrMundo", "Ekko", "Elise", "Evelynn", "Ezreal", "Fiddlesticks", "Fiora",
    "Fizz", "Galio", "Gangplank", "Garen", "Gnar", "Gragas", "Graves",
    "Hecarim", "Heimerdinger", "Illaoi", "Irelia", "Ivern", "Janna",
    "JarvanIV", "Jax", "Jayce", "Jhin", "Jinx", "Kalista", "Karma", "Karthus",
    "Kassadin", "Katarina", "Kayle", "Kayn", "Kennen", "Khazix", "Kindred",
    "Kled", "KogMaw", "Leblanc", "LeeSin", "Leona", "Lissandra", "Lucian",
    "Lulu", "Lux", "Malphite", "Malzahar", "Maokai", "MasterYi",
    "MissFortune", "MonkeyKing", "Mordekaiser", "Morgana", "Nami", "Nasus",
    "Nautilus", "Nidalee", "Nocturne", "Nunu", "Olaf", "Orianna", "Ornn",
    "Pantheon", "Poppy", "Quinn", "Rakan", "Rammus", "RekSai", "Renekton",
    "Rengar", "Riven", "Rumble", "Ryze", "Sejuani", "Shaco", "Shen",
    "Shyvana", "Singed", "Sion", "Sivir", "Skarner", "Sona", "Soraka",
    "Swain", "Syndra", "TahmKench", "Taliyah", "Talon", "Taric", "Teemo",
    "Thresh", "Tristana", "Trundle", "Tryndamere", "TwistedFate", "Twitch",
    "Udyr", "Urgot", "Varus", "Vayne", "Veigar", "Velkoz", "Vi", "Viktor",
    "Vladimir", "Volibear", "Warwick", "Xayah", "Xerath", "XinZhao", "Yasuo",
    "Yorick", "Zac", "Zed", "Ziggs", "Zilean", "Zyra",
)

for _champion in _CHAMPIONS:
    _cls_name = "NA_Kennen_Bot_" + _champion
    # type(name, bases, dict) creates a class identical to an empty
    # "class <name>(Ratings): pass" statement in this module.
    globals()[_cls_name] = type(_cls_name, (Ratings,), {})
del _champion, _cls_name
| 15.695444
| 46
| 0.766692
| 972
| 6,545
| 4.736626
| 0.151235
| 0.209818
| 0.389661
| 0.479583
| 0.803432
| 0.803432
| 0
| 0
| 0
| 0
| 0
| 0
| 0.169748
| 6,545
| 416
| 47
| 15.733173
| 0.847258
| 0
| 0
| 0.498195
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.498195
| 0.00361
| 0
| 0.501805
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
|
0
| 8
|
fa465f14d7c2aa192af8f47750c27f610542c69d
| 30,509
|
py
|
Python
|
testcases/generated/ipanti_test.py
|
Tanc009/jdcloud-cli
|
4e11de77c68501f44e7026c0ad1c24e5d043197e
|
[
"Apache-2.0"
] | null | null | null |
testcases/generated/ipanti_test.py
|
Tanc009/jdcloud-cli
|
4e11de77c68501f44e7026c0ad1c24e5d043197e
|
[
"Apache-2.0"
] | null | null | null |
testcases/generated/ipanti_test.py
|
Tanc009/jdcloud-cli
|
4e11de77c68501f44e7026c0ad1c24e5d043197e
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
import unittest
import os
import json
class IpantiTest(unittest.TestCase):
def test_describe_ddo_sattack_logs(self):
    """Smoke-test the describe-ddo-sattack-logs subcommand: output must be a JSON object."""
    cmd = """python ../../main.py ipanti describe-ddo-sattack-logs --start-time 'xxx' --end-time 'xxx'"""
    with os.popen(cmd) as stream:
        output = stream.read()
    print(output)
    parsed = json.loads(output)
    self.assertIsInstance(parsed, dict)
def test_describe_ccattack_logs(self):
    """Smoke-test the describe-ccattack-logs subcommand: output must be a JSON object."""
    cmd = """python ../../main.py ipanti describe-ccattack-logs --start-time 'xxx' --end-time 'xxx'"""
    with os.popen(cmd) as stream:
        output = stream.read()
    print(output)
    parsed = json.loads(output)
    self.assertIsInstance(parsed, dict)
def test_describe_ccattack_log_details(self):
    """Smoke-test the describe-ccattack-log-details subcommand: output must be a JSON object."""
    cmd = """python ../../main.py ipanti describe-ccattack-log-details --start-time 'xxx' --end-time 'xxx'"""
    with os.popen(cmd) as stream:
        output = stream.read()
    print(output)
    parsed = json.loads(output)
    self.assertIsInstance(parsed, dict)
def test_describe_attack_statistics(self):
    """Smoke-test the describe-attack-statistics subcommand: output must be a JSON object."""
    cmd = """python ../../main.py ipanti describe-attack-statistics --start-time 'xxx' --end-time 'xxx' --type '5'"""
    with os.popen(cmd) as stream:
        output = stream.read()
    print(output)
    parsed = json.loads(output)
    self.assertIsInstance(parsed, dict)
def test_describe_attack_type_count(self):
    """Smoke-test the describe-attack-type-count subcommand: output must be a JSON object."""
    cmd = """python ../../main.py ipanti describe-attack-type-count --start-time 'xxx' --end-time 'xxx'"""
    with os.popen(cmd) as stream:
        output = stream.read()
    print(output)
    parsed = json.loads(output)
    self.assertIsInstance(parsed, dict)
def test_describe_ddo_sgraph(self):
    """Smoke-test the describe-ddo-sgraph subcommand: output must be a JSON object."""
    cmd = """python ../../main.py ipanti describe-ddo-sgraph --start-time 'xxx' --end-time 'xxx'"""
    with os.popen(cmd) as stream:
        output = stream.read()
    print(output)
    parsed = json.loads(output)
    self.assertIsInstance(parsed, dict)
def test_describe_fwd_graph(self):
    """Smoke-test the describe-fwd-graph subcommand: output must be a JSON object."""
    cmd = """python ../../main.py ipanti describe-fwd-graph --start-time 'xxx' --end-time 'xxx'"""
    with os.popen(cmd) as stream:
        output = stream.read()
    print(output)
    parsed = json.loads(output)
    self.assertIsInstance(parsed, dict)
def test_describe_ccgraph(self):
    """Smoke-test the describe-ccgraph subcommand: output must be a JSON object."""
    cmd = """python ../../main.py ipanti describe-ccgraph --start-time 'xxx' --end-time 'xxx'"""
    with os.popen(cmd) as stream:
        output = stream.read()
    print(output)
    parsed = json.loads(output)
    self.assertIsInstance(parsed, dict)
def test_describe_forward_rules(self):
    """Smoke-test the describe-forward-rules subcommand: output must be a JSON object."""
    cmd = """python ../../main.py ipanti describe-forward-rules --instance-id 'xxx'"""
    with os.popen(cmd) as stream:
        output = stream.read()
    print(output)
    parsed = json.loads(output)
    self.assertIsInstance(parsed, dict)
def test_create_forward_rule(self):
    """Run the ipanti create-forward-rule CLI subcommand and verify that
    its stdout parses as a JSON object."""
    cmd = """python ../../main.py ipanti create-forward-rule --instance-id 'xxx' --forward-rule-spec '{"":""}'"""
    pipe = os.popen(cmd)
    try:
        output = pipe.read()
    finally:
        pipe.close()
    print(output)
    self.assertIsInstance(json.loads(output), dict)
def test_describe_forward_rule(self):
cmd = """python ../../main.py ipanti describe-forward-rule --instance-id 'xxx' --forward-rule-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_modify_forward_rule(self):
cmd = """python ../../main.py ipanti modify-forward-rule --instance-id 'xxx' --forward-rule-id 'xxx' --forward-rule-spec '{"":""}'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_delete_forward_rule(self):
cmd = """python ../../main.py ipanti delete-forward-rule --instance-id 'xxx' --forward-rule-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_switch_forward_rule_protect(self):
cmd = """python ../../main.py ipanti switch-forward-rule-protect --instance-id 'xxx' --forward-rule-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_switch_forward_rule_origin(self):
cmd = """python ../../main.py ipanti switch-forward-rule-origin --instance-id 'xxx' --forward-rule-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_describe_protection_rule_of_forward_rule(self):
cmd = """python ../../main.py ipanti describe-protection-rule-of-forward-rule --instance-id 'xxx' --forward-rule-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_modify_protection_rule_of_forward_rule(self):
cmd = """python ../../main.py ipanti modify-protection-rule-of-forward-rule --instance-id 'xxx' --forward-rule-id 'xxx' --forward-protection-rule-spec '{"":""}'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_describe_geo_areas(self):
cmd = """python ../../main.py ipanti describe-geo-areas """
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_describe_black_list_rule_of_forward_rule(self):
cmd = """python ../../main.py ipanti describe-black-list-rule-of-forward-rule --instance-id 'xxx' --forward-rule-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_modify_black_list_rule_of_forward_rule(self):
cmd = """python ../../main.py ipanti modify-black-list-rule-of-forward-rule --instance-id 'xxx' --forward-rule-id 'xxx' --modify-spec '{"":""}'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_enable_black_list_rule_of_forward_rule(self):
cmd = """python ../../main.py ipanti enable-black-list-rule-of-forward-rule --instance-id 'xxx' --forward-rule-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_disable_black_list_rule_of_forward_rule(self):
cmd = """python ../../main.py ipanti disable-black-list-rule-of-forward-rule --instance-id 'xxx' --forward-rule-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_describe_white_list_rule_of_forward_rule(self):
cmd = """python ../../main.py ipanti describe-white-list-rule-of-forward-rule --instance-id 'xxx' --forward-rule-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_modify_white_list_rule_of_forward_rule(self):
cmd = """python ../../main.py ipanti modify-white-list-rule-of-forward-rule --instance-id 'xxx' --forward-rule-id 'xxx' --modify-spec '{"":""}'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_enable_white_list_rule_of_forward_rule(self):
cmd = """python ../../main.py ipanti enable-white-list-rule-of-forward-rule --instance-id 'xxx' --forward-rule-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_disable_white_list_rule_of_forward_rule(self):
cmd = """python ../../main.py ipanti disable-white-list-rule-of-forward-rule --instance-id 'xxx' --forward-rule-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_describe_instances(self):
cmd = """python ../../main.py ipanti describe-instances """
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_create_instance(self):
cmd = """python ../../main.py ipanti create-instance --create-instance-spec '{"":""}'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_describe_instance(self):
cmd = """python ../../main.py ipanti describe-instance --instance-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_modify_instance_name(self):
cmd = """python ../../main.py ipanti modify-instance-name --instance-id 'xxx' --rename-instance-spec '{"":""}'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_modify_epb(self):
cmd = """python ../../main.py ipanti modify-epb --instance-id 'xxx' --modify-instance-epbspec '{"":""}'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_describe_alarm_config(self):
cmd = """python ../../main.py ipanti describe-alarm-config --instance-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_modify_alarm_config(self):
cmd = """python ../../main.py ipanti modify-alarm-config --instance-id 'xxx' --alarm-config-spec '{"":""}'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_describe_name_list(self):
cmd = """python ../../main.py ipanti describe-name-list """
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_describe_protection_statistics(self):
cmd = """python ../../main.py ipanti describe-protection-statistics """
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_check_name(self):
cmd = """python ../../main.py ipanti check-name --name 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_describe_vpc_ip_list(self):
cmd = """python ../../main.py ipanti describe-vpc-ip-list """
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_describe_cps_ip_list(self):
cmd = """python ../../main.py ipanti describe-cps-ip-list """
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_describe_ip_sets(self):
cmd = """python ../../main.py ipanti describe-ip-sets --instance-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_create_ip_set(self):
cmd = """python ../../main.py ipanti create-ip-set --instance-id 'xxx' --ip-set-spec '{"":""}'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_describe_ip_set(self):
cmd = """python ../../main.py ipanti describe-ip-set --instance-id 'xxx' --ip-set-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_delete_ip_set(self):
cmd = """python ../../main.py ipanti delete-ip-set --instance-id 'xxx' --ip-set-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_describe_ip_set_usage(self):
cmd = """python ../../main.py ipanti describe-ip-set-usage --instance-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_describe_web_rules(self):
cmd = """python ../../main.py ipanti describe-web-rules --instance-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_create_web_rule(self):
cmd = """python ../../main.py ipanti create-web-rule --instance-id 'xxx' --web-rule-spec '{"":""}'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_describe_web_rule(self):
cmd = """python ../../main.py ipanti describe-web-rule --instance-id 'xxx' --web-rule-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_modify_web_rule(self):
cmd = """python ../../main.py ipanti modify-web-rule --instance-id 'xxx' --web-rule-id 'xxx' --web-rule-spec '{"":""}'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_delete_web_rule(self):
cmd = """python ../../main.py ipanti delete-web-rule --instance-id 'xxx' --web-rule-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_switch_web_rule_protect(self):
cmd = """python ../../main.py ipanti switch-web-rule-protect --instance-id 'xxx' --web-rule-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_switch_web_rule_origin(self):
cmd = """python ../../main.py ipanti switch-web-rule-origin --instance-id 'xxx' --web-rule-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_enable_web_rule_cc(self):
cmd = """python ../../main.py ipanti enable-web-rule-cc --instance-id 'xxx' --web-rule-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_disable_web_rule_cc(self):
cmd = """python ../../main.py ipanti disable-web-rule-cc --instance-id 'xxx' --web-rule-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_enable_web_rule_ccobserver_mode(self):
cmd = """python ../../main.py ipanti enable-web-rule-ccobserver-mode --instance-id 'xxx' --web-rule-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_disable_web_rule_ccobserver_mode(self):
cmd = """python ../../main.py ipanti disable-web-rule-ccobserver-mode --instance-id 'xxx' --web-rule-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_describe_ccprotection_rules_of_web_rule(self):
cmd = """python ../../main.py ipanti describe-ccprotection-rules-of-web-rule --instance-id 'xxx' --web-rule-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_create_ccprotection_rule_of_web_rule(self):
cmd = """python ../../main.py ipanti create-ccprotection-rule-of-web-rule --instance-id 'xxx' --web-rule-id 'xxx' --cc-protection-rule-spec '{"":""}'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_describe_ccprotection_rule_of_web_rule(self):
cmd = """python ../../main.py ipanti describe-ccprotection-rule-of-web-rule --instance-id 'xxx' --web-rule-id 'xxx' --cc-protection-rule-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_modify_ccprotection_rule_of_web_rule(self):
cmd = """python ../../main.py ipanti modify-ccprotection-rule-of-web-rule --instance-id 'xxx' --web-rule-id 'xxx' --cc-protection-rule-id 'xxx' --cc-protection-rule-spec '{"":""}'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_delete_ccprotection_rule_of_web_rule(self):
cmd = """python ../../main.py ipanti delete-ccprotection-rule-of-web-rule --instance-id 'xxx' --web-rule-id 'xxx' --cc-protection-rule-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_enable_ccprotection_rule_of_web_rule(self):
cmd = """python ../../main.py ipanti enable-ccprotection-rule-of-web-rule --instance-id 'xxx' --web-rule-id 'xxx' --cc-protection-rule-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_disable_ccprotection_rule_of_web_rule(self):
cmd = """python ../../main.py ipanti disable-ccprotection-rule-of-web-rule --instance-id 'xxx' --web-rule-id 'xxx' --cc-protection-rule-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_describe_ccprotection_config_of_web_rule(self):
cmd = """python ../../main.py ipanti describe-ccprotection-config-of-web-rule --instance-id 'xxx' --web-rule-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_modify_ccprotection_config_of_web_rule(self):
cmd = """python ../../main.py ipanti modify-ccprotection-config-of-web-rule --instance-id 'xxx' --web-rule-id 'xxx' --cc-protection-config-spec '{"":""}'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_describe_ccprotection_default_config_of_web_rule(self):
cmd = """python ../../main.py ipanti describe-ccprotection-default-config-of-web-rule --instance-id 'xxx' --web-rule-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_modify_cert_info(self):
cmd = """python ../../main.py ipanti modify-cert-info --instance-id 'xxx' --web-rule-id 'xxx' --cert-info-modify-spec '{"":""}'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_describe_web_rule_black_list_usage(self):
cmd = """python ../../main.py ipanti describe-web-rule-black-list-usage --instance-id 'xxx' --web-rule-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_describe_black_list_rules_of_web_rule(self):
cmd = """python ../../main.py ipanti describe-black-list-rules-of-web-rule --instance-id 'xxx' --web-rule-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_create_black_list_rule_of_web_rule(self):
cmd = """python ../../main.py ipanti create-black-list-rule-of-web-rule --instance-id 'xxx' --web-rule-id 'xxx' --web-black-list-rule-spec '{"":""}'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_describe_black_list_rule_of_web_rule(self):
cmd = """python ../../main.py ipanti describe-black-list-rule-of-web-rule --instance-id 'xxx' --web-rule-id 'xxx' --web-black-list-rule-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_modify_black_list_rule_of_web_rule(self):
cmd = """python ../../main.py ipanti modify-black-list-rule-of-web-rule --instance-id 'xxx' --web-rule-id 'xxx' --web-black-list-rule-id 'xxx' --web-black-list-rule-spec '{"":""}'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_delete_black_list_rule_of_web_rule(self):
cmd = """python ../../main.py ipanti delete-black-list-rule-of-web-rule --instance-id 'xxx' --web-rule-id 'xxx' --web-black-list-rule-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_enable_web_rule_black_list(self):
cmd = """python ../../main.py ipanti enable-web-rule-black-list --instance-id 'xxx' --web-rule-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_enable_black_list_rule_of_web_rule(self):
cmd = """python ../../main.py ipanti enable-black-list-rule-of-web-rule --instance-id 'xxx' --web-rule-id 'xxx' --web-black-list-rule-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_disable_web_rule_black_list(self):
cmd = """python ../../main.py ipanti disable-web-rule-black-list --instance-id 'xxx' --web-rule-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_disable_black_list_rule_of_web_rule(self):
cmd = """python ../../main.py ipanti disable-black-list-rule-of-web-rule --instance-id 'xxx' --web-rule-id 'xxx' --web-black-list-rule-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_describe_web_rule_white_list_usage(self):
cmd = """python ../../main.py ipanti describe-web-rule-white-list-usage --instance-id 'xxx' --web-rule-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_describe_white_list_rules_of_web_rule(self):
cmd = """python ../../main.py ipanti describe-white-list-rules-of-web-rule --instance-id 'xxx' --web-rule-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_create_white_list_rule_of_web_rule(self):
cmd = """python ../../main.py ipanti create-white-list-rule-of-web-rule --instance-id 'xxx' --web-rule-id 'xxx' --web-white-list-rule-spec '{"":""}'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_describe_white_list_rule_of_web_rule(self):
cmd = """python ../../main.py ipanti describe-white-list-rule-of-web-rule --instance-id 'xxx' --web-rule-id 'xxx' --web-white-list-rule-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_modify_white_list_rule_of_web_rule(self):
cmd = """python ../../main.py ipanti modify-white-list-rule-of-web-rule --instance-id 'xxx' --web-rule-id 'xxx' --web-white-list-rule-id 'xxx' --web-white-list-rule-spec '{"":""}'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_delete_white_list_rule_of_web_rule(self):
cmd = """python ../../main.py ipanti delete-white-list-rule-of-web-rule --instance-id 'xxx' --web-rule-id 'xxx' --web-white-list-rule-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_enable_web_rule_white_list(self):
cmd = """python ../../main.py ipanti enable-web-rule-white-list --instance-id 'xxx' --web-rule-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_enable_white_list_rule_of_web_rule(self):
cmd = """python ../../main.py ipanti enable-white-list-rule-of-web-rule --instance-id 'xxx' --web-rule-id 'xxx' --web-white-list-rule-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_disable_web_rule_white_list(self):
cmd = """python ../../main.py ipanti disable-web-rule-white-list --instance-id 'xxx' --web-rule-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_disable_white_list_rule_of_web_rule(self):
cmd = """python ../../main.py ipanti disable-white-list-rule-of-web-rule --instance-id 'xxx' --web-rule-id 'xxx' --web-white-list-rule-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_describe_web_rule_black_list_geo_areas(self):
cmd = """python ../../main.py ipanti describe-web-rule-black-list-geo-areas """
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_describe_web_rule_white_list_geo_areas(self):
    """Run the ipanti describe-web-rule-white-list-geo-areas CLI subcommand
    and verify that its stdout parses as a JSON object."""
    cmd = """python ../../main.py ipanti describe-web-rule-white-list-geo-areas """
    pipe = os.popen(cmd)
    try:
        output = pipe.read()
    finally:
        pipe.close()
    print(output)
    self.assertIsInstance(json.loads(output), dict)
| 37.71199
| 191
| 0.60569
| 3,942
| 30,509
| 4.581684
| 0.039828
| 0.039034
| 0.062621
| 0.081889
| 0.94956
| 0.946182
| 0.944964
| 0.921322
| 0.883561
| 0.8458
| 0
| 0.000434
| 0.244026
| 30,509
| 808
| 192
| 37.758663
| 0.782648
| 0.020781
| 0
| 0.709625
| 0
| 0.115824
| 0.298724
| 0.086144
| 0
| 0
| 0
| 0
| 0.141925
| 1
| 0.141925
| false
| 0
| 0.004894
| 0
| 0.14845
| 0.141925
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d710e1f67c4c77e907a35c2015734cab4608ac99
| 4,292
|
py
|
Python
|
PyBank/main.py
|
MAICATRAN/python-challenge
|
748ea0b3c480740a19a3710725899d62e01bdcdc
|
[
"Apache-2.0"
] | null | null | null |
PyBank/main.py
|
MAICATRAN/python-challenge
|
748ea0b3c480740a19a3710725899d62e01bdcdc
|
[
"Apache-2.0"
] | null | null | null |
PyBank/main.py
|
MAICATRAN/python-challenge
|
748ea0b3c480740a19a3710725899d62e01bdcdc
|
[
"Apache-2.0"
] | null | null | null |
# PyBank: summarise monthly profit/loss figures from budget_data.csv and
# write the report to both stdout and an analysis file.
#
# NOTE(review): the original file contained an unresolved git merge conflict
# (<<<<<<< HEAD ... >>>>>>>) whose two sides were byte-identical; it is
# resolved here by keeping a single copy of the script.
import os
import csv

# Raw string: the original "C:\MAICA\..." literal relied on invalid escape
# sequences such as "\M" (deprecated in Python 3).
budget_data_csv = os.path.join(r"C:\MAICA\MONASH\Python\python-challenge\PyBank\Resources", "budget_data.csv")

months = []            # month labels, starting from the second row (the first has no change)
profit_loss = []       # month-over-month changes in profit/loss
count_month = 0        # total number of data rows seen
current_month = 0
net_change = 0         # running total of the raw profit/loss column
prev_month = 0
profit_loss_change = 0

with open(budget_data_csv) as csvfile:
    csv_header = next(csvfile)  # skip the header row
    csvreader = csv.reader(csvfile, delimiter=",")
    for row in csvreader:
        count_month = count_month + 1
        current_month = int(row[1])
        net_change += current_month
        if (count_month) == 1:
            # First month has no previous value, so no change is recorded.
            prev_month = current_month
        else:
            profit_loss_change = current_month - prev_month
            months.append(row[0])
            profit_loss.append(profit_loss_change)
            prev_month = current_month

Total_profit_loss = sum(profit_loss)
# count_month - 1 because the first month contributes no change entry.
Average_profit_loss = round(Total_profit_loss / (count_month - 1), 2)
max_value = max(profit_loss)
min_value = min(profit_loss)
highest_month_index = profit_loss.index(max_value)
lowest_month_index = profit_loss.index(min_value)
greatest_month = months[highest_month_index]
lowest_month = months[lowest_month_index]

print("Financial Analysis")
print("-------------------------------")
print(f"Total Months: {count_month}")
print(f"Total: ${net_change}")
print(f"Average Change: ${Average_profit_loss}")
print(f"Greatest Increase in Profits: {greatest_month} (${max_value})")
print(f"Greatest Decrease in Profits: {lowest_month} (${min_value})")

budget_file = os.path.join(r"C:\MAICA\MONASH\Python\python-challenge\PyBank\analysis", "budget_data.text")
with open(budget_file, "w") as outfile:
    # Bug fix: the original wrote "Financial Analysis" without a trailing
    # newline, fusing it with the divider line in the output file.
    outfile.write("Financial Analysis\n")
    outfile.write("-------------------------------\n")
    outfile.write(f"Total Months: {count_month}\n")
    outfile.write(f"Total: ${net_change}\n")
    outfile.write(f"Average Change: ${Average_profit_loss}\n")
    outfile.write(f"Greatest Increase in Profits: {greatest_month} (${max_value})\n")
    outfile.write(f"Greatest Decrease in Profits: {lowest_month} (${min_value})\n")
| 27.512821
| 111
| 0.669152
| 560
| 4,292
| 4.860714
| 0.121429
| 0.110213
| 0.047759
| 0.051433
| 0.983835
| 0.983835
| 0.983835
| 0.983835
| 0.983835
| 0.983835
| 0
| 0.011888
| 0.176841
| 4,292
| 155
| 112
| 27.690323
| 0.758562
| 0
| 0
| 0.969697
| 0
| 0
| 0.312209
| 0.105778
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.040404
| null | null | 0.141414
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
d73ede2a11fa2331c72622e69edfd2b34e0df5f7
| 59,243
|
py
|
Python
|
pyordle_single_file.py
|
cmglee/pyordle
|
cee4dc600866faab9ff03245a25901cdd105cf08
|
[
"MIT"
] | 2
|
2022-02-27T12:57:50.000Z
|
2022-03-01T19:08:18.000Z
|
pyordle_single_file.py
|
cmglee/pyordle
|
cee4dc600866faab9ff03245a25901cdd105cf08
|
[
"MIT"
] | null | null | null |
pyordle_single_file.py
|
cmglee/pyordle
|
cee4dc600866faab9ff03245a25901cdd105cf08
|
[
"MIT"
] | null | null | null |
"""
Python 3 implementation of a popular 2022 word game by CMG Lee licensed under CC-BY-SA 4.0
Usage: python3 pyordle.py [GAME_MODE] [0 for no GUI or 1 to use tkinter] [ANSWER for testing]
where GAME_MODE is one of
game: Player guesses a word computer picks
hint: Player guesses a word computer picks (possible words shown)
solver: Computer solves a word player or http://hellowordl.net picks
demo: Computer plays against itself
TODO: Reimplement as a class
TODO: Improve error-handling
"""
import sys, time, random
## --- Game configuration constants ---
DEFAULT_ANSWER = None
DEFAULT_GAME_MODE = 'game'
FIRST_GUESS = 'RAISE'
## Tile width in pixels for the tkinter board
LABEL_PX = 70
COLOUR_BG = 'white'
## Tile/keyboard colours indexed by mark value 0-3
COLOUR_MARK_L = ['white', 'grey', 'yellow', 'lime']
## Small per-letter bonus (ordered by descending English letter frequency)
## used to break ties between equally-scored words in choose_guess
FREQUENCY_TIEBREAK_D = {letter:i_letter * 0.01 for (i_letter, letter) in
enumerate('EARIOTNSLCUDPMHGBFYWKVXZJQ'[::-1])}
FORMAT_KEYBOARD_MARK_L = [' {} ', ' ' , '<{}>', '[{}]'] ## 0=white, 1=grey, 2=amber, 3=green
FORMAT_GUESS_MARK_L = [' {} ', ' {} ', '<{}>', '[{}]']
N_LETTER = 5
FILE_ANSWER = 'pyordle_answers.txt'
FILE_VALID = 'pyordle_valid.txt'
## Console keyboard layout used by display_result
KEYBOARD = '''
Q W E R T Y U I O P
A S D F G H J K L
Z X C V B N M'''
## One praise message per attempt; its length fixes the number of guesses
PRAISE_L = ['Genius', 'Magnificent', 'Impressive', 'Splendid', 'Great', 'Phew']
n_guess = len(PRAISE_L)
## Remove possible words not matching evaluation
def trim_possible(possible_s, guess, guess_mark_l):
    """Mutate *possible_s* in place, dropping every candidate word whose
    evaluation against *guess* differs from the observed marks."""
    rejected = [word for word in possible_s
                if evaluate_guess(guess, word) != guess_mark_l]
    for word in rejected:
        possible_s.remove(word)
## Choose next guess
## TODO: Implement a better algorithm using word commonness
def choose_guess(possible_s):
    """Pick the most promising next guess from *possible_s*.

    Each word is scored by the summed frequency of its letters across the
    remaining candidates (plus FREQUENCY_TIEBREAK_D to break ties), divided
    by a penalty that grows when the word repeats letters. Prints the top
    five scores, mirrors them to the GUI title label when is_gui is set,
    and returns the best-scoring word.
    """
    ## Rank letters by descending frequency in possible_s
    ## (dict.get idiom replaces the original if-not-in/init dance; the
    ## unused local n_possible was removed)
    frequency_d = {}
    for word in possible_s:
        for letter in word:
            frequency_d[letter] = frequency_d.get(letter, 0) + 1
    ## Assign scores to words in possible_s; repeated letters shrink
    ## len(set(word)) and so enlarge the denominator, lowering the score
    score_d = {}
    for word in possible_s:
        score_d[word] = sum(frequency_d[letter] + FREQUENCY_TIEBREAK_D[letter]
                            for letter in word) / (N_LETTER - len(set(word)) + 2)
    sorted_score_l = sorted(score_d, key=lambda word: score_d[word], reverse=True)
    print(', '.join('{}:{:.2f}'.format(word, score_d[word]) for word in sorted_score_l[:5]))
    if is_gui:
        label_title.configure(text=','.join(sorted_score_l[:5]))
    return sorted_score_l[0]
## Input guess for computer's word
def input_guess(i_guess, n_guess, valid_s):
    """Prompt (via GUI mainloop or console) for guess number *i_guess* of
    *n_guess*, repeating until the entry is in *valid_s*; returns the
    accepted word. On the first guess, also prints the marking legend."""
    global gui_input ## must set as global as value will be changed
    gui_input = ''
    prompt_guess = 'Enter guess {} out of {}: '.format(i_guess, n_guess)
    if game_mode == 'GAME' or i_guess == 1:
        display_message(prompt_guess, False)
    guess = None
    while guess not in valid_s:
        if guess is not None:
            ## Previous attempt was rejected; tell the player before retrying
            display_message('"{}" is not in the word list.'.format(guess))
        if is_gui:
            window.mainloop()
            guess = gui_input
        else:
            guess = raw_input('\n{}'.format(prompt_guess)).strip().upper()
    if i_guess == 1:
        print('\n[ ] = letter exactly right, < > = letter in wrong position')
    return guess
## Input results for computer's guess
def input_evaluation(guess):
    """Ask the player to play *guess* elsewhere and report the marks as a
    digit string (1=grey, 2=amber, 3=green); returns them as a list of ints."""
    prompt_mark = '''
Play {} and then enter the result as 5 digits, such as 12231
where 1=totally wrong, 2=wrong position, 3=exactly right: '''.format(guess)
    return list(map(int, raw_input(prompt_mark).strip()))
## Evaluate result for a given answer
def evaluate_guess(guess, answer):
    """Score *guess* against *answer*, Wordle-style.

    Returns a list with one mark per guess letter:
    1 = letter absent, 2 = present but wrongly placed, 3 = exact match.
    Generalized: uses len(guess) instead of the module constant N_LETTER,
    so any word length works (backward compatible for 5-letter words).
    """
    guess_mark_l = [1] * len(guess)
    for (i_letter_answer, letter_answer) in enumerate(answer):
        ## Mark greens
        if guess[i_letter_answer] == letter_answer:
            guess_mark_l[i_letter_answer] = 3
        else:
            ## Mark ambers: the first not-yet-marked occurrence of this
            ## answer letter in the guess gets amber (greys stay at 1)
            for (i_letter_guess, letter_guess) in enumerate(guess):
                if letter_guess == letter_answer and guess_mark_l[i_letter_guess] < 2:
                    guess_mark_l[i_letter_guess] = 2
                    break
    return guess_mark_l
## Animate letter
## One animation frame: resize *label* to i_fraction/5 of LABEL_PX and
## pause 30 ms so the shrink/grow effect is visible on screen.
def animate_letter(label, i_fraction):
label.configure(width=LABEL_PX * i_fraction // 5)
label.update()
time.sleep(0.03)
## Output result
## Render the evaluation of *guess* (attempt *i_guess*) to the console and,
## when a GUI is active, animate the board tiles and recolour the on-screen
## keyboard. keyboard_mark_d is mutated: each letter keeps its best mark.
def display_result(guess, i_guess, guess_mark_l, keyboard_mark_d):
## Update keyboard
## (a letter's mark only ever increases: 1 grey < 2 amber < 3 green)
for (i_letter_guess, letter_guess) in enumerate(guess):
if keyboard_mark_d[letter_guess] < guess_mark_l[i_letter_guess]:
keyboard_mark_d[letter_guess] = guess_mark_l[i_letter_guess]
## Print evaluation
output = '\n'
for (i_letter, letter) in enumerate(guess):
output += ' ' + FORMAT_GUESS_MARK_L[guess_mark_l[i_letter]].format(letter)
print(output)
## Print keyboard
output = KEYBOARD
for letter in keyboard_mark_d:
output = output.replace(letter, FORMAT_KEYBOARD_MARK_L[keyboard_mark_d[letter]].format(letter))
print(output)
if is_gui:
## For each tile: shrink over 5 frames, recolour, grow back over 5
## frames, then recolour the matching keyboard button
for (i_letter, letter) in enumerate(guess):
label = label_ll[i_guess][i_letter]
for i_frame in range(5): animate_letter(label, 4.5 - i_frame)
label.configure(bg=COLOUR_MARK_L[guess_mark_l[i_letter]])
for i_frame in range(5): animate_letter(label, 1 + i_frame)
button_d[letter].configure(bg=COLOUR_MARK_L[keyboard_mark_d[letter]])
## Output message
def display_message(message, is_print=True):
    """Show `message` on the terminal (unless is_print is False) and, in GUI
    mode, type it into the title label one character at a time."""
    if is_print:
        print('\n{}'.format(message))
    if is_gui:
        for length in range(1, len(message) + 1):
            label_title.configure(text=message[:length])
            label_title.update()
            time.sleep(0.01)
## Make compatible with both Python 2 and 3
## (Python 3 has no raw_input; alias it to input so the rest of the
##  script can call raw_input unconditionally)
try: raw_input
except NameError: raw_input = input
## Set game mode (GAME by default); the first CLI argument overrides it
game_mode = (sys.argv[1] if len(sys.argv) > 1 else DEFAULT_GAME_MODE).upper()
print('PYORDLE IN {} MODE'.format(game_mode))
## TODO: Make GUI for solver mode
## GUI is enabled unless in SOLVER mode or a second CLI argument other than '1' is given
is_gui = game_mode != 'SOLVER' and (len(sys.argv) <= 2 or sys.argv[2] == '1')
## Set up GUI if needed
## TODO: Run all program logic in mainloop instead of starting and stopping it
## TODO: Make window resizable
if is_gui:
    ## Fall back to terminal-only mode when tkinter is unavailable
    try: import tkinter as tk, tkinter.font as tkFont
    except ImportError: is_gui = False
if is_gui:
    window = tk.Tk(className='Pyordle')
    window.configure(bg=COLOUR_BG)
    window.resizable(False, False)
    ## Physical key presses and Ctrl-C both go through window-level bindings
    window.bind_all('<Key>', lambda event:input_key(event.char))
    window.bind_all('<Control-c>', exit)
    tkFont.nametofont('TkDefaultFont').configure(family='Courier', size=18)
    ## Three stacked frames: title, guess grid, on-screen keyboard
    frame_header = tk.Frame(master=window, bg=COLOUR_BG)
    frame_guess = tk.Frame(master=window, bg=COLOUR_BG)
    frame_footer = tk.Frame(master=window, bg=COLOUR_BG, padx=20, pady=4)
    frame_header.pack()
    frame_guess .pack()
    frame_footer.pack()
    label_title = tk.Label(master=frame_header, font=('sans-serif', 10), bg=COLOUR_BG)
    label_title.pack()
    ## Draw guesses
    ## An empty PhotoImage makes the labels size in pixels instead of characters
    image_dummy = tk.PhotoImage()
    ## label_ll[i_guess][i_letter]; index 0 is a placeholder so guesses are 1-based
    label_ll = [None]
    for i_guess in range(n_guess):
        label_l = []
        for i_letter in range(N_LETTER):
            label = tk.Label(master=frame_guess, width=LABEL_PX, height=LABEL_PX, relief='groove',
                             image=image_dummy, compound=tk.CENTER, bg=COLOUR_BG, text=' ')
            label.grid(row=i_guess, column=i_letter, padx=2, pady=4)
            label_l.append(label)
        label_ll.append(label_l)
    ## Draw keyboard
    button_d = {}
    ## ENTER and BACKSPACE are shown as the return and erase symbols
    button_label_d = {'ENTER':'\u21B5', 'BACKSPACE': '\u232B'}
    for (i_row, row) in enumerate(KEYBOARD.strip().split('\n')):
        ## Offset alternate rows by one grid column to stagger the keys
        i_column = i_row % 2
        if i_row == 2: row = 'ENTER {} BACKSPACE'.format(row)
        for (i_key, key) in enumerate(row.split()):
            ## Special keys are wider and span an extra grid column
            columnspan = 3 if key in button_label_d else 2
            width = 2 if key in button_label_d else 1
            button_d[key] = tk.Button(master=frame_footer, width=width, height=1, padx=1, pady=1,
                                      relief='raised', bg=COLOUR_BG, text=button_label_d.get(key, key),
                                      command=lambda key=key:input_key(key)) ## see Python closure
            button_d[key].grid(row=i_row, column=i_column, columnspan=columnspan, padx=2, pady=2)
            i_column += columnspan
gui_input = '' ## input from GUI
def input_key(key): ## callback on keypress or click on GUI
    """Handle one key press: Escape exits, Backspace erases the last pending
    letter, Enter submits (stops mainloop), and A-Z letters are appended to
    the pending guess while updating the current row of tile labels."""
    global gui_input  ## must set as global as value will be changed
    key = key.upper()
    if key == '\x1b':  ## Escape
        exit()
    elif key in ('\x08', 'BACKSPACE'):
        if gui_input:
            gui_input = gui_input[:-1]
            ## text=' ' rather than '' because an empty label would shrink
            label_ll[i_guess][len(gui_input)].configure(text=' ')
    elif key in ('\x0d', 'ENTER'):
        ## Stops mainloop(); input_guess() then reads gui_input
        window.quit()
    elif 'A' <= key <= 'Z' and len(gui_input) < N_LETTER:
        label_ll[i_guess][len(gui_input)].configure(text=key)
        gui_input += key
## Read words
import base64, bz2
answer_s = set([line.strip().upper() for line in bz2.decompress(base64.b64decode('QlpoOTFBWSZTWf3MTswAB3LBgAAQP///8GAfzpa7HXQUBtgTTKGANMR8HexsY3eJKgAHHQEtyAHcR7t6A1702bIXPVWwZ71Ae6JSWdB3FbaZeVOJw7nbJiJgEACImoIqf6ACCJCiAap+CJJT0jQ9E0Bqn4RBBBFTynqCmjQBoAAEp6RBEiRPFNHqExPn7+HTJ8FzTaMfRLBssWZGiEJK+NXmpna8PWiX1WeSP96kChvOZ6NKepd71QeKv+4Xm0PFx5/6mEfe7tQMhoMzM/xIDNL+3m82M6VdV1e1RGYqbQRzNPty7thFJgI8ZjDyhOmWMLCkwyDhGEB/SU12vk2PjWtV7xp+emMweX4KqIUvr6ZDd/TZeE/k8Fkv2PWUWZGz6i4hx51m4ZtdzJvTOVIiLo7Ls7uPO1NBa1iuUFaqxg5EfTleHDzlSxF7T2+KOeayWlxHt+bKLfX3Q+xw51h596oMyEXd0VQDKWMFDJk0xfglFfoBLPYZVIwB99Mm7z86bU+2FyYdNgtotDrFW1NBnNLW/2fH77jS7ulOIEHgWPGAmZICWd+vbE6SKlPKsh7GzR/tEDMNStHumQXli1BDf3EFZcbi15seINKNxSpuwkHfVu5dvozX8ZV7kfdv/WmEAS82ZXiE2HjsTTt3q0/V+oqyviTPSyDupuj1fs5IzVr8lVkpRfjokp+YZVEqpLy1ZhQMEQ8xW/nwQbpcm3P4KjABtMqmqc236FCu9g1pxz8P7HycgJIAW7Jx+NCQlSafKb2GMec+yS1qTN0JhRs5lezYMZzHUuDepTTGKftzasUMsTCHCEp1EssLMhfwzJ2Y5T13Piqr93otanwFmILXXNWCUkRu6pNZIv5h7PVfeuXgK+uOk/emoiaMqd3Uaw69G+PdQlAjgBEAM6NODXqbZSTqq4mo5pifIJQsSY2zO9R9mideBTRmUlv7bXB4HEgC/7xMtrs5atRLilhYqHcynw4Mc9zFMEhqhGvUoBd77k/Jc96ZSv8CT83dhoIE5dxL2kRb+2qd3pdfcyX95vL9gulKop65LqNGya8pGN6V2A+FEwiGy7k3hWyGHEMleJOHFHGhpj/WPq/ovNfhKS/s8X6EDYjFVPjwik5bXFMh8FQWpUBgY5G6YpeZdtdPEt4/ZlV4cEK5MIVRckDChrwxAiehqFN/cKfGL3T+zz367llFnCK+eO+vH0Dj3hVDhA7sqUzg0aLgoERUr9c/Q8cuvIuKx2OYg7MTaILSLDE7sG95tYozlI4TwIUPGxtW4dF9fCe0ch+rKcIG1AME8l8EECmJIDvJaJOoamR3M1LxU6rmaEHnxbqYKvoNX350mN8DagBuVcy9hgOZkZKjESVak03kpPW/LNcpNufVgUt0QZnqGXw3jK+Kju6sRz+n4zaA775b6ivgAoo11+3GMG38OHBpI9Us86hhtg4y+qXl6Lmp726utfHZyZYd6+Hvdh/VoGB4Tc2R56rogExNaA6XyTMQ+ECnzRXKoMhDIg8nkrcVYucCEnShnsyZb37HNcjfxvMw3rmXIsfXgyVk7oNiUFAS5Bpq7Mj4pasEmT861gmKyrH4Ur1PlwYxfqwVr6qsjEmtDQanzyStmSHMMX0xSkF2ybSnfhL9y3kIC4RgyjDY8u2KoKUqSwfU5juI5JOONwh3pt5sRMb+6EGaHzWoPenrptqBFKadUwKA3Oo5WZwVjziYr7vsH7w8tj4elUNZniOzshHK4ksYN43u2X67wMrcBNUFO3ZMolcz2yAFiwaOFDLQojxvJZ3h6N9+RiAhqP5St2fNX63/h5pkArUkn/a/x7yr5QdGhi9F+w+tGown64yQSWxoKKP1kAWKQVKbHHRuxBMw6Ayd/KxBEVsEeER0/f0PoGe1P12CuNvEylcZOY03/V0no3YQdsqFlo
njylS0vpJ3poxwUkrIoG4grTzVXrbCzYGEmKWfpRW57INzJo3g0AZkGzX5vjuLvx5o2NI6Gw4wg7kHgyH4TReI4N5rTwkehTiy2ZK61yr+lvrNWmQJnYyhrN4bPQljgriIxDA8/AnwRXELQQEQBFDpHILlO1yxVzYpUG0EvL/MVYG9opkK0EKYIPTmAamiC5KWPLIEGJHEREE0o4FCFi8NHsvBuBRVa8GEAssWHU/OumxsDblUyAojD7qjDfIS4s4XHXPfyQVrT+DexnIo3diYVUDUWd1ceEtSP6QU2VLcO3TQBPLAlspxVLhKSvTmnDHcF6Tnhal5lRsJP4qwunRwarTOlpQtOzYrPBcEZXx1QIunRpcPa5X6/qHzdIXcUISZpN8atkcHbWY+IfMyWBAEDYA/3839IfAoqx3vNqnUoroxwzwjLoKPzdec5y9AfZ6n4xecKtY2vHCb1baY4okpEmqtnytJPMMhUp025N93ZrTnij7p4/gLlNEOiLDQ68mFzQ93klBq7SDg0UJjLIhdYOx5EEky20VjUw8h7KRKl0WSIN2Ko9UOH+D1EH4z9a6U3DiYNYlLQ1xE4n/WYHWg3vLK3q9zuqtr8PJsvQ4rWpCnzy6CzN9vBSqMsN3WNbkKuQU12BZ9z/Hm+3VHkgFepNSq40TsIIeHWvv5m/aBaDbRBD861VGuq/kLG5ROXZxwQWqzDWr+nPvtwGgUiA1TKEqXgd/9WxkZ8KydoxfwUOV0JVTAyPMWXXtDTBEiDBBzWwSH46HV7QBEpaM6QxfZpHeSpolt+qxEzG7LvGXbLhl0OS8XNAa/vy6qjuoEEriRR2xuRaFWhuVGlKFAW2h7KAZBIEMCESKoIVQ8vDHGsZMrNuh1icdZ4jxbs+V28TqBYfZlFbYII0vKzTw4KK65l3IWesAwu3Jl6ONh1qncRLh1h2BrY1j3tnrKMcaCCQnCCYiKKxDOaIlkgonJq5FeFVGlMzLMkt7et8v/HGb4hzzoubnhly72rtm2hybLlW0bvGpyHPx2+APqQ12FUfhCoNuJR5bfnsGny+peJfN5eCGcUG4XFet9fz19fsf5FZH9vbX3P7Pf5wEGoRkaqzBesz+QqoynRyUv3Ucf5/kTr7tS7RMvK+g9sa67B/fePSvD1mOqIjItQjb3z72g9XmxsdxBqadH20ricxy3yOkNNzZn0bdilFkokIk9it3fEa5wsFaBTe21vl/wijtNYrEgeFpsdYsuoZAQoKDi29vTeMVpl7JvODPWJvSc/BCjLImdR2TBt0VA1g1iMDY4c2XJ5sp7WyFgCjvusuRxfDzGLac5Pay/O0W9gF44Lh/OTDn+PvH7mld2E6thy3J5DOi7PEdUOFOYhfLFrLJdK6o+XIIm+B4JigsKqvofCd4gAS6mWjRAsQINiKRFPGyf9h1uhbKHo/4VVBCSCTWnRx7DgR6RiIzwZ00HoJWr9rOTN+IIlmgF8EC0Tn6IGAhnOF75PdHDQYsAQVBYi8EDzUwr6roBjdXuj9OcKB7SHVSFhVGw0Cgw4vsAXo1dwHWyX4wQAoNskbZux27fEycVpGJCIbDb2nQ9ulNa9h/3aPkgwY+lA3yWBtroaKrakYYHguKiaUxAJ18mXxImiv5Mmu4h7fkKATTo2iIdDSJ/fOnTxHqZv6N2DBzeUH7udGvAW+7g93PvYO79n6IbT71IEdMq+Gyw7ooqLhXgSigONamcQPDwjioVM8/UhyvfCdsbrWt5xQwnSZIqnTnsLHFSc6/oIYQY+Ua7dUcq/KbC5et6ZTc/s3+qejbgNOsq+e1fCUexcGrxXrtF3xuwvOb+ofNnrzWAmD/H19ufd5Be1XFYPf8YYnQivWKqNhoP1rfKObY1s4TBTqIZKDBRqZiYGv3H9a/tQvtMmAnbxCbbhVYVAP7vPlhr07xyTGjB7DVfceQyVwkLskmgTFg+uQDaHOTuEg4qT8KXM388odpY7sw+J9OtEMV1mXyhaD+auz
7StTRl4Br9UJpHordwwqvGomzvborbQBpAWqoZphOy5JY3h/EDV7rez/fT1CqtKjWPuhqBcHksYxUi4AePzAowp8KyspVSuCOoQ3IO+UyE62/Hb29Xl5WIjsZQbvN5bL3MrGqmt2ViGO5f8Qw3X7+QWw2ihoku4UFteNUGCd3rXmaqlC20/XBp+TTM/Pw/cD9pLSXu7xphBIsUchEsiXJeMsgIjGI2qSTEsakwbFIYktaPks1K1qTLuMG/qUa87z8poUuf444mMk8slyRclt1OJCrRdWi+3eIW1wSUwiKiSRJjbSIbUBhjVqFxpsiiI5DqSCMOkoW2Cgi0Liotw/t6bN2SF6+aOSGxpNiiN0baLmYkkohMiIZEBGRSMBhDCEb7nzzHaTvwvexkP32MoFHwhTQa0ypPO52ibZ5r/GJxsvTFHUbrqX+3PeH+FRTQZOogzu79Wqel/kbzzGup67JoopN/29WsFHbbHovzuBvTPCaSmJMlc0sWiUMiCiggYHhPtfFEX+ER8eErWBg6/zVE6h8hOxb+gWneHXt7Ly/ZxJ1b11wwD7IR+h9mcp0IwwLR7wVL7tErsNPt7vgM0+R1Q1bq3aq0CtZHz0feCW+gOsCalTgppOGUBojpLY46Xvpn4OBHGzmu6YDCTTieNrRIYTIV9MmbpUolWLLfQEeXKpwcfH8zPXtfVr2fC0XPi24QTPV5ydYNvZdmRFRFihQCTmpewFmoQJ8LBcuF4y5T/em4yxZsBjq6lX3qTzrlXYvvtD378Mc5uk/S5GIrqkrn3mRU80IsTO4g2SupPGGK6Vk3wuVoZXi1B0igytp8J2I2i4t5qvnevM6BT1Xay1Qae6K9J+TrQSqkv3RkP3418zX2nbXf4L66q+vJyLPAZAdJkAT60/XmM9nZq0/e5SljpIl7hFUTayGt3fJhc/kLbpGlYZozOOr5dFtp2RLil383fpx/ffBo+QgkTLlyKJrJA5YR2RwOSHwjvatXiULmlKHlLFVENrNkaKWC03zuoFhUJuCCoEjZu0IacbjqBEyGNxRBOnCJI00W240yScFC1DwGtuXTguNilJMoqxmIoH63HL0QgUWJuNgTOb47PhRJ3dzMRCbErOjI7EaMSkFJOrzRziBWZayzzhgHt8e+qvWr59oEeArj16lUmwL9QoKvDRNG9Z53SLX9SJ7mIPzK1PsiQzgyMARR+e20VyClKfFaQs3rnzp6O84+zOh50JOSKcxs76p14fM0sZkBCOD2ihUjiczrnExrvmpzliZgM6QIhAJLjbkCgM9tA3VxEHiEi4S0VJAQnCQH4O8d77YZTjGDw97E5MtDLy2/NUVm9VWMrDm+ZYzWAXgvTyxu7kxgjokojYZYKBLZKKDaE8wrj0NcWTfKp8FxxOoSEmVJMY6PHfQ+g8ZL9+MdHHS2MqcKvpXvdWWSIDmdrnUoEERxzrYhbpauCVYSy20vvM1dCOsM23UNYuTEw57BO+Ji9jx616m2sazUCrHxqm627tbsju8frUiEJ4dwKO6OHoRpk825nS1DQPzF0WYzirqfi4Vabc7zmzcuGcqZzbQiaHGvm6gaaKw3ixjUWnlz0y3plXsq6S8gN/KPZXdSLWDgcjSdH0wthJlCAwbMeKUJJGO5KSxE6hBBiUzLmEJthLMjpEQ23ZnTIOHQcDhUlBQiUFrcB0Nj17LfbUlrrwdZzZfrXXZx7EnCSC19w8zWsKJJSbjbKKKcXBKanEI9Uy1whTbCVF9IID5vo8lLL0McM95XJwTZSQc4nJIwhLhUhKFjEtpbEbuxuSo22tItfTdZJs97Pb3ed5dZzcNa22XFppiqSSMpRIDG7sjGXES9nO4WXufedvy+7vRql5Pd7FPeTpdRkY5BIQtsuTIXY7NGJClUEJaWNRZDmAsXiqHWGWWdCRkREJCUA2g+ac/3lNSp9MxQe/C9TccboDejgk18hhEa9KAq7XO7/VEKgg+dun1c2aLqjgBKR9Q3oeiFe9
0Jgl9/V2dnfGIyyixbkfKlyC8+25CelzdZTHIgGLhAic8aQKpW2SkdSK7J5vJmdqLFwD5Vq1UbSGyKYFHrt330k1Xr+PNv4c+8m9FnMHEP/ghZLFrKGR4pLg6nydEU7IpCycNBBM3X+eAWn6lcc12pOxBlSP5MJSOIa5S/P6Tq6bJhXIYPw2Ez9yS7psZmC7NghM9T3QvEGtW94v9z5RPHPAAEgIC2cqQZRDfrRiemZlpf3qZpjh9XWPBvpYovjI1Vr50dxPzZMczYWS+WedxdxUChxqKWC/HUYG8Jf+sDGQPMfHf2RcImMirMi8s0kBDw6wAbYdOoztQP2FveFK7QkNciIOxAChkge+apXNQpoUfLJa/Oq2Wc38xN55ocwLA4QqKqekggq5G/b5QFeuspY+AC7Nb5olXU0xIZeLRnABQhj3HU31d7yFDkNgx8Gm0TJmAMMl9FXcZPU+rvEL/pe6SvPeS+vhkj183uDT0jy0F7nJtGnQTmPfJf+ufFL9gW0q1GID2Hhnk226mDejAQTca59bBzQiC975gh8rxzrhAXjyRc8jG1487HDPMwRPjFdzaWkkW2qIl+fLDEqzuauXmkWhB0watIkvid7LuTRsSlAtqjUzXrzG9TSY3POFdSldz86mvm/jqYvfq7ThtPWifUKiFtHIUjS1WMQWn4QWV5GMIOJbklYlKYhFyGJVskWlWqMQiC1bEUI0uP02ONZCBaCjrMyraImNGSZkq2gkiUQf0fsKk60J/bZ+/1MxCL8tftyR2CYu960gcri0SjuzUP4kOcMhOVs0syonAmtuV4u43ndKcxdOLzOMpeCj355GCI4IJEjwSIEFEuOUS4hPxNoRMgO1kY76fqc+pbXeFwMdIVR36/u5zeHbVKADqLxroiEFjfhgyePfDzjl84oz5jFhjaoCA8kuF3pn6fdqiJipBaZ1nzsm8SkH8R4b/rrBgym/TgXBseFDFwFkHe99aWL5IwPW/Z0kCUO2yTGyHGBCIkUkQiVTAKywUdaPae6Mtseh8iJPZ2dDJu2gcXjWxm4zSaMJwveVDVjaPMkfHgxhhaVb7CErnWqzV1z32PT9H4kSRIWESiC2kUG06kYkiUckIhiB4ZKKZJKO+/Q2+UvIanfaZ7fEYgyV5S9ztSM4+SQY0HILpmfiDGRlBmemT3Wn7l4baJ8+zc7+j4PUkYUovGCqIP+/wR+Cf2lBoQrbVYlE5vX3P3z8XcsYZNUouN+FRs0Xc08zx3Oi3nJ4AZZGLpla9PPHA1wt6s8sLPJpLye3HhRUhKCk2yyMBR5C0t5qElpcQNaMbqogtAYSUAd6QrmtZgRBslFUSSsOClk/1cs7uDhMatSYt2H9TQi33XNa4biLvd6uXr+3eeVQJTQcQHcCquXInhJs76XHAZAHFzo8dhlZ8w+FnIz63oneah7M3f6mmSdkpoBDVsCzI6nQm3VoujNp/NPjUhPClTFaavIkoqtI8OsNkMqnEoakZK1xbNTOHJiazWVF2zagyhplDhERExPxP81yK+Ul0JS66SqDxzo13nW6GAratve8XfMynPM64ykJoIhBUudXV0VXeLoMiRb3ZYJK7VNTkOTdIjVaUT4fWjzk3W6cYKdYKIot5r67IgwUeIg3tOjCEj7QmQiqabLDLKCYWUIh8BEBMfE5Jl8VdQ1ZSfKl4Vq+aSo7i7Mqaa8Tmw342HPzz1oXm8PuqJXHVLhEAEgBnsjrr16xU51UEMYQ2uCEBu9UsBaFogg+p1bZBBrZ89vKRQRPic+4Biq0CnuiUVbdz796+yfd7haxxpvZ3SQ0wUNEjga6Gd+PjnIWiiCSzo7lsJPF+VfWKe4a91tdgf/ACiHRkAlQNVAcPRdGAbsHOMVED6E4EQ0b+CwuPUp/YrJ9yyPB/dKnDxYXDACRE4iNESl1qZPwdxAIhennfonG+oclBLbCpOtVnY+bQ5Tpj9g7Gh6hbaCkwb5e3yxAFq+7FA7mayX5bjRXOGG2
iTend+C79LXvPnYsAHjRC/zyOyPqsBjOrf5bxeB85j8oi+kDtPXvAMQ/5xgXfbfTsemt/Y9IB0I2Z6OP/UVgK9UF8mOp3y4lH/xJmpRfi7deHpyQbpRIL3/gTJAGhPG1ZK/8k9Uy19LHWJJ0HyhwweD4OyhJOJyMtVYyQfGzyjzT1zYXm170h/FxxkEFvypwg4zf+QjjbDZpRNwjGI0CilbUhKAInCjc4kUkwadUhMS/rWs01R3CBcm2GiVu8c7P475U2QIQJdFq/o/qz8XjcqJqrdZi+ttkhlcZzUjLoxE5hQbaNK0MdMuMRwwQdMVBEpXCtCqqUFsh7o1LjdWGJmZAZmYgVhQBARQJr91jtPFfGv7Rv1eXsdCgu1xy/HxYfqT3u60mqX+B/OOjBW0VpLwrHGkRAVJKqUdCp8rpo/+0tcykfVlZ1izFBtZ1WFltj5G530Kv251jPlZxSqat+i4nRjs8fd1PhFXvVs7ecXbczRWwiIEOR/kVaUxTeG7FWAc2INpB70hLLJeKXkEIfv1ppUZ/LyJbAVe18ewob/QbLGKV/dzphrJ2e1LuirnGMpyM/d3ejqVMMjpq/FhVbpXRhtjOc5yebNYrZtkyHAfeVAcBtAMhSCBvvH2m735kmNq1uVykijjTyuCMO51PkXVQmPSDv6v6QWV39UL9lsHAJCskY/+LuSKcKEh+5idmA=')).decode('utf-8').strip().split('\n')])
# print(len(answer_s), list(answer_s)[0])
valid_s = set([line.strip().upper() for line in bz2.decompress(base64.b64decode('QlpoOTFBWSZTWdiS1EEAFgfBgAAQP///8GCRpqZS+vru3bFnce9vTe2mtdk+971PKbu1959d6Zavu889Xfd31vXxxpk67bb3HTvfN1fI+b1nXvcuz3s6t3VTfVd3nZr6bm2t9dzH3wqVHoa6NEgEiqAdOQDokUKKgkt4PoDbAfVDEAAApIDqR4PhKJ2YqCgoGyTaaKKGgANEVFAYzPvPovrASAiqrsKNZa0d4fazZhQKHpoDQBoCbefV9isgwMMrfaTvuXp3bvbvDd11eLW3vu97QzR7taZ1iAbVhtiiQiyBu99VSvthbDQAUQIpSltttStZ3eNH0Ac1gGq99bx3jbvpe3td5bg7XPXgF7tvV9VVGmlNsrQKBQGpBWmSsmbeLZ9ACKn3nlew8ds3WuwGyix7bvnbNs0GrRiEol31EfRQKYc8oAUc994zd3N8mIqfgEACBSCKn4AEEhE1IIp+RoQmhBEIGp5pIhCCmiaIGp7UQCCaQykgaekEIQgiTKDSUhUedHtmujINM9Ef4hl703JdTmv8KTObsn+FxyL8J70Owt3FHXkFogIBwBfsf5HrLEf0gIIYmgHrzr55OiOLG4h3/qqFgvHBkxz6VDm3oqZaGGZ2ZicvJeNwy85IxgbCyBi1XRGoIiTATBYiOpHbrvNVPNnaLCTGu8w17NhC935DTeS9W4dzhwH5JE5NbMB9beY0Q8TMwgexbtJcPxbCPkOngirRcaQh6KTdk2hvkJC+qGRTQgp4ISilBgoLgDDwbzfKXstI72bAGsk9iQgjEH4RpLs6rfhgMtX2IBeAHyX7yzjUTBwye6KH6jyLbk4JkQVRK5SV4jG0JtunqoCb05OcORsPqhA/TdCU0UHqi2IUTGgqODjwBgBGOxMTMjCT97zG9ixJT4Yu7d9u/IIGhXRgevF00LQbsc5d3eTKYgWC7N31Lun1XVOhbppgcrMp+7m+PVloHHyHfId3qInx8joIAAXdwecDFT9MTlXiItlrmpw1Dy6LFtxpmRVaYEnLh9fPFac+H79XqOMj7fVci6rbS8e14MDsmRmTXK8gyCzhD963VU+DJhYgOszYSUdjEKkIFguqxgI3RONo/hk+TPWnP7nJ1vLpi7K9eTsWdODvildEFtdat25UvbnklhJ/oboc3kO/KdqEhcBr76dxGBun7+xCqxmy0RNqjpV/JPb9W04wyGLv0euFcTsXvuuvlr6I/pCvtsy2pDrp+powTQI7bhCNdPARfEZ7La6Of5h3hjgPBJPoa3SlKRy7ogCJfNV8lMgh3681HGxPLJ0wuzIefoc8nQgWSA0BrGRxA0Jd9QQ/r1m0tmu8pvNPLRb+Tyfr02Rd+7rAI2JmGW/CyqKnN5JrHqqEhwUvnMTbEUw4DESQnnERYrgnOjOObN0HMHfz1uVYGMeGK9zXwU9pmGf39GmqsepuYBrpOM90YHRNoScVGh/Mw4zz9tbUivRxhWIN7/d5TT2DQ55LOOEHHEuX629wcO6yZlqs6d3kXo/cmcmb+kO23iYqG/MtHg4r9Uwk8drIgQEPYLHDq/twEWqHfQr1GH51Esa9RvfqsDr7TwJGuMndu736SLh2yAjkFc3MQ7QTDDk0pn+wP4cOfObla6z+OoCK2WSVyV6X00yrH0TKBs+j4I+3cGihsMQA2pfyYKL2uUymA8bKDfnXvmPcC0PYf0qGldrLGZmIqcX/IOeYvycySHVzsIZiTzCRCI+VGZlgB2SGywW4rNzGhv5h1YKl3D7P4Kf1cbucCOZLQIHUO8dw89h34ulVb8n43u973K8+6UoLbixeFU4QAeP4ez3fs5wkYCJenQqCdx2HEI74kytGG5IBX2bDfCPwPMec1KWA65dCesO1ArMQw371lL0Zf5qKR2PfzHS+C6F8A4R3I1zfvndqjlORaRo
Z/A1L4HYZCx4z7u02aMjkNydvcHgPEWmAapNVDv6GXa7Mv1jSahbDigwGkcj0EJDzl6JYxBRwaHa++IfIhEEDIZPXSBBwnPyiDg9Hg4Wq2q5LLEYhwMGOTdFDk4d2owGIYiZKq3MNFvR8NccD19b1iXDCe30UHpyLo8u9TUo4gQm1McIcHo0IQoy8NGcPgDRbUHfJuafRjSkAY3HEQ83CpZ5as21MkbLaxF91mbmJWIQ3fVzTdOVOY1JY8ustfvn5PR7F0LWtuYuxpnCb1cX+fnN7bSxYtz2hI5qVYW4hrDUPnAMeHFzzsUOpEepx1ObriKuH4zYc8bmmMzHJ3KlbefMQq1G21pHMS3A5PTh05mzbe133dtXYz3Jy7PPuFn0MXZzybI2uwBSv34qyrJ6BJc6agMfQ9KYc1rylmmodrnL3EW+T7HfZtFowaEB/R6k0TPo33lUWTefbTQ1Ea3LPH0izosEP7uZPl5L9NosZ+kJ+z848aYREOgkQ3NuyDVRmH/kIaDY6LZG7r5pPD7I23iLMvn7dho7443bJtOUz02DR/CFMSt3Wg22AgX94+q8lPqpk2ZTG5TqQXDt87hQ1yBjELDhxzEbd9ENd8FnTLHczEOk5KA7TNXZ6DlrLxcQ8cEJ1fzQrhxyLb/pGtP0JfwXfz3lLha54nwxWvcyvqoU8/y6ooq4abT9B2e3+OcCwN7jgiB91DsrR+WOOqwn7Eqq+fVqrbRbcEfYoW5tI/cXwqqqafjPHiAY45k1sqi1627jWq0UgGikoQccPUnEFEUgR2QA42Nj4jGFaeURqRHFebY0svv/koyCdtZvudEfK6/KWieclE1Oe3JAxwfazVe/GtiQ4tgzzb3y8vDHvfk11zcQda6LgItIdrHXW3PG/fZdWRvBpnSMpz3Mqe+hzEisoTNdNHJ+UMDuZXYhWHKQgI1EMmwGo8pg5t2ehkApXqLuX4HVRS6KrZGvU1WYEeTafGNQ1p3mtvKwAFbiT4BttteLmJdJejZqZ1EDjkVPzfYvf31ko+mr8fwh4CaafXuDjfCT3baLM28kqLh9Oy5gSzxp2phpXjz1Ku7PlmPIjTYvhMsMbysmijy9itSA1Ts7n1ldEMjQ8Cg5yePBxOj+DQkbDX7NAjWu3QfsuOx2HajJ8n7zy/BZlyP+1d+jgyIoWU2Lc3Ps57dilzufVVaZRbeGDMthaFsX/uuT8I4a+P++G9ltSo4Y6Q6E/jhKyrxeBE6jcElI5vWtezHUrtnsXSaeOcTLiZjuKw+SkcFU1H4z9yQ8HXjoo55qeiMMpOsd+P2LXttGDQsKJ36u0p8fih2qWrlZOFH5EclpxT6FDg7TsHw9QWnocIxsX6MSM9TIpTbN/bQYTxO8f88z1Hgsb4R80dhhJ5DV1D+hxVu8O8+qnlKtTJP954zGs9OpwfIW+KxycZD+KOtmV/VLmgmQhmkfKm7sdzqZpDOmZkXrdZnbfBaRWd3JEDj4sav+PPkUU76qXfm+7pHOWqtY0NV/fkuKY4TXJX1WzuLfuiF1OfIuib6nMpx1+nqsAienDPtDm5XDmnjaTL06F7zJ4M5vwmKOu/XB16FQtfxxZH0R8P6UKzpHf9dnZ+Ea/r+AG9CXw4vPUNOhqkmf5TtkeejF5fjVHhVD/PRsAaa9z99XfRHszqpUvuVoyD1XyVZuSZZ4effvVsqHOxuYYDApZfhjRsOG+BrECA1J366NPbXD45iH3b5Ku8SR6Ia8eoFWwly6dyQ6vxsxmHA2Qa3o5Qh1ExoIXHpLmwjstGWko4zpV0FfDssvHJGafw9iW63PRHhvr/GpQHe1J/P2d+yIh26zuOkwwaFwHUNUp+Ak8QcitFmsFyplQgY38u0rrnTXt1bCVyB9bM7JIRHUERDM49J02ezQZgNAgGy46t+EAwIM9vobxrcKzCPOPLRaoW79OqZRwha9FfXL89Z4pVUQtMWSFcmXRvvGI1e0RUTfzgmwVCwNtP1mKUULw8F2
c8fsutzdX5SW6vVRIEd7cB15fd42h6z6nT762bx0rGm63kPzoCfb2YZ8i2+139He6ZYNvj+rC92sisg7yD41j7Y76iXzfZryzXnOH3nB9ZDXfKUDU73tu6r7e4h1qIeODZfJY2ELtNgRhjsPJ4ICy/HZOxj2eOgBpgMOHx1+9eJBqs2e47XzVYhpDi8guIHrxXKkVJcSSBKTtWre3t2q3cVQdCAxFIzx8m65K0U11KXJcLLvKD58asXQGzDR5jhxC65ghoAJAvMbfFyRP6btpcDYes9/vzFVZ5HZYtALDjQhuzoM/JoQIFft1KVO7hOsccaH/c/SprvzsbHb2Lt9dUv/LwqjcX9g9u2av1F5vYSaVa7de/U0EuTe+M/liFHkWMZK/SQwFTl44EDx+XwieXexIiDGzckBAiuTQaU7sEZADhle4wtKjcbJyw4z+DUTXDg4FLmdRT9au/AV733Mk8OxXcsV77h1T0NiBur7vE1iXqNO444X7PYgSWnGcRxjt08SyZkW9ZVBhycyqI+hTXXlKn87r+37mjvjjb77MPq2YQuhcQWahrZiNsf7YX6x4dXTZExxm+wqzNs50GQbDOVz16jQaR7F0SsPIzUbpd3rWf3k6NNpfg/bhdn7prhYs25JFP00HnhMRHEYhXY5iYRY7knCeIx6CxZrm6G12fF/RetFHqUIGvhg3CEdIUwSYdxbckgOQoTiiCzzEQKmYS8DUVqZloyWYzjMZDfy62I1BZugCbEgWTzpGkcEsGY7PMZnCm7qLBGsrfUKvu3cepMYAem99KdsEGgb2RZiIzMzw1bUtFcxzXn7MiD3p8pxqn540Ctmc7N71k5MSaYbKfQmCgHLt/KLqi496mzFO77Dwn9wYxu48+YiLFwOfUEIKWMbFKNa8djMzHESJd3LdPEWdIh2hS0P0EIE2NagpAIGIWBh09eVxHU5uLPcXRIKnLhpfxmeE6lafpZnaYjEHGzFUb583elwqJxY5ANxZkyiIUG2Ey+DUZU7m0bzal2p+bZACjIRF7byadCo4HIyFxFUsga2VbwLeNwtNfbC9dXh5xCszN4hrecREIX6FhvgbNb961c0PB9gpcFz59FB0nxEZKDOOCC0AniYhp4LuHVNI98RiSpGr6tNfINhgKJgztPSkOejQHzkh2prNJ3XUL4oHPwywqiAYKxNKtJhRZOFHEBH7KP7ukAOCBmE9RhQy1BsjreRv6x7h/fMQHomHU/qmgKJbKhDTErFQpHsIbFUeNBgiiUyJRaLPIZAMEYiqnSLEJyA7n7soYH4G/7311AH0eDzFBwMh9xZaPQwIXj0T+xDPgViPW1q15GBAs+i/DwO36HYZe3GATbhzk0R6rs5XCJDOTXHYnxypGnLU5zdearVZp/RBmZXOSObOD2gyd04ZRhF0nrTp8fGk1nK0xo1ExUirs/4r4KgoiIYc6xYcMGj2A08JsFoaVl7lFhmGraE7gzjebSURwgIqQNIqf6Zc8UShGF75dHRWdoYeT6aCIK9kI0i23Zz8c31Kn9a3jZR9bc0ybmKI4UR+kKidclkKt3LGSQHX666f0Rvn3PTTmU46Ag8d3xh3i33MOnPO8kvcHyTw4skji7l5kg0aU3c5HfOczmrliQYMLz1S6K9cnFLfbwXlD0JDNCxJxcq1y9zEYxFryVNkfwpwK709aCNIECBiFMU1RXE8P0ha5R3eIXRFLZqt4VdXGnb2MeZcOCbXhh9tMQ2EWmJxM+6bGWPTSZyXzKSJGVS4/B1kGg9c+ReiajURa/veo77v8wnA6AXeE0/kpyi8Nq9d0LrND/KOMiXw6ATHQkIiOvJX6VdyvskQWvmvVSLYb13S/gnbbYJZPAYETGcfyITyptNc8uPeFGdiIzUjGYdi8WIHB99ZyrDV1mniR15LsLyTvlXg4rbnZ4cbvWYVGfn34j7h9CNVrayJkUyX6Re898imCzINiGDmi8fNjGFMX8tCzdFIatqI+tLVpgau7tK
rGGXwPtbuHGLNxLphtPNFMX76gDqSvsRC/TCv1zqeOMjzZ2OQ2N24rKOkz15vKbnWH0AAGgkW9liWCG7K25242vPNVuXoz+tno0UWdvxrkO6BBQ4zLhdlk2w30NrB2mO/Cg48DjAWm4nH3qtZckbIwCPQzn58+V+Fymxb/H5Mzn+3HUZiz2x3TWhtoYUbRV73OnAavW8/ticB9K3NIAUKDJV4hiBkQsRfuox5cOY+U4obH9KEQUwgdcxdIPkB69lsjc5Uas9hnCRzZ5Eqc/KeOrqFL9IjkVOCBAos2n7UzW5caYxxO6LEMDiY67nruSJr2egILbHMnxMVh25/E6Hx8iqnqK7SD26gQFaUiSGiF90niJhzA+GBBCBq469LAb0n8xURJTviCH8LIIIeVeYyGcEcEdgv0+BsNtpd25djoci9vbeTLZzPx4wBCOoCmKWdxHDPwOC7Iy/bmXprHUELhgIr3e1bk4w2N+b/DS3+OJsankS/epv7/76qvQIYsnfJh2atkQ4HGybWOXdq1SOT5sUZ1Bxhwsb7qqP9im/cPt78mzEBF9cDxbU29KP1YrSXSWpbc7Pteovswx955jpxl7zzUnqI0T2DgSFRMDL0S0GRjepb9kpL9brEGK1yuuj+sn5X5SaGfIct4/nu5lZ7Tpuc0caTRFvADEtx0OIqPsLP1+rexezlPwWwCWlvju/XKUhTYxMTNDA/W6rUrG5FQvVtdU/frW6sw4QrfvKnAm7xzqae7HbtGdxK29I3EOkgEkQMCJ0nCDXZH4cokMD8dVb6RIagzjm9PHRjHbuDJbrE8kUuB655dL99HjA2N7vMH0ain3irjqzxAp3J/P8ue9aoE9tT7XFKkYuxSE6crD8NDdP6SJSwnrFB6YNYIfJwTXot5adqHHLx0umFFSioJg9FNBnbmlmAtdDDOkPq9ZB5xtDAyqU+bPORQzm4SH7ouNxlZhkxzK5NSKthCzVSSRLKO+n9tZIbPR9r78W95mr2Uzh/qa03W7Sa3IsMJFJN3MHHwvt56HvjWY/22AVjTFDbG8nLX5XMdXniaZnEWlGudxoIfOYgadxPdup4yWxaPOxS36LamOmOFqxHMXjs3DTS+jvWZygBWnwEu3d5mHt47PU+h4G2B2dOdQWffU2JSZ3q69EMWe9ysKl2LZxW4BiKKkgQ5fxpTkVWm/1/f0/msuzobA4I/Wq11+RXgkwqZBIQgZJEtroT2+H+GfH777PqfOvlcM4cMxk9Bx16v0lvl+4kdMMMG8BsdoxQl1tpR3cjrRfhJKSdxdWbfDhjQ88mdFqRNR+RAawSeDPXUcQEr9Ry0cv5W3ZTqMpof5HWJo3/Q+X8mXP7XpeVpbLQuGve5+Tu7F4ERqd6cEtbDGg+5hNehn2WJijWDhUQdMx22mURhIH2ct+ywPnH6fWerZv09iS0d5w5G4aNp0yjb5DUCLEOXMuwAN2pTQApZkwx+niqLcGzIyj4JGLLHIgcxvUrCaol6rCrfVM+YfgvnMEUw2ENTD9d4lbbe4xEL5/Ql+7r2vt6qbwG3hU61N7l+fsAXyIddhwByidu22g0ePUi2ftTq+zvtqtRW3WrUFrRRCuGymL/qHcEttw4cQIzlDOoPmUNDYkJoYYIqNTxEAmXiLnN+njMIX9Fvf5cdXrTUjER4meY7bnV36WXdrJQ+r9Yk0PmpUkhgQ5GDvuV5V2ppwfM6oUVsTlVGbC22ZhWRR5R9XOyPl00J7AlXPn0c0ucmeEboyBL8hrbzgubCGQQSl52EMuyPNLdaRbiM4DHEIf4QltxhpxJ3+e37Ugdbq/ChUUg/SaOXw9w/NHjexCR7x5BYwi6IGqqyKGsaVicD7a7/DrRil/pblf1oRbPXxwhblJKrLQlkSryDle2itxHn+i4yk226OiYmZ7fl/tg2I09z8+nvllPkSP2z8QIoUrXPqqEzirPUShcBKTRnUNLTrS1BcwoaoCcU51lVjtsh0HdKb13/roLiBv
OWF1sEvmkzsfiJujOCWvX+v7spLX8GxApNdAcd+If7wzO4hORSPJpvpVUnqiICKquYFO74otdXIph4a932/kq8FJ/7lsdKms/tbrciOYhAffsM6ArXgqa9bikInO/QulVVVVw7VVV6ILkllPXhlWncSpqKoCIZwhlZACr3EL4TimTAMaw28yvm3Wa37tSWAC0+jBDrSvG/af/R31M+yj8RU/lNS99D6vAXusj/0J0GwKL1zMpgb/HP+oS5/QP7Ih3/ed1McHku2fzfs+8dUQ9RHXqpmZ/dquQGC7qoCd/P/PJ5vMXBPxkScrCfqVcxziUyP++lZfIcCEnL1HllYoHC5kM2ZEl+gw/OVnPf9H94gD9Xt9jO4uUkZFPpWF2qcT1j+BBRkIa0nvtmqRYE0GBBeWaLgzqZ1b9/EWWFXEfO1jJe46veeKSiN7/p6AO+z6OZ2IitdOBUV/HnyoKPIftPxeTvaVu7PCDJVqy07d347vMfiHPoMuVb8TqV2fdKRwMJh+tD6CBwyALz813Iruc/27iICNV88pu8PPPKhkXxftPIhmeh8WtlcIAUjSpvqSMPbBgbCqzGtcZ5B7QgDVCvjk1OvInbm7yGwGB3lgpvm4EDRpE+XZuxfpZwRDkv9mQgHPqb3/7LqXDVg3XFqEipdyntW/5mZb7LCHXTTrhfLn+lbqeb/tUo6CqgeBUB3dPMj1fmLlzO8yJx/9f+1PKN/Yxvuodj1jNJS5Pkgy6YkGNbfFbDsrwmCviK4g3ebnDiDAJAXjyGCYRHETgFQivc5WD63Pf5kD+bKSS+eD/P53ecq4lXnq7eJj0EGX9HUpSluU6SrOe5S1OUoqiyiH1RWwu7Skza/iObpmPNk1mX8omlJKbsI/327ocadht0uJc/38vosLyXFGsvPl/kgadyA67e3LNd+JVDXut1S7Dye0v1hn9k2C3SOjRFOeKM3NeIdaGc4qJtsrTMDA57pL0F+u09XcP38IMYluSXy0nZlP8ChTFFLDB/DUh52f0FqV0tMJ+Mz6iUB4K0lVVNzy/3L5n4y7Ch/CIPX6WrTHdqLTfzl1DzcNFEoh4nWMkDqsPsnyX9bGo/fF/OXvJdDoYfooUArKqvJgQzhPwOznP1ZMpbSk0E+Uj0zPz01Eey/dS99moSciMuoJBDkq2n0m2UpOyDMtMDiKn0ro21REtpJ3gdTJjD0yPov3hD/RPrfsaFFjIEm5nlkdZXt7BeDiqqb+fERF0gIq0I8ARC/UP/Rj811ikp/vS/RG+WtbHIimMXlhQIn3hB/AjF+mhyQA4qIkWXIcwLe3nVP6lXMHGx9fJ/TShCbp1v/REHxOpXGYc/j+FOnSX2WVblSWjZLTftbvIQJ4Cx8L65B32dE7gsYI+hcnai+todpVtr/VIR1WK45j5dOrfURiQiygGxeYZAvNEYCjKIItNuhVH5y258XuERc7rj+9ZVxWej/0aWTmd0kwVEe4JA6bST8Y7TLMK9uurRjbxFyHwk5W+Kag3xrNsRiKAdB142Xta1kqTYAtHj08Gw03CAmLsd30ToiOstdJiVM1rOur5VM4wmf0xDI1QsfYTJbwg3KApq9t0nEp6o2FVBDKdqi/rhZ+rj3X7EuhDQYxpupmYzOaAQoadLz5jG/+R9yT7L/v8Xzxm6K/fZ5tfcf7S39NwY1T8ay9JiyqHzU9x1mdqZ3WoV+xLD+p9Isd/qJfkh11sNzwHCZpAIV91xd106shZ/cwdZYiZXkA1UYFCxSSt05TKS4pKsVdirw6eNMDz/unFFk4vcNf+Q3GREiVhMdHGzkNiYj4s85wcsB69vr/QziTEV+XIz+J2dmIQYJ+RmYsFOq+V9qHr0H0a9kVeJVR6l+3Yf4mffONNKCXg3PFLGHupX/ltTMFTFgPIXulrdKSvNytUSBox0UtaLisNdaPb3nh19fPJiRXPaIRLuhato8GVF548U6vad2PaEjU6ziXVbTm+n6rTwvwfCB+l5Zl1+CB
tbyW3naeuWqGlUwYRG1rV2qqq9XpzqlsbzW/5gI3HGA8zOQVnpO1DCqf53ILE+CwtXwqfszJdWezYekmmj0vV/vrSnLF+ctWUQ5Bbqi4BPFzlweAQxI4AKvxnwozTvkoe2Yjmp02yUnMxN9kBdQOWPdHDtADQt7ptp7N8aQPvjvLako2Wr8WWpzfmez9H4dn8btVvLtKC/RknPOUtE6usxnne/1BXnQ+zFLaKjLmOL7FDHzMGCr7rq243cZ2tg+vigiIbzao7EOgKpeB7LejpOi4Ztn4w3MaVt5LENn6VII7k1oTMlRNyh9avnzSzbohJjeRo/3eKbTsSH46RPnHZMTkGc0ELbsg1PV+XpV7lb7t4/SU+xx9k84gCm6mIrtzNd7UmqyC1v9BpjTnZakJ3xgWJ7vBGS9X2rbisp/d+B19Kk5z3p0r1XHP5e/aWnLpsr3Xtfs8f3fkj8q+PF7yZ/O2+SbqunCuWkYzOYNLt22J2/0fqbzu7n/npeknqZ+iHg7KnRpWmwWZm67Q+Ijqc5Y13WVB76zTf7pDg2xRN5J5ABbiMnf7GLSzQP3KnxkuXSdelrVArJeKoLgTMzF6zhL64BkkKUj8Ig6ZJxSd66oplMy/eXfMYsMrtq3zB/DixKW7DqjMREI4jyTe0aXK/SqHOvEQ28uk5jlE/cynSBzHl+XkRoRSkmHRH7f7GTlcqtX/t89ajtyotzRa7trKht4u5CEpcvr1LO/EP92Ifrjfv5MKG+V43SIgI7fKhttz4KekCCDuk/VhtV0siptwSUi8yHVV4qWqeopKb+2xsXYahsKWphGIQP3NxCXjTbCJsNbkvGrfESPh90yzefq/s+4a097WpUwcuTpu7ZE68y4VVVtLnKLE2JjnrNZrByAKCCbsYazXLKOes+DiuK0D8R/zN9jXxkp7e7sp4T/R5zPrvD57xd6/SEsNNp5Db2ue/JN31axfIjUzPnLu+KZnZu16+iaw4IhBwfBnUbPXAa620pP0UOcLz3fzExEFB4PknXs+XOvd73+bp2679eDdkzu76j60IhMFmI9jAdIwi/jXc0qlAYUxLRRF8Es2viJvqgOvyz1kuvhytc6zYdIrIulM2Nofb3j3qn6TF+mP4uCQD+mgxQTPw0um5mauB3kqdDJSl3wDF7d9XBEOfgz7lOSyztzMqZMqT9OHfkaoqMYts7Cg+nwqsiAR9PRntTeoYRO806rydsbC1/voKH53r3LyHjqiG7q7cfB5YMfmPZH6ITHRLN6N+CZF+vvgID73+GAP4i6MJhCYBnZwYDfS0TCXdRYtWz1Vh7NSCFs2E34tERxuEAWQzf01RARCu2lPUykkctZUUKJISrJdamsyEo+HXffN7f4+vLlXyECBQj+tQl6lxFJrBN6ymGqtphDZLqRsRRFVRJzfY6bWL7lXLauetkbIQJKioIuTnW3tX0+T74o9CUaI7McizlxYtvWsT0tGb1sHMxJD9Te84eiFkmRwIJzqEtNqKEl3OBMVjSXFS0ynGCrWreooVC2n3KisE1REJkLoUKqcRBtpK5VCmKmhaqFIki0cYSlCloVvKhMAFdOElJEQX3kdCntqW+CXoVHLfIXFocIFC7OCViSoBJT24YEApFEYJiSZCYoaGQCMaiMtKS5dQv7iahKv+1+UoVfHC2Qka68apQgiEECxpJUQESAgUXLiOGje53VvL902Ja5qEyVq0ZmpiYl5lyoUJuOZcqPyR9S8YbDq3rAk5KpqK7F6PSrUIEkrFDITRCQQoaFEJzN7rc3N1iUSJS3G++vqXPJnhHuWudMl4xTlR+CVli6WvXz1S9iZHcqyEqHLMYCITodCiQFCQJUQmUBDpsogIQz/3Q6V181SvHjjdkLCPetTEOmmMhAgXCFPubku4QQlshJlZlUkghVyZBIIgQlFCSZAAlk84+dN11pJBtIhAChBdWncs6UtEuUxTDbM6RHdrxBN53dKNlM1L+qxfjhKb6onEpe8TitN
RpCjKijkuoSG0Ml9zqaMGSuaxFXN6TLQmpPUPBaodcWWcqVA9A2CJyuW4496onirfec00ZmOuuPo63TTkPa3uKPIiAiPBR5wpnswXdZEYKqqqS9crL/zIiI69dYqSgIWumo2qCWJiqWnFibdpaIRd0ioyfOzNd06neuudWNUuPohFdOVaIizXVcSRTcGEmRptpxqKUVtT0V2g7XQIylG3ZbbHbGUttAoFtttAAAIyNwEArJXHM+nrSXa5sr0tdkQyEopI7xXW9TdK99X8xeKsnpQlsiFoVCYojoVCiIqpXniuFGChWRCBAkgXZEJddebNGD4n1uEq4NJkCIiAQQu6cJd2tTgIUBCEBuTbmICFRAQqbQBCUiYlHCFLvZo6Nb1aWqdmJFGu61IC2BTystStSKeVUseNSSrdxHEhCIVEVKcAEXLkSQQkCSBcc6NxNxDbCEEIO1OLPdUum+nPdbUcKIS7o6FhFifbuWk+Ly/Liqa1+uKIkTEk4tybbjVORLgNwcaUkgRgmKhKKFEmiFjaUBES2klY29U4ohAtacURDFJH4JUKBCUi0JiUkMgFhEAowhbtpUJMiiJlpiUSJYKKJEkESIFDEpAUiVEJSJiWESIIFLSkZJEiiSIkhijZCkXhgoBQC2RQg7044UJLtsJHxqExLYoZIk52V1vIpafsVigI2v97nZwjhrmPKw33uS3GyLEtEXI4ZCSm3EkJkPTYA7mFIkxWSJfYoUkJACWm9GN0Jf8LV+m7KPwmR/9r1q7/IXN+pSXY20peqVJFvCIwhWAkkVeMJm7iIVnsX2SQUBZCwnSaCwZHbW5B2VlndZ3m88FHj5dzcXH/7jr483V8E/Luc1R2kmJdEJYKNUp75M9c6VCLtqVsvkxJCoUR4QtapuVCYkmhRI356uZ6PO8Jnq29rWgbzmW3dHHHq2uuCAtqic3pTFFo4okDieLHZDbcrJqSwVUNYnkYncxBLu+zq3O+po5zfIXc7VulC0zJSdazZaVizTUMQJXLiF0WWLBRt1QEjJFULAEnTiRQpEm00ZNTki/KpLVLuZ5VHXDs2TrWtl5T7UWJQuCVVMwkHYk3eUoWxQlbThRYlM26GSg3la/zaUbEkCSkSVEKyEyPW3Ey1CcCG4S3lSoeNKEba5wOs3166pU6uqrrVVstVeUaIuYp3ZBp1iKOMHFGJTlPiWjFh7T011cbu7Nm5pBqkyZ1LHVFTrWAazVidsoMEEEO/Mu4EJQziVdQ5tzIa4o1AuVdgIFqMWZp3i1EEmjUwLVOxWmt9CmvDlmsNSXCAgMIlEIEEWKKQo4SJJK54NTRYyEr65vhm+TVVlVyVgqbazVTmTOBd5TVJDQngsoTV20t80iFkjWJcZfN8mf+kuTZGyZA5B6dzY8TZfclKo/tjkIe0J6JSaceWv/SWLaDCTWkqmq2WPXjdJHZM4tUuZvOmBEM2vPznMToNqbdc8xbSxRTcrlEHjLKhXI7ereX3aSpnGDW7AWVub5J7cmYzY7umqISwJOd7d3Q5z595226lyi6mutzTOsr7Y5qtT40r0PcpYGOEXVKRIh3mifnsxOcGKghFNLLTyFFPVOX+FUzPgY0fE/DO1jKOvytbaiHp0JTe00B/RGSqVjlZUkHBRhS7RBd3lnJ3PgMd/v3BkxfqPrK5y89zXxS4U+WGkiaX2dWl7/1O75QYFT+/USRIQGvmu8t/379OcFx+Xle5Ms1OfRBDNpAnuNHh0GTneQnfGoKaFLJ+oLeEVXH3RSJ+vH3scX0lqSLDFqYI1w45UE2KJnSR0mo0s+mTNRfHY9LkIrF4ZKpczTXPYzmnYV0jTOtICxIkKRJmyT8Q/WfL4po1hJxgxJBNsSMGGuPMJOZn7IkURtatdNdrJB55PMKvVmRY2czudr5w/p5MLeLe8mF56vYLDAsym8VmbGLS3gz5LzcOQSoVo4eziE1nO372mxGXkwkE9KteEUKevyTYpY9p+1Lt8fko17+Gvuz3mH
+xB3XJaKE/P3RyPSojwXgvf4sS3R7VgLV+xjEZ5mBCc4DzTj2feokREE7OuTFiRRkKFbUlUnOR97jYSfPHmR8qOcpI9c6PeisZHLUsVstXtLWlSoyyRFrdeqERCVt3CvWM4jOa4FOUB+CRtKC9qC8UJjI96FOoTE8N0YR3946hiHCg5J0qfkzJD+Uls6ceMgthD73rIII1aiIcC/poCC35dtxm+Ico0r+fw3N0iMRyhmkn/JxMFDoFGlT3yWMWjzNr+FZtLQ4VTOEM9+91e/JBvX1mjWufad+/BMoNfqIo/3nkV5Xl+Neu9kIEJKZmeTids3tR6zkJDQPE7J8xaf6YykX986BuWnOm595iJ35Mx/YtYJKgXuiSkyS7pcbK3c73BO2QZFv6j7JnhT3L9WgYO8v7JBWlx2PQnJjBZDNRAf7DWHCkxfDEyC/hkEv1e38lFukTC1qA7G9MELbr1iUSEYzTxQXAhrlcDRWoXX8dbr9aArl3vZLLwyOjS6GzvW6JK9fnu3fX0pud/9coPP4p7XLi5u+c+/kYMOulVPxNvb93uqyvX7VZXKtjrTHnXwyKkpIG0Lab1zZ+nSNVlLyCpussTXl23zqtx0prulEn3FGPKRiJ++cWdut9StZJx1tKTFa4XIPUXdKGRb8kucrOMy4/JFmUrp7m3bXvHLUSC73dEk2tNJqvrTNLm92pSlXkzSqtZy40Ql5BPr32wa9vmvhkzdeGdV6zlcuU62Ve6brzTu1AXvbfT3fZ5z78r3vv5u/swm9tU+3nD7ERxC79PeTdJ667r6cnt6Su58itehVZ5oxx3m60n74qWtuuN33PyYTta5q12FGcsEW9G+o1qU6LaUu67Hx2w97y8x5JTJ6XBBp711yPBoLj+p4kU8aZ/JXfzJTqTeN5tKfqHJK4mHsTO39dWXAgCCHoyCo/gvc+cUGoC5NAHA4k0tsfwyjy6wmOP5JQe3HD+wdHlMkEO5V5hwGtRuPj3oCsR3ftA3OaYdqxyHnMXDu9/x1xFMSeItJAqHbpDtesVdax3UFf2b0hMWjT5+wyvk82r6RJO2Q08rvbnj85ZD5oBfUlDBmh7ppe+alCt9ekItEWb+9SNL2V+eWxyrisnvDS1RNP7BM17rfr/fzFhvtZiH6+9JceE852eXHALsPCSodef4Y6Fd6/HjEh4fYgMI6+5Z6/0vXh5YACdfKQ55W0Jj2xyPr4+fc/59XQFM02hUOf8x9Lo8Aj/UChlSpnLU+Iwj7U8CZpfNDNv2Hhta0a1/fGA209XrFUyFkPCXQafakFjZEKc60X4TL8G23QE7FxUHYpOt3FFvZWk/OqtuIJgeR/fsbxcfZmqj1k2doH1mVqgMtwkCOyd93bYUXgs8yzKJmCC39cO3G729GMefixjoDLSvqX2gOTclSw17lXvmmnTarbcq9k7REQUwdeF8y0trnjYnQ9ivJLXegPy/D15pq7yr1vl35WYc7U0UyBhxljq7C4wqmaDbv5ew/vB/i/CDkCCwgVM3xnHOZ7eNsq3p2Ox/YDsQqIbXsD/sknyNLmGhnBaSgPsl+HZOH0o+rhB1Gkkf7GYXkSiBWHRiIjLxqdBH1R9PVSBgOMYYvSYnNVQavz+/oP6qGgmLHmYKIX9Aa76zhv730i+GYhZO6YWZM6UaDqTsArgmQfhsYDbW9DLRALodbJOaR2M6/xzlKnC10eyOB6fku6qleT/Rrv+jdYviuulM7Upu9KRq65as9zyv83mREc30iUd3DeVemb9vCazGizXu3IZDZaesJCY3nH82NEsAJYqm73NxprS0oZQnx9A+/YUFtOEoIznf2x3FCd08aAqXgmyVwgRa52ORW8wkGdvOYry1cc7pWaiOAK7nHQCTLNC6OMAJYHPx7QT9jYKGrvxTtMaZ/OAiH8EzAXyGpDyKQ8XuyAExJbtbpiWnJek+OSeOCTii1U4tpTW0Vv13XyLnMlQ4tKvdKSaOFHdveWZqUmON5wE4d8Gf
90k1rPcW1PTNkLOtDrYis2CH0jj7jTpbKbekR3R9Y3/FrT9Z63d7D8hNuWOL3ct2fWq04UU1dN0WVzPi7wj5pSS1dKssOUQWyKf7FzrxTSpb2Xc36HczmqzLeZ1ryXWJSDNDpOdtzzzXjNPc31xxkV3p6ze6OtTjXabwqP70ebCg1PhxZ9Sos7d36wz0lyTYvZUtpILCEx2oPk5ffVuVp7kJp6ZYooXBOy0Jz+ameq8fJwKciF5ScUlTnZWQmGpt+sKJUV7X9/f2dUdiuN+7hNDV+/NW6jodzPIvZRQnWvD/uikmwlKSyZVk05vTN0k9N5vHSd1T4WmQz7o67jN+Mn33xf38+yRlD2aJ4RCv99X2bPMC2RGmzPqevMlUfWN/d5fxgR1mGh1BAOJkw0HGOKYU8B87SH8+r2MqP4fcZsNRqiDfZoiviyqq4l085yiI1nUpSlWXnc56lZ6HIz/vl7t5EpfTVUNjNdZ49+ZV3mZ335vf3dS+i+8NUV3ZuPksqzqfdNmmYits5xrNs2tNUW1fMlbbsI3pe9dOuqY1J2ZEl1PVIpTFvc4iHd3bOMWtjRPy0tzwl+l2e6NtEusNEL/ZfDu9pMzNWckkTvbGG9m8C3mjIe7NWseYzcXpzN+u9XN027tLXedpV/NTlTO9e4/IiIiI8qI1qO61EPrUz83fMzVS+IlrMxrsREV22Zm20zERETmpmeTM/mtQzNnvV27VWzPftEtNUumb+fq175quM9RrTeOLdpuTS3fetSlBmcpPe3cR1KU52pqnfmZM2Ut5/ojTzr3Xnu6dV5zca87yt7mPu6me1Xy7pKs4ozdbSIik978pERh+edRaUclvFL+W+v2gJrOMYvN5DGFxRaUm/aerbjUpvbNRlW1Od1nOc+mZmaz2tKud6pPPHulrazqbM2aOxVlDv3hY1Wlk3SlKfXCv9MdmdV96kLwitHzlsRDeylgzQccWHwxEH/4PvSCH8TC5OR/DvawbAs5pJEA5adFY9Eddj/Hf2vvn9YrbnNPfursxiNq7zMtLaL/kh6Y7z51O199+2d0oUwyVqz7/iULepJH3NeyRjmUdqG5od1YMzIbXja2iOulihaf2mPCGjeVrW7ySuD671Wu2nddNG7S50voD2ldC9WKEuZKZh7jphsr38tmlXoBQm8aLXOm+XoQZ+k3a1JyYjrbEdAJa3Zjb2HPE/D1ZrxkboQzwyNILYBPhZMD463LwzI1xEnMeEMfuxIxJpM/cq3r8FXK0m5OHrIOZMZDRXMZHP7WwPoLvbQaUOQC4Yz3KU+vcO49X03Nu+bSrzOb1mpBJwSH9FXans24KRTqer0YscEJELlpxyQxjv+eKCqtnyranqv64smEDER9BHRk/R1isP9+bJH/YfVxIhJT4fFRKlB2S/zadjz38rdVexPGOxNli2fq95OpWF/i7e/Vv5+9VNzftO9p+CagIDTBMnV0tq5/fglaVl8W/O/IgGkY5bRh/AP6xkSGYCpAaoRm0VENczN5UtNH2zlXeKkP9cYx0kktNUpdtKAVud1GCSiRdbvHsuRQnvHu5uIUkJJ7FPVJK6oW5dKICE+aYUE/stEjoqqKvS5kdcQrbSSADckdtAlkUkl1LpANoTU64dZyAhYRbcKP8ElFccJFccxC/8mOLI6GJREURtRxxKEklQoWta3UQlIlEWRCCIwK31LXh8O6TznUzt4Jba8Zv4Fk948vjXfXfcvAfV121xZEXbFcKgu9azDoDgBj1/1WhJGsi1qnaHMBAsCwcLXVKRRuliurJ7z61pLGlpZJpa32hrQtkTVPlDxR7S2KcaVkSpfJkmCmyP9trlNJkJkSQxBGLLdqr1L1q7qx61atuHRVVFPqVvbTbhy0mKgKiVU8jREBmlybIQQs3av4+niiSFJCQQqsjVbnGqxKGL/OuxTCWxX3r+ZXNJdVTrcWKNCUMhIEii/+w80syonZZ3S01M000PImpaAkqC6lHIi3jLa3OpUrE
EWQtiUioiIy51LHvsZrnU61p7d6gcND5eWrijXb1iTTLORcvxzJMwdUU1WNSgWuV2OVSCCtgitiuZFJ+CYuCTvOJSJ1NGyVLZCLe9QCs0hzVqHPrzUpbOI1C1W8qIW3vVKMpwCS21RKCW4kjRELcrOZcUJkQ000okiGKIMaL1SMpNFqBNNrWt7azbu//SpIka1rrkKN44SYuha11KjCKsL8rREBAQkttqFTx5Its4YsdMxHLS0KInTURyXCVCXUpxARCfHESKIkha5Lyx6nlXbnedkdxouCDJuZmm4PoTbdaxpD44022h173LkbUbMkJwpI3NhljVRuUUwBlg1HvtJYs6nSqSXm13es1HrVElrU5aYFqAihLDrnXVvJn1UPu5VFPkxNEwDWMKKETzMqrxd6Wlto9v/3+/u/rNoxcGtBRhRi1p95mQRppbQ9ulVzc6lWVNNOkajGPT2QxNNNGQu4a3dmqRbdCoUt5Nq6UDcib/0jVKwREjGWCDUYOqfUSjFQ3vInYQrFlhE04i6AMQsp1da3iFAg/23HCxIosOwZ3FPrlWKKZfRq5liiSOraZEVwd+R/9MkIIoS1jSn0OF/Llzfu9VGD6rud2qF5q5gIqmnVuTwaNVMSKdPCKUt93rr1/J5uNH5+eTHvzPd3GyE0VaaKbGDPck5VJS2ldT+eZr29TCYR8Ew9IRAq6ytlO2hruRTYTaMS7tqOaqthDe9HO8u7alEBE8GpKAiT2flvwixGk+joSznl0XcmmqG2pIZEiGINXd9mS88ZXJXUuyGzLl0omfVpluBw+99Ubtd44GnrubH47lXZFrzn6KZnJdKWdRGXp3N09dZIhbzerUz9zmUpSlWRVA152VQY22WQmhxQGOZ9VgzqYDaZcjUr6VqITDVEdGnKYx9Y+cYVXmsJRdiQ16hCpDaLRq5FtyJSJi4Qv0r446txHlhxSk8bI6Gm0m/5YkNKw2XQ1ZF6X18FfS99RVarQ+n66mikti6pgl5vk6IswT5WUofM+zVfUph5Gtljhn5hRbUT/igoHcV0221Nx4dorUxL0XmfzLnJy7ZKt5XykmC8GhLG4lWJp3PSnr3gu+mpQkN2OXoiiGj9F9Cv91OF8X1Vaxpkcs4+7uL15Pd+76tauppwEWMimm7sWHRh9y0DiJKJ4l4cE87LMxHpKPPPjq/2l3/nh82LiLTf9s+sl9pE1jAfWoE/q+YqRWCrkiTRcc5lF3OttcGjg0BvqHLrqbhJex/x9hlaWuEFUNd2LtxyPK7kmBxeUUUIQvBPqsTU3aCsCU7dx2o6mgp4JTxe0ftqXkwFOIWEmsrwiUROin0+tlTIE7iUbMd6ytw9Cmo2q5+gbxLeaLGkN5UfKdSuI0VAIoe26XgHwGk7s7TExCWpziqDQo1T+wfT1+Yu8xs/WaSU2TJD43wnQDTQw5c6n+BoRysUDyqHuv5tiwgbMJ+aG65d5FaG6s9rhSD0vDYgLDmaLr0QI/5vJB3vMfk8e/APfm+RL4fxeFKQkhGIFEKRDcNp67q+OVt8r8JvPTw+Hg4nYU5fuIuGFaJ/2GlwF4UfTP3J+J47nyf5Xyw6Nkfax4xpRoG/hlXVVwvN/lZVVG/dTCqtTe3myFiK5zYI8gIiTddNkJV3vq2erSdbiYiV1IguKcsFROlIZ5f7EWj/Pw/MpQYtMemNnuM8eF9UtFKg357i9XsL9u0kl4s6PTEPW7uGy06rgC1/FlAa5xq3NyZblftFNAUV/xEJjozfqtqRin9TwzMX67n/AqeRph7Z4RpivSFTqkiaie18+vYxl1ZZ+CmvXH5cDt/9qct1ML/4fiV8GmEgJoG7ay+Akb6FPBF7dVWzFhI3FRYMTM5PI5jFBszXwXj/YnAh2RQ9VVbBXdbQzjdcbISWQ3nCXAYZRp9wXrYWmRQgEnsy4GOYkt6+QzFWz2xfoM/skoXofPk9Yu65kmKgND4pGIMQam+7LblXpaRxOlS96xGqVxGFXSr2v
nW+QG4XxLdF10E45rigG+c2hk1kekDm4HwGfV8+WIy02Xi1QuKOrOKD+sF7pqEVJkB1FebXcf0QzLpVhvx5iN1KYGZKP8HvrVoyd6bNw6Go/gipDC9EHqoI+ddLlL2nEiEKgO9fQ2AoN6EliBZAzYVuSx1isafzFThdbG+mToU7BucVdznNDwxmu5SdpO8tVBOmhp8+SArFYQdg0nKlMiwC/bukuaWKXVMr9n2vu776xv7nMNkL2IrnSxJfrr/f33637iHDof4Gm4AAsHc/hr+/ufO/L4rMjLctiKpe5AtCutPH8ssB0JCK38VA8+F/smiaRD0JmBRJ7vbju5fuvnbK5ynifK1iclM32lkz+DSUP1s9dozH+knANSd/Mr8bVBAdD2R6TfFy7huB5tGgiERv7W/Cw0aWTblQEpy3v+KKV666+yjKF7rq+MMbu8oOmTHDJWCkc/pjM4qcF9wL2TKHetst0o7fOPOkK5RVvW/jqdqloR7kYUSfnLtkfwDdkpLvyvk3dhfV5ovkvmM4z4kh0Ury81anm3zi3PU6Pu1u73lfOMWxkpHj9fVk7btewGyp6fZLn/SlDnbv7vmF/1foi+6UfXlreViERu1t5U8/TpzLY/payEs1/JS9t8naeJX3zFdSXNVk55ERc8Yml4qSD3/LSs9Tjq0dyX7yOvc+fsDljbs3f3ETEX75i046iZOUcS5HrWnrCX3c54k/W1KRIQeiYgNsCPlfAJdC/7GN1IrvMpiKW7pKYr51O6A6FYQhrnL8NbyZAgdOrkSbJdUZsoFnVVXtFGBp3PLU2fu0vpX1XN8UbAzARwdUmulsqAJvVHEeBa3Fr/jy4WwnPAP8WkDmHbden/TGdfu0sPW/AP9rz7xvnVDfkK2TOUSk6WMCHuHQyiJpkn3yWhkguF4395bE0lbYDFZLzR6YQwVLaHkwopTTm2nOXMU6HVxDsBFC2RAeWkgIlKCUc49bF3LuSdXKQl8T6zEMJwdopkQoJvN7lE7AoYRh6mtdIY74ebwCXynreT3h6RaDxqf+9XnOt7nOUW52xwJDhi6mqWRShWaHK9GhTH+L+x8pfv52QezghEYy2TWjKB8tHKk44ga2MBgNPGvsH5guJV6AGahkxMU9Y+RSMRCqvspeh1Ypags1cLnk2sUB0dXG+jUPHqF7FqlbYEBddS4nUrTDOgqI8M9YM9rRxPql5X6vGMantz3cFHgke0Kzed0wfSvbrT251/sB7JTUXvAfbrwomDtbuA6Wn0VJLs9/Kc13LMi9zjltZV9P9p2bHmWaeoiItWv9nCgAh8K6pB6qg9oFa235PwGjoRnCQOh5lt6Ls/SFp+s0dmc5WicU6ksYfqJkpypS5UFa43guWnrznsrIfN73lGVIv7/xIoZHQEZZqkTddeU7W1sJDrrNzcf1vcRdzMr7L+CwgQGQ9syp3pn4sJvsZJEB0g/Gn5ERxPOJdplWAxi3vf+dJ+L3a1gp0xfmCTB9bWe16cspWVfDnYWG9ArOX8Ocf7+W0/7fX7jURZhCsiNy0kyFTmYhFbv9XMSkUL+EJbgj1jUTzf+9KbUeY2R0KhLRAQGNGESbyzS0ixaB+zTItJ0gtNrSZCRlqYU6/v+pTv9fPnN0GmbHr9bya5zIkFfpzOpsOwVQN4K8wzUpVnItpreb8eYmEXGWlnvNxcyDN4mGd4lCpl85iUCoOAmqM3LPmb/3962K6Dv95nV9+YSFHzoULh5PJ6FFiiRiUy4ViSq3CoSm3CTFl+3eBVEgtTikitJtEJPGgg348bxf755rXWqnuXt3S3CquW545LqcucRpE+S8Cc4NSmBBd3t6HN3qtdLPMmUJBAxOBWmxy5AQiEnJNMjabd5OY5Jabdas4VnQNdaS6Zl3uh3kzCSYEtGSQwC0wwAAPd3xeCXBBrW3oJNcxWhQgjbpSYGZzutLShbEusu91aBBEiSBp1t1CBUIQEDaYFyi3Aph04QIRzKd5KpN1a740tkQo
6qtcn3VTXdR1Vli5xI7bICCk4i0KTynAKSIQLjvZo8y+mlmzHMKyJIWS4U2ZxfNXu6VUaSi0rnNC7qlDvWKmsUqSiHjBSh9VGHiZwXGRSxDpLL3DZyioGTdyuFiaqZSp1Yh6h3tHAh4M5UQsqpSw9iIh7d7hxmXc1LwfLeBotL1cTGNRqk5FG4wkjgq4tFaOARUg3NvWKxFUScXDgVT4mnExMwkAiS4mSoTxDF8RfIqDhXnpaw3AEhlZZgyK15MUcFrHrz1edR+MncxBJ+Z51nN8SlEWWEfy1Vnd6d3fnLmE+d0XUj8pl9rVe0pElG018OvX37forrWwhHHFXluOo3bm9nuk494tWNyN8msT0KhsCttEEKPGngghYSQYZOpiphkDKKzRiqWmCyqusx5TlFtlNDsWiCnTQ4xAJYnl0q1E2ZBJppSFx3NZZB2c5znOHN3g1Ld1nrMtotN7dS9E3O9F6uzCmQtCBoy7q5aBNAF4/wxxFV5XMp0pSDhoH24KSP9qZlgfcuQA/7Ep6D3EGYipenNiHc4jCzmyVgutXu987/Xzi2LBeCm5mFvfZ5u4VlcGhY47ECig0iTBPAf7hz6rCAngz+mfUx3ztNHDMD9DD29otbqhTREijfr0D8GTY8137+bewHRhtCTZl4M1UmfPcH0nHxvApxBXy/qnZPW6Vi+gN+clSQCz40tW07nh2rSfWq7YrU73q95C7F11xT4/iippMKWIdbonHBxtROW1MZCFMEUaHAIBshxBAz5/08TaM3iPX3Nte+lh++6D/C+P4Njdzi4WKcCdkH51yoJWGuuDmI8dHRlGpRFvcrduIEBVk9eZfRGLEJB20P7Zgbf4k/0Ss6FymfFEVPUzOO3nz7EdGwhqI52Ltc16cpEAgedlR66x4w0T76Z3Xsu4eT7dOA2rMWXRRUWtqXF/+OMdxl05azXXU0WnycQpZrPP+2WQ8xzvbPDewscszTK/f2a3R0ZickyVgv9/y7r4bf15ne83QC2gFtJeTPa/fkfl49vQ/bkJ/S9BKfSP4PoIJNd0+QwjuvSsRVjNMepC2WCDMRmZbctxKQ0ExH5HhvM4eGq18zLaMPINMNDTcU54Ey1qgDuuOx1MsPIa7xzVeY30fCq8/tLSF7eOYU/Dry/WJeEeZvl3EPTiPBM4x673dLnJStygx0RHEJQXh/pXIlaNyOfR9qRAR3XNICTG69LheXwwRyD0WziSKlRvQavE1s+L2SSTMVadkxEccYPFHISNSmYjHVMKhgm2FQNju8QadagTGWNwqqdtttoBbQkgElkrKW2yS22yEshISWTRbUOkqdgUpIqxgQx0GOXKaYRIh0UwmlLzG+lT5CvGtRmn2L2JSevc3TFFU901ze+StczDpafs4KLaI9vNRtGNDL0cEmF6UUEFK/8rz9OTnPvD9WQ31UWR8xr0ROtStTcyvBpV51rm13+bS7Eq0vmevJ0ANppucS2/AF6n2a9X37ulhEm9KvRtqxLOn2RE5zW94s/81OrAyu16R0zsT9BW4vyqlWUYExCc/3cu5P5/tlf9ZN7+zE3mXuynh7spWJgyKd5JpKEhNiHmXYxM1M0FRcxChBU71FJISDmwnlcTDuwwWZwsYd6KGJxESad8Q82VdWOLFzdwoaxYNJAQP/OssUwy2crvWbzYAupOYhsjLUc5vl2zRYi2LEAhXjQJThVSxXNK6TVtsAaoxsiJCMFER1jLIo1fa9fk1689xHEDgNYj0g8w/tJkklTfRxERQuhLa43t1rjWjU84TULO2s1k604ijVPL51PqpdEt5lFK3eEtFdcVSLq+ve9zUZCdBK/ZtlXV4j1I7bban5Rf0miANEnM/RQBXozGHM6EEd6f6Mn+wb4Dlxgemaxx39+Lz5Lfn7i4lg3Djhtvg/B1XaIdo7NqkH/HULcU+PF84L92uztg230IcbTaKY3CODgGMU9XN3yJRiarX7Lrm5BgUnWXlrGJAurFKinNKq1irMmo
ksmoIJ/TXH/e+tbrXS7xr3shQ0Ru+pSBafJ2RsUMhLS3IPx1CViCEpNeb/f7tdLDqRf35PUQlYs915M0kmRHoSXz565nnRzrfu7prJmdTNFObOkoohRRDIhMhkLshTUaNZFCCmUhSFjsYNGW2xyATJSMZE5VJodRaywapSnfvZcMu3e+5x/s7xc76b+XH2GqaY0D6UJLV5nnhmAoNK5SDEWnDgOcPSas9ADXuRK+BnHgtL9/et+eun1X1XGk8mo9UinsnvhuvIb1rjXp6GfzzrPqZ1QnggmpTAK1MK2UzpaXnrrOs0k3q/03F78zfgKqWEfsE1trjdTi6u9LKYKedWqWj351m7TaWtOe2mw26LtiQAgS9U0krIVWLyYU5NJSSNaXUyCFx3AqSvyaU64ftuT8xJbV7YWHKqkU2qp1XwcWxW7TZUginUtg0Cm0pWtMWIxC19OOvd5zg2tNUpd006KacJkSZ3MxJCcuI1WbpRohQtCCFAIFFmfyb0tNr0aiJQoSsW3MqiJppJ5SMQ4hRwTO3WquTChty1hJN7idomshJCShlMrzSlbVCSIVin9xG/n7tuy4lWSeaqIqjCFSAxZAyC0AaeJhRVPkl5Z10LpzN93ndN7KPMZxgy0jrE2V92rKJnMmapcmqrdYVGp7S0RmOEyNijRRCiyEmJRQpxaawpRZEQCi6N3M76at2bJQosR+VPW3oRU+TTFs60i+nEVtwqK2mLfuj01UmnW9lvRgIMkuPT3RZve8eYRKtPzFLHKME9tD0fsoq3ijVlKRR9fZm5jj1kQhTY4yTUj73gqmqwWWlbVkUkbciag1GxCcYQ05qaIJNgRBJGaTUcT9wIot21OtSNPBRMYRMFGNqRqaJRCKPOLPl29xR1ie+pm/L1nMvXJmIuvRE7qUflQ9EVNuMpYqnjpKRZfHWWIxRPJ48n/Xv1fx3bD4ZDsA0EZ5d4eC0J1kJl3xx6k8Ku37mqVVpMwj/XW62KPpuOup5rt1kaIV5/0xDIjvvWttuZ4ZBgG3UzUIKWjnk89550ECunLw6Dvs533kPRR6MF644TpXrX8MjSjvjeEqWSmDbE0PJbZUy+7R1xRSJ0SyytkjcszNfrmxUbj266NoimLaDUytZhHMFYk5WqiW222215aNyWQzLaSWVlqzMSy8tE3G6pJDHbG2P2us+6q9uwfn4eNSKxDDNxENN5O5VatsuYdw4EVFlnenuTW2LxBZBLUo/zl4GPbnbrjnKqp68upOU6zG889BSrX3IixTW+jXVnOTj/x6wmvPCtKPuTEo49P5c97wkJfXv1zOcY2MnOW1vACyZmW4Blytq971vLRrQ9Y0saWSSFSyxURQgTmZgJECTTKE5YhCkjUajTBCJSSVoMx10VwYSNOYrJWWR0JQtxy0EANhJFJjLbbXSASQkgIbmSaatiE4/j7U0fXyb4uR3fJq5mZ6WC0vHCvw1OEkMkwkkiQgQVtUyY5auqWIgcSU+a8u6p69i1fqYiuUcdelxVN65E8G1I2mNuSEGECNEkjjAdcKwJ0Dssg3CDHXCikhJI02oECEkRIDj1RANW2RUTHRD0XQAmxyEliI66h+szHhjcwMbV2xOZlujp9E08uvV4c0ddtIAJ5lUQjLmGhQ1tSphKgECDU1MMTs7phJtzKuMbSenEMSBlcqqiO0taaWkza7uaLCes+rizhy85JJtY5ZbJPk6OHLRb/w2icVFNPk0HgKJd0UCaEhYeQ2STi2eqE+yWGTGMVFnNOINFWDZakzRNF3VCASSo4k5ux7amnl+2cyGd0uSQCfnXO7lnAJ6dOkd7JnNre5tt9KRthSaerCWuQkTFIxdhyqbcqlMyXLKVwQ0zBRjmrsbbQq9ijtyyx1xuvVcYASQJbG7QYNulC2gCskksUskkZZG6pbNk+l+fkWq1GruKSJPgLLsAH1ceE4bzSlZi3It1p92SgKOySJFE25dzJgwtaKXaHZQRe+Fp5lgXB4bBx2T93H
qvzzrEvoQt2ET6ESV2ZpFv64fEV8KiWOVc/1qxBDimzLFHnhGMtfL/1a10lTNfEJh37ndJn+frdENlLwIyN/lPDfz6oHbpnlgZBQZYxEYsLjIUi8QmYdznYIaQEa3Gh+NP7KWOQQKSfuUxFzUEZbxOiylkXR0pgnM1vfxZXa6PbC7yUTQwfbiOXXyiZTWx5M0N/DLS3r4z336c+hCbGxrfPb417jVrYE59JVnxhvShjE3YnJdCM6s2Fb57KVP5iuPmogrZd51D+DClq1x5crgYzH5y4hzcBnTLJ4lVXQtmAWpKRocxX0Uj9rQzT+6qhhD9GAhG3KUkw/7Aw72s/6sOER1FcJM3UgncgRVg6rYfwxO9O+5AdgEMkFTPXZwJbzdtQbn4vJ8DO2cKbfic23oMT0Jdsll7zavFieduud+ZTq1rjY2xJrJPQCycOj+XAMJmkbcqrSjMmigevbnanBLOjbBcckO8adpw5cE5l/aFhj9l3aS72WKXLXQAQn8W2Q9y0yflHNvbAYVE41Eeh6MH+CBmP1d4sdMZO1znXylDzgKyQwZhFJEUvwejUBAOw40u9IEg5V10DZp+DI/hy1msUDooAOQxHvvFwFEhjMjlu370zNwXjs0Hv4tFKzyDfzbvsI15fK+y10izKhsbS/kc8kgew2I9rvyX3XoxJQQBNSjLzi9IeVgKCv2wGhPa5UAQ5wKZTJhs0z5vrnw+n+e/Oza6yt90B9i9c9zsDxZu5dy76FVXQFhu65mV738pSzHnIQa+vWvpj3pMT5VJPOfnV4fPnUJVo+qAeD+ciBCiciuY1gOCGtR3NY08yrVmur0u9c1WXUQ2xOMIJNpw61sz4AQIegTVOhCAB33uiCNJ1F5VQU2xS2Ps/0o+sD3s/8f1cXGmRUs3ZmSgDKlhYNjNv7cUbtD9/mprdSOS1wXtPr76LYAhWnPVKU1LE7WC1d0isscXWqbTdKiA1sMCjFHyClv4uSAClOnXr93p1DMqclQDMs6lK69ywJ+4sz+CNuwjwoG40Go1EGHQZ/CuXnxc7hNSFEDFPwHEUcOcfQ3EKrrvlqBDBRIXVat0dpNEXcvUKxFXXboc/32a3zDRXpDj6Uu56wvxQqd0oMkP8Uov3QqxT0hKIorOWfLBC3OJN+kLCAKF6Mn7gki/TqZzyL6qmcH1ZqFHXYJr+X6lPBwGMryBK9dyh7DagiJmQUAkfIjvpeqFaIz1bGp0yC6BqWCN5Lwijpg+OdPf1RNub+tezTeyIdoiZV2/71gJ7HK2ZYiEfu8pQDztHm823SiyyF81sCFE4bfUPRnC8fx+S+RvCuW/MI07Le7QR8oQ4pKePLXjV+YnJT63F7XkeGaFZttSWOy8x43vh5s5GF+zp/SYPUIaIf5tEGSHxaafh9y9X36Ef4RZ/2KffeAMt+oP0xEVoBMIsC6LrFKUTEmVtkiWpulQ+kvKqU9Ov2DtCFcM4WPtoiDELwWqeGVyxr1WWu2v3/V5dZ5vWsy7qZEFVLfu4S7T267fsxD5A7nb+aKWXM67uVOAmdiuLS81N7zdTMF6S1tGt73fuqZlg9bzPF31ht5Yyi1FxuULINzGOUs8q58aqmZ47p5s4gFrA5+bEM2QkP58U57Ve9n7TR0uGh1zeOHwYoBmZmHZmZuu5kQT/7hjzDUUEo4C6va1pzUlXFwL8QaENCYp6UOpy6Qws1cJMohwY2oITGElr/WezGJI6u/UxvipCB01/v7ouuA/J7n3QMJpt0pv7+eqhSlK2o5m48xZHOxISBFOcEHNiY3Ta/4lQvAkdcahrvvE4RZlIjEUO8eTM5Xe5HS39KSIF2JNgrcV83vcZCx+UfW5UaOoreepyZ9u7ruAn+KVeF/Q72pRcRdDLe45KY5GqH0hI2OgOG1r4t25fXnYfBCBQLG4f0jIKbfSHkBm790/YjUQzH5UIplrhYEPwQEaak964q3zpZ90KW3brrF8aa3LZrKMYWUoy/ZMfTtQEmdTkc
GrrDvtlRGERUr+Lzf/tMOsv5osLRHTp8Y2o9NdzH18eYpc5zzyjZb+OVP7vmpbHKUDk59blNrSREUceapnJE1sSXhHyjXXuhn3GA3bbQVDjUBv01AQoC41mmIm7uIkTQZ4stVVVEMusK8oVWqbt9Y/uATBQD0ezn2F1fa7pw/OhMU8R0uRgMc+fSEZP8yIIODX3q8qgyzj/O9vwRBBTA0cc9VlYUpHkLWndU9bYtgrYXEp8+dt4hllKZqqKUMQJ2n3DPWbxJ9QJ8YyJhH6IHYbCK9k7SlY8/z5lU030vvh8wcJVMP8z9S1dJrb8QiJGtvsolCH3BdRJETLV0xuZDnIi+vElPRWMjMZp0jfP2o788oqbZKyrem72+2URz6zUVrYtmy2L8lEu9awRIrSHfk7CDFixcXC7fF7QCB97HmdmZ345Oawt+17AD1jCc91XX3qXR0QB0QxoP96LT70SNUqpmYsSMzFtSZ/TDT00yM7z/IrckAPY9YDT0evU9TzZNE9KM9Vms2pTPnnY+sFqsWKwZ1wiN4t675O/MMKx/WMn67+5nHTaHsebVdIW2yKsDoBRXvqsNm69DmiSO3IJH13lOtpovc7m2LQeS0RSORn4gWjGa4D1io95XOP8kpmfKrLlAxFT/TEB01yODFDo+jSlQ9FEfjbwpXRwyOSlawzww4pISr9jIuovWp/d2g0KiXW2lZiFmZ17E9Txe+uY/yns5gb/oqhz4yjtARSyiq27UhWLvQaKl9U37NzHBp5ESacYVxiYa0wUJnOGzDabbHXNaqzjXcaIjgqyZiRZTkQrahrczIEIlD1K3tfdrEKAiLGGlzlQqECgFC0QxRCEJd5o4Gm29PEk/+3KSBWDokNUGmG8fJrVANYS4nOogkh8Fq1zrCl7q9DtlcQLYEIISF6usWN6uZQVcklzLvfq85He3R0QgXYeNSArXjSnqZUiCF073rMUKiLbSbU2wW2/B96UQxBCFlWybhsdV9MJI8y2Sr31qE0u2m01NrKnmuKWNl3ZBzNUkDEqECamhWa/mVTVeelvp1JzudQ++nMBEt3N0stiCBEdVkxFCSBKCfK736zUbExQEbFCTjmVwMxSBFU0sAChW+YqiFctU04CEgUkJJzk6XarLy7WKKFmtSlLcRsUoiIRebkqFDISwYmIFGp1KSohSQezxc5tbrUbFrrJUUREUJKUJdc3YEtIJEbKCXhUxVhRLqmKWW1IS7bmis0AmJMUAoWEbJFIojBTwuqUkAoSFjSxeXw1PpvWTkkoJdzdTm4F3d3ch3Je5m4nAxh4Z7xEPJhRi0KI1TWICt7V5h3JqneMWgpmHJCfwne/FC9zvqW99d3KV2RzPDLsk6aOeyv0KUaeovNS6tZ1U5KrkjUmNFmL8Fi0QZQco8qlqsyy2qFSt2DWZY80lHghAJB4QlfLlWKIBBEQgUxzjmCnC0K3oC+PKcPuZzOXU2xSOLIwT7Or5dcdidrWZNwq6iEEJdL2dcqFLfUtXjkSQQyL4sqgjoVY5pqIAFCmQWNwrbxsQCVNgkQUQmiASW4bshUQpAi6lf9UMsSYCBIICG07IEJTLQRc5MUJS6lW3dOAhAkCixMAFYk4YDV24kUCECWSJJqRIcY9VoyiZFCD1pVpKtvWpbgIhioSU04iRUJSKpbFdOAUoQNjSdCsY1jHlMsjmOtEjbaUeo9JjIgAgJIJmVElTcqE/yl/u8vy/0oxP/PZV+pEh+oYfUwTkfliMVo5HMZCI1fwrIEBB1g8+/Ra6717cK4RRBkJmiDVwm49RbNyJEdJ/LDXNXGVJn+ABGEeyYxevl9Ib2E30IHXhFgsYIMCGh0hLvX1ylD4z6wR9DPRIybHZ7NfjnzV51VLgA9P5tabO96z0lhZOOw2xdr5DcLZYHWLiOsPSx1znmq4Z8RHIG1vBF8AY+PsNRaB4TB6DufH+P4ss3fZ9eFZF5bvMSzZURa44QbHp50AiGc6Zmb0Bj5fswoQVH8QE
DgbZPVCCm+rzUf0BPoL7+L42CjkIxDjbiYo5qKf0K2oHGmx6rGyaataiBxxWKNW0kg9CbasCSKfdMwMdsRFIyuKp1xjEd33/N7ue4eypMNSbMQ0y7epeBGfq9xrZljSD/BKlxVEmyjNMhG2Hio9FKOanISkEicAhAXIIH0B+N04XwgFKEmV24XJ6zh0+TLfZd+TOYNwqgp6rXy1pv9IeuIafYqXaRyLL0sXoyMV6lL4LZyn+zot/zW97oZEdlkIMF5RzerS0RRsj6I5ni5bBWlQs00u+rO/la60rPRC56/DreN3Wezdkc1KU5l2np5cPsUEtxN3xxuYzrXvr16A9XLQy3MCULmWwy3My/X1N5nKXjuqqrqQnfccbiPfuXKX53wulIkfMcfOZi9OXbvws1PSi966HbrRrPeVGawW2BguA7AAhlBHpkilirHbqOtqJvTaqbREOKMpuGgMbx+5rwSncY7W4QnG7+G9pyrKdoI3zYU+hKxry1q5BPwhtW3lUj2G3j3rPX5Ou9UInUiYyeEOYch5xcdO9ZmZw2ZEPrYHwyGoaF5je2XaBbJXpJMwb7tj4zuACCoSISV3VlOQFUR3NPWrQ5c3MWtE+/RNHzawI95/Ot98goDsKjweuzN79713Q4l0ukX/VyqxNO7v9h1x4GbXJZZx3Pn3APVkBsbltjI3G6xuwALJIoQZSEbUg0Nyn45mYiSqtvRWrPKDlYbhuRSVttSII43jrjUb1FHSKCjGnE3JQJI5FQbGoVwjdFCRmho3CvWTGSWSVAWSwCILEhMvRGBx5sJCIK+kU+2de1AUsWjjwRAFczbJF+JjeluhdvAuIZO+Pb+CtZL2xVReMgdEBoCD6RETxfVP3eqaqIBE3IM0LGbYZdjPCv7EiMhxitZpxuIj1aZXPXBGmhNbz+wcFJa8a0mlW/LjTnmsJDTjo46hdLEdkt+pmURErp6pl02tjVvvnLUevbWCynD91ML6OhMi58/N3EeXz1vq+sKuvVbVH+aF569ZIy1LGUaluBuBEiHBobEyS20LpMUGmxtjvPZdzE7vB1oaHhSjlY5ppoaTTHGfgLlN5n5tZzgR4uUYsaZG5cwmDzIsbYZEOS2lalk2BKTlWbvNyGaQUXGCllYVz9weJtuXhJFNrCEeNSWROti466JvI43FHahlkimpJBOWoJWRuyNJgKON2IkTmpGoyuksGkErCOStFjmpI5ApGh1iK2/NL+nr990Xx8dEU7r3KmICESU92tjw5rZNgKNKeXEquxKieFfy9KRxYbnZQZ+sO4Yb9ZLckdzNHocjgF1Xcqysvm+OiWIQIOxoWenPH/Nfclr9GoTNJpnjh2e4i82fv8zdz15OZNCKzNqz6b7al5j1z1Rtqt34gEz1yN0CDoY3gtRJiH2OL/uvhyoT4kiIUWEhsYkRuqgiak313E9EmHOxQPfs8zMv3C++nF+OzXrvB1DHM5kvZ9E6s8WQ26TKal44J5coCNRlCVzG84oBDdEhC0dONpfNoKoRM+ihpNlm29Zv072eccVJFcoTRFeEdmZpO7Y0XbM8ly4+IXL0aOWxdJHiekzd/FDJvSMtkhs8nQTcqOUVs3OQ6HnRryZwlJXO5NnCNRdcbbFFmiOzW76tPZV785l3cOpKhm5HEFvNhaHMz3278ajMspXen1auk0DG5ldsDnLY2SNyV7tm+2m7vUTmLZCPphoELY6G6I1f3zKekTqbvUJ8jvE3ua5pR3OxnGIdVHDBAGFiKqrKVH85zjnfXed1pRelWYSo84A/+mjRSD2MQ9tfeu6Q7y0on163OMfnn0m1xBjXyniCZEpO84d6xDOnV6ixdmZGH8I9I8oYZ5VL55qGVeyq+Nv3mJVyHgQW7MMZWae9tvwLxjoVZteZCWLOM95W5ilUrFb2jVPUaie03yKns7qPzflzvQeMepUhQy1pbbeEjpIcHqJbIp5LoPR8mYYxJ/aE6EWOe8ZnbwS97VlLCM61Un1NnYbO2
HnDhh3dl3TKqoVtCrrjtQ4RwY2i2Zm6zRKXnspUFLNpqXNhv3jMu9qYMJ5BuJIS6JVV6cyTH8kBiX8KfMEZaPZ/N4qAghW0iPsq075lNWd/IWjgQt3yJQxjoemhEWyTBJFQ9IQPRjr8tncndfIQ0yEtCUpMsEdodIQVdpKsyeIbZ4eAob6jv+P9/PdSiWkx5OM0s0svk54ZFtSHCU08KlE4E1rX2/vdPRpyd9Hq9fT9e+6qO0+izSLyI6O3XGmVz1Z2R2Kuyhy6sq14mUeuHmwMTjiC8KzQiMEJAWjfsqt9yMHAc2I+H8148EW/k8RsFQz5xN8N1NGpLvvGVErr32r4vyea0eZXjUn5lj2baS9jqT1rONOd9VEzioMUcqZu3AIWIIock8ph5jZlT8YrGnvMXetJKKkRqdyxPFzCNJMtdYVTMEpGs3AmuzFVm7ufg0Kc2t56fySOudQb0nXmiTKt1Nilz6XOaqKx3RYcjA14cGy8QFVjWki2I9HcyJCncNMYwcDSRLl7OylGKQcSrWkQOaPYhIsjhhlDb063POLFSjXhRDPHOOCT7YECIU+v0i26KxDsx45hIgQ9Ztpxq5vKcSIiLeKnmFSaZJEDLjaHUclkM/Ck5SYVa2CqgV1cZPiaitVdv3HNziE7PHeS08cQvhqdrm+6LZaNt0+49E7rXzpUrIKDuS33Vd3r2GsSJ/FDpvmcTw4OMtEJOmtcfyWiCDjMv42cyWREBCAYH7uzcXbDYBEYmMtNNQ9c66sLrSb0zSUshKEMa9Gd9pK2/cRfeEdz4nmy7y+R3HsdjQ66kRuXBEM2ELGMLa3ApuJMgJjDNkAEOhgewUxxMrlRfMM3fVsDY8Kvtmyqp2U4PZ7gsfe4StnMvK1TUvXpZWqQt6D71Obq0DikgotrreMj6FYmdq6OMXVJ3/HMFE6J5o9+Ww3TrB3uJn8fE9ax+Udb+71mYTIUEXOG6VRT6MtT5zbbcvtW+tc3Oqsz25wRPfGTVlExwRLDXZSJj86cwRCAxrnGfa5qc3gxNAglRz4oEqFRD3b6XEcwaCNPB4heQZES9ue4h8cKZpPi+uCddLaY/T90huJqbi2m9NuFhA6At524+KqChhGPJSShoCBcUHhYBqkB2WzVZWom0R3iFZZ8tfdwYoUW9Oojrc4knKaXERKp6aJKIuXN4H29aS7SOfGZutuPJJBAdPxDnMRtpKCupRN9u85sdV3hANbUmnOnjkQsNl8i76YONxFljezqHT71K7LyCoZiIuns1Wd4vvbLZndO+cphmk/IlwdW1RvyoRM25sxDVcmsREzb9siHYsGeDU8vwF3PKoTzzptJVtJKNUksgiCKhpKa1EAQw5c7VoIqN1Obe/FeLLiTFO7MqOuxpD0QaFrMwkT1ccVRmSSGOhMmNKIXrNrIgtrtA0mHbrFS7OAiNlVbMod0EdVOJWta4oW5cxMtcWNpKSbDrhSJfkT3fXkeaszktKba1x005+vsTve3+rszdKhnelWVXwWnkRMc9qZzKqXPLNgFqSRBW9rXvjlokk4iKN21dMss6mlj3PSEzaKirkXlYasiNXXxAcZExBj3CJrL7ERHkRDzzhfM8tNYxv5XTvMzN3EUkpxMS7piZeoUYoiZqFDyht48YCIQRouqVlxEOofV17jWV42Pd3e38VV1e0yZ75917VUrbVT/fZC+tfC6ba5F27M1NHbj8mUpFJyutJc2hmvXbYJKUpMuDK6bedtwIrnaycmgb9UMNdktyTW5vvmjssmGhWOb1NrvO0GzOZ0tKqQMnAwApKbfPORSJpfnKNlwuFzRZwCvJUE0xGl/UMtluzNm4JZuksej1C6uHltCRNpizDVlaN1opmhIi7RhtJX1WT6vOu1PLqgVkqkLLfNW1yPsW6r9ZvjTlWvVqqpo2/F2X2kdMOSZ9zFJ9kPvO+cOrGA3hJe5bpoH0eubRGQygYBWIiKydYKFhyDc3MYWvIc3j4WSDY9+9ILVQ4ghh
0NyWbLySnep737Nzq+iZZG14zQUTJ7UtLMeMy/NeQqGYLe22JiMJLJKzIMrycItIXsWu6Sx9/Jw7Ti/UzmOK4HdiI3ZVs9ap+Zy6EIfhc4jolaTRtOizOcGEKSbbfKVfQ6S1SrJkWykeTYmSIjErSqD6Bfigny9hnzsYiMudvDkW+d4XJXoXHk1YNSzT1KOElHn1fMnIWw7pzZsdc4w/ZnhgYh1vfPMWL/0RGyAiemoVW4hLN8lQlYl/CEpocRF+TKUeduc49amrikRCQu2k24iZtTCh24UUJGMaaYlQoSlCiJFHBOY67nUfjHpqG5g3IzKBeEmXCBQTO3qahohllTYyKhd2KFFYQgw6yKF1JwModrSimtYXPHDEmNYqRTZ23ImNvFkLyzz9qIjD1pqEapadRC3S8mFEigkCgbiYSUm++Xvmp2jMqaT3PQcuOtkC22OjGy9rPVgl5cYtSOF+94uCzWd9YIDml1xO1Xp51jnDUkzQ7LsqeREdEJUJIoaRnt1lqIUiG0lBLjQkipbLHjwnO1wvndLu8jhC9O5iNrxwxJdCXZhCS5e+3q0lJGCSfJy7S9Vw3UJK66W5pLBQ/jUJOXCUMgIL1qS6YSMdOSSrVzXrJuIMcRFzRLECZEQl1MUBFUoctRoiRMiGpcJihY7lJSKFE+t9Pzv+yfTU+bl4uJAtELrpxCXfpxEVxrPXMz1rShKiF0RCRyjH27hQEJLbTiIjglABCBIEoCF2KIYlpqjQBShVMNJiUypKbHIQ7pKJPVbeZVGoxrHjysFGsjUHbXWqJ2QnDF5fXEJob5JuG5d4trbC9ZEsu+UIs3Gx7UmGwo5GC7SSpkuUoVELTljvRahSKMdpzCZEIIUJtIm5c3aswAJMgyyBKWxwbKVg7ayIohXNunKZSS6XjhBCiOtOFwiNEKRQPTe5JpQooiIBJLW+nfXL8q/Ge/eueZw4TSw/WoIxYqY7xXcMzg5VMtLpqiGks++7NyEDoPwWS2TWGeIG7MbgQL3kWLArOFGYi5tVEUgxVJ/jMLwYdbMQT33QS9sqwpvZTBemiTTvojkrG0VExdLOejZNF85Y4a9vAznDxDxWSN1hnfVYkA5+XqcmaoOet8pgjAdyL6dQWI20wmRIkoRCP0QWj0cMUsAqKMUWp3BvNJ1g6nFTERBu2tJAbOJjl6hOwTlyIiGyYaPLJlme/Dnzzzb4nx9PlL2CakfdBb82WKErISZcLNPA6+u6NiQdmYarWAWvNunHdNISmoVRmWMoKhwWAL1VStbC7W2uGL1Yi3kquHwemFHWMiIOKlpVYZeGCJsKkyE/OoKIbkBg2xxAwBjWkRrqsnRLa5CS0MopipY+wAGH16ByTe/eJoVrdyrlVMJehRfMwnksN5EQnVO62hKO7ehKhS31fozWsUaIQII3vHjTWP1VdfU5TxbUQcaQRCBPjSWIoXMR+Oza2IuYgB4dgX1DsdxHzWsJXYf1ETPdiG/zp5BIHFGIlcpk5OEcsocZRjXs202BccD1YCHrxfb865LbkUhz3NXRjqQMaM6izoAiEEe863doEgS6mZhcbygTkXYoZjTjhDXZtRVh2ckh35nt8BplY4wOGwOG1V3c7SSSUQcZYLHa8pmZmW2jckjBGWcSrS6ekGiH4pLZUi5PcwIeHaC3ZMizNCHQd9VhS+Ts6iyIT32FEy0UlAQplxFbt6pKMnbBTgF6doFwVEtxDQJhJqq9M2DiIjzXdwIbre3BKQ5LMCZzaxjgED6QNvSekbrOoqQAoj8JLhaIVEMSGd890u51pKNIQhAgH6p4+u9zTJpt7UbQlVCzl/lbMNkNsviWkAn8c8Ny/Myuj3Xr0mkCI8EyEjtwmKRI8XkxFEAopd380z2jc72oAEC3RMwBQ0oFLl3KSXp6paXtvEtnMcSh25BCixMQiFwiPS5M0CECiK1QrbqErICJWpmEEIFDIdWSklIgBQNpXU+Tr7+rnfnmr3Mxp6eb272/M3NE3hGX
t3VVLvZNPFw0WhFEe/y2bpk9ZbpVLMvNgQeMp6enp7pMQiKUmEpXFiQ4RE6jo1MAlKTETMobKRE5SjuCMAiLh4UQ+LuaRrDi071W0pydTnMtUytLOJlyJ0M69icuIlonOKRErMV0N0N3yIYyhyrQsEGroUrOKa2a0hFnxeXznKrma07kLuQiTjxFZlMyEzE9k1vx9res0pUePVVpYOAm4KTKvyHiZ3qblZVqFcPAu0+vrNnktnLOy1MO54D8OATYUfrsLVMKcgF2hDmSSbcjoomqbmaQFUgEEzU021nYvztdLhEdghJdC6JQuv45IFLiH3ivFSuEEJD/M3tCol2oNUMCo3SiW/JOeH79TzJXyOoIquOi+ua3s1QLFunIs6BdcQNvlA6Tu8Th4nGLrU0GpoP3p7vmX0j4gXrv3NKRglAQHmzOnuj0Y5CFwGBe+fMZ+P2JiVH7/6UMDs3toQIwjAQ5MXglvsSitbmYJN4wIAPvX8xyP9+gSwlRMaUZ8kzMYxo3FLlQfxQZPthqmUL/KboEDh9anKW6hAURjYcqlMm+yr77hqw3I5gjJGtx0JbvfshtbcQdFk9kJEBDk/8tkAG52NZ85qYnfLu6VOT8tT3FkV8qA1SZ/vEpIJ8ie9fYx/chx4x13Ol7Oju+C6F9Ppp+Unwpl1wgsJjau1Q7HQ+qUyv+4HXpEJO7ogAMdhl4dO26x+GgIFVC6e6zTPRY2QJch5wAEAGClnsEbD4Xn0sJ4IGEc3u5+du7uY9nrYi+7l65YR8wneDuZedctPgjrWzDEh7sp9WUYDVCIiEiG6mMnH84LuZhWY9XK26RG6oeeoG5w3YEFSqIdYN9ns68mp2IFfkOwGAzTRrFnCV5M5OQedt8WWdXDNZy0p9Pxux3p2ZWHeG+FFC+rCICNh/ERB1KrRlywNovTEmrxeDxrNLzxex4VUmNWTPWOsy/XAQ2ZhcREKTQVt0dLu6PzN3Fr5FAO+WrlagSCS2yczBrXeWodHpy68lKXsmPFujVTANMGlB1roZC/0EAgIjEzPWsBVV/CL/izSvuMfjxwZGRz3MzLr3/kV9sfk33bXYjhX7qFtSwD5/P9ZvhsCDTQYF2WURrLPrqQCM6U/DeN65gJtpJBgT+8TEQsKgWldSBDu29xnIrMzKZUCT4vwz+KP0l+kU7B038U+dyBWG7f3p/fnpDiOetRxwgoL4Mcnh+eQ6A+klYcEHq6hPcQ4zelQrA+bPDJdGYZKKrxMREmBqT9wwWRoHDqL6UKy27MWqHIACRJE31r3AAHnQEB0hp6CRJWEwcr48768k5nuvVr/weqCcOhlAawdj9mVEgSQkHrHzEWACEBnwZ7ZA62wQz5gIMgKUsPRsm61UIqAgIAw8NAMf1yDqxBTIuEIx+sqim67X8ijVSx5tFshMICbeMuoM599mEnoLBTjMuuX8T4mXwwJnoI7EYzaHohDoUKkRG3VSMUr8Is3r08jhPpxCnKzSMX/E6epjO9G2HziO1IiVId3ACgXfagDPFL6Ln9fZ75qD578IHDbbSKt25j+y8IREJELRToWGejP+lAG+/9W67/ecg3Lg4V7lyTa7iw9XoLYpqw8x4+qubZtcu0al9p5f+s8SeAHOMPPR1RM6isqbQqcGlixetCwY8ry8iCN/NIa81LWbdU1h6iyG/A4vukFmKm5e/rW3OviLG99b5wMq6oUdIQr1eF/FiNkCKndFPlI9Qx0J05ir8pB1jaxDvEDILCl8yioJo2TuklnjEB/a4DyBtz74zzh75B+ncNEA22XSELkUtkfamEmbpDLDAc5GCNPDqu0Tqqb+izyw0yDpqkfDfzg4DWVkpjJB4JoQFhrSqItoROr7RZJEb80/+XpvvCJ9+BFAhq7DjiYEhKnbyzbOxcf0v7+pfsBnsw8Tp8Ad3uQz+YR/0Xtdfbu2l/Y7FoUY1bcqiKupSBJ/2ZUJ8yabmw3w2usi3ZrTQOc59Wu//SPYbJ28Ml0UEsl0U
ETLRVqrM3Bk6G/hJI8YiRTrec0tZGmngl4zkkzvo5iczetWAqW7lbTSfci3qZNtFrc2xbkr7OFjI9BI2c1JH/yiVw0PGnRJWom2ZcbUclpBVy0pgISBJOyVIJBQxSJJkKp86eotJOmktARErLbDVKAQMhNzRunOFqcSz1kwauTInD/Vu7laLLBk8tVNkCEm00m0UbQDoDKCGx3Pi36b3zj/lKhBgmQEoCGEtQtCunCTpyKR/9fXb2KQ/dJb2zjhMgReIGpAgFoSY5OhSokdZMkLQyAjSI7z73U4b6pXsWBA2gC0pKJbkBsE2DYAAFBzKt/rVXWZGnqUCBNjiA6I1oX7Kixau1KTTmE61ZbqLJkZ9SqEZcK6hRCCEmmiiJx3MKyHTjkuFIDDMMVZ04ojNNIQyBuA96cKjkodiogY4RnFjTEAKAIQRDGCIECEJNwkz/zhgAgCBFqxYQmIlpZpwkqRQk0RiUiBqBABFAbQVgbAAAAbYwKDHRtDYAxjYwbaYBDEEDcf5P7NCkTEwLSRMptMW6X2JXF9/pH5tSKQKuZUw4bpsTsVlg7XDXuiK40STTbb45q2DHuWM5cbb0SSvei7xDI6sbQ8uCrY5SxhJsxuyZI2m8aKIkVcyjqiwGnXiZkTehG2JtJswVFjEo1Gs0bgC/vxEmKGIIPdyOPWP/OFtQG91mPOi67JKUnl1VTWl4VTH36t7yO5BcHelq/49hCXSqZjWT+K4ifCzJZii/5E+vSB5s47yXi++5XD9il9Ncn5cChlo0EUPe6d3GRUJZinRKO/hwvvifXHUXrws1Y8+jcVuMHe2ecxEbxaFPkqlYE/O+WgZK1z8zaaaSWKHto+5oedlEFC8hKVXP77K9vswOMVUaD62cGT3Tp3Nv7Dw5LITyyNwQY5OBZyYqXZmXux+0oHX2vFTxLPMugtpbJJ9rdlF7GTnIWfTSRNlQp1qoiBzgh4rVzmHKJxjtoGPUNpU5zdvFOMKMUpVSqjMTWTPvu8YqTzfM3Apk74mQ7u0b0w8oucTTWU6Z6XRbidgzsta8YF6kPprzMhtuU8fuvDCoSpUygsmH2e688sjrwnXD8hO0PWZvFK53RTsmnxWpb0WlMSBOF9vXIlHjd6gIvjtiXk+tNZpHb1i3p4Uj9lrPuidBqd3dachR5GIP9u087JmcexHH+JiEnkZVXyZErt9p1votDTzScNWu5+fapgowuj0JWVxVEC3HleA53TOcoWDyeqWzt9OTWExd9DzyRPF2EvPuZnwfqtz3FfJvljeES+1g2uMZKLnd+iFCCeNxBhjGuv7ODgmwz41scWjkLxMG5DLS+65XjIFZWUrF6k5H3pTeh7/DgCAfn96CBD98yUsAi3rq79eclfhv0dSva00v4LQottecvYF8zXlxMJZr86Qq0+drGp/TZpY3erXD999fmnOVXHV/NYhzCmL0S1KdfYQmIdMfkRb25a9DiAyrfkMlsyQR471qJXP0447zO++X9fVfDfQwA1xkcIveRdSLL/kPgm1DhAAKYC+Oz4iJNVZQxdUBvbHOzhPER4TCgdmQjZDPs3zXKZZDZGGvZGrG23e+53sLF3LusTBxSyntEFiTAeOwrSm/MwK5V17y+Z8zt3D3XHteTM8Za5x1q+/VFR97hJDXzF4oNXrSIpIn/Zx6myouOu6VfPEzHkyoGWAATG+ZWWWdY/L1VWMYn9zO7sNKwLsW01L1w1B40CfxsPptE9zKsJcgIEAEtnR9SmWBxCw5wVEnA+eCde5yPOj7pPbKsGd4x+ON4PrHFVRKrShxl83/UtffufnX2UbgXxqW1+skvcmbd88/c85WZfzWtW+R7ounWxQbaToIX6ZeJS9+tX/x1LFUH37opyqODkUi89bsXaXvmg0JRbjmW/ZasUabWVj0Gt+Gb3HSajZHYOIZsadmyQ9Jr1YsH+Vc7iTar1uxJN8ciqZglp6ntP4ayIsQAoFBCWEak65KqOdkzCFSHvkdcfNamSS
LH62s4NFdwxiNRALUEIkbCT/4u5IpwoSGxJaiCA==')).decode('utf-8').strip().split('\n')]).union(answer_s)
# print(len(valid_s), list(valid_s)[0])
## Set puzzle
## Main game/solver loop. Relies on names defined earlier in the file:
## game_mode, DEFAULT_ANSWER, answer_s/valid_s word sets, n_guess, KEYBOARD,
## FIRST_GUESS, N_LETTER, PRAISE_L and the helper functions called below.
## In SOLVER mode there is no hidden answer; the user types in the marks.
if game_mode != 'SOLVER': answer = DEFAULT_ANSWER or random.choice(list(answer_s))
if len(sys.argv) > 3: answer = sys.argv[3]  # optional CLI override of the answer
answer = answer.upper()
i_guess = 0
## Per-letter keyboard state for display; 0 = not tried yet.
keyboard_mark_d = {letter:0 for letter in KEYBOARD if 'A' <= letter <= 'Z'}
status = 0 ## 0=answer not found, 1=answer found, 2=results do not match possible answers
while i_guess < n_guess:
    i_guess += 1
    ## Get guess
    if game_mode != 'GAME':
        ## SOLVER/HINT modes: the program proposes a guess.
        if i_guess == 1:
            guess = FIRST_GUESS
            possible_s = valid_s.copy() ## set of words matching results so far
        else:
            ## Narrow the candidate set using the marks from the last guess.
            trim_possible(possible_s, guess, guess_mark_l)
            n_possible = len(possible_s)
            print('\nPossible words left: {}'.format(n_possible))
            if n_possible < 1:
                ## Contradictory marks: no word fits everything seen so far.
                status = 2
                break
            guess = choose_guess(possible_s)
    if game_mode in ['GAME', 'HINT']:
        ## Interactive modes: the human types the guess (overrides any hint).
        guess = input_guess(i_guess, n_guess, valid_s)
    ## Evaluate guess
    if game_mode == 'SOLVER':
        ## No known answer: the user reports the marks manually.
        guess_mark_l = input_evaluation(guess)
    else:
        guess_mark_l = evaluate_guess(guess, answer)
    display_result(guess, i_guess, guess_mark_l, keyboard_mark_d)
    if guess_mark_l == [3] * N_LETTER:
        ## All letters marked exact (3) -> solved.
        status = 1
        break
## Final message depends on how the loop ended.
if status == 0:
    display_message('Sorry, the answer is {}.'.format(answer))
elif status == 1:
    display_message('{}, got {} in {}!'.format(PRAISE_L[i_guess - 1], guess, i_guess))
else:
    display_message('No words fit the results above.')
## Wait until Esc is pressed or window is closed
## TODO: Disable backspace and letter keys
if is_gui: window.mainloop()
| 203.584192
| 40,098
| 0.917222
| 2,951
| 59,243
| 18.301593
| 0.546933
| 0.002222
| 0.003518
| 0.001426
| 0.030292
| 0.02457
| 0.020553
| 0.016442
| 0.01185
| 0.01048
| 0
| 0.13278
| 0.036392
| 59,243
| 290
| 40,099
| 204.286207
| 0.813285
| 0.030265
| 0
| 0.146341
| 0
| 0.009756
| 0.860809
| 0.847455
| 0
| 1
| 0
| 0.010345
| 0
| 1
| 0.043902
| false
| 0
| 0.019512
| 0
| 0.082927
| 0.043902
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d755e8829d171553e384347599ac1f24c5638345
| 153
|
py
|
Python
|
models/__init__.py
|
cadkins052/tab-vcr
|
ea713a6ef7ca54eb3123d8729dfc26dc604644c5
|
[
"MIT"
] | 17
|
2019-11-01T04:57:40.000Z
|
2021-04-17T14:49:47.000Z
|
models/__init__.py
|
cadkins052/tab-vcr
|
ea713a6ef7ca54eb3123d8729dfc26dc604644c5
|
[
"MIT"
] | 8
|
2019-11-05T10:18:12.000Z
|
2021-12-22T01:59:28.000Z
|
models/__init__.py
|
cadkins052/tab-vcr
|
ea713a6ef7ca54eb3123d8729dfc26dc604644c5
|
[
"MIT"
] | 8
|
2019-10-28T17:54:54.000Z
|
2021-12-08T02:21:00.000Z
|
from models import *
from models.base_res101_attribute import model
# You can add more models in this folder. like
# from models.base import modelbase
| 25.5
| 46
| 0.797386
| 24
| 153
| 5
| 0.666667
| 0.25
| 0.233333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.023438
| 0.163399
| 153
| 5
| 47
| 30.6
| 0.914063
| 0.509804
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
d116d9a532930d68aa02b348ff24612cec66158c
| 368
|
py
|
Python
|
deegan_client/test.py
|
tadeegan/eiger-application-aware
|
0c1faf6ed2de35d6913dfce7cb1badcc555cbef8
|
[
"Apache-2.0"
] | 3
|
2015-03-05T05:31:19.000Z
|
2015-08-19T10:02:42.000Z
|
deegan_client/test.py
|
tadeegan/eiger
|
0c1faf6ed2de35d6913dfce7cb1badcc555cbef8
|
[
"Apache-2.0"
] | null | null | null |
deegan_client/test.py
|
tadeegan/eiger
|
0c1faf6ed2de35d6913dfce7cb1badcc555cbef8
|
[
"Apache-2.0"
] | null | null | null |
import os
## Remote driver: tears down and relaunches the eiger datacenter scripts on a
## fixed host over SSH.
## NOTE(review): the SSH password and host IP are hardcoded in a shell string.
## Credentials should not live in the repo, and subprocess.run([...],
## shell=False) would be safer than os.system -- flagged, not changed, since
## altering the command would change this script's behavior.
os.system("sshpass -p Bs81tu93 ssh eiger@104.236.140.240 'cd eiger; bash deegan_burn_it_all.bash; bash deegan_datacenter_launcher.bash'")
# 'source ~/.bashrc; echo $local_token; cd eiger; source ~/.bashrc; echo $CASSANDRA_HOME; bash deegan_burn_it_all.bash; export max_mutation_delay_ms=200; ./deegan_datacenter_launcher.bash'")
#os.system("echo $PATH")
| 61.333333
| 193
| 0.774457
| 58
| 368
| 4.655172
| 0.586207
| 0.111111
| 0.103704
| 0.118519
| 0.17037
| 0.17037
| 0
| 0
| 0
| 0
| 0
| 0.057057
| 0.095109
| 368
| 6
| 194
| 61.333333
| 0.753754
| 0.57337
| 0
| 0
| 0
| 0.5
| 0.815789
| 0.506579
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0.5
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 7
|
d12b8e20d08f12a640eefc2ff01d3b29772e84b9
| 2,129
|
py
|
Python
|
scripts/pleftmatrix_gen.py
|
boviex/GBA-FlightSim
|
d16bf876dbe38b437f8d041ffb435cb6aa06a112
|
[
"MIT"
] | 7
|
2021-11-02T16:59:53.000Z
|
2021-12-25T20:14:39.000Z
|
scripts/pleftmatrix_gen.py
|
boviex/GBA-FlightSim
|
d16bf876dbe38b437f8d041ffb435cb6aa06a112
|
[
"MIT"
] | null | null | null |
scripts/pleftmatrix_gen.py
|
boviex/GBA-FlightSim
|
d16bf876dbe38b437f8d041ffb435cb6aa06a112
|
[
"MIT"
] | null | null | null |
# Pointer Finder finds pointers
# Given Rom, Pointer, New Pointer
# prints event format to stdout
import struct
def to_bytes(val):
#pack into little endian, signed halfwords
return struct.pack('<h', val)
def main():
with open('pleftmatrix.dmp', 'wb') as out:
# angle 0
for zdist in range(256):
bytevalue = to_bytes(- (zdist - (zdist>>2) - (zdist>>5)))
out.write(bytevalue)
#angle 1
for zdist in range(256):
bytevalue = to_bytes(- ((zdist>>1) - (zdist>>3)))
out.write(bytevalue)
# angle 2
for zdist in range(256):
bytevalue = to_bytes(0)
out.write(bytevalue)
#angle 3
for zdist in range(256):
bytevalue = to_bytes((zdist>>1) - (zdist>>3))
out.write(bytevalue)
# angle 4
for zdist in range(256):
bytevalue = to_bytes((zdist - (zdist>>2) - (zdist>>5)))
out.write(bytevalue)
#angle 5
for zdist in range(256):
bytevalue = to_bytes(zdist - (zdist>>4) - (zdist>>6))
out.write(bytevalue)
# angle 6
for zdist in range(256):
bytevalue = to_bytes(zdist)
out.write(bytevalue)
#angle 7
for zdist in range(256):
bytevalue = to_bytes((zdist - (zdist>>4) - (zdist>>6)))
out.write(bytevalue)
# angle 8
for zdist in range(256):
bytevalue = to_bytes((zdist - (zdist>>2) - (zdist>>5)))
out.write(bytevalue)
#angle 9
for zdist in range(256):
bytevalue = to_bytes(((zdist>>1) - (zdist>>3)))
out.write(bytevalue)
# angle 10
for zdist in range(256):
bytevalue = to_bytes(0)
out.write(bytevalue)
#angle 11
for zdist in range(256):
bytevalue = to_bytes(- ((zdist>>1) - (zdist>>3)))
out.write(bytevalue)
# angle 12
for zdist in range(256):
bytevalue = to_bytes(- (zdist - (zdist>>2) - (zdist>>5)))
out.write(bytevalue)
#angle 13
for zdist in range(256):
bytevalue = to_bytes(- (zdist - (zdist>>4) - (zdist>>6)))
out.write(bytevalue)
# angle 14
for zdist in range(256):
bytevalue = to_bytes(-zdist)
out.write(bytevalue)
#angle 15
for zdist in range(256):
bytevalue = to_bytes(- (zdist - (zdist>>4) - (zdist>>6)))
out.write(bytevalue)
print("done!")
if __name__ == '__main__':
main()
| 25.650602
| 60
| 0.635979
| 314
| 2,129
| 4.232484
| 0.194268
| 0.089541
| 0.120391
| 0.180587
| 0.817908
| 0.817908
| 0.817908
| 0.817908
| 0.817908
| 0.817908
| 0
| 0.056537
| 0.202442
| 2,129
| 83
| 61
| 25.650602
| 0.726148
| 0.121184
| 0
| 0.75
| 0
| 0
| 0.017288
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035714
| false
| 0
| 0.017857
| 0.017857
| 0.071429
| 0.017857
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
d13f158cbc74c2307f3a4cc54a62bc7bcad704fe
| 7,585
|
py
|
Python
|
runtime/bamboo-pipeline/test/eri_imp_test_use/tests/control/test_pause_and_resume_pipeline.py
|
wheel-w/bamboo-engine
|
482e89c039100142e3b400e6ac0c54df031b8156
|
[
"MIT"
] | 55
|
2021-09-07T11:50:35.000Z
|
2022-03-23T13:19:38.000Z
|
runtime/bamboo-pipeline/test/eri_imp_test_use/tests/control/test_pause_and_resume_pipeline.py
|
wheel-w/bamboo-engine
|
482e89c039100142e3b400e6ac0c54df031b8156
|
[
"MIT"
] | 64
|
2021-09-07T12:04:12.000Z
|
2022-03-29T03:47:18.000Z
|
runtime/bamboo-pipeline/test/eri_imp_test_use/tests/control/test_pause_and_resume_pipeline.py
|
wheel-w/bamboo-engine
|
482e89c039100142e3b400e6ac0c54df031b8156
|
[
"MIT"
] | 20
|
2021-09-07T11:52:08.000Z
|
2022-03-28T08:05:22.000Z
|
# -*- coding: utf-8 -*-
from bamboo_engine.builder import * # noqa
from bamboo_engine.engine import Engine
from pipeline.eri.runtime import BambooDjangoRuntime
from ..utils import * # noqa
def test_pause_and_resume_pipeline_with_nest_parallel():
    """Pause a running pipeline with nested parallel gateways mid-flight,
    then resume it and check everything finishes.

    Timing-sensitive: the sleep()/assert ordering below encodes the expected
    engine state timeline and must not be reordered.
    """
    parallel_count = 5
    start = EmptyStartEvent()
    pg_1 = ParallelGateway()
    pg_2 = ParallelGateway()
    ## Two groups of 3s sleep activities (one per parallel branch).
    sleep_group_1 = []
    for _ in range(parallel_count):
        act = ServiceActivity(component_code="sleep_timer")
        act.component.inputs.bk_timing = Var(type=Var.PLAIN, value=3)
        sleep_group_1.append(act)
    sleep_group_2 = []
    for _ in range(parallel_count):
        act = ServiceActivity(component_code="sleep_timer")
        act.component.inputs.bk_timing = Var(type=Var.PLAIN, value=3)
        sleep_group_2.append(act)
    ## Debug nodes that follow each sleep; they must NOT run while paused.
    acts_group_1 = [ServiceActivity(component_code="debug_node") for _ in range(parallel_count)]
    acts_group_2 = [ServiceActivity(component_code="debug_node") for _ in range(parallel_count)]
    cg_1 = ConvergeGateway()
    cg_2 = ConvergeGateway()
    end = EmptyEndEvent()
    for i in range(parallel_count):
        sleep_group_1[i].connect(acts_group_1[i])
        sleep_group_2[i].connect(acts_group_2[i])
    ## pg_1 fans out to pg_2 and sleep_group_1; pg_2 fans out to sleep_group_2.
    start.extend(pg_1).connect(pg_2, *sleep_group_1).to(pg_2).connect(*sleep_group_2).converge(cg_1).to(pg_1).converge(
        cg_2
    ).extend(end)
    pipeline = build_tree(start)
    runtime = BambooDjangoRuntime()
    engine = Engine(runtime)
    engine.run_pipeline(pipeline=pipeline, root_pipeline_data={})
    sleep(2)  # let the sleep activities start (they run for 3s)
    engine.pause_pipeline(pipeline["id"])
    finished = [start.id, pg_1.id, pg_2.id]
    finished.extend([a.id for a in sleep_group_1])
    finished.extend([a.id for a in sleep_group_2])
    sleep(6)  # in-flight sleeps complete, but nothing new may be scheduled
    state = runtime.get_state(pipeline["id"])
    assert state.name == states.SUSPENDED
    assert_all_finish(finished)
    ## Everything downstream of the sleeps must still be untouched.
    not_execute = [cg_1.id, cg_2.id, end.id]
    not_execute.extend([a.id for a in acts_group_1])
    not_execute.extend([a.id for a in acts_group_2])
    assert_not_executed(not_execute)
    engine.resume_pipeline(pipeline["id"])
    sleep(2)  # give the resumed pipeline time to drain
    finished.extend(not_execute)
    finished.append(pipeline["id"])
    assert_all_finish(finished)
def test_pause_and_resume_pipeline_with_nest_parallel_early_resume():
    """Same nested-parallel topology, but resume immediately after pausing
    (while the 5s sleep activities are still running); the whole pipeline
    should still finish.

    Timing-sensitive: sleep()/assert ordering encodes the expected timeline.
    """
    parallel_count = 5
    start = EmptyStartEvent()
    pg_1 = ParallelGateway()
    pg_2 = ParallelGateway()
    ## 5s sleeps here (vs 3s in the sibling test) so the pause/resume pair
    ## happens while the activities are still in flight.
    sleep_group_1 = []
    for _ in range(parallel_count):
        act = ServiceActivity(component_code="sleep_timer")
        act.component.inputs.bk_timing = Var(type=Var.PLAIN, value=5)
        sleep_group_1.append(act)
    sleep_group_2 = []
    for _ in range(parallel_count):
        act = ServiceActivity(component_code="sleep_timer")
        act.component.inputs.bk_timing = Var(type=Var.PLAIN, value=5)
        sleep_group_2.append(act)
    acts_group_1 = [ServiceActivity(component_code="debug_node") for _ in range(parallel_count)]
    acts_group_2 = [ServiceActivity(component_code="debug_node") for _ in range(parallel_count)]
    cg_1 = ConvergeGateway()
    cg_2 = ConvergeGateway()
    end = EmptyEndEvent()
    for i in range(parallel_count):
        sleep_group_1[i].connect(acts_group_1[i])
        sleep_group_2[i].connect(acts_group_2[i])
    start.extend(pg_1).connect(pg_2, *sleep_group_1).to(pg_2).connect(*sleep_group_2).converge(cg_1).to(pg_1).converge(
        cg_2
    ).extend(end)
    pipeline = build_tree(start)
    runtime = BambooDjangoRuntime()
    engine = Engine(runtime)
    engine.run_pipeline(pipeline=pipeline, root_pipeline_data={})
    sleep(2)  # activities started but not finished
    engine.pause_pipeline(pipeline["id"])
    state = runtime.get_state(pipeline["id"])
    assert state.name == states.SUSPENDED
    engine.resume_pipeline(pipeline["id"])  # resume before anything completes
    ## After early resume, every node (including the pipeline itself) is
    ## expected to finish normally.
    finished = [start.id, pg_1.id, pg_2.id]
    finished.extend([a.id for a in sleep_group_1])
    finished.extend([a.id for a in sleep_group_2])
    finished.extend([cg_1.id, cg_2.id, end.id])
    finished.extend([a.id for a in acts_group_1])
    finished.extend([a.id for a in acts_group_2])
    finished.append(pipeline["id"])
    sleep(10)  # enough for the 5s sleeps plus scheduling overhead
    assert_all_finish(finished)
def test_pause_and_resume_pipeline_with_subprocess():
    """Pause a pipeline while a subprocess's sleep activity is running;
    the subprocess stays RUNNING, downstream nodes stay unexecuted, and
    resuming finishes everything.

    Timing-sensitive: sleep()/assert ordering encodes the expected timeline.
    """
    ## Inner subprocess: start -> 3s sleep -> end.
    subproc_start = EmptyStartEvent()
    subproc_act = ServiceActivity(component_code="sleep_timer")
    subproc_act.component.inputs.bk_timing = Var(type=Var.PLAIN, value=3)
    subproc_end = EmptyEndEvent()
    subproc_start.extend(subproc_act).extend(subproc_end)
    ## Outer pipeline: start -> subprocess -> end.
    start = EmptyStartEvent()
    subproc = SubProcess(start=subproc_start)
    end = EmptyEndEvent()
    start.extend(subproc).extend(end)
    pipeline = build_tree(start)
    runtime = BambooDjangoRuntime()
    engine = Engine(runtime)
    engine.run_pipeline(pipeline=pipeline, root_pipeline_data={})
    sleep(2)  # the 3s sleep activity is in flight
    engine.pause_pipeline(pipeline["id"])
    sleep(6)  # in-flight sleep completes; nothing new may run
    assert_all_finish([start.id, subproc_start.id, subproc_act.id])
    state = runtime.get_state(pipeline["id"])
    assert state.name == states.SUSPENDED
    assert_not_executed([subproc_end.id, end.id])
    assert_all_running([subproc.id])  # subprocess itself is still open
    engine.resume_pipeline(pipeline["id"])
    sleep(2)  # let the resumed pipeline drain
    assert_all_finish([pipeline["id"], start.id, subproc.id, end.id, subproc_start.id, subproc_act.id, subproc_end.id])
def test_pause_and_resume_pipeline_with_subprocess_has_parallel():
    """Pause a pipeline whose subprocess contains nested parallel gateways;
    the subprocess stays RUNNING while paused, and resuming finishes all
    nodes including the parent pipeline.

    Timing-sensitive: sleep()/assert ordering encodes the expected timeline.
    """
    parallel_count = 5
    ## This nested-parallel topology (start..end) becomes the subprocess body.
    start = EmptyStartEvent()
    pg_1 = ParallelGateway()
    pg_2 = ParallelGateway()
    sleep_group_1 = []
    for _ in range(parallel_count):
        act = ServiceActivity(component_code="sleep_timer")
        act.component.inputs.bk_timing = Var(type=Var.PLAIN, value=3)
        sleep_group_1.append(act)
    sleep_group_2 = []
    for _ in range(parallel_count):
        act = ServiceActivity(component_code="sleep_timer")
        act.component.inputs.bk_timing = Var(type=Var.PLAIN, value=3)
        sleep_group_2.append(act)
    acts_group_1 = [ServiceActivity(component_code="debug_node") for _ in range(parallel_count)]
    acts_group_2 = [ServiceActivity(component_code="debug_node") for _ in range(parallel_count)]
    cg_1 = ConvergeGateway()
    cg_2 = ConvergeGateway()
    end = EmptyEndEvent()
    for i in range(parallel_count):
        sleep_group_1[i].connect(acts_group_1[i])
        sleep_group_2[i].connect(acts_group_2[i])
    start.extend(pg_1).connect(pg_2, *sleep_group_1).to(pg_2).connect(*sleep_group_2).converge(cg_1).to(pg_1).converge(
        cg_2
    ).extend(end)
    ## Parent pipeline: start -> subprocess(nested topology) -> end.
    parent_start = EmptyStartEvent()
    subproc = SubProcess(start=start)
    parent_end = EmptyEndEvent()
    parent_start.extend(subproc).extend(parent_end)
    pipeline = build_tree(parent_start)
    runtime = BambooDjangoRuntime()
    engine = Engine(runtime)
    engine.run_pipeline(pipeline=pipeline, root_pipeline_data={})
    sleep(2)  # sleeps started inside the subprocess
    engine.pause_pipeline(pipeline["id"])
    sleep(10)  # in-flight sleeps complete; nothing new may be scheduled
    finished = [parent_start.id, start.id, pg_1.id, pg_2.id]
    finished.extend([a.id for a in sleep_group_1])
    finished.extend([a.id for a in sleep_group_2])
    state = runtime.get_state(pipeline["id"])
    assert state.name == states.SUSPENDED
    assert_all_finish(finished)
    assert_all_running([subproc.id])  # subprocess node remains open
    not_execute = [cg_1.id, cg_2.id, end.id, parent_end.id]
    not_execute.extend([a.id for a in acts_group_1])
    not_execute.extend([a.id for a in acts_group_2])
    assert_not_executed(not_execute)
    engine.resume_pipeline(pipeline["id"])
    sleep(2)  # let the resumed pipeline drain
    finished.extend(not_execute)
    finished.append(pipeline["id"])
    assert_all_finish(finished)
| 30.584677
| 119
| 0.704417
| 1,059
| 7,585
| 4.746931
| 0.083097
| 0.059678
| 0.032823
| 0.059678
| 0.882435
| 0.853392
| 0.845236
| 0.827332
| 0.813805
| 0.790929
| 0
| 0.018269
| 0.177324
| 7,585
| 247
| 120
| 30.708502
| 0.78734
| 0.004087
| 0
| 0.833333
| 0
| 0
| 0.022384
| 0
| 0
| 0
| 0
| 0
| 0.091954
| 1
| 0.022989
| false
| 0
| 0.022989
| 0
| 0.045977
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d1734dc4e60e18781b20c52c259f749eeadb5f14
| 105
|
py
|
Python
|
Utils/Pruebas-Pytorch/pytorch_utils/__init__.py
|
LautaroEst/cs231n-Computer-Vision
|
f353a3d376d452dd1f9ef8979ddc96de91a45605
|
[
"MIT"
] | null | null | null |
Utils/Pruebas-Pytorch/pytorch_utils/__init__.py
|
LautaroEst/cs231n-Computer-Vision
|
f353a3d376d452dd1f9ef8979ddc96de91a45605
|
[
"MIT"
] | null | null | null |
Utils/Pruebas-Pytorch/pytorch_utils/__init__.py
|
LautaroEst/cs231n-Computer-Vision
|
f353a3d376d452dd1f9ef8979ddc96de91a45605
|
[
"MIT"
] | null | null | null |
from pytorch_utils.Datasets import *
from pytorch_utils.utils import *
from pytorch_utils.Models import *
| 35
| 36
| 0.838095
| 15
| 105
| 5.666667
| 0.4
| 0.388235
| 0.564706
| 0.517647
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.104762
| 105
| 3
| 37
| 35
| 0.904255
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
66f82528fdff5e646cf424e9f8f67fda8a16c534
| 176,876
|
py
|
Python
|
fairdiplomacy/models/consts.py
|
RomanGaraev/diplomacy_searchbot
|
bf3f38e5a68bfd3c6fa58e47351fcae3eed88557
|
[
"MIT"
] | 32
|
2021-05-04T17:05:19.000Z
|
2022-03-21T07:56:53.000Z
|
fairdiplomacy/models/consts.py
|
RomanGaraev/diplomacy_searchbot
|
bf3f38e5a68bfd3c6fa58e47351fcae3eed88557
|
[
"MIT"
] | 3
|
2022-01-22T19:44:10.000Z
|
2022-03-02T23:20:52.000Z
|
fairdiplomacy/models/consts.py
|
facebookresearch/diplomacy_searchbot
|
44d6f3272be7567060ba7d0e41f4e44b1bb8b5ca
|
[
"MIT"
] | 10
|
2021-05-07T11:51:29.000Z
|
2022-02-18T18:29:57.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import List
import numpy as np
from .preprocess_adjacency import preprocess_adjacency
LOCS = [
"YOR",
"EDI",
"LON",
"LVP",
"NTH",
"WAL",
"CLY",
"NWG",
"ENG",
"IRI",
"NAO",
"BEL",
"DEN",
"HEL",
"HOL",
"NWY",
"SKA",
"BAR",
"BRE",
"MAO",
"PIC",
"BUR",
"RUH",
"BAL",
"KIE",
"SWE",
"FIN",
"STP",
"STP/NC",
"GAS",
"PAR",
"NAF",
"POR",
"SPA",
"SPA/NC",
"SPA/SC",
"WES",
"MAR",
"MUN",
"BER",
"BOT",
"LVN",
"PRU",
"STP/SC",
"MOS",
"TUN",
"LYO",
"TYS",
"PIE",
"BOH",
"SIL",
"TYR",
"WAR",
"SEV",
"UKR",
"ION",
"TUS",
"NAP",
"ROM",
"VEN",
"GAL",
"VIE",
"TRI",
"ARM",
"BLA",
"RUM",
"ADR",
"AEG",
"ALB",
"APU",
"EAS",
"GRE",
"BUD",
"SER",
"ANK",
"SMY",
"SYR",
"BUL",
"BUL/EC",
"CON",
"BUL/SC",
]
LOC_TYPES = {
"ADR": "WATER",
"AEG": "WATER",
"ALB": "COAST",
"ANK": "COAST",
"APU": "COAST",
"ARM": "COAST",
"BAL": "WATER",
"BAR": "WATER",
"BEL": "COAST",
"BER": "COAST",
"BLA": "WATER",
"BOH": "LAND",
"BOT": "WATER",
"BRE": "COAST",
"BUD": "LAND",
"BUL/EC": "COAST",
"BUL/SC": "COAST",
"BUL": "COAST",
"BUR": "LAND",
"CLY": "COAST",
"CON": "COAST",
"DEN": "COAST",
"EAS": "WATER",
"EDI": "COAST",
"ENG": "WATER",
"FIN": "COAST",
"GAL": "LAND",
"GAS": "COAST",
"GRE": "COAST",
"HEL": "WATER",
"HOL": "COAST",
"ION": "WATER",
"IRI": "WATER",
"KIE": "COAST",
"LON": "COAST",
"LVN": "COAST",
"LVP": "COAST",
"LYO": "WATER",
"MAO": "WATER",
"MAR": "COAST",
"MOS": "LAND",
"MUN": "LAND",
"NAF": "COAST",
"NAO": "WATER",
"NAP": "COAST",
"NWY": "COAST",
"NTH": "WATER",
"NWG": "WATER",
"PAR": "LAND",
"PIC": "COAST",
"PIE": "COAST",
"POR": "COAST",
"PRU": "COAST",
"ROM": "COAST",
"RUH": "LAND",
"RUM": "COAST",
"SER": "LAND",
"SEV": "COAST",
"SIL": "LAND",
"SKA": "WATER",
"SMY": "COAST",
"SPA/NC": "COAST",
"SPA/SC": "COAST",
"SPA": "COAST",
"STP/NC": "COAST",
"STP/SC": "COAST",
"STP": "COAST",
"SWE": "COAST",
"SYR": "COAST",
"TRI": "COAST",
"TUN": "COAST",
"TUS": "COAST",
"TYR": "LAND",
"TYS": "WATER",
"UKR": "LAND",
"VEN": "COAST",
"VIE": "LAND",
"WAL": "COAST",
"WAR": "LAND",
"WES": "WATER",
"YOR": "COAST",
"SWI": "SHUT",
}
POWERS = ["AUSTRIA", "ENGLAND", "FRANCE", "GERMANY", "ITALY", "RUSSIA", "TURKEY"]
POWER2IDX = {v: k for k, v in enumerate(POWERS)}
SEASONS = ["SPRING", "FALL", "WINTER"]
MAX_SEQ_LEN = 17 # can't have 18 orders in one phase or you've already won
N_SCS = 34 # number of supply centers
RAW_ADJACENCY_MATRIX = [
[
0,
1,
1,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
1,
0,
0,
1,
1,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
1,
0,
0,
0,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
1,
1,
0,
0,
0,
1,
1,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
1,
1,
1,
0,
0,
0,
0,
1,
1,
0,
0,
1,
1,
1,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
1,
0,
1,
1,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
1,
0,
1,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
1,
0,
0,
1,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
1,
0,
1,
1,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
1,
0,
1,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
1,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
1,
0,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
1,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
1,
1,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
1,
0,
0,
0,
1,
1,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
1,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
1,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
1,
0,
0,
1,
1,
0,
0,
0,
0,
1,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
1,
0,
1,
0,
0,
1,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
1,
1,
1,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
1,
0,
1,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
1,
0,
],
]
ADJACENCY_MATRIX = preprocess_adjacency(np.array(RAW_ADJACENCY_MATRIX))
MASTER_ALIGNMENTS = np.array(
[
[
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
],
]
)
# Three-letter province codes for home supply centers that lie on a coast.
# Presumably Diplomacy map abbreviations — note St. Petersburg appears both
# as the plain province ("STP") and as its two split coasts ("STP/NC",
# "STP/SC"); TODO confirm against the map/adjacency definitions above.
COASTAL_HOME_SCS = [
    "TRI",
    "EDI",
    "LVP",
    "LON",
    "BRE",
    "MAR",
    "BER",
    "KIE",
    "NAP",
    "ROM",
    "VEN",
    "SEV",
    "STP",
    "STP/NC",
    "STP/SC",
    "ANK",
    "CON",
    "SMY",
]
# Large-magnitude negative value for masking logits (name suggests it is
# added to disallowed entries so softmax drives them to ~0).  Kept finite
# rather than -inf — presumably to avoid NaN propagation; confirm at the
# call sites that consume this constant.
LOGIT_MASK_VAL = -1e8
| 12.949411
| 81
| 0.082826
| 13,513
| 176,876
| 1.082883
| 0.01258
| 1.66227
| 2.42042
| 3.140299
| 0.89674
| 0.89674
| 0.896672
| 0.896672
| 0.896672
| 0.896672
| 0
| 0.45054
| 0.835224
| 176,876
| 13,658
| 82
| 12.950359
| 0.051535
| 0.001408
| 0
| 0.975896
| 0
| 0
| 0.005888
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.00022
| 0
| 0.00022
| 0
| 0
| 0
| 1
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 14
|
0f20dee66bf89e03b7bc9335bb238009174bb09d
| 55,780
|
py
|
Python
|
influxdb_client/service/checks_service.py
|
bonitoo-io/influxdb-client-python
|
465476b33648ba399a8f3e13d8780f7b3fe51950
|
[
"MIT"
] | 1
|
2019-09-06T10:06:09.000Z
|
2019-09-06T10:06:09.000Z
|
influxdb_client/service/checks_service.py
|
bonitoo-io/influxdb-client-python
|
465476b33648ba399a8f3e13d8780f7b3fe51950
|
[
"MIT"
] | 5
|
2019-08-06T04:58:58.000Z
|
2019-09-05T09:09:40.000Z
|
influxdb_client/service/checks_service.py
|
bonitoo-io/influxdb-client-python
|
465476b33648ba399a8f3e13d8780f7b3fe51950
|
[
"MIT"
] | 1
|
2019-08-05T05:46:55.000Z
|
2019-08-05T05:46:55.000Z
|
# coding: utf-8
"""
InfluxDB OSS API Service.
The InfluxDB v2 API provides a programmatic interface for all interactions with InfluxDB. Access the InfluxDB API using the `/api/v2/` endpoint. # noqa: E501
OpenAPI spec version: 2.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
from influxdb_client.service._base_service import _BaseService
class ChecksService(_BaseService):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None): # noqa: E501,D401,D403
"""ChecksService - a operation defined in OpenAPI."""
if api_client is None:
raise ValueError("Invalid value for `api_client`, must be defined.")
self.api_client = api_client
def create_check(self, post_check, **kwargs): # noqa: E501,D401,D403
"""Add new check.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_check(post_check, async_req=True)
>>> result = thread.get()
:param async_req bool
:param PostCheck post_check: Check to create (required)
:return: Check
If the method is called asynchronously,
returns the request thread.
""" # noqa: E501
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_check_with_http_info(post_check, **kwargs) # noqa: E501
else:
(data) = self.create_check_with_http_info(post_check, **kwargs) # noqa: E501
return data
def create_check_with_http_info(self, post_check, **kwargs): # noqa: E501,D401,D403
"""Add new check.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_check_with_http_info(post_check, async_req=True)
>>> result = thread.get()
:param async_req bool
:param PostCheck post_check: Check to create (required)
:return: Check
If the method is called asynchronously,
returns the request thread.
""" # noqa: E501
local_var_params, path_params, query_params, header_params, body_params = \
self._create_check_prepare(post_check, **kwargs)
return self.api_client.call_api(
'/api/v2/checks', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=[],
files={},
response_type='Check', # noqa: E501
auth_settings=[],
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats={},
urlopen_kw=kwargs.get('urlopen_kw', None))
async def create_check_async(self, post_check, **kwargs): # noqa: E501,D401,D403
"""Add new check.
This method makes an asynchronous HTTP request.
:param async_req bool
:param PostCheck post_check: Check to create (required)
:return: Check
If the method is called asynchronously,
returns the request thread.
""" # noqa: E501
local_var_params, path_params, query_params, header_params, body_params = \
self._create_check_prepare(post_check, **kwargs)
return await self.api_client.call_api(
'/api/v2/checks', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=[],
files={},
response_type='Check', # noqa: E501
auth_settings=[],
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats={},
urlopen_kw=kwargs.get('urlopen_kw', None))
def _create_check_prepare(self, post_check, **kwargs): # noqa: E501,D401,D403
local_var_params = locals()
all_params = ['post_check'] # noqa: E501
self._check_operation_params('create_check', all_params, local_var_params)
# verify the required parameter 'post_check' is set
if ('post_check' not in local_var_params or
local_var_params['post_check'] is None):
raise ValueError("Missing the required parameter `post_check` when calling `create_check`") # noqa: E501
path_params = {}
query_params = []
header_params = {}
body_params = None
if 'post_check' in local_var_params:
body_params = local_var_params['post_check']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
return local_var_params, path_params, query_params, header_params, body_params
def delete_checks_id(self, check_id, **kwargs): # noqa: E501,D401,D403
"""Delete a check.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_checks_id(check_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str check_id: The check ID. (required)
:param str zap_trace_span: OpenTracing span context
:return: None
If the method is called asynchronously,
returns the request thread.
""" # noqa: E501
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_checks_id_with_http_info(check_id, **kwargs) # noqa: E501
else:
(data) = self.delete_checks_id_with_http_info(check_id, **kwargs) # noqa: E501
return data
def delete_checks_id_with_http_info(self, check_id, **kwargs): # noqa: E501,D401,D403
"""Delete a check.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_checks_id_with_http_info(check_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str check_id: The check ID. (required)
:param str zap_trace_span: OpenTracing span context
:return: None
If the method is called asynchronously,
returns the request thread.
""" # noqa: E501
local_var_params, path_params, query_params, header_params, body_params = \
self._delete_checks_id_prepare(check_id, **kwargs)
return self.api_client.call_api(
'/api/v2/checks/{checkID}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=[],
files={},
response_type=None, # noqa: E501
auth_settings=[],
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats={},
urlopen_kw=kwargs.get('urlopen_kw', None))
async def delete_checks_id_async(self, check_id, **kwargs): # noqa: E501,D401,D403
"""Delete a check.
This method makes an asynchronous HTTP request.
:param async_req bool
:param str check_id: The check ID. (required)
:param str zap_trace_span: OpenTracing span context
:return: None
If the method is called asynchronously,
returns the request thread.
""" # noqa: E501
local_var_params, path_params, query_params, header_params, body_params = \
self._delete_checks_id_prepare(check_id, **kwargs)
return await self.api_client.call_api(
'/api/v2/checks/{checkID}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=[],
files={},
response_type=None, # noqa: E501
auth_settings=[],
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats={},
urlopen_kw=kwargs.get('urlopen_kw', None))
def _delete_checks_id_prepare(self, check_id, **kwargs): # noqa: E501,D401,D403
local_var_params = locals()
all_params = ['check_id', 'zap_trace_span'] # noqa: E501
self._check_operation_params('delete_checks_id', all_params, local_var_params)
# verify the required parameter 'check_id' is set
if ('check_id' not in local_var_params or
local_var_params['check_id'] is None):
raise ValueError("Missing the required parameter `check_id` when calling `delete_checks_id`") # noqa: E501
path_params = {}
if 'check_id' in local_var_params:
path_params['checkID'] = local_var_params['check_id'] # noqa: E501
query_params = []
header_params = {}
if 'zap_trace_span' in local_var_params:
header_params['Zap-Trace-Span'] = local_var_params['zap_trace_span'] # noqa: E501
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
return local_var_params, path_params, query_params, header_params, body_params
def delete_checks_id_labels_id(self, check_id, label_id, **kwargs): # noqa: E501,D401,D403
"""Delete label from a check.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_checks_id_labels_id(check_id, label_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str check_id: The check ID. (required)
:param str label_id: The ID of the label to delete. (required)
:param str zap_trace_span: OpenTracing span context
:return: None
If the method is called asynchronously,
returns the request thread.
""" # noqa: E501
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_checks_id_labels_id_with_http_info(check_id, label_id, **kwargs) # noqa: E501
else:
(data) = self.delete_checks_id_labels_id_with_http_info(check_id, label_id, **kwargs) # noqa: E501
return data
def delete_checks_id_labels_id_with_http_info(self, check_id, label_id, **kwargs): # noqa: E501,D401,D403
"""Delete label from a check.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_checks_id_labels_id_with_http_info(check_id, label_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str check_id: The check ID. (required)
:param str label_id: The ID of the label to delete. (required)
:param str zap_trace_span: OpenTracing span context
:return: None
If the method is called asynchronously,
returns the request thread.
""" # noqa: E501
local_var_params, path_params, query_params, header_params, body_params = \
self._delete_checks_id_labels_id_prepare(check_id, label_id, **kwargs)
return self.api_client.call_api(
'/api/v2/checks/{checkID}/labels/{labelID}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=[],
files={},
response_type=None, # noqa: E501
auth_settings=[],
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats={},
urlopen_kw=kwargs.get('urlopen_kw', None))
async def delete_checks_id_labels_id_async(self, check_id, label_id, **kwargs): # noqa: E501,D401,D403
"""Delete label from a check.
This method makes an asynchronous HTTP request.
:param async_req bool
:param str check_id: The check ID. (required)
:param str label_id: The ID of the label to delete. (required)
:param str zap_trace_span: OpenTracing span context
:return: None
If the method is called asynchronously,
returns the request thread.
""" # noqa: E501
local_var_params, path_params, query_params, header_params, body_params = \
self._delete_checks_id_labels_id_prepare(check_id, label_id, **kwargs)
return await self.api_client.call_api(
'/api/v2/checks/{checkID}/labels/{labelID}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=[],
files={},
response_type=None, # noqa: E501
auth_settings=[],
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats={},
urlopen_kw=kwargs.get('urlopen_kw', None))
def _delete_checks_id_labels_id_prepare(self, check_id, label_id, **kwargs): # noqa: E501,D401,D403
local_var_params = locals()
all_params = ['check_id', 'label_id', 'zap_trace_span'] # noqa: E501
self._check_operation_params('delete_checks_id_labels_id', all_params, local_var_params)
# verify the required parameter 'check_id' is set
if ('check_id' not in local_var_params or
local_var_params['check_id'] is None):
raise ValueError("Missing the required parameter `check_id` when calling `delete_checks_id_labels_id`") # noqa: E501
# verify the required parameter 'label_id' is set
if ('label_id' not in local_var_params or
local_var_params['label_id'] is None):
raise ValueError("Missing the required parameter `label_id` when calling `delete_checks_id_labels_id`") # noqa: E501
path_params = {}
if 'check_id' in local_var_params:
path_params['checkID'] = local_var_params['check_id'] # noqa: E501
if 'label_id' in local_var_params:
path_params['labelID'] = local_var_params['label_id'] # noqa: E501
query_params = []
header_params = {}
if 'zap_trace_span' in local_var_params:
header_params['Zap-Trace-Span'] = local_var_params['zap_trace_span'] # noqa: E501
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
return local_var_params, path_params, query_params, header_params, body_params
def get_checks(self, org_id, **kwargs): # noqa: E501,D401,D403
"""List all checks.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_checks(org_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str org_id: Only show checks that belong to a specific organization ID. (required)
:param str zap_trace_span: OpenTracing span context
:param int offset:
:param int limit:
:return: Checks
If the method is called asynchronously,
returns the request thread.
""" # noqa: E501
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_checks_with_http_info(org_id, **kwargs) # noqa: E501
else:
(data) = self.get_checks_with_http_info(org_id, **kwargs) # noqa: E501
return data
def get_checks_with_http_info(self, org_id, **kwargs): # noqa: E501,D401,D403
"""List all checks.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_checks_with_http_info(org_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str org_id: Only show checks that belong to a specific organization ID. (required)
:param str zap_trace_span: OpenTracing span context
:param int offset:
:param int limit:
:return: Checks
If the method is called asynchronously,
returns the request thread.
""" # noqa: E501
local_var_params, path_params, query_params, header_params, body_params = \
self._get_checks_prepare(org_id, **kwargs)
return self.api_client.call_api(
'/api/v2/checks', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=[],
files={},
response_type='Checks', # noqa: E501
auth_settings=[],
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats={},
urlopen_kw=kwargs.get('urlopen_kw', None))
async def get_checks_async(self, org_id, **kwargs): # noqa: E501,D401,D403
"""List all checks.
This method makes an asynchronous HTTP request.
:param async_req bool
:param str org_id: Only show checks that belong to a specific organization ID. (required)
:param str zap_trace_span: OpenTracing span context
:param int offset:
:param int limit:
:return: Checks
If the method is called asynchronously,
returns the request thread.
""" # noqa: E501
local_var_params, path_params, query_params, header_params, body_params = \
self._get_checks_prepare(org_id, **kwargs)
return await self.api_client.call_api(
'/api/v2/checks', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=[],
files={},
response_type='Checks', # noqa: E501
auth_settings=[],
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats={},
urlopen_kw=kwargs.get('urlopen_kw', None))
def _get_checks_prepare(self, org_id, **kwargs): # noqa: E501,D401,D403
local_var_params = locals()
all_params = ['org_id', 'zap_trace_span', 'offset', 'limit'] # noqa: E501
self._check_operation_params('get_checks', all_params, local_var_params)
# verify the required parameter 'org_id' is set
if ('org_id' not in local_var_params or
local_var_params['org_id'] is None):
raise ValueError("Missing the required parameter `org_id` when calling `get_checks`") # noqa: E501
if 'offset' in local_var_params and local_var_params['offset'] < 0: # noqa: E501
raise ValueError("Invalid value for parameter `offset` when calling `get_checks`, must be a value greater than or equal to `0`") # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] > 100: # noqa: E501
raise ValueError("Invalid value for parameter `limit` when calling `get_checks`, must be a value less than or equal to `100`") # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `limit` when calling `get_checks`, must be a value greater than or equal to `1`") # noqa: E501
path_params = {}
query_params = []
if 'offset' in local_var_params:
query_params.append(('offset', local_var_params['offset'])) # noqa: E501
if 'limit' in local_var_params:
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'org_id' in local_var_params:
query_params.append(('orgID', local_var_params['org_id'])) # noqa: E501
header_params = {}
if 'zap_trace_span' in local_var_params:
header_params['Zap-Trace-Span'] = local_var_params['zap_trace_span'] # noqa: E501
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
return local_var_params, path_params, query_params, header_params, body_params
def get_checks_id(self, check_id, **kwargs):  # noqa: E501,D401,D403
    """Retrieve a check.

    Synchronous by default; pass ``async_req=True`` to get back the
    request thread instead of the decoded response.

    >>> thread = api.get_checks_id(check_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str check_id: The check ID. (required)
    :param str zap_trace_span: OpenTracing span context
    :return: Check
    """  # noqa: E501
    # Always request the payload only; both the sync and async paths
    # delegate to the same *_with_http_info call.
    kwargs['_return_http_data_only'] = True
    return self.get_checks_id_with_http_info(check_id, **kwargs)  # noqa: E501
def get_checks_id_with_http_info(self, check_id, **kwargs):  # noqa: E501,D401,D403
    """Retrieve a check.

    Synchronous by default; pass ``async_req=True`` to get back the
    request thread instead of the response tuple.

    >>> thread = api.get_checks_id_with_http_info(check_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str check_id: The check ID. (required)
    :param str zap_trace_span: OpenTracing span context
    :return: Check
    """  # noqa: E501
    params, path, query, headers, body = \
        self._get_checks_id_prepare(check_id, **kwargs)

    return self.api_client.call_api(
        '/api/v2/checks/{checkID}', 'GET',
        path, query, headers,
        body=body,
        post_params=[],
        files={},
        response_type='Check',  # noqa: E501
        auth_settings=[],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={},
        urlopen_kw=kwargs.get('urlopen_kw', None))
async def get_checks_id_async(self, check_id, **kwargs):  # noqa: E501,D401,D403
    """Retrieve a check.

    Coroutine variant: awaits the HTTP request.

    :param async_req bool
    :param str check_id: The check ID. (required)
    :param str zap_trace_span: OpenTracing span context
    :return: Check
    """  # noqa: E501
    params, path, query, headers, body = \
        self._get_checks_id_prepare(check_id, **kwargs)

    return await self.api_client.call_api(
        '/api/v2/checks/{checkID}', 'GET',
        path, query, headers,
        body=body,
        post_params=[],
        files={},
        response_type='Check',  # noqa: E501
        auth_settings=[],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={},
        urlopen_kw=kwargs.get('urlopen_kw', None))
def _get_checks_id_prepare(self, check_id, **kwargs):  # noqa: E501,D401,D403
    """Assemble the request pieces for the ``get_checks_id`` operation."""
    # Captured first so the mapping holds exactly `self`, `check_id` and
    # `kwargs`; _check_operation_params presumably flattens **kwargs into
    # this dict (later membership tests rely on that) -- TODO confirm.
    local_var_params = locals()

    all_params = ['check_id', 'zap_trace_span']  # noqa: E501

    self._check_operation_params('get_checks_id', all_params, local_var_params)
    # verify the required parameter 'check_id' is set
    if ('check_id' not in local_var_params or
            local_var_params['check_id'] is None):
        raise ValueError("Missing the required parameter `check_id` when calling `get_checks_id`")  # noqa: E501

    # `checkID` matches the camel-cased placeholder in the URL template.
    path_params = {}
    if 'check_id' in local_var_params:
        path_params['checkID'] = local_var_params['check_id']  # noqa: E501

    query_params = []

    header_params = {}
    if 'zap_trace_span' in local_var_params:
        header_params['Zap-Trace-Span'] = local_var_params['zap_trace_span']  # noqa: E501

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    return local_var_params, path_params, query_params, header_params, body_params
def get_checks_id_labels(self, check_id, **kwargs):  # noqa: E501,D401,D403
    """List all labels for a check.

    Synchronous by default; pass ``async_req=True`` to get back the
    request thread instead of the decoded response.

    >>> thread = api.get_checks_id_labels(check_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str check_id: The check ID. (required)
    :param str zap_trace_span: OpenTracing span context
    :return: LabelsResponse
    """  # noqa: E501
    # Always request the payload only; both the sync and async paths
    # delegate to the same *_with_http_info call.
    kwargs['_return_http_data_only'] = True
    return self.get_checks_id_labels_with_http_info(check_id, **kwargs)  # noqa: E501
def get_checks_id_labels_with_http_info(self, check_id, **kwargs):  # noqa: E501,D401,D403
    """List all labels for a check.

    Synchronous by default; pass ``async_req=True`` to get back the
    request thread instead of the response tuple.

    >>> thread = api.get_checks_id_labels_with_http_info(check_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str check_id: The check ID. (required)
    :param str zap_trace_span: OpenTracing span context
    :return: LabelsResponse
    """  # noqa: E501
    params, path, query, headers, body = \
        self._get_checks_id_labels_prepare(check_id, **kwargs)

    return self.api_client.call_api(
        '/api/v2/checks/{checkID}/labels', 'GET',
        path, query, headers,
        body=body,
        post_params=[],
        files={},
        response_type='LabelsResponse',  # noqa: E501
        auth_settings=[],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={},
        urlopen_kw=kwargs.get('urlopen_kw', None))
async def get_checks_id_labels_async(self, check_id, **kwargs):  # noqa: E501,D401,D403
    """List all labels for a check.

    Coroutine variant: awaits the HTTP request.

    :param async_req bool
    :param str check_id: The check ID. (required)
    :param str zap_trace_span: OpenTracing span context
    :return: LabelsResponse
    """  # noqa: E501
    params, path, query, headers, body = \
        self._get_checks_id_labels_prepare(check_id, **kwargs)

    return await self.api_client.call_api(
        '/api/v2/checks/{checkID}/labels', 'GET',
        path, query, headers,
        body=body,
        post_params=[],
        files={},
        response_type='LabelsResponse',  # noqa: E501
        auth_settings=[],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={},
        urlopen_kw=kwargs.get('urlopen_kw', None))
def _get_checks_id_labels_prepare(self, check_id, **kwargs):  # noqa: E501,D401,D403
    """Assemble the request pieces for the ``get_checks_id_labels`` operation."""
    # Captured first so the mapping holds exactly `self`, `check_id` and
    # `kwargs`; _check_operation_params presumably flattens **kwargs into
    # this dict (later membership tests rely on that) -- TODO confirm.
    local_var_params = locals()

    all_params = ['check_id', 'zap_trace_span']  # noqa: E501

    self._check_operation_params('get_checks_id_labels', all_params, local_var_params)
    # verify the required parameter 'check_id' is set
    if ('check_id' not in local_var_params or
            local_var_params['check_id'] is None):
        raise ValueError("Missing the required parameter `check_id` when calling `get_checks_id_labels`")  # noqa: E501

    # `checkID` matches the camel-cased placeholder in the URL template.
    path_params = {}
    if 'check_id' in local_var_params:
        path_params['checkID'] = local_var_params['check_id']  # noqa: E501

    query_params = []

    header_params = {}
    if 'zap_trace_span' in local_var_params:
        header_params['Zap-Trace-Span'] = local_var_params['zap_trace_span']  # noqa: E501

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    return local_var_params, path_params, query_params, header_params, body_params
def get_checks_id_query(self, check_id, **kwargs):  # noqa: E501,D401,D403
    """Retrieve a check query.

    Synchronous by default; pass ``async_req=True`` to get back the
    request thread instead of the decoded response.

    >>> thread = api.get_checks_id_query(check_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str check_id: The check ID. (required)
    :param str zap_trace_span: OpenTracing span context
    :return: FluxResponse
    """  # noqa: E501
    # Always request the payload only; both the sync and async paths
    # delegate to the same *_with_http_info call.
    kwargs['_return_http_data_only'] = True
    return self.get_checks_id_query_with_http_info(check_id, **kwargs)  # noqa: E501
def get_checks_id_query_with_http_info(self, check_id, **kwargs):  # noqa: E501,D401,D403
    """Retrieve a check query.

    Synchronous by default; pass ``async_req=True`` to get back the
    request thread instead of the response tuple.

    >>> thread = api.get_checks_id_query_with_http_info(check_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str check_id: The check ID. (required)
    :param str zap_trace_span: OpenTracing span context
    :return: FluxResponse
    """  # noqa: E501
    params, path, query, headers, body = \
        self._get_checks_id_query_prepare(check_id, **kwargs)

    return self.api_client.call_api(
        '/api/v2/checks/{checkID}/query', 'GET',
        path, query, headers,
        body=body,
        post_params=[],
        files={},
        response_type='FluxResponse',  # noqa: E501
        auth_settings=[],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={},
        urlopen_kw=kwargs.get('urlopen_kw', None))
async def get_checks_id_query_async(self, check_id, **kwargs):  # noqa: E501,D401,D403
    """Retrieve a check query.

    Coroutine variant: awaits the HTTP request.

    :param async_req bool
    :param str check_id: The check ID. (required)
    :param str zap_trace_span: OpenTracing span context
    :return: FluxResponse
    """  # noqa: E501
    params, path, query, headers, body = \
        self._get_checks_id_query_prepare(check_id, **kwargs)

    return await self.api_client.call_api(
        '/api/v2/checks/{checkID}/query', 'GET',
        path, query, headers,
        body=body,
        post_params=[],
        files={},
        response_type='FluxResponse',  # noqa: E501
        auth_settings=[],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={},
        urlopen_kw=kwargs.get('urlopen_kw', None))
def _get_checks_id_query_prepare(self, check_id, **kwargs):  # noqa: E501,D401,D403
    """Assemble the request pieces for the ``get_checks_id_query`` operation."""
    # Captured first so the mapping holds exactly `self`, `check_id` and
    # `kwargs`; _check_operation_params presumably flattens **kwargs into
    # this dict (later membership tests rely on that) -- TODO confirm.
    local_var_params = locals()

    all_params = ['check_id', 'zap_trace_span']  # noqa: E501

    self._check_operation_params('get_checks_id_query', all_params, local_var_params)
    # verify the required parameter 'check_id' is set
    if ('check_id' not in local_var_params or
            local_var_params['check_id'] is None):
        raise ValueError("Missing the required parameter `check_id` when calling `get_checks_id_query`")  # noqa: E501

    # `checkID` matches the camel-cased placeholder in the URL template.
    path_params = {}
    if 'check_id' in local_var_params:
        path_params['checkID'] = local_var_params['check_id']  # noqa: E501

    query_params = []

    header_params = {}
    if 'zap_trace_span' in local_var_params:
        header_params['Zap-Trace-Span'] = local_var_params['zap_trace_span']  # noqa: E501

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    return local_var_params, path_params, query_params, header_params, body_params
def patch_checks_id(self, check_id, check_patch, **kwargs):  # noqa: E501,D401,D403
    """Update a check.

    Synchronous by default; pass ``async_req=True`` to get back the
    request thread instead of the decoded response.

    >>> thread = api.patch_checks_id(check_id, check_patch, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str check_id: The check ID. (required)
    :param CheckPatch check_patch: Check update to apply (required)
    :param str zap_trace_span: OpenTracing span context
    :return: Check
    """  # noqa: E501
    # Always request the payload only; both the sync and async paths
    # delegate to the same *_with_http_info call.
    kwargs['_return_http_data_only'] = True
    return self.patch_checks_id_with_http_info(check_id, check_patch, **kwargs)  # noqa: E501
def patch_checks_id_with_http_info(self, check_id, check_patch, **kwargs):  # noqa: E501,D401,D403
    """Update a check.

    Synchronous by default; pass ``async_req=True`` to get back the
    request thread instead of the response tuple.

    >>> thread = api.patch_checks_id_with_http_info(check_id, check_patch, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str check_id: The check ID. (required)
    :param CheckPatch check_patch: Check update to apply (required)
    :param str zap_trace_span: OpenTracing span context
    :return: Check
    """  # noqa: E501
    params, path, query, headers, body = \
        self._patch_checks_id_prepare(check_id, check_patch, **kwargs)

    return self.api_client.call_api(
        '/api/v2/checks/{checkID}', 'PATCH',
        path, query, headers,
        body=body,
        post_params=[],
        files={},
        response_type='Check',  # noqa: E501
        auth_settings=[],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={},
        urlopen_kw=kwargs.get('urlopen_kw', None))
async def patch_checks_id_async(self, check_id, check_patch, **kwargs):  # noqa: E501,D401,D403
    """Update a check.

    Coroutine variant: awaits the HTTP request.

    :param async_req bool
    :param str check_id: The check ID. (required)
    :param CheckPatch check_patch: Check update to apply (required)
    :param str zap_trace_span: OpenTracing span context
    :return: Check
    """  # noqa: E501
    params, path, query, headers, body = \
        self._patch_checks_id_prepare(check_id, check_patch, **kwargs)

    return await self.api_client.call_api(
        '/api/v2/checks/{checkID}', 'PATCH',
        path, query, headers,
        body=body,
        post_params=[],
        files={},
        response_type='Check',  # noqa: E501
        auth_settings=[],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={},
        urlopen_kw=kwargs.get('urlopen_kw', None))
def _patch_checks_id_prepare(self, check_id, check_patch, **kwargs):  # noqa: E501,D401,D403
    """Assemble the request pieces for the ``patch_checks_id`` operation."""
    # Captured first so the mapping holds exactly `self`, `check_id`,
    # `check_patch` and `kwargs`; _check_operation_params presumably
    # flattens **kwargs into this dict (later membership tests rely on
    # that) -- TODO confirm.
    local_var_params = locals()

    all_params = ['check_id', 'check_patch', 'zap_trace_span']  # noqa: E501

    self._check_operation_params('patch_checks_id', all_params, local_var_params)
    # verify the required parameter 'check_id' is set
    if ('check_id' not in local_var_params or
            local_var_params['check_id'] is None):
        raise ValueError("Missing the required parameter `check_id` when calling `patch_checks_id`")  # noqa: E501
    # verify the required parameter 'check_patch' is set
    if ('check_patch' not in local_var_params or
            local_var_params['check_patch'] is None):
        raise ValueError("Missing the required parameter `check_patch` when calling `patch_checks_id`")  # noqa: E501

    # `checkID` matches the camel-cased placeholder in the URL template.
    path_params = {}
    if 'check_id' in local_var_params:
        path_params['checkID'] = local_var_params['check_id']  # noqa: E501

    query_params = []

    header_params = {}
    if 'zap_trace_span' in local_var_params:
        header_params['Zap-Trace-Span'] = local_var_params['zap_trace_span']  # noqa: E501

    # The patch document is sent as the JSON request body.
    body_params = None
    if 'check_patch' in local_var_params:
        body_params = local_var_params['check_patch']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    return local_var_params, path_params, query_params, header_params, body_params
def post_checks_id_labels(self, check_id, label_mapping, **kwargs):  # noqa: E501,D401,D403
    """Add a label to a check.

    Synchronous by default; pass ``async_req=True`` to get back the
    request thread instead of the decoded response.

    >>> thread = api.post_checks_id_labels(check_id, label_mapping, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str check_id: The check ID. (required)
    :param LabelMapping label_mapping: Label to add (required)
    :param str zap_trace_span: OpenTracing span context
    :return: LabelResponse
    """  # noqa: E501
    # Always request the payload only; both the sync and async paths
    # delegate to the same *_with_http_info call.
    kwargs['_return_http_data_only'] = True
    return self.post_checks_id_labels_with_http_info(check_id, label_mapping, **kwargs)  # noqa: E501
def post_checks_id_labels_with_http_info(self, check_id, label_mapping, **kwargs):  # noqa: E501,D401,D403
    """Add a label to a check.

    Synchronous by default; pass ``async_req=True`` to get back the
    request thread instead of the response tuple.

    >>> thread = api.post_checks_id_labels_with_http_info(check_id, label_mapping, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str check_id: The check ID. (required)
    :param LabelMapping label_mapping: Label to add (required)
    :param str zap_trace_span: OpenTracing span context
    :return: LabelResponse
    """  # noqa: E501
    params, path, query, headers, body = \
        self._post_checks_id_labels_prepare(check_id, label_mapping, **kwargs)

    return self.api_client.call_api(
        '/api/v2/checks/{checkID}/labels', 'POST',
        path, query, headers,
        body=body,
        post_params=[],
        files={},
        response_type='LabelResponse',  # noqa: E501
        auth_settings=[],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={},
        urlopen_kw=kwargs.get('urlopen_kw', None))
async def post_checks_id_labels_async(self, check_id, label_mapping, **kwargs):  # noqa: E501,D401,D403
    """Add a label to a check.

    Coroutine variant: awaits the HTTP request.

    :param async_req bool
    :param str check_id: The check ID. (required)
    :param LabelMapping label_mapping: Label to add (required)
    :param str zap_trace_span: OpenTracing span context
    :return: LabelResponse
    """  # noqa: E501
    params, path, query, headers, body = \
        self._post_checks_id_labels_prepare(check_id, label_mapping, **kwargs)

    return await self.api_client.call_api(
        '/api/v2/checks/{checkID}/labels', 'POST',
        path, query, headers,
        body=body,
        post_params=[],
        files={},
        response_type='LabelResponse',  # noqa: E501
        auth_settings=[],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={},
        urlopen_kw=kwargs.get('urlopen_kw', None))
def _post_checks_id_labels_prepare(self, check_id, label_mapping, **kwargs):  # noqa: E501,D401,D403
    """Assemble the request pieces for the ``post_checks_id_labels`` operation."""
    # Captured first so the mapping holds exactly `self`, `check_id`,
    # `label_mapping` and `kwargs`; _check_operation_params presumably
    # flattens **kwargs into this dict (later membership tests rely on
    # that) -- TODO confirm.
    local_var_params = locals()

    all_params = ['check_id', 'label_mapping', 'zap_trace_span']  # noqa: E501

    self._check_operation_params('post_checks_id_labels', all_params, local_var_params)
    # verify the required parameter 'check_id' is set
    if ('check_id' not in local_var_params or
            local_var_params['check_id'] is None):
        raise ValueError("Missing the required parameter `check_id` when calling `post_checks_id_labels`")  # noqa: E501
    # verify the required parameter 'label_mapping' is set
    if ('label_mapping' not in local_var_params or
            local_var_params['label_mapping'] is None):
        raise ValueError("Missing the required parameter `label_mapping` when calling `post_checks_id_labels`")  # noqa: E501

    # `checkID` matches the camel-cased placeholder in the URL template.
    path_params = {}
    if 'check_id' in local_var_params:
        path_params['checkID'] = local_var_params['check_id']  # noqa: E501

    query_params = []

    header_params = {}
    if 'zap_trace_span' in local_var_params:
        header_params['Zap-Trace-Span'] = local_var_params['zap_trace_span']  # noqa: E501

    # The label mapping is sent as the JSON request body.
    body_params = None
    if 'label_mapping' in local_var_params:
        body_params = local_var_params['label_mapping']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    return local_var_params, path_params, query_params, header_params, body_params
def put_checks_id(self, check_id, check, **kwargs):  # noqa: E501,D401,D403
    """Update a check.

    Synchronous by default; pass ``async_req=True`` to get back the
    request thread instead of the decoded response.

    >>> thread = api.put_checks_id(check_id, check, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str check_id: The check ID. (required)
    :param Check check: Check update to apply (required)
    :param str zap_trace_span: OpenTracing span context
    :return: Check
    """  # noqa: E501
    # Always request the payload only; both the sync and async paths
    # delegate to the same *_with_http_info call.
    kwargs['_return_http_data_only'] = True
    return self.put_checks_id_with_http_info(check_id, check, **kwargs)  # noqa: E501
def put_checks_id_with_http_info(self, check_id, check, **kwargs):  # noqa: E501,D401,D403
    """Update a check.

    Synchronous by default; pass ``async_req=True`` to get back the
    request thread instead of the response tuple.

    >>> thread = api.put_checks_id_with_http_info(check_id, check, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str check_id: The check ID. (required)
    :param Check check: Check update to apply (required)
    :param str zap_trace_span: OpenTracing span context
    :return: Check
    """  # noqa: E501
    params, path, query, headers, body = \
        self._put_checks_id_prepare(check_id, check, **kwargs)

    return self.api_client.call_api(
        '/api/v2/checks/{checkID}', 'PUT',
        path, query, headers,
        body=body,
        post_params=[],
        files={},
        response_type='Check',  # noqa: E501
        auth_settings=[],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={},
        urlopen_kw=kwargs.get('urlopen_kw', None))
async def put_checks_id_async(self, check_id, check, **kwargs):  # noqa: E501,D401,D403
    """Update a check.

    Coroutine variant: awaits the HTTP request.

    :param async_req bool
    :param str check_id: The check ID. (required)
    :param Check check: Check update to apply (required)
    :param str zap_trace_span: OpenTracing span context
    :return: Check
    """  # noqa: E501
    params, path, query, headers, body = \
        self._put_checks_id_prepare(check_id, check, **kwargs)

    return await self.api_client.call_api(
        '/api/v2/checks/{checkID}', 'PUT',
        path, query, headers,
        body=body,
        post_params=[],
        files={},
        response_type='Check',  # noqa: E501
        auth_settings=[],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={},
        urlopen_kw=kwargs.get('urlopen_kw', None))
def _put_checks_id_prepare(self, check_id, check, **kwargs):  # noqa: E501,D401,D403
    """Assemble the request pieces for the ``put_checks_id`` operation."""
    # Captured first so the mapping holds exactly `self`, `check_id`,
    # `check` and `kwargs`; _check_operation_params presumably flattens
    # **kwargs into this dict (later membership tests rely on that)
    # -- TODO confirm.
    local_var_params = locals()

    all_params = ['check_id', 'check', 'zap_trace_span']  # noqa: E501

    self._check_operation_params('put_checks_id', all_params, local_var_params)
    # verify the required parameter 'check_id' is set
    if ('check_id' not in local_var_params or
            local_var_params['check_id'] is None):
        raise ValueError("Missing the required parameter `check_id` when calling `put_checks_id`")  # noqa: E501
    # verify the required parameter 'check' is set
    if ('check' not in local_var_params or
            local_var_params['check'] is None):
        raise ValueError("Missing the required parameter `check` when calling `put_checks_id`")  # noqa: E501

    # `checkID` matches the camel-cased placeholder in the URL template.
    path_params = {}
    if 'check_id' in local_var_params:
        path_params['checkID'] = local_var_params['check_id']  # noqa: E501

    query_params = []

    header_params = {}
    if 'zap_trace_span' in local_var_params:
        header_params['Zap-Trace-Span'] = local_var_params['zap_trace_span']  # noqa: E501

    # The full replacement check is sent as the JSON request body.
    body_params = None
    if 'check' in local_var_params:
        body_params = local_var_params['check']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    return local_var_params, path_params, query_params, header_params, body_params
| 44.410828
| 159
| 0.633776
| 6,956
| 55,780
| 4.750144
| 0.026164
| 0.051813
| 0.090672
| 0.04116
| 0.969493
| 0.961715
| 0.955602
| 0.943678
| 0.935476
| 0.910932
| 0
| 0.021918
| 0.273664
| 55,780
| 1,255
| 160
| 44.446215
| 0.793632
| 0.249946
| 0
| 0.804154
| 0
| 0.004451
| 0.158026
| 0.036731
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045994
| false
| 0
| 0.004451
| 0
| 0.126113
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7e200443c90d9ec90f6fe1a83c71f4b659b88637
| 1,955
|
py
|
Python
|
tests/test_prometheus_rules.py
|
app-sre/app-bouncer
|
1153e21872f0c929904da32e34785286149302a4
|
[
"Apache-2.0"
] | 1
|
2020-07-05T21:23:05.000Z
|
2020-07-05T21:23:05.000Z
|
tests/test_prometheus_rules.py
|
app-sre/app-bouncer
|
1153e21872f0c929904da32e34785286149302a4
|
[
"Apache-2.0"
] | 4
|
2019-12-03T16:26:57.000Z
|
2020-09-30T22:14:46.000Z
|
tests/test_prometheus_rules.py
|
app-sre/app-bouncer
|
1153e21872f0c929904da32e34785286149302a4
|
[
"Apache-2.0"
] | 7
|
2019-09-05T08:15:14.000Z
|
2020-04-21T15:15:08.000Z
|
import yaml
from textwrap import dedent
from checks.prometheus_rule import (CheckPrometheusRuleSeverity,
CheckPrometheusRuleLabels)
from lib.result import CheckError, CheckSuccess
def test_prometheus_rule_severity_valid():
    """An allowed severity label ('high') yields a CheckSuccess."""
    rule = yaml.safe_load(dedent("""
        ---
        apiVersion: monitoring.coreos.com/v1
        kind: PrometheusRule
        metadata:
          name: rule
        spec:
          groups:
          - name: group
            rules:
            - alert: alert
              labels:
                severity: high
    """))
    outcome = CheckPrometheusRuleSeverity().check_severity(rule)
    assert isinstance(outcome, CheckSuccess)
def test_prometheus_rule_severity_invalid():
    """A disallowed severity label ('critical') yields a CheckError."""
    rule = yaml.safe_load(dedent("""
        ---
        apiVersion: monitoring.coreos.com/v1
        kind: PrometheusRule
        metadata:
          name: rule
        spec:
          groups:
          - name: group
            rules:
            - alert: alert
              labels:
                severity: critical
    """))
    outcome = CheckPrometheusRuleSeverity().check_severity(rule)
    assert isinstance(outcome, CheckError)
def test_prometheus_rule_labels_valid():
    """The expected metadata labels yield a CheckSuccess."""
    rule = yaml.safe_load(dedent("""
        ---
        apiVersion: monitoring.coreos.com/v1
        kind: PrometheusRule
        metadata:
          name: rule
          labels:
            prometheus: app-sre
            role: alert-rules
    """))
    outcome = CheckPrometheusRuleLabels().check_labels(rule)
    assert isinstance(outcome, CheckSuccess)
def test_prometheus_rule_labels_invalid():
    """A misspelled 'promethues' label key (intentional) yields a CheckError."""
    rule = yaml.safe_load(dedent("""
        ---
        apiVersion: monitoring.coreos.com/v1
        kind: PrometheusRule
        metadata:
          name: rule
          labels:
            promethues: app-sre
            role: alert-rules
    """))
    outcome = CheckPrometheusRuleLabels().check_labels(rule)
    assert isinstance(outcome, CheckError)
| 22.471264
| 64
| 0.618926
| 181
| 1,955
| 6.546961
| 0.259669
| 0.059072
| 0.057384
| 0.070886
| 0.850633
| 0.805907
| 0.764557
| 0.764557
| 0.764557
| 0.708861
| 0
| 0.00289
| 0.292072
| 1,955
| 86
| 65
| 22.732558
| 0.853324
| 0
| 0
| 0.811594
| 0
| 0
| 0.455243
| 0.049105
| 0
| 0
| 0
| 0
| 0.057971
| 1
| 0.057971
| false
| 0
| 0.057971
| 0
| 0.115942
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7e67a6bef329fdda05430df45d815e11a2b7f9b2
| 200
|
py
|
Python
|
spotdl/providers/lyrics/__init__.py
|
phcreery/spotdl-v4
|
3bd3768de10ae80b5e1ba3bbe6b792f7fc9f8dfc
|
[
"MIT"
] | 10
|
2022-01-03T15:00:34.000Z
|
2022-03-18T19:55:37.000Z
|
spotdl/providers/lyrics/__init__.py
|
phcreery/spotdl-v4
|
3bd3768de10ae80b5e1ba3bbe6b792f7fc9f8dfc
|
[
"MIT"
] | 9
|
2022-01-15T05:43:35.000Z
|
2022-03-16T17:57:47.000Z
|
spotdl/providers/lyrics/__init__.py
|
phcreery/spotdl-v4
|
3bd3768de10ae80b5e1ba3bbe6b792f7fc9f8dfc
|
[
"MIT"
] | 11
|
2022-01-03T15:00:22.000Z
|
2022-03-27T19:27:05.000Z
|
"""
Lyrics providers for spotdl.
"""
from spotdl.providers.lyrics.genius import Genius
from spotdl.providers.lyrics.musixmatch import MusixMatch
from spotdl.providers.lyrics.azlyrics import AzLyrics
| 25
| 57
| 0.825
| 25
| 200
| 6.6
| 0.36
| 0.181818
| 0.345455
| 0.454545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.095
| 200
| 7
| 58
| 28.571429
| 0.911602
| 0.14
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
7e920c3241060a24924bdfb6273d58223316ec08
| 28,994
|
py
|
Python
|
infoblox_netmri/api/broker/v3_6_0/config_error_broker.py
|
IngmarVG-IB/infoblox-netmri
|
b0c725fd64aee1890d83917d911b89236207e564
|
[
"Apache-2.0"
] | null | null | null |
infoblox_netmri/api/broker/v3_6_0/config_error_broker.py
|
IngmarVG-IB/infoblox-netmri
|
b0c725fd64aee1890d83917d911b89236207e564
|
[
"Apache-2.0"
] | null | null | null |
infoblox_netmri/api/broker/v3_6_0/config_error_broker.py
|
IngmarVG-IB/infoblox-netmri
|
b0c725fd64aee1890d83917d911b89236207e564
|
[
"Apache-2.0"
] | null | null | null |
from ..broker import Broker
class ConfigErrorBroker(Broker):
    """Broker for the NetMRI ``config_errors`` API controller.

    Exposes the four standard query styles over ConfigError records:

    - :meth:`index`  — fastest; filters by a fixed set of equality inputs.
    - :meth:`show`   — fetch a single record by ``ConfigErrorID``.
    - :meth:`search` — index-style filters plus a free-text ``query`` and
      an optional ``xml_filter``.
    - :meth:`find`   — fully general comparisons via ``op_*`` /
      ``val_f_*`` / ``val_c_*`` triplets per field.

    Searchable/sortable fields: ConfigErrorID, DatasourceID, DeviceID,
    ErrMsg, Timestamp. All methods forward their keyword arguments
    unchanged to the NetMRI API layer.
    """

    controller = "config_errors"

    def index(self, **kwargs):
        """List the available config errors.

        The most efficient of the list queries; only the documented
        inputs are honored, anything else is ignored.

        Recognized inputs (all optional):

        :param ConfigErrorID: Internal NetMRI identifier(s) of the
            Configuration Error (Integer, or Array of Integer from
            API 2.5 on).
        :param DeviceID: Internal NetMRI identifier(s) of the device the
            error was collected from (Integer / Array of Integer).
        :param DeviceGroupID: Device groups to limit results to
            (Array of Integer).
        :param starttime: Lower time boundary (DateTime, default today).
        :param endtime: Upper time boundary (DateTime, default tomorrow).
        :param start: First record of the page (Integer, default 0).
        :param limit: Page size, max 10000 (Integer, default 1000).
        :param sort: Field(s) to sort by (default ConfigErrorID).
        :param dir: Sort direction(s), 'asc' or 'desc' (default asc).
        :param select: Attributes to return per record; all if omitted.
        :param goto_field: NIOS GOTO field name (API 2.8+).
        :param goto_value: NIOS GOTO field value (API 2.8+).

        :return config_errors: Array of matching ConfigError objects.
        """
        return self.api_list_request(self._get_method_fullname("index"), kwargs)

    def show(self, **kwargs):
        """Show the details for one config error.

        :param ConfigErrorID: Internal NetMRI identifier of the
            Configuration Error (Integer, required).

        :return config_error: The ConfigError identified by the given
            ConfigErrorID.
        """
        return self.api_request(self._get_method_fullname("show"), kwargs)

    def search(self, **kwargs):
        """List config errors matching field values and/or a query string.

        More flexible than :meth:`index` but more demanding on the
        system. Accepts every :meth:`index` input, equality filters on
        any searchable field (ConfigErrorID, DatasourceID, DeviceID,
        ErrMsg, Timestamp — scalars pre-2.5, arrays from 2.5), plus:

        :param query: Value (or ``/regex/``) matched against the
            searchable attributes; any record with a match is returned.
        :param xml_filter: SetFilter XML applied after field/query
            filtering but before limit/pagination (API 2.3+). May be
            costly without an accompanying database filter.

        :return config_errors: Array of matching ConfigError objects.
        """
        return self.api_list_request(self._get_method_fullname("search"), kwargs)

    def find(self, **kwargs):
        """List config errors using arbitrary comparison operators.

        The most flexible (and least efficient) query. For each field in
        {ConfigErrorID, DatasourceID, DeviceID, ErrMsg, Timestamp}
        supply:

        - ``op_<Field>``: one of =, <>, rlike, not rlike, >, >=, <, <=,
          like, not like, is null, is not null, between (a comma
          delimited ``between`` value must hold an even count).
        - ``val_f_<Field>``: compare against another field name, or
        - ``val_c_<Field>``: compare against a constant value.

        Exactly one of ``val_f_*`` / ``val_c_*`` is required whenever
        the matching ``op_*`` is given. Also accepts the shared list
        inputs (DeviceGroupID, starttime/endtime, start/limit, sort/dir,
        select, goto_field/goto_value, xml_filter) as in :meth:`search`.

        :return config_errors: Array of matching ConfigError objects.
        """
        return self.api_list_request(self._get_method_fullname("find"), kwargs)
| 48.566164
| 468
| 0.592226
| 3,527
| 28,994
| 4.8296
| 0.076552
| 0.078666
| 0.051133
| 0.05988
| 0.918046
| 0.909827
| 0.895444
| 0.884936
| 0.884936
| 0.881238
| 0
| 0.006168
| 0.323412
| 28,994
| 597
| 469
| 48.566164
| 0.86216
| 0.803235
| 0
| 0
| 0
| 0
| 0.045134
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.363636
| false
| 0
| 0.090909
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 9
|
0ea10fe00626898aba3cd53349d279e23972bf9c
| 44
|
py
|
Python
|
pigments_from_rrs/_version.py
|
alisonpchase/pigments-from-rrs
|
33323e1e6ebe486ae15a8724d5539883284bcff5
|
[
"MIT"
] | null | null | null |
pigments_from_rrs/_version.py
|
alisonpchase/pigments-from-rrs
|
33323e1e6ebe486ae15a8724d5539883284bcff5
|
[
"MIT"
] | 8
|
2021-07-22T18:19:08.000Z
|
2022-02-10T01:17:02.000Z
|
pigments_from_rrs/_version.py
|
alisonpchase/pigments-from-rrs
|
33323e1e6ebe486ae15a8724d5539883284bcff5
|
[
"MIT"
] | 1
|
2021-07-22T17:57:33.000Z
|
2021-07-22T17:57:33.000Z
|
# PEP 440 version string: 0.1 dev release 16, local segment carrying the git
# abbrev (g2693076) and a build date (d20211117). Filename (_version.py) and
# format suggest it is auto-generated by versioneer — TODO confirm; if so,
# edit the tool's config rather than this line.
__version__ = '0.1.dev16+g2693076.d20211117'
| 44
| 44
| 0.795455
| 6
| 44
| 5.166667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.452381
| 0.045455
| 44
| 1
| 44
| 44
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0.622222
| 0.622222
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.