hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3909077e67c071a9d87d1506b85fb554b4a414db
| 15,150
|
py
|
Python
|
notebooks/classification/plotting_utils.py
|
LaudateCorpus1/macest
|
0a6b7bd26a31900a55164c75938c074c116c78f6
|
[
"UPL-1.0",
"Apache-2.0"
] | 88
|
2021-08-20T15:34:10.000Z
|
2022-03-17T04:26:20.000Z
|
notebooks/classification/plotting_utils.py
|
LaudateCorpus1/macest
|
0a6b7bd26a31900a55164c75938c074c116c78f6
|
[
"UPL-1.0",
"Apache-2.0"
] | 3
|
2022-01-09T17:11:36.000Z
|
2022-03-02T00:43:03.000Z
|
notebooks/classification/plotting_utils.py
|
LaudateCorpus1/macest
|
0a6b7bd26a31900a55164c75938c074c116c78f6
|
[
"UPL-1.0",
"Apache-2.0"
] | 11
|
2021-08-23T15:28:08.000Z
|
2022-02-18T01:29:24.000Z
|
"""Module containing plotting utility functions for use in example notebooks."""
from typing import Optional, Tuple
from matplotlib.axes import Axes
from macest.classification.models import ModelWithConfidence
from matplotlib import cm
import matplotlib.pyplot as plt
import numpy as np
from sklearn.ensemble import RandomForestClassifier
SklearnModelType = RandomForestClassifier
def plot_prediction_conf_surface(low_range: float,
                                 up_range: float,
                                 sklearn_model: SklearnModelType,
                                 X_pp_train: np.ndarray,
                                 y_pp_train: Optional[np.ndarray] = None,
                                 plot_training_data: bool = True):
    """Plot the predictions' confidence surface.

    Draws a 2x2 grid of filled tricontour panels for a (binary) classifier:
    the left column shows the predicted class, the right column the maximum
    predicted class probability.  The top row samples points uniformly in
    [-low_range, low_range]^2, the bottom row in [-up_range, up_range]^2.

    :param low_range: half-width of the sampling square for the top row
    :param up_range: half-width of the sampling square for the bottom row
    :param sklearn_model: fitted classifier exposing predict and predict_proba
    :param X_pp_train: training points, shape (n, 2), overlaid as a scatter
    :param y_pp_train: training labels; colour the scatter and set the
        prediction colorbar ticks
    :param plot_training_data: if True, overlay the training scatter
    """
    # 10**4 uniform random evaluation points in the "near" (low_range) square...
    x1_ne = np.random.uniform(-low_range, low_range, 10 ** 4)
    x2_ne = np.random.uniform(-low_range, low_range, 10 ** 4)
    points_ne = np.array((x1_ne, x2_ne)).T
    # ...and in the "far" (up_range) square.
    x1_fa = np.random.uniform(-up_range, up_range, 10 ** 4)
    x2_fa = np.random.uniform(-up_range, up_range, 10 ** 4)
    points_fa = np.array((x1_fa, x2_fa)).T
    fig, ax = plt.subplots(ncols=2, nrows=2,
                           figsize=(16, 12), )
    # Top-left: predicted class over the near square (2-colour discrete map).
    h0 = ax[0, 0].tricontourf(x1_ne, x2_ne, sklearn_model.predict(points_ne),
                              cmap=cm.get_cmap('rainbow', 2),
                              alpha=.9)
    # Top-right: maximum class probability over the near square.
    g0 = ax[0, 1].tricontourf(x1_ne, x2_ne,
                              np.amax(sklearn_model.predict_proba(points_ne), axis=1),
                              vmin=0.,
                              vmax=1,
                              cmap=cm.get_cmap('viridis'))
    # Bottom-left: predicted class over the far square.
    h1 = ax[1, 0].tricontourf(x1_fa, x2_fa, sklearn_model.predict(points_fa),
                              extend='both',
                              cmap=cm.get_cmap('rainbow', 2),
                              alpha=.9)
    # Bottom-right: maximum class probability over the far square.
    g1 = ax[1, 1].tricontourf(x1_fa, x2_fa,
                              np.amax(sklearn_model.predict_proba(points_fa), axis=1),
                              vmin=0.,
                              vmax=1.,
                              cmap=cm.viridis)
    if plot_training_data:
        # Overlay the training points on every panel.
        ax[0, 0].scatter(X_pp_train[:, 0], X_pp_train[:, 1], c=y_pp_train,
                         cmap=cm.rainbow,
                         edgecolors='black')
        ax[0, 1].scatter(X_pp_train[:, 0], X_pp_train[:, 1],
                         c=y_pp_train,
                         cmap=cm.get_cmap('rainbow', 2),
                         edgecolors='black')
        ax[1, 0].scatter(X_pp_train[:, 0], X_pp_train[:, 1],
                         c=y_pp_train,
                         edgecolors='black',
                         cmap=cm.get_cmap('rainbow', 2))
        ax[1, 1].scatter(X_pp_train[:, 0], X_pp_train[:, 1],
                         c=y_pp_train,
                         alpha=0.9,
                         cmap=cm.get_cmap('rainbow', 2),
                         edgecolors='black')
    # Shared colorbars: one for the class panels, one for the confidence panels.
    fig.colorbar(h1, ax=[ax[0, 0], ax[1, 0]],
                 ticks=np.arange(0.0, len(np.unique(y_pp_train)) + 0.1, 1),
                 label='prediction')
    fig.colorbar(g1, ax=[ax[0, 1], ax[1, 1]],
                 ticks=np.arange(0.0, 1.01, 0.1),
                 label='Confidence')
    _set_axes_labels(ax)
def plot_prediction_conf_surface_multiclass(low_range: float,
                                            up_range: float,
                                            sklearn_model: SklearnModelType,
                                            X_pp_train: np.ndarray,
                                            y_pp_train: np.ndarray,
                                            plot_training_data: bool = True):
    """Plot the predictions' confidence surface for the multiclass case.

    Draws a 2x2 grid of filled tricontour panels: the left column shows the
    predicted class, the right column the maximum predicted class
    probability.  The top row samples points uniformly in
    [-low_range, low_range]^2, the bottom row in [-up_range, up_range]^2.

    :param low_range: half-width of the sampling square for the top row
    :param up_range: half-width of the sampling square for the bottom row
    :param sklearn_model: fitted classifier exposing predict and predict_proba
    :param X_pp_train: training points, shape (n, 2), overlaid as a scatter
    :param y_pp_train: training labels; their unique count sizes the
        discrete colour maps and the prediction colorbar ticks
    :param plot_training_data: if True, overlay the training scatter
    """
    # Hoisted: the class count sizes every discrete colour map below.
    # (Also fixes the last scatter, which hard-coded 4 classes while every
    # sibling call derived the count from y_pp_train.)
    n_classes = len(np.unique(y_pp_train))
    # 10**4 uniform random evaluation points in the "near" square...
    x1_ne = np.random.uniform(-low_range, low_range, 10 ** 4)
    x2_ne = np.random.uniform(-low_range, low_range, 10 ** 4)
    points_ne = np.array((x1_ne, x2_ne)).T
    # ...and in the "far" square.
    x1_fa = np.random.uniform(-up_range, up_range, 10 ** 4)
    x2_fa = np.random.uniform(-up_range, up_range, 10 ** 4)
    points_fa = np.array((x1_fa, x2_fa)).T
    fig, ax = plt.subplots(ncols=2, nrows=2,
                           figsize=(16, 12), )
    # Top-left: predicted class over the near square.
    h0 = ax[0, 0].tricontourf(x1_ne, x2_ne, sklearn_model.predict(points_ne),
                              cmap=cm.get_cmap('rainbow', n_classes),
                              alpha=.9)
    # Top-right: maximum class probability over the near square.
    g0 = ax[0, 1].tricontourf(x1_ne, x2_ne,
                              np.amax(sklearn_model.predict_proba(points_ne), axis=1),
                              vmin=0.,
                              vmax=1,
                              cmap=cm.get_cmap('viridis'))
    # Bottom-left: predicted class over the far square.
    h1 = ax[1, 0].tricontourf(x1_fa, x2_fa, sklearn_model.predict(points_fa),
                              cmap=cm.get_cmap('rainbow', n_classes),
                              alpha=.9)
    # Bottom-right: maximum class probability over the far square.
    g1 = ax[1, 1].tricontourf(x1_fa, x2_fa,
                              np.amax(sklearn_model.predict_proba(points_fa), axis=1),
                              vmin=0.,
                              vmax=1.,
                              cmap=cm.viridis)
    if plot_training_data:
        # Overlay the training points on every panel.
        ax[0, 0].scatter(X_pp_train[:, 0], X_pp_train[:, 1], c=y_pp_train,
                         cmap=cm.rainbow,
                         edgecolors='black')
        ax[0, 1].scatter(X_pp_train[:, 0], X_pp_train[:, 1],
                         c=y_pp_train,
                         cmap=cm.get_cmap('rainbow', n_classes),
                         edgecolors='black')
        ax[1, 0].scatter(X_pp_train[:, 0], X_pp_train[:, 1],
                         c=y_pp_train,
                         edgecolors='black',
                         cmap=cm.get_cmap('rainbow', n_classes))
        ax[1, 1].scatter(X_pp_train[:, 0], X_pp_train[:, 1],
                         c=y_pp_train,
                         alpha=0.9,
                         cmap=cm.get_cmap('rainbow', n_classes),
                         edgecolors='black')
    # Shared colorbars: one for the class panels, one for the confidence panels.
    fig.colorbar(h0, ax=[ax[0, 0], ax[1, 0]],
                 ticks=np.arange(0., n_classes + 0.1, 1),
                 label='prediction')
    fig.colorbar(g1, ax=[ax[0, 1], ax[1, 1]],
                 ticks=np.arange(0.0, 1.01, 0.1),
                 label='Confidence')
    _set_axes_labels(ax)
def plot_macest_sklearn_comparison_surface(low_range: float,
                                           up_range: float,
                                           macest_model: ModelWithConfidence,
                                           sklearn_model: SklearnModelType,
                                           X_pp_train: Optional[np.ndarray] = None,
                                           y_pp_train: Optional[np.ndarray] = None,
                                           plot_training_data: bool = True):
    """Plot a comparison of MACE with the original Sklearn model.

    2x2 grid of confidence surfaces: the left column shows MACEst's
    confidence in the point prediction, the right column the sklearn
    model's maximum class probability.  The top row samples points in
    [-low_range, low_range]^2, the bottom row in [-up_range, up_range]^2.

    :param low_range: half-width of the sampling square for the top row
    :param up_range: half-width of the sampling square for the bottom row
    :param macest_model: MACEst confidence wrapper around the point predictor
    :param sklearn_model: fitted classifier exposing predict_proba
    :param X_pp_train: training points, shape (n, 2), overlaid as a scatter
    :param y_pp_train: training labels used to colour the scatter
    :param plot_training_data: if True, overlay the scatter on the top row
    """
    x1_ne = np.random.uniform(-low_range, low_range, 10 ** 4)
    x2_ne = np.random.uniform(-low_range, low_range, 10 ** 4)
    points_ne = np.array((x1_ne, x2_ne)).T
    x1_fa = np.random.uniform(-up_range, up_range, 10 ** 4)
    x2_fa = np.random.uniform(-up_range, up_range, 10 ** 4)
    points_fa = np.array((x1_fa, x2_fa)).T
    fig, ax = plt.subplots(ncols=2, nrows=2,
                           figsize=(16, 12), )
    # Top-left: MACEst confidence over the near square.
    h0 = ax[0, 0].tricontourf(x1_ne, x2_ne,
                              macest_model.predict_confidence_of_point_prediction(points_ne),
                              vmin=0.,
                              vmax=1,
                              cmap=cm.viridis)
    # Top-right: sklearn max class probability over the near square.
    g0 = ax[0, 1].tricontourf(x1_ne, x2_ne, np.amax(sklearn_model.predict_proba(points_ne), axis=1),
                              vmin=0.,
                              vmax=1,
                              cmap=cm.viridis)
    if plot_training_data:
        ax[0, 0].scatter(X_pp_train[:, 0], X_pp_train[:, 1],
                         c=y_pp_train,
                         cmap=cm.rainbow,
                         edgecolors='black')
        ax[0, 1].scatter(X_pp_train[:, 0], X_pp_train[:, 1],
                         c=y_pp_train, alpha=0.9,
                         edgecolors='black',
                         cmap=cm.rainbow)
    # Bottom-left: MACEst confidence over the far square.
    h1 = ax[1, 0].tricontourf(x1_fa, x2_fa,
                              macest_model.predict_confidence_of_point_prediction(points_fa),
                              vmin=0.,
                              vmax=1,
                              cmap=cm.viridis)
    # NOTE(review): this scatter (and the one after g1) sits outside the
    # plot_training_data guard, so the bottom row always draws the training
    # data and requires X_pp_train despite its Optional default — confirm
    # whether it should be guarded like the top row.
    ax[1, 0].scatter(X_pp_train[:, 0], X_pp_train[:, 1],
                     c=y_pp_train,
                     edgecolors='black',
                     cmap=cm.get_cmap('rainbow', 2))
    # Bottom-right: sklearn max class probability over the far square.
    g1 = ax[1, 1].tricontourf(x1_fa, x2_fa,
                              np.amax(sklearn_model.predict_proba(points_fa), axis=1),
                              vmin=0.,
                              vmax=1.,
                              cmap=cm.viridis)
    ax[1, 1].scatter(X_pp_train[:, 0], X_pp_train[:, 1],
                     c=y_pp_train,
                     alpha=0.9,
                     cmap=cm.get_cmap('rainbow', 2),
                     edgecolors='black')
    # One shared confidence colorbar for all four panels.
    fig.colorbar(h1,
                 ax=ax.ravel().tolist(),
                 ticks=np.arange(0., 1.01, 0.1),
                 label='Confidence')
    ax[0, 0].set_title('MACEst')
    ax[1, 0].set_title('MACEst')
    ax[0, 1].set_title(f'{sklearn_model.__class__.__name__}')
    ax[1, 1].set_title(f'{sklearn_model.__class__.__name__}')
    _set_axes_labels(ax)
def plot_macest_sklearn_comparison_surface_multiclass(low_range: float,
                                                      up_range: float,
                                                      macest_model: ModelWithConfidence,
                                                      sklearn_model: SklearnModelType,
                                                      X_pp_train: np.ndarray,
                                                      y_pp_train: np.ndarray,
                                                      plot_training_data: bool = True):
    """Plot a comparison of MACE with the original Sklearn model for the multiclass case.

    2x2 grid of confidence surfaces: the left column shows MACEst's
    confidence in the point prediction, the right column the sklearn
    model's maximum class probability.  The top row samples points in
    [-low_range, low_range]^2, the bottom row in [-up_range, up_range]^2.

    :param low_range: half-width of the sampling square for the top row
    :param up_range: half-width of the sampling square for the bottom row
    :param macest_model: MACEst confidence wrapper around the point predictor
    :param sklearn_model: fitted classifier exposing predict_proba
    :param X_pp_train: training points, shape (n, 2), overlaid as a scatter
    :param y_pp_train: training labels; their unique count sizes the
        discrete scatter colour maps
    :param plot_training_data: if True, overlay the scatter on the top row
    """
    x1_ne = np.random.uniform(-low_range, low_range, 10 ** 4)
    x2_ne = np.random.uniform(-low_range, low_range, 10 ** 4)
    points_ne = np.array((x1_ne, x2_ne)).T
    x1_fa = np.random.uniform(-up_range, up_range, 10 ** 4)
    x2_fa = np.random.uniform(-up_range, up_range, 10 ** 4)
    points_fa = np.array((x1_fa, x2_fa)).T
    fig, ax = plt.subplots(ncols=2, nrows=2,
                           figsize=(16, 12), )
    # Top-left: MACEst confidence over the near square.
    h0 = ax[0, 0].tricontourf(x1_ne, x2_ne,
                              macest_model.predict_confidence_of_point_prediction(points_ne),
                              vmin=0.,
                              vmax=1,
                              cmap=cm.viridis)
    # Top-right: sklearn max class probability over the near square.
    g0 = ax[0, 1].tricontourf(x1_ne, x2_ne, np.amax(sklearn_model.predict_proba(points_ne), axis=1),
                              vmin=0.,
                              vmax=1,
                              cmap=cm.viridis)
    if plot_training_data:
        ax[0, 0].scatter(X_pp_train[:, 0], X_pp_train[:, 1],
                         c=y_pp_train,
                         cmap=cm.rainbow,
                         edgecolors='black')
        ax[0, 1].scatter(X_pp_train[:, 0], X_pp_train[:, 1],
                         c=y_pp_train, alpha=0.9,
                         edgecolors='black',
                         cmap=cm.rainbow)
    # Bottom-left: MACEst confidence over the far square.
    h1 = ax[1, 0].tricontourf(x1_fa, x2_fa,
                              macest_model.predict_confidence_of_point_prediction(points_fa),
                              vmin=0.,
                              vmax=1,
                              cmap=cm.viridis)
    # NOTE(review): this scatter (and the one after g1) sits outside the
    # plot_training_data guard, so the bottom row always draws the training
    # data — confirm whether it should be guarded like the top row.
    ax[1, 0].scatter(X_pp_train[:, 0], X_pp_train[:, 1],
                     c=y_pp_train,
                     edgecolors='black',
                     cmap=cm.get_cmap('rainbow', len(np.unique(y_pp_train))))
    # Bottom-right: sklearn max class probability over the far square.
    g1 = ax[1, 1].tricontourf(x1_fa, x2_fa,
                              np.amax(sklearn_model.predict_proba(points_fa), axis=1),
                              vmin=0.,
                              vmax=1.,
                              cmap=cm.viridis)
    ax[1, 1].scatter(X_pp_train[:, 0], X_pp_train[:, 1],
                     c=y_pp_train,
                     alpha=0.9,
                     cmap=cm.get_cmap('rainbow', len(np.unique(y_pp_train))),
                     edgecolors='black')
    # One shared confidence colorbar for all four panels.
    fig.colorbar(h1,
                 ax=ax.ravel().tolist(),
                 ticks=np.arange(0., 1.01, 0.1),
                 label='Confidence')
    ax[0, 0].set_title('MACEst')
    ax[1, 0].set_title('MACEst')
    ax[0, 1].set_title(f'{sklearn_model.__class__.__name__}')
    ax[1, 1].set_title(f'{sklearn_model.__class__.__name__}')
    _set_axes_labels(ax)
def _set_axes_labels(ax,
x_label: str = "x1",
y_label: str = "x2",
fontsize: int = 18) -> None:
ax[0, 0].set_xlabel(x_label, fontsize=fontsize)
ax[0, 1].set_xlabel(x_label, fontsize=fontsize)
ax[0, 0].set_ylabel(y_label, fontsize=fontsize)
ax[0, 1].set_ylabel(y_label, fontsize=fontsize)
ax[1, 0].set_xlabel(x_label, fontsize=fontsize)
ax[1, 1].set_xlabel(x_label, fontsize=fontsize)
ax[1, 0].set_ylabel(y_label, fontsize=fontsize)
ax[1, 1].set_ylabel(y_label, fontsize=fontsize)
def make_funky_star(n_arms: int, n_points: int) -> np.ndarray:
"""Generate a star shaped data distribution."""
arms = n_arms
points_per_arm = int(n_points / arms)
angs = np.arange(np.pi / arms, np.pi + 0.001, np.pi / arms)
cov = np.array([[6, 5.93], [5.93, 6]])
a = np.random.multivariate_normal((0, 0),
cov,
points_per_arm)
arms = []
for theta in angs:
c, s = np.cos(theta), np.sin(theta)
R = np.array(((c, -s), (s, c)))
arms.append(R.dot(a.T).T)
star = np.vstack(arms)
return star
def make_star_classes(n_arms: int, n_points: int, n_classes: int) -> np.ndarray:
    """Generate classes for the star shaped data distribution.

    Arm ``i`` is assigned label ``i mod n_classes``; labels are repeated
    once per point of the arm and returned as a flat float array.
    """
    # NOTE(review): the per-arm point count divides by n_classes (not
    # n_arms), exactly as in the original implementation — confirm this is
    # intended when n_arms != n_classes.
    points_per_arm = int(n_points / n_classes)
    arm_labels = np.mod(np.arange(n_arms), n_classes).astype(float)
    return np.repeat(arm_labels, points_per_arm)
def make_two_spirals(r: float,
n_rotations: int,
n_points: int,
noise: float) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""Generate spirals for the binary classification example."""
theta = np.linspace(0, 2 * np.pi * n_rotations, n_points)
r0 = r * theta
x_20 = r0 * np.cos(theta) + np.random.normal(0, noise, n_points)
y_20 = r0 * np.sin(theta) + np.random.normal(0, noise, n_points)
r1 = r * theta
x_21 = - r1 * np.cos(theta) + np.random.normal(-0.4
, noise, n_points)
y_21 = - r1 * np.sin(theta) + np.random.normal(0.4, noise, n_points)
return x_20, x_21, y_20, y_21
| 40.616622
| 100
| 0.486799
| 1,906
| 15,150
| 3.620147
| 0.091291
| 0.064928
| 0.041739
| 0.030145
| 0.847971
| 0.837536
| 0.817681
| 0.808841
| 0.757101
| 0.738551
| 0
| 0.047712
| 0.389901
| 15,150
| 372
| 101
| 40.725806
| 0.698799
| 0.031551
| 0
| 0.766551
| 0
| 0
| 0.028706
| 0.009295
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027875
| false
| 0
| 0.02439
| 0
| 0.062718
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
391f99902a48b14b3833191a07aba6657ce1f346
| 76
|
py
|
Python
|
fixed_file/__int__.py
|
DanielCastriani/Fixed-Width-File-Parser
|
fc8e4c00aaf703d5b2090854c708f9837591c480
|
[
"MIT"
] | null | null | null |
fixed_file/__int__.py
|
DanielCastriani/Fixed-Width-File-Parser
|
fc8e4c00aaf703d5b2090854c708f9837591c480
|
[
"MIT"
] | null | null | null |
fixed_file/__int__.py
|
DanielCastriani/Fixed-Width-File-Parser
|
fc8e4c00aaf703d5b2090854c708f9837591c480
|
[
"MIT"
] | null | null | null |
from fixed_file.parse import FixedFile
from fixed_file.layout import Layout
| 25.333333
| 38
| 0.868421
| 12
| 76
| 5.333333
| 0.583333
| 0.28125
| 0.40625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105263
| 76
| 2
| 39
| 38
| 0.941176
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
1a97e25ddbb400d2b7f19fd7425712f7ae120692
| 7,700
|
py
|
Python
|
particle_packing/tests/test_ellipse.py
|
aluchies/particle_packing
|
127603a519ae25979de6c6197810a7ea38ec945b
|
[
"BSD-3-Clause"
] | null | null | null |
particle_packing/tests/test_ellipse.py
|
aluchies/particle_packing
|
127603a519ae25979de6c6197810a7ea38ec945b
|
[
"BSD-3-Clause"
] | null | null | null |
particle_packing/tests/test_ellipse.py
|
aluchies/particle_packing
|
127603a519ae25979de6c6197810a7ea38ec945b
|
[
"BSD-3-Clause"
] | null | null | null |
from particle_packing import ellipse
from particle_packing.ellipse import Ellipse
from scipy.spatial.distance import pdist
import unittest
import numpy as np
class TestCode(unittest.TestCase):
    """Unit tests for the ``ellipse.pack`` random sequential addition routines.

    Every test packs particles into the unit square and verifies two
    invariants: each particle lies inside the container
    (``square_container_potential() >= 1``) and no two particles overlap
    (pairwise centre distances for circles, ``overlap_potential() >= 1``
    for ellipses).

    The loops use ``range`` instead of the Python-2-only ``xrange`` so the
    suite runs under both Python 2 and Python 3; dead assignments and
    unused intermediates from the original have been removed.
    """

    def test1_pack_rsa_mda(self):
        """
        circle, npoints small
        """
        npoints = 5
        radius = 0.05 * np.ones(2)
        phi = 0.
        step_limit = 10 ** 2
        x, y = ellipse.pack.rsa_mda(npoints, radius, phi, step_limit)
        for i in range(len(x)):
            center = np.array([x[i], y[i]])
            c = Ellipse(center, radius, phi)
            F = c.square_container_potential()
            self.assertGreaterEqual(F, 1.)
        # Equal circles do not overlap iff every centre distance exceeds
        # one diameter.
        xy = np.vstack([x, y]).transpose()
        d = pdist(xy)
        self.assertGreater(d.min(), 2. * radius[0])
        self.assertEqual(npoints, len(x))

    def test2_pack_rsa_mda(self):
        """
        circle, npoints large
        """
        npoints = 50
        radius = 0.05 * np.ones(2)
        phi = 0.
        step_limit = 10 ** 4
        x, y = ellipse.pack.rsa_mda(npoints, radius, phi, step_limit)
        for i in range(len(x)):
            center = np.array([x[i], y[i]])
            c = Ellipse(center, radius, phi)
            F = c.square_container_potential()
            self.assertGreaterEqual(F, 1.)
        xy = np.vstack([x, y]).transpose()
        d = pdist(xy)
        self.assertGreater(d.min(), 2. * radius[0])
        self.assertEqual(npoints, len(x))

    def test3_pack_rsa_mda(self):
        """
        circle, random seed test
        """
        npoints = 5
        radius = 0.05 * np.ones(2)
        phi = 0.
        step_limit = 10 ** 3
        randSeed = 100
        # The same seed must reproduce the same packing exactly.
        x0, y0 = ellipse.pack.rsa_mda(npoints, radius, phi, step_limit,
                                      randSeed)
        x1, y1 = ellipse.pack.rsa_mda(npoints, radius, phi, step_limit,
                                      randSeed)
        self.assertTrue(np.allclose(x0, x1))
        self.assertTrue(np.allclose(y0, y1))

    def test4_pack_rsa_mda(self):
        """
        ellipse, npoints small
        """
        npoints = 5
        radius = 0.05 * np.array([1., 2.])
        phi = 0.
        step_limit = 10 ** 2
        x, y = ellipse.pack.rsa_mda(npoints, radius, phi, step_limit)
        for i in range(len(x)):
            center = np.array([x[i], y[i]])
            c = Ellipse(center, radius, phi)
            F = c.square_container_potential()
            self.assertGreaterEqual(F, 1.)
        # Overlap check for every unordered pair (k < i).
        for i in range(len(x)):
            for k in range(i):
                center = np.asarray([x[i], y[i]])
                ci = Ellipse(center=center, radii=radius, phi=phi)
                center = np.asarray([x[k], y[k]])
                ck = Ellipse(center=center, radii=radius, phi=phi)
                F = ci.overlap_potential(ck)
                self.assertGreaterEqual(F, 1.)
        self.assertEqual(npoints, len(x))

    def test5_pack_rsa_mda(self):
        """
        ellipse, npoints larger
        """
        npoints = 15
        radius = 0.05 * np.array([1., 2.])
        phi = 0.
        step_limit = 10 ** 2
        x, y = ellipse.pack.rsa_mda(npoints, radius, phi, step_limit)
        for i in range(len(x)):
            center = np.array([x[i], y[i]])
            c = Ellipse(center, radius, phi)
            F = c.square_container_potential()
            self.assertGreaterEqual(F, 1.)
        for i in range(len(x)):
            for k in range(i):
                center = np.asarray([x[i], y[i]])
                ci = Ellipse(center=center, radii=radius, phi=phi)
                center = np.asarray([x[k], y[k]])
                ck = Ellipse(center=center, radii=radius, phi=phi)
                F = ci.overlap_potential(ck)
                self.assertGreaterEqual(F, 1.)
        self.assertEqual(npoints, len(x))

    def test1_pack_rsa_md(self):
        """
        circle, npoints small
        """
        npoints = 5
        radius = 0.05 * np.ones(2)
        step_limit = 10 ** 2
        # rsa_md draws the orientation angles itself and returns them.
        x, y, phi = ellipse.pack.rsa_md(npoints, radius, step_limit)
        for i in range(len(x)):
            center = np.array([x[i], y[i]])
            c = Ellipse(center, radius, phi[i])
            F = c.square_container_potential()
            self.assertGreaterEqual(F, 1.)
        xy = np.vstack([x, y]).transpose()
        d = pdist(xy)
        self.assertGreater(d.min(), 2. * radius[0])
        self.assertEqual(npoints, len(x))

    def test2_pack_rsa_md(self):
        """
        circle, npoints large
        """
        npoints = 50
        radius = 0.05 * np.ones(2)
        step_limit = 10 ** 4
        x, y, phi = ellipse.pack.rsa_md(npoints, radius, step_limit)
        for i in range(len(x)):
            center = np.array([x[i], y[i]])
            c = Ellipse(center, radius, phi[i])
            F = c.square_container_potential()
            self.assertGreaterEqual(F, 1.)
        xy = np.vstack([x, y]).transpose()
        d = pdist(xy)
        self.assertGreater(d.min(), 2. * radius[0])
        self.assertEqual(npoints, len(x))

    def test3_pack_rsa_md(self):
        """
        circle, random seed test
        """
        npoints = 5
        radius = 0.05 * np.ones(2)
        step_limit = 10 ** 3
        randSeed = 100
        x0, y0, phi0 = ellipse.pack.rsa_md(npoints, radius, step_limit,
                                           randSeed)
        x1, y1, phi1 = ellipse.pack.rsa_md(npoints, radius, step_limit,
                                           randSeed)
        self.assertTrue(np.allclose(x0, x1))
        self.assertTrue(np.allclose(y0, y1))
        self.assertTrue(np.allclose(phi0, phi1))

    def test4_pack_rsa_md(self):
        """
        ellipse, npoints small
        """
        npoints = 5
        radius = 0.05 * np.array([1., 2.])
        step_limit = 10 ** 2
        x, y, phi = ellipse.pack.rsa_md(npoints, radius, step_limit)
        for i in range(len(x)):
            center = np.array([x[i], y[i]])
            c = Ellipse(center, radius, phi[i])
            F = c.square_container_potential()
            self.assertGreaterEqual(F, 1.)
        # Overlap check uses each particle's own sampled orientation.
        for i in range(len(x)):
            for k in range(i):
                center = np.asarray([x[i], y[i]])
                ci = Ellipse(center=center, radii=radius, phi=phi[i])
                center = np.asarray([x[k], y[k]])
                ck = Ellipse(center=center, radii=radius, phi=phi[k])
                F = ci.overlap_potential(ck)
                self.assertGreaterEqual(F, 1.)
        self.assertEqual(npoints, len(x))

    def test5_pack_rsa_md(self):
        """
        ellipse, npoints larger
        """
        npoints = 15
        radius = 0.05 * np.array([1., 2.])
        step_limit = 10 ** 2
        x, y, phi = ellipse.pack.rsa_md(npoints, radius, step_limit)
        for i in range(len(x)):
            center = np.array([x[i], y[i]])
            c = Ellipse(center, radius, phi[i])
            F = c.square_container_potential()
            self.assertGreaterEqual(F, 1.)
        for i in range(len(x)):
            for k in range(i):
                center = np.asarray([x[i], y[i]])
                ci = Ellipse(center=center, radii=radius, phi=phi[i])
                center = np.asarray([x[k], y[k]])
                ck = Ellipse(center=center, radii=radius, phi=phi[k])
                F = ci.overlap_potential(ck)
                self.assertGreaterEqual(F, 1.)
        self.assertEqual(npoints, len(x))
if __name__ == '__main__':
    # print() function instead of the Python-2 print statement: the
    # original form is a SyntaxError under Python 3, and a single
    # parenthesised argument prints identically under Python 2.
    print('Running unit tests for ellipse.so')
    unittest.main()
| 21.448468
| 71
| 0.501299
| 989
| 7,700
| 3.807887
| 0.094034
| 0.107807
| 0.04461
| 0.038237
| 0.923526
| 0.920074
| 0.906798
| 0.906798
| 0.898301
| 0.872544
| 0
| 0.030371
| 0.358571
| 7,700
| 359
| 72
| 21.448468
| 0.732132
| 0
| 0
| 0.862069
| 0
| 0
| 0.005679
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 0
| null | null | 0
| 0.028736
| null | null | 0.005747
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
6457f9ce5516b02575ce4a4c6e64ba2d535475a8
| 4,646
|
py
|
Python
|
tests/test3D.py
|
zzZ5/CellularAutomaton
|
592bce13c2688bbfcaa541081c457cc22258f010
|
[
"MIT"
] | 2
|
2020-04-18T01:42:13.000Z
|
2020-06-16T03:35:45.000Z
|
tests/test3D.py
|
zzZ5/CellularAutomaton
|
592bce13c2688bbfcaa541081c457cc22258f010
|
[
"MIT"
] | null | null | null |
tests/test3D.py
|
zzZ5/CellularAutomaton
|
592bce13c2688bbfcaa541081c457cc22258f010
|
[
"MIT"
] | null | null | null |
from board import Board
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# A single 10x10 layer: a small cluster of live cells near the top-left
# corner, every other cell dead.  The original spelled out ten identical
# copies of this layer as a 100-line literal; generating them removes the
# duplication while producing exactly the same data.
_layer = [
    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 1, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 1, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
] + [[0] * 10 for _ in range(6)]
# Ten independent copies stacked into a 10x10x10 initial state; row[:]
# copies keep the layers from sharing list objects, matching the original
# literal where every row was a distinct list.
a = [[row[:] for row in _layer] for _ in range(10)]
b = Board(a, liveNum=[4, 5, 6, 7, 8], generateNum=[6, 7])
fig = plt.figure()
# Turn on interactive mode so the figure redraws inside the loop.
plt.ion()
# Animation loop: 50 generations.
for index in range(50):
    print("第", index, "次:")
    # Clear the previous frame.
    fig.clf()
    # Advance the automaton one generation.
    c = b.next()
    x = []
    y = []
    z = []
    # Collect the coordinates of every live cell.
    for i in range(len(c)):
        for j in range(len(c[i])):
            for m in range(len(c[i][j])):
                if c[i][j][m]:
                    x.append(m)
                    y.append(j)
                    z.append(i)
    # Fresh 3D axes for this frame.
    ax = fig.add_subplot(111, projection="3d")
    # 3D scatter plot of the live cells.
    ax.scatter(x, y, z)
    # Axis labels (the STSong font renders the CJK label text).
    ax.set_xlabel("X轴", fontproperties='STSong')
    ax.set_ylabel("Y轴", fontproperties='STSong')
    ax.set_zlabel("Z轴", fontproperties='STSong')
    # Short pause so each generation stays visible.
    plt.pause(0.2)
# Leave interactive mode and keep the final frame on screen.
plt.ioff()
plt.show()
| 31.605442
| 57
| 0.317908
| 1,134
| 4,646
| 1.29806
| 0.066138
| 1.235054
| 1.789402
| 2.30163
| 0.695652
| 0.679348
| 0.679348
| 0.679348
| 0.679348
| 0.679348
| 0
| 0.356967
| 0.386784
| 4,646
| 146
| 58
| 31.821918
| 0.159705
| 0.012484
| 0
| 0.769231
| 0
| 0
| 0.006336
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.023077
| 0
| 0.023077
| 0.007692
| 0
| 0
| 1
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
649214e407ff51fbf0f94c57f8a40de1d854a57c
| 22,124
|
py
|
Python
|
tests/unit/backend/wmg/test_query.py
|
HumanCellAtlas/dcp-prototype
|
44ca66a266004124f39d7d3e3dd75e9076012ff0
|
[
"MIT"
] | 2
|
2020-02-07T18:12:12.000Z
|
2020-02-11T14:59:03.000Z
|
tests/unit/backend/wmg/test_query.py
|
HumanCellAtlas/dcp-prototype
|
44ca66a266004124f39d7d3e3dd75e9076012ff0
|
[
"MIT"
] | 173
|
2020-01-29T17:48:02.000Z
|
2020-03-20T02:52:58.000Z
|
tests/unit/backend/wmg/test_query.py
|
HumanCellAtlas/dcp-prototype
|
44ca66a266004124f39d7d3e3dd75e9076012ff0
|
[
"MIT"
] | null | null | null |
import unittest
from typing import NamedTuple
from backend.wmg.api.v1 import build_dot_plot_matrix
from backend.wmg.data.query import WmgQueryCriteria, WmgQuery
from backend.wmg.data.schemas.cube_schema import cube_non_indexed_dims
from tests.unit.backend.wmg.fixtures.test_snapshot import (
create_temp_wmg_snapshot,
all_ones_expression_summary_values,
all_tens_cell_counts_values,
)
# TODO: Test build_* methods separately in test_v1.py. This package's unit tests need only test the raw results of
# WmgQuery methods
class QueryTest(unittest.TestCase):
    def test__query_with_no_genes__returns_empty_result(self):
        """Querying with no gene ids should yield an empty dot-plot matrix."""
        # Criteria deliberately omit gene_ontology_term_ids.
        criteria = WmgQueryCriteria(
            organism_ontology_term_id="organism_ontology_term_id_0",
            tissue_ontology_term_ids=["tissue_ontology_term_id_0"],
        )
        dim_size = 1
        with create_temp_wmg_snapshot(
            dim_size=dim_size, expression_summary_vals_fn=all_ones_expression_summary_values
        ) as snapshot:
            query = WmgQuery(snapshot)
            result = build_dot_plot_matrix(query.expression_summary(criteria), query.cell_counts(criteria))
        # Every output column must be present but contain no rows.
        expected = {
            "cell_type_ontology_term_id": {},
            "gene_ontology_term_id": {},
            "n_cells": {},
            "n_cells_cell_type": {},
            "n_cells_tissue": {},
            "nnz": {},
            "sum": {},
            "tissue_ontology_term_id": {},
        }
        self.assertEqual(expected, result.to_dict())
def test__query_all_indexed_dims_single_value__returns_correct_result(self):
criteria = WmgQueryCriteria(
gene_ontology_term_ids=["gene_ontology_term_id_0"],
organism_ontology_term_id="organism_ontology_term_id_1",
tissue_ontology_term_ids=["tissue_ontology_term_id_2"],
)
dim_size = 3
with create_temp_wmg_snapshot(
dim_size=dim_size,
expression_summary_vals_fn=all_ones_expression_summary_values,
cell_counts_generator_fn=all_tens_cell_counts_values,
) as snapshot:
query = WmgQuery(snapshot)
result = build_dot_plot_matrix(query.expression_summary(criteria), query.cell_counts(criteria))
# sanity check the expected value of the stats (n_cells, nnz, sum) for each data viz point; if this fails, the
# cube test fixture may have changed (e.g. TileDB Array schema) or the logic for creating the test cube fixture
# has changed
expected_cell_count_per_cell_type = dim_size ** (len(cube_non_indexed_dims) - 1)
assert expected_cell_count_per_cell_type == 729
expected = [
{
"gene_ontology_term_id": "gene_ontology_term_id_0",
"tissue_ontology_term_id": "tissue_ontology_term_id_2",
"cell_type_ontology_term_id": "cell_type_ontology_term_id_0",
"n_cells": 729,
"n_cells_cell_type": 7290,
"n_cells_tissue": 21870,
"nnz": 729,
"sum": 729.0,
},
{
"gene_ontology_term_id": "gene_ontology_term_id_0",
"tissue_ontology_term_id": "tissue_ontology_term_id_2",
"cell_type_ontology_term_id": "cell_type_ontology_term_id_1",
"n_cells": 729,
"n_cells_cell_type": 7290,
"n_cells_tissue": 21870,
"nnz": 729,
"sum": 729.0,
},
{
"gene_ontology_term_id": "gene_ontology_term_id_0",
"tissue_ontology_term_id": "tissue_ontology_term_id_2",
"cell_type_ontology_term_id": "cell_type_ontology_term_id_2",
"n_cells": 729,
"n_cells_cell_type": 7290,
"n_cells_tissue": 21870,
"nnz": 729,
"sum": 729.0,
},
]
self.assertEqual(
expected,
sorted(
result.to_dict("records"),
key=lambda r: (
r["gene_ontology_term_id"],
r["tissue_ontology_term_id"],
r["cell_type_ontology_term_id"],
),
),
)
def test__query_all_indexed_dims_multi_valued__returns_correct_result(self):
criteria = WmgQueryCriteria(
gene_ontology_term_ids=["gene_ontology_term_id_0", "gene_ontology_term_id_2"],
organism_ontology_term_id="organism_ontology_term_id_0",
tissue_ontology_term_ids=["tissue_ontology_term_id_1", "tissue_ontology_term_id_2"],
)
dim_size = 3
with create_temp_wmg_snapshot(
dim_size=dim_size,
expression_summary_vals_fn=all_ones_expression_summary_values,
cell_counts_generator_fn=all_tens_cell_counts_values,
) as snapshot:
query = WmgQuery(snapshot)
result = build_dot_plot_matrix(query.expression_summary(criteria), query.cell_counts(criteria))
# sanity check the expected value of the stats (n_cells, nnz, sum) for each data viz point; if this fails, the
# cube test fixture may have changed (e.g. TileDB Array schema) or the logic for creating the test cube fixture
# has changed
expected_cell_count_per_cell_type = dim_size ** (len(cube_non_indexed_dims) - 1)
assert expected_cell_count_per_cell_type == 729
expected_cell_count_per_tissue = 10 * (dim_size ** len(cube_non_indexed_dims))
assert expected_cell_count_per_tissue == 21870
expected = [
{
"gene_ontology_term_id": "gene_ontology_term_id_0",
"tissue_ontology_term_id": "tissue_ontology_term_id_1",
"cell_type_ontology_term_id": "cell_type_ontology_term_id_0",
"n_cells": 729,
"n_cells_cell_type": 7290,
"n_cells_tissue": 21870,
"nnz": 729,
"sum": 729.0,
},
{
"gene_ontology_term_id": "gene_ontology_term_id_0",
"tissue_ontology_term_id": "tissue_ontology_term_id_1",
"cell_type_ontology_term_id": "cell_type_ontology_term_id_1",
"n_cells": 729,
"n_cells_cell_type": 7290,
"n_cells_tissue": 21870,
"nnz": 729,
"sum": 729.0,
},
{
"gene_ontology_term_id": "gene_ontology_term_id_0",
"tissue_ontology_term_id": "tissue_ontology_term_id_1",
"cell_type_ontology_term_id": "cell_type_ontology_term_id_2",
"n_cells": 729,
"n_cells_cell_type": 7290,
"n_cells_tissue": 21870,
"nnz": 729,
"sum": 729.0,
},
{
"gene_ontology_term_id": "gene_ontology_term_id_0",
"tissue_ontology_term_id": "tissue_ontology_term_id_2",
"cell_type_ontology_term_id": "cell_type_ontology_term_id_0",
"n_cells": 729,
"n_cells_cell_type": 7290,
"n_cells_tissue": 21870,
"nnz": 729,
"sum": 729.0,
},
{
"gene_ontology_term_id": "gene_ontology_term_id_0",
"tissue_ontology_term_id": "tissue_ontology_term_id_2",
"cell_type_ontology_term_id": "cell_type_ontology_term_id_1",
"n_cells": 729,
"n_cells_cell_type": 7290,
"n_cells_tissue": 21870,
"nnz": 729,
"sum": 729.0,
},
{
"gene_ontology_term_id": "gene_ontology_term_id_0",
"tissue_ontology_term_id": "tissue_ontology_term_id_2",
"cell_type_ontology_term_id": "cell_type_ontology_term_id_2",
"n_cells": 729,
"n_cells_cell_type": 7290,
"n_cells_tissue": 21870,
"nnz": 729,
"sum": 729.0,
},
{
"gene_ontology_term_id": "gene_ontology_term_id_2",
"tissue_ontology_term_id": "tissue_ontology_term_id_1",
"cell_type_ontology_term_id": "cell_type_ontology_term_id_0",
"n_cells": 729,
"n_cells_cell_type": 7290,
"n_cells_tissue": 21870,
"nnz": 729,
"sum": 729.0,
},
{
"gene_ontology_term_id": "gene_ontology_term_id_2",
"tissue_ontology_term_id": "tissue_ontology_term_id_1",
"cell_type_ontology_term_id": "cell_type_ontology_term_id_1",
"n_cells": 729,
"n_cells_cell_type": 7290,
"n_cells_tissue": 21870,
"nnz": 729,
"sum": 729.0,
},
{
"gene_ontology_term_id": "gene_ontology_term_id_2",
"tissue_ontology_term_id": "tissue_ontology_term_id_1",
"cell_type_ontology_term_id": "cell_type_ontology_term_id_2",
"n_cells": 729,
"n_cells_cell_type": 7290,
"n_cells_tissue": 21870,
"nnz": 729,
"sum": 729.0,
},
{
"gene_ontology_term_id": "gene_ontology_term_id_2",
"tissue_ontology_term_id": "tissue_ontology_term_id_2",
"cell_type_ontology_term_id": "cell_type_ontology_term_id_0",
"n_cells": 729,
"n_cells_cell_type": 7290,
"n_cells_tissue": 21870,
"nnz": 729,
"sum": 729.0,
},
{
"gene_ontology_term_id": "gene_ontology_term_id_2",
"tissue_ontology_term_id": "tissue_ontology_term_id_2",
"cell_type_ontology_term_id": "cell_type_ontology_term_id_1",
"n_cells": 729,
"n_cells_cell_type": 7290,
"n_cells_tissue": 21870,
"nnz": 729,
"sum": 729.0,
},
{
"gene_ontology_term_id": "gene_ontology_term_id_2",
"tissue_ontology_term_id": "tissue_ontology_term_id_2",
"cell_type_ontology_term_id": "cell_type_ontology_term_id_2",
"n_cells": 729,
"n_cells_cell_type": 7290,
"n_cells_tissue": 21870,
"nnz": 729,
"sum": 729.0,
},
]
self.assertEqual(
expected,
sorted(
result.to_dict("records"),
key=lambda r: (
r["gene_ontology_term_id"],
r["tissue_ontology_term_id"],
r["cell_type_ontology_term_id"],
),
),
)
@classmethod
def setUpClass(cls) -> None:
super().setUpClass()
cls.maxDiff = None
def test__query_non_indexed_dim_single_valued__returns_correct_result(self):
criteria = WmgQueryCriteria(
gene_ontology_term_ids=["gene_ontology_term_id_0"],
organism_ontology_term_id="organism_ontology_term_id_0",
tissue_ontology_term_ids=["tissue_ontology_term_id_0"],
dataset_ids=["dataset_id_1"], # <-- non-indexed dim, single-valued
)
dim_size = 3
with create_temp_wmg_snapshot(
dim_size=dim_size,
expression_summary_vals_fn=all_ones_expression_summary_values,
cell_counts_generator_fn=all_tens_cell_counts_values,
) as snapshot:
query = WmgQuery(snapshot)
result = build_dot_plot_matrix(query.expression_summary(criteria), query.cell_counts(criteria))
# sanity check the expected value of the stats (n_cells, nnz, sum) for each data viz point; if this fails, the
# cube test fixture may have changed (e.g. TileDB Array schema) or the logic for creating the test cube fixture
# has changed
expected_cell_count_per_cell_type = dim_size ** (len(cube_non_indexed_dims) - 2)
assert expected_cell_count_per_cell_type == 243
expected_cell_count_per_tissue = 10 * (dim_size ** (len(cube_non_indexed_dims) - 1))
assert expected_cell_count_per_tissue == 7290
expected = [
{
"gene_ontology_term_id": "gene_ontology_term_id_0",
"tissue_ontology_term_id": "tissue_ontology_term_id_0",
"cell_type_ontology_term_id": "cell_type_ontology_term_id_0",
"n_cells": 243,
"n_cells_cell_type": 2430,
"n_cells_tissue": 7290,
"nnz": 243,
"sum": 243.0,
},
{
"gene_ontology_term_id": "gene_ontology_term_id_0",
"tissue_ontology_term_id": "tissue_ontology_term_id_0",
"cell_type_ontology_term_id": "cell_type_ontology_term_id_1",
"n_cells": 243,
"n_cells_cell_type": 2430,
"n_cells_tissue": 7290,
"nnz": 243,
"sum": 243.0,
},
{
"gene_ontology_term_id": "gene_ontology_term_id_0",
"tissue_ontology_term_id": "tissue_ontology_term_id_0",
"cell_type_ontology_term_id": "cell_type_ontology_term_id_2",
"n_cells": 243,
"n_cells_cell_type": 2430,
"n_cells_tissue": 7290,
"nnz": 243,
"sum": 243.0,
},
]
self.assertEqual(
expected,
sorted(
result.to_dict("records"),
key=lambda r: (
r["gene_ontology_term_id"],
r["tissue_ontology_term_id"],
r["cell_type_ontology_term_id"],
),
),
)
def test__query_non_indexed_dim_multi_valued__returns_correct_result(self):
criteria = WmgQueryCriteria(
gene_ontology_term_ids=["gene_ontology_term_id_0"],
organism_ontology_term_id="organism_ontology_term_id_0",
tissue_ontology_term_ids=["tissue_ontology_term_id_0"],
dataset_ids=["dataset_id_1", "dataset_id_0"], # <-- non-indexed dim, multi-valued
)
dim_size = 3
with create_temp_wmg_snapshot(
dim_size=dim_size,
expression_summary_vals_fn=all_ones_expression_summary_values,
cell_counts_generator_fn=all_tens_cell_counts_values,
) as snapshot:
query = WmgQuery(snapshot)
result = build_dot_plot_matrix(query.expression_summary(criteria), query.cell_counts(criteria))
# sanity check the expected value of the stats (n_cells, nnz, sum) for each data viz point; if this fails, the
# cube test fixture may have changed (e.g. TileDB Array schema) or the logic for creating the test cube fixture
# has changed
expected_cell_count_per_cell_type = dim_size ** (len(cube_non_indexed_dims) - 2) * 2
assert expected_cell_count_per_cell_type == 486
expected_cell_count_per_tissue = 10 * (dim_size ** (len(cube_non_indexed_dims) - 1) * 2)
assert expected_cell_count_per_tissue == 14580
expected = [
{
"gene_ontology_term_id": "gene_ontology_term_id_0",
"tissue_ontology_term_id": "tissue_ontology_term_id_0",
"cell_type_ontology_term_id": "cell_type_ontology_term_id_0",
"n_cells": 486,
"n_cells_cell_type": 4860,
"n_cells_tissue": 14580,
"nnz": 486,
"sum": 486.0,
},
{
"gene_ontology_term_id": "gene_ontology_term_id_0",
"tissue_ontology_term_id": "tissue_ontology_term_id_0",
"cell_type_ontology_term_id": "cell_type_ontology_term_id_1",
"n_cells": 486,
"n_cells_cell_type": 4860,
"n_cells_tissue": 14580,
"nnz": 486,
"sum": 486.0,
},
{
"gene_ontology_term_id": "gene_ontology_term_id_0",
"tissue_ontology_term_id": "tissue_ontology_term_id_0",
"cell_type_ontology_term_id": "cell_type_ontology_term_id_2",
"n_cells": 486,
"n_cells_cell_type": 4860,
"n_cells_tissue": 14580,
"nnz": 486,
"sum": 486.0,
},
]
self.assertEqual(
expected,
sorted(
result.to_dict("records"),
key=lambda r: (
r["gene_ontology_term_id"],
r["tissue_ontology_term_id"],
r["cell_type_ontology_term_id"],
),
),
)
def test__query_non_indexed_dim_single_and_multi_valued__returns_correct_result(self):
criteria = WmgQueryCriteria(
gene_ontology_term_ids=["gene_ontology_term_id_0"],
organism_ontology_term_id="organism_ontology_term_id_0",
tissue_ontology_term_ids=["tissue_ontology_term_id_0"],
ethnicity_ontology_term_ids=["ethnicity_ontology_term_id_1"], # <-- non-indexed dim, single-valued
dataset_ids=["dataset_id_1", "dataset_id_0"], # <-- non-indexed dim, multi-valued
)
dim_size = 3
with create_temp_wmg_snapshot(
dim_size=dim_size,
expression_summary_vals_fn=all_ones_expression_summary_values,
cell_counts_generator_fn=all_tens_cell_counts_values,
) as snapshot:
query = WmgQuery(snapshot)
result = build_dot_plot_matrix(query.expression_summary(criteria), query.cell_counts(criteria))
# sanity check the expected value of the stats (n_cells, nnz, sum) for each data viz point; if this fails, the
# cube test fixture may have changed (e.g. TileDB Array schema) or the logic for creating the test cube fixture
# has changed
expected_cell_count_per_cell_type = dim_size ** (len(cube_non_indexed_dims) - 3) * 1 * 2
assert expected_cell_count_per_cell_type == 162
expected_cell_count_per_tissue = 10 * (dim_size ** (len(cube_non_indexed_dims) - 2) * 1 * 2)
assert expected_cell_count_per_tissue == 4860
expected = [
{
"gene_ontology_term_id": "gene_ontology_term_id_0",
"tissue_ontology_term_id": "tissue_ontology_term_id_0",
"cell_type_ontology_term_id": "cell_type_ontology_term_id_0",
"n_cells": 162,
"n_cells_cell_type": 1620,
"n_cells_tissue": 4860,
"nnz": 162,
"sum": 162.0,
},
{
"gene_ontology_term_id": "gene_ontology_term_id_0",
"tissue_ontology_term_id": "tissue_ontology_term_id_0",
"cell_type_ontology_term_id": "cell_type_ontology_term_id_1",
"n_cells": 162,
"n_cells_cell_type": 1620,
"n_cells_tissue": 4860,
"nnz": 162,
"sum": 162.0,
},
{
"gene_ontology_term_id": "gene_ontology_term_id_0",
"tissue_ontology_term_id": "tissue_ontology_term_id_0",
"cell_type_ontology_term_id": "cell_type_ontology_term_id_2",
"n_cells": 162,
"n_cells_cell_type": 1620,
"n_cells_tissue": 4860,
"nnz": 162,
"sum": 162.0,
},
]
self.assertEqual(
expected,
sorted(
result.to_dict("records"),
key=lambda r: (
r["gene_ontology_term_id"],
r["tissue_ontology_term_id"],
r["cell_type_ontology_term_id"],
),
),
)
class QueryPrimaryFilterDimensionsTest(unittest.TestCase):
    """Tests for WmgQuery's primary-filter-dimension term listing methods."""

    def test__single_dimension__returns_all_dimension_and_terms(self):
        dim_size = 3
        with create_temp_wmg_snapshot(dim_size=dim_size) as snapshot:
            result = WmgQuery(snapshot).list_primary_filter_dimension_term_ids("gene_ontology_term_id")
        # BUG FIX: assertEquals is a deprecated alias; use assertEqual.
        self.assertEqual(["gene_ontology_term_id_0", "gene_ontology_term_id_1", "gene_ontology_term_id_2"], result)

    def test__multiple_dimensions__returns_all_dimensions_and_terms_as_tuples(self):
        dim_size = 3

        def exclude_one_gene_per_organism(logical_coord: NamedTuple) -> bool:
            # HACK: this is called while building both the "expr summary" and "cell count"
            # cubes, but the latter has no gene_ontology_term_id dimension.
            if "gene_ontology_term_id" not in logical_coord._fields:
                return False
            # Exclude gene_<i> from organism_<i>, so each organism lacks exactly one gene.
            return logical_coord.gene_ontology_term_id == logical_coord.organism_ontology_term_id.replace(
                "organism", "gene"
            )

        with create_temp_wmg_snapshot(
            dim_size=dim_size, exclude_logical_coord_fn=exclude_one_gene_per_organism
        ) as snapshot:
            result = WmgQuery(snapshot).list_grouped_primary_filter_dimensions_term_ids(
                "gene_ontology_term_id", "organism_ontology_term_id"
            )
        # BUG FIX: assertEquals is a deprecated alias; use assertEqual.
        self.assertEqual(
            {
                "organism_ontology_term_id_0": ["gene_ontology_term_id_1", "gene_ontology_term_id_2"],
                "organism_ontology_term_id_1": ["gene_ontology_term_id_0", "gene_ontology_term_id_2"],
                "organism_ontology_term_id_2": ["gene_ontology_term_id_0", "gene_ontology_term_id_1"],
            },
            result,
        )
| 42.221374
| 120
| 0.578286
| 2,582
| 22,124
| 4.414407
| 0.068939
| 0.230567
| 0.254255
| 0.116863
| 0.888752
| 0.875504
| 0.859186
| 0.845587
| 0.828566
| 0.823215
| 0
| 0.042435
| 0.335337
| 22,124
| 523
| 121
| 42.302103
| 0.732676
| 0.070557
| 0
| 0.692641
| 0
| 0
| 0.292614
| 0.233848
| 0
| 0
| 0
| 0.001912
| 0.036797
| 1
| 0.021645
| false
| 0
| 0.012987
| 0
| 0.04329
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
64c004be25d006ba23e8ee9aec2d2c34aaa9c362
| 214
|
py
|
Python
|
src/yafowil/plone/widgets/__init__.py
|
gogobd/yafowil.plone
|
633a28dc3bbf018d84c6e399c31e26b8125399a5
|
[
"BSD-3-Clause"
] | 1
|
2019-07-09T12:46:54.000Z
|
2019-07-09T12:46:54.000Z
|
src/yafowil/plone/widgets/__init__.py
|
gogobd/yafowil.plone
|
633a28dc3bbf018d84c6e399c31e26b8125399a5
|
[
"BSD-3-Clause"
] | 18
|
2015-10-09T22:39:00.000Z
|
2021-09-06T07:01:42.000Z
|
src/yafowil/plone/widgets/__init__.py
|
gogobd/yafowil.plone
|
633a28dc3bbf018d84c6e399c31e26b8125399a5
|
[
"BSD-3-Clause"
] | 3
|
2018-01-20T18:31:55.000Z
|
2021-06-10T14:00:01.000Z
|
from yafowil.plone.widgets import datetime
from yafowil.plone.widgets import label
from yafowil.plone.widgets import recurrence
from yafowil.plone.widgets import relation
from yafowil.plone.widgets import richtext
| 35.666667
| 44
| 0.859813
| 30
| 214
| 6.133333
| 0.333333
| 0.298913
| 0.434783
| 0.625
| 0.788043
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.093458
| 214
| 5
| 45
| 42.8
| 0.948454
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
64c7fc1c2168843059470f058cbf54aeadf0842e
| 11,697
|
py
|
Python
|
bert_experimental/finetuning/bert_layer.py
|
AhmedYounes94/bert_experimental
|
c65015c560893e5a9265e295b90400382f02b229
|
[
"MIT"
] | 75
|
2019-06-29T02:09:22.000Z
|
2022-03-23T14:06:44.000Z
|
bert_experimental/finetuning/bert_layer.py
|
AhmedYounes94/bert_experimental
|
c65015c560893e5a9265e295b90400382f02b229
|
[
"MIT"
] | 10
|
2020-01-02T17:12:27.000Z
|
2022-03-07T17:42:02.000Z
|
bert_experimental/finetuning/bert_layer.py
|
AhmedYounes94/bert_experimental
|
c65015c560893e5a9265e295b90400382f02b229
|
[
"MIT"
] | 29
|
2019-06-30T13:58:07.000Z
|
2021-06-16T07:38:49.000Z
|
import os
import tensorflow as tf
import tensorflow_hub as hub
from .text_preprocessing import build_preprocessor
class BertLayer(tf.keras.layers.Layer):
    """Keras layer wrapping a TF-Hub BERT module (TF1-style ``hub.Module``).

    Optionally tokenizes raw text in-graph via ``tf.numpy_function``, selects
    which BERT variables are fine-tuned, and pools the sequence output.
    """

    def __init__(self, bert_path, seq_len=64, n_tune_layers=3,
                 pooling="cls", do_preprocessing=True, verbose=False,
                 tune_embeddings=False, trainable=True, use_layers=None,
                 as_dict=False, **kwargs):
        """
        :param bert_path: path or URL of the TF-Hub BERT module
        :param seq_len: fixed token sequence length after preprocessing
        :param n_tune_layers: number of topmost encoder layers to fine-tune
        :param pooling: "cls", "mean", "sqrt_mean", or None (masked sequence output)
        :param do_preprocessing: tokenize raw text inputs inside the graph
        :param verbose: print the trainable variables after build()
        :param tune_embeddings: also fine-tune the embedding variables
        :param trainable: whether the hub module is trainable at all
        :param use_layers: optional cap on the number of encoder layers considered
        :param as_dict: return a dict of outputs instead of the pooled tensor
        """
        self.trainable = trainable
        self.n_tune_layers = n_tune_layers
        self.tune_embeddings = tune_embeddings
        self.do_preprocessing = do_preprocessing
        self.as_dict = as_dict
        self.verbose = verbose
        self.seq_len = seq_len
        self.pooling = pooling
        self.bert_path = bert_path
        self.use_layers = use_layers
        # Variables per transformer encoder layer in the hub module's graph
        # (presumably 16 for standard BERT checkpoints — confirm per module).
        self.var_per_encoder = 16
        if self.pooling not in ["cls", "mean", "sqrt_mean", None]:
            # BUG FIX: the message was missing its closing parenthesis.
            raise NameError(
                f"Undefined pooling type (must be either 'cls', 'mean', 'sqrt_mean' or None, but is {self.pooling})"
            )
        super(BertLayer, self).__init__(**kwargs)

    def build(self, input_shape):
        """Instantiate the hub module, pick trainable variables, and set up preprocessing."""
        self.bert = hub.Module(self.build_abspath(self.bert_path),
                               trainable=self.trainable, name=f"{self.name}_module")

        # Variable-name substrings that mark a variable as trainable.
        trainable_layers = []
        if self.tune_embeddings:
            trainable_layers.append("embeddings")
        if self.pooling == "cls":
            # The pooler output is only consumed under "cls" pooling.
            trainable_layers.append("pooler")
        if self.n_tune_layers > 0:
            encoder_var_names = [var.name for var in self.bert.variables if 'encoder' in var.name]
            n_encoder_layers = int(len(encoder_var_names) / self.var_per_encoder)
            if self.use_layers:
                n_encoder_layers = min(self.use_layers, n_encoder_layers)
            # Fine-tune the topmost n_tune_layers encoder layers.
            for i in range(self.n_tune_layers):
                trainable_layers.append(f"encoder/layer_{str(n_encoder_layers - 1 - i)}/")

        # Add module variables to the layer's (non-)trainable weights.
        for var in self.bert.variables:
            if any(l in var.name for l in trainable_layers):
                self._trainable_weights.append(var)
            else:
                self._non_trainable_weights.append(var)

        if self.verbose:
            print("*** TRAINABLE VARS *** ")
            for var in self._trainable_weights:
                print(var)

        self.build_preprocessor()
        self.initialize_module()
        super(BertLayer, self).build(input_shape)

    def build_abspath(self, path):
        """Return remote module paths unchanged; resolve local paths to absolute."""
        if path.startswith(("https://", "gs://")):
            return path
        return os.path.abspath(path)

    def build_preprocessor(self):
        """Create the text preprocessor from the module's tokenization info."""
        sess = tf.compat.v1.keras.backend.get_session()
        tokenization_info = self.bert(signature="tokenization_info", as_dict=True)
        vocab_file, do_lower_case = sess.run([tokenization_info["vocab_file"],
                                              tokenization_info["do_lower_case"]])
        self.preprocessor = build_preprocessor(vocab_file, self.seq_len, do_lower_case)

    def initialize_module(self):
        """Initialize only those module variables not yet initialized in the session."""
        sess = tf.compat.v1.keras.backend.get_session()
        vars_initialized = sess.run([tf.compat.v1.is_variable_initialized(var)
                                     for var in self.bert.variables])
        uninitialized = [var for var, is_init in zip(self.bert.variables, vars_initialized)
                         if not is_init]
        if uninitialized:
            sess.run(tf.compat.v1.variables_initializer(uninitialized))

    def call(self, input):
        """Run BERT over the input and pool the output per ``self.pooling``."""
        if self.do_preprocessing:
            input = tf.numpy_function(self.preprocessor,
                                      [input], [tf.int32, tf.int32, tf.int32],
                                      name='preprocessor')
            # numpy_function loses static shapes; restore them for downstream ops.
            for feature in input:
                feature.set_shape((None, self.seq_len))

        input_ids, input_mask, segment_ids = input
        bert_inputs = dict(
            input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids
        )
        output = self.bert(inputs=bert_inputs, signature="tokens", as_dict=True)

        input_mask = tf.cast(input_mask, tf.float32)
        seq_output = output["sequence_output"]
        tok_output = mul_mask(output.get("token_output", seq_output), input_mask)

        if self.pooling == "cls":
            pooled = output["pooled_output"]
        elif self.pooling == "mean":
            pooled = masked_reduce_mean(seq_output, input_mask)
        elif self.pooling == "sqrt_mean":
            pooled = masked_reduce_sqrt_mean(seq_output, input_mask)
        else:
            pooled = mul_mask(seq_output, input_mask)

        if self.as_dict:
            output = {
                "sequence_output": seq_output,
                "pooled_output": pooled,
                "token_output": tok_output,
            }
        else:
            output = pooled

        return output

    def get_config(self):
        """Return the layer config merged with the base config (for serialization)."""
        # BUG FIX: base config was computed but discarded; merge it instead.
        config = super(BertLayer, self).get_config()
        config.update({
            "bert_path": self.bert_path,
            "seq_len": self.seq_len,
            "pooling": self.pooling,
            "n_tune_layers": self.n_tune_layers,
            "tune_embeddings": self.tune_embeddings,
            "do_preprocessing": self.do_preprocessing,
            "use_layers": self.use_layers,
            "trainable": self.trainable,
            "as_dict": self.as_dict,
            "verbose": self.verbose,
        })
        return config
class StatefulBertLayer(tf.keras.layers.Layer):
    """Keras layer wrapping a TF-Hub BERT module that also feeds an input state.

    Like BertLayer, but ``call`` additionally accepts an ``input_state`` tensor
    that is passed through to the module's "tokens" signature.
    """

    def __init__(self, bert_path, seq_len=64, n_tune_layers=3,
                 pooling="cls", do_preprocessing=True, verbose=False,
                 tune_embeddings=False, trainable=True, use_layers=None,
                 as_dict=False, **kwargs):
        """
        :param bert_path: path or URL of the TF-Hub BERT module
        :param seq_len: fixed token sequence length after preprocessing
        :param n_tune_layers: number of topmost encoder layers to fine-tune
        :param pooling: "cls", "mean", "sqrt_mean", or None (masked sequence output)
        :param do_preprocessing: tokenize raw text inputs inside the graph
        :param verbose: print the trainable variables after build()
        :param tune_embeddings: also fine-tune the embedding variables
        :param trainable: whether the hub module is trainable at all
        :param use_layers: optional cap on the number of encoder layers considered
        :param as_dict: return a dict of outputs instead of the pooled tensor
        """
        self.trainable = trainable
        self.n_tune_layers = n_tune_layers
        self.tune_embeddings = tune_embeddings
        self.do_preprocessing = do_preprocessing
        self.as_dict = as_dict
        self.verbose = verbose
        self.seq_len = seq_len
        self.pooling = pooling
        self.bert_path = bert_path
        self.use_layers = use_layers
        # Variables per transformer encoder layer in the hub module's graph
        # (presumably 16 for standard BERT checkpoints — confirm per module).
        self.var_per_encoder = 16
        if self.pooling not in ["cls", "mean", "sqrt_mean", None]:
            # BUG FIX: the message was missing its closing parenthesis.
            raise NameError(
                f"Undefined pooling type (must be either 'cls', 'mean', 'sqrt_mean' or None, but is {self.pooling})"
            )
        super(StatefulBertLayer, self).__init__(**kwargs)

    def build(self, input_shape):
        """Instantiate the hub module, pick trainable variables, and set up preprocessing."""
        self.bert = hub.Module(self.build_abspath(self.bert_path),
                               trainable=self.trainable, name=f"{self.name}_module")

        # Variable-name substrings that mark a variable as trainable.
        trainable_layers = []
        if self.tune_embeddings:
            trainable_layers.append("embeddings")
        if self.pooling == "cls":
            # The pooler output is only consumed under "cls" pooling.
            trainable_layers.append("pooler")
        if self.n_tune_layers > 0:
            encoder_var_names = [var.name for var in self.bert.variables if 'encoder' in var.name]
            n_encoder_layers = int(len(encoder_var_names) / self.var_per_encoder)
            if self.use_layers:
                n_encoder_layers = min(self.use_layers, n_encoder_layers)
            # Fine-tune the topmost n_tune_layers encoder layers.
            for i in range(self.n_tune_layers):
                trainable_layers.append(f"encoder/layer_{str(n_encoder_layers - 1 - i)}/")

        # Add module variables to the layer's (non-)trainable weights.
        for var in self.bert.variables:
            if any(l in var.name for l in trainable_layers):
                self._trainable_weights.append(var)
            else:
                self._non_trainable_weights.append(var)

        if self.verbose:
            print("*** TRAINABLE VARS *** ")
            for var in self._trainable_weights:
                print(var)

        self.build_preprocessor()
        self.initialize_module()
        super(StatefulBertLayer, self).build(input_shape)

    def build_abspath(self, path):
        """Return remote module paths unchanged; resolve local paths to absolute."""
        if path.startswith(("https://", "gs://")):
            return path
        return os.path.abspath(path)

    def build_preprocessor(self):
        """Create the text preprocessor from the module's tokenization info."""
        sess = tf.compat.v1.keras.backend.get_session()
        tokenization_info = self.bert(signature="tokenization_info", as_dict=True)
        vocab_file, do_lower_case = sess.run([tokenization_info["vocab_file"],
                                              tokenization_info["do_lower_case"]])
        self.preprocessor = build_preprocessor(vocab_file, self.seq_len, do_lower_case)

    def initialize_module(self):
        """Initialize only those module variables not yet initialized in the session."""
        sess = tf.compat.v1.keras.backend.get_session()
        vars_initialized = sess.run([tf.compat.v1.is_variable_initialized(var)
                                     for var in self.bert.variables])
        uninitialized = [var for var, is_init in zip(self.bert.variables, vars_initialized)
                         if not is_init]
        if uninitialized:
            sess.run(tf.compat.v1.variables_initializer(uninitialized))

    def call(self, input):
        """Run BERT over (text-or-ids, state) and pool the output per ``self.pooling``."""
        if self.do_preprocessing:
            input_text, input_state = input
            preprocessed_text = tf.numpy_function(
                self.preprocessor, [input_text],
                [tf.int32, tf.int32, tf.int32],
                name='preprocessor')
            # numpy_function loses static shapes; restore them for downstream ops.
            for feature in preprocessed_text:
                feature.set_shape((None, self.seq_len))
            input_ids, input_mask, segment_ids = preprocessed_text
        else:
            input_ids, input_mask, segment_ids, input_state = input

        bert_inputs = dict(
            input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, input_state=input_state
        )
        output = self.bert(inputs=bert_inputs, signature="tokens", as_dict=True)

        input_mask = tf.cast(input_mask, tf.float32)
        seq_output = output["sequence_output"]
        tok_output = mul_mask(output.get("token_output", seq_output), input_mask)

        if self.pooling == "cls":
            pooled = output["pooled_output"]
        elif self.pooling == "mean":
            pooled = masked_reduce_mean(seq_output, input_mask)
        elif self.pooling == "sqrt_mean":
            pooled = masked_reduce_sqrt_mean(seq_output, input_mask)
        else:
            pooled = mul_mask(seq_output, input_mask)

        if self.as_dict:
            output["pooled_output"] = pooled
        else:
            output = pooled

        return output

    def get_config(self):
        """Return the layer config merged with the base config (for serialization)."""
        # BUG FIX: base config was computed but discarded; merge it instead.
        config = super(StatefulBertLayer, self).get_config()
        config.update({
            "bert_path": self.bert_path,
            "seq_len": self.seq_len,
            "pooling": self.pooling,
            "n_tune_layers": self.n_tune_layers,
            "tune_embeddings": self.tune_embeddings,
            "do_preprocessing": self.do_preprocessing,
            "use_layers": self.use_layers,
            "trainable": self.trainable,
            "as_dict": self.as_dict,
            "verbose": self.verbose,
        })
        return config
def mul_mask(x, m):
    """Zero out masked positions of x by broadcasting m over the trailing axis."""
    expanded_mask = tf.expand_dims(m, axis=-1)
    return x * expanded_mask
def masked_reduce_mean(x, m):
    """Mean of x over axis 1, counting only positions where mask m is nonzero.

    The small epsilon in the denominator guards against an all-zero mask.
    """
    masked_sum = tf.reduce_sum(mul_mask(x, m), axis=1)
    mask_total = tf.reduce_sum(m, axis=1, keepdims=True) + 1e-10
    return masked_sum / mask_total
def masked_reduce_sqrt_mean(x, m):
    """Sum of masked x over axis 1, scaled by the square root of the mask total.

    The small epsilon in the denominator guards against an all-zero mask.
    """
    masked_sum = tf.reduce_sum(mul_mask(x, m), axis=1)
    sqrt_total = tf.sqrt(tf.reduce_sum(m, axis=1, keepdims=True)) + 1e-10
    return masked_sum / sqrt_total
| 36.326087
| 115
| 0.593913
| 1,380
| 11,697
| 4.77029
| 0.105797
| 0.026736
| 0.023394
| 0.018229
| 0.931794
| 0.928756
| 0.909008
| 0.904147
| 0.904147
| 0.904147
| 0
| 0.006058
| 0.308541
| 11,697
| 321
| 116
| 36.439252
| 0.807864
| 0.008464
| 0
| 0.825203
| 0
| 0.00813
| 0.083067
| 0.006038
| 0
| 0
| 0
| 0
| 0
| 1
| 0.069106
| false
| 0
| 0.01626
| 0.012195
| 0.138211
| 0.01626
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b3794a8d7ba30b2583285efbb415841485347f93
| 999
|
py
|
Python
|
template_user_script.py
|
dbaldwin/tello-adv-setup
|
05a0d248ecf2ef26dbc6a071ba6c85863499665b
|
[
"CNRI-Python"
] | 4
|
2020-12-17T11:02:27.000Z
|
2022-02-01T21:04:13.000Z
|
template_user_script.py
|
dbaldwin/tello-adv-setup
|
05a0d248ecf2ef26dbc6a071ba6c85863499665b
|
[
"CNRI-Python"
] | null | null | null |
template_user_script.py
|
dbaldwin/tello-adv-setup
|
05a0d248ecf2ef26dbc6a071ba6c85863499665b
|
[
"CNRI-Python"
] | 1
|
2021-07-15T21:16:54.000Z
|
2021-07-15T21:16:54.000Z
|
# User Configuration
# Example user-tunable constant; echoed by the init() and handler() hooks below.
SAMPLE_CONFIG_ITEM = 42
def init(tello, fly_flag=False):
    """Template hook — presumably invoked once at startup by the driver script
    (confirm against the runner).

    :param tello: Reference to the DJITelloPy Tello object.
    :type tello: Tello
    :param fly_flag: True - the fly flag was specified and the Tello will take
        off. False - the Tello will NOT be instructed to take off.
    :type fly_flag: bool
    :return: None
    :rtype:
    """
    message = f"Inside init method. fly_flag: {fly_flag}, sample config item: {SAMPLE_CONFIG_ITEM}"
    print(message)
def handler(tello, frame, fly_flag=False):
    """Template hook — presumably invoked per video frame by the driver script
    (confirm against the runner).

    :param tello: Reference to the DJITelloPy Tello object.
    :type tello: Tello
    :param frame: image
    :type frame:
    :param fly_flag: True - the fly flag was specified and the Tello will take
        off. False - the Tello will NOT be instructed to take off.
    :type fly_flag: bool
    :return: None
    :rtype:
    """
    message = f"Inside handler method. fly_flag: {fly_flag}, sample config item: {SAMPLE_CONFIG_ITEM}"
    print(message)
| 30.272727
| 110
| 0.650651
| 140
| 999
| 4.528571
| 0.285714
| 0.132492
| 0.126183
| 0.053628
| 0.847003
| 0.847003
| 0.847003
| 0.847003
| 0.847003
| 0.847003
| 0
| 0.002721
| 0.264264
| 999
| 32
| 111
| 31.21875
| 0.859864
| 0.596597
| 0
| 0
| 0
| 0
| 0.538217
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0
| 0
| 0.4
| 0.4
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b3b03fc1dd97de6287a33c96817cd3862b1382f0
| 13,139
|
py
|
Python
|
applications/Gesture/action_recognition/R3D/dataset/dataset_pkl.py
|
villawang/Continual_Learning_CV
|
6715fa9c741df920e56aede11cbb85a4be41871e
|
[
"BSD-3-Clause"
] | 14
|
2020-05-12T09:44:18.000Z
|
2022-02-14T10:30:28.000Z
|
applications/Gesture/action_recognition/R3D/dataset/dataset_pkl.py
|
villawang/Continual_Learning_CV
|
6715fa9c741df920e56aede11cbb85a4be41871e
|
[
"BSD-3-Clause"
] | 17
|
2020-05-18T06:12:28.000Z
|
2022-01-13T02:42:13.000Z
|
applications/Gesture/action_recognition/R3D/dataset/dataset_pkl.py
|
villawang/Continual_Learning_CV
|
6715fa9c741df920e56aede11cbb85a4be41871e
|
[
"BSD-3-Clause"
] | 8
|
2020-05-18T11:15:51.000Z
|
2021-11-11T10:55:11.000Z
|
# NOTE(review): removed a stray, unresolved merge-conflict marker ("<<<<<<< HEAD") that made
# this module unimportable; the matching "=======" / ">>>>>>>" markers are further down the
# file — resolve the remainder of the conflict there.
import os
import sys
# sys.path.append(os.getcwd()[0:-7])
# sys.path.append(os.path.join(os.getcwd()[0:-7], 'utils'))
import json
import pickle
import numpy as np
import pandas as pd
import random
import torch
import pdb
from torch.utils.data import Dataset, DataLoader,RandomSampler
import torchvision.transforms as transforms
from torchvision.utils import save_image
from PIL import Image
import matplotlib.pyplot as plt
from tqdm import tqdm, trange
import random
import skimage.util as ski_util
from sklearn.utils import shuffle
# pdb.set_trace()
# self-defined modules
def load_video(annot_path, mode):
    """Load per-video frame lists and labels for one split from a pickled DataFrame.

    :param annot_path: directory containing the '<mode>.pkl' annotation file
    :param mode: split name: 'train', 'val' or 'test'
    :return: (rgb_samples, labels) — rgb_samples[i] is the list of frame paths
        for video i and labels[i] its label
    """
    annot_file = os.path.join(annot_path, '{}.pkl'.format(mode))
    annot_df = pd.read_pickle(annot_file)
    # Removed unused locals (depth_samples, task_ind) from the original.
    rgb_samples = []
    labels = []
    for frame_i in range(annot_df.shape[0]):
        # The 'frame' column holds the per-video list of frame paths.
        rgb_samples.append(annot_df['frame'].iloc[frame_i])
        labels.append(annot_df['label'].iloc[frame_i])
    print('{}: {} videos have been loaded'.format(mode, len(rgb_samples)))
    return rgb_samples, labels
class dataset_video(Dataset):
    """Dataset of RGB video clips built from a pickled annotation DataFrame.

    Each item is a (clip, label) pair: frame indices are sampled by
    temporal_transform, frames loaded as RGB images, and the stack passed
    through spatial_transform.
    """

    def __init__(self, root_path, mode, spatial_transform=None, temporal_transform=None):
        self.root_path = root_path
        self.rgb_samples, self.labels = load_video(root_path, mode)
        # Probe the first frame of the first sample so bad paths fail fast here.
        rgb_test = Image.open(self.rgb_samples[0][0]).convert("RGB")
        self.sample_num = len(self.rgb_samples)
        self.spatial_transform = spatial_transform
        self.temporal_transform = temporal_transform

    def __getitem__(self, idx):
        frame_paths = self.rgb_samples[idx]
        label = self.labels[idx]
        all_indices = list(range(len(frame_paths)))
        chosen = self.temporal_transform(all_indices)
        clip = [Image.open(frame_paths[j]).convert("RGB") for j in chosen]
        clip = self.spatial_transform(clip)
        # Swap the first two axes — presumably (T, C, ...) -> (C, T, ...); confirm upstream.
        return clip.transpose(1, 0), int(label)

    def __len__(self):
        return int(self.sample_num)
def load_video_test(annot_path, mode):
    """Load per-video frame lists (no labels) for one jester split from a pickled DataFrame.

    :param annot_path: directory containing the 'jester_<mode>.pkl' annotation file
    :param mode: split name: 'train', 'val' or 'test'
    :return: rgb_samples — rgb_samples[i] is the list of frame paths for video i
    """
    annot_file = os.path.join(annot_path, 'jester_{}.pkl'.format(mode))
    annot_df = pd.read_pickle(annot_file)
    # Removed the unused local (task_ind) from the original.
    rgb_samples = []
    for frame_i in range(annot_df.shape[0]):
        # The 'frame' column holds the per-video list of frame paths.
        rgb_samples.append(annot_df['frame'].iloc[frame_i])
    print('{}: {} videos have been loaded'.format(mode, len(rgb_samples)))
    return rgb_samples
class dataset_video_test(Dataset):
    """Test-time dataset of RGB video clips (no labels).

    Each item is a (frame_paths, clip) pair so callers can associate
    predictions back to the source video.
    """

    def __init__(self, root_path, mode, spatial_transform=None, temporal_transform=None):
        self.root_path = root_path
        self.rgb_samples = load_video_test(root_path, mode)
        self.sample_num = len(self.rgb_samples)
        self.spatial_transform = spatial_transform
        self.temporal_transform = temporal_transform

    def __getitem__(self, idx):
        frame_paths = self.rgb_samples[idx]
        chosen = self.temporal_transform(list(range(len(frame_paths))))
        clip = [Image.open(frame_paths[j]).convert("RGB") for j in chosen]
        clip = self.spatial_transform(clip)
        # Swap the first two axes — presumably (T, C, ...) -> (C, T, ...); confirm upstream.
        return frame_paths, clip.transpose(1, 0)

    def __len__(self):
        return int(self.sample_num)
def get_label_dict_jester(label_path, filename):
    """Map each Jester label name to its 0-based row index.

    Parameters
    ----------
    label_path : str
        Directory containing the label file.
    filename : str
        Name of the single-column, headerless CSV of class names.

    Returns
    -------
    dict
        ``{label_name: row_index}``.
    """
    label = pd.read_csv(os.path.join(label_path, filename), header=None)
    # The single column holds the class names; a vectorised tolist() replaces
    # the original per-row .iloc[i].item() loop (one pandas call per row).
    label_list = label[0].tolist()
    return {name: rank for rank, name in enumerate(label_list)}
def get_label_dict_sthv2(label_path, filename):
    """Map each Something-Something-V2 category name to its integer index.

    NOTE: ``filename`` is accepted for signature parity with
    ``get_label_dict_jester`` but is IGNORED — the file name is hard-coded
    to 'something-something-v2-labels.json' (kept for backward
    compatibility with existing callers).

    Parameters
    ----------
    label_path : str
        Directory containing 'something-something-v2-labels.json', a JSON
        object mapping category name -> stringified index.

    Returns
    -------
    dict
        ``{category_name: index}``.

    Raises
    ------
    AssertionError
        If the JSON values are not already rank-ordered 0..N-1.
    """
    label_dict = dict()
    with open(os.path.join(label_path, 'something-something-v2-labels.json'), encoding='utf-8') as f:
        data = json.load(f)
    # Dropped the dead `categories` accumulator from the original.
    for i, (cat, idx) in enumerate(data.items()):
        assert i == int(idx)  # labels file must already be rank-ordered
        label_dict[cat] = i
    return label_dict
# from temporal_transforms import *
# from spatial_transforms import *
# from transforms import *
# from opts import parse_opts
# args = parse_opts()
# annot_path = '/home/zhengwei/workspace/something-try/SlowFastNetworks/annotations'
# args = parse_opts()
# scales = [args.initial_scale]
# for i in range(1, args.n_scales):
# scales.append(scales[-1] * args.scale_step)
# # trans_train = Compose([
# # # Scale([100,100]), # w * h: 176 * 100
# # # SpatialElasticDisplacement(),
# # MultiScaleRandomCrop(scales, [112, 112]),
# # ToTensor(1)
# # ])
# trans_train = Compose([GroupMultiScaleCrop(112, [1, .875, .75, .66]),
# ToTorchFormatTensor(),
# Stack_3D(),
# GroupNormalize(mean=[.485, .456, .406], std=[.229, .224, .225])])
# temporal_transform_ = Compose([
# TemporalRandomCrop(100)
# # TemporalBeginCrop(100)
# ])
# dataset_train = dataset_video(annot_path, 'train',
# n_frames_per_clip=100, img_size=(112, 112),
# reverse=False, transform=trans_train,
# temporal_transform = temporal_transform_)
# rgb_name, rgbs = dataset_train.__getitem__(0)
# # dataloader_train = DataLoader(dataset_train, batch_size=32,
# # shuffle=True,
# # num_workers=args.num_workers, pin_memory=True)
# # trainiter = iter(dataloader_train)
# # rgbs, masks, labels = trainiter.next()
# label_dict = get_label_dict_sthv2('/home/data2/zhengwei/sth-sth-v2', 'jester-v1-labels.csv')
=======
import os
import sys
# sys.path.append(os.getcwd()[0:-7])
# sys.path.append(os.path.join(os.getcwd()[0:-7], 'utils'))
import json
import pickle
import numpy as np
import pandas as pd
import random
import torch
import pdb
from torch.utils.data import Dataset, DataLoader,RandomSampler
import torchvision.transforms as transforms
from torchvision.utils import save_image
from PIL import Image
import matplotlib.pyplot as plt
from tqdm import tqdm, trange
import random
import skimage.util as ski_util
from sklearn.utils import shuffle
# pdb.set_trace()
# self-defined modules
def load_video(annot_path, mode):
    """Load per-clip frame-path lists and labels for a dataset split.

    Parameters
    ----------
    annot_path : str
        Directory containing the pickled annotation dataframe
        ``<mode>.pkl`` with 'frame' and 'label' columns.
    mode : str
        Dataset split, e.g. 'train', 'val' or 'test'.

    Returns
    -------
    tuple(list, list)
        ``(rgb_samples, labels)`` — one frame-path list and one label per
        video, in dataframe row order.
    """
    csv_file = os.path.join(annot_path, '{}.pkl'.format(mode))
    annot_df = pd.read_pickle(csv_file)
    rgb_samples = []
    labels = []
    # Dropped the dead `depth_samples` and `task_ind` locals left over from
    # an earlier revision.
    for frame_i in range(annot_df.shape[0]):
        # Each 'frame' cell already holds the list of frame paths for a clip.
        rgb_list = annot_df['frame'].iloc[frame_i]
        rgb_samples.append(rgb_list)
        labels.append(annot_df['label'].iloc[frame_i])
    print('{}: {} videos have been loaded'.format(mode, len(rgb_samples)))
    return rgb_samples, labels
class dataset_video(Dataset):
    """Dataset of labelled video clips backed by a pickled annotation table.

    Each item is a transformed clip tensor plus its integer class label.
    """
    def __init__(self, root_path, mode, spatial_transform=None, temporal_transform=None):
        # mode selects which '<mode>.pkl' annotation file is read.
        self.root_path = root_path
        self.rgb_samples, self.labels = load_video(root_path, mode)
        # Smoke-test: open the first frame now so bad paths fail fast
        # (the opened image itself is discarded).
        rgb_test = Image.open(self.rgb_samples[0][0]).convert("RGB")
        self.sample_num = len(self.rgb_samples)
        # NOTE(review): both transforms are called unconditionally below, so
        # passing None would raise in __getitem__ — confirm callers always
        # supply them.
        self.spatial_transform = spatial_transform
        self.temporal_transform = temporal_transform
        # print('{} {} samples have been loaded'.format(class_id, self.sample_num))
    def __getitem__(self, idx):
        rgb_name = self.rgb_samples[idx]
        label = self.labels[idx]
        # Candidate frame indices 0..len-1; temporal_transform crops/samples.
        indices = [i for i in range(len(rgb_name))]
        selected_indice = self.temporal_transform(indices)
        clip_frames = []
        for i, frame_name_i in enumerate(selected_indice):
            rgb_cache = Image.open(rgb_name[frame_name_i]).convert("RGB")
            clip_frames.append(rgb_cache)
        # spatial_transform stacks the PIL frames into a tensor; the
        # transpose swaps its first two axes — presumably time and channel.
        clip_frames = self.spatial_transform(clip_frames)
        return clip_frames.transpose(1,0), int(label)
    def __len__(self):
        return int(self.sample_num)
def load_video_test(annot_path, mode):
    """Read the pickled annotation table ``jester_<mode>.pkl`` from
    *annot_path* and return one frame-path list per video (no labels)."""
    pkl_path = os.path.join(annot_path, 'jester_{}.pkl'.format(mode))
    annotations = pd.read_pickle(pkl_path)
    # Retained from the original even though nothing reads it.
    task_ind = []
    # Each 'frame' cell already stores the list of frame paths for one clip.
    rgb_samples = [annotations['frame'].iloc[row] for row in range(annotations.shape[0])]
    print('{}: {} videos have been loaded'.format(mode, len(rgb_samples)))
    return rgb_samples
class dataset_video_test(Dataset):
    """Inference-time clip dataset (no labels).

    Items pair the clip's original frame-path list with the transformed
    clip tensor so predictions can be traced back to source files.
    """
    def __init__(self, root_path, mode, spatial_transform=None, temporal_transform=None):
        # Reads 'jester_<mode>.pkl' under root_path.
        self.root_path = root_path
        self.rgb_samples = load_video_test(root_path, mode)
        self.sample_num = len(self.rgb_samples)
        # Called unconditionally in __getitem__; must be supplied in practice.
        self.spatial_transform = spatial_transform
        self.temporal_transform = temporal_transform
        # print('{} {} samples have been loaded'.format(class_id, self.sample_num))
    def __getitem__(self, idx):
        clip_name = self.rgb_samples[idx]
        # All frame indices; temporal_transform selects the clip subset.
        indices = [i for i in range(len(clip_name))]
        selected_indice = self.temporal_transform(indices)
        clip_frames = []
        for i, frame_name_i in enumerate(selected_indice):
            rgb_cache = Image.open(clip_name[frame_name_i]).convert("RGB")
            clip_frames.append(rgb_cache)
        # transpose(1, 0) swaps the stacked tensor's first two axes —
        # presumably time/channel; confirm against spatial_transform's output.
        clip_frames = self.spatial_transform(clip_frames)
        return clip_name, clip_frames.transpose(1,0)
        # return rgb, mask, (torch.tensor(label)-1).long()
    def __len__(self):
        return int(self.sample_num)
def get_label_dict_jester(label_path, filename):
    """Build ``{label_name: row_index}`` from a headerless single-column
    CSV of Jester class names located at ``<label_path>/<filename>``."""
    frame = pd.read_csv(os.path.join(label_path, filename), header=None)
    # One .item() per row, exactly as before, just expressed as comprehensions.
    names = [frame.iloc[row].item() for row in range(len(frame))]
    return {name: rank for rank, name in enumerate(names)}
def get_label_dict_sthv2(label_path, filename):
    """Return ``{category_name: index}`` for Something-Something-V2.

    NOTE(review): ``filename`` is ignored — the JSON file name is
    hard-coded below; confirm whether callers rely on that.
    The asserts require the JSON values to be rank-ordered '0'..'N-1'.
    """
    label_dict = dict()
    with open(os.path.join(label_path, 'something-something-v2-labels.json'), encoding='utf-8') as f:
        data = json.load(f)
    # `categories` is accumulated but never used by this function.
    categories = []
    for i, (cat, idx) in enumerate(data.items()):
        assert i == int(idx) # make sure the rank is right
        categories.append(cat)
        label_dict[cat] = i
    return label_dict
# from temporal_transforms import *
# from spatial_transforms import *
# from transforms import *
# from opts import parse_opts
# args = parse_opts()
# annot_path = '/home/zhengwei/workspace/something-try/SlowFastNetworks/annotations'
# args = parse_opts()
# scales = [args.initial_scale]
# for i in range(1, args.n_scales):
# scales.append(scales[-1] * args.scale_step)
# # trans_train = Compose([
# # # Scale([100,100]), # w * h: 176 * 100
# # # SpatialElasticDisplacement(),
# # MultiScaleRandomCrop(scales, [112, 112]),
# # ToTensor(1)
# # ])
# trans_train = Compose([GroupMultiScaleCrop(112, [1, .875, .75, .66]),
# ToTorchFormatTensor(),
# Stack_3D(),
# GroupNormalize(mean=[.485, .456, .406], std=[.229, .224, .225])])
# temporal_transform_ = Compose([
# TemporalRandomCrop(100)
# # TemporalBeginCrop(100)
# ])
# dataset_train = dataset_video(annot_path, 'train',
# n_frames_per_clip=100, img_size=(112, 112),
# reverse=False, transform=trans_train,
# temporal_transform = temporal_transform_)
# rgb_name, rgbs = dataset_train.__getitem__(0)
# # dataloader_train = DataLoader(dataset_train, batch_size=32,
# # shuffle=True,
# # num_workers=args.num_workers, pin_memory=True)
# # trainiter = iter(dataloader_train)
# # rgbs, masks, labels = trainiter.next()
# label_dict = get_label_dict_sthv2('/home/data2/zhengwei/sth-sth-v2', 'jester-v1-labels.csv')
>>>>>>> e47b902b2f7d42c6a48fa803816e2793c5293c64
# pdb.set_trace()
| 34.944149
| 102
| 0.645255
| 1,695
| 13,139
| 4.748673
| 0.120944
| 0.037272
| 0.024351
| 0.019878
| 0.993167
| 0.993167
| 0.993167
| 0.993167
| 0.993167
| 0.993167
| 0
| 0.021385
| 0.238374
| 13,139
| 376
| 103
| 34.944149
| 0.782952
| 0.33861
| 0
| 0.984925
| 0
| 0
| 0.033927
| 0.008123
| 0
| 0
| 0
| 0
| 0.01005
| 0
| null | null | 0
| 0.180905
| null | null | 0.020101
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
b3cd86fe083b41622862ca218797535c510997f4
| 6,131
|
py
|
Python
|
loldib/getratings/models/NA/na_lux/na_lux_bot.py
|
koliupy/loldib
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
[
"Apache-2.0"
] | null | null | null |
loldib/getratings/models/NA/na_lux/na_lux_bot.py
|
koliupy/loldib
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
[
"Apache-2.0"
] | null | null | null |
loldib/getratings/models/NA/na_lux/na_lux_bot.py
|
koliupy/loldib
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
[
"Apache-2.0"
] | null | null | null |
from getratings.models.ratings import Ratings
# Auto-generated registry: one empty Ratings subclass per champion for the
# NA / Lux / Bot matchup. Re-indented — every `pass` body had lost its
# leading whitespace, which makes the module a SyntaxError in Python.
class NA_Lux_Bot_Aatrox(Ratings):
    pass
class NA_Lux_Bot_Ahri(Ratings):
    pass
class NA_Lux_Bot_Akali(Ratings):
    pass
class NA_Lux_Bot_Alistar(Ratings):
    pass
class NA_Lux_Bot_Amumu(Ratings):
    pass
class NA_Lux_Bot_Anivia(Ratings):
    pass
class NA_Lux_Bot_Annie(Ratings):
    pass
class NA_Lux_Bot_Ashe(Ratings):
    pass
class NA_Lux_Bot_AurelionSol(Ratings):
    pass
class NA_Lux_Bot_Azir(Ratings):
    pass
class NA_Lux_Bot_Bard(Ratings):
    pass
class NA_Lux_Bot_Blitzcrank(Ratings):
    pass
class NA_Lux_Bot_Brand(Ratings):
    pass
class NA_Lux_Bot_Braum(Ratings):
    pass
class NA_Lux_Bot_Caitlyn(Ratings):
    pass
class NA_Lux_Bot_Camille(Ratings):
    pass
class NA_Lux_Bot_Cassiopeia(Ratings):
    pass
class NA_Lux_Bot_Chogath(Ratings):
    pass
class NA_Lux_Bot_Corki(Ratings):
    pass
class NA_Lux_Bot_Darius(Ratings):
    pass
class NA_Lux_Bot_Diana(Ratings):
    pass
class NA_Lux_Bot_Draven(Ratings):
    pass
class NA_Lux_Bot_DrMundo(Ratings):
    pass
class NA_Lux_Bot_Ekko(Ratings):
    pass
class NA_Lux_Bot_Elise(Ratings):
    pass
class NA_Lux_Bot_Evelynn(Ratings):
    pass
class NA_Lux_Bot_Ezreal(Ratings):
    pass
class NA_Lux_Bot_Fiddlesticks(Ratings):
    pass
class NA_Lux_Bot_Fiora(Ratings):
    pass
class NA_Lux_Bot_Fizz(Ratings):
    pass
class NA_Lux_Bot_Galio(Ratings):
    pass
class NA_Lux_Bot_Gangplank(Ratings):
    pass
class NA_Lux_Bot_Garen(Ratings):
    pass
class NA_Lux_Bot_Gnar(Ratings):
    pass
class NA_Lux_Bot_Gragas(Ratings):
    pass
class NA_Lux_Bot_Graves(Ratings):
    pass
class NA_Lux_Bot_Hecarim(Ratings):
    pass
class NA_Lux_Bot_Heimerdinger(Ratings):
    pass
class NA_Lux_Bot_Illaoi(Ratings):
    pass
class NA_Lux_Bot_Irelia(Ratings):
    pass
class NA_Lux_Bot_Ivern(Ratings):
    pass
class NA_Lux_Bot_Janna(Ratings):
    pass
class NA_Lux_Bot_JarvanIV(Ratings):
    pass
class NA_Lux_Bot_Jax(Ratings):
    pass
class NA_Lux_Bot_Jayce(Ratings):
    pass
class NA_Lux_Bot_Jhin(Ratings):
    pass
class NA_Lux_Bot_Jinx(Ratings):
    pass
class NA_Lux_Bot_Kalista(Ratings):
    pass
class NA_Lux_Bot_Karma(Ratings):
    pass
class NA_Lux_Bot_Karthus(Ratings):
    pass
class NA_Lux_Bot_Kassadin(Ratings):
    pass
class NA_Lux_Bot_Katarina(Ratings):
    pass
class NA_Lux_Bot_Kayle(Ratings):
    pass
class NA_Lux_Bot_Kayn(Ratings):
    pass
class NA_Lux_Bot_Kennen(Ratings):
    pass
class NA_Lux_Bot_Khazix(Ratings):
    pass
class NA_Lux_Bot_Kindred(Ratings):
    pass
class NA_Lux_Bot_Kled(Ratings):
    pass
class NA_Lux_Bot_KogMaw(Ratings):
    pass
class NA_Lux_Bot_Leblanc(Ratings):
    pass
class NA_Lux_Bot_LeeSin(Ratings):
    pass
class NA_Lux_Bot_Leona(Ratings):
    pass
class NA_Lux_Bot_Lissandra(Ratings):
    pass
class NA_Lux_Bot_Lucian(Ratings):
    pass
class NA_Lux_Bot_Lulu(Ratings):
    pass
class NA_Lux_Bot_Lux(Ratings):
    pass
class NA_Lux_Bot_Malphite(Ratings):
    pass
class NA_Lux_Bot_Malzahar(Ratings):
    pass
class NA_Lux_Bot_Maokai(Ratings):
    pass
class NA_Lux_Bot_MasterYi(Ratings):
    pass
class NA_Lux_Bot_MissFortune(Ratings):
    pass
class NA_Lux_Bot_MonkeyKing(Ratings):
    pass
class NA_Lux_Bot_Mordekaiser(Ratings):
    pass
class NA_Lux_Bot_Morgana(Ratings):
    pass
class NA_Lux_Bot_Nami(Ratings):
    pass
class NA_Lux_Bot_Nasus(Ratings):
    pass
class NA_Lux_Bot_Nautilus(Ratings):
    pass
class NA_Lux_Bot_Nidalee(Ratings):
    pass
class NA_Lux_Bot_Nocturne(Ratings):
    pass
class NA_Lux_Bot_Nunu(Ratings):
    pass
class NA_Lux_Bot_Olaf(Ratings):
    pass
class NA_Lux_Bot_Orianna(Ratings):
    pass
class NA_Lux_Bot_Ornn(Ratings):
    pass
class NA_Lux_Bot_Pantheon(Ratings):
    pass
class NA_Lux_Bot_Poppy(Ratings):
    pass
class NA_Lux_Bot_Quinn(Ratings):
    pass
class NA_Lux_Bot_Rakan(Ratings):
    pass
class NA_Lux_Bot_Rammus(Ratings):
    pass
class NA_Lux_Bot_RekSai(Ratings):
    pass
class NA_Lux_Bot_Renekton(Ratings):
    pass
class NA_Lux_Bot_Rengar(Ratings):
    pass
class NA_Lux_Bot_Riven(Ratings):
    pass
class NA_Lux_Bot_Rumble(Ratings):
    pass
class NA_Lux_Bot_Ryze(Ratings):
    pass
class NA_Lux_Bot_Sejuani(Ratings):
    pass
class NA_Lux_Bot_Shaco(Ratings):
    pass
class NA_Lux_Bot_Shen(Ratings):
    pass
class NA_Lux_Bot_Shyvana(Ratings):
    pass
class NA_Lux_Bot_Singed(Ratings):
    pass
class NA_Lux_Bot_Sion(Ratings):
    pass
class NA_Lux_Bot_Sivir(Ratings):
    pass
class NA_Lux_Bot_Skarner(Ratings):
    pass
class NA_Lux_Bot_Sona(Ratings):
    pass
class NA_Lux_Bot_Soraka(Ratings):
    pass
class NA_Lux_Bot_Swain(Ratings):
    pass
class NA_Lux_Bot_Syndra(Ratings):
    pass
class NA_Lux_Bot_TahmKench(Ratings):
    pass
class NA_Lux_Bot_Taliyah(Ratings):
    pass
class NA_Lux_Bot_Talon(Ratings):
    pass
class NA_Lux_Bot_Taric(Ratings):
    pass
class NA_Lux_Bot_Teemo(Ratings):
    pass
class NA_Lux_Bot_Thresh(Ratings):
    pass
class NA_Lux_Bot_Tristana(Ratings):
    pass
class NA_Lux_Bot_Trundle(Ratings):
    pass
class NA_Lux_Bot_Tryndamere(Ratings):
    pass
class NA_Lux_Bot_TwistedFate(Ratings):
    pass
class NA_Lux_Bot_Twitch(Ratings):
    pass
class NA_Lux_Bot_Udyr(Ratings):
    pass
class NA_Lux_Bot_Urgot(Ratings):
    pass
class NA_Lux_Bot_Varus(Ratings):
    pass
class NA_Lux_Bot_Vayne(Ratings):
    pass
class NA_Lux_Bot_Veigar(Ratings):
    pass
class NA_Lux_Bot_Velkoz(Ratings):
    pass
class NA_Lux_Bot_Vi(Ratings):
    pass
class NA_Lux_Bot_Viktor(Ratings):
    pass
class NA_Lux_Bot_Vladimir(Ratings):
    pass
class NA_Lux_Bot_Volibear(Ratings):
    pass
class NA_Lux_Bot_Warwick(Ratings):
    pass
class NA_Lux_Bot_Xayah(Ratings):
    pass
class NA_Lux_Bot_Xerath(Ratings):
    pass
class NA_Lux_Bot_XinZhao(Ratings):
    pass
class NA_Lux_Bot_Yasuo(Ratings):
    pass
class NA_Lux_Bot_Yorick(Ratings):
    pass
class NA_Lux_Bot_Zac(Ratings):
    pass
class NA_Lux_Bot_Zed(Ratings):
    pass
class NA_Lux_Bot_Ziggs(Ratings):
    pass
class NA_Lux_Bot_Zilean(Ratings):
    pass
class NA_Lux_Bot_Zyra(Ratings):
    pass
| 14.702638
| 46
| 0.750938
| 972
| 6,131
| 4.3107
| 0.151235
| 0.230549
| 0.329356
| 0.428162
| 0.784726
| 0.784726
| 0
| 0
| 0
| 0
| 0
| 0
| 0.18121
| 6,131
| 416
| 47
| 14.737981
| 0.834661
| 0
| 0
| 0.498195
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.498195
| 0.00361
| 0
| 0.501805
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
|
0
| 7
|
b3f9c7358828c817440ebfd329b97c43063c0727
| 95
|
py
|
Python
|
tests/test_all.py
|
vidartf/jupyterlab_celltests
|
071ecb8d1ce1052736b4dd34af832a8e652ec9b5
|
[
"Apache-2.0"
] | null | null | null |
tests/test_all.py
|
vidartf/jupyterlab_celltests
|
071ecb8d1ce1052736b4dd34af832a8e652ec9b5
|
[
"Apache-2.0"
] | null | null | null |
tests/test_all.py
|
vidartf/jupyterlab_celltests
|
071ecb8d1ce1052736b4dd34af832a8e652ec9b5
|
[
"Apache-2.0"
] | null | null | null |
# for Coverage
from jupyterlab_celltests import *
from jupyterlab_celltests.extension import *
| 23.75
| 44
| 0.842105
| 11
| 95
| 7.090909
| 0.636364
| 0.358974
| 0.589744
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115789
| 95
| 3
| 45
| 31.666667
| 0.928571
| 0.126316
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
b613c80159a6337b24056abd3a25d2a49c7d5707
| 4,661
|
py
|
Python
|
tests/unit/aws/test_chain.py
|
TheRakeshPurohit/localstack
|
063e07827934a7c7ff00e6d7cf6e243bcce0eb99
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/aws/test_chain.py
|
TheRakeshPurohit/localstack
|
063e07827934a7c7ff00e6d7cf6e243bcce0eb99
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/aws/test_chain.py
|
TheRakeshPurohit/localstack
|
063e07827934a7c7ff00e6d7cf6e243bcce0eb99
|
[
"Apache-2.0"
] | null | null | null |
from unittest import mock
from localstack.aws.api import RequestContext
from localstack.aws.chain import CompositeHandler, HandlerChain
from localstack.http import Response
class TestCompositeHandler:
    """Tests for CompositeHandler inside a HandlerChain.

    Re-indented: the method bodies had lost their indentation, which is a
    SyntaxError in Python. Each test builds a chain of
    outer1 -> composite(inner1, inner2) -> outer2 with response1 as a
    response handler, then asserts which handlers ran.
    """

    def test_composite_handler_stops_handler_chain(self):
        """stop() inside the composite skips later request handlers but
        still runs response handlers."""
        def inner1(_chain: HandlerChain, request: RequestContext, response: Response):
            _chain.stop()

        inner2 = mock.MagicMock()
        outer1 = mock.MagicMock()
        outer2 = mock.MagicMock()
        response1 = mock.MagicMock()
        chain = HandlerChain()
        composite = CompositeHandler()
        composite.handlers.append(inner1)
        composite.handlers.append(inner2)
        chain.request_handlers.append(outer1)
        chain.request_handlers.append(composite)
        chain.request_handlers.append(outer2)
        chain.response_handlers.append(response1)
        chain.handle(RequestContext(), Response())
        outer1.assert_called_once()
        outer2.assert_not_called()
        inner2.assert_not_called()
        response1.assert_called_once()

    def test_composite_handler_terminates_handler_chain(self):
        """terminate() skips later request handlers AND response handlers."""
        def inner1(_chain: HandlerChain, request: RequestContext, response: Response):
            _chain.terminate()

        inner2 = mock.MagicMock()
        outer1 = mock.MagicMock()
        outer2 = mock.MagicMock()
        response1 = mock.MagicMock()
        chain = HandlerChain()
        composite = CompositeHandler()
        composite.handlers.append(inner1)
        composite.handlers.append(inner2)
        chain.request_handlers.append(outer1)
        chain.request_handlers.append(composite)
        chain.request_handlers.append(outer2)
        chain.response_handlers.append(response1)
        chain.handle(RequestContext(), Response())
        outer1.assert_called_once()
        outer2.assert_not_called()
        inner2.assert_not_called()
        response1.assert_not_called()

    def test_composite_handler_with_not_return_on_stop(self):
        """With return_on_stop=False the composite finishes its own inner
        handlers even after stop(), but the outer chain still stops."""
        def inner1(_chain: HandlerChain, request: RequestContext, response: Response):
            _chain.stop()

        inner2 = mock.MagicMock()
        outer1 = mock.MagicMock()
        outer2 = mock.MagicMock()
        response1 = mock.MagicMock()
        chain = HandlerChain()
        composite = CompositeHandler(return_on_stop=False)
        composite.handlers.append(inner1)
        composite.handlers.append(inner2)
        chain.request_handlers.append(outer1)
        chain.request_handlers.append(composite)
        chain.request_handlers.append(outer2)
        chain.response_handlers.append(response1)
        chain.handle(RequestContext(), Response())
        outer1.assert_called_once()
        outer2.assert_not_called()
        inner2.assert_called_once()
        response1.assert_called_once()

    def test_composite_handler_continues_handler_chain(self):
        """When no inner handler stops, every handler runs exactly once."""
        inner1 = mock.MagicMock()
        inner2 = mock.MagicMock()
        outer1 = mock.MagicMock()
        outer2 = mock.MagicMock()
        response1 = mock.MagicMock()
        chain = HandlerChain()
        composite = CompositeHandler()
        composite.handlers.append(inner1)
        composite.handlers.append(inner2)
        chain.request_handlers.append(outer1)
        chain.request_handlers.append(composite)
        chain.request_handlers.append(outer2)
        chain.response_handlers.append(response1)
        chain.handle(RequestContext(), Response())
        outer1.assert_called_once()
        outer2.assert_called_once()
        inner1.assert_called_once()
        inner2.assert_called_once()
        response1.assert_called_once()

    def test_composite_handler_exception_calls_outer_exception_handlers(self):
        """An exception in an inner handler aborts the request phase and
        invokes the chain's exception handlers; response handlers still run."""
        def inner1(_chain: HandlerChain, request: RequestContext, response: Response):
            raise ValueError()

        inner2 = mock.MagicMock()
        outer1 = mock.MagicMock()
        outer2 = mock.MagicMock()
        exception_handler = mock.MagicMock()
        response1 = mock.MagicMock()
        chain = HandlerChain()
        composite = CompositeHandler()
        composite.handlers.append(inner1)
        composite.handlers.append(inner2)
        chain.request_handlers.append(outer1)
        chain.request_handlers.append(composite)
        chain.request_handlers.append(outer2)
        chain.exception_handlers.append(exception_handler)
        chain.response_handlers.append(response1)
        chain.handle(RequestContext(), Response())
        outer1.assert_called_once()
        outer2.assert_not_called()
        inner2.assert_not_called()
        exception_handler.assert_called_once()
        response1.assert_called_once()
| 33.292857
| 86
| 0.681184
| 454
| 4,661
| 6.759912
| 0.105727
| 0.141414
| 0.097752
| 0.127077
| 0.840991
| 0.840991
| 0.840991
| 0.827631
| 0.816878
| 0.776474
| 0
| 0.019783
| 0.229994
| 4,661
| 139
| 87
| 33.532374
| 0.83533
| 0
| 0
| 0.814815
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.203704
| 1
| 0.083333
| false
| 0
| 0.037037
| 0
| 0.12963
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b61478fb35e3ca6b046c833fddc4fe857563a2d6
| 9,398
|
py
|
Python
|
tests/test_projects.py
|
dvd7587/listthedocs
|
b4734be11977ea971e0ad5fa2e9920cc63e54ec0
|
[
"MIT"
] | 3
|
2019-08-12T13:46:13.000Z
|
2020-03-20T08:09:16.000Z
|
tests/test_projects.py
|
dvd7587/listthedocs
|
b4734be11977ea971e0ad5fa2e9920cc63e54ec0
|
[
"MIT"
] | 7
|
2019-08-12T13:06:32.000Z
|
2020-03-28T14:33:16.000Z
|
tests/test_projects.py
|
dvd7587/listthedocs
|
b4734be11977ea971e0ad5fa2e9920cc63e54ec0
|
[
"MIT"
] | 2
|
2019-09-26T14:31:09.000Z
|
2019-10-01T08:49:47.000Z
|
import pytest
def test_get_missing_project(client):
    """GET of an unknown project returns 404. (Re-indented: body had lost
    its indentation.)"""
    response = client.get('/api/v2/projects/test_project')
    assert response.status_code == 404
def test_get_project_when_none_exists(client):
    """Listing projects on an empty service yields 200 with an empty list."""
    response = client.get('/api/v2/projects')
    assert response.status_code == 200
    projects = response.get_json()
    assert len(projects) == 0
def test_add_project_creates_and_returns_the_project(client):
    """POST creates the project; the 201 body echoes it with a slugified code."""
    response = client.post('/api/v2/projects', json={'title': 'Test Project', 'description': 'A very long string'})
    assert response.status_code == 201
    project = response.get_json()
    assert project['code'] == 'test-project'
    assert project['title'] == 'Test Project'
    assert project['description'] == 'A very long string'
    assert 'logo' in project
def test_get_project_returns_the_project(client):
    """A created project can be fetched back by its slugified code."""
    response = client.post('/api/v2/projects', json={'title': 'Test Project', 'description': 'A very long string'})
    assert response.status_code == 201
    response = client.get('/api/v2/projects/test-project')
    assert response.status_code == 200
    project = response.get_json()
    assert project['code'] == 'test-project'
    assert project['title'] == 'Test Project'
    assert project['description'] == 'A very long string'
    assert 'logo' in project
def test_get_projects_returns_all_the_projects(client):
    """Listing returns every created project, in creation order."""
    response = client.post('/api/v2/projects', json={'title': 'Test Project 1', 'description': 'Tests description 1'})
    assert response.status_code == 201
    response = client.post('/api/v2/projects', json={'title': 'Test Project 2', 'description': 'Tests description 2'})
    assert response.status_code == 201
    response = client.get('/api/v2/projects')
    assert response.status_code == 200
    projects = response.get_json()
    assert isinstance(projects, list)
    assert projects[0]['code'] == 'test-project-1'
    assert projects[0]['title'] == 'Test Project 1'
    assert projects[0]['description'] == 'Tests description 1'
    assert projects[1]['code'] == 'test-project-2'
    assert projects[1]['title'] == 'Test Project 2'
    assert projects[1]['description'] == 'Tests description 2'
def test_update_project_description(client):
    """PATCH of the description changes only the description."""
    response = client.post('/api/v2/projects', json={'title': 'test_project', 'description': 'A very long string'})
    assert response.status_code == 201
    response = client.patch('/api/v2/projects/test_project', json={'description': 'Short string'})
    assert response.status_code == 200
    project = response.get_json()
    assert project['code'] == 'test_project'
    assert project['title'] == 'test_project'
    assert project['description'] == 'Short string'
    assert 'logo' in project
def test_update_project_logo(client):
    """PATCH of the logo changes only the logo."""
    response = client.post('/api/v2/projects', json={'title': 'test_project', 'description': 'A very long string'})
    assert response.status_code == 201
    response = client.patch('/api/v2/projects/test_project', json={'logo': 'image.jpg'})
    assert response.status_code == 200
    project = response.get_json()
    assert project['code'] == 'test_project'
    assert project['title'] == 'test_project'
    assert project['description'] == 'A very long string'
    assert project['logo'] == 'image.jpg'
def test_update_project_title(client):
    """PATCH of the title changes the title but keeps the original code."""
    response = client.post('/api/v2/projects', json={'title': 'test_project', 'description': 'A very long string'})
    assert response.status_code == 201
    response = client.patch('/api/v2/projects/test_project', json={'title': 'Test Project'})
    assert response.status_code == 200
    project = response.get_json()
    assert project['code'] == 'test_project'
    assert project['title'] == 'Test Project'
    assert project['description'] == 'A very long string'
    assert 'logo' in project
def test_delete_project(client):
    """Deleting a project (with a version attached) makes it a 404."""
    # Add a project
    response = client.post('/api/v2/projects', json={'title': 'test_project', 'description': 'A very long string'})
    assert response.status_code == 201
    # Add a version
    response = client.post(
        '/api/v2/projects/test_project/versions',
        json={'name': '1.0.0', 'url': 'www.example.com/index.html'}
    )
    assert response.status_code == 201
    response = client.delete('/api/v2/projects/test_project')
    assert response.status_code == 200
    response = client.get('/api/v2/projects/test_project')
    assert response.status_code == 404
def test_add_version(client):
    """Posting a version returns the project with that version listed."""
    response = client.post('/api/v2/projects', json={'title': 'test_project', 'description': 'A very long string'})
    assert response.status_code == 201
    response = client.post(
        '/api/v2/projects/test_project/versions',
        json={'name': '1.0.0', 'url': 'www.example.com/index.html'}
    )
    assert response.status_code == 201
    project = response.get_json()
    assert project['code'] == 'test_project'
    assert project['title'] == 'test_project'
    assert project['description'] == 'A very long string'
    assert 'logo' in project
    assert project['versions'][0]['name'] == '1.0.0'
    assert project['versions'][0]['url'] == 'www.example.com/index.html'
def test_remove_version(client):
    """Deleting one of two versions leaves only the other on the project."""
    response = client.post('/api/v2/projects', json={'title': 'test_project', 'description': 'A very long string'})
    assert response.status_code == 201
    # Add multiple versions
    response = client.post(
        '/api/v2/projects/test_project/versions',
        json={'name': '1.0.0', 'url': 'www.example.com/1.0.0/index.html'}
    )
    assert response.status_code == 201
    response = client.post(
        '/api/v2/projects/test_project/versions',
        json={'name': '2.0.0', 'url': 'www.example.com/2.0.0/index.html'}
    )
    assert response.status_code == 201
    # Remove a version
    response = client.delete('/api/v2/projects/test_project/versions/2.0.0')
    assert response.status_code == 200
    response = client.get('/api/v2/projects/test_project')
    assert response.status_code == 200
    project = response.get_json()
    assert project['code'] == 'test_project'
    assert project['title'] == 'test_project'
    assert project['description'] == 'A very long string'
    assert 'logo' in project
    assert isinstance(project['versions'], list)
    assert len(project['versions']) == 1
    assert project['versions'][0]['name'] == '1.0.0'
    assert project['versions'][0]['url'] == 'www.example.com/1.0.0/index.html'
def test_update_version_link(client):
    """Patching a version's url updates that version and leaves others intact."""
    response = client.post('/api/v2/projects', json={'title': 'test_project', 'description': 'A very long string'})
    assert response.status_code == 201
    # Add multiple versions
    response = client.post(
        '/api/v2/projects/test_project/versions',
        json={'name': '1.0.0', 'url': 'www.example.com/1.0.0/index.html'}
    )
    assert response.status_code == 201
    response = client.post(
        '/api/v2/projects/test_project/versions',
        json={'name': '2.0.0', 'url': 'www.example.com/2.0.0/index.html'}
    )
    assert response.status_code == 201
    # Patch a version link
    response = client.patch(
        '/api/v2/projects/test_project/versions/2.0.0',
        json={'url': 'www.newexample.com/2.0.0/index.html'}
    )
    assert response.status_code == 200
    project = response.get_json()
    assert project['code'] == 'test_project'
    assert project['title'] == 'test_project'
    assert project['description'] == 'A very long string'
    assert 'logo' in project
    assert isinstance(project['versions'], list)
    assert project['versions'][0]['name'] == '1.0.0'
    assert project['versions'][0]['url'] == 'www.example.com/1.0.0/index.html'
    assert project['versions'][1]['name'] == '2.0.0'
    assert project['versions'][1]['url'] == 'www.newexample.com/2.0.0/index.html'
def test_add_same_version_name_to_different_projects(client):
    """The same version name is allowed on two different projects."""
    response = client.post('/api/v2/projects', json={'title': 'test_project1', 'description': 'A very long string'})
    assert response.status_code == 201
    response = client.post('/api/v2/projects', json={'title': 'test_project2', 'description': 'A very long string'})
    assert response.status_code == 201
    # Add version 1.0.0 to test_project1
    response = client.post(
        '/api/v2/projects/test_project1/versions',
        json={'name': '1.0.0', 'url': 'www.example.com/1.0.0/index.html'}
    )
    assert response.status_code == 201
    # Add version 1.0.0 to test_project2
    response = client.post(
        '/api/v2/projects/test_project2/versions',
        json={'name': '1.0.0', 'url': 'www.example.com/1.0.0/index.html'}
    )
    assert response.status_code == 201
def test_add_same_version_name_multiple_time_to_project_fails(client):
    """Adding a duplicate version name to one project is rejected with 409."""
    response = client.post('/api/v2/projects', json={'title': 'test_project1', 'description': 'A very long string'})
    assert response.status_code == 201
    # Add version 1.0.0 to test_project1
    response = client.post(
        '/api/v2/projects/test_project1/versions',
        json={'name': '1.0.0', 'url': 'www.example.com/1.0.0/index.html'}
    )
    assert response.status_code == 201
    # Add version 1.0.0 to test_project1 twice, fail
    response = client.post(
        '/api/v2/projects/test_project1/versions',
        json={'name': '1.0.0', 'url': 'www.example.com/1.0.0/index.html'}
    )
    assert response.status_code == 409
| 35.198502
| 118
| 0.664503
| 1,246
| 9,398
| 4.891653
| 0.058587
| 0.086628
| 0.076784
| 0.141756
| 0.901723
| 0.886792
| 0.859721
| 0.845283
| 0.8379
| 0.816407
| 0
| 0.035696
| 0.174292
| 9,398
| 266
| 119
| 35.330827
| 0.749742
| 0.027772
| 0
| 0.701657
| 0
| 0
| 0.335087
| 0.128424
| 0
| 0
| 0
| 0
| 0.480663
| 1
| 0.077348
| false
| 0
| 0.005525
| 0
| 0.082873
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
37765c4463f3862961c124cff2d04a4ccdf52b2e
| 6,657
|
py
|
Python
|
test/azure/low-level/Expected/AcceptanceTests/LroLowLevel/lrolowlevel/rest/lrosads/__init__.py
|
cfculhane/autorest.python
|
8cbca95faee88d933a58bbbd17b76834faa8d387
|
[
"MIT"
] | 35
|
2018-04-03T12:15:53.000Z
|
2022-03-11T14:03:34.000Z
|
test/azure/low-level/Expected/AcceptanceTests/LroLowLevel/lrolowlevel/rest/lrosads/__init__.py
|
cfculhane/autorest.python
|
8cbca95faee88d933a58bbbd17b76834faa8d387
|
[
"MIT"
] | 652
|
2017-08-28T22:44:41.000Z
|
2022-03-31T21:20:31.000Z
|
test/azure/low-level/Expected/AcceptanceTests/LroLowLevel/lrolowlevel/rest/lrosads/__init__.py
|
cfculhane/autorest.python
|
8cbca95faee88d933a58bbbd17b76834faa8d387
|
[
"MIT"
] | 29
|
2017-08-28T20:57:01.000Z
|
2022-03-11T14:03:38.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
try:
from ._request_builders_py3 import build_put_non_retry400_request
from ._request_builders_py3 import build_put_non_retry201_creating400_request
from ._request_builders_py3 import build_put_non_retry201_creating400_invalid_json_request
from ._request_builders_py3 import build_put_async_relative_retry400_request
from ._request_builders_py3 import build_delete_non_retry400_request
from ._request_builders_py3 import build_delete202_non_retry400_request
from ._request_builders_py3 import build_delete_async_relative_retry400_request
from ._request_builders_py3 import build_post_non_retry400_request
from ._request_builders_py3 import build_post202_non_retry400_request
from ._request_builders_py3 import build_post_async_relative_retry400_request
from ._request_builders_py3 import build_put_error201_no_provisioning_state_payload_request
from ._request_builders_py3 import build_put_async_relative_retry_no_status_request
from ._request_builders_py3 import build_put_async_relative_retry_no_status_payload_request
from ._request_builders_py3 import build_delete204_succeeded_request
from ._request_builders_py3 import build_delete_async_relative_retry_no_status_request
from ._request_builders_py3 import build_post202_no_location_request
from ._request_builders_py3 import build_post_async_relative_retry_no_payload_request
from ._request_builders_py3 import build_put200_invalid_json_request
from ._request_builders_py3 import build_put_async_relative_retry_invalid_header_request
from ._request_builders_py3 import build_put_async_relative_retry_invalid_json_polling_request
from ._request_builders_py3 import build_delete202_retry_invalid_header_request
from ._request_builders_py3 import build_delete_async_relative_retry_invalid_header_request
from ._request_builders_py3 import build_delete_async_relative_retry_invalid_json_polling_request
from ._request_builders_py3 import build_post202_retry_invalid_header_request
from ._request_builders_py3 import build_post_async_relative_retry_invalid_header_request
from ._request_builders_py3 import build_post_async_relative_retry_invalid_json_polling_request
except (SyntaxError, ImportError):
from ._request_builders import build_put_non_retry400_request # type: ignore
from ._request_builders import build_put_non_retry201_creating400_request # type: ignore
from ._request_builders import build_put_non_retry201_creating400_invalid_json_request # type: ignore
from ._request_builders import build_put_async_relative_retry400_request # type: ignore
from ._request_builders import build_delete_non_retry400_request # type: ignore
from ._request_builders import build_delete202_non_retry400_request # type: ignore
from ._request_builders import build_delete_async_relative_retry400_request # type: ignore
from ._request_builders import build_post_non_retry400_request # type: ignore
from ._request_builders import build_post202_non_retry400_request # type: ignore
from ._request_builders import build_post_async_relative_retry400_request # type: ignore
from ._request_builders import build_put_error201_no_provisioning_state_payload_request # type: ignore
from ._request_builders import build_put_async_relative_retry_no_status_request # type: ignore
from ._request_builders import build_put_async_relative_retry_no_status_payload_request # type: ignore
from ._request_builders import build_delete204_succeeded_request # type: ignore
from ._request_builders import build_delete_async_relative_retry_no_status_request # type: ignore
from ._request_builders import build_post202_no_location_request # type: ignore
from ._request_builders import build_post_async_relative_retry_no_payload_request # type: ignore
from ._request_builders import build_put200_invalid_json_request # type: ignore
from ._request_builders import build_put_async_relative_retry_invalid_header_request # type: ignore
from ._request_builders import build_put_async_relative_retry_invalid_json_polling_request # type: ignore
from ._request_builders import build_delete202_retry_invalid_header_request # type: ignore
from ._request_builders import build_delete_async_relative_retry_invalid_header_request # type: ignore
from ._request_builders import build_delete_async_relative_retry_invalid_json_polling_request # type: ignore
from ._request_builders import build_post202_retry_invalid_header_request # type: ignore
from ._request_builders import build_post_async_relative_retry_invalid_header_request # type: ignore
from ._request_builders import build_post_async_relative_retry_invalid_json_polling_request # type: ignore
# Public API of this package: re-export every request builder by name,
# regardless of which implementation module (py3 or the typed fallback
# above) was actually imported.
__all__ = [
    "build_put_non_retry400_request",
    "build_put_non_retry201_creating400_request",
    "build_put_non_retry201_creating400_invalid_json_request",
    "build_put_async_relative_retry400_request",
    "build_delete_non_retry400_request",
    "build_delete202_non_retry400_request",
    "build_delete_async_relative_retry400_request",
    "build_post_non_retry400_request",
    "build_post202_non_retry400_request",
    "build_post_async_relative_retry400_request",
    "build_put_error201_no_provisioning_state_payload_request",
    "build_put_async_relative_retry_no_status_request",
    "build_put_async_relative_retry_no_status_payload_request",
    "build_delete204_succeeded_request",
    "build_delete_async_relative_retry_no_status_request",
    "build_post202_no_location_request",
    "build_post_async_relative_retry_no_payload_request",
    "build_put200_invalid_json_request",
    "build_put_async_relative_retry_invalid_header_request",
    "build_put_async_relative_retry_invalid_json_polling_request",
    "build_delete202_retry_invalid_header_request",
    "build_delete_async_relative_retry_invalid_header_request",
    "build_delete_async_relative_retry_invalid_json_polling_request",
    "build_post202_retry_invalid_header_request",
    "build_post_async_relative_retry_invalid_header_request",
    "build_post_async_relative_retry_invalid_json_polling_request",
]
| 72.358696
| 113
| 0.839117
| 863
| 6,657
| 5.799537
| 0.090382
| 0.114286
| 0.197403
| 0.114286
| 0.939461
| 0.899301
| 0.85994
| 0.844555
| 0.76024
| 0.657942
| 0
| 0.034825
| 0.107105
| 6,657
| 91
| 114
| 73.153846
| 0.807201
| 0.118672
| 0
| 0
| 0
| 0
| 0.201954
| 0.201954
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.646341
| 0
| 0.646341
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 8
|
3786381edee6cb1b9357c1b304dc60bb41433171
| 6,501
|
py
|
Python
|
01-Single-Layer/letters.py
|
maikelSoFly/PSI_GCP04_zima_2017-2018_Mikolaj_Stepniewski
|
012a97e54c186b1fb25e8300dd77e16759632c6e
|
[
"Apache-2.0"
] | null | null | null |
01-Single-Layer/letters.py
|
maikelSoFly/PSI_GCP04_zima_2017-2018_Mikolaj_Stepniewski
|
012a97e54c186b1fb25e8300dd77e16759632c6e
|
[
"Apache-2.0"
] | null | null | null |
01-Single-Layer/letters.py
|
maikelSoFly/PSI_GCP04_zima_2017-2018_Mikolaj_Stepniewski
|
012a97e54c186b1fb25e8300dd77e16759632c6e
|
[
"Apache-2.0"
] | null | null | null |
class LetterInput():
    """5x7 bitmap training samples for the single-layer letter classifier.

    Each supported letter provides:
      * ``_x``      -- flat 35-element bitmap (7 rows of 5 pixels),
      * ``_interD`` -- expected outputs for the 3 subtasks,
      * ``_d``      -- expected output for the whole task
                       (0 = lowercase, 1 = uppercase).

    Unknown letters leave the defaults in place (``_x == []``,
    ``_d is None``, ``_interD is None``), matching the original behavior.
    """

    # letter -> (bitmap, subtask targets, lowercase/uppercase target).
    # Replaces the original 16-branch if/elif chain with a data table.
    _LETTERS = {
        'a': ((0, 0, 0, 0, 0,
               0, 0, 0, 0, 0,
               0, 0, 0, 0, 0,
               0, 0, 1, 1, 0,
               0, 1, 0, 1, 0,
               0, 1, 0, 1, 0,
               0, 1, 1, 1, 1), (0, 0, 0), 0),
        'b': ((1, 0, 0, 0, 0,
               1, 0, 0, 0, 0,
               1, 0, 0, 0, 0,
               1, 1, 1, 1, 0,
               1, 0, 0, 1, 0,
               1, 0, 0, 1, 0,
               1, 1, 1, 1, 0), (1, 0, 0), 0),
        't': ((0, 0, 0, 0, 0,
               0, 0, 0, 0, 0,
               0, 0, 0, 0, 0,
               0, 0, 1, 0, 0,
               0, 1, 1, 1, 0,
               0, 0, 1, 0, 0,
               0, 0, 1, 1, 0), (0, 0, 0), 0),
        'p': ((0, 0, 0, 0, 0,
               0, 0, 0, 0, 0,
               0, 0, 0, 0, 0,
               0, 1, 1, 1, 0,
               0, 1, 0, 1, 0,
               0, 1, 1, 1, 0,
               0, 1, 0, 1, 0), (0, 0, 0), 0),
        'c': ((0, 0, 0, 0, 0,
               0, 0, 0, 0, 0,
               0, 0, 0, 0, 0,
               0, 1, 1, 1, 0,
               0, 1, 0, 0, 0,
               0, 1, 0, 0, 0,
               0, 1, 1, 1, 0), (0, 0, 0), 0),
        'w': ((0, 0, 0, 0, 0,
               0, 0, 0, 0, 0,
               0, 0, 0, 0, 0,
               1, 0, 0, 0, 1,
               1, 0, 0, 0, 1,
               1, 0, 1, 0, 1,
               0, 1, 0, 1, 0), (1, 0, 1), 0),
        'd': ((0, 0, 0, 0, 1,
               0, 0, 0, 0, 1,
               0, 0, 0, 0, 1,
               0, 0, 1, 1, 1,
               0, 1, 0, 0, 1,
               0, 1, 0, 0, 1,
               0, 1, 1, 1, 1), (0, 0, 0), 0),
        'o': ((0, 0, 0, 0, 0,
               0, 0, 0, 0, 0,
               0, 0, 0, 0, 0,
               0, 1, 1, 1, 0,
               0, 1, 0, 1, 0,
               0, 1, 0, 1, 0,
               0, 1, 1, 1, 0), (0, 0, 0), 0),
        'A': ((0, 1, 1, 1, 0,
               1, 0, 0, 0, 1,
               1, 0, 0, 0, 1,
               1, 1, 1, 1, 1,
               1, 0, 0, 0, 1,
               1, 0, 0, 0, 1,
               1, 0, 0, 0, 1), (1, 1, 1), 1),
        'B': ((1, 1, 1, 1, 0,
               1, 0, 0, 0, 1,
               1, 0, 0, 0, 1,
               1, 1, 1, 1, 0,
               1, 0, 0, 0, 1,
               1, 0, 0, 0, 1,
               1, 1, 1, 1, 0), (1, 1, 1), 1),
        'I': ((0, 0, 1, 0, 0,
               0, 0, 1, 0, 0,
               0, 0, 1, 0, 0,
               0, 0, 1, 0, 0,
               0, 0, 1, 0, 0,
               0, 0, 1, 0, 0,
               0, 0, 1, 0, 0), (0, 1, 0), 1),
        'C': ((0, 1, 1, 1, 0,
               1, 0, 0, 0, 1,
               1, 0, 0, 0, 0,
               1, 0, 0, 0, 0,
               1, 0, 0, 0, 0,
               1, 0, 0, 0, 1,
               0, 1, 1, 1, 0), (1, 1, 0), 1),
        'D': ((1, 1, 1, 1, 0,
               1, 0, 0, 0, 1,
               1, 0, 0, 0, 1,
               1, 0, 0, 0, 1,
               1, 0, 0, 0, 1,
               1, 0, 0, 0, 1,
               1, 1, 1, 1, 0), (1, 1, 1), 1),
        'F': ((1, 1, 1, 1, 1,
               1, 0, 0, 0, 0,
               1, 0, 0, 0, 0,
               1, 1, 1, 1, 0,
               1, 0, 0, 0, 0,
               1, 0, 0, 0, 0,
               1, 0, 0, 0, 0), (1, 1, 0), 1),
        'K': ((1, 0, 0, 0, 1,
               1, 0, 0, 1, 0,
               1, 0, 1, 0, 0,
               1, 1, 0, 0, 0,
               1, 0, 1, 0, 0,
               1, 0, 0, 1, 0,
               1, 0, 0, 0, 1), (1, 1, 1), 1),
        'H': ((1, 0, 0, 0, 1,
               1, 0, 0, 0, 1,
               1, 0, 0, 0, 1,
               1, 1, 1, 1, 1,
               1, 0, 0, 0, 1,
               1, 0, 0, 0, 1,
               1, 0, 0, 0, 1), (1, 1, 1), 1),
    }

    def __init__(self, letter):
        """Store the letter and immediately resolve its training data."""
        self._x = []            # input bitmap (empty for unknown letters)
        self._d = None          # expected output for the whole task
        self._interD = None     # expected outputs for the 3 subtasks
        self._letter = letter
        self.getLetter()

    def getLetter(self):
        """Populate _x/_interD/_d from the table for the stored letter.

        Unknown letters leave the constructor defaults untouched, exactly
        like the original if/elif chain did.
        """
        entry = self._LETTERS.get(self._letter)
        if entry is not None:
            bitmap, inter_d, d = entry
            # Copy so instances never share (and cannot mutate) the
            # class-level pattern data.
            self._x = list(bitmap)
            self._interD = list(inter_d)
            self._d = d

    def __getitem__(self, index):
        """Dict-style access: 'x', 'd' or 'interD' (anything else -> None)."""
        if index == 'x':
            return self._x
        elif index == 'd':
            return self._d
        elif index == 'interD':
            return self._interD
| 28.765487
| 74
| 0.239963
| 844
| 6,501
| 1.735782
| 0.047393
| 0.344027
| 0.339932
| 0.278498
| 0.788396
| 0.788396
| 0.756997
| 0.743345
| 0.692833
| 0.681229
| 0
| 0.253756
| 0.621135
| 6,501
| 225
| 75
| 28.893333
| 0.341048
| 0.012921
| 0
| 0.724638
| 0
| 0
| 0.006548
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.014493
| false
| 0
| 0
| 0
| 0.033816
| 0
| 0
| 0
| 1
| null | 1
| 1
| 1
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 12
|
80d9d89a31bf6210a4296666f39422be9a34d120
| 1,121
|
py
|
Python
|
src/tests/test_utils_evform.py
|
reddcoin-project/ReddConnect
|
5c212683de6b80b81fd15ed05239c3a1b46c3afd
|
[
"BSD-3-Clause"
] | 5
|
2015-01-30T08:47:59.000Z
|
2022-01-22T19:27:03.000Z
|
src/tests/test_utils_evform.py
|
reddcoin-project/ReddConnect
|
5c212683de6b80b81fd15ed05239c3a1b46c3afd
|
[
"BSD-3-Clause"
] | 2
|
2017-12-28T21:36:48.000Z
|
2017-12-28T21:36:57.000Z
|
src/tests/test_utils_evform.py
|
reddcoin-project/ReddConnect
|
5c212683de6b80b81fd15ed05239c3a1b46c3afd
|
[
"BSD-3-Clause"
] | 1
|
2019-01-05T15:51:37.000Z
|
2019-01-05T15:51:37.000Z
|
import unittest
class TestEvForm(unittest.TestCase):
    """Scaffolded test suite for EvForm; every case is still a placeholder."""

    def test___init__(self):
        # TODO: construct EvForm(filename, cells, tables, form, **kwargs)
        # and assert on its initial state.
        assert True

    def test___str__(self):
        # TODO: compare EvForm(...).__str__() against the expected rendering.
        assert True

    def test___unicode__(self):
        # TODO: compare EvForm(...).__unicode__() against the expected text.
        assert True

    def test_map(self):
        # TODO: verify EvForm(...).map(cells, tables, **kwargs) output.
        assert True

    def test_reload(self):
        # TODO: verify EvForm(...).reload(filename, form, **kwargs) output.
        assert True
if __name__ == '__main__':
    # Discover and run all TestCase classes in this module.
    unittest.main()
| 37.366667
| 78
| 0.657449
| 136
| 1,121
| 5.110294
| 0.227941
| 0.077698
| 0.071942
| 0.115108
| 0.805755
| 0.805755
| 0.805755
| 0.805755
| 0.805755
| 0.564029
| 0
| 0
| 0.22926
| 1,121
| 29
| 79
| 38.655172
| 0.804398
| 0.601249
| 0
| 0.357143
| 0
| 0
| 0.018519
| 0
| 0
| 0
| 0
| 0.034483
| 0.357143
| 1
| 0.357143
| false
| 0
| 0.071429
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
037fdc5dd491679d9f26eabbb2a0e0416e133cbd
| 63
|
py
|
Python
|
src/Set/removeSet.py
|
mikeludemann/helperFunctions_Python
|
62b1e8279eee216f3603f55cf2d010d611e3be0e
|
[
"MIT"
] | null | null | null |
src/Set/removeSet.py
|
mikeludemann/helperFunctions_Python
|
62b1e8279eee216f3603f55cf2d010d611e3be0e
|
[
"MIT"
] | null | null | null |
src/Set/removeSet.py
|
mikeludemann/helperFunctions_Python
|
62b1e8279eee216f3603f55cf2d010d611e3be0e
|
[
"MIT"
] | null | null | null |
# Demonstrate set.pop(): removes and returns an arbitrary element,
# shrinking the set by one each call.
x = {0, 1, 2, 3, 4, 5}
for _ in range(2):
    x.pop()
    print(x)
| 9
| 27
| 0.47619
| 16
| 63
| 1.875
| 0.625
| 0.266667
| 0.6
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12
| 0.206349
| 63
| 7
| 28
| 9
| 0.48
| 0
| 0
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.4
| 1
| 0
| 1
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
039bbf9c9cef10baaf89f0817f07f579fa3f7d9d
| 85
|
py
|
Python
|
radiomicsfeatureextractionpipeline/backend/test/component_tests/test_component_configuration_service.py
|
Maastro-CDS-Imaging-Group/SQLite4Radiomics
|
e3a7afc181eec0fe04c18da00edc3772064e6758
|
[
"Apache-2.0"
] | null | null | null |
radiomicsfeatureextractionpipeline/backend/test/component_tests/test_component_configuration_service.py
|
Maastro-CDS-Imaging-Group/SQLite4Radiomics
|
e3a7afc181eec0fe04c18da00edc3772064e6758
|
[
"Apache-2.0"
] | 6
|
2021-06-09T19:39:27.000Z
|
2021-09-30T16:41:40.000Z
|
radiomicsfeatureextractionpipeline/backend/test/component_tests/test_component_configuration_service.py
|
Maastro-CDS-Imaging-Group/SQLite4Radiomics
|
e3a7afc181eec0fe04c18da00edc3772064e6758
|
[
"Apache-2.0"
] | null | null | null |
import unittest
class ConfigurationServiceComponentTest(unittest.TestCase):
    """Placeholder component-test suite for the configuration service.

    No test cases have been written yet; the class exists so the test
    discovery layout is already in place.
    """
    pass
| 21.25
| 59
| 0.847059
| 7
| 85
| 10.285714
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105882
| 85
| 4
| 60
| 21.25
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 7
|
ff08270a8a407317e5dc818c839a44622de49cac
| 177
|
py
|
Python
|
Machine Learning and Data Science/Basic/Medical Data Visualizer/main.py
|
ayushi1210/Project-Guidance
|
31d227ab70756a9afabce41ce577bd8cec1d264b
|
[
"MIT"
] | 1
|
2022-03-03T09:30:07.000Z
|
2022-03-03T09:30:07.000Z
|
Machine Learning and Data Science/Basic/Medical Data Visualizer/main.py
|
srinjoy-26/Project-Guidance
|
504f37e8f7566db2d1bca873df7dd1fff5368497
|
[
"MIT"
] | null | null | null |
Machine Learning and Data Science/Basic/Medical Data Visualizer/main.py
|
srinjoy-26/Project-Guidance
|
504f37e8f7566db2d1bca873df7dd1fff5368497
|
[
"MIT"
] | null | null | null |
# Driver script: render both figures from the project's visualizer module,
# then run the unit tests defined in test_module.py.
import medical_data_visualizer
from unittest import main
medical_data_visualizer.draw_cat_plot()
medical_data_visualizer.draw_heat_map()
# exit=False keeps the interpreter alive after the test run completes.
main(module='test_module', exit=False)
| 25.285714
| 39
| 0.864407
| 26
| 177
| 5.461538
| 0.615385
| 0.232394
| 0.443662
| 0.352113
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.062147
| 177
| 7
| 40
| 25.285714
| 0.855422
| 0
| 0
| 0
| 0
| 0
| 0.061798
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
454b2545b0229b623b30216a76675c0a6443427a
| 23,392
|
py
|
Python
|
a1/grad_descent.py
|
zhangtravis/IntSys-Education
|
407e0e1f60b57a922b84f6813378a0bddc178c3a
|
[
"MIT"
] | null | null | null |
a1/grad_descent.py
|
zhangtravis/IntSys-Education
|
407e0e1f60b57a922b84f6813378a0bddc178c3a
|
[
"MIT"
] | null | null | null |
a1/grad_descent.py
|
zhangtravis/IntSys-Education
|
407e0e1f60b57a922b84f6813378a0bddc178c3a
|
[
"MIT"
] | null | null | null |
"""Gradient Descent Assignment for CDS Intelligent Systems."""
import typing
import random
import numpy as np
from plotting import plot_grad_descent_1d, plot_linear_1d
# ============================================================================
# Example Hypothesis Functions
# ============================================================================
def linear_h(theta, x):
    """Evaluate the linear hypothesis on a batch of samples.

    :param theta: regressor weights; shape (1, features)
    :type theta: np.ndarray
    :param x: sample matrix; shape (samples, features)
    :type x: np.ndarray
    :return: model predictions; shape (samples, 1)
    :rtype: np.ndarray
    """
    # x @ theta.T is algebraically identical to (theta @ x.T).T.
    return x @ theta.T
def linear_grad_h(theta, x):
    """Gradient of the linear hypothesis with respect to theta.

    :param theta: regressor weights; shape (1, features)
    :type theta: np.ndarray
    :param x: sample matrix; shape (samples, features)
    :type x: np.ndarray
    :return: per-sample gradient; shape (samples, features)
    :rtype: np.ndarray
    """
    # d/dtheta of (theta . x) is just the input matrix itself.
    return x
def parabolic_h(theta, x):
    """Evaluate the parabolic hypothesis on a batch of samples.

    :param theta: regressor weights; shape (1, features)
    :type theta: np.ndarray
    :param x: sample matrix; shape (samples, features)
    :type x: np.ndarray
    :return: model predictions; shape (samples, 1)
    :rtype: np.ndarray
    """
    # Square each feature first, then take the weighted sum per sample.
    return (x ** 2) @ theta.T
def parabolic_grad_h(theta, x):
    """Gradient of the parabolic hypothesis with respect to theta.

    :param theta: regressor weights; shape (1, features)
    :type theta: np.ndarray
    :param x: sample matrix; shape (samples, features)
    :type x: np.ndarray
    :return: per-sample gradient; shape (samples, features)
    :rtype: np.ndarray
    """
    # d/dtheta of (theta . x^2) is the element-wise square of the input.
    return np.square(x)
# Add your own hypotheses if you want
def loss_f1(h, theta, x, y):
    """loss_f1 returns the loss for special function f1.

    f1 is a two-well surface over weights (w1, w2):
        -2*exp(-((w1-1)^2 + w2^2)/0.2) - 3*exp(-((w1+1)^2 + w2^2)/0.2)
        + w1^2 + w2^2

    This function is for demonstration purposes, since it ignores
    data points x and y.

    :param h: hypothesis function that is being used (ignored)
    :type h: typing.Callable[[np.ndarray, np.ndarray], np.ndarray]
    :param theta: The parameters for our model, reshaped to (-1, 2)
    :type theta: np.ndarray of shape (-1, 2)
    :param x: A matrix of samples and their respective features (ignored).
    :type x: np.ndarray of shape (samples, features)
    :param y: The expected targets our model is attempting to match (ignored).
    :type y: np.ndarray of shape (samples,)
    :return: Return the function evaluation of theta
    :rtype: int or np.ndarray of shape (theta.shape[0],)
    """
    theta = np.reshape(theta, (-1, 2))
    w1 = theta[:, 0]
    w2 = theta[:, 1]  # BUG FIX: was theta[:, 0], so w2 duplicated w1
    return (
        -2 * np.exp(-((w1 - 1) * (w1 - 1) + w2 * w2) / 0.2)
        # BUG FIX: second well used y * y although the docstring says y is
        # ignored; it must be w2 * w2 to match the f1 surface.
        + -3 * np.exp(-((w1 + 1) * (w1 + 1) + w2 * w2) / 0.2)
        + w1 * w1
        + w2 * w2
    )
def grad_loss_f1(h, grad_h, theta, x, y):
    """grad_loss_f1 returns the gradients for the loss of the f1 function.

    The gradient is approximated by forward differences of loss_f1 along
    each of the two weight axes.

    This function is for demonstration purposes, since it ignores
    data points x and y.

    :param h: The hypothesis function that predicts our output given weights
    :type h: typing.Callable[[np.ndarray, np.ndarray], np.ndarray]
    :param grad_h: The gradient function of our hypothesis function (unused)
    :type grad_h: typing.Callable[[np.ndarray, np.ndarray], np.ndarray]
    :param theta: The parameters for our model.
    :type theta: np.ndarray of shape (-1, 2)
    :param x: A matrix of samples and their respective features (ignored).
    :type x: np.ndarray of shape (samples, features)
    :param y: The expected targets our model is attempting to match (ignored).
    :type y: np.ndarray of shape (samples,)
    :return: gradients for the loss function along the two axes
    :rtype: np.ndarray
    """
    theta = np.reshape(theta, (-1, 2))
    step = 1e-7
    # BUG FIX: the original called loss_f1(w1 + step, w2) etc., which does
    # not match loss_f1's (h, theta, x, y) signature and mixed in y.
    # Perturb the full theta along each axis instead.
    base = loss_f1(h, theta, x, y)
    grad_w1 = (loss_f1(h, theta + np.array([step, 0.0]), x, y) - base) / step
    grad_w2 = (loss_f1(h, theta + np.array([0.0, step]), x, y) - base) / step
    return np.array((grad_w1, grad_w2))
def l2_loss(
    h: typing.Callable[[np.ndarray, np.ndarray], np.ndarray],
    grad_h: typing.Callable[[np.ndarray, np.ndarray], np.ndarray],
    theta: np.ndarray,
    x, y):
    """Sum-of-squares (l2) loss: sum over samples of (h(x) - y)^2.

    :param h: hypothesis that models the data (x) using theta
    :type h: typing.Callable[[np.ndarray, np.ndarray], np.ndarray]
    :param grad_h: gradient of the hypothesis function (unused here)
    :type grad_h: typing.Callable[[np.ndarray, np.ndarray], np.ndarray]
    :param theta: hypothesis parameters; shape (1, features)
    :type theta: np.ndarray
    :param x: input matrix; shape (samples, features)
    :type x: np.ndarray
    :param y: expected targets; shape (samples, 1)
    :type y: np.ndarray
    :return: the scalar l2 loss value
    :rtype: float
    """
    residuals = h(theta, x) - y
    return np.sum(residuals * residuals)
def grad_l2_loss(h, grad_h, theta, x, y):
    """Gradient of the l2 loss: sum over samples of 2*(h(x) - y)*h'(x).

    :param h: hypothesis that models the data (x) using theta
    :type h: typing.Callable[[np.ndarray, np.ndarray], np.ndarray]
    :param grad_h: gradient of the hypothesis function
    :type grad_h: typing.Callable[[np.ndarray, np.ndarray], np.ndarray]
    :param theta: hypothesis parameters
    :type theta: np.ndarray
    :param x: input matrix; shape (samples, features)
    :type x: np.ndarray
    :param y: expected targets; shape (samples, 1)
    :type y: np.ndarray
    :return: the l2 loss gradient; shape (1, features)
    :rtype: np.ndarray
    """
    residuals = h(theta, x) - y
    # Chain rule per sample, then reduce across the sample axis.
    return np.sum(2 * residuals * grad_h(theta, x), axis=0).reshape(1, -1)
# ============================================================================
# YOUR CODE GOES HERE:
# ============================================================================
def grad_descent(h, grad_h, loss_f, grad_loss_f, x, y, steps):
    """Full-batch gradient descent, accumulated sample-by-sample.

    Deliberately avoids vectorized matrix operations: the per-sample
    gradients are summed in a plain Python loop.

    :param h: hypothesis function h(theta, x) -> predictions
    :type h: typing.Callable[[np.ndarray, np.ndarray], np.ndarray]
    :param grad_h: gradient of the hypothesis w.r.t. theta
    :type grad_h: typing.Callable[[np.ndarray, np.ndarray], np.ndarray]
    :param loss_f: loss function being minimized (not used by the update)
    :param grad_loss_f: gradient of the loss, called once per sample
    :param x: input matrix; shape (samples, features)
    :type x: np.ndarray
    :param y: expected targets; shape (samples, 1)
    :type y: np.ndarray
    :param steps: number of gradient-descent iterations
    :type steps: int
    :return: final weights of shape (1, features) and the weight history
    :rtype: tuple[np.ndarray, np.ndarray]
    """
    # Random starting point for the parameters.
    weight = np.random.random((1, x.shape[1]))
    history = []          # weights recorded at the start of each step
    n_samples = len(x)
    alpha = 0.01          # learning rate

    for _ in range(steps):
        history.append(weight)
        # Accumulate the gradient contribution of every sample (full batch).
        grad_sum = 0
        for sample_idx in range(n_samples):
            grad_sum += grad_loss_f(h, grad_h, weight, x[sample_idx], y[sample_idx])
        # Average the batch gradient and take one step downhill.
        weight = weight - alpha * 1 / n_samples * grad_sum
    return weight, np.array(history)
def stochastic_grad_descent(h, grad_h, loss_f, grad_loss_f, x, y, steps):
    """Stochastic gradient descent: one uniformly random sample per step.

    :param h: hypothesis function h(theta, x) -> predictions
    :type h: typing.Callable[[np.ndarray, np.ndarray], np.ndarray]
    :param grad_h: gradient of the hypothesis w.r.t. theta
    :type grad_h: typing.Callable[[np.ndarray, np.ndarray], np.ndarray]
    :param loss_f: loss function being minimized (not used by the update)
    :param grad_loss_f: gradient of the loss, called on the chosen sample
    :param x: input matrix; shape (samples, features)
    :type x: np.ndarray
    :param y: expected targets; shape (samples, 1)
    :type y: np.ndarray
    :param steps: number of gradient-descent iterations
    :type steps: int
    :return: final weights of shape (1, features) and the weight history
    :rtype: tuple[np.ndarray, np.ndarray]
    """
    # Random starting point for the parameters.
    weight = np.random.random((1, x.shape[1]))
    history = []   # weights recorded at the start of each step
    alpha = 0.01   # learning rate

    for _ in range(steps):
        history.append(weight)
        # Estimate the batch gradient from a single random sample.
        sample_idx = random.randrange(len(x))
        sample_grad = grad_loss_f(h, grad_h, weight, x[sample_idx], y[sample_idx])
        weight = weight - alpha * sample_grad
    return weight, np.array(history)
def minibatch_grad_descent(h, grad_h, loss_f, grad_loss_f, x, y, steps, batch_size=8):
    """Stochastic mini-batch gradient descent without vectorized updates.

    Each step reshuffles the data into mini-batches (via create_mini_batch)
    and applies one averaged gradient update per batch.

    :param h: hypothesis function h(theta, x) -> predictions
    :type h: typing.Callable[[np.ndarray, np.ndarray], np.ndarray]
    :param grad_h: gradient of the hypothesis w.r.t. theta
    :type grad_h: typing.Callable[[np.ndarray, np.ndarray], np.ndarray]
    :param loss_f: loss function being minimized (not used by the update)
    :param grad_loss_f: gradient of the loss, called once per sample
    :param x: input matrix; shape (samples, features)
    :type x: np.ndarray
    :param y: expected targets; shape (samples, 1)
    :type y: np.ndarray
    :param steps: number of passes over freshly shuffled mini-batches
    :type steps: int
    :param batch_size: number of samples per mini-batch
    :type batch_size: int
    :return: final weights of shape (1, features) and the weight history
    :rtype: tuple[np.ndarray, np.ndarray]
    """
    # Random starting point for the parameters.
    weight = np.random.random_sample((1, x.shape[1]))
    # List of weights through time.
    weightList = []
    # Learning rate.
    alpha = 0.01
    for _ in range(steps):
        weightList.append(weight)
        # Reshuffle and re-partition the data each step.
        mini_batches = create_mini_batch(x, y, batch_size)
        for mini_batch in mini_batches:
            x_mini, y_mini = mini_batch
            # create_mini_batch may yield an empty trailing batch; skip it
            # so the average below never divides by zero.
            if len(x_mini) == 0:
                continue
            totalGradMiniLoss = 0
            for x_i, y_i in zip(x_mini, y_mini):
                totalGradMiniLoss += grad_loss_f(h, grad_h, weight, x_i, y_i)
            # BUG FIX: average over the number of samples in the batch.
            # The original divided by len(mini_batch), which is a 2-tuple
            # (x_mini, y_mini) and therefore always 2.
            weight = weight - alpha * 1 / len(x_mini) * totalGradMiniLoss
    return weight, np.array(weightList)
def create_mini_batch(x, y, batch_size):
    """Shuffle (x, y) jointly and split them into batches of batch_size.

    The last batch may be smaller than batch_size when the sample count is
    not a multiple of batch_size; no empty batch is ever produced.

    :param x: Input matrix of shape (samples, features)
    :type x: np.ndarray
    :param y: Groundtruth labels of shape (samples, 1)
    :type y: np.ndarray
    :param batch_size: Size of each batch
    :type batch_size: int
    :return: List of (x_batch, y_batch) tuples
    :rtype: list[tuple[np.ndarray, np.ndarray]]
    """
    # Stack x and y together so shuffling keeps each sample aligned with
    # its target.
    data = np.hstack((x, y))
    np.random.shuffle(data)
    mini_batches = []
    # BUG FIX: iterate by start offset. The original range(no_of_batches + 1)
    # appended an empty batch whenever len(x) was an exact multiple of
    # batch_size.
    for start in range(0, data.shape[0], batch_size):
        mini_batch = data[start:start + batch_size]
        # Split each mini batch back into its x and y components.
        x_mini_batch = mini_batch[:, :-1]
        y_mini_batch = mini_batch[:, -1].reshape((-1, 1))
        mini_batches.append((x_mini_batch, y_mini_batch))
    return mini_batches
def matrix_gd(h, grad_h, loss_f, grad_loss_f, x, y, steps, batch_size=8):
    """Full-batch gradient descent with one vectorized update per step.

    Unlike grad_descent, the per-sample loop is replaced by a single call
    to grad_loss_f over the whole dataset.

    :param h: hypothesis function h(theta, x) -> predictions
    :type h: typing.Callable[[np.ndarray, np.ndarray], np.ndarray]
    :param grad_h: gradient of the hypothesis w.r.t. theta
    :type grad_h: typing.Callable[[np.ndarray, np.ndarray], np.ndarray]
    :param loss_f: loss function being minimized (not used by the update)
    :param grad_loss_f: vectorized gradient of the loss over all samples
    :param x: input matrix; shape (samples, features)
    :type x: np.ndarray
    :param y: expected targets; shape (samples, 1)
    :type y: np.ndarray
    :param steps: number of gradient-descent iterations
    :type steps: int
    :param batch_size: unused here; kept for signature parity with the
        other descent routines
    :type batch_size: int
    :return: final weights of shape (1, features) and the weight history
    :rtype: tuple[np.ndarray, np.ndarray]
    """
    # Random starting point for the parameters.
    weight = np.random.random_sample((1, x.shape[1]))
    history = []          # weights recorded after each update
    alpha = 0.01          # learning rate
    n_samples = len(x)

    for _ in range(steps):
        # One vectorized gradient over the whole dataset, averaged.
        weight = weight - alpha * grad_loss_f(h, grad_h, weight, x, y) / n_samples
        history.append(weight)
    return weight, np.array(history)
def matrix_sgd(h, grad_h, loss_f, grad_loss_f, x, y, steps):
    """matrix_sgd: stochastic gradient descent using numpy operations.

    Each step samples one training example uniformly at random and updates
    the weights with the gradient computed on that single example.

    :param h: hypothesis function that models our data (x) using theta
    :type h: typing.Callable[[np.ndarray, np.ndarray], np.ndarray]
    :param grad_h: function for the gradient of our hypothesis function
    :type grad_h: typing.Callable[[np.ndarray, np.ndarray], np.ndarray]
    :param loss_f: loss function that we would be optimizing on (unused
        here; kept so every optimizer shares the same call signature)
    :param grad_loss_f: gradient of the loss function, called as
        grad_loss_f(h, grad_h, weight, x_sample, y_sample)
    :param x: input matrix of shape (samples, features)
    :type x: np.ndarray
    :param y: the expected targets our model is attempting to match, of
        shape (samples, 1)
    :type y: np.ndarray
    :param steps: number of steps to take in the gradient descent algorithm
    :type steps: int
    :return: final weights of shape (1, features), and the weights through
        time with shape (steps, 1, features)
    :rtype: tuple[np.ndarray, np.ndarray]
    """
    # Random initial parameter vector, one weight per feature.
    weight = np.random.random((1, x.shape[1]))
    weight_history = []
    alpha = 0.01  # learning rate
    for _ in range(steps):
        # History records the weights *before* each update, so it starts at
        # the random initialization and excludes the final weights.
        weight_history.append(weight)
        # Pick one training example uniformly at random.
        idx = np.random.randint(0, len(x))
        # NOTE(review): x[idx] is a 1-D row; grad_loss_f is assumed to
        # accept a single flattened sample — confirm against its contract.
        grad = grad_loss_f(h, grad_h, weight, x[idx], y[idx])
        weight = weight - alpha * grad
    return weight, np.array(weight_history)
def matrix_minibatch_gd(h, grad_h, loss_f, grad_loss_f, x, y, steps, batch_size=8):
    """matrix_minibatch_gd: Mini-Batch GD using numpy matrix operations.

    Stochastic mini-batch GD with batches of size batch_size using numpy
    operations to speed up all of the operations. Each step reshuffles the
    data into fresh mini-batches and applies one update per batch.

    :param h: hypothesis function that models our data (x) using theta
    :type h: typing.Callable[[np.ndarray, np.ndarray], np.ndarray]
    :param grad_h: function for the gradient of our hypothesis function
    :type grad_h: typing.Callable[[np.ndarray, np.ndarray], np.ndarray]
    :param loss_f: loss function that we would be optimizing on (unused
        here; kept so every optimizer shares the same call signature)
    :param grad_loss_f: gradient of the loss function, called as
        grad_loss_f(h, grad_h, weight, x_batch, y_batch)
    :param x: input matrix of shape (samples, features)
    :type x: np.ndarray
    :param y: the expected targets our model is attempting to match, of
        shape (samples, 1)
    :type y: np.ndarray
    :param steps: number of steps to take in the gradient descent algorithm
    :type steps: int
    :param batch_size: number of elements in each training batch
    :type batch_size: int
    :return: final weights of shape (1, features), and the weights through
        time with shape (steps, 1, features)
    :rtype: tuple[np.ndarray, np.ndarray]
    """
    # Random initial parameter vector, one weight per feature.
    weight = np.random.random((1, x.shape[1]))
    weight_history = []
    alpha = 0.01  # learning rate
    for _ in range(steps):
        # History records the weights at the start of each step.
        weight_history.append(weight)
        for x_batch, y_batch in create_mini_batch(x, y, batch_size):
            grad = grad_loss_f(h, grad_h, weight, x_batch, y_batch)
            # Scale by the *actual* batch length: the final batch may be
            # smaller than batch_size when len(x) is not a multiple of it,
            # and dividing by the nominal batch_size would under-weight it.
            weight = weight - alpha * grad / len(x_batch)
    return weight, np.array(weight_history)
# ============================================================================
# Sample tests that you can run to ensure the basics are working
# ============================================================================
def save_linear_gif():
    """Render the demo plots for a 1-D linear fit of y = 2x."""
    xs = np.arange(-3, 4, 0.1).reshape((-1, 1))
    ys = 2 * np.arange(-3, 4, 0.1).reshape((-1, 1))
    x_support = np.array((0, 4))
    y_support = np.array((-0.1, 200))
    # Both plot helpers take the identical argument list.
    plot_args = (
        linear_h,
        linear_grad_h,
        l2_loss,
        grad_l2_loss,
        xs,
        ys,
        matrix_minibatch_gd,
        x_support,
        y_support,
    )
    plot_linear_1d(*plot_args)
    plot_grad_descent_1d(*plot_args)
def test_gd(grad_des_f):
    """Placeholder test hook for a gradient-descent implementation.

    :param grad_des_f: gradient descent function to exercise (currently
        unused; the test body is not yet implemented)
    """
    pass
if __name__ == "__main__":
    # Script entry point: generate the sample linear-fit plots.
    save_linear_gif()
| 36.779874
| 99
| 0.641031
| 3,315
| 23,392
| 4.438914
| 0.07813
| 0.145566
| 0.107645
| 0.176147
| 0.80897
| 0.79667
| 0.776283
| 0.769215
| 0.758614
| 0.729732
| 0
| 0.01069
| 0.244186
| 23,392
| 635
| 100
| 36.837795
| 0.821606
| 0.685534
| 0
| 0.426667
| 0
| 0
| 0.001346
| 0
| 0
| 0
| 0
| 0.009449
| 0
| 1
| 0.113333
| false
| 0.006667
| 0.026667
| 0
| 0.24
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2fbfd0b5fe30df4afe3f0e7601079199dcc1e221
| 49
|
py
|
Python
|
VacationPy/api_keys.py
|
scottthomas586/python-api-challenge
|
c45b3dd469ac0a2b541a2127d5e2a5865750197b
|
[
"ADSL"
] | null | null | null |
VacationPy/api_keys.py
|
scottthomas586/python-api-challenge
|
c45b3dd469ac0a2b541a2127d5e2a5865750197b
|
[
"ADSL"
] | null | null | null |
VacationPy/api_keys.py
|
scottthomas586/python-api-challenge
|
c45b3dd469ac0a2b541a2127d5e2a5865750197b
|
[
"ADSL"
] | null | null | null |
# SECURITY NOTE(review): hard-coded Google API key committed to source
# control. This key should be rotated and loaded from an environment
# variable or an untracked config file instead of being embedded here.
g_key = 'AIzaSyB8ANK_bxF326hfo6jfY6wxWC5S70F2ZiI'
| 49
| 49
| 0.897959
| 4
| 49
| 10.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.212766
| 0.040816
| 49
| 1
| 49
| 49
| 0.680851
| 0
| 0
| 0
| 0
| 0
| 0.78
| 0.78
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2fe7208fad5c48a39e113f14638507ca00219e7d
| 17,859
|
py
|
Python
|
sdk/python/pulumi_gcp/projects/service.py
|
la3mmchen/pulumi-gcp
|
0e3c6fecd062dff78a4fd95b7ebd5ce4492ad1ea
|
[
"ECL-2.0",
"Apache-2.0"
] | 121
|
2018-06-18T19:16:42.000Z
|
2022-03-31T06:06:48.000Z
|
sdk/python/pulumi_gcp/projects/service.py
|
la3mmchen/pulumi-gcp
|
0e3c6fecd062dff78a4fd95b7ebd5ce4492ad1ea
|
[
"ECL-2.0",
"Apache-2.0"
] | 492
|
2018-06-22T19:41:03.000Z
|
2022-03-31T15:33:53.000Z
|
sdk/python/pulumi_gcp/projects/service.py
|
la3mmchen/pulumi-gcp
|
0e3c6fecd062dff78a4fd95b7ebd5ce4492ad1ea
|
[
"ECL-2.0",
"Apache-2.0"
] | 43
|
2018-06-19T01:43:13.000Z
|
2022-03-23T22:43:37.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['ServiceArgs', 'Service']
@pulumi.input_type
class ServiceArgs:
    """Input arguments for constructing a `Service` resource.

    Generated by the Pulumi Terraform Bridge; state is stored via
    pulumi.set/pulumi.get rather than plain attributes.
    """
    def __init__(__self__, *,
                 service: pulumi.Input[str],
                 disable_dependent_services: Optional[pulumi.Input[bool]] = None,
                 disable_on_destroy: Optional[pulumi.Input[bool]] = None,
                 project: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a Service resource.

        :param pulumi.Input[str] service: The service to enable.
        :param pulumi.Input[bool] disable_dependent_services: If `true`, services that are enabled
               and which depend on this service should also be disabled when this service is
               destroyed. If `false` or unset, an error will be generated if any enabled
               services depend on this service when destroying it.
        :param pulumi.Input[bool] disable_on_destroy: If true, disable the service when the resource is destroyed. Defaults to true. May be useful in the event that a project is long-lived but the infrastructure running in that project changes frequently.
        :param pulumi.Input[str] project: The project ID. If not provided, the provider project
               is used.
        """
        # `service` is the only required argument; optional arguments are
        # only recorded when they were actually provided.
        pulumi.set(__self__, "service", service)
        if disable_dependent_services is not None:
            pulumi.set(__self__, "disable_dependent_services", disable_dependent_services)
        if disable_on_destroy is not None:
            pulumi.set(__self__, "disable_on_destroy", disable_on_destroy)
        if project is not None:
            pulumi.set(__self__, "project", project)

    @property
    @pulumi.getter
    def service(self) -> pulumi.Input[str]:
        """
        The service to enable.
        """
        return pulumi.get(self, "service")

    @service.setter
    def service(self, value: pulumi.Input[str]):
        pulumi.set(self, "service", value)

    @property
    @pulumi.getter(name="disableDependentServices")
    def disable_dependent_services(self) -> Optional[pulumi.Input[bool]]:
        """
        If `true`, services that are enabled
        and which depend on this service should also be disabled when this service is
        destroyed. If `false` or unset, an error will be generated if any enabled
        services depend on this service when destroying it.
        """
        return pulumi.get(self, "disable_dependent_services")

    @disable_dependent_services.setter
    def disable_dependent_services(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "disable_dependent_services", value)

    @property
    @pulumi.getter(name="disableOnDestroy")
    def disable_on_destroy(self) -> Optional[pulumi.Input[bool]]:
        """
        If true, disable the service when the resource is destroyed. Defaults to true. May be useful in the event that a project is long-lived but the infrastructure running in that project changes frequently.
        """
        return pulumi.get(self, "disable_on_destroy")

    @disable_on_destroy.setter
    def disable_on_destroy(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "disable_on_destroy", value)

    @property
    @pulumi.getter
    def project(self) -> Optional[pulumi.Input[str]]:
        """
        The project ID. If not provided, the provider project
        is used.
        """
        return pulumi.get(self, "project")

    @project.setter
    def project(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "project", value)
@pulumi.input_type
class _ServiceState:
    """State properties used for looking up and filtering `Service`
    resources.

    Unlike `ServiceArgs`, every field (including `service`) is optional
    here, since lookups may filter on any subset of properties.
    """
    def __init__(__self__, *,
                 disable_dependent_services: Optional[pulumi.Input[bool]] = None,
                 disable_on_destroy: Optional[pulumi.Input[bool]] = None,
                 project: Optional[pulumi.Input[str]] = None,
                 service: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering Service resources.

        :param pulumi.Input[bool] disable_dependent_services: If `true`, services that are enabled
               and which depend on this service should also be disabled when this service is
               destroyed. If `false` or unset, an error will be generated if any enabled
               services depend on this service when destroying it.
        :param pulumi.Input[bool] disable_on_destroy: If true, disable the service when the resource is destroyed. Defaults to true. May be useful in the event that a project is long-lived but the infrastructure running in that project changes frequently.
        :param pulumi.Input[str] project: The project ID. If not provided, the provider project
               is used.
        :param pulumi.Input[str] service: The service to enable.
        """
        # Only record properties that were actually provided.
        if disable_dependent_services is not None:
            pulumi.set(__self__, "disable_dependent_services", disable_dependent_services)
        if disable_on_destroy is not None:
            pulumi.set(__self__, "disable_on_destroy", disable_on_destroy)
        if project is not None:
            pulumi.set(__self__, "project", project)
        if service is not None:
            pulumi.set(__self__, "service", service)

    @property
    @pulumi.getter(name="disableDependentServices")
    def disable_dependent_services(self) -> Optional[pulumi.Input[bool]]:
        """
        If `true`, services that are enabled
        and which depend on this service should also be disabled when this service is
        destroyed. If `false` or unset, an error will be generated if any enabled
        services depend on this service when destroying it.
        """
        return pulumi.get(self, "disable_dependent_services")

    @disable_dependent_services.setter
    def disable_dependent_services(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "disable_dependent_services", value)

    @property
    @pulumi.getter(name="disableOnDestroy")
    def disable_on_destroy(self) -> Optional[pulumi.Input[bool]]:
        """
        If true, disable the service when the resource is destroyed. Defaults to true. May be useful in the event that a project is long-lived but the infrastructure running in that project changes frequently.
        """
        return pulumi.get(self, "disable_on_destroy")

    @disable_on_destroy.setter
    def disable_on_destroy(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "disable_on_destroy", value)

    @property
    @pulumi.getter
    def project(self) -> Optional[pulumi.Input[str]]:
        """
        The project ID. If not provided, the provider project
        is used.
        """
        return pulumi.get(self, "project")

    @project.setter
    def project(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "project", value)

    @property
    @pulumi.getter
    def service(self) -> Optional[pulumi.Input[str]]:
        """
        The service to enable.
        """
        return pulumi.get(self, "service")

    @service.setter
    def service(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "service", value)
class Service(pulumi.CustomResource):
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 disable_dependent_services: Optional[pulumi.Input[bool]] = None,
                 disable_on_destroy: Optional[pulumi.Input[bool]] = None,
                 project: Optional[pulumi.Input[str]] = None,
                 service: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Allows management of a single API service for a Google Cloud Platform project.

        For a list of services available, visit the [API library page](https://console.cloud.google.com/apis/library)
        or run `gcloud services list --available`.

        This resource requires the [Service Usage API](https://console.cloud.google.com/apis/library/serviceusage.googleapis.com)
        to use.

        To get more information about `projects.Service`, see:

        * [API documentation](https://cloud.google.com/service-usage/docs/reference/rest/v1/services)
        * How-to Guides
            * [Enabling and Disabling Services](https://cloud.google.com/service-usage/docs/enable-disable)

        ## Example Usage

        ```python
        import pulumi
        import pulumi_gcp as gcp

        project = gcp.projects.Service("project",
            disable_dependent_services=True,
            project="your-project-id",
            service="iam.googleapis.com")
        ```

        ## Import

        Project services can be imported using the `project_id` and `service`, e.g.

        ```sh
        $ pulumi import gcp:projects/service:Service my_project your-project-id/iam.googleapis.com
        ```

        Note that unlike other resources that fail if they already exist, `terraform apply` can be successfully used to verify already enabled services. This means that when importing existing resources into Terraform, you can either import the `google_project_service` resources or treat them as new infrastructure and run `terraform apply` to add them to state.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[bool] disable_dependent_services: If `true`, services that are enabled
               and which depend on this service should also be disabled when this service is
               destroyed. If `false` or unset, an error will be generated if any enabled
               services depend on this service when destroying it.
        :param pulumi.Input[bool] disable_on_destroy: If true, disable the service when the resource is destroyed. Defaults to true. May be useful in the event that a project is long-lived but the infrastructure running in that project changes frequently.
        :param pulumi.Input[str] project: The project ID. If not provided, the provider project
               is used.
        :param pulumi.Input[str] service: The service to enable.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: ServiceArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Allows management of a single API service for a Google Cloud Platform project.

        For a list of services available, visit the [API library page](https://console.cloud.google.com/apis/library)
        or run `gcloud services list --available`.

        This resource requires the [Service Usage API](https://console.cloud.google.com/apis/library/serviceusage.googleapis.com)
        to use.

        To get more information about `projects.Service`, see:

        * [API documentation](https://cloud.google.com/service-usage/docs/reference/rest/v1/services)
        * How-to Guides
            * [Enabling and Disabling Services](https://cloud.google.com/service-usage/docs/enable-disable)

        ## Example Usage

        ```python
        import pulumi
        import pulumi_gcp as gcp

        project = gcp.projects.Service("project",
            disable_dependent_services=True,
            project="your-project-id",
            service="iam.googleapis.com")
        ```

        ## Import

        Project services can be imported using the `project_id` and `service`, e.g.

        ```sh
        $ pulumi import gcp:projects/service:Service my_project your-project-id/iam.googleapis.com
        ```

        Note that unlike other resources that fail if they already exist, `terraform apply` can be successfully used to verify already enabled services. This means that when importing existing resources into Terraform, you can either import the `google_project_service` resources or treat them as new infrastructure and run `terraform apply` to add them to state.

        :param str resource_name: The name of the resource.
        :param ServiceArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch to whichever overload the caller used: a ServiceArgs
        # bundle or individual keyword arguments.
        resource_args, opts = _utilities.get_resource_args_opts(ServiceArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 disable_dependent_services: Optional[pulumi.Input[bool]] = None,
                 disable_on_destroy: Optional[pulumi.Input[bool]] = None,
                 project: Optional[pulumi.Input[str]] = None,
                 service: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        # Shared implementation behind both __init__ overloads: validates
        # the resource options, builds the props bag, and registers the
        # resource with the Pulumi engine.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource (not looking up an existing one by id).
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = ServiceArgs.__new__(ServiceArgs)
            __props__.__dict__["disable_dependent_services"] = disable_dependent_services
            __props__.__dict__["disable_on_destroy"] = disable_on_destroy
            __props__.__dict__["project"] = project
            if service is None and not opts.urn:
                raise TypeError("Missing required property 'service'")
            __props__.__dict__["service"] = service
        super(Service, __self__).__init__(
            'gcp:projects/service:Service',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            disable_dependent_services: Optional[pulumi.Input[bool]] = None,
            disable_on_destroy: Optional[pulumi.Input[bool]] = None,
            project: Optional[pulumi.Input[str]] = None,
            service: Optional[pulumi.Input[str]] = None) -> 'Service':
        """
        Get an existing Service resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[bool] disable_dependent_services: If `true`, services that are enabled
               and which depend on this service should also be disabled when this service is
               destroyed. If `false` or unset, an error will be generated if any enabled
               services depend on this service when destroying it.
        :param pulumi.Input[bool] disable_on_destroy: If true, disable the service when the resource is destroyed. Defaults to true. May be useful in the event that a project is long-lived but the infrastructure running in that project changes frequently.
        :param pulumi.Input[str] project: The project ID. If not provided, the provider project
               is used.
        :param pulumi.Input[str] service: The service to enable.
        """
        # Attach the provider id so the engine performs a lookup instead of
        # a create.
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = _ServiceState.__new__(_ServiceState)
        __props__.__dict__["disable_dependent_services"] = disable_dependent_services
        __props__.__dict__["disable_on_destroy"] = disable_on_destroy
        __props__.__dict__["project"] = project
        __props__.__dict__["service"] = service
        return Service(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter(name="disableDependentServices")
    def disable_dependent_services(self) -> pulumi.Output[Optional[bool]]:
        """
        If `true`, services that are enabled
        and which depend on this service should also be disabled when this service is
        destroyed. If `false` or unset, an error will be generated if any enabled
        services depend on this service when destroying it.
        """
        return pulumi.get(self, "disable_dependent_services")

    @property
    @pulumi.getter(name="disableOnDestroy")
    def disable_on_destroy(self) -> pulumi.Output[Optional[bool]]:
        """
        If true, disable the service when the resource is destroyed. Defaults to true. May be useful in the event that a project is long-lived but the infrastructure running in that project changes frequently.
        """
        return pulumi.get(self, "disable_on_destroy")

    @property
    @pulumi.getter
    def project(self) -> pulumi.Output[str]:
        """
        The project ID. If not provided, the provider project
        is used.
        """
        return pulumi.get(self, "project")

    @property
    @pulumi.getter
    def service(self) -> pulumi.Output[str]:
        """
        The service to enable.
        """
        return pulumi.get(self, "service")
| 46.266839
| 364
| 0.663363
| 2,183
| 17,859
| 5.253321
| 0.103527
| 0.053715
| 0.069062
| 0.0361
| 0.860307
| 0.8406
| 0.831444
| 0.821067
| 0.80572
| 0.798832
| 0
| 0.000224
| 0.251358
| 17,859
| 385
| 365
| 46.387013
| 0.857517
| 0.46128
| 0
| 0.689655
| 1
| 0
| 0.10517
| 0.039602
| 0
| 0
| 0
| 0
| 0
| 1
| 0.155172
| false
| 0.005747
| 0.028736
| 0
| 0.275862
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
641677016464e3ba2273660e984ffe8ae1bd61be
| 191
|
py
|
Python
|
EllipticCurves/__init__.py
|
SymmetricChaos/FiniteFields
|
65258e06b7f04ce15223c1bc0c2384ef5e9cec1a
|
[
"MIT"
] | 1
|
2021-08-22T15:03:59.000Z
|
2021-08-22T15:03:59.000Z
|
EllipticCurves/__init__.py
|
SymmetricChaos/NumberTheory
|
65258e06b7f04ce15223c1bc0c2384ef5e9cec1a
|
[
"MIT"
] | null | null | null |
EllipticCurves/__init__.py
|
SymmetricChaos/NumberTheory
|
65258e06b7f04ce15223c1bc0c2384ef5e9cec1a
|
[
"MIT"
] | null | null | null |
from EllipticCurves.EllipticPoint import Elliptic_Curve, Elliptic_Point, cyclic_subgroup, cyclic_subgroups
__all__=["Elliptic_Curve", "Elliptic_Point", "cyclic_subgroup", "cyclic_subgroups"]
| 63.666667
| 106
| 0.848168
| 21
| 191
| 7.142857
| 0.52381
| 0.173333
| 0.28
| 0.346667
| 0.733333
| 0.733333
| 0.733333
| 0.733333
| 0
| 0
| 0
| 0
| 0.057592
| 191
| 3
| 107
| 63.666667
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0.307292
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 8
|
ff3b611e8f198e7137d2689ee3e2312362c9937f
| 263
|
py
|
Python
|
hawkweed/classes/repr.py
|
hellerve/hawkweed
|
0cdb378f7dac5ab76ad53d6c1917f0ac5687793c
|
[
"MIT"
] | 20
|
2016-06-13T19:24:20.000Z
|
2022-01-26T14:08:11.000Z
|
hawkweed/classes/repr.py
|
hellerve/hawkweed
|
0cdb378f7dac5ab76ad53d6c1917f0ac5687793c
|
[
"MIT"
] | null | null | null |
hawkweed/classes/repr.py
|
hellerve/hawkweed
|
0cdb378f7dac5ab76ad53d6c1917f0ac5687793c
|
[
"MIT"
] | null | null | null |
"""The Representation base class."""
class Repr(object):
    """Mixin base class giving subclasses a uniform representation of the
    form ``ClassName(<default object repr>)``."""

    def __repr__(self):
        class_name = type(self).__name__
        inner = super(Repr, self).__repr__()
        return "{}({})".format(class_name, inner)

    def __str__(self):
        # str() and repr() are deliberately identical.
        return repr(self)
| 29.222222
| 85
| 0.638783
| 29
| 263
| 4.965517
| 0.448276
| 0.236111
| 0.291667
| 0.361111
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.186312
| 263
| 8
| 86
| 32.875
| 0.672897
| 0.231939
| 0
| 0
| 0
| 0
| 0.031414
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0
| 0.4
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 8
|
ff7e53d272b307dbef698ceac943c40417998c9f
| 15,931
|
py
|
Python
|
kd.py
|
microsoft/semiparametric-distillation
|
b7153f1e8bbbe6d8261f64c23828611a5419a13a
|
[
"MIT"
] | 6
|
2021-04-21T00:51:43.000Z
|
2021-10-29T21:11:30.000Z
|
kd.py
|
microsoft/semiparametric-distillation
|
b7153f1e8bbbe6d8261f64c23828611a5419a13a
|
[
"MIT"
] | null | null | null |
kd.py
|
microsoft/semiparametric-distillation
|
b7153f1e8bbbe6d8261f64c23828611a5419a13a
|
[
"MIT"
] | 4
|
2021-04-25T08:54:44.000Z
|
2022-03-10T18:54:45.000Z
|
import torch
from torch import nn
from torch.nn import functional as F
class KDLoss(nn.Module):
    """Knowledge-distillation loss.

    Blends the original task loss with a temperature-scaled KL divergence
    between the student's and teacher's softened distributions.
    """

    def __init__(self, temperature=1.0, alpha=0.5):
        super().__init__()
        self.temperature = temperature
        self.alpha = alpha

    def forward(self, student_logit, teacher_logit, target, loss_original):
        # Adapted from https://github.com/huggingface/pytorch-transformers/blob/master/examples/distillation/distiller.py
        tau = self.temperature
        student_log_probs = F.log_softmax(student_logit / tau, dim=-1)
        teacher_probs = F.softmax(teacher_logit / tau, dim=-1)
        divergence = F.kl_div(student_log_probs, teacher_probs, reduction='batchmean')
        # Scaled by temperature^2 to balance the soft and hard loss.
        # See https://github.com/peterliht/knowledge-distillation-pytorch/blob/master/model/net.py
        # or https://github.com/stanford-futuredata/lit-code/blob/master/cifar10/distillation_loss.py
        loss_kd = divergence * tau ** 2
        return (1 - self.alpha) * loss_original + self.alpha * loss_kd
class KDMSELoss(nn.Module):
    """Knowledge distillation via mean-squared error against the teacher.

    `scale='logp'` matches raw logits; `scale='p'` matches the teacher's
    softmax probabilities.
    """

    def __init__(self, alpha=0.5, scale='logp'):
        super().__init__()
        assert scale in ['logp', 'p']
        self.alpha = alpha
        self.scale = scale

    def forward(self, student_logit, teacher_logit, target, loss_original):
        if self.scale == 'logp':
            reference = teacher_logit
        else:
            reference = F.softmax(teacher_logit, dim=-1)
        loss_kd = F.mse_loss(student_logit, reference)
        return (1 - self.alpha) * loss_original + self.alpha * loss_kd
class KDOrthoLoss(KDLoss):
    """Orthogonal loss with knowledge distillation.

    Extends the plain KD objective with a first-order correction term built
    from the hard labels and the teacher's probabilities.
    """

    def __init__(self, temperature=1.0, alpha=0.5, eps=1e-2, smoothing=0.0):
        super().__init__(temperature, alpha)
        self.eps = eps  # clamp floor for teacher probabilities
        self.smoothing = smoothing  # How much to shrink toward uniform

    def forward(self, student_logit, teacher_logit, target, loss_original):
        tau = self.temperature
        kl = F.kl_div(F.log_softmax(student_logit / tau, dim=-1),
                      F.softmax(teacher_logit / tau, dim=-1),
                      reduction='batchmean')
        correction = self._first_order_term(student_logit, teacher_logit, target)
        # Same temperature^2 rescaling as the base KD loss.
        loss_kd = (kl + correction) * tau ** 2
        return (1 - self.alpha) * loss_original + self.alpha * loss_kd

    def _first_order_term(self, student_logit, teacher_logit, target):
        """First-order (label-dependent) correction to the KD objective."""
        inv_tau = 1.0 / self.temperature
        w = F.softmax(teacher_logit / self.temperature, dim=-1)
        log_q = F.log_softmax(student_logit / self.temperature, dim=-1)
        p = F.softmax(teacher_logit, dim=-1)
        if self.smoothing > 0.0:
            # Shrink the teacher distribution toward uniform.
            uniform = torch.ones_like(p) / student_logit.shape[-1]
            p = (1.0 - self.smoothing) * p + self.smoothing * uniform
        y = F.one_hot(target, student_logit.shape[-1]).float()
        # y/p - 1, with p clamped away from zero for numerical stability.
        label_ratio = y / p.clamp(self.eps) - 1.0
        weighted_ratio = label_ratio * w
        term_a = weighted_ratio.sum(dim=-1) * (w * log_q).sum(dim=-1)
        term_b = -(weighted_ratio * log_q).sum(dim=-1)
        return inv_tau * (term_a + term_b).mean(dim=0)
class KDMSEOrthoLoss(KDMSELoss):
    """MSE distillation with a first-order, label-based correction added to
    the teacher logits before matching."""

    def __init__(self, alpha=0.5, eps=1e-2, smoothing=0.0):
        super().__init__(alpha)
        self.eps = eps  # clamp floor for teacher probabilities
        self.smoothing = smoothing  # shrink teacher probs toward uniform

    def forward(self, student_logit, teacher_logit, target, loss_original):
        nclasses = student_logit.shape[-1]
        p = F.softmax(teacher_logit, dim=-1)
        if self.smoothing > 0.0:
            uniform = torch.ones_like(p) / nclasses
            p = (1.0 - self.smoothing) * p + self.smoothing * uniform
        y = F.one_hot(target, nclasses).float()
        # Correction term y/p - 1, clamped for numerical stability.
        correction = y / p.clamp(self.eps) - 1.0
        loss_kd = F.mse_loss(student_logit, teacher_logit + correction)
        return (1 - self.alpha) * loss_original + self.alpha * loss_kd
def find_optimal_gamma_sampling(phat, bound_fn, max_range=10, alpha=1.0, scale='logp'):
    """Grid-search the minimax-optimal gamma for each entry of `phat`.

    Evaluates a worst-case objective over 10 probabilities sampled between
    the per-entry bounds returned by `bound_fn`, for every gamma on a 0.05
    grid in [-max_range, max_range), and returns the gamma minimizing the
    maximum objective, reshaped to `phat`'s shape.
    """
    assert scale in ['p', 'logp']
    original_shape = phat.shape
    phat = phat.flatten()
    # Gamma grid, broadcast over (gamma, sample, entry).
    gamma_grid = torch.arange(-max_range, max_range, 0.05,
                              device=phat.device).unsqueeze(-1).unsqueeze(-1)

    def worst_case(p):
        if scale == 'p':
            bias = gamma_grid * (p - phat) - (p - phat)
        else:
            bias = gamma_grid * (p - phat) - (torch.log(p) - torch.log(phat))
        variance = (1 / alpha - 1 + gamma_grid) ** 2 * p * (1 - p)
        return bias ** 2 + variance

    lower, upper = bound_fn(phat)
    # 10 probabilities evenly spaced between the per-entry bounds.
    probe = torch.linspace(0.0, 1.0, 10, device=phat.device).unsqueeze(-1)
    candidates = lower + probe * (upper - lower)
    objective_values = worst_case(candidates)
    worst = objective_values.max(dim=1)[0]
    return gamma_grid[torch.argmin(worst, dim=0)].reshape(*original_shape)
def find_optimal_gamma_relerr(phat, c, max_range=10, alpha=1.0):
    """Grid-search the minimax-optimal gamma under a relative-error bound.

    Like `find_optimal_gamma_sampling` with `scale='logp'`, but the bounds
    are derived from a multiplicative factor `c`:
    p in [phat / (1 + c), phat * (1 + c)] (clamped to (0, 1]).
    """
    original_shape = phat.shape
    phat = phat.flatten()
    gamma_grid = torch.arange(-max_range, max_range, 0.05,
                              device=phat.device).unsqueeze(-1).unsqueeze(-1)

    def worst_case(p):
        bias = gamma_grid * (p - phat) - (torch.log(p) - torch.log(phat))
        variance = (1 / alpha - 1 + gamma_grid) ** 2 * p * (1 - p)
        return bias ** 2 + variance

    lower = torch.clamp(phat / (1 + c), min=1e-6)
    upper = torch.clamp(phat * (1 + c), max=1.0)
    probe = torch.linspace(0.0, 1.0, 10, device=phat.device).unsqueeze(-1)
    candidates = lower + probe * (upper - lower)
    worst = worst_case(candidates).max(dim=1)[0]
    return gamma_grid[torch.argmin(worst, dim=0)].reshape(*original_shape)
class KDMSEMinimaxRelerrLoss(KDMSELoss):
    """MSE distillation whose label-correction coefficient gamma is chosen
    per entry by a minimax grid search over a relative-error ball around
    the teacher's probabilities."""

    def __init__(self, alpha=0.5, scale='logp', smoothing=0.0, c=2.0):
        super().__init__(alpha, scale)
        self.smoothing = smoothing  # shrink teacher probs toward uniform
        self.c = c  # relative-error radius for the minimax search

    def forward(self, student_logit, teacher_logit, target, loss_original):
        nclasses = student_logit.shape[-1]
        p = F.softmax(teacher_logit, dim=-1)
        if self.smoothing > 0.0:
            uniform = torch.ones_like(p) / nclasses
            p = (1.0 - self.smoothing) * p + self.smoothing * uniform
        y = F.one_hot(target, nclasses).float()
        # On the log-prob scale the lower clamp must stay positive.
        lower_floor = 0.0 if self.scale == 'p' else 1e-6
        bound_fn = lambda phat: (torch.clamp(phat / (1 + self.c), min=lower_floor),
                                 torch.clamp(phat * (1 + self.c), max=1.0))
        gamma = find_optimal_gamma_sampling(p, bound_fn, alpha=self.alpha, scale=self.scale)
        if self.scale == 'logp':
            reference = teacher_logit + gamma * (y - p)
        else:
            reference = p + gamma * (y - p)
        loss_kd = F.mse_loss(student_logit, reference)
        return (1 - self.alpha) * loss_original + self.alpha * loss_kd
class KDMSEGamma1Loss(KDMSELoss):
    """KD-MSE loss with gamma fixed at 1: the student is regressed onto the
    teacher logits shifted by (y - p)."""

    def __init__(self, alpha=0.5, smoothing=0.0):
        super().__init__(alpha)
        self.smoothing = smoothing  # label-smoothing weight for the teacher distribution

    def forward(self, student_logit, teacher_logit, target, loss_original):
        n_cls = student_logit.shape[-1]
        p = F.softmax(teacher_logit, dim=-1)
        if self.smoothing > 0.0:
            # Mix teacher probabilities with the uniform distribution.
            p = (1.0 - self.smoothing) * p + self.smoothing * torch.ones_like(p) / n_cls
        y = F.one_hot(target, n_cls).float()
        gamma = 1.0  # fixed correction strength
        loss_kd = F.mse_loss(student_logit, teacher_logit + gamma * (y - p))
        return (1 - self.alpha) * loss_original + self.alpha * loss_kd
class KDMSEGammaVarLoss(KDMSELoss):
    """KD-MSE loss with a probability-dependent gamma = 1 - p, so the correction
    shrinks for classes the teacher is confident about."""

    def __init__(self, alpha=0.5, smoothing=0.0):
        super().__init__(alpha)
        self.smoothing = smoothing  # label-smoothing weight for the teacher distribution

    def forward(self, student_logit, teacher_logit, target, loss_original):
        n_cls = student_logit.shape[-1]
        p = F.softmax(teacher_logit, dim=-1)
        if self.smoothing > 0.0:
            # Mix teacher probabilities with the uniform distribution.
            p = (1.0 - self.smoothing) * p + self.smoothing * torch.ones_like(p) / n_cls
        y = F.one_hot(target, n_cls).float()
        gamma = 1 - p  # per-class correction strength
        loss_kd = F.mse_loss(student_logit, teacher_logit + gamma * (y - p))
        return (1 - self.alpha) * loss_original + self.alpha * loss_kd
def find_optimal_gamma_power(phat, tmax, max_range=10, alpha=1.0):
    """Minimax gamma under a power (temperature) bound p in [phat, phat**(1/tmax)].

    Log-probability scale variant. Returns a tensor of optimal gammas with the
    same shape as ``phat``.
    """
    orig_shape = phat.shape
    q = phat.flatten()
    gammas = torch.arange(-max_range, max_range, 0.05, device=q.device)[:, None, None]

    def worst_objective(p):
        fit = (gammas * (p - q) - (torch.log(p) - torch.log(q))) ** 2
        var = (1 / alpha - 1 + gammas) ** 2 * p * (1 - p)
        return fit + var

    lo = torch.clamp(q, min=1e-6)                 # keep log() finite
    hi = torch.clamp(q ** (1 / tmax), max=1.0)    # temperature-tmax upper bound
    ts = torch.linspace(0.0, 1.0, 10, device=q.device)[:, None]
    samples = lo + ts * (hi - lo)
    worst = worst_objective(samples).max(dim=1)[0]
    return gammas[worst.argmin(dim=0)].reshape(*orig_shape)
class KDMSEMinimaxPowerLoss(KDMSELoss):
    """KD-MSE loss whose gamma is found by a minimax grid search under a power
    (temperature) bound p in [phat, phat**(1/tmax)]."""

    def __init__(self, alpha=0.5, scale='logp', smoothing=0.0, tmax=2.0):
        super().__init__(alpha, scale)
        self.smoothing = smoothing  # label-smoothing weight for the teacher distribution
        self.tmax = tmax            # maximum temperature defining the upper bound

    def forward(self, student_logit, teacher_logit, target, loss_original):
        n_cls = student_logit.shape[-1]
        p = F.softmax(teacher_logit, dim=-1)
        if self.smoothing > 0.0:
            # Mix teacher probabilities with the uniform distribution.
            p = (1.0 - self.smoothing) * p + self.smoothing * torch.ones_like(p) / n_cls
        y = F.one_hot(target, n_cls).float()
        floor = 0.0 if self.scale == 'p' else 1e-6
        bound_fn = lambda phat: (
            torch.clamp(phat, min=floor),
            torch.clamp(phat ** (1 / self.tmax), max=1.0),
        )
        gamma = find_optimal_gamma_sampling(p, bound_fn, alpha=self.alpha, scale=self.scale)
        if self.scale == 'logp':
            loss_kd = F.mse_loss(student_logit, teacher_logit + gamma * (y - p))
        else:
            loss_kd = F.mse_loss(student_logit, p + gamma * (y - p))
        return (1 - self.alpha) * loss_original + self.alpha * loss_kd
def find_optimal_gamma_abserr(phat, c, max_range=10, alpha=1.0):
    """Minimax gamma under an absolute-error bound p in [phat - c, phat + c].

    Log-probability scale variant. Returns a tensor of optimal gammas with the
    same shape as ``phat``.
    """
    orig_shape = phat.shape
    q = phat.flatten()
    gammas = torch.arange(-max_range, max_range, 0.05, device=q.device)[:, None, None]

    def worst_objective(p):
        fit = (gammas * (p - q) - (torch.log(p) - torch.log(q))) ** 2
        var = (1 / alpha - 1 + gammas) ** 2 * p * (1 - p)
        return fit + var

    lo = torch.clamp(q - c, min=1e-3)   # keep log() finite
    hi = torch.clamp(q + c, max=1.0)
    ts = torch.linspace(0.0, 1.0, 10, device=q.device)[:, None]
    samples = lo + ts * (hi - lo)
    worst = worst_objective(samples).max(dim=1)[0]
    return gammas[worst.argmin(dim=0)].reshape(*orig_shape)
class KDMSEMinimaxAbserrLoss(KDMSELoss):
    """KD-MSE loss whose gamma is found by a minimax grid search under an
    absolute-error bound p in [phat - c, phat + c]."""

    def __init__(self, alpha=0.5, scale='logp', smoothing=0.0, c=0.05):
        super().__init__(alpha, scale)
        self.smoothing = smoothing  # label-smoothing weight for the teacher distribution
        self.c = c                  # absolute-error radius of the uncertainty set

    def forward(self, student_logit, teacher_logit, target, loss_original):
        n_cls = student_logit.shape[-1]
        p = F.softmax(teacher_logit, dim=-1)
        if self.smoothing > 0.0:
            # Mix teacher probabilities with the uniform distribution.
            p = (1.0 - self.smoothing) * p + self.smoothing * torch.ones_like(p) / n_cls
        y = F.one_hot(target, n_cls).float()
        floor = 0.0 if self.scale == 'p' else 1e-3
        bound_fn = lambda phat: (
            torch.clamp(phat - self.c, min=floor),
            torch.clamp(phat + self.c, max=1.0),
        )
        gamma = find_optimal_gamma_sampling(p, bound_fn, alpha=self.alpha, scale=self.scale)
        if self.scale == 'logp':
            loss_kd = F.mse_loss(student_logit, teacher_logit + gamma * (y - p))
        else:
            loss_kd = F.mse_loss(student_logit, p + gamma * (y - p))
        return (1 - self.alpha) * loss_original + self.alpha * loss_kd
class KDMSEBoundFast(KDMSELoss):
    """KD-MSE loss with a closed-form gamma, avoiding the grid search of the
    minimax variants."""

    def __init__(self, alpha=0.5, scale='logp', smoothing=0.0, c=1.0):
        super().__init__(alpha, scale)
        self.smoothing = smoothing  # label-smoothing weight for the teacher distribution
        self.c = c                  # shrinkage constant in the closed-form gamma

    def forward(self, student_logit, teacher_logit, target, loss_original):
        n_cls = student_logit.shape[-1]
        p = F.softmax(teacher_logit, dim=-1)
        if self.smoothing > 0.0:
            # Mix teacher probabilities with the uniform distribution.
            p = (1.0 - self.smoothing) * p + self.smoothing * torch.ones_like(p) / n_cls
        y = F.one_hot(target, n_cls).float()
        floor = 0.0 if self.scale == 'p' else 1e-3
        p_safe = torch.clamp(p, min=floor)  # avoid division by ~0 on the logp scale
        residual = y - p
        if self.scale == 'logp':
            gamma = self.c / p_safe / (self.c + residual ** 2)
            loss_kd = F.mse_loss(student_logit, teacher_logit + gamma * residual)
        else:
            gamma = self.c / (self.c + residual ** 2)
            loss_kd = F.mse_loss(student_logit, p + gamma * residual)
        return (1 - self.alpha) * loss_original + self.alpha * loss_kd
class KDMSEVarRedOrthoLoss(KDMSELoss):
    """KD-MSE loss with a probability-weighted (importance-weighted) squared
    error between the student logits and the corrected teacher target.

    The KD term weights each class's squared error by the teacher probability
    ``p``, with target ``teacher_logit + y/p - 1``.
    """

    def __init__(self, alpha=0.5, smoothing=0.0):
        super().__init__(alpha)
        self.smoothing = smoothing  # label-smoothing weight for the teacher distribution

    def forward(self, student_logit, teacher_logit, target, loss_original):
        p = F.softmax(teacher_logit, dim=-1)
        if self.smoothing > 0.0:
            uniform = torch.ones_like(p) / student_logit.shape[-1]
            p = (1.0 - self.smoothing) * p + self.smoothing * uniform
        y = F.one_hot(target, student_logit.shape[-1]).float()
        # NOTE(review): p is not clamped here, so y/p can blow up for tiny
        # teacher probabilities on the target class — confirm intended.
        yp_1 = y / p - 1.0
        # TODO Cross term to avoid numerical error
        # TODO Try explicit gradient
        # (Removed three dead `temp = ...` expansions of this expression that
        # were computed, overwritten, and never used.)
        loss_kd = ((student_logit - (teacher_logit + yp_1)) ** 2 * p).mean()
        return (1 - self.alpha) * loss_original + self.alpha * loss_kd
| 44.876056
| 124
| 0.606679
| 2,314
| 15,931
| 3.988332
| 0.077355
| 0.081916
| 0.055586
| 0.070213
| 0.824683
| 0.808647
| 0.786542
| 0.765847
| 0.741575
| 0.720771
| 0
| 0.03071
| 0.243739
| 15,931
| 354
| 125
| 45.002825
| 0.735309
| 0.144184
| 0
| 0.702929
| 0
| 0
| 0.005733
| 0
| 0
| 0
| 0
| 0.00565
| 0.008368
| 1
| 0.133891
| false
| 0
| 0.012552
| 0.020921
| 0.280335
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ff892b92d26ac04427ae495f98de50e6a5e4119e
| 88,796
|
py
|
Python
|
tests/token/test_IbetShare.py
|
BoostryJP/ibet-SmartContract
|
dc3f73a708ef145e7200ce58fce4e8171e21d3c2
|
[
"Apache-2.0"
] | 10
|
2021-06-12T08:43:50.000Z
|
2022-02-17T14:24:48.000Z
|
tests/token/test_IbetShare.py
|
BoostryJP/ibet-SmartContract
|
dc3f73a708ef145e7200ce58fce4e8171e21d3c2
|
[
"Apache-2.0"
] | 44
|
2021-04-11T06:43:10.000Z
|
2022-03-30T12:42:32.000Z
|
tests/token/test_IbetShare.py
|
BoostryJP/ibet-SmartContract
|
dc3f73a708ef145e7200ce58fce4e8171e21d3c2
|
[
"Apache-2.0"
] | 1
|
2022-03-09T07:27:57.000Z
|
2022-03-09T07:27:57.000Z
|
"""
Copyright BOOSTRY Co., Ltd.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
SPDX-License-Identifier: Apache-2.0
"""
import brownie
import pytest
def init_args():
    """Return the default IbetShare constructor argument list.

    Numeric fields are set to the uint256 maximum to exercise boundary values.
    """
    max_uint256 = 2 ** 256 - 1
    deploy_args = [
        'test_share',    # name
        'test_symbol',   # symbol
        max_uint256,     # issue price
        max_uint256,     # total supply
        max_uint256,     # dividends
        '20200829',      # dividend record date
        '20200831',      # dividend payment date
        '20191231',      # cancellation date
        max_uint256,     # principal value
    ]
    return deploy_args
def issue_transferable_share_token(issuer, exchange_address, personal_info_address):
    """Deploy a transferable IbetShare token and return ``(token, deploy_args)``.

    The token is wired to the given exchange and personal-info contracts and
    switched to transferable before being returned.
    """
    from brownie import IbetShare

    deploy_args = [
        'test_share',   # name
        'IBS',          # symbol
        1000,           # issue price
        10000,          # total supply
        1000,           # dividends
        '20200829',     # dividend record date
        '20200831',     # dividend payment date
        '20191231',     # cancellation date
        1000,           # principal value
    ]
    share_token = issuer.deploy(IbetShare, *deploy_args)
    share_token.setTradableExchange.transact(exchange_address, {'from': issuer})
    share_token.setPersonalInfoAddress.transact(personal_info_address, {'from': issuer})
    share_token.setTransferable.transact(True, {'from': issuer})
    return share_token, deploy_args
# TEST_deploy
class TestDeploy:
    """Deployment reflects every constructor argument in contract state."""

    # Normal_1
    def test_normal_1(self, IbetShare, users):
        issuer = users['issuer']
        deploy_args = init_args()
        share_contract = issuer.deploy(IbetShare, *deploy_args)
        # assertion: constructor arguments round-trip through the getters
        assert share_contract.owner() == issuer
        assert share_contract.name() == deploy_args[0]
        assert share_contract.symbol() == deploy_args[1]
        assert share_contract.issuePrice() == deploy_args[2]
        total_supply = share_contract.totalSupply()
        assert total_supply == deploy_args[3]
        dividend_information = share_contract.dividendInformation()
        assert dividend_information[0] == deploy_args[4]
        assert dividend_information[1] == deploy_args[5]
        assert dividend_information[2] == deploy_args[6]
        assert share_contract.cancellationDate() == deploy_args[7]
        assert share_contract.principalValue() == deploy_args[8]
        assert share_contract.isCanceled() == False
        assert share_contract.status() == True
        # the issuer initially holds the whole supply
        assert share_contract.balanceOf(issuer) == total_supply
        # backward compatible calls
        assert share_contract.offeringStatus() == False
        assert share_contract.referenceUrls(0) == ""
# TEST_setPrincipalValue
class TestSetPrincipalValue:
    """Tests for setPrincipalValue."""

    #######################################
    # Normal
    #######################################

    # Normal_1
    def test_normal_1(self, users, IbetShare):
        issuer = users["issuer"]
        # issue token
        deploy_args = init_args()
        token = issuer.deploy(IbetShare, *deploy_args)
        # update principal value
        token.setPrincipalValue.transact(9000, {"from": issuer})
        # assertion
        assert token.principalValue() == 9000

    #######################################
    # Error
    #######################################

    # Error_1
    # Not authorized
    def test_error_1(self, users, IbetShare):
        issuer = users["issuer"]
        trader = users['trader']
        # issue token
        deploy_args = init_args()
        token = issuer.deploy(IbetShare, *deploy_args)
        # a non-owner sender must be rejected
        with brownie.reverts():
            token.setPrincipalValue.transact(9000, {"from": trader})
        # assertion: state unchanged
        assert token.principalValue() == deploy_args[8]
# TEST_setTradableExchange
class TestSetTradableExchange:
    """Tests for setTradableExchange."""

    #######################################
    # Normal
    #######################################

    # Normal_1
    def test_normal_1(self, users, IbetShare):
        issuer = users['issuer']
        # issue token
        deploy_args = init_args()
        token = issuer.deploy(IbetShare, *deploy_args)
        # change exchange contract
        token.setTradableExchange.transact(brownie.ETH_ADDRESS, {'from': issuer})
        # assertion
        assert token.tradableExchange() == brownie.ETH_ADDRESS

    #######################################
    # Error
    #######################################

    # Error_1
    # Not authorized
    def test_error_1(self, users, IbetShare):
        issuer = users['issuer']
        # issue token
        deploy_args = init_args()
        token = issuer.deploy(IbetShare, *deploy_args)
        # a non-owner sender must be rejected
        with brownie.reverts():
            token.setTradableExchange.transact(brownie.ETH_ADDRESS, {'from': users['user1']})
        # assertion: still the zero address from deployment
        assert token.tradableExchange() == brownie.ZERO_ADDRESS
# TEST_setPersonalInfoAddress
class TestSetPersonalInfoAddress:
    """Tests for setPersonalInfoAddress."""

    #######################################
    # Normal
    #######################################

    # Normal_1
    def test_normal_1(self, users, IbetShare):
        issuer = users['issuer']
        # issue token
        deploy_args = init_args()
        token = issuer.deploy(IbetShare, *deploy_args)
        # update contract address
        token.setPersonalInfoAddress.transact(brownie.ETH_ADDRESS, {'from': issuer})
        # assertion
        assert token.personalInfoAddress() == brownie.ETH_ADDRESS

    #######################################
    # Error
    #######################################

    # Error_1
    # Not authorized
    def test_error_1(self, users, IbetShare):
        issuer = users['issuer']
        # issue token
        deploy_args = init_args()
        token = issuer.deploy(IbetShare, *deploy_args)
        # a non-owner sender must be rejected
        with brownie.reverts():
            token.setPersonalInfoAddress.transact(brownie.ETH_ADDRESS, {'from': users['user1']})
        # assertion: still the zero address from deployment
        assert token.personalInfoAddress() == brownie.ZERO_ADDRESS
# TEST_setDividendInformation
class TestSetDividendInformation:
    """Tests for setDividendInformation."""

    #######################################
    # Normal
    #######################################

    # Normal_1
    def test_normal_1(self, users, IbetShare):
        issuer = users['issuer']
        # issue token
        deploy_args = init_args()
        token = issuer.deploy(IbetShare, *deploy_args)
        # update dividend information
        token.setDividendInformation.transact(
            22000,
            '20200829',
            '20200831',
            {'from': issuer}
        )
        # assertion
        dividend_information = token.dividendInformation()
        assert dividend_information[0] == 22000
        assert dividend_information[1] == '20200829'
        assert dividend_information[2] == '20200831'

    #######################################
    # Error
    #######################################

    # Error_1
    # Not authorized
    def test_error_1(self, users, IbetShare):
        issuer = users['issuer']
        # issue token
        deploy_args = init_args()
        token = issuer.deploy(IbetShare, *deploy_args)
        # a non-owner sender must be rejected
        with brownie.reverts():
            token.setDividendInformation.transact(
                22000,
                '20200829',
                '20200831',
                {'from': users['user1']}
            )
        # assertion: values unchanged from deployment
        dividend_information = token.dividendInformation()
        assert dividend_information[0] == deploy_args[4]
        assert dividend_information[1] == deploy_args[5]
        assert dividend_information[2] == deploy_args[6]
# TEST_setCancellationDate
class TestSetCancellationDate:
    """Tests for setCancellationDate."""

    #######################################
    # Normal
    #######################################

    # Normal_1
    def test_normal_1(self, users, IbetShare):
        issuer = users['issuer']
        # issue token
        deploy_args = init_args()
        token = issuer.deploy(IbetShare, *deploy_args)
        # update cancellation date
        token.setCancellationDate.transact('20200831', {'from': issuer})
        # assertion
        assert token.cancellationDate() == '20200831'

    #######################################
    # Error
    #######################################

    # Error_1
    # Not authorized
    def test_error_1(self, users, IbetShare):
        issuer = users['issuer']
        # issue token
        deploy_args = init_args()
        token = issuer.deploy(IbetShare, *deploy_args)
        # a non-owner sender must be rejected
        with brownie.reverts():
            token.setCancellationDate.transact('20200930', {'from': users['user1']})
        # assertion: value unchanged from deployment
        assert token.cancellationDate() == deploy_args[7]
# TEST_setContactInformation
class TestSetContactInformation:
    """Tests for setContactInformation."""

    #######################################
    # Normal
    #######################################

    # Normal_1: deploy, then update
    def test_normal_1(self, users, IbetShare):
        issuer = users['issuer']
        # issue token
        deploy_args = init_args()
        token = issuer.deploy(IbetShare, *deploy_args)
        # update contact information
        token.setContactInformation.transact(
            'updated contact information',
            {'from': issuer}
        )
        # assertion
        assert token.contactInformation() == 'updated contact information'

    #######################################
    # Error
    #######################################

    # Error_1
    # Not authorized
    def test_error_1(self, users, IbetShare):
        issuer = users['issuer']
        # issue token
        deploy_args = init_args()
        token = issuer.deploy(IbetShare, *deploy_args)
        # a non-owner sender must be rejected
        with brownie.reverts():
            token.setContactInformation.transact(
                'updated contact information',
                {'from': users['user1']}
            )
        # assertion: still empty
        assert token.contactInformation() == ''
# TEST_setPrivacyPolicy
class TestSetPrivacyPolicy:
    """Tests for setPrivacyPolicy."""

    #######################################
    # Normal
    #######################################

    # Normal_1
    def test_normal_1(self, users, IbetShare):
        issuer = users['issuer']
        # issue token
        deploy_args = init_args()
        token = issuer.deploy(IbetShare, *deploy_args)
        # update privacy policy
        token.setPrivacyPolicy.transact(
            'updated privacy policy',
            {'from': issuer}
        )
        # assertion
        assert token.privacyPolicy() == 'updated privacy policy'

    #######################################
    # Error
    #######################################

    # Error_1
    # Not authorized
    def test_error_1(self, users, IbetShare):
        issuer = users['issuer']
        # issue token
        deploy_args = init_args()
        token = issuer.deploy(IbetShare, *deploy_args)
        # a non-owner sender must be rejected
        with brownie.reverts():
            token.setPrivacyPolicy.transact(
                'updated privacy policy',
                {'from': users['user1']}
            )
        # assertion: still empty
        assert token.privacyPolicy() == ''
# TEST_setMemo
class TestSetMemo:
    """Tests for setMemo."""

    #######################################
    # Normal
    #######################################

    # Normal_1
    def test_normal_1(self, users, IbetShare):
        issuer = users['issuer']
        # issue token
        deploy_args = init_args()
        token = issuer.deploy(IbetShare, *deploy_args)
        # set memo
        token.setMemo.transact('updated memo', {'from': issuer})
        # assertion
        assert token.memo() == 'updated memo'

    #######################################
    # Error
    #######################################

    # Error_1
    # Not authorized
    def test_error_1(self, users, IbetShare):
        issuer = users['issuer']
        # issue token
        deploy_args = init_args()
        token = issuer.deploy(IbetShare, *deploy_args)
        # a non-owner sender must be rejected
        with brownie.reverts():
            token.setMemo.transact('updated memo', {'from': users['user1']})
        # assertion: still empty
        assert token.memo() == ''
# TEST_setTransferable
class TestSetTransferable:
    """Tests for setTransferable."""

    #######################################
    # Normal
    #######################################

    # Normal_1
    def test_normal_1(self, users, IbetShare):
        issuer = users['issuer']
        # issue token
        deploy_args = init_args()
        token = issuer.deploy(IbetShare, *deploy_args)
        # enable transfers
        token.setTransferable.transact(True, {'from': issuer})
        # assertion
        assert token.transferable() is True

    #######################################
    # Error
    #######################################

    # Error_1
    # Not authorized
    def test_error_1(self, users, IbetShare):
        issuer = users['issuer']
        # issue token
        deploy_args = init_args()
        token = issuer.deploy(IbetShare, *deploy_args)
        # a non-owner sender must be rejected
        with brownie.reverts():
            token.setTransferable.transact(True, {'from': users['user1']})
        # assertion: still non-transferable
        assert token.transferable() is False
# TEST_changeOfferingStatus
class TestChangeOfferingStatus:
    """Tests for changeOfferingStatus."""

    #######################################
    # Normal
    #######################################

    # Normal_1
    def test_normal_1(self, users, IbetShare):
        issuer = users['issuer']
        # issue token
        deploy_args = init_args()
        token = issuer.deploy(IbetShare, *deploy_args)
        # start offering
        token.changeOfferingStatus.transact(True, {'from': issuer})
        # assertion
        assert token.isOffering() is True

    #######################################
    # Error
    #######################################

    # Error_1
    # Not authorized
    def test_error_1(self, users, IbetShare):
        issuer = users['issuer']
        # issue token
        deploy_args = init_args()
        token = issuer.deploy(IbetShare, *deploy_args)
        # a non-owner sender must be rejected
        with brownie.reverts():
            token.changeOfferingStatus.transact(True, {'from': users['user1']})
# TEST_balanceOf
class TestBalanceOf:
    """Tests for balanceOf."""

    #######################################
    # Normal
    #######################################

    # Normal_1
    def test_normal_1(self, users, IbetShare):
        issuer = users['issuer']
        # issue token
        deploy_args = init_args()
        token = issuer.deploy(IbetShare, *deploy_args)
        # assertion: the issuer starts with the full supply
        assert token.balanceOf(issuer) == deploy_args[3]
# TEST_authorizeLockAddress
class TestAuthorize:
    """Tests for authorizeLockAddress."""

    #######################################
    # Normal
    #######################################

    # Normal_1
    def test_normal_1(self, users, IbetShare):
        issuer = users['issuer']
        # issue token
        deploy_args = init_args()
        token = issuer.deploy(IbetShare, *deploy_args)
        # authorize an address as a lock target
        token.authorizeLockAddress.transact(
            brownie.ETH_ADDRESS,
            True,
            {'from': issuer}
        )
        # assertion
        assert token.authorizedLockAddress(brownie.ETH_ADDRESS) is True
        assert token.authorizedLockAddress(brownie.ZERO_ADDRESS) is False

    #######################################
    # Error
    #######################################

    # Error_1
    # Not authorized
    def test_error_1(self, users, IbetShare):
        issuer = users['issuer']
        # issue token
        deploy_args = init_args()
        token = issuer.deploy(IbetShare, *deploy_args)
        # a non-owner sender must be rejected
        with brownie.reverts():
            token.authorizeLockAddress.transact(
                brownie.ETH_ADDRESS,
                True,
                {'from': users['user1']}
            )
        # assertion: not authorized
        assert token.authorizedLockAddress(brownie.ETH_ADDRESS) is False
# TEST_lock
class TestLock:
    """Tests for lock."""

    #######################################
    # Normal
    #######################################

    # Normal_1
    # Lock assets to an authorized address
    def test_normal_1(self, users, IbetShare):
        issuer = users['issuer']
        user = users['user1']
        target = users['user2']
        transfer_amount = 30
        lock_amount = 10
        # issue token
        deploy_args = init_args()
        token = issuer.deploy(IbetShare, *deploy_args)
        # fund the user and authorize the lock target
        token.transferFrom.transact(issuer, user, transfer_amount, {'from': issuer})
        token.authorizeLockAddress.transact(target, True, {'from': issuer})
        # lock
        tx = token.lock.transact(target, lock_amount, {'from': user})
        # assertion
        assert token.balanceOf(user) == transfer_amount - lock_amount
        assert token.lockedOf(target, user) == lock_amount
        assert tx.events["Lock"]["accountAddress"] == user
        assert tx.events["Lock"]["lockAddress"] == target
        assert tx.events["Lock"]["value"] == lock_amount

    # Normal_2
    # Lock assets to the issuer address
    def test_normal_2(self, users, IbetShare):
        issuer = users['issuer']
        user = users['user1']
        transfer_amount = 30
        lock_amount = 10
        # issue token
        deploy_args = init_args()
        token = issuer.deploy(IbetShare, *deploy_args)
        # fund the user
        token.transferFrom.transact(issuer, user, transfer_amount, {'from': issuer})
        # lock to the issuer (always allowed, no authorization needed)
        token.lock.transact(issuer, lock_amount, {'from': user})
        # assertion
        assert token.balanceOf(user) == transfer_amount - lock_amount
        assert token.lockedOf(issuer, user) == lock_amount

    #######################################
    # Error
    #######################################

    # Error_1
    # Insufficient balance
    def test_error_1(self, users, IbetShare):
        issuer = users['issuer']
        user = users['user1']
        transfer_amount = 30
        lock_amount = 40
        # issue token
        deploy_args = init_args()
        token = issuer.deploy(IbetShare, *deploy_args)
        # fund the user with less than the lock amount
        token.transferFrom.transact(issuer, user, transfer_amount, {'from': issuer})
        # lock must revert
        with brownie.reverts():
            token.lock.transact(issuer, lock_amount, {'from': user})
        # assertion: balances unchanged
        assert token.balanceOf(user) == transfer_amount
        assert token.lockedOf(issuer, user) == 0

    # Error_2
    # Lock assets to a non-authorized address
    def test_error_2(self, users, IbetShare):
        issuer = users['issuer']
        user = users['user1']
        not_authorized_address = users['user2']
        transfer_amount = 30
        lock_amount = 10
        # issue token
        deploy_args = init_args()
        token = issuer.deploy(IbetShare, *deploy_args)
        # fund the user
        token.transferFrom.transact(issuer, user, transfer_amount, {'from': issuer})
        # lock to an unauthorized address must revert
        with brownie.reverts():
            token.lock.transact(not_authorized_address, lock_amount, {'from': user})
        # assertion: balances unchanged
        assert token.balanceOf(user) == transfer_amount
        assert token.lockedOf(issuer, user) == 0
# TEST_lockedOf
class TestLockedOf:
    """Tests for lockedOf."""

    #######################################
    # Normal
    #######################################

    # Normal_1
    def test_normal_1(self, users, IbetShare):
        issuer = users['issuer']
        user = users['user1']
        transfer_amount = 30
        lock_amount = 10
        # issue token
        deploy_args = init_args()
        token = issuer.deploy(IbetShare, *deploy_args)
        # fund the user and lock part of the balance to the issuer
        token.transferFrom.transact(issuer, user, transfer_amount, {'from': issuer})
        token.lock.transact(issuer, lock_amount, {'from': user})
        # assertion
        assert token.lockedOf(issuer, user) == lock_amount
# TEST_unlock
class TestUnlock:
    """Tests for unlock."""

    #######################################
    # Normal
    #######################################

    # Normal_1
    # Unlock by an authorized lock address
    def test_normal_1(self, users, IbetShare):
        issuer = users['issuer']
        user1 = users['user1']
        user2 = users['user2']
        target = users['agent']
        transfer_amount = 30
        lock_amount = 20
        unlock_amount = 10
        # issue token
        deploy_args = init_args()
        token = issuer.deploy(IbetShare, *deploy_args)
        # fund user1, authorize the lock address, then lock
        token.transferFrom.transact(issuer, user1, transfer_amount, {'from': issuer})
        token.authorizeLockAddress.transact(target, True, {'from': issuer})
        token.lock.transact(target, lock_amount, {'from': user1})
        # unlock part of the locked amount to user2
        tx = token.unlock.transact(user1, user2, unlock_amount, {'from': target})
        # assertion
        assert token.balanceOf(user1) == transfer_amount - lock_amount
        assert token.balanceOf(user2) == unlock_amount
        assert token.lockedOf(target, user1) == lock_amount - unlock_amount
        assert tx.events["Unlock"]["accountAddress"] == user1.address
        assert tx.events["Unlock"]["lockAddress"] == target.address
        assert tx.events["Unlock"]["recipientAddress"] == user2.address
        assert tx.events["Unlock"]["value"] == unlock_amount

    # Normal_2
    # Unlock by the issuer
    def test_normal_2(self, users, IbetShare):
        issuer = users['issuer']
        user1 = users['user1']
        user2 = users['user2']
        transfer_amount = 30
        lock_amount = 20
        unlock_amount = 10
        # issue token
        deploy_args = init_args()
        token = issuer.deploy(IbetShare, *deploy_args)
        # fund user1 and lock to the issuer
        token.transferFrom.transact(issuer, user1, transfer_amount, {'from': issuer})
        token.lock.transact(issuer, lock_amount, {'from': user1})
        # unlock part of the locked amount to user2
        tx = token.unlock.transact(user1, user2, unlock_amount, {'from': issuer})
        # assertion
        assert token.balanceOf(user1) == transfer_amount - lock_amount
        assert token.balanceOf(user2) == unlock_amount
        assert token.lockedOf(issuer, user1) == lock_amount - unlock_amount
        assert tx.events["Unlock"]["accountAddress"] == user1.address
        assert tx.events["Unlock"]["lockAddress"] == issuer.address
        assert tx.events["Unlock"]["recipientAddress"] == user2.address
        assert tx.events["Unlock"]["value"] == unlock_amount

    #######################################
    # Error
    #######################################

    # Error_1
    # Cannot unlock a quantity that exceeds the locked quantity
    def test_error_1(self, users, IbetShare):
        issuer = users['issuer']
        user1 = users['user1']
        user2 = users['user2']
        transfer_amount = 30
        lock_amount = 10
        unlock_amount = 11
        # issue token
        deploy_args = init_args()
        token = issuer.deploy(IbetShare, *deploy_args)
        # fund user1 and lock to the issuer
        token.transferFrom.transact(issuer, user1, transfer_amount, {'from': issuer})
        token.lock.transact(issuer, lock_amount, {'from': user1})
        # unlocking more than was locked must revert
        with brownie.reverts():
            token.unlock.transact(user1, user2, unlock_amount, {'from': issuer})
        # assertion: balances unchanged
        assert token.balanceOf(user1) == transfer_amount - lock_amount
        assert token.balanceOf(user2) == 0
        assert token.lockedOf(issuer, user1) == lock_amount

    # Error_2
    # Not authorized
    def test_error_2(self, users, IbetShare):
        issuer = users['issuer']
        user1 = users['user1']
        user2 = users['user2']
        transfer_amount = 30
        lock_amount = 10
        unlock_amount = 3
        # issue token
        deploy_args = init_args()
        token = issuer.deploy(IbetShare, *deploy_args)
        # fund user1 and lock to the issuer
        token.transferFrom.transact(issuer, user1, transfer_amount, {'from': issuer})
        token.lock.transact(issuer, lock_amount, {'from': user1})
        # unlock by an unauthorized sender must revert
        with brownie.reverts():
            token.unlock.transact(user1, user2, unlock_amount, {'from': user2})
        # assertion: balances unchanged
        assert token.balanceOf(user1) == transfer_amount - lock_amount
        assert token.balanceOf(user2) == 0
        assert token.lockedOf(issuer, user1) == lock_amount
# TEST_transfer
class TestTransfer:
    """Unit tests for IbetShare.transfer (direct, holder-initiated transfer)."""

    #######################################
    # Normal
    #######################################
    # Normal_1
    # Transfer to EOA
    def test_normal_1(self, users, personal_info):
        """Transfer to an EOA succeeds once the recipient has registered personal info."""
        issuer = users["issuer"]
        from_address = issuer
        to_address = users["trader"]
        transfer_amount = 100
        # issue token
        share_token, deploy_args = issue_transferable_share_token(
            issuer=issuer,
            exchange_address=brownie.ZERO_ADDRESS,
            personal_info_address=personal_info.address
        )
        # register personal info of to_address
        # (the recipient registers encrypted info keyed to the issuer's address)
        personal_info.register.transact(
            from_address.address,
            "encrypted_message",
            {'from': to_address}
        )
        # transfer
        tx = share_token.transfer.transact(
            to_address.address,
            transfer_amount,
            {"from": issuer}
        )
        # assertion
        # NOTE(review): deploy_args[3] is presumably the initial total supply --
        # it matches the issuer's starting balance in every assertion below.
        assert share_token.balanceOf(issuer) == deploy_args[3] - transfer_amount
        assert share_token.balanceOf(to_address) == transfer_amount
        assert tx.events["Transfer"]["from"] == from_address
        assert tx.events["Transfer"]["to"] == to_address
        assert tx.events["Transfer"]["value"] == transfer_amount

    # Normal_2
    # Transfer to contract address
    def test_normal_2(self, users, exchange, personal_info):
        """Transfer to the token's own tradable exchange contract succeeds."""
        issuer = users["issuer"]
        from_address = issuer
        transfer_amount = 100
        # issue token
        share_token, deploy_args = issue_transferable_share_token(
            issuer=issuer,
            exchange_address=exchange.address,
            personal_info_address=personal_info.address
        )
        # transfer
        to_address = exchange.address
        tx = share_token.transfer.transact(
            to_address,
            transfer_amount,
            {"from": from_address}
        )
        # assertion
        assert share_token.balanceOf(from_address) == deploy_args[3] - transfer_amount
        assert share_token.balanceOf(to_address) == transfer_amount
        assert tx.events["Transfer"]["from"] == from_address
        assert tx.events["Transfer"]["to"] == to_address
        assert tx.events["Transfer"]["value"] == transfer_amount

    #######################################
    # Error
    #######################################
    # Error_1
    # Insufficient balance
    def test_error_1(self, users, personal_info):
        """Transferring more than the sender's balance reverts; balances unchanged."""
        issuer = users["issuer"]
        from_address = issuer
        to_address = users["trader"]
        # issue token
        share_token, deploy_args = issue_transferable_share_token(
            issuer=issuer,
            exchange_address=brownie.ZERO_ADDRESS,
            personal_info_address=personal_info.address
        )
        # register personal info of to_address
        personal_info.register.transact(
            from_address.address,
            "encrypted_message",
            {'from': to_address}
        )
        # transfer
        # one more than the full supply -> must revert
        transfer_amount = deploy_args[3] + 1
        with brownie.reverts():
            share_token.transfer.transact(
                to_address.address,
                transfer_amount,
                {"from": issuer}
            )
        # assertion
        assert share_token.balanceOf(issuer) == deploy_args[3]
        assert share_token.balanceOf(to_address) == 0

    # Error_2
    # Cannot access private function
    def test_error_2(self, users):
        """Internal helpers are not exposed on the contract ABI."""
        issuer = users["issuer"]
        from_address = issuer
        to_address = users["trader"]
        transfer_amount = 100
        # issue token
        share_token, deploy_args = issue_transferable_share_token(
            issuer=issuer,
            exchange_address=brownie.ZERO_ADDRESS,
            personal_info_address=brownie.ZERO_ADDRESS
        )
        # brownie raises AttributeError for functions absent from the public ABI
        with pytest.raises(AttributeError):
            share_token.isContract(to_address)
        with pytest.raises(AttributeError):
            share_token.transferToAddress.transact(
                to_address,
                transfer_amount,
                "test_data",
                {"from": from_address}
            )
        with pytest.raises(AttributeError):
            share_token.transferToContract.transact(
                to_address,
                transfer_amount,
                "test_data",
                {"from": from_address}
            )

    # Error_3
    # Not transferable token
    def test_error_3(self, users, IbetShare):
        """Transfer reverts while the token is not transferable."""
        issuer = users["issuer"]
        to_address = users["trader"]
        transfer_amount = 100
        # issue token (init_args presumably leaves the token non-transferable)
        deploy_args = init_args()
        share_token = issuer.deploy(IbetShare, *deploy_args)
        # transfer
        with brownie.reverts(revert_msg="Must be transferable."):
            share_token.transfer.transact(
                to_address,
                transfer_amount,
                {"from": issuer}
            )
        # assertion
        from_balance = share_token.balanceOf(issuer)
        to_balance = share_token.balanceOf(to_address)
        assert from_balance == deploy_args[3]
        assert to_balance == 0

    # Error_4
    # Transfer to non-tradable exchange
    def test_error_4(self, users, IbetShare, exchange):
        """Transfer to a contract other than the token's tradableExchange reverts."""
        issuer = users['issuer']
        transfer_amount = 100
        # issue transferable token
        deploy_args = init_args()
        share_token = issuer.deploy(IbetShare, *deploy_args)
        share_token.setTransferable(
            True,
            {"from": issuer}
        )
        # transfer
        # `exchange` was never set as this token's tradableExchange
        with brownie.reverts(revert_msg="Transfers to contract addresses are only possible to tradableExchange."):
            share_token.transfer.transact(
                exchange,
                transfer_amount,
                {"from": issuer}
            )
        assert share_token.balanceOf(issuer) == deploy_args[3]
        assert share_token.balanceOf(exchange) == 0

    # Error_5
    # Transfer to an address with personal information not registered
    def test_error_5(self, users, personal_info):
        """Transfer reverts when the recipient has no personal information registered."""
        issuer = users["issuer"]
        to_address = users["trader"]
        transfer_amount = 100
        # issue token
        share_token, deploy_args = issue_transferable_share_token(
            issuer=issuer,
            exchange_address=brownie.ZERO_ADDRESS,
            personal_info_address=personal_info.address
        )
        # transfer (no personal_info.register call for to_address)
        with brownie.reverts(revert_msg="The transfer is only possible if personal information is registered."):
            share_token.transfer.transact(
                to_address.address,
                transfer_amount,
                {"from": issuer}
            )
        # assertion
        assert share_token.balanceOf(issuer) == deploy_args[3]
        assert share_token.balanceOf(to_address) == 0

    # Error_6
    # Tokens that require transfer approval
    def test_error_6(self, users, personal_info):
        """Direct transfer reverts when the token requires transfer approval."""
        issuer = users["issuer"]
        to_address = users["trader"]
        transfer_amount = 100
        # issue token
        share_token, deploy_args = issue_transferable_share_token(
            issuer=issuer,
            exchange_address=brownie.ZERO_ADDRESS,
            personal_info_address=personal_info.address
        )
        share_token.setTransferApprovalRequired(
            True,
            {"from": issuer}
        )
        # transfer
        with brownie.reverts(revert_msg="Direct transfer is not possible for tokens that require approval for transfer."):
            share_token.transfer.transact(
                to_address,
                transfer_amount,
                {"from": issuer}
            )
        # assertion
        from_balance = share_token.balanceOf(issuer)
        to_balance = share_token.balanceOf(to_address)
        assert from_balance == deploy_args[3]
        assert to_balance == 0
# TEST_bulkTransfer
class TestBulkTransfer:
    """Unit tests for IbetShare.bulkTransfer (batched transfers in one tx)."""

    #######################################
    # Normal
    #######################################
    # Normal_1
    # Bulk transfer to account address (1 data)
    def test_normal_1(self, users, personal_info):
        """Bulk transfer with a single entry moves that amount to the recipient."""
        issuer = users["issuer"]
        from_address = issuer
        to_address = users["trader"]
        # issue share token
        share_contract, deploy_args = issue_transferable_share_token(
            issuer=issuer,
            exchange_address=brownie.ZERO_ADDRESS,
            personal_info_address=personal_info.address
        )
        # register personal info (to_address)
        personal_info.register.transact(
            from_address.address,
            "encrypted_message",
            {"from": to_address}
        )
        # bulk transfer
        to_address_list = [to_address]
        amount_list = [1]
        share_contract.bulkTransfer.transact(
            to_address_list,
            amount_list,
            {"from": from_address}
        )
        # assertion
        from_balance = share_contract.balanceOf(from_address)
        to_balance = share_contract.balanceOf(to_address)
        assert from_balance == deploy_args[3] - 1
        assert to_balance == 1

    # Normal_2
    # Bulk transfer to account address (multiple data)
    def test_normal_2(self, users, personal_info):
        """Bulk transfer with 100 entries accumulates all amounts on the recipient."""
        issuer = users["issuer"]
        from_address = issuer
        to_address = users["trader"]
        # issue share token
        share_contract, deploy_args = issue_transferable_share_token(
            issuer=issuer,
            exchange_address=brownie.ZERO_ADDRESS,
            personal_info_address=personal_info.address
        )
        # register personal info (to_address)
        personal_info.register.transact(
            from_address.address,
            "encrypted_message",
            {"from": to_address}
        )
        # bulk transfer: 100 entries of 1 token each, all to the same recipient
        to_address_list = []
        amount_list = []
        for i in range(100):
            to_address_list.append(to_address)
            amount_list.append(1)
        share_contract.bulkTransfer.transact(
            to_address_list,
            amount_list,
            {"from": from_address}
        )
        # assertion
        from_balance = share_contract.balanceOf(from_address)
        to_balance = share_contract.balanceOf(to_address)
        assert from_balance == deploy_args[3] - 100
        assert to_balance == 100

    # Normal_3
    # Bulk transfer to contract address
    def test_normal_3(self, users, exchange, personal_info):
        """Bulk transfer to the token's tradable exchange contract succeeds."""
        issuer = users["issuer"]
        from_address = issuer
        # issue share token
        share_contract, deploy_args = issue_transferable_share_token(
            issuer=issuer,
            exchange_address=exchange.address,
            personal_info_address=personal_info.address
        )
        # bulk transfer
        to_address_list = [exchange.address]
        amount_list = [1]
        share_contract.bulkTransfer.transact(
            to_address_list,
            amount_list,
            {"from": from_address}
        )
        # assertion
        from_balance = share_contract.balanceOf(from_address)
        to_balance = share_contract.balanceOf(exchange.address)
        assert from_balance == deploy_args[3] - 1
        assert to_balance == 1

    #######################################
    # Error
    #######################################
    # Error_1
    # Insufficient balance
    def test_error_1(self, users, personal_info):
        """Bulk transfer reverts when the summed amounts exceed the balance."""
        issuer = users['issuer']
        from_address = issuer
        to_address = users['trader']
        # issue share token
        share_contract, deploy_args = issue_transferable_share_token(
            issuer=issuer,
            exchange_address=brownie.ZERO_ADDRESS,
            personal_info_address=personal_info.address
        )
        # register personal info (to_address)
        personal_info.register.transact(
            from_address,
            "encrypted_message",
            {"from": to_address}
        )
        # bulk transfer: full supply + 1 more in the second entry -> revert
        with brownie.reverts():
            share_contract.bulkTransfer.transact(
                [to_address, to_address],
                [deploy_args[3], 1],
                {'from': issuer}
            )
        # assertion: the whole batch is rolled back atomically
        assert share_contract.balanceOf(issuer) == deploy_args[3]
        assert share_contract.balanceOf(to_address) == 0

    # Error_2
    # Not transferable token
    def test_error_2(self, users, personal_info):
        """Bulk transfer reverts while the token is not transferable."""
        issuer = users["issuer"]
        from_address = issuer
        to_address = users["trader"]
        # issue share token
        share_contract, deploy_args = issue_transferable_share_token(
            issuer=issuer,
            exchange_address=brownie.ZERO_ADDRESS,
            personal_info_address=personal_info.address
        )
        share_contract.setTransferable.transact(
            False,
            {"from": issuer}
        )
        # register personal info (to_address)
        personal_info.register.transact(
            from_address,
            "encrypted_message",
            {"from": to_address}
        )
        # bulk transfer
        with brownie.reverts():
            share_contract.bulkTransfer.transact(
                [to_address],
                [1],
                {"from": issuer}
            )
        # assertion
        from_balance = share_contract.balanceOf(issuer)
        to_balance = share_contract.balanceOf(to_address)
        assert from_balance == deploy_args[3]
        assert to_balance == 0

    # Error_3
    # Transfer to an address with no personal information registered
    def test_error_3(self, users, personal_info):
        """Bulk transfer reverts when the recipient has no personal info registered."""
        issuer = users["issuer"]
        to_address = users["trader"]
        # issue share token
        share_contract, deploy_args = issue_transferable_share_token(
            issuer=issuer,
            exchange_address=brownie.ZERO_ADDRESS,
            personal_info_address=personal_info.address
        )
        # bulk transfer (no personal_info.register call for to_address)
        with brownie.reverts():
            share_contract.bulkTransfer.transact(
                [to_address],
                [1],
                {'from': issuer}
            )
        # assertion
        from_balance = share_contract.balanceOf(issuer)
        to_balance = share_contract.balanceOf(to_address)
        assert from_balance == deploy_args[3]
        assert to_balance == 0

    # Error_4
    # Tokens that require transfer approval cannot be executed.
    def test_error_4(self, users, personal_info):
        """Bulk transfer reverts when the token requires transfer approval."""
        issuer = users["issuer"]
        to_address = users["trader"]
        # issue share token
        share_contract, deploy_args = issue_transferable_share_token(
            issuer=issuer,
            exchange_address=brownie.ZERO_ADDRESS,
            personal_info_address=personal_info.address
        )
        share_contract.setTransferApprovalRequired(
            True,
            {"from": issuer}
        )
        # bulk transfer
        with brownie.reverts():
            share_contract.bulkTransfer.transact(
                [to_address],
                [1],
                {'from': issuer}
            )
        # assertion
        from_balance = share_contract.balanceOf(issuer)
        to_balance = share_contract.balanceOf(to_address)
        assert from_balance == deploy_args[3]
        assert to_balance == 0
# TEST_transferFrom
class TestTransferFrom:
    """Unit tests for IbetShare.transferFrom (issuer-forced transfer)."""

    #######################################
    # Normal
    #######################################
    # Normal_1
    def test_normal_1(self, users, personal_info):
        """The issuer can force-transfer tokens out of any holder's balance."""
        issuer = users['issuer']
        recipient = users['user1']
        amount = 100

        # issue token
        token, deploy_args = issue_transferable_share_token(
            issuer=issuer,
            exchange_address=brownie.ZERO_ADDRESS,
            personal_info_address=personal_info.address,
        )

        # forced transfer from the issuer's own balance to the recipient
        token.transferFrom.transact(issuer, recipient, amount, {'from': issuer})

        # assertion: amount moved, supply conserved
        assert token.balanceOf(issuer) == deploy_args[3] - amount
        assert token.balanceOf(recipient) == amount

    #######################################
    # Error
    #######################################
    # Error_1
    # Insufficient balance
    def test_error_1(self, users, personal_info):
        """A forced transfer exceeding the source balance reverts."""
        issuer = users['issuer']
        recipient = users['user1']

        # issue token
        token, deploy_args = issue_transferable_share_token(
            issuer=issuer,
            exchange_address=brownie.ZERO_ADDRESS,
            personal_info_address=personal_info.address,
        )

        # forced transfer of one more than the entire supply -> revert
        excess = deploy_args[3] + 1
        with brownie.reverts():
            token.transferFrom.transact(issuer, recipient, excess, {'from': issuer})

        # assertion: nothing moved
        assert token.balanceOf(issuer) == deploy_args[3]
        assert token.balanceOf(recipient) == 0

    # Error_2
    # Not authorized
    def test_error_2(self, users, personal_info):
        """A forced transfer sent by a non-issuer account reverts."""
        issuer = users['issuer']
        recipient = users['user1']

        # issue token
        token, deploy_args = issue_transferable_share_token(
            issuer=issuer,
            exchange_address=brownie.ZERO_ADDRESS,
            personal_info_address=personal_info.address,
        )

        # the recipient (not the issuer) attempts the forced transfer -> revert
        excess = deploy_args[3] + 1
        with brownie.reverts():
            token.transferFrom.transact(issuer, recipient, excess, {'from': recipient})

        # assertion: nothing moved
        assert token.balanceOf(issuer) == deploy_args[3]
        assert token.balanceOf(recipient) == 0
# TEST_applyForOffering
class TestApplyForOffering:
    """Unit tests for IbetShare.applyForOffering (offering applications)."""

    #######################################
    # Normal
    #######################################
    # Normal_1
    # Default value
    def test_normal_1(self, users, personal_info):
        """An address with no application has an all-default record."""
        issuer = users['issuer']
        # issue token
        share_token, deploy_args = issue_transferable_share_token(
            issuer=issuer,
            exchange_address=brownie.ZERO_ADDRESS,
            personal_info_address=personal_info.address
        )
        # assertion
        # application tuple layout (from assertions below):
        # [0] applied amount, [1] allotted amount, [2] application data
        application = share_token.applicationsForOffering(brownie.ETH_ADDRESS)
        assert application[0] == 0
        assert application[1] == 0
        assert application[2] == ''

    # Normal_2
    def test_normal_2(self, users, personal_info):
        """Applying records the requested amount and data for the applicant."""
        issuer = users['issuer']
        applicant = users['user1']
        # issue token
        share_token, deploy_args = issue_transferable_share_token(
            issuer=issuer,
            exchange_address=brownie.ZERO_ADDRESS,
            personal_info_address=personal_info.address
        )
        # update offering status
        share_token.changeOfferingStatus.transact(True, {'from': issuer})
        # register personal info of applicant
        personal_info.register.transact(
            issuer,
            "encrypted_message",
            {'from': applicant}
        )
        # apply for offering
        share_token.applyForOffering.transact(
            10,
            'abcdefgh',
            {'from': applicant}
        )
        # assertion
        application = share_token.applicationsForOffering(applicant)
        assert application[0] == 10
        assert application[1] == 0
        assert application[2] == 'abcdefgh'

    # Normal_3
    # Multiple applications
    def test_normal_3(self, users, personal_info):
        """Re-applying overwrites the previous application record."""
        issuer = users['issuer']
        applicant = users['user1']
        # issue token
        share_token, deploy_args = issue_transferable_share_token(
            issuer=issuer,
            exchange_address=brownie.ZERO_ADDRESS,
            personal_info_address=personal_info.address
        )
        # update offering status
        share_token.changeOfferingStatus.transact(True, {'from': issuer})
        # register personal info of applicant
        personal_info.register.transact(
            issuer,
            "encrypted_message",
            {'from': applicant}
        )
        # apply for offering (1)
        share_token.applyForOffering.transact(
            10,
            'abcdefgh',
            {'from': applicant}
        )
        # apply for offering (2)
        share_token.applyForOffering.transact(
            20,
            'vwxyz',
            {'from': applicant}
        )
        # assertion: only the second application remains
        application = share_token.applicationsForOffering(applicant)
        assert application[0] == 20
        assert application[1] == 0
        assert application[2] == 'vwxyz'

    #######################################
    # Error
    #######################################
    # Error_1
    # The offering status must be true.
    def test_error_1(self, users, personal_info):
        """Applying reverts while the offering status is false."""
        issuer = users['issuer']
        applicant = users['user1']
        # issue token (offering status is left at its default)
        share_token, deploy_args = issue_transferable_share_token(
            issuer=issuer,
            exchange_address=brownie.ZERO_ADDRESS,
            personal_info_address=personal_info.address
        )
        # apply for offering
        with brownie.reverts():
            share_token.applyForOffering.transact(
                10,
                'abcdefgh',
                {'from': applicant}
            )
        # assertion
        application = share_token.applicationsForOffering(applicant)
        assert application[0] == 0
        assert application[1] == 0
        assert application[2] == ''

    # Error_2
    # Applicant need to register personal information.
    def test_error_2(self, users, personal_info):
        """Applying reverts when the applicant has no personal info registered."""
        issuer = users['issuer']
        applicant = users['user1']
        # issue token
        share_token, deploy_args = issue_transferable_share_token(
            issuer=issuer,
            exchange_address=brownie.ZERO_ADDRESS,
            personal_info_address=personal_info.address
        )
        # update offering status
        share_token.changeOfferingStatus.transact(True, {'from': issuer})
        # apply for offering (no personal_info.register call for applicant)
        with brownie.reverts():
            share_token.applyForOffering.transact(
                10,
                'abcdefgh',
                {'from': applicant}
            )
        # assertion
        application = share_token.applicationsForOffering(applicant)
        assert application[0] == 0
        assert application[1] == 0
        assert application[2] == ''
# TEST_allot
class TestAllot:
    """Unit tests for IbetShare.allot (allotment against offering applications)."""

    #######################################
    # Normal
    #######################################
    # Normal_1
    def test_normal_1(self, users, personal_info):
        """The issuer can allot part of an applicant's offering application."""
        issuer = users['issuer']
        applicant = users['user1']

        # issue token
        token, _deploy_args = issue_transferable_share_token(
            issuer=issuer,
            exchange_address=brownie.ZERO_ADDRESS,
            personal_info_address=personal_info.address,
        )

        # open the offering, then register the applicant's personal info
        token.changeOfferingStatus.transact(True, {'from': issuer})
        personal_info.register.transact(issuer, "encrypted_message", {'from': applicant})

        # apply for 10 units, then allot 5 of them
        token.applyForOffering.transact(10, 'abcdefgh', {'from': applicant})
        token.allot.transact(applicant, 5, {'from': issuer})

        # assertion: record is (applied amount, allotted amount, data)
        record = token.applicationsForOffering(applicant)
        assert record[0] == 10
        assert record[1] == 5
        assert record[2] == 'abcdefgh'

    #######################################
    # Error
    #######################################
    # Error_1
    # Not authorized
    def test_error_1(self, users, personal_info):
        """allot called by a non-issuer account reverts and leaves no record."""
        issuer = users['issuer']
        applicant = users['user1']

        # issue token
        token, _deploy_args = issue_transferable_share_token(
            issuer=issuer,
            exchange_address=brownie.ZERO_ADDRESS,
            personal_info_address=personal_info.address,
        )

        # open the offering
        token.changeOfferingStatus.transact(True, {'from': issuer})

        # the applicant (not the issuer) attempts the allotment -> revert
        with brownie.reverts():
            token.allot.transact(applicant, 5, {'from': applicant})

        # assertion: record stays at defaults
        record = token.applicationsForOffering(applicant)
        assert record[0] == 0
        assert record[1] == 0
        assert record[2] == ''
# TEST_issueFrom
class TestIssueFrom:
    """Unit tests for IbetShare.issueFrom (additional issuance)."""

    #######################################
    # Normal
    #######################################
    # Normal_1
    # Issue from issuer address
    def test_normal_1(self, users, IbetShare):
        """Issuing to the issuer's balance raises both supply and balance."""
        issuer = users['issuer']
        issue_amount = 10
        # issue token
        deploy_args = init_args()
        deploy_args[3] = 1000  # index 3 is the initial total supply
        share_token = issuer.deploy(IbetShare, *deploy_args)
        # issue from issuer address (second arg ZERO_ADDRESS = not a locked balance)
        share_token.issueFrom.transact(
            issuer,
            brownie.ZERO_ADDRESS,
            issue_amount,
            {'from': issuer}
        )
        # assertion
        assert share_token.totalSupply() == deploy_args[3] + issue_amount
        assert share_token.balanceOf(issuer) == deploy_args[3] + issue_amount

    # Normal_2
    # Issue from EOA
    def test_normal_2(self, users, IbetShare):
        """Issuing to another EOA credits that account, not the issuer."""
        issuer = users['issuer']
        issue_amount = 10
        # issue token
        deploy_args = init_args()
        deploy_args[3] = 1000
        share_token = issuer.deploy(IbetShare, *deploy_args)
        # issue from EOA
        share_token.issueFrom.transact(
            brownie.ETH_ADDRESS,
            brownie.ZERO_ADDRESS,
            issue_amount,
            {'from': issuer}
        )
        # assertion
        assert share_token.totalSupply() == deploy_args[3] + issue_amount
        assert share_token.balanceOf(issuer) == deploy_args[3]
        assert share_token.balanceOf(brownie.ETH_ADDRESS) == issue_amount

    # Normal_3
    # Issue from locked address
    def test_normal_3(self, users, IbetShare):
        """Issuing into a locked balance increases the locked amount."""
        issuer = users['issuer']
        lock_address = users['user1']
        lock_amount = 10
        issue_amount = 10
        # issue token
        deploy_args = init_args()
        deploy_args[3] = 1000
        share_token = issuer.deploy(IbetShare, *deploy_args)
        # authorize lock address
        share_token.authorizeLockAddress.transact(lock_address, True, {'from': issuer})
        # lock
        share_token.lock.transact(lock_address, lock_amount, {'from': issuer})
        # issue from lock address
        share_token.issueFrom.transact(
            issuer,
            lock_address,
            issue_amount,
            {'from': issuer}
        )
        # assertion
        assert share_token.totalSupply() == deploy_args[3] + issue_amount
        assert share_token.balanceOf(issuer) == deploy_args[3] - lock_amount
        assert share_token.lockedOf(lock_address, issuer) == lock_amount + issue_amount

    #######################################
    # Error
    #######################################
    # Error_1_1
    # Over the limit
    # issuer address
    def test_error_1_1(self, users, IbetShare):
        """Issuing 1 more unit overflows the supply and reverts."""
        issuer = users['issuer']
        # issue token
        # NOTE(review): init_args() presumably sets the supply at the uint256
        # maximum, so issuing even 1 overflows -- confirm against init_args.
        deploy_args = init_args()
        share_token = issuer.deploy(IbetShare, *deploy_args)
        # issue from issuer address
        with brownie.reverts():
            share_token.issueFrom.transact(
                issuer,
                brownie.ZERO_ADDRESS,
                1,
                {'from': issuer}
            )

    # Error_1_2
    # Over the limit
    # locked address
    def test_error_1_2(self, users, IbetShare):
        """Issuing into a locked balance at the uint256 maximum reverts."""
        issuer = users['issuer']
        lock_address = users['user1']
        lock_amount = 2 ** 256 - 1  # uint256 max: locked balance is saturated
        issue_amount = 1
        # issue token
        deploy_args = init_args()
        share_token = issuer.deploy(IbetShare, *deploy_args)
        # authorize lock address
        share_token.authorizeLockAddress.transact(lock_address, True, {'from': issuer})
        # lock
        share_token.lock.transact(lock_address, lock_amount, {'from': issuer})
        # issue from lock address
        with brownie.reverts():
            share_token.issueFrom.transact(
                issuer,
                lock_address,
                issue_amount,
                {'from': issuer}
            )
        # assertion
        assert share_token.balanceOf(issuer) == deploy_args[3] - lock_amount
        assert share_token.lockedOf(lock_address, issuer) == lock_amount

    # Error_2
    # Not authorized
    def test_error_2(self, users, IbetShare):
        """issueFrom called by a non-issuer account reverts."""
        issuer = users['issuer']
        # issue token
        deploy_args = init_args()
        share_token = issuer.deploy(IbetShare, *deploy_args)
        # issue from not authorized user
        with brownie.reverts():
            share_token.issueFrom.transact(
                issuer,
                brownie.ZERO_ADDRESS,
                1,
                {'from': users['user1']}
            )
# TEST_redeemFrom
class TestRedeemFrom:
    """Unit tests for IbetShare.redeemFrom (burning tokens)."""

    #######################################
    # Normal
    #######################################
    # Normal_1
    # Redeem from issuer address
    def test_normal_1(self, users, IbetShare):
        """Redeeming from the issuer reduces both supply and the issuer's balance."""
        issuer = users['issuer']
        redeem_amount = 10
        # issue token
        deploy_args = init_args()
        share_token = issuer.deploy(IbetShare, *deploy_args)
        # redeem (second arg ZERO_ADDRESS = not from a locked balance)
        share_token.redeemFrom.transact(
            issuer,
            brownie.ZERO_ADDRESS,
            redeem_amount,
            {'from': issuer}
        )
        # assertion
        total_supply = share_token.totalSupply()
        balance = share_token.balanceOf(issuer)
        assert total_supply == deploy_args[3] - redeem_amount
        assert balance == deploy_args[3] - redeem_amount

    # Normal_2
    # Redeem from EOA
    def test_normal_2(self, users, IbetShare):
        """Redeeming from another holder's balance burns from that holder."""
        issuer = users['issuer']
        user = users['user1']
        transfer_amount = 20
        redeem_amount = 10
        # issue token
        deploy_args = init_args()
        deploy_args[3] = 1000  # index 3 is the initial total supply
        share_token = issuer.deploy(IbetShare, *deploy_args)
        # transfer to user
        share_token.transferFrom.transact(
            issuer,
            user,
            transfer_amount,
            {'from': issuer}
        )
        # redeem
        share_token.redeemFrom.transact(
            user,
            brownie.ZERO_ADDRESS,
            redeem_amount,
            {'from': issuer}
        )
        # assertion
        assert share_token.totalSupply() == deploy_args[3] - redeem_amount
        assert share_token.balanceOf(issuer) == deploy_args[3] - transfer_amount
        assert share_token.balanceOf(user) == transfer_amount - redeem_amount

    # Normal_3
    # Redeem from locked address
    def test_normal_3(self, users, IbetShare):
        """Redeeming from a locked balance reduces the locked amount."""
        issuer = users['issuer']
        lock_address = users['user1']
        lock_amount = 20
        redeem_amount = 10
        # issue token
        deploy_args = init_args()
        deploy_args[3] = 1000
        share_token = issuer.deploy(IbetShare, *deploy_args)
        # authorize lock address
        share_token.authorizeLockAddress.transact(
            lock_address,
            True,
            {'from': issuer}
        )
        # lock
        share_token.lock.transact(
            lock_address,
            lock_amount,
            {'from': issuer}
        )
        # redeem from lock address
        share_token.redeemFrom.transact(
            issuer,
            lock_address,
            redeem_amount,
            {'from': issuer}
        )
        # assertion
        assert share_token.totalSupply() == deploy_args[3] - redeem_amount
        assert share_token.balanceOf(issuer) == deploy_args[3] - lock_amount
        assert share_token.lockedOf(lock_address, issuer) == lock_amount - redeem_amount

    #######################################
    # Error
    #######################################
    # Error_1
    # Exceeds balance
    def test_error_1(self, users, IbetShare):
        """Redeeming more than the holder's balance reverts."""
        issuer = users['issuer']
        redeem_amount = 101  # one more than the supply set below
        # issue token
        deploy_args = init_args()
        deploy_args[3] = 100
        share_token = issuer.deploy(IbetShare, *deploy_args)
        # redeem
        with brownie.reverts():
            share_token.redeemFrom.transact(
                issuer,
                brownie.ZERO_ADDRESS,
                redeem_amount,
                {'from': issuer}
            )
        # assertion
        assert share_token.totalSupply() == deploy_args[3]
        assert share_token.balanceOf(issuer) == deploy_args[3]

    # Error_2
    # Exceeds locked quantity
    def test_error_2(self, users, IbetShare):
        """Redeeming more than the locked amount reverts."""
        issuer = users['issuer']
        lock_address = users['user1']
        lock_amount = 20
        redeem_amount = 21  # one more than the locked amount
        # issue token
        deploy_args = init_args()
        deploy_args[3] = 1000
        share_token = issuer.deploy(IbetShare, *deploy_args)
        # authorize lock address
        share_token.authorizeLockAddress.transact(
            lock_address,
            True,
            {'from': issuer}
        )
        # lock
        share_token.lock.transact(
            lock_address,
            lock_amount,
            {'from': issuer}
        )
        # redeem from lock address
        with brownie.reverts():
            share_token.redeemFrom.transact(
                issuer,
                lock_address,
                redeem_amount,
                {'from': issuer}
            )
        # assertion
        assert share_token.totalSupply() == deploy_args[3]
        assert share_token.balanceOf(issuer) == deploy_args[3] - lock_amount
        assert share_token.lockedOf(lock_address, issuer) == lock_amount

    # Error_3
    # Not authorized
    def test_error_3(self, users, IbetShare):
        """redeemFrom called by a non-issuer account reverts."""
        issuer = users['issuer']
        redeem_amount = 100
        # issue token
        deploy_args = init_args()
        share_token = issuer.deploy(IbetShare, *deploy_args)
        # redeem
        with brownie.reverts():
            share_token.redeemFrom.transact(
                issuer,
                brownie.ZERO_ADDRESS,
                redeem_amount,
                {'from': users['user1']}
            )
        # assertion
        assert share_token.totalSupply() == deploy_args[3]
        assert share_token.balanceOf(issuer) == deploy_args[3]
# TEST_applyForTransfer
class TestApplyForTransfer:
    """Unit tests for IbetShare.applyForTransfer (approval-required transfers)."""

    #######################################
    # Normal
    #######################################
    # Normal_1
    def test_normal_1(self, users, personal_info):
        """Applying escrows the amount in pendingTransfer and records the application."""
        issuer = users["issuer"]
        to_address = users["user1"]
        transfer_amount = 100
        transfer_data = "test_data"
        # issue token
        share_token, deploy_args = issue_transferable_share_token(
            issuer=issuer,
            exchange_address=brownie.ZERO_ADDRESS,
            personal_info_address=personal_info.address
        )
        share_token.setTransferApprovalRequired(
            True,
            {"from": issuer}
        )
        # register personal information (to_address)
        personal_info.register(
            issuer,
            "encrypted_message",
            {"from": to_address}
        )
        # apply for transfer
        tx = share_token.applyForTransfer(
            to_address,
            transfer_amount,
            transfer_data,
            {"from": issuer}
        )
        # assertion
        # NOTE: these tests read the public `balances` mapping directly
        # rather than balanceOf (both presumably expose the same storage).
        assert share_token.balances(issuer) == deploy_args[3] - transfer_amount
        assert share_token.balances(to_address) == 0
        assert share_token.pendingTransfer(issuer) == transfer_amount
        # application tuple: (from, to, amount, valid-flag)
        assert share_token.applicationsForTransfer(0) == (
            issuer,
            to_address,
            transfer_amount,
            True
        )
        assert tx.events["ApplyForTransfer"]["index"] == 0
        assert tx.events["ApplyForTransfer"]["from"] == issuer
        assert tx.events["ApplyForTransfer"]["to"] == to_address
        assert tx.events["ApplyForTransfer"]["value"] == transfer_amount
        assert tx.events["ApplyForTransfer"]["data"] == transfer_data

    # Normal_2
    # Multiple execution
    def test_normal_2(self, users, personal_info):
        """Multiple applications each escrow their amount and get their own index."""
        issuer = users["issuer"]
        to_address = users["user1"]
        transfer_amount = 100
        transfer_data = "test_data"
        # issue token
        share_token, deploy_args = issue_transferable_share_token(
            issuer=issuer,
            exchange_address=brownie.ZERO_ADDRESS,
            personal_info_address=personal_info.address
        )
        share_token.setTransferApprovalRequired(
            True,
            {"from": issuer}
        )
        # register personal information (to_address)
        personal_info.register(
            issuer,
            "encrypted_message",
            {"from": to_address}
        )
        # apply for transfer
        for i in range(2):
            share_token.applyForTransfer(
                to_address,
                transfer_amount,
                transfer_data,
                {"from": issuer}
            )
        # assertion
        assert share_token.balances(issuer) == deploy_args[3] - transfer_amount * 2
        assert share_token.balances(to_address) == 0
        assert share_token.pendingTransfer(issuer) == transfer_amount * 2
        for i in range(2):
            assert share_token.applicationsForTransfer(i) == (
                issuer,
                to_address,
                transfer_amount,
                True
            )

    # Normal_3
    # Transfer to issuer
    # No need to register personal information
    def test_normal_3(self, users, personal_info):
        """Applying to transfer to the issuer works without a personal-info record."""
        issuer = users["issuer"]
        to_address = issuer
        transfer_amount = 100
        transfer_data = "test_data"
        # issue token
        share_token, deploy_args = issue_transferable_share_token(
            issuer=issuer,
            exchange_address=brownie.ZERO_ADDRESS,
            personal_info_address=personal_info.address
        )
        share_token.setTransferApprovalRequired(
            True,
            {"from": issuer}
        )
        # apply for transfer
        share_token.applyForTransfer(
            to_address,
            transfer_amount,
            transfer_data,
            {"from": issuer}
        )
        # assertion
        assert share_token.balances(issuer) == deploy_args[3] - transfer_amount
        assert share_token.pendingTransfer(issuer) == transfer_amount
        assert share_token.applicationsForTransfer(0) == (
            issuer,
            to_address,
            transfer_amount,
            True
        )

    #######################################
    # Error
    #######################################
    # Error_1
    # transferApprovalRequired = false
    def test_error_1(self, users):
        """Applying reverts when the token does not require transfer approval."""
        issuer = users["issuer"]
        to_address = users["user1"]
        transfer_amount = 100
        transfer_data = "test_data"
        # issue token
        share_token, deploy_args = issue_transferable_share_token(
            issuer=issuer,
            exchange_address=brownie.ZERO_ADDRESS,
            personal_info_address=brownie.ZERO_ADDRESS
        )
        # apply for transfer
        with brownie.reverts():
            share_token.applyForTransfer(
                to_address,
                transfer_amount,
                transfer_data,
                {"from": issuer}
            )
        # assertion
        assert share_token.balances(issuer) == deploy_args[3]
        assert share_token.balances(to_address) == 0
        assert share_token.pendingTransfer(issuer) == 0

    # Error_2
    # transferable = false
    def test_error_2(self, users):
        """Applying reverts while the token is not transferable."""
        issuer = users["issuer"]
        to_address = users["user1"]
        transfer_amount = 100
        transfer_data = "test_data"
        # issue token
        share_token, deploy_args = issue_transferable_share_token(
            issuer=issuer,
            exchange_address=brownie.ZERO_ADDRESS,
            personal_info_address=brownie.ZERO_ADDRESS
        )
        share_token.setTransferApprovalRequired(
            True,
            {"from": issuer}
        )
        share_token.setTransferable(
            False,
            {"from": issuer}
        )
        # apply for transfer
        with brownie.reverts():
            share_token.applyForTransfer(
                to_address,
                transfer_amount,
                transfer_data,
                {"from": issuer}
            )
        # assertion
        assert share_token.balances(issuer) == deploy_args[3]
        assert share_token.balances(to_address) == 0
        assert share_token.pendingTransfer(issuer) == 0

    # Error_3
    # Insufficient balance
    def test_error_3(self, users):
        """Applying for more than the sender's balance reverts."""
        issuer = users["issuer"]
        to_address = users["user1"]
        transfer_data = "test_data"
        # issue token
        share_token, deploy_args = issue_transferable_share_token(
            issuer=issuer,
            exchange_address=brownie.ZERO_ADDRESS,
            personal_info_address=brownie.ZERO_ADDRESS
        )
        share_token.setTransferApprovalRequired(
            True,
            {"from": issuer}
        )
        # apply for transfer (full supply + 1 -> revert)
        with brownie.reverts():
            share_token.applyForTransfer(
                to_address,
                deploy_args[3] + 1,
                transfer_data,
                {"from": issuer}
            )
        # assertion
        assert share_token.balances(issuer) == deploy_args[3]
        assert share_token.balances(to_address) == 0
        assert share_token.pendingTransfer(issuer) == 0

    # Error_4
    # Personal information is not registered
    def test_error_4(self, users, personal_info):
        """Applying reverts when the recipient has no personal info registered."""
        issuer = users["issuer"]
        to_address = users["user1"]
        transfer_amount = 100
        transfer_data = "test_data"
        # issue token
        share_token, deploy_args = issue_transferable_share_token(
            issuer=issuer,
            exchange_address=brownie.ZERO_ADDRESS,
            personal_info_address=personal_info.address
        )
        share_token.setTransferApprovalRequired(
            True,
            {"from": issuer}
        )
        # apply for transfer (no personal_info.register call for to_address)
        with brownie.reverts():
            share_token.applyForTransfer(
                to_address,
                transfer_amount,
                transfer_data,
                {"from": issuer}
            )
        # assertion
        assert share_token.balances(issuer) == deploy_args[3]
        assert share_token.balances(to_address) == 0
        assert share_token.pendingTransfer(issuer) == 0
# TEST_cancelTransfer
class TestCancelTransfer:
#######################################
# Normal
#######################################
# Normal_1
# Cancel by applicant
    def test_normal_1(self, users, personal_info):
        """The applicant can cancel their own pending transfer application.

        The escrowed amount returns to the applicant's balance, the
        application record is marked invalid, and CancelTransfer is emitted.
        """
        issuer = users["issuer"]
        user1 = users["user1"]
        user2 = users["user2"]
        transfer_amount = 100
        # issue token
        share_token, deploy_args = issue_transferable_share_token(
            issuer=issuer,
            exchange_address=brownie.ZERO_ADDRESS,
            personal_info_address=personal_info.address
        )
        share_token.transferFrom(
            issuer,
            user1,
            transfer_amount,
            {"from": issuer}
        )
        share_token.setTransferApprovalRequired(
            True,
            {"from": issuer}
        )
        # register personal information (to_address)
        personal_info.register(
            issuer,
            "encrypted_message",
            {"from": user2}
        )
        # apply for transfer
        share_token.applyForTransfer(
            user2,
            transfer_amount,
            "test_data",
            {"from": user1}  # from user1 to user2
        )
        # cancel transfer (from applicant)
        tx = share_token.cancelTransfer(
            0,
            "test_data",
            {"from": user1}
        )
        # assertion
        assert share_token.balances(issuer) == deploy_args[3] - transfer_amount
        assert share_token.balances(user1) == transfer_amount
        assert share_token.pendingTransfer(user1) == 0
        # application tuple: (from, to, amount, valid-flag); False = cancelled
        assert share_token.applicationsForTransfer(0) == (
            user1,
            user2,
            transfer_amount,
            False
        )
        assert tx.events["CancelTransfer"]["index"] == 0
        assert tx.events["CancelTransfer"]["from"] == user1
        assert tx.events["CancelTransfer"]["to"] == user2
        assert tx.events["CancelTransfer"]["data"] == "test_data"
# Normal_2
# Cancel by issuer
def test_normal_2(self, users, personal_info):
issuer = users["issuer"]
user1 = users["user1"]
user2 = users["user2"]
transfer_amount = 100
# issue token
share_token, deploy_args = issue_transferable_share_token(
issuer=issuer,
exchange_address=brownie.ZERO_ADDRESS,
personal_info_address=personal_info.address
)
share_token.transferFrom(
issuer,
user1,
transfer_amount,
{"from": issuer}
)
share_token.setTransferApprovalRequired(
True,
{"from": issuer}
)
# register personal information (to_address)
personal_info.register(
issuer,
"encrypted_message",
{"from": user2}
)
# apply for transfer
share_token.applyForTransfer(
user2,
transfer_amount,
"test_data",
{"from": user1} # from user1 to user2
)
# cancel transfer (from issuer)
tx = share_token.cancelTransfer(
0,
"test_data",
{"from": issuer}
)
# assertion
assert share_token.balances(issuer) == deploy_args[3] - transfer_amount
assert share_token.balances(user1) == transfer_amount
assert share_token.pendingTransfer(user1) == 0
assert share_token.applicationsForTransfer(0) == (
user1,
user2,
transfer_amount,
False
)
assert tx.events["CancelTransfer"]["index"] == 0
assert tx.events["CancelTransfer"]["from"] == user1
assert tx.events["CancelTransfer"]["to"] == user2
assert tx.events["CancelTransfer"]["data"] == "test_data"
#######################################
# Error
#######################################
# Error_1
# Not authorized
def test_error_1(self, users, personal_info):
issuer = users["issuer"]
user1 = users["user1"]
user2 = users["user2"]
transfer_amount = 100
# issue token
share_token, deploy_args = issue_transferable_share_token(
issuer=issuer,
exchange_address=brownie.ZERO_ADDRESS,
personal_info_address=personal_info.address
)
share_token.transferFrom(
issuer,
user1,
transfer_amount,
{"from": issuer}
)
share_token.setTransferApprovalRequired(
True,
{"from": issuer}
)
# register personal information (to_address)
personal_info.register(
issuer,
"encrypted_message",
{"from": user2}
)
# apply for transfer
share_token.applyForTransfer(
user2,
transfer_amount,
"test_data",
{"from": user1} # from user1 to user2
)
# cancel transfer (from issuer)
with brownie.reverts():
share_token.cancelTransfer(
0,
"test_data",
{"from": user2}
)
# assertion
assert share_token.balances(issuer) == deploy_args[3] - transfer_amount
assert share_token.balances(user1) == 0
assert share_token.pendingTransfer(user1) == transfer_amount
assert share_token.applicationsForTransfer(0) == (
user1,
user2,
transfer_amount,
True
)
# Error_2
# Applications that have already been cancelled cannot be cancelled.
def test_error_2(self, users, personal_info):
issuer = users["issuer"]
user1 = users["user1"]
user2 = users["user2"]
transfer_amount = 100
# issue token
share_token, deploy_args = issue_transferable_share_token(
issuer=issuer,
exchange_address=brownie.ZERO_ADDRESS,
personal_info_address=personal_info.address
)
share_token.transferFrom(
issuer,
user1,
transfer_amount,
{"from": issuer}
)
share_token.setTransferApprovalRequired(
True,
{"from": issuer}
)
# register personal information (to_address)
personal_info.register(
issuer,
"encrypted_message",
{"from": user2}
)
# apply for transfer
share_token.applyForTransfer(
user2,
transfer_amount,
"test_data",
{"from": user1} # from user1 to user2
)
# cancel transfer (1)
share_token.cancelTransfer(
0,
"test_data",
{"from": user1}
)
# cancel transfer (2)
with brownie.reverts():
share_token.cancelTransfer(
0,
"test_data",
{"from": user1}
)
# assertion
assert share_token.balances(issuer) == deploy_args[3] - transfer_amount
assert share_token.balances(user1) == transfer_amount
assert share_token.pendingTransfer(user1) == 0
assert share_token.applicationsForTransfer(0) == (
user1,
user2,
transfer_amount,
False
)
# TEST_approveTransfer
class TestApproveTransfer:
    """Tests for IbetShare.approveTransfer.

    approveTransfer(index, data) may only be called by the issuer; on
    success it moves the escrowed amount to the recipient's balance and
    emits both ApproveTransfer and Transfer events.
    """

    #######################################
    # Normal
    #######################################

    # Normal_1
    def test_normal_1(self, users, personal_info):
        """Issuer approval settles the pending transfer to the recipient."""
        issuer = users["issuer"]
        user1 = users["user1"]
        transfer_amount = 100

        # issue token
        share_token, deploy_args = issue_transferable_share_token(
            issuer=issuer,
            exchange_address=brownie.ZERO_ADDRESS,
            personal_info_address=personal_info.address
        )
        share_token.setTransferApprovalRequired(
            True,
            {"from": issuer}
        )

        # register personal information (to_address)
        personal_info.register(
            issuer,
            "encrypted_message",
            {"from": user1}
        )

        # apply for transfer
        share_token.applyForTransfer(
            user1,
            transfer_amount,
            "test_data",
            {"from": issuer}  # from issuer to user1
        )

        # approve transfer
        tx = share_token.approveTransfer(
            0,
            "test_data",
            {"from": issuer}
        )

        # assertion
        assert share_token.balances(issuer) == deploy_args[3] - transfer_amount
        assert share_token.balances(user1) == transfer_amount
        assert share_token.pendingTransfer(issuer) == 0
        assert share_token.applicationsForTransfer(0) == (
            issuer,
            user1,
            transfer_amount,
            False
        )
        assert tx.events["ApproveTransfer"]["index"] == 0
        assert tx.events["ApproveTransfer"]["from"] == issuer
        assert tx.events["ApproveTransfer"]["to"] == user1
        assert tx.events["ApproveTransfer"]["data"] == "test_data"
        # the settlement also emits a regular Transfer event
        assert tx.events["Transfer"]["from"] == issuer
        assert tx.events["Transfer"]["to"] == user1
        assert tx.events["Transfer"]["value"] == transfer_amount

    #######################################
    # Error
    #######################################

    # Error_1
    # Not authorized
    def test_error_1(self, users, personal_info):
        """A non-issuer (the recipient) cannot approve the transfer."""
        issuer = users["issuer"]
        user1 = users["user1"]
        transfer_amount = 100

        # issue token
        share_token, deploy_args = issue_transferable_share_token(
            issuer=issuer,
            exchange_address=brownie.ZERO_ADDRESS,
            personal_info_address=personal_info.address
        )
        share_token.setTransferApprovalRequired(
            True,
            {"from": issuer}
        )

        # register personal information (to_address)
        personal_info.register(
            issuer,
            "encrypted_message",
            {"from": user1}
        )

        # apply for transfer
        share_token.applyForTransfer(
            user1,
            transfer_amount,
            "test_data",
            {"from": issuer}  # from issuer to user1
        )

        # approve transfer
        with brownie.reverts():
            share_token.approveTransfer(
                0,
                "test_data",
                {"from": user1}
            )

        # assertion
        # the application stays pending (flag True, amount still escrowed)
        assert share_token.balances(issuer) == deploy_args[3] - transfer_amount
        assert share_token.balances(user1) == 0
        assert share_token.pendingTransfer(issuer) == transfer_amount
        assert share_token.applicationsForTransfer(0) == (
            issuer,
            user1,
            transfer_amount,
            True
        )

    # Error_2
    # transferable = false
    def test_error_2(self, users, personal_info):
        """Approval reverts when the token has been made non-transferable."""
        issuer = users["issuer"]
        user1 = users["user1"]
        transfer_amount = 100

        # issue token
        share_token, deploy_args = issue_transferable_share_token(
            issuer=issuer,
            exchange_address=brownie.ZERO_ADDRESS,
            personal_info_address=personal_info.address
        )
        share_token.setTransferApprovalRequired(
            True,
            {"from": issuer}
        )

        # register personal information (to_address)
        personal_info.register(
            issuer,
            "encrypted_message",
            {"from": user1}
        )

        # apply for transfer
        share_token.applyForTransfer(
            user1,
            transfer_amount,
            "test_data",
            {"from": issuer}  # from issuer to user1
        )

        # approve transfer
        # turning transferability off after the application was filed
        # must block settlement
        share_token.setTransferable(
            False,
            {"from": issuer}
        )
        with brownie.reverts():
            share_token.approveTransfer(
                0,
                "test_data",
                {"from": issuer}
            )

        # assertion
        assert share_token.balances(issuer) == deploy_args[3] - transfer_amount
        assert share_token.balances(user1) == 0
        assert share_token.pendingTransfer(issuer) == transfer_amount
        assert share_token.applicationsForTransfer(0) == (
            issuer,
            user1,
            transfer_amount,
            True
        )

    # Error_3
    # Applications that have already been approved cannot be approved.
    def test_error_3(self, users, personal_info):
        """Approving the same application twice reverts on the second call."""
        issuer = users["issuer"]
        user1 = users["user1"]
        transfer_amount = 100

        # issue token
        share_token, deploy_args = issue_transferable_share_token(
            issuer=issuer,
            exchange_address=brownie.ZERO_ADDRESS,
            personal_info_address=personal_info.address
        )
        share_token.setTransferApprovalRequired(
            True,
            {"from": issuer}
        )

        # register personal information (to_address)
        personal_info.register(
            issuer,
            "encrypted_message",
            {"from": user1}
        )

        # apply for transfer
        share_token.applyForTransfer(
            user1,
            transfer_amount,
            "test_data",
            {"from": issuer}  # from issuer to user1
        )

        # approve transfer (1)
        share_token.approveTransfer(
            0,
            "test_data",
            {"from": issuer}
        )

        # approve transfer (2)
        with brownie.reverts():
            share_token.approveTransfer(
                0,
                "test_data",
                {"from": issuer}
            )

        # assertion
        # state reflects the first (successful) approval only
        assert share_token.balances(issuer) == deploy_args[3] - transfer_amount
        assert share_token.balances(user1) == transfer_amount
        assert share_token.pendingTransfer(issuer) == 0
        assert share_token.applicationsForTransfer(0) == (
            issuer,
            user1,
            transfer_amount,
            False
        )
# TEST_setTransferApprovalRequired
class TestSetTransferApprovalRequired:
    """Tests for IbetShare.setTransferApprovalRequired (issuer-only flag)."""

    #######################################
    # Normal
    #######################################

    # Normal_1
    # Default value
    def test_normal_1(self, users):
        """The flag defaults to False on a freshly issued token."""
        issuer = users["issuer"]

        # issue token
        share_token, deploy_args = issue_transferable_share_token(
            issuer=issuer,
            exchange_address=brownie.ZERO_ADDRESS,
            personal_info_address=brownie.ZERO_ADDRESS
        )

        # assertion
        assert share_token.transferApprovalRequired() == False

    # Normal_2
    def test_normal_2(self, users):
        """The issuer can enable the flag; the change event is emitted."""
        issuer = users["issuer"]

        # issue token
        share_token, deploy_args = issue_transferable_share_token(
            issuer=issuer,
            exchange_address=brownie.ZERO_ADDRESS,
            personal_info_address=brownie.ZERO_ADDRESS
        )

        # update
        tx = share_token.setTransferApprovalRequired(
            True,
            {"from": issuer}
        )

        # assertion
        assert share_token.transferApprovalRequired() == True
        assert tx.events["ChangeTransferApprovalRequired"]["required"] == True

    #######################################
    # Error
    #######################################

    # Error_1
    # Not authorized
    def test_error_1(self, users, personal_info):
        """A non-issuer cannot change the flag; it stays at its default."""
        issuer = users["issuer"]

        # issue token
        share_token, deploy_args = issue_transferable_share_token(
            issuer=issuer,
            exchange_address=brownie.ZERO_ADDRESS,
            personal_info_address=personal_info.address
        )

        # set required to True
        with brownie.reverts():
            share_token.setTransferApprovalRequired(
                True,
                {"from": users["user1"]}
            )

        # assertion
        assert share_token.transferApprovalRequired() == False
# TEST_Cancel
class TestCancel:
    """Tests for IbetShare.changeToCanceled (issuer-only lifecycle flag)."""

    #######################################
    # Normal
    #######################################

    # Normal_1
    def test_normal_1(self, users, IbetShare):
        """The issuer can mark the token as canceled."""
        issuer = users["issuer"]

        # issue token
        deploy_args = init_args()
        share_token = issuer.deploy(IbetShare, *deploy_args)

        # cancel
        share_token.changeToCanceled(
            {"from": issuer}
        )

        # assertion
        is_canceled = share_token.isCanceled()
        assert is_canceled is True

    #######################################
    # Error
    #######################################

    # Error_1
    # Not authorized
    def test_error_1(self, users, IbetShare):
        """A non-issuer cannot cancel; the flag remains False."""
        issuer = users["issuer"]

        # issue token
        deploy_args = init_args()
        share_token = issuer.deploy(IbetShare, *deploy_args)

        # cancel
        with brownie.reverts():
            share_token.changeToCanceled(
                {"from": users["user1"]}
            )

        # assertion
        is_canceled = share_token.isCanceled()
        assert is_canceled is False
# TEST_setStatus
class TestSetStatus:
    """Tests for IbetShare.setStatus (issuer-only availability flag)."""

    #######################################
    # Normal
    #######################################

    # Normal_1
    def test_normal_1(self, users, IbetShare):
        """The issuer can switch the status flag off."""
        admin = users['issuer']

        # deploy a fresh token owned by the issuer
        args = init_args()
        token = admin.deploy(IbetShare, *args)

        # flip the status flag and confirm the change took effect
        token.setStatus(False, {'from': admin})
        assert token.status() is False

    #######################################
    # Error
    #######################################

    # Error_1
    # Not authorized
    def test_error_1(self, users, IbetShare):
        """A non-issuer attempting to change the status must revert."""
        admin = users['issuer']

        # deploy a fresh token owned by the issuer
        args = init_args()
        token = admin.deploy(IbetShare, *args)

        # a different account may not toggle the flag
        with brownie.reverts():
            token.setStatus(False, {'from': users['user1']})
| 28.680879
| 122
| 0.557705
| 8,195
| 88,796
| 5.79817
| 0.03856
| 0.092811
| 0.044111
| 0.042133
| 0.884881
| 0.856848
| 0.833382
| 0.816988
| 0.79489
| 0.772392
| 0
| 0.015892
| 0.314023
| 88,796
| 3,095
| 123
| 28.690145
| 0.764184
| 0.101863
| 0
| 0.778747
| 0
| 0
| 0.047016
| 0.0004
| 0
| 0
| 0
| 0
| 0.13188
| 1
| 0.051226
| false
| 0
| 0.001635
| 0
| 0.069755
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ffaae45c5fc330ae7c860fa039b86ec367618bdb
| 12,122
|
py
|
Python
|
project/server/api/donor.py
|
tafodinho/transfuzol_api
|
f8a37e4665c746e6a123b26971ba09066f342296
|
[
"MIT"
] | null | null | null |
project/server/api/donor.py
|
tafodinho/transfuzol_api
|
f8a37e4665c746e6a123b26971ba09066f342296
|
[
"MIT"
] | null | null | null |
project/server/api/donor.py
|
tafodinho/transfuzol_api
|
f8a37e4665c746e6a123b26971ba09066f342296
|
[
"MIT"
] | null | null | null |
# project/server/auth/views.py
from flask import Blueprint, request, make_response, jsonify
from flask.views import MethodView
from sqlalchemy import inspect
import datetime
import json
from project.server import bcrypt, db
from project.server.models.Donor import Donor
from project.server.models.User import User
donor_blueprint = Blueprint('donors', __name__)
# Blood-group compatibility table: RECEIVE_MATCH[donor_group] lists every
# recipient blood group that can safely receive blood from donor_group.
# (O- is the universal donor; AB+ is the universal recipient.)
RECEIVE_MATCH = {
    'O-': ['O-', 'O+', 'B-', 'B+', 'A-', 'A+', 'AB-', 'AB+'],
    'O+': ['O+', 'B+', 'A+', 'AB+'],
    'B-': ['B-', 'B+', 'AB-', 'AB+'],
    'B+': ['B+', 'AB+'],
    # BUG FIX: the 'A-' entry previously read ['B+', 'AB+'] — A-negative
    # blood is compatible with A and AB recipients, never with B+.
    'A-': ['A-', 'A+', 'AB-', 'AB+'],
    'A+': ['A+', 'AB+'],
    'AB-': ['AB-', 'AB+'],
    'AB+': ['AB+']
}
class DonorAPI(MethodView):
    """
    Donor collection resource.

    GET    /api/donors  -- list every donor (requires a Bearer token)
    DELETE /api/donors  -- not implemented yet
    """

    def get(self):
        """Return all donors as JSON.

        Responses:
            200 -- {'status': 'success', 'data': [donor, ...]}
            401 -- missing/malformed/invalid auth token
        """
        # NOTE: the original code printed the raw Authorization header here,
        # leaking bearer tokens into the server logs; that debug print has
        # been removed.
        auth_header = request.headers.get('Authorization')
        if auth_header:
            try:
                auth_token = auth_header.split(" ")[1]
            except IndexError:
                responseObject = {
                    'status': 'fail',
                    'message': 'Bearer token malformed.'
                }
                return make_response(json.dumps(responseObject, default=str)), 401
        else:
            auth_token = ''
        if auth_token:
            # decode_auth_token returns a string only on failure
            resp = User.decode_auth_token(auth_token)
            if not isinstance(resp, str):
                # serialize each donor row (comprehension instead of the
                # previous index-based loop)
                donors_arr = [donor._return_data() for donor in Donor.query.all()]
                responseObject = {
                    'status': 'success',
                    'data': donors_arr
                }
                return make_response(json.dumps(responseObject, default=str)), 200
            responseObject = {
                'status': 'fail',
                'message': resp
            }
            return make_response(json.dumps(responseObject, default=str)), 401
        else:
            responseObject = {
                'status': 'fail',
                'message': 'Provide a valid auth token.'
            }
            return make_response(json.dumps(responseObject, default=str)), 401

    def delete(self):
        """ delete goes here """
        # TODO: bulk delete is not implemented; returning None makes Flask
        # raise a 500 if this route is ever hit.
class DonorItemAPI(MethodView):
    """
    Single-donor resource.

    POST   /api/donor_item  -- create a donor from the JSON payload
    GET    /api/donor_item  -- fetch one donor by payload['id']
    PUT    /api/donor_item  -- update one donor by payload['id']
    PATCH  /api/donor_item  -- not implemented yet
    DELETE /api/donor_item  -- delete one donor by payload['id']

    Every verb requires a valid Bearer token.  The shared auth, response
    and field-copy boilerplate that was previously duplicated in each
    method lives in the private helpers below.
    """

    # donor columns copied verbatim from the request payload by both POST
    # and PUT.  medical_conditions is intentionally excluded: POST receives
    # it as a list of {'value': ...} objects and flattens it, while PUT
    # receives it pre-flattened.
    _FIELDS = (
        "sn", "email", "hospital_id", "first_name", "middle_name",
        "last_name", "home_address", "city", "region", "phone1", "phone2",
        "cni", "cni_doi", "cni_poi", "dob", "pob", "gender", "blood_group",
        "allergies", "rhesus_factor", "current_medications", "dolbd",
        "referrer_id",
    )

    @staticmethod
    def _json(payload, code):
        """Serialize *payload* (stringifying non-JSON types) with *code*."""
        return make_response(json.dumps(payload, default=str)), code

    def _auth_token(self):
        """Extract the Bearer token from the Authorization header.

        Returns (token, error_response).  On a malformed header, token is
        None and error_response is a ready-made 401; otherwise token is the
        raw token string ('' when the header is absent) and error_response
        is None.
        """
        auth_header = request.headers.get('Authorization')
        if not auth_header:
            return '', None
        try:
            return auth_header.split(" ")[1], None
        except IndexError:
            return None, self._json(
                {'status': 'fail', 'message': 'Bearer token malformed.'}, 401)

    def _authenticate(self):
        """Validate the request's token.

        Returns None on success, or a (response, status) pair to return
        straight to the client on any authentication failure.
        """
        auth_token, error = self._auth_token()
        if error is not None:
            return error
        if not auth_token:
            return self._json(
                {'status': 'fail', 'message': 'Provide a valid auth token.'}, 401)
        # decode_auth_token returns an error message string on failure
        resp = User.decode_auth_token(auth_token)
        if isinstance(resp, str):
            return self._json({'status': 'fail', 'message': resp}, 401)
        return None

    def _assign_fields(self, donor, post_data):
        """Copy the shared donor columns from *post_data* onto *donor*."""
        for field in self._FIELDS:
            setattr(donor, field, post_data[field])

    def _save_and_respond(self, donor):
        """Persist *donor*, refresh its derived columns, return success JSON."""
        db.session.add(donor)
        db.session.commit()
        # recompute next-eligible-donation date and status after commit
        donor.update_ndefbd()
        donor.update_status()
        return self._json({'status': 'success', 'data': donor._return_data()}, 200)

    def post(self):
        """Create a new donor from the JSON payload."""
        post_data = request.json
        auth_failure = self._authenticate()
        if auth_failure is not None:
            return auth_failure
        donor = Donor()
        self._assign_fields(donor, post_data)
        # POST sends medical_conditions as [{'value': ...}]; store as CSV
        donor.medical_conditions = ', '.join(
            obj['value'] for obj in post_data["medical_conditions"])
        return self._save_and_respond(donor)

    def get(self):
        """Fetch a single donor; the id arrives in the JSON body."""
        post_data = request.get_json()
        auth_failure = self._authenticate()
        if auth_failure is not None:
            return auth_failure
        donor = Donor().query.filter(Donor.id == post_data['id']).first()
        if donor:
            return self._json({
                'status': 'success',
                'message': 'donor found',
                'data': donor._return_data()
            }, 200)
        # NOTE: 402 mirrors the pre-existing API behavior; 404 would be
        # the conventional choice but callers may depend on 402.
        return self._json({'status': 'fail', 'message': 'donor not found'}, 402)

    def put(self):
        """Update an existing donor looked up by payload['id']."""
        post_data = request.json
        auth_failure = self._authenticate()
        if auth_failure is not None:
            return auth_failure
        donor = Donor.query.filter_by(id=post_data['id']).first()
        self._assign_fields(donor, post_data)
        # PUT receives medical_conditions already flattened to a string
        donor.medical_conditions = post_data["medical_conditions"]
        return self._save_and_respond(donor)

    def patch(self):
        """ update an item here """
        # TODO: partial update is not implemented yet.

    def delete(self):
        """Delete a donor looked up by payload['id']."""
        post_data = request.json
        auth_failure = self._authenticate()
        if auth_failure is not None:
            return auth_failure
        donor = Donor.query.filter_by(id=post_data['id']).first()
        if donor is None:
            # BUG FIX: deleting an unknown id previously raised on
            # db.session.delete(None) and surfaced as a 500; report it the
            # same way GET reports a missing donor.
            return self._json({'status': 'fail', 'message': 'donor not found'}, 402)
        db.session.delete(donor)
        db.session.commit()
        return self._json({'status': 'success'}, 200)
# define the API resources
donors_api = DonorAPI.as_view('donors_api')
donor_item_api = DonorItemAPI.as_view('donor_item_api')

# add Rules for API Endpoints
# collection endpoint: list all donors / (stub) bulk delete
donor_blueprint.add_url_rule(
    '/api/donors',
    view_func=donors_api,
    methods=['GET', 'DELETE']
)
# single-item endpoint: full CRUD on one donor (id travels in the JSON body)
donor_blueprint.add_url_rule(
    '/api/donor_item',
    view_func=donor_item_api,
    methods=['POST', 'GET', 'PUT', 'PATCH', 'DELETE']
)
| 37.184049
| 89
| 0.50627
| 1,167
| 12,122
| 5.073693
| 0.12425
| 0.079716
| 0.063841
| 0.078027
| 0.824354
| 0.804256
| 0.781456
| 0.773687
| 0.765073
| 0.765073
| 0
| 0.010125
| 0.380795
| 12,122
| 326
| 90
| 37.184049
| 0.77871
| 0.033493
| 0
| 0.732852
| 0
| 0
| 0.111254
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025271
| false
| 0
| 0.028881
| 0
| 0.137184
| 0.018051
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4403842d00ba0db729e810c11ce2e5d2941a6e07
| 29
|
py
|
Python
|
tests/test_module/file1.py
|
lengstrom/fastargs
|
bf2e0ac826bfb81f7559cd44e956c6a4aad982f1
|
[
"MIT"
] | 20
|
2021-04-02T06:43:37.000Z
|
2022-02-16T18:33:10.000Z
|
tests/test_module/file1.py
|
lengstrom/fastargs
|
bf2e0ac826bfb81f7559cd44e956c6a4aad982f1
|
[
"MIT"
] | 16
|
2021-04-02T05:27:26.000Z
|
2022-03-07T18:11:11.000Z
|
tests/test_module/file1.py
|
lengstrom/fastargs
|
bf2e0ac826bfb81f7559cd44e956c6a4aad982f1
|
[
"MIT"
] | 1
|
2021-11-11T03:31:10.000Z
|
2021-11-11T03:31:10.000Z
|
def testme():
    """Trivial fixture function; always returns the constant 42."""
    answer = 42
    return answer
| 7.25
| 13
| 0.586207
| 4
| 29
| 4.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 0.310345
| 29
| 3
| 14
| 9.666667
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0.5
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
440c098f4e06b4ce54d1e2d36fb5e229e40bf385
| 83,461
|
py
|
Python
|
tests/unit/operations/test_logsops.py
|
senstb/aws-elastic-beanstalk-cli
|
ef27ae50e8be34ccbe29bc6dc421323bddc3f485
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/operations/test_logsops.py
|
senstb/aws-elastic-beanstalk-cli
|
ef27ae50e8be34ccbe29bc6dc421323bddc3f485
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/operations/test_logsops.py
|
senstb/aws-elastic-beanstalk-cli
|
ef27ae50e8be34ccbe29bc6dc421323bddc3f485
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import shutil
import sys
import pytest
import unittest
import mock
from ebcli.operations import logsops
from ebcli.objects.exceptions import (
InvalidOptionsError,
NotFoundError,
ServiceError
)
from ebcli.resources.statics import logs_operations_constants
from tests.unit import mock_logs
class TestLogsOperations(unittest.TestCase):
app_name = 'MyFooApp'
env_name = 'MyFooEnv'
instance_id = 'i-123456789'
instance_id_alt = 'i-666666666'
specified_log_group = '/aws/elasticbeanstalk/{0}/specific/error.log'.format(env_name)
    def setUp(self):
        """Create an empty testDir/ and cd into it so file side effects are isolated."""
        self.root_dir = os.getcwd()
        if os.path.isdir('testDir'):
            shutil.rmtree('testDir')
        os.mkdir('testDir')
        os.chdir('testDir')
    def tearDown(self):
        """Return to the original working directory and remove testDir/."""
        os.chdir(self.root_dir)
        shutil.rmtree('testDir')
    def test_beanstalk_prefix_for_environment(self):
        """The CloudWatch log-group prefix is /aws/elasticbeanstalk/<env>."""
        self.assertEqual(
            '/aws/elasticbeanstalk/my_env',
            logsops.cloudwatch_log_group_prefix_for_environment('my_env')
        )
    def test_beanstalk_log_group_for_environment_health_streaming(self):
        """The health-streaming log group appends environment-health.log to the prefix."""
        self.assertEqual(
            '/aws/elasticbeanstalk/my_env/environment-health.log',
            logsops.cloudwatch_log_group_for_environment_health_streaming('my_env')
        )
    @mock.patch('ebcli.operations.logsops.cloudwatch.get_all_stream_names')
    def test_cloudwatch_log_stream_names(self, get_all_stream_names_mock):
        """cloudwatch_log_stream_names passes through whatever the CloudWatch client returns."""
        get_all_stream_names_mock.return_value = ['log_stream_1', 'log_stream_2']
        self.assertEqual(
            ['log_stream_1', 'log_stream_2'],
            logsops.cloudwatch_log_stream_names('some_log_group', 'some_instance_id')
        )
    @mock.patch('ebcli.operations.logsops.io.get_event_streamer')
    @mock.patch('ebcli.operations.logsops.cloudwatch_log_stream_names')
    @mock.patch('ebcli.operations.logsops._create_log_stream_for_log_group')
    @mock.patch('ebcli.operations.logsops._delay_subsequent_stream_creation')
    @mock.patch('ebcli.operations.logsops._wait_to_poll_cloudwatch')
    def test_stream_cloudwatch_logs(
        self,
        _wait_to_poll_cloudwatch_mock,
        _delay_subsequent_stream_creation_mock,
        _create_log_stream_for_log_group_mock,
        cloudwatch_log_stream_names_mock,
        get_event_streamer_mock,
    ):
        """One log stream is created per discovered stream name in a single poll cycle."""
        streamer = mock.MagicMock()
        get_event_streamer_mock.return_value = streamer
        # raising on the first poll wait ends the otherwise-infinite
        # streaming loop after a single iteration
        _wait_to_poll_cloudwatch_mock.side_effect = KeyboardInterrupt
        _delay_subsequent_stream_creation_mock.return_value = None
        cloudwatch_log_stream_names_mock.return_value = ['log_group_1', 'log_group_2']

        calls = [
            mock.call('/aws/elasticbeanstalk/my_environment', 'log_group_1', streamer, 0, None),
            mock.call('/aws/elasticbeanstalk/my_environment', 'log_group_2', streamer, 0, None),
        ]

        try:
            logsops.stream_instance_logs_from_cloudwatch(
                sleep_time=0,
                log_group='/aws/elasticbeanstalk/my_environment',
                specific_log_stream='i-213123qsdasdad'
            )
        except KeyboardInterrupt:
            pass

        _create_log_stream_for_log_group_mock.assert_has_calls(calls, any_order=True)
    @mock.patch('ebcli.operations.logsops.io.get_event_streamer')
    @mock.patch('ebcli.operations.logsops.cloudwatch_log_stream_names')
    @mock.patch('ebcli.operations.logsops._create_log_stream_for_log_group')
    @mock.patch('ebcli.operations.logsops._delay_subsequent_stream_creation')
    @mock.patch('ebcli.operations.logsops._wait_to_poll_cloudwatch')
    @mock.patch('ebcli.operations.logsops._updated_start_time')
    def test_stream_cloudwatch_logs__multiple_times(
        self,
        _updated_start_time_mock,
        _wait_to_poll_cloudwatch_mock,
        _delay_subsequent_stream_creation_mock,
        _create_log_stream_for_log_group_mock,
        cloudwatch_log_stream_names_mock,
        get_event_streamer_mock,
    ):
        """On the second poll cycle the updated start time is used instead of None."""
        streamer = mock.MagicMock()
        _updated_start_time_mock.return_value = '1231231231234'
        get_event_streamer_mock.return_value = streamer
        # first wait succeeds (allowing a second cycle), second wait raises
        # to end the loop
        _wait_to_poll_cloudwatch_mock.side_effect = [None, KeyboardInterrupt]
        _delay_subsequent_stream_creation_mock.return_value = None
        cloudwatch_log_stream_names_mock.return_value = ['log_group_1']

        calls = [
            mock.call('/aws/elasticbeanstalk/my_environment', 'log_group_1', streamer, 0, None),
            mock.call('/aws/elasticbeanstalk/my_environment', 'log_group_1', streamer, 0, '1231231231234'),
        ]

        try:
            logsops.stream_instance_logs_from_cloudwatch(
                sleep_time=0,
                log_group='/aws/elasticbeanstalk/my_environment',
                specific_log_stream='i-213123qsdasdad'
            )
        except KeyboardInterrupt:
            pass

        _create_log_stream_for_log_group_mock.assert_has_calls(calls, any_order=True)
    def test_create_log_stream_for_log_group(self):
        """_create_log_stream_for_log_group can be invoked repeatedly without error."""
        streamer = mock.MagicMock()
        with mock.patch('ebcli.operations.logsops.stream_single_stream'):
            # call twice with identical arguments; the loop index itself
            # is unused
            for i in range(1, 3):
                logsops._create_log_stream_for_log_group(
                    '/aws/elasticbeanstalk/my_environment',
                    'log_group_2',
                    streamer,
                    0
                )
    @mock.patch('ebcli.operations.logsops.commonops.update_environment')
    @mock.patch('ebcli.operations.logsops.instance_log_streaming_enabled')
    @mock.patch('ebcli.operations.logsops.environment_health_streaming_enabled')
    @mock.patch('ebcli.operations.logsops.elasticbeanstalk.describe_configuration_settings')
    def test_disable_cloudwatch_logs__all(
        self,
        describe_configuration_settings_mock,
        environment_health_streaming_enabled_mock,
        instance_log_streaming_enabled_mock,
        update_environment_mock
    ):
        """cloudwatch_log_source='all' turns off both instance and health streaming."""
        instance_log_streaming_enabled_mock.return_value = True
        environment_health_streaming_enabled_mock.return_value = True
        describe_configuration_settings_mock.side_effect = None

        logsops.disable_cloudwatch_logs(self.app_name, self.env_name, cloudwatch_log_source='all')

        # both option settings are flipped to 'false' in one environment update
        update_environment_mock.assert_called_once_with(
            'MyFooEnv', changes=[
                {
                    'Namespace': 'aws:elasticbeanstalk:cloudwatch:logs',
                    'OptionName': 'StreamLogs',
                    'Value': 'false'
                },
                {
                    'Namespace': 'aws:elasticbeanstalk:cloudwatch:logs:health',
                    'OptionName': 'HealthStreamingEnabled',
                    'Value': 'false'
                }
            ],
            nohang=False,
            timeout=15
        )
    @mock.patch('ebcli.operations.logsops.commonops.update_environment')
    @mock.patch('ebcli.operations.logsops.instance_log_streaming_enabled')
    @mock.patch('ebcli.operations.logsops.environment_health_streaming_enabled')
    @mock.patch('ebcli.operations.logsops.elasticbeanstalk.describe_configuration_settings')
    def test_disable_cloudwatch_logs__all__both_already_disabled(
        self,
        describe_configuration_settings_mock,
        environment_health_streaming_enabled_mock,
        instance_log_streaming_enabled_mock,
        update_environment_mock
    ):
        """No environment update is issued when both streams are already off."""
        environment_health_streaming_enabled_mock.return_value = False
        instance_log_streaming_enabled_mock.return_value = False
        describe_configuration_settings_mock.side_effect = None

        logsops.disable_cloudwatch_logs(self.app_name, self.env_name, cloudwatch_log_source='all')

        update_environment_mock.assert_not_called()
    @mock.patch('ebcli.operations.logsops.commonops.update_environment')
    @mock.patch('ebcli.operations.logsops.instance_log_streaming_enabled')
    @mock.patch('ebcli.operations.logsops.environment_health_streaming_enabled')
    @mock.patch('ebcli.operations.logsops.elasticbeanstalk.describe_configuration_settings')
    def test_disable_cloudwatch_logs__all__instance_log_streaming_already_disabled(
        self,
        describe_configuration_settings_mock,
        environment_health_streaming_enabled_mock,
        instance_log_streaming_enabled_mock,
        update_environment_mock
    ):
        """Only the health-streaming option is changed when instance streaming is already off."""
        instance_log_streaming_enabled_mock.return_value = False
        environment_health_streaming_enabled_mock.return_value = True

        logsops.disable_cloudwatch_logs(self.app_name, self.env_name, cloudwatch_log_source='all')

        # note the shorter timeout (5) when only health streaming changes
        update_environment_mock.assert_called_once_with(
            'MyFooEnv',
            changes=[
                {
                    'Namespace': 'aws:elasticbeanstalk:cloudwatch:logs:health',
                    'OptionName': 'HealthStreamingEnabled',
                    'Value': 'false'
                }
            ],
            nohang=False,
            timeout=5
        )
    @mock.patch('ebcli.operations.logsops.commonops.update_environment')
    @mock.patch('ebcli.operations.logsops.instance_log_streaming_enabled')
    @mock.patch('ebcli.operations.logsops.elasticbeanstalk.describe_configuration_settings')
    def test_disable_cloudwatch_logs__instance(
        self,
        describe_configuration_settings_mock,
        instance_log_streaming_enabled,
        update_environment_mock
    ):
        """cloudwatch_log_source='instance' turns off instance log streaming only."""
        instance_log_streaming_enabled.return_value = True
        describe_configuration_settings_mock.side_effect = None

        logsops.disable_cloudwatch_logs(self.app_name, self.env_name, cloudwatch_log_source='instance')

        update_environment_mock.assert_called_once_with(
            'MyFooEnv',
            changes=[
                {
                    'Namespace': 'aws:elasticbeanstalk:cloudwatch:logs',
                    'OptionName': 'StreamLogs',
                    'Value': 'false'
                }
            ],
            nohang=False,
            timeout=15
        )
    @mock.patch('ebcli.operations.logsops.commonops.update_environment')
    @mock.patch('ebcli.operations.logsops.instance_log_streaming_enabled')
    @mock.patch('ebcli.operations.logsops.elasticbeanstalk.describe_configuration_settings')
    def test_disable_cloudwatch_logs__instance__already_disabled(
        self,
        describe_configuration_settings_mock,
        instance_log_streaming_disable,
        update_environment_mock
    ):
        """No environment update is issued when instance streaming is already off."""
        instance_log_streaming_disable.return_value = False
        describe_configuration_settings_mock.side_effect = None

        logsops.disable_cloudwatch_logs(self.app_name, self.env_name, cloudwatch_log_source='instance')

        update_environment_mock.assert_not_called()
    @mock.patch('ebcli.operations.logsops.commonops.update_environment')
    @mock.patch('ebcli.operations.logsops.instance_log_streaming_enabled')
    @mock.patch('ebcli.operations.logsops.elasticbeanstalk.describe_configuration_settings')
    def test_disable_cloudwatch_logs__cloudwatch_log_source_not_specified_by_customer__defaults_to_instance__already_disabled(
            self,
            describe_configuration_settings_mock,
            instance_log_streaming_enabled,
            update_environment_mock
    ):
        """`cloudwatch_log_source=None` defaults to 'instance'; streaming is
        mocked as enabled, so 'StreamLogs' is expected to be set to 'false'.

        NOTE(review): the `__already_disabled` suffix in the name is
        misleading -- this test mocks streaming as *enabled*; the disabled
        path is covered by the sibling test. Consider renaming.
        """
        instance_log_streaming_enabled.return_value = True

        logsops.disable_cloudwatch_logs(self.app_name, self.env_name, cloudwatch_log_source=None)

        update_environment_mock.assert_called_once_with(
            'MyFooEnv',
            changes=[
                {
                    'Namespace': 'aws:elasticbeanstalk:cloudwatch:logs',
                    'OptionName': 'StreamLogs',
                    'Value': 'false'
                }
            ],
            nohang=False,
            timeout=15
        )
    @mock.patch('ebcli.operations.logsops.commonops.update_environment')
    @mock.patch('ebcli.operations.logsops.instance_log_streaming_enabled')
    @mock.patch('ebcli.operations.logsops.elasticbeanstalk.describe_configuration_settings')
    def test_disable_cloudwatch_logs__cloudwatch_log_source_not_specified_by_customer__defaults_to_instance__instance_already_disabled(
            self,
            describe_configuration_settings_mock,
            instance_log_streaming_disabled,
            update_environment_mock
    ):
        """`cloudwatch_log_source=None` defaults to 'instance'; streaming is
        already off, so no environment update happens."""
        instance_log_streaming_disabled.return_value = False
        describe_configuration_settings_mock.side_effect = None

        logsops.disable_cloudwatch_logs(self.app_name, self.env_name, cloudwatch_log_source=None)

        update_environment_mock.assert_not_called()
    @mock.patch('ebcli.operations.logsops.commonops.update_environment')
    @mock.patch('ebcli.operations.logsops.environment_health_streaming_enabled')
    @mock.patch('ebcli.operations.logsops.elasticbeanstalk.describe_configuration_settings')
    def test_disable_cloudwatch_logs__environment_health(
            self,
            describe_configuration_settings_mock,
            environment_health_streaming_enabled_mock,
            update_environment_mock
    ):
        """Disabling source 'environment-health' while streaming is on updates
        'HealthStreamingEnabled' to 'false' (timeout=5)."""
        environment_health_streaming_enabled_mock.return_value = True

        logsops.disable_cloudwatch_logs(self.app_name, self.env_name, cloudwatch_log_source='environment-health')

        update_environment_mock.assert_called_once_with(
            'MyFooEnv',
            changes=[
                {
                    'Namespace': 'aws:elasticbeanstalk:cloudwatch:logs:health',
                    'OptionName': 'HealthStreamingEnabled',
                    'Value': 'false'
                }
            ],
            nohang=False,
            timeout=5
        )
    @mock.patch('ebcli.operations.logsops.commonops.update_environment')
    @mock.patch('ebcli.operations.logsops.environment_health_streaming_enabled')
    @mock.patch('ebcli.operations.logsops.elasticbeanstalk.describe_configuration_settings')
    def test_disable_cloudwatch_logs__environment_health__already_disabled(
            self,
            describe_configuration_settings_mock,
            environment_health_streaming_enabled_mock,
            update_environment_mock
    ):
        """Disabling source 'environment-health' when streaming is already off
        is a no-op."""
        environment_health_streaming_enabled_mock.return_value = False
        describe_configuration_settings_mock.side_effect = None

        logsops.disable_cloudwatch_logs(self.app_name, self.env_name, cloudwatch_log_source='environment-health')

        update_environment_mock.assert_not_called()
    @mock.patch('ebcli.operations.logsops._echo_link_to_cloudwatch_console')
    @mock.patch('ebcli.operations.logsops.commonops.update_environment')
    @mock.patch('ebcli.operations.logsops.instance_log_streaming_enabled')
    @mock.patch('ebcli.operations.logsops.environment_health_streaming_enabled')
    @mock.patch('ebcli.operations.logsops.elasticbeanstalk.describe_configuration_settings')
    @mock.patch('ebcli.operations.logsops._raise_if_environment_is_not_using_enhanced_health')
    def test_enable_cloudwatch_logs__all(
            self,
            raise_if_environment_is_not_using_enhanced_health_mock,
            describe_configuration_settings_mock,
            environment_health_streaming_enabled_mock,
            instance_log_streaming_enabled_mock,
            update_environment_mock,
            _echo_link_to_cloudwatch_console_mock
    ):
        """Enabling source 'all' with both streams off turns on both
        'StreamLogs' and 'HealthStreamingEnabled' in a single update."""
        environment_health_streaming_enabled_mock.return_value = False
        instance_log_streaming_enabled_mock.return_value = False
        describe_configuration_settings_mock.side_effect = None
        raise_if_environment_is_not_using_enhanced_health_mock.side_effect = None
        _echo_link_to_cloudwatch_console_mock.side_effect = None

        logsops.enable_cloudwatch_logs(self.app_name, self.env_name, cloudwatch_log_source='all')

        update_environment_mock.assert_called_once_with(
            'MyFooEnv', changes=[
                {
                    'Namespace': 'aws:elasticbeanstalk:cloudwatch:logs',
                    'OptionName': 'StreamLogs',
                    'Value': 'true'
                },
                {
                    'Namespace': 'aws:elasticbeanstalk:cloudwatch:logs:health',
                    'OptionName': 'HealthStreamingEnabled',
                    'Value': 'true'
                }
            ],
            nohang=False,
            timeout=15
        )
    @mock.patch('ebcli.operations.logsops.commonops.update_environment')
    @mock.patch('ebcli.operations.logsops.instance_log_streaming_enabled')
    @mock.patch('ebcli.operations.logsops.environment_health_streaming_enabled')
    @mock.patch('ebcli.operations.logsops.elasticbeanstalk.describe_configuration_settings')
    @mock.patch('ebcli.operations.logsops._raise_if_environment_is_not_using_enhanced_health')
    def test_enable_cloudwatch_logs__all__already_enabled(
            self,
            raise_if_environment_is_not_using_enhanced_health_mock,
            describe_configuration_settings_mock,
            environment_health_streaming_enabled_mock,
            instance_log_streaming_enabled_mock,
            update_environment_mock
    ):
        """Enabling source 'all' when both streams are already on is a no-op."""
        environment_health_streaming_enabled_mock.return_value = True
        instance_log_streaming_enabled_mock.return_value = True
        describe_configuration_settings_mock.side_effect = None
        raise_if_environment_is_not_using_enhanced_health_mock.side_effect = None

        logsops.enable_cloudwatch_logs(self.app_name, self.env_name, cloudwatch_log_source='all')

        update_environment_mock.assert_not_called()
    @mock.patch('ebcli.operations.logsops._echo_link_to_cloudwatch_console')
    @mock.patch('ebcli.operations.logsops.commonops.update_environment')
    @mock.patch('ebcli.operations.logsops.instance_log_streaming_enabled')
    @mock.patch('ebcli.operations.logsops.environment_health_streaming_enabled')
    @mock.patch('ebcli.operations.logsops.elasticbeanstalk.describe_configuration_settings')
    @mock.patch('ebcli.operations.logsops._raise_if_environment_is_not_using_enhanced_health')
    def test_enable_cloudwatch_logs__all__environment_health_streaming_enabled(
            self,
            raise_if_environment_is_not_using_enhanced_health_mock,
            describe_configuration_settings_mock,
            environment_health_streaming_enabled_mock,
            instance_log_streaming_enabled_mock,
            update_environment_mock,
            _echo_link_to_cloudwatch_console_mock
    ):
        """Enabling source 'all' when health streaming is already on updates
        only 'StreamLogs'."""
        environment_health_streaming_enabled_mock.return_value = True
        instance_log_streaming_enabled_mock.return_value = False
        describe_configuration_settings_mock.side_effect = None
        raise_if_environment_is_not_using_enhanced_health_mock.side_effect = None
        _echo_link_to_cloudwatch_console_mock.side_effect = None

        logsops.enable_cloudwatch_logs(self.app_name, self.env_name, cloudwatch_log_source='all')

        update_environment_mock.assert_called_once_with(
            'MyFooEnv',
            changes=[
                {
                    'Namespace': 'aws:elasticbeanstalk:cloudwatch:logs',
                    'OptionName': 'StreamLogs',
                    'Value': 'true'
                }
            ],
            nohang=False,
            timeout=15
        )
    @mock.patch('ebcli.operations.logsops.commonops.update_environment')
    @mock.patch('ebcli.operations.logsops.instance_log_streaming_enabled')
    @mock.patch('ebcli.operations.logsops.elasticbeanstalk.describe_configuration_settings')
    @mock.patch('ebcli.operations.logsops._echo_link_to_cloudwatch_console')
    def test_enable_cloudwatch_logs__instance(
            self,
            _echo_link_to_cloudwatch_console_mock,
            describe_configuration_settings_mock,
            instance_log_streaming_enabled_mock,
            update_environment_mock
    ):
        """Enabling source 'instance' while streaming is off updates
        'StreamLogs' to 'true'."""
        instance_log_streaming_enabled_mock.return_value = False
        describe_configuration_settings_mock.side_effect = None
        _echo_link_to_cloudwatch_console_mock.side_effect = None

        logsops.enable_cloudwatch_logs(self.app_name, self.env_name, cloudwatch_log_source='instance')

        update_environment_mock.assert_called_once_with(
            'MyFooEnv',
            changes=[
                {
                    'Namespace': 'aws:elasticbeanstalk:cloudwatch:logs',
                    'OptionName': 'StreamLogs',
                    'Value': 'true'
                }
            ],
            nohang=False,
            timeout=15
        )
    @mock.patch('ebcli.operations.logsops.commonops.update_environment')
    @mock.patch('ebcli.operations.logsops.instance_log_streaming_enabled')
    @mock.patch('ebcli.operations.logsops.elasticbeanstalk.describe_configuration_settings')
    def test_enable_cloudwatch_logs__instance__already_enabled(
            self,
            describe_configuration_settings_mock,
            instance_log_streaming_enabled_mock,
            update_environment_mock
    ):
        """Enabling source 'instance' when streaming is already on is a no-op."""
        instance_log_streaming_enabled_mock.return_value = True
        describe_configuration_settings_mock.side_effect = None

        logsops.enable_cloudwatch_logs(self.app_name, self.env_name, cloudwatch_log_source='instance')

        update_environment_mock.assert_not_called()
    @mock.patch('ebcli.operations.logsops._echo_link_to_cloudwatch_console')
    @mock.patch('ebcli.operations.logsops.commonops.update_environment')
    @mock.patch('ebcli.operations.logsops.instance_log_streaming_enabled')
    @mock.patch('ebcli.operations.logsops.elasticbeanstalk.describe_configuration_settings')
    def test_enable_cloudwatch_logs__cloudwatch_log_source_not_specified_by_customer__defaults_to_instance(
            self,
            describe_configuration_settings_mock,
            instance_log_streaming_enabled_mock,
            update_environment_mock,
            _echo_link_to_cloudwatch_console_mock
    ):
        """`cloudwatch_log_source=None` defaults to 'instance'; streaming is
        off, so 'StreamLogs' is switched on."""
        instance_log_streaming_enabled_mock.return_value = False
        describe_configuration_settings_mock.side_effect = None
        _echo_link_to_cloudwatch_console_mock.side_effect = None

        logsops.enable_cloudwatch_logs(self.app_name, self.env_name, cloudwatch_log_source=None)

        update_environment_mock.assert_called_once_with(
            'MyFooEnv',
            changes=[
                {
                    'Namespace': 'aws:elasticbeanstalk:cloudwatch:logs',
                    'OptionName': 'StreamLogs',
                    'Value': 'true'
                }
            ],
            nohang=False,
            timeout=15
        )
    @mock.patch('ebcli.operations.logsops.commonops.update_environment')
    @mock.patch('ebcli.operations.logsops.instance_log_streaming_enabled')
    @mock.patch('ebcli.operations.logsops.elasticbeanstalk.describe_configuration_settings')
    def test_enable_cloudwatch_logs__cloudwatch_log_source_not_specified_by_customer__defaults_to_instance__already_enabled(
            self,
            describe_configuration_settings_mock,
            instance_log_streaming_enabled_mock,
            update_environment_mock
    ):
        """`cloudwatch_log_source=None` defaults to 'instance'; streaming is
        already on, so no environment update happens."""
        instance_log_streaming_enabled_mock.return_value = True
        describe_configuration_settings_mock.side_effect = None

        logsops.enable_cloudwatch_logs(self.app_name, self.env_name, cloudwatch_log_source=None)

        update_environment_mock.assert_not_called()
    @mock.patch('ebcli.operations.logsops._echo_link_to_cloudwatch_console')
    @mock.patch('ebcli.operations.logsops.commonops.update_environment')
    @mock.patch('ebcli.operations.logsops.environment_health_streaming_enabled')
    @mock.patch('ebcli.operations.logsops.elasticbeanstalk.describe_configuration_settings')
    @mock.patch('ebcli.operations.logsops._raise_if_environment_is_not_using_enhanced_health')
    def test_enable_cloudwatch_logs__environment_health(
            self,
            raise_if_environment_is_not_using_enhanced_health_mock,
            describe_configuration_settings_mock,
            environment_health_streaming_enabled_mock,
            update_environment_mock,
            _echo_link_to_cloudwatch_console_mock
    ):
        """Enabling source 'environment-health' while streaming is off updates
        'HealthStreamingEnabled' to 'true' (timeout=5)."""
        environment_health_streaming_enabled_mock.return_value = False
        describe_configuration_settings_mock.side_effect = None
        raise_if_environment_is_not_using_enhanced_health_mock.side_effect = None
        _echo_link_to_cloudwatch_console_mock.side_effect = None

        logsops.enable_cloudwatch_logs(self.app_name, self.env_name, cloudwatch_log_source='environment-health')

        update_environment_mock.assert_called_once_with(
            'MyFooEnv',
            changes=[
                {
                    'Namespace': 'aws:elasticbeanstalk:cloudwatch:logs:health',
                    'OptionName': 'HealthStreamingEnabled',
                    'Value': 'true'
                }
            ],
            nohang=False,
            timeout=5
        )
    @mock.patch('ebcli.operations.logsops.commonops.update_environment')
    @mock.patch('ebcli.operations.logsops.environment_health_streaming_enabled')
    @mock.patch('ebcli.operations.logsops.elasticbeanstalk.describe_configuration_settings')
    @mock.patch('ebcli.operations.logsops._raise_if_environment_is_not_using_enhanced_health')
    def test_enable_cloudwatch_logs__environment_health__already_enabled(
            self,
            raise_if_environment_is_not_using_enhanced_health_mock,
            describe_configuration_settings_mock,
            environment_health_streaming_enabled_mock,
            update_environment_mock
    ):
        """Enabling source 'environment-health' when streaming is already on
        is a no-op."""
        environment_health_streaming_enabled_mock.return_value = True
        describe_configuration_settings_mock.side_effect = None
        raise_if_environment_is_not_using_enhanced_health_mock.side_effect = None

        logsops.enable_cloudwatch_logs(self.app_name, self.env_name, cloudwatch_log_source='environment-health')

        update_environment_mock.assert_not_called()
    @mock.patch('ebcli.operations.logsops.cloudwatch.get_log_events')
    def test_get_cloudwatch_stream_logs_for_instance(self, get_log_events_mock):
        """Events are joined with os.linesep and each message is prefixed with
        the stream name in square brackets."""
        get_log_events_mock.return_value = {
            'events': [
                {
                    'timestamp': 1521501601426,
                    'message': '[2018-03-19T23:19:55.811Z] INFO [2810] - [Initialization] : Starting activity...',
                    'ingestionTime': 1521501607457
                },
                {
                    'timestamp': 1521501601426,
                    'message': '[2018-03-19T23:19:55.811Z] INFO [2810] - [Initialization/AddonsBefore] : Starting activity...',
                    'ingestionTime': 1521501607457
                }
            ]
        }

        self.assertEqual(
            '[my_log_stream] [2018-03-19T23:19:55.811Z] INFO [2810] - [Initialization] : Starting activity...{linesep}'
            '[my_log_stream] [2018-03-19T23:19:55.811Z] INFO [2810] - [Initialization/AddonsBefore] : Starting activity...'.format(
                linesep=os.linesep
            ),
            logsops.get_cloudwatch_log_stream_events(
                'log_group_name',
                'my_log_stream'
            )
        )
    @mock.patch('ebcli.operations.logsops.cloudwatch.get_log_events')
    def test_get_cloudwatch_stream_logs_for_instance__service_error_encountered(self, get_log_events_mock):
        """A ServiceError from CloudWatch yields an empty string, not a raise."""
        get_log_events_mock.side_effect = ServiceError

        self.assertEqual(
            '',
            logsops.get_cloudwatch_log_stream_events(
                'log_group_name',
                'my_log_stream'
            )
        )
    @mock.patch('ebcli.operations.logsops.cloudwatch.get_log_events')
    def test_get_cloudwatch_stream_logs_for_instance__general_exception_encountered(self, get_log_events_mock):
        """Any unexpected exception also yields an empty string, not a raise."""
        get_log_events_mock.side_effect = Exception

        self.assertEqual(
            '',
            logsops.get_cloudwatch_log_stream_events(
                'log_group_name',
                'my_log_stream'
            )
        )
    @mock.patch('ebcli.operations.logsops.cloudwatch.get_log_events')
    def test_get_logs_cloudwatch_throws_service_error(self, get_log_events_mock):
        """With no `num_log_events`, the CloudWatch call is made with limit=None
        even when it raises a ServiceError."""
        get_log_events_mock.side_effect = ServiceError("Service is throwing an error!")

        logsops.get_cloudwatch_log_stream_events(self.specified_log_group, self.instance_id_alt)

        get_log_events_mock.assert_called_with(self.specified_log_group, self.instance_id_alt, limit=None)
    @mock.patch('ebcli.operations.logsops.cloudwatch.get_log_events')
    def test_get_logs_cloudwatch_throws_unknown_exception(self, get_log_events_mock):
        """`num_log_events` is forwarded to CloudWatch as `limit`, even when the
        call raises an unexpected exception."""
        get_log_events_mock.side_effect = Exception("An unknown error appeared!")

        logsops.get_cloudwatch_log_stream_events(self.specified_log_group, self.instance_id, num_log_events=50)

        get_log_events_mock.assert_called_with(self.specified_log_group, self.instance_id, limit=50)
    @mock.patch('ebcli.operations.logsops.elasticbeanstalk.describe_configuration_settings')
    def test_log_streaming_enabled__config_settings_not_passed_in(
            self,
            describe_configuration_settings_mock
    ):
        """When no config-settings dict is passed, the settings are fetched via
        the API; 'StreamLogs' == 'true'/'false' maps to True/False."""
        describe_configuration_settings_mock.return_value = {
            'OptionSettings': [
                {
                    'Namespace': 'aws:elasticbeanstalk:cloudwatch:logs',
                    'OptionName': 'StreamLogs',
                    'Value': 'true'
                }
            ]
        }

        self.assertTrue(
            logsops.instance_log_streaming_enabled(self.app_name, self.env_name, None)
        )

        describe_configuration_settings_mock.return_value = {
            'OptionSettings': [
                {
                    'Namespace': 'aws:elasticbeanstalk:cloudwatch:logs',
                    'OptionName': 'StreamLogs',
                    'Value': 'false'
                }
            ]
        }

        self.assertFalse(
            logsops.instance_log_streaming_enabled(self.app_name, self.env_name, None)
        )
    @mock.patch('ebcli.operations.logsops.elasticbeanstalk.describe_configuration_settings')
    def test_log_streaming_enabled__config_settings_passed_in(
            self,
            describe_configuration_settings_mock
    ):
        """When a config-settings dict is passed in, it is used directly and
        the describe-configuration-settings API is never called."""
        describe_configuration_settings = {
            'OptionSettings': [
                {
                    'Namespace': 'aws:elasticbeanstalk:cloudwatch:logs',
                    'OptionName': 'StreamLogs',
                    'Value': 'true'
                }
            ]
        }

        self.assertTrue(
            logsops.instance_log_streaming_enabled(self.app_name, self.env_name, describe_configuration_settings)
        )
        describe_configuration_settings_mock.assert_not_called()

        describe_configuration_settings = {
            'OptionSettings': [
                {
                    'Namespace': 'aws:elasticbeanstalk:cloudwatch:logs',
                    'OptionName': 'StreamLogs',
                    'Value': 'false'
                }
            ]
        }

        self.assertFalse(
            logsops.instance_log_streaming_enabled(self.app_name, self.env_name, describe_configuration_settings)
        )
        describe_configuration_settings_mock.assert_not_called()
    @mock.patch('ebcli.operations.logsops.elasticbeanstalk.describe_configuration_settings')
    def test_environment_health_streaming_enabled__config_settings_not_passed_in(
            self,
            describe_configuration_settings_mock
    ):
        """When no config-settings dict is passed, the settings are fetched via
        the API; 'HealthStreamingEnabled' == 'true'/'false' maps to True/False."""
        describe_configuration_settings_mock.return_value = {
            'OptionSettings': [
                {
                    'Namespace': 'aws:elasticbeanstalk:cloudwatch:logs:health',
                    'OptionName': 'HealthStreamingEnabled',
                    'Value': 'true'
                }
            ]
        }

        self.assertTrue(
            logsops.environment_health_streaming_enabled(self.app_name, self.env_name, None)
        )

        describe_configuration_settings_mock.return_value = {
            'OptionSettings': [
                {
                    'Namespace': 'aws:elasticbeanstalk:cloudwatch:logs:health',
                    'OptionName': 'HealthStreamingEnabled',
                    'Value': 'false'
                }
            ]
        }

        self.assertFalse(
            logsops.environment_health_streaming_enabled(self.app_name, self.env_name, None)
        )
    @mock.patch('ebcli.operations.logsops.elasticbeanstalk.describe_configuration_settings')
    def test_environment_health_streaming_enabled__config_settings_passed_in(
            self,
            describe_configuration_settings_mock
    ):
        """When a config-settings dict is passed in, it is used directly and
        the describe-configuration-settings API is never called."""
        describe_configuration_settings = {
            'OptionSettings': [
                {
                    'Namespace': 'aws:elasticbeanstalk:cloudwatch:logs:health',
                    'OptionName': 'HealthStreamingEnabled',
                    'Value': 'true'
                }
            ]
        }

        self.assertTrue(
            logsops.environment_health_streaming_enabled(self.app_name, self.env_name, describe_configuration_settings)
        )
        describe_configuration_settings_mock.assert_not_called()

        describe_configuration_settings = {
            'OptionSettings': [
                {
                    'Namespace': 'aws:elasticbeanstalk:cloudwatch:logs:health',
                    'OptionName': 'HealthStreamingEnabled',
                    'Value': 'false'
                }
            ]
        }

        self.assertFalse(
            logsops.environment_health_streaming_enabled(self.app_name, self.env_name, describe_configuration_settings)
        )
        describe_configuration_settings_mock.assert_not_called()
    @mock.patch('ebcli.operations.logsops.elasticbeanstalk')
    def test_log_streaming_enabled_is_false(self, mock_beanstalk):
        """If the 'StreamLogs' option cannot be found in the configuration,
        instance log streaming is reported as disabled."""
        meaningless_config = "doesn't matter"
        mock_beanstalk.describe_configuration_settings.return_value = meaningless_config
        mock_beanstalk.get_specific_configuration.return_value = None

        self.assertFalse(
            logsops.instance_log_streaming_enabled(self.app_name, self.env_name),
            "Expected log streaming to be disabled"
        )
    @mock.patch('ebcli.operations.logsops.beanstalk_log_group_builder')
    def test_log_group_builder_default(
            self,
            beanstalk_log_group_builder_mock
    ):
        """Default log group for an environment.

        NOTE(review): this test patches `beanstalk_log_group_builder` and then
        calls the patched name, so it only asserts the mock's own return value
        -- it does not exercise the real implementation; consider removing the
        patch so the default-path logic is actually tested.
        """
        beanstalk_log_group_builder_mock.return_value = '/aws/elasticbeanstalk/MyFooEnv/var/log/eb-activity.log'

        actual_log_group = logsops.beanstalk_log_group_builder(self.env_name)

        self.assertEqual(
            '/aws/elasticbeanstalk/MyFooEnv/var/log/eb-activity.log',
            actual_log_group,
            "Expected log group to be: {0} but got: {1}".format(
                '/aws/elasticbeanstalk/MyFooEnv/var/log/eb-activity.log',
                actual_log_group
            )
        )
    def test_log_group_builder_with_full_filepath(self):
        """A log group passed as a full path is returned unchanged."""
        actual_log_group = logsops.beanstalk_log_group_builder(self.env_name, self.specified_log_group)

        self.assertEqual(
            self.specified_log_group,
            actual_log_group,
            "Expected log group to be: {0} but got: {1}".format(
                self.specified_log_group,
                actual_log_group
            )
        )
    def test_log_group_builder_with_partial_filepath(self):
        """A relative filepath is prefixed with the standard
        '/aws/elasticbeanstalk/<env>/' log-group namespace."""
        filepath = 'foo/specific/error.log'

        actual_log_group = logsops.beanstalk_log_group_builder(self.env_name, filepath)

        expected_log_group = '/aws/elasticbeanstalk/{0}/{1}'.format(self.env_name, filepath)
        self.assertEqual(
            expected_log_group,
            actual_log_group,
            "Expected log group to be: {0} but got: {1}".format(expected_log_group, actual_log_group)
        )
    @mock.patch('ebcli.operations.logsops.beanstalk_log_group_builder')
    def test_normalize_log_group_name__log_group_and_cloudwatch_log_source_not_passed_in_by_customer(
            self,
            beanstalk_log_group_builder_mock
    ):
        """With neither `log_group` nor `cloudwatch_log_source`, the default
        log group from `beanstalk_log_group_builder` is used."""
        beanstalk_log_group_builder_mock.return_value = '/aws/elasticbeanstalk/MyFooEnv/var/log/eb-activity.log'

        self.assertEqual(
            '/aws/elasticbeanstalk/MyFooEnv/var/log/eb-activity.log',
            logsops.normalize_log_group_name('my_env')
        )
    def test_normalize_log_group_name__log_group_name_passed_in_but_not_cloudwatch_log_source(self):
        """A bare `log_group` is namespaced under '/aws/elasticbeanstalk/<env>/'."""
        self.assertEqual(
            '/aws/elasticbeanstalk/my_env/some_log_group',
            logsops.normalize_log_group_name('my_env', log_group='some_log_group')
        )
    def test_normalize_log_group_name__log_group_name_and_instance_cloudwatch_log_source_arguments_passed_in(self):
        """`log_group` combined with the 'instance' source is namespaced the
        same way as a bare `log_group`."""
        self.assertEqual(
            '/aws/elasticbeanstalk/my_env/some_log_group',
            logsops.normalize_log_group_name(
                'my_env',
                log_group='some_log_group',
                cloudwatch_log_source='instance'
            )
        )
    def test_normalize_log_group_name__log_group_name_and_environment_health_cloudwatch_log_source_arguments_passed_in__log_group_discarded(
            self
    ):
        """Passing `--log-group` with the 'environment-health' source is
        rejected with an InvalidOptionsError."""
        with self.assertRaises(InvalidOptionsError) as context_manager:
            logsops.normalize_log_group_name(
                'my_env',
                log_group='some_log_group',
                cloudwatch_log_source='environment-health'
            )

        self.assertEqual(
            """You can't use the "--log-group" option when retrieving environment-health logs. These logs are in a specific, implied log group.""",
            str(context_manager.exception)
        )
    def test_normalize_log_group_name__invalid_cloudwatch_log_source(self):
        """'all' is not a valid source when *retrieving* logs; an
        InvalidOptionsError listing the valid types is raised."""
        with self.assertRaises(InvalidOptionsError) as context_manager:
            logsops.normalize_log_group_name('my_env', log_group='some_log_group', cloudwatch_log_source='all')

        self.assertEqual(
            """Invalid CloudWatch Logs source type for retrieving logs: "all". Valid types: instance | environment-health""",
            str(context_manager.exception))
def test_resolve_log_result_type(self):
self.assertEqual('bundle', logsops.resolve_log_result_type(True, True))
self.assertEqual('bundle', logsops.resolve_log_result_type(None, True))
self.assertEqual('bundle', logsops.resolve_log_result_type(True, None))
self.assertEqual('tail', logsops.resolve_log_result_type(None, None))
    @mock.patch('ebcli.operations.logsops.get_cloudwatch_log_stream_events')
    @mock.patch('ebcli.operations.logsops.io.echo_with_pager')
    def test_stream_logs_in_terminal(
            self,
            echo_with_pager_mock,
            get_cloudwatch_stream_logs_for_instance_mock
    ):
        """Events for each stream are concatenated under a banner line
        ('============= <stream> - <group> ==============') and paged once.

        NOTE: the expected pager output mixes literal '\\n' (from the
        triple-quoted fixtures) with os.linesep (inserted by the code under
        test) -- the distinction is intentional.
        """
        log_stream_1_events = """[my_log_stream_1] [2018-03-19T23:19:55.811Z] INFO [2810] - [Initialization] : Starting activity...
[my_log_stream] [2018-03-19T23:19:55.811Z] INFO [2810] - [Initialization/AddonsBefore] : Starting activity..."""
        log_stream_2_events = """[my_log_stream_2] [2018-03-19T23:19:55.811Z] INFO [2810] - [Initialization] : Starting activity...
[my_log_stream] [2018-03-19T23:19:55.811Z] INFO [2810] - [Initialization/AddonsBefore] : Starting activity..."""
        get_cloudwatch_stream_logs_for_instance_mock.side_effect = [
            log_stream_1_events,
            log_stream_2_events
        ]

        logsops.stream_logs_in_terminal('log_group', ['log_stream_1', 'log_stream_2'])

        echo_with_pager_mock.assert_called_with(
            '{linesep}{linesep}============= log_stream_1 - log_group =============={linesep}{linesep}'
            '[my_log_stream_1] [2018-03-19T23:19:55.811Z] INFO [2810] - [Initialization] : Starting activity...\n'
            '[my_log_stream] [2018-03-19T23:19:55.811Z] INFO [2810] - [Initialization/AddonsBefore] : Starting activity...'
            '{linesep}'
            '{linesep}============= log_stream_2 - log_group =============={linesep}'
            '{linesep}'
            '[my_log_stream_2] [2018-03-19T23:19:55.811Z] INFO [2810] - [Initialization] : Starting activity...\n'
            '[my_log_stream] [2018-03-19T23:19:55.811Z] INFO [2810] - [Initialization/AddonsBefore] : Starting activity...'.format(
                linesep=os.linesep
            )
        )
    @mock.patch('ebcli.operations.logsops.cloudwatch.get_all_stream_names')
    @mock.patch('ebcli.operations.logsops.stream_logs_in_terminal')
    def test_retrieve_cloudwatch_logs__tail_instance_logs(
            self,
            stream_logs_in_terminal_mock,
            get_all_stream_names_mock
    ):
        """'tail' with the default (instance) source streams every stream of
        the log group to the terminal."""
        get_all_stream_names_mock.return_value = ['log_stream_1', 'log_stream_2']

        logsops.retrieve_cloudwatch_logs('some_log_group', 'tail')

        stream_logs_in_terminal_mock.assert_called_once_with('some_log_group', ['log_stream_1', 'log_stream_2'])
    @mock.patch('ebcli.operations.logsops.cloudwatch.get_all_stream_names')
    @mock.patch('ebcli.operations.logsops.stream_logs_in_terminal')
    def test_retrieve_cloudwatch_logs__tail_environment_health_logs(
            self,
            stream_logs_in_terminal_mock,
            get_all_stream_names_mock
    ):
        """'tail' with the environment-health source also streams every
        stream of the log group to the terminal."""
        get_all_stream_names_mock.return_value = ['log_stream_1', 'log_stream_2']

        logsops.retrieve_cloudwatch_logs(
            'some_log_group',
            'tail',
            cloudwatch_log_source=logs_operations_constants.LOG_SOURCES.ENVIRONMENT_HEALTH_LOG_SOURCE
        )

        stream_logs_in_terminal_mock.assert_called_once_with('some_log_group', ['log_stream_1', 'log_stream_2'])
    @pytest.mark.skipif(
        getattr(os, 'symlink', None) is None,
        reason="`os` module does not define `symlink` function for Python 2.7 on Windows"
    )
    @mock.patch('ebcli.operations.logsops.cloudwatch.get_all_stream_names')
    @mock.patch('ebcli.operations.logsops.get_cloudwatch_log_stream_events')
    def test_retrieve_cloudwatch_logs__info_type_bundle(
            self,
            get_cloudwatch_log_stream_events_mock,
            get_all_stream_names_mock
    ):
        """'bundle' writes each stream's events to
        .elasticbeanstalk/logs/latest/<stream>.log ('latest' is a symlink,
        hence the skipif guard)."""
        os.mkdir('.elasticbeanstalk')
        get_all_stream_names_mock.return_value = ['log_stream_1']
        get_cloudwatch_log_stream_events_mock.return_value = 'These are the full logs\\xe2\\x96'

        logsops.retrieve_cloudwatch_logs('some_log_group', 'bundle')

        self.assertEqual(
            'These are the full logs\\xe2\\x96',
            open(os.path.join('.elasticbeanstalk', 'logs', 'latest', 'log_stream_1.log')).read()
        )
    @pytest.mark.skipif(
        getattr(os, 'symlink', None) is None,
        reason="`os` module does not define `symlink` function for Python 2.7 on Windows"
    )
    @mock.patch('ebcli.operations.logsops._timestamped_directory_name')
    @mock.patch('ebcli.operations.logsops.cloudwatch.get_all_stream_names')
    @mock.patch('ebcli.operations.logsops.get_cloudwatch_log_stream_events')
    def test_retrieve_cloudwatch_logs__info_type_bundle__multiple_retrieves(
            self,
            get_cloudwatch_log_stream_events_mock,
            get_all_stream_names_mock,
            _timestamped_directory_name_mock
    ):
        """A second 'bundle' retrieval gets its own timestamped directory and
        repoints the 'latest' symlink at it."""
        os.mkdir('.elasticbeanstalk')
        # Two distinct timestamps so the second retrieval lands in a new directory.
        _timestamped_directory_name_mock.side_effect = [
            '180417_175442',
            '180417_175450'
        ]
        get_all_stream_names_mock.return_value = ['log_stream_1']
        get_cloudwatch_log_stream_events_mock.return_value = 'These are the full logs\\xe2\\x96'

        logsops.retrieve_cloudwatch_logs('some_log_group', 'bundle')

        self.assertEqual(
            'These are the full logs\\xe2\\x96',
            open(os.path.join('.elasticbeanstalk', 'logs', 'latest', 'log_stream_1.log')).read()
        )

        get_all_stream_names_mock.return_value = ['log_stream_2']
        get_cloudwatch_log_stream_events_mock.return_value = 'These are also the full logs\\xe2\\x96'

        logsops.retrieve_cloudwatch_logs('some_log_group', 'bundle')

        self.assertEqual(
            'These are also the full logs\\xe2\\x96',
            open(os.path.join('.elasticbeanstalk', 'logs', 'latest', 'log_stream_2.log')).read()
        )
    @mock.patch('ebcli.operations.logsops.cloudwatch.get_all_stream_names')
    @mock.patch('ebcli.operations.logsops.get_cloudwatch_log_stream_events')
    def test_retrieve_cloudwatch_logs__info_type_bundle__create_zip(
            self,
            get_cloudwatch_log_stream_events_mock,
            get_all_stream_names_mock
    ):
        """'bundle' with do_zip=True leaves a single .zip archive in
        .elasticbeanstalk/logs instead of a directory tree."""
        os.mkdir('.elasticbeanstalk')
        get_all_stream_names_mock.return_value = ['log_stream_1']
        get_cloudwatch_log_stream_events_mock.return_value = 'These are the full logs\\xe2\\x96'

        logsops.retrieve_cloudwatch_logs('some_log_group', 'bundle', do_zip=True)

        logs_dir_contents = os.listdir(os.path.join('.elasticbeanstalk', 'logs'))
        self.assertEqual('.zip', logs_dir_contents[0][-4:])
    @pytest.mark.skipif(
        getattr(os, 'symlink', None) is None,
        reason="`os` module does not define `symlink` function for Python 2.7 on Windows"
    )
    @mock.patch('ebcli.operations.logsops.cloudwatch.get_all_stream_names')
    @mock.patch('ebcli.operations.logsops.get_cloudwatch_log_stream_events')
    def test_retrieve_cloudwatch_logs__info_type_bundle__environment_health_source(
            self,
            get_cloudwatch_log_stream_events_mock,
            get_all_stream_names_mock
    ):
        """'bundle' for the environment-health source writes under the
        dedicated .elasticbeanstalk/logs/environment-health/ subtree."""
        os.mkdir('.elasticbeanstalk')
        get_all_stream_names_mock.return_value = ['log_stream_1']
        get_cloudwatch_log_stream_events_mock.return_value = 'These are the full logs\\xe2\\x96'

        logsops.retrieve_cloudwatch_logs(
            'some_log_group',
            'bundle',
            cloudwatch_log_source=logs_operations_constants.LOG_SOURCES.ENVIRONMENT_HEALTH_LOG_SOURCE
        )

        self.assertEqual(
            'These are the full logs\\xe2\\x96',
            open(os.path.join('.elasticbeanstalk', 'logs', 'environment-health', 'latest', 'log_stream_1.log')).read()
        )
    @mock.patch('ebcli.operations.logsops.cloudwatch.get_all_stream_names')
    @mock.patch('ebcli.operations.logsops.get_cloudwatch_log_stream_events')
    def test_retrieve_cloudwatch_logs__info_type_bundle__environment_health_log_source__create_zip(
            self,
            get_cloudwatch_log_stream_events_mock,
            get_all_stream_names_mock
    ):
        """'bundle' + do_zip=True for the environment-health source leaves a
        .zip inside .elasticbeanstalk/logs/environment-health."""
        os.mkdir('.elasticbeanstalk')
        get_all_stream_names_mock.return_value = ['log_stream_1']
        get_cloudwatch_log_stream_events_mock.return_value = 'These are the full logs\\xe2\\x96'

        logsops.retrieve_cloudwatch_logs(
            'some_log_group',
            'bundle',
            do_zip=True,
            cloudwatch_log_source=logs_operations_constants.LOG_SOURCES.ENVIRONMENT_HEALTH_LOG_SOURCE
        )

        logs_dir_contents = os.listdir(os.path.join('.elasticbeanstalk', 'logs', 'environment-health'))
        self.assertEqual('.zip', logs_dir_contents[0][-4:])
    @mock.patch('ebcli.operations.logsops.instance_log_streaming_enabled')
    def test_raise_if_instance_log_streaming_is_not_enabled__not_enabled__raises_exception(
            self,
            instance_log_streaming_enabled_mock
    ):
        """When instance log streaming is off, the guard raises an
        InvalidOptionsError naming the environment."""
        instance_log_streaming_enabled_mock.return_value = False

        with self.assertRaises(InvalidOptionsError) as context_manager:
            logsops.raise_if_instance_log_streaming_is_not_enabled('some-app', 'some-env')

        self.assertEqual(
            """Can't retrieve instance logs for environment some-env. Instance log streaming is disabled.""",
            str(context_manager.exception)
        )
    @mock.patch('ebcli.operations.logsops.instance_log_streaming_enabled')
    def test_raise_if_instance_log_streaming_is_not_enabled__enabled(
            self,
            instance_log_streaming_enabled_mock
    ):
        """When instance log streaming is on, the guard returns without raising."""
        instance_log_streaming_enabled_mock.return_value = True

        logsops.raise_if_instance_log_streaming_is_not_enabled('some-app', 'some-env')
    @mock.patch('ebcli.operations.logsops.environment_health_streaming_enabled')
    def test_raise_if_environment_health_log_streaming_is_not_enabled__not_enabled__raises_exception(
            self,
            environment_health_streaming_enabled_mock
    ):
        """When environment-health log streaming is off, the guard raises an
        InvalidOptionsError naming the environment."""
        environment_health_streaming_enabled_mock.return_value = False

        with self.assertRaises(InvalidOptionsError) as context_manager:
            logsops.raise_if_environment_health_log_streaming_is_not_enabled('some-app', 'some-env')

        self.assertEqual(
            """Can't retrieve environment-health logs for environment some-env. Environment-health log streaming is disabled.""",
            str(context_manager.exception)
        )
@mock.patch('ebcli.operations.logsops.environment_health_streaming_enabled')
def test_raise_if_environment_health_log_streaming_is_not_enabled__raises_exception(
self,
environment_health_streaming_enabled_mock
):
environment_health_streaming_enabled_mock.return_value = True
logsops.raise_if_environment_health_log_streaming_is_not_enabled('some-app', 'some-env')
    @mock.patch('ebcli.operations.logsops._get_cloudwatch_messages')
    @mock.patch('ebcli.operations.logsops._wait_to_poll_cloudwatch')
    def test_get_cloudwatch_logs__simulate_ctrl_c_after_polling_cloudwatch_three_times(
            self,
            _wait_to_poll_cloudwatch_mock,
            _get_cloudwatch_messages_mock
    ):
        """get_cloudwatch_messages keeps polling (three successful batches,
        each followed by a poll wait) and exits cleanly when the fourth poll
        raises KeyboardInterrupt (simulated Ctrl+C)."""
        _wait_to_poll_cloudwatch_mock.return_value = None
        message_1 = '[my-log-stream-name] b\'I, [2018-03-08T02:35:00.179536+0000#21452] INFO -- Packer: 1520476500,,ui,message, HVM AMI builder: + for TAR_BALL in \\\'"$@"\\\'\''
        message_2 = "[my-log-stream-name] b'I, [2018-03-08T02:35:04.381352+0000#21452] INFO -- Packer: 1520476504,,ui,message, HVM AMI builder: \\x1b[K 100% |\\xe2\\x96\\x88\\xe2\\x96\\x88\\xe2\\x96\\x88\\xe2\\x96\\x88\\xe2\\x96\\x88\\xe2\\x96\\x88\\xe2\\x96\\x88\\xe2\\x96\\x88\\xe2\\x96\\x88\\xe2\\x96\\x88'"
        message_3 = '\\xe2\\x96\\x88\\xe2\\x96\\x88\\xe2\\x96\\x88\\xe2\\x96\\x88\\xe2\\x96\\x88\\xe2\\x96\\x88\\xe2\\x96\\x88\\xe2\\x96\\x88\\xe2\\x96\\x88\\xe2\\x96\\x88\\xe2\\x96\\x88\\xe2\\x96\\x88\\xe2\\x96\\x88\\xe2\\x96\\x88\\xe2\\x96\\x88\\xe2\\x96\\x88\\xe2\\x96\\x88\\xe2\\x96\\x88\\xe2\\x96\\x88\\xe2\\x96\\x88\\xe2\\x96\\x88\\xe2\\x96\\x88| 460kB 1.4MB/s'
        # Each successful poll returns (messages, next_token, start_time);
        # the fourth poll simulates the user pressing Ctrl+C.
        _get_cloudwatch_messages_mock.side_effect = [
            (
                [message_1],
                'f/33907759104553211733662036833768876100389685601243824177',
                None
            ),
            (
                [message_2],
                'f/12312311231231211733662036833768876100389685601243824177',
                1520476504381
            ),
            (
                [message_3],
                'f/34536456456456456433662036833768876100389685601243824177',
                1520476508712
            ),
            KeyboardInterrupt
        ]

        streamer = mock.MagicMock()
        streamer.stream_event = mock.MagicMock()
        stream_event_calls = [
            mock.call(message_1),
            mock.call(message_2),
            mock.call(message_3)
        ]

        # Stand-in for the caller-provided handler that forwards each batch
        # of messages to the streamer.
        def messages_handler(messages):
            [streamer.stream_event(message) for message in messages]

        logsops.get_cloudwatch_messages(
            log_group_name='some-log-group-name',
            stream_name='some-log-group-stream-name',
            formatter=streamer,
            next_token=None,
            start_time=None,
            messages_handler=messages_handler
        )

        streamer.stream_event.assert_has_calls(stream_event_calls)
        # One wait per successful batch; the KeyboardInterrupt poll never waits.
        self.assertEqual(3, _wait_to_poll_cloudwatch_mock.call_count)
@mock.patch('ebcli.operations.logsops._get_cloudwatch_messages')
@mock.patch('ebcli.operations.logsops._wait_to_poll_cloudwatch')
def test_get_cloudwatch_logs__exit_with_service_error(
        self,
        _wait_to_poll_cloudwatch_mock,
        _get_cloudwatch_messages_mock
):
    """A ServiceError on the first poll ends the loop before any wait occurs."""
    _wait_to_poll_cloudwatch_mock.return_value = None
    _get_cloudwatch_messages_mock.side_effect = [
        ServiceError('Dummy service error message', code=4)
    ]
    streamer = mock.MagicMock()

    def messages_handler(messages):
        for message in messages:
            streamer.stream_event(message)

    logsops.get_cloudwatch_messages(
        log_group_name='some-log-group-name',
        stream_name='some-log-group-stream-name',
        formatter=streamer,
        next_token=None,
        start_time=None,
        messages_handler=messages_handler
    )

    # The loop never slept because it aborted on the very first poll.
    self.assertEqual(0, _wait_to_poll_cloudwatch_mock.call_count)
@mock.patch('ebcli.operations.logsops._get_cloudwatch_messages')
@mock.patch('ebcli.operations.logsops._wait_to_poll_cloudwatch')
@mock.patch('ebcli.operations.logsops.LOG.debug')
@mock.patch('traceback.format_exc')
def test_get_cloudwatch_logs__retries_after_encountering_general_exception(
        self,
        traceback_format_exc_mock,
        LOG_debug_mock,
        _wait_to_poll_cloudwatch_mock,
        _get_cloudwatch_messages_mock
):
    """A generic exception is logged at debug level and polling continues."""
    _wait_to_poll_cloudwatch_mock.return_value = None
    traceback_format_exc_mock.return_value = 'This is a dummy stack trace'
    # First poll blows up with a plain Exception; the retry is then
    # terminated by a simulated Ctrl+C.
    _get_cloudwatch_messages_mock.side_effect = [
        Exception('This is a general exception'),
        KeyboardInterrupt
    ]
    streamer = mock.MagicMock()

    def messages_handler(messages):
        for message in messages:
            streamer.stream_event(message)

    logsops.get_cloudwatch_messages(
        log_group_name='some-log-group-name',
        stream_name='some-log-group-stream-name',
        formatter=streamer,
        next_token=None,
        start_time=None,
        messages_handler=messages_handler
    )

    LOG_debug_mock.assert_has_calls(
        [
            mock.call('Exception raised: This is a general exception'),
            mock.call('This is a dummy stack trace'),
        ]
    )
    # Exactly one sleep: after the swallowed exception, before the interrupt.
    self.assertEqual(1, _wait_to_poll_cloudwatch_mock.call_count)
@mock.patch('ebcli.operations.logsops.cloudwatch.get_log_events')
def test__get_cloudwatch_messages(self, get_log_events_mock):
    """_get_cloudwatch_messages renders each event as '[<stream>] <message>'
    and returns (messages, next_forward_token, timestamp).

    Expected strings differ per interpreter: the Python 3 fixture carries a
    ``b'...'`` repr wrapper that Python 2 does not produce.
    Fix: removed a stray debugging ``print(actual_events)`` left in the test.
    """
    get_log_events_mock.return_value = {
        'events': [
            {
                'timestamp': 1520476500179,
                'message': 'I, [2018-03-08T02:35:00.179536+0000#21452] INFO -- Packer: 1520476500,,ui,message, HVM AMI builder: + for TAR_BALL in \'"$@"\'',
                'ingestionTime': 1520476506246
            },
            {
                'timestamp': 1520476504381,
                'message': 'I, [2018-03-08T02:35:04.381352+0000#21452] INFO -- Packer: 1520476504,,ui,message, HVM AMI builder: \\x1b[K 100% |\\xe2\\x96| 460kB 1.4MB/s',
                'ingestionTime': 1520476506246
            }
        ],
        'nextForwardToken': 'f/33907759104553211733662036833768876100389685601243824177',
        'nextBackwardToken': 'b/33907759010845480409436358393035787918721270553114116096',
        'ResponseMetadata': {
            'RequestId': '51637641-2279-11e8-89e2-977b487bfa41',
            'HTTPStatusCode': 200,
            'date': 'Thu, 08 Mar 2018 02:35:08 GMT',
            'RetryAttempts': 0
        }
    }
    if sys.version_info < (3, 0):
        expected_events = [
            '[my-log-stream-name] I, [2018-03-08T02:35:00.179536+0000#21452] INFO -- Packer: 1520476500,,ui,message, HVM AMI builder: + for TAR_BALL in \'"$@"\'',
            '[my-log-stream-name] I, [2018-03-08T02:35:04.381352+0000#21452] INFO -- Packer: 1520476504,,ui,message, HVM AMI builder: \\x1b[K 100% |\\xe2\\x96| 460kB 1.4MB/s'
        ]
    else:
        expected_events = [
            '[my-log-stream-name] b\'I, [2018-03-08T02:35:00.179536+0000#21452] INFO -- Packer: 1520476500,,ui,message, HVM AMI builder: + for TAR_BALL in \\\'"$@"\\\'\'',
            "[my-log-stream-name] b'I, [2018-03-08T02:35:04.381352+0000#21452] INFO -- Packer: 1520476504,,ui,message, HVM AMI builder: \\\\x1b[K 100% |\\\\xe2\\\\x96| 460kB 1.4MB/s'"
        ]
    actual_events = logsops._get_cloudwatch_messages(
        'my-log-group',
        'my-log-stream-name'
    )
    self.assertEqual(
        (
            expected_events,
            'f/33907759104553211733662036833768876100389685601243824177',
            None
        ),
        actual_events
    )
@mock.patch('ebcli.operations.logsops.elasticbeanstalk.get_environment')
def test_deployment_logs_log_group_name__non_windows_platform(self, get_environment_mock):
    """Non-Windows platforms use the eb-activity.log deployment log group."""
    platform = mock.MagicMock()
    platform.name = 'arn:aws:elasticbeanstalk:ap-southeast-2::platform/PHP 5.4 running on 64bit Amazon Linux 2014.03/1.1.0'
    environment = mock.MagicMock()
    environment.platform = platform
    get_environment_mock.return_value = environment

    log_group_name = logsops.deployment_logs_log_group_name('my-env')

    self.assertEqual('/aws/elasticbeanstalk/my-env/var/log/eb-activity.log', log_group_name)
@mock.patch('ebcli.operations.logsops.elasticbeanstalk.get_environment')
def test_deployment_logs_log_group_name__windows_platform(self, get_environment_mock):
    """Windows platforms use the EBDeploy-Log deployment log group."""
    platform = mock.MagicMock()
    platform.name = 'arn:aws:elasticbeanstalk:ap-southeast-2::platform/IIS 10.0 running on 64bit Windows Server Core 2016/1.2.0'
    environment = mock.MagicMock()
    environment.platform = platform
    get_environment_mock.return_value = environment

    log_group_name = logsops.deployment_logs_log_group_name('my-env')

    self.assertEqual('/aws/elasticbeanstalk/my-env/EBDeploy-Log', log_group_name)
def test_get_platform_builder_group_name(self):
    """Platform-builder log groups live under /aws/elasticbeanstalk/platform/."""
    group_name = logsops._get_platform_builder_group_name('platform_name')
    self.assertEqual('/aws/elasticbeanstalk/platform/platform_name', group_name)
def test_raise_if_environment_is_not_using_enhanced_health(self):
    """'basic' health reporting cannot support health-transition streaming."""
    configuration_settings = {
        'OptionSettings': [
            {
                'Namespace': 'aws:elasticbeanstalk:healthreporting:system',
                'OptionName': 'SystemType',
                'Value': 'basic'
            }
        ]
    }

    with self.assertRaises(InvalidOptionsError) as context_manager:
        logsops._raise_if_environment_is_not_using_enhanced_health(configuration_settings)

    self.assertEqual(
        'Enhanced health disabled. Could not setup health-transitions log streaming.',
        str(context_manager.exception)
    )
def test_raise_if_environment_is_using_enhanced_health(self):
    """'enhanced' health reporting passes the guard without raising."""
    configuration_settings = {
        'OptionSettings': [
            {
                'Namespace': 'aws:elasticbeanstalk:healthreporting:system',
                'OptionName': 'SystemType',
                'Value': 'enhanced'
            }
        ]
    }

    # Must not raise.
    logsops._raise_if_environment_is_not_using_enhanced_health(configuration_settings)
@mock.patch('ebcli.operations.logsops.io.echo_with_pager')
@mock.patch('ebcli.operations.logsops.utils.get_data_from_url')
def test_handle_tail_logs(
        self,
        get_data_from_url_mock,
        echo_with_pager_mock
):
    """_handle_tail_logs pages one '=== <instance-id> ===' section per instance."""
    get_data_from_url_mock.return_value = mock_logs.INSTANCE_TAIL_LOGS_RESPONSE

    logsops._handle_tail_logs(
        {
            'i-090689581e5afcfc6': 'https://elasticbeanstalk-us-east-1-1231231231234.s3.amazonaws.com/resources/environments/logs/tail/e-spfgk5xbd',
            'i-053efe7c102d0a540': 'https://elasticbeanstalk-us-east-1-1231231231234.s3.amazonaws.com/resources/environments/logs/tail/e-spfgk5xbd'
        }
    )

    # Body rendered for every instance (identical because both URLs return
    # the same mocked tail-log payload).
    instance_log_tail = '-------------------------------------\n/var/log/awslogs.log\n-------------------------------------\n{\'skipped_events_count\': 0, \'first_event\': {\'timestamp\': 1522962583519, \'start_position\': 559799L, \'end_position\': 560017L}, \'fallback_events_count\': 0, \'last_event\': {\'timestamp\': 1522962583519, \'start_position\': 559799L, \'end_position\': 560017L}, \'source_id\': \'77b026040b93055eb448bdc0b59e446f\', \'num_of_events\': 1, \'batch_size_in_bytes\': 243}\n\n\n\n-------------------------------------\n/var/log/httpd/error_log\n-------------------------------------\n[Thu Apr 05 19:54:23.624780 2018] [mpm_prefork:warn] [pid 3470] AH00167: long lost child came home! (pid 3088)\n\n\n\n-------------------------------------\n/var/log/httpd/access_log\n-------------------------------------\n172.31.69.153 (94.208.192.103) - - [05/Apr/2018:20:57:55 +0000] "HEAD /pma/ HTTP/1.1" 404 - "-" "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36"\n\n\n\n-------------------------------------\n/var/log/eb-activity.log\n-------------------------------------\n + chown -R webapp:webapp /var/app/ondeck\n[2018-04-05T19:54:21.630Z] INFO [3555] - [Application update app-180406_044630@3/AppDeployStage0/AppDeployPreHook/02_setup_envvars.sh] : Starting activity...\n\n\n-------------------------------------\n/tmp/sample-app.log\n-------------------------------------\n2018-04-05 20:52:51 Received message: \\xe2\\x96\\x88\\xe2\n\n\n\n-------------------------------------\n/var/log/eb-commandprocessor.log\n-------------------------------------\n[2018-04-05T19:45:05.526Z] INFO [2853] : Running 2 of 2 actions: AppDeployPostHook...'

    def expected_output(instance_ids):
        return os.linesep.join(
            '============= {} =============={}{}'.format(instance_id, os.linesep, instance_log_tail)
            for instance_id in instance_ids
        )

    # six.iteritems does not guarantee the order in which key, value pairs
    # of a dict are parsed, so accept either instance ordering.
    try:
        echo_with_pager_mock.assert_called_with(
            expected_output(['i-090689581e5afcfc6', 'i-053efe7c102d0a540'])
        )
    except AssertionError:
        echo_with_pager_mock.assert_called_with(
            expected_output(['i-053efe7c102d0a540', 'i-090689581e5afcfc6'])
        )
class TestSetupLogs(unittest.TestCase):
    """Tests that need a scratch working directory on disk."""

    def setUp(self):
        # Remember where we started, then work from a clean 'testDir'.
        self.root_dir = os.getcwd()
        scratch_dir = 'testDir'
        if os.path.isdir(scratch_dir):
            shutil.rmtree(scratch_dir)
        os.mkdir(scratch_dir)
        os.chdir(scratch_dir)

    def tearDown(self):
        # Leave the scratch directory before deleting it.
        os.chdir(self.root_dir)
        shutil.rmtree('testDir')
@mock.patch('ebcli.operations.logsops.utils.save_file_from_url')
@mock.patch('ebcli.operations.logsops.fileoperations.delete_file')
@mock.patch('ebcli.operations.logsops.fileoperations.unzip_folder')
def test_download_logs_for_all_instances(
        self,
        unzip_folder_mock,
        delete_file_mock,
        save_file_from_url_mock
):
    """Each instance's zip is downloaded, unzipped into a per-instance dir, then deleted."""
    logs_root = os.path.join('testDir', '.elasticbeanstalk', 'logs', '180404_044924')
    downloaded_zip = os.path.join(logs_root, 'logs.zip')
    save_file_from_url_mock.return_value = downloaded_zip

    logsops._download_logs_for_all_instances(
        instance_id_list={
            'i-090689581e5afcfc6': 'https://elasticbeanstalk-us-east-1-1231231231234.s3.amazonaws.com/resources/environments/logs/tail/e-spfgk5xbd',
            'i-053efe7c102d0a540': 'https://elasticbeanstalk-us-east-1-1231231231234.s3.amazonaws.com/resources/environments/logs/tail/e-spfgk5xbd'
        },
        logs_location=logs_root
    )

    unzip_folder_mock.assert_has_calls(
        [
            mock.call(downloaded_zip, os.path.join(logs_root, 'i-090689581e5afcfc6')),
            mock.call(downloaded_zip, os.path.join(logs_root, 'i-053efe7c102d0a540'))
        ],
        any_order=True
    )
    # The downloaded archive is removed once per instance.
    delete_file_mock.assert_has_calls(
        [
            mock.call(downloaded_zip),
            mock.call(downloaded_zip)
        ]
    )
@mock.patch('ebcli.operations.logsops.fileoperations.delete_directory')
@mock.patch('ebcli.operations.logsops.fileoperations.zip_up_folder')
@mock.patch('ebcli.operations.logsops.fileoperations.set_user_only_permissions')
def test_handle_log_zipping(
        self,
        set_user_only_permissions_mock,
        zip_up_folder_mock,
        delete_directory_mock
):
    """Zipping replaces the logs directory with a user-only-readable .zip."""
    logs_dir = os.path.join('testDir', '.elasticbeanstalk', 'logs', '180404_044924')
    zipped_logs = os.path.join('testDir', '.elasticbeanstalk', 'logs', '180404_044924.zip')

    logsops._handle_log_zipping(logs_location=logs_dir)

    zip_up_folder_mock.assert_called_once_with(logs_dir, zipped_logs)
    delete_directory_mock.assert_called_once_with(logs_dir)
    set_user_only_permissions_mock.assert_called_once_with(zipped_logs)
@mock.patch('ebcli.operations.logsops.fileoperations.set_user_only_permissions')
@mock.patch('ebcli.operations.logsops.fileoperations.get_logs_location')
@mock.patch('ebcli.operations.logsops._timestamped_directory_name')
@mock.patch('ebcli.operations.logsops._download_logs_for_all_instances')
@mock.patch('ebcli.operations.logsops._handle_log_zipping')
@mock.patch('ebcli.operations.logsops._attempt_update_symlink_to_latest_logs_retrieved')
def test_handle_bundle_logs__without_zipping(
        self,
        attempt_update_of_symlink_to_latest_logs_mock,
        handle_log_zipping_mock,
        download_logs_for_all_instances_mock,
        _timestamped_directory_name_mock,
        get_logs_location_mock,
        set_user_only_permissions_mock
):
    """do_zip=False leaves logs unzipped and refreshes the 'latest' symlink."""
    os.makedirs(os.path.join('.elasticbeanstalk', 'logs'))
    url_mappings = {
        'i-090689581e5afcfc6': 'https://elasticbeanstalk-us-east-1-1231231231234.s3.amazonaws.com/resources/environments/logs/tail/e-spfgk5xbd',
        'i-053efe7c102d0a540': 'https://elasticbeanstalk-us-east-1-1231231231234.s3.amazonaws.com/resources/environments/logs/tail/e-spfgk5xbd'
    }
    logs_location = os.path.join('.elasticbeanstalk', 'logs', '180404_044924')
    _timestamped_directory_name_mock.return_value = '180404_044924'
    get_logs_location_mock.return_value = logs_location

    logsops._handle_bundle_logs(url_mappings, do_zip=False)

    get_logs_location_mock.assert_called_with('180404_044924')
    download_logs_for_all_instances_mock.assert_called_with(url_mappings, logs_location)
    set_user_only_permissions_mock.assert_called_once_with(logs_location)
    attempt_update_of_symlink_to_latest_logs_mock.assert_called_once_with(logs_location)
    handle_log_zipping_mock.assert_not_called()
@mock.patch('ebcli.operations.logsops.fileoperations.set_user_only_permissions')
@mock.patch('ebcli.operations.logsops.fileoperations.get_logs_location')
@mock.patch('ebcli.operations.logsops._timestamped_directory_name')
@mock.patch('ebcli.operations.logsops._download_logs_for_all_instances')
@mock.patch('ebcli.operations.logsops._handle_log_zipping')
@mock.patch('ebcli.operations.logsops._attempt_update_symlink_to_latest_logs_retrieved')
def test_handle_bundle_logs__with_zipping(
        self,
        attempt_update_of_symlink_to_latest_logs_mock,
        handle_log_zipping_mock,
        download_logs_for_all_instances_mock,
        _timestamped_directory_name_mock,
        get_logs_location_mock,
        set_user_only_permissions_mock
):
    """do_zip=True zips the downloaded logs and skips the symlink update."""
    os.makedirs(os.path.join('.elasticbeanstalk', 'logs'))
    url_mappings = {
        'i-090689581e5afcfc6': 'https://elasticbeanstalk-us-east-1-1231231231234.s3.amazonaws.com/resources/environments/logs/tail/e-spfgk5xbd',
        'i-053efe7c102d0a540': 'https://elasticbeanstalk-us-east-1-1231231231234.s3.amazonaws.com/resources/environments/logs/tail/e-spfgk5xbd'
    }
    logs_location = os.path.join('.elasticbeanstalk', 'logs', '180404_044924')
    _timestamped_directory_name_mock.return_value = '180404_044924'
    get_logs_location_mock.return_value = logs_location

    logsops._handle_bundle_logs(url_mappings, do_zip=True)

    get_logs_location_mock.assert_called_with('180404_044924')
    download_logs_for_all_instances_mock.assert_called_with(url_mappings, logs_location)
    set_user_only_permissions_mock.assert_called_once_with(logs_location)
    attempt_update_of_symlink_to_latest_logs_mock.assert_not_called()
    handle_log_zipping_mock.assert_called_once_with(logs_location)
@mock.patch('ebcli.operations.logsops.elasticbeanstalk.retrieve_environment_info')
def test_def_get_instance_id_list(
        self,
        get_instance_id_list_mock
):
    """get_instance_log_url_mappings maps every instance id in the
    RetrieveEnvironmentInfo response to its S3 log URL.

    Fix: removed a stray debugging ``print(...)`` that also invoked the
    operation under test a redundant second time.
    """
    get_instance_id_list_mock.return_value = mock_logs.REQUEST_ENVIRONMENT_INFO_RESPONSE

    self.assertEqual(
        {
            'i-024a31a441247971d': 'https://elasticbeanstalk-us-east-1-123123123123.s3.amazonaws.com',
            'i-0dce0f6c5e2d5fa48': 'https://elasticbeanstalk-us-east-1-123123123123.s3.amazonaws.com',
            'i-090689581e5afcfc6': 'https://elasticbeanstalk-us-east-1-123123123123.s3.amazonaws.com',
            'i-053efe7c102d0a540': 'https://elasticbeanstalk-us-east-1-123123123123.s3.amazonaws.com'
        },
        logsops.get_instance_log_url_mappings('some-env', 'tail')
    )
@mock.patch('ebcli.operations.logsops.get_instance_log_url_mappings')
@mock.patch('ebcli.operations.logsops._handle_bundle_logs')
@mock.patch('ebcli.operations.logsops._handle_tail_logs')
def test_get_logs__tailed_logs(
        self,
        handle_tail_logs_mock,
        handle_bundle_logs_mock,
        get_instance_id_list_mock
):
    """'tail' requests are routed to _handle_tail_logs with all instances."""
    url_mappings = {
        'i-024a31a441247971d': 'https://elasticbeanstalk-us-east-1-123123123123.s3.amazonaws.com',
        'i-0dce0f6c5e2d5fa48': 'https://elasticbeanstalk-us-east-1-123123123123.s3.amazonaws.com',
        'i-090689581e5afcfc6': 'https://elasticbeanstalk-us-east-1-123123123123.s3.amazonaws.com',
        'i-053efe7c102d0a540': 'https://elasticbeanstalk-us-east-1-123123123123.s3.amazonaws.com'
    }
    get_instance_id_list_mock.return_value = url_mappings

    logsops.get_logs('my-env', 'tail')

    handle_tail_logs_mock.assert_called_once_with(url_mappings)
    handle_bundle_logs_mock.assert_not_called()
@mock.patch('ebcli.operations.logsops.get_instance_log_url_mappings')
@mock.patch('ebcli.operations.logsops._handle_bundle_logs')
@mock.patch('ebcli.operations.logsops._handle_tail_logs')
def test_get_logs__bundled_logs(
        self,
        handle_tail_logs_mock,
        handle_bundle_logs_mock,
        get_instance_id_list_mock
):
    """'bundle' requests are routed to _handle_bundle_logs (no zipping by default)."""
    url_mappings = {
        'i-024a31a441247971d': 'https://elasticbeanstalk-us-east-1-123123123123.s3.amazonaws.com',
        'i-0dce0f6c5e2d5fa48': 'https://elasticbeanstalk-us-east-1-123123123123.s3.amazonaws.com',
        'i-090689581e5afcfc6': 'https://elasticbeanstalk-us-east-1-123123123123.s3.amazonaws.com',
        'i-053efe7c102d0a540': 'https://elasticbeanstalk-us-east-1-123123123123.s3.amazonaws.com'
    }
    get_instance_id_list_mock.return_value = url_mappings

    logsops.get_logs('my-env', 'bundle')

    handle_tail_logs_mock.assert_not_called()
    handle_bundle_logs_mock.assert_called_once_with(url_mappings, False)
@mock.patch('ebcli.operations.logsops.get_instance_log_url_mappings')
@mock.patch('ebcli.operations.logsops._handle_bundle_logs')
@mock.patch('ebcli.operations.logsops._handle_tail_logs')
def test_get_logs__tailed_logs__specific_instance(
        self,
        handle_tail_logs_mock,
        handle_bundle_logs_mock,
        get_instance_id_list_mock
):
    """With an instance id, only that instance's mapping is tailed."""
    get_instance_id_list_mock.return_value = {
        'i-024a31a441247971d': 'https://elasticbeanstalk-us-east-1-123123123123.s3.amazonaws.com',
        'i-0dce0f6c5e2d5fa48': 'https://elasticbeanstalk-us-east-1-123123123123.s3.amazonaws.com',
        'i-090689581e5afcfc6': 'https://elasticbeanstalk-us-east-1-123123123123.s3.amazonaws.com',
        'i-053efe7c102d0a540': 'https://elasticbeanstalk-us-east-1-123123123123.s3.amazonaws.com'
    }

    logsops.get_logs('my-env', 'tail', False, 'i-090689581e5afcfc6')

    expected_mapping = {
        'i-090689581e5afcfc6': 'https://elasticbeanstalk-us-east-1-123123123123.s3.amazonaws.com'
    }
    handle_tail_logs_mock.assert_called_once_with(expected_mapping)
    handle_bundle_logs_mock.assert_not_called()
@mock.patch('ebcli.operations.logsops.get_instance_log_url_mappings')
@mock.patch('ebcli.operations.logsops._handle_bundle_logs')
@mock.patch('ebcli.operations.logsops._handle_tail_logs')
def test_get_logs__bundled_logs__specific_instance(
        self,
        handle_tail_logs_mock,
        handle_bundle_logs_mock,
        get_instance_id_list_mock
):
    """With an instance id and do_zip=True, only that instance is bundled and zipped."""
    get_instance_id_list_mock.return_value = {
        'i-024a31a441247971d': 'https://elasticbeanstalk-us-east-1-123123123123.s3.amazonaws.com',
        'i-0dce0f6c5e2d5fa48': 'https://elasticbeanstalk-us-east-1-123123123123.s3.amazonaws.com',
        'i-090689581e5afcfc6': 'https://elasticbeanstalk-us-east-1-123123123123.s3.amazonaws.com',
        'i-053efe7c102d0a540': 'https://elasticbeanstalk-us-east-1-123123123123.s3.amazonaws.com'
    }

    logsops.get_logs('my-env', 'bundle', True, 'i-090689581e5afcfc6')

    expected_mapping = {
        'i-090689581e5afcfc6': 'https://elasticbeanstalk-us-east-1-123123123123.s3.amazonaws.com'
    }
    handle_tail_logs_mock.assert_not_called()
    handle_bundle_logs_mock.assert_called_once_with(expected_mapping, True)
def test_updated_instance_id_list(self):
    """Filtering by an unknown instance id raises NotFoundError."""
    url_mappings = {
        'i-024a31a441247971d': 'https://elasticbeanstalk-us-east-1-123123123123.s3.amazonaws.com',
    }

    with self.assertRaises(NotFoundError) as context_manager:
        logsops._updated_instance_id_list(url_mappings, 'i-123123a455ef666')

    self.assertEqual(
        """Can't find instance "i-123123a455ef666" in the environment's instance logs on CloudWatch Logs.""",
        str(context_manager.exception)
    )
@mock.patch('ebcli.operations.logsops.cloudwatch.log_group_exists')
@mock.patch('ebcli.operations.logsops._wait_to_poll_cloudwatch')
def test_wait_for_log_group_to_come_into_existence(
        self,
        _wait_to_poll_cloudwatch_mock,
        log_group_exists_mock
):
    """Polls in 10-second intervals until the log group exists."""
    log_group_exists_mock.side_effect = [False, False, True]

    logsops.wait_for_log_group_to_come_into_existence('my-log-group')

    # Two negative checks -> two sleeps; the third check succeeds.
    _wait_to_poll_cloudwatch_mock.assert_has_calls([mock.call(10)] * 2)
@mock.patch('ebcli.operations.logsops.wait_for_log_group_to_come_into_existence')
@mock.patch('ebcli.operations.logsops.stream_single_stream')
def test_stream_platform_logs(
        self,
        stream_single_stream_mock,
        wait_for_log_group_to_come_into_existence_mock
):
    """Platform streaming waits for the builder log group, then streams the version stream."""
    logsops.stream_platform_logs('my-platform', '1.0.0')

    wait_for_log_group_to_come_into_existence_mock.assert_called_once_with(
        '/aws/elasticbeanstalk/platform/my-platform',
        4
    )
    stream_single_stream_mock.assert_called_once_with(
        '/aws/elasticbeanstalk/platform/my-platform',
        '1.0.0',
        4,
        None,
        None
    )
| 47.993675
| 1,790
| 0.658763
| 9,220
| 83,461
| 5.576681
| 0.060087
| 0.047844
| 0.044382
| 0.076084
| 0.901025
| 0.884999
| 0.862555
| 0.849719
| 0.842251
| 0.823035
| 0
| 0.057094
| 0.220582
| 83,461
| 1,738
| 1,791
| 48.021289
| 0.733317
| 0.007453
| 0
| 0.647333
| 0
| 0.024667
| 0.349824
| 0.207568
| 0
| 0
| 0
| 0
| 0.074
| 1
| 0.056
| false
| 0.006667
| 0.006667
| 0
| 0.067333
| 0.001333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
92390bcadcb6c580d65f08ee56ebc4292e5c71d2
| 33,638
|
py
|
Python
|
couch/tests/test_database.py
|
madron/django-couch
|
21e4c3a0022bdb7cfaff017f72025afbf5220b3b
|
[
"MIT"
] | null | null | null |
couch/tests/test_database.py
|
madron/django-couch
|
21e4c3a0022bdb7cfaff017f72025afbf5220b3b
|
[
"MIT"
] | null | null | null |
couch/tests/test_database.py
|
madron/django-couch
|
21e4c3a0022bdb7cfaff017f72025afbf5220b3b
|
[
"MIT"
] | null | null | null |
import itertools
import warnings
from django.test import override_settings
from django.test import SimpleTestCase
from couch.test import CouchTestCase
from .. import Database
from .. import documents
from .. import exceptions
from .. import Server
class Book(documents.Document):
    """Sample document type used by the tests: a 'book' stored in 'mydb'."""
    title = documents.TextField()
    pages = documents.IntegerField()

    class Meta:
        # Binds this document type to its database and type discriminator.
        database_name = 'mydb'
        document_type = 'book'
class Author(documents.Document):
    """Second sample document type ('author'), sharing the 'mydb' database."""
    name = documents.TextField()

    class Meta:
        database_name = 'mydb'
        document_type = 'author'
@override_settings(COUCH_SERVERS=dict(default=dict()))
class DatabaseNoCouchTest(SimpleTestCase):
    """Database construction tests that never touch a live CouchDB server."""

    def _assert_server(self, server, alias='default', protocol='http',
                       host='localhost', port=5984):
        # Shared check for the server a Database ends up bound to; defaults
        # mirror the settings-less configuration.
        self.assertEqual(server.alias, alias)
        self.assertEqual(server.protocol, protocol)
        self.assertEqual(server.host, host)
        self.assertEqual(server.port, port)
        self.assertEqual(server.username, None)
        self.assertEqual(server.password, None)

    def test_init_default(self):
        db = Database('mydb')
        self.assertEqual(db.name, 'mydb')
        self._assert_server(db.server)

    @override_settings(COUCH_SERVERS=dict(default=dict(), another=dict(PORT=9999)))
    def test_init_by_name(self):
        db = Database('anotherdb', alias='another')
        self.assertEqual(db.name, 'anotherdb')
        self._assert_server(db.server, alias='another', port=9999)

    def test_init_by_instance(self):
        db = Database('customdb', server=Server(host='example.com'))
        self.assertEqual(db.name, 'customdb')
        self._assert_server(db.server, host='example.com')
class DatabaseTest(CouchTestCase):
def setUp(self):
    # Fresh server plus two scratch databases for every test.
    self.server = Server()
    self.db1 = self.server.create_database('db1')
    self.db2 = self.server.create_database('db2')
    # Resolved (prefixed) database names, used when a test talks to the
    # server directly instead of going through the Database wrapper.
    self.db1_name = self.db1._get_database_name()
    self.db2_name = self.db2._get_database_name()
def test_database_name(self):
    """Test databases get the 't_e_s_t__' name prefix."""
    for database, expected_name in (
        (self.db1, 't_e_s_t__db1'),
        (self.db2, 't_e_s_t__db2'),
    ):
        self.assertEqual(database._get_database_name(), expected_name)
def test_get(self):
    """get() only finds documents in its own database."""
    self.server.put('{}/docid1'.format(self.db1_name), json={})
    self.server.put('{}/docid2'.format(self.db2_name), json={})

    for database, present_id, missing_id in (
        (self.db1, 'docid1', 'docid2'),
        (self.db2, 'docid2', 'docid1'),
    ):
        document = database.get(present_id)
        self.assertEqual(document['_id'], present_id)
        with self.assertRaises(exceptions.CouchError) as context:
            database.get(missing_id)
        self.assertEqual(context.exception.args[0]['error'], 'not_found')
def test_get_acceptable_status_codes_ok(self):
    """A 200 response is accepted when it is in the acceptable list."""
    self.server.put('{}/docid'.format(self.db1_name), json={})
    document = self.db1.get('docid', acceptable_status_codes=[200])
    self.assertEqual(document['_id'], 'docid')
def test_get_acceptable_status_codes_ko(self):
    """A status outside the acceptable list makes get() raise CouchError."""
    self.server.put('{}/docid'.format(self.db1_name), json={})
    with self.assertRaises(exceptions.CouchError):
        self.db1.get('docid', acceptable_status_codes=[202])
def test_put(self):
self.db1.put('docid1', json=dict())
self.db2.put('docid2', json=dict())
# db1
data = self.server.get('{}/docid1'.format(self.db1_name))
self.assertEqual(data['_id'], 'docid1')
with self.assertRaises(exceptions.CouchError) as context:
self.server.get('{}/docid2'.format(self.db1_name))
self.assertEqual(context.exception.args[0]['error'], 'not_found')
# db2
data = self.server.get('{}/docid2'.format(self.db2_name))
self.assertEqual(data['_id'], 'docid2')
with self.assertRaises(exceptions.CouchError) as context:
self.server.get('{}/docid1'.format(self.db2_name))
self.assertEqual(context.exception.args[0]['error'], 'not_found')
def test_put_acceptable_status_codes_ok(self):
self.db1.put('docid', json=dict(), acceptable_status_codes=[201])
data = self.server.get('{}/docid'.format(self.db1_name))
self.assertEqual(data['_id'], 'docid')
def test_put_acceptable_status_codes_ko(self):
with self.assertRaises(exceptions.CouchError):
self.db1.put('docid', json=dict(), acceptable_status_codes=[202])
def test_delete(self):
data1 = self.db1.put('docid1', json=dict())
data2 = self.db2.put('docid2', json=dict())
# db1
delete = self.db1.delete('docid1?rev={}'.format(data1['rev']))
self.assertEqual(delete['id'], data1['id'])
self.assertNotEqual(delete['rev'], data1['rev'])
with self.assertRaises(exceptions.CouchError) as context:
self.server.get('{}/docid1'.format(self.db1_name))
self.assertEqual(context.exception.args[0]['error'], 'not_found')
with self.assertRaises(exceptions.CouchError) as context:
self.server.get('{}/docid2'.format(self.db1_name))
self.assertEqual(context.exception.args[0]['error'], 'not_found')
# db2
delete = self.db2.delete('docid2?rev={}'.format(data2['rev']))
self.assertEqual(delete['id'], data2['id'])
self.assertNotEqual(delete['rev'], data2['rev'])
with self.assertRaises(exceptions.CouchError) as context:
self.server.get('{}/docid1'.format(self.db2_name))
self.assertEqual(context.exception.args[0]['error'], 'not_found')
with self.assertRaises(exceptions.CouchError) as context:
self.server.get('{}/docid2'.format(self.db2_name))
self.assertEqual(context.exception.args[0]['error'], 'not_found')
def test_delete_acceptable_status_codes_ok(self):
data = self.db1.put('docid', json=dict())
delete = self.db1.delete('docid?rev={}'.format(data['rev']), acceptable_status_codes=[200])
self.assertEqual(delete['id'], data['id'])
self.assertNotEqual(delete['rev'], data['rev'])
with self.assertRaises(exceptions.CouchError) as context:
self.server.get('{}/docid'.format(self.db1_name))
self.assertEqual(context.exception.args[0]['error'], 'not_found')
def test_delete_acceptable_status_codes_ko(self):
data = self.db1.put('docid', json=dict())
with self.assertRaises(exceptions.CouchError):
self.db1.delete('docid?rev={}'.format(data['rev']), acceptable_status_codes=[202])
class DatabaseListDocumentsTest(CouchTestCase):
    """list_design_documents must return the database's design documents."""

    def test_list_design_documents(self):
        db = Server().create_database('mydb')
        map_fn = 'function (doc) {\n emit(doc._id, 1);\n}'
        db.put('_design/docid', json={'views': {'view': {'map': map_fn}}})
        docs = db.list_design_documents()
        self.assertEqual(docs['total_rows'], 1)
        self.assertEqual(docs['offset'], 0)
        first = docs['rows'][0]
        self.assertEqual(first['id'], '_design/docid')
        self.assertEqual(first['key'], '_design/docid')
        # A stored design document always carries a non-empty revision.
        self.assertNotEqual(first['value']['rev'], '')
class DatabaseViewTest(CouchTestCase):
    """Raw and batched view queries over a fixed four-document fixture
    (two books, two authors)."""

    def setUp(self):
        self.db, created = Server().get_or_create_database('mydb')
        Book(_id='python_cookbook', title='Python Cookbook', pages=806).save()
        Book(_id='django_guide', title='The Definitive Guide to Django', pages=536).save()
        Author(_id='alex', name='Alex Martelli').save()
        Author(_id='adrian', name='Adrian Holovaty').save()

    def _install_view(self, map_function):
        # Every test installs a single design doc holding one view named 'view'.
        self.db.put('_design/viewdocid', json={'views': {'view': {'map': map_function}}})

    def _ids(self, **view_kwargs):
        # view() returns a generator, so take at most 5 entries before listing.
        rows = itertools.islice(self.db.view('viewdocid', **view_kwargs), 5)
        return [row['id'] for row in rows]

    def test_raw_empy_emit(self):
        self._install_view('function(doc) { emit(); }')
        result = self.db.raw_view('viewdocid', 'view')
        self.assertEqual(result['offset'], 0)
        self.assertEqual(result['total_rows'], 4)
        self.assertEqual(result['rows'], [
            dict(id='adrian', key=None, value=None),
            dict(id='alex', key=None, value=None),
            dict(id='django_guide', key=None, value=None),
            dict(id='python_cookbook', key=None, value=None),
        ])

    def test_raw_emit_key(self):
        self._install_view('function(doc) { emit(doc._id); }')
        result = self.db.raw_view('viewdocid', 'view')
        self.assertEqual(result['offset'], 0)
        self.assertEqual(result['total_rows'], 4)
        self.assertEqual(result['rows'], [
            dict(id='adrian', key='adrian', value=None),
            dict(id='alex', key='alex', value=None),
            dict(id='django_guide', key='django_guide', value=None),
            dict(id='python_cookbook', key='python_cookbook', value=None),
        ])

    def test_raw_emit_key_value(self):
        self._install_view('function(doc) { emit(doc._id, 1); }')
        result = self.db.raw_view('viewdocid', 'view')
        self.assertEqual(result['offset'], 0)
        self.assertEqual(result['total_rows'], 4)
        self.assertEqual(result['rows'], [
            dict(id='adrian', key='adrian', value=1),
            dict(id='alex', key='alex', value=1),
            dict(id='django_guide', key='django_guide', value=1),
            dict(id='python_cookbook', key='python_cookbook', value=1),
        ])

    def test_raw_limit(self):
        self._install_view('function(doc) { emit(); }')
        result = self.db.raw_view('viewdocid', 'view', limit=2)
        self.assertEqual(result['offset'], 0)
        self.assertEqual(result['total_rows'], 4)
        self.assertEqual(len(result['rows']), 2)
        self.assertEqual(result['rows'][0]['id'], 'adrian')
        self.assertEqual(result['rows'][1]['id'], 'alex')

    def test_raw_startkey_1(self):
        self._install_view('function(doc) { emit(doc._id); }')
        result = self.db.raw_view('viewdocid', 'view', startkey='a')
        self.assertEqual(result['offset'], 0)
        self.assertEqual(result['total_rows'], 4)

    def test_raw_startkey_2(self):
        self._install_view('function(doc) { emit(doc._id); }')
        result = self.db.raw_view('viewdocid', 'view', startkey='b')
        self.assertEqual(result['offset'], 2)
        self.assertEqual(result['total_rows'], 4)

    def test_raw_startkey_3(self):
        self._install_view('function(doc) { emit(doc._id); }')
        result = self.db.raw_view('viewdocid', 'view', startkey='z')
        self.assertEqual(result['offset'], 4)
        self.assertEqual(result['total_rows'], 4)

    def test_raw_startkey_docid(self):
        self._install_view('function(doc) { emit(doc._id); }')
        result = self.db.raw_view('viewdocid', 'view', startkey='django_guide', startkey_docid='django_guide')
        self.assertEqual(result['offset'], 2)
        self.assertEqual(result['total_rows'], 4)

    def test_batch_1(self):
        self._install_view('function(doc) { emit(doc._id); }')
        self.assertEqual(self._ids(batch_size=1),
                         ['adrian', 'alex', 'django_guide', 'python_cookbook'])

    def test_batch_2(self):
        self._install_view('function(doc) { emit(doc._id); }')
        self.assertEqual(self._ids(batch_size=2),
                         ['adrian', 'alex', 'django_guide', 'python_cookbook'])

    def test_batch_3(self):
        # batch_size larger than the row count still yields everything once.
        self._install_view('function(doc) { emit(doc._id); }')
        self.assertEqual(self._ids(batch_size=10),
                         ['adrian', 'alex', 'django_guide', 'python_cookbook'])

    def test_batch_limit(self):
        self._install_view('function(doc) { emit(doc._id); }')
        self.assertEqual(self._ids(batch_size=2, limit=3),
                         ['adrian', 'alex', 'django_guide'])

    def test_batch_ko_limit(self):
        self._install_view('function(doc) { emit(doc._id); }')
        with self.assertRaises(ValueError) as ctx:
            list(self.db.view('viewdocid', limit=0))
        self.assertEqual(ctx.exception.args, ('limit must be greater than 0',))

    def test_batch_ko_batch_size(self):
        self._install_view('function(doc) { emit(doc._id); }')
        with self.assertRaises(ValueError) as ctx:
            list(self.db.view('viewdocid', batch_size=0))
        self.assertEqual(ctx.exception.args, ('batch_size must be greater than 0',))

    def test_document_class(self):
        self._install_view('function(doc) { if(doc.document_type && doc.document_type=="book") { emit(doc._id, doc); }}')
        books = list(itertools.islice(self.db.view('viewdocid', document_class=Book), 5))
        self.assertEqual(len(books), 2)
        guide, cookbook = books
        self.assertIsInstance(guide, Book)
        self.assertEqual(guide._id, 'django_guide')
        self.assertEqual(guide.title, 'The Definitive Guide to Django')
        self.assertEqual(guide.pages, 536)
        self.assertNotEqual(guide._rev, None)
        self.assertIsInstance(cookbook, Book)
        self.assertEqual(cookbook._id, 'python_cookbook')
        self.assertEqual(cookbook.title, 'Python Cookbook')
        self.assertEqual(cookbook.pages, 806)
        self.assertNotEqual(cookbook._rev, None)

    def test_document_class_type_mismatch(self):
        self._install_view('function(doc) { if(doc.document_type && doc.document_type=="author") { emit(doc._id, doc); }}')
        with self.assertRaises(exceptions.CouchError) as ctx:
            list(self.db.view('viewdocid', document_class=Book))
        self.assertEqual(ctx.exception.args[0], "Type mismatch error: document_type 'book' expected, got 'author'")
class DatabaseViewOneTest(CouchTestCase):
    """view_one: single-row lookups by key, including error cases."""

    def setUp(self):
        self.db, created = Server().get_or_create_database('mydb')
        Book(_id='python_cookbook', title='Python Cookbook', pages=806).save()
        Book(_id='django_guide', title='The Definitive Guide to Django', pages=536).save()
        Author(_id='alex', name='Alex Martelli').save()
        Author(_id='adrian', name='Adrian Holovaty').save()

    def _install_view(self, map_function):
        # Single design doc with one view named 'view'.
        self.db.put('_design/viewdocid', json={'views': {'view': {'map': map_function}}})

    def test_ok(self):
        self._install_view('function(doc) { if(doc.title) { emit(doc.title); }}')
        row = self.db.view_one('viewdocid', 'Python Cookbook')
        self.assertEqual(row['key'], 'Python Cookbook')
        self.assertEqual(row['id'], 'python_cookbook')
        self.assertEqual(row['value'], None)

    def test_document_class(self):
        self._install_view('function(doc) { if(doc.title) { emit(doc.title, doc); }}')
        book = self.db.view_one('viewdocid', 'Python Cookbook', document_class=Book)
        self.assertEqual(book.document_type, 'book')
        self.assertEqual(book._id, 'python_cookbook')
        self.assertEqual(book.title, 'Python Cookbook')
        self.assertNotEqual(book._rev, None)

    def test_document_class_type_mismatch(self):
        self._install_view('function(doc) { if(doc.title) { emit(doc.title, doc); }}')
        with self.assertRaises(exceptions.CouchError) as ctx:
            self.db.view_one('viewdocid', 'Python Cookbook', document_class=Author)
        self.assertEqual(ctx.exception.args[0], "Type mismatch error: document_type 'author' expected, got 'book'")

    def test_not_found(self):
        self._install_view('function(doc) { if(doc.title) { emit(doc.title, doc); }}')
        with self.assertRaises(exceptions.ObjectDoesNotExist):
            self.db.view_one('viewdocid', 'wrong')

    def test_multiple_objects(self):
        # Both books emit under the key 'book', so view_one must refuse.
        self._install_view('function(doc) { emit(doc.document_type, doc); }')
        with self.assertRaises(exceptions.MultipleObjectsReturned):
            self.db.view_one('viewdocid', 'book')
class DatabaseFindTest(CouchTestCase):
    """Mango _find queries: selectors, batching, document classes, warnings."""

    def setUp(self):
        self.db, created = Server().get_or_create_database('mydb')
        Book(_id='python_cookbook', title='Python Cookbook', pages=806).save()
        Book(_id='django_guide', title='The Definitive Guide to Django', pages=536).save()
        Author(_id='alex', name='Alex Martelli').save()
        Author(_id='adrian', name='Adrian Holovaty').save()

    def test_ok(self):
        matches = self.db.find(selector=dict(pages={'$gt': 700}), warning=False)
        # find() yields lazily; materialize at most 5 results.
        matches = list(itertools.islice(matches, 5))
        self.assertEqual(len(matches), 1)
        book = matches[0]
        self.assertEqual(book['_id'], 'python_cookbook')
        self.assertEqual(book['document_type'], 'book')
        self.assertEqual(book['title'], 'Python Cookbook')
        self.assertEqual(book['pages'], 806)
        self.assertNotEqual(book['_rev'], None)

    def test_batch(self):
        matches = self.db.find(selector=dict(), batch_size=2, warning=False)
        matches = list(itertools.islice(matches, 5))
        self.assertEqual([doc['_id'] for doc in matches],
                         ['adrian', 'alex', 'django_guide', 'python_cookbook'])

    def test_batch_ko_limit(self):
        with self.assertRaises(ValueError) as ctx:
            list(self.db.find(selector=dict(), limit=0))
        self.assertEqual(ctx.exception.args, ('limit must be greater than 0',))

    def test_batch_ko_batch_size(self):
        with self.assertRaises(ValueError) as ctx:
            list(self.db.find(selector=dict(), batch_size=0))
        self.assertEqual(ctx.exception.args, ('batch_size must be greater than 0',))

    def test_document_class(self):
        found = self.db.find(selector=dict(document_type='author'), document_class=Author, warning=False)
        authors = list(itertools.islice(found, 5))
        self.assertEqual(len(authors), 2)
        for author in authors:
            self.assertIsInstance(author, Author)
            self.assertEqual(author.document_type, 'author')
            self.assertNotEqual(author._rev, None)
        adrian, alex = authors
        self.assertEqual(adrian._id, 'adrian')
        self.assertEqual(adrian.name, 'Adrian Holovaty')
        self.assertEqual(alex._id, 'alex')
        self.assertEqual(alex.name, 'Alex Martelli')

    def test_document_class_type_mismatch(self):
        mismatched = self.db.find(selector=dict(document_type='author'), document_class=Book, warning=False)
        with self.assertRaises(exceptions.CouchError) as ctx:
            list(itertools.islice(mismatched, 5))
        self.assertEqual(ctx.exception.args[0], "Type mismatch error: document_type 'book' expected, got 'author'")

    def test_index_warning_true(self):
        with warnings.catch_warnings(record=True) as caught:
            warnings.simplefilter("always")
            list(self.db.find(selector=dict(document_type='book'), warning=True, batch_size=1))
        self.assertEqual(len(caught), 1)
        message = caught[0].message.args[0]
        self.assertIn('no matching index found, create an index to optimize query time', message)
        self.assertIn("'selector': {'document_type': 'book'}", message)

    def test_index_warning_false(self):
        with warnings.catch_warnings(record=True) as caught:
            warnings.simplefilter("always")
            list(self.db.find(selector=dict(document_type='book'), warning=False, batch_size=1))
        self.assertEqual(len(caught), 0)
class DatabaseFindOneTest(CouchTestCase):
    """find_one: exactly-one-document Mango lookups, including error cases."""

    def setUp(self):
        self.db, created = Server().get_or_create_database('mydb')
        Book(_id='python_cookbook', title='Python Cookbook', pages=806).save()
        Book(_id='django_guide', title='The Definitive Guide to Django', pages=536).save()
        Author(_id='alex', name='Alex Martelli').save()
        Author(_id='adrian', name='Adrian Holovaty').save()

    def test_ok(self):
        book = self.db.find_one(selector=dict(pages={'$gt': 700}), warning=False)
        self.assertEqual(book['_id'], 'python_cookbook')
        self.assertEqual(book['document_type'], 'book')
        self.assertEqual(book['title'], 'Python Cookbook')
        self.assertEqual(book['pages'], 806)
        self.assertNotEqual(book['_rev'], None)

    def test_document_class(self):
        author = self.db.find_one(selector=dict(name='Alex Martelli'), document_class=Author, warning=False)
        self.assertIsInstance(author, Author)
        self.assertEqual(author.document_type, 'author')
        self.assertEqual(author._id, 'alex')
        self.assertEqual(author.name, 'Alex Martelli')
        self.assertNotEqual(author._rev, None)

    def test_document_class_type_mismatch(self):
        with self.assertRaises(exceptions.CouchError) as ctx:
            self.db.find_one(selector=dict(name='Alex Martelli'), document_class=Book, warning=False)
        self.assertEqual(ctx.exception.args[0], "Type mismatch error: document_type 'book' expected, got 'author'")

    def test_index_warning_true(self):
        with warnings.catch_warnings(record=True) as caught:
            warnings.simplefilter("always")
            self.db.find_one(selector=dict(name='Alex Martelli'), warning=True)
        self.assertEqual(len(caught), 1)
        message = caught[0].message.args[0]
        self.assertIn('no matching index found, create an index to optimize query time', message)
        self.assertIn("'selector': {'name': 'Alex Martelli'}", message)

    def test_index_warning_false(self):
        with warnings.catch_warnings(record=True) as caught:
            warnings.simplefilter("always")
            self.db.find_one(selector=dict(name='Alex Martelli'), warning=False)
        self.assertEqual(len(caught), 0)

    def test_find_one_got_not_found(self):
        with self.assertRaises(exceptions.ObjectDoesNotExist):
            self.db.find_one(selector=dict(name='not found'), warning=False)

    def test_find_one_got_multiple_objects(self):
        # Two authors match the selector, so find_one must refuse.
        with self.assertRaises(exceptions.MultipleObjectsReturned):
            self.db.find_one(selector=dict(document_type='author'), warning=False)
class DatabaseIndexTest(CouchTestCase):
    """Mango index management: normalize, list, get, create, delete."""

    def setUp(self):
        self.db, created = Server().get_or_create_database('mydb')

    def _create_index1(self):
        # Shared fixture: a json index 'index1' on document_type under ddoc 'ddoc1'.
        self.db.post('_index', json=dict(ddoc='ddoc1', name='index1', index=dict(fields=['document_type'])))

    def _check_all_docs(self, index):
        # The built-in special index present in every database.
        self.assertEqual(index['type'], 'special')
        self.assertEqual(index['ddoc'], None)
        self.assertEqual(index['name'], '_all_docs')
        self.assertEqual(index['def'], dict(fields=[dict(_id='asc')]))

    def _check_index1(self, index, ddoc='ddoc1', fields=None):
        # ddoc differs depending on whether the entry comes from list_indexes
        # ('ddoc1') or from the raw _index endpoint ('_design/ddoc1').
        if fields is None:
            fields = [dict(document_type='asc')]
        self.assertEqual(index['type'], 'json')
        self.assertEqual(index['ddoc'], ddoc)
        self.assertEqual(index['name'], 'index1')
        self.assertEqual(index['def'], dict(fields=fields, partial_filter_selector=dict()))

    def _raw_index1(self):
        # Fetch the index as the server reports it through GET /_index.
        indexes = self.db.get('_index')['indexes']
        return next(i for i in indexes if i['ddoc'] == '_design/ddoc1')

    def test_normalize_index(self):
        normalized = self.db.normalize_index(dict(fields=['document_type']))
        self.assertEqual(normalized, {'fields': [{'document_type': 'asc'}]})

    def test_list_indexes(self):
        self._create_index1()
        indexes = self.db.list_indexes()
        self.assertEqual(len(indexes), 2)
        self._check_all_docs(indexes[(None, '_all_docs')])
        self._check_index1(indexes[('ddoc1', 'index1')])

    def test_list_indexes_filter_ddoc(self):
        self._create_index1()
        indexes = self.db.list_indexes(ddoc='ddoc1')
        self.assertEqual(len(indexes), 1)
        self._check_index1(indexes[('ddoc1', 'index1')])

    def test_list_indexes_filter_name(self):
        self._create_index1()
        indexes = self.db.list_indexes(name='_all_docs')
        self.assertEqual(len(indexes), 1)
        self._check_all_docs(indexes[(None, '_all_docs')])

    def test_list_indexes_filter_ddoc_name(self):
        self._create_index1()
        indexes = self.db.list_indexes(ddoc='ddoc1', name='index1')
        self.assertEqual(len(indexes), 1)
        self._check_index1(indexes[('ddoc1', 'index1')])

    def test_list_indexes_filter_not_found(self):
        self._create_index1()
        indexes = self.db.list_indexes(ddoc='wrong', name='index1')
        self.assertEqual(len(indexes), 0)

    def test_get_index(self):
        self._create_index1()
        self._check_index1(self.db.get_index(ddoc='ddoc1', name='index1'))

    def test_get_index_not_found(self):
        self._create_index1()
        self.assertEqual(self.db.get_index(ddoc='wrong', name='index1'), None)

    def test_create_index(self):
        data = self.db.create_index(ddoc='ddoc1', name='index1', index=dict(fields=['document_type']))
        self.assertEqual(data['result'], 'created')
        self.assertEqual(data['ddoc'], 'ddoc1')
        self.assertEqual(data['name'], 'index1')
        self._check_index1(self._raw_index1(), ddoc='_design/ddoc1')

    def test_create_index_changed(self):
        data = self.db.create_index(ddoc='ddoc1', name='index1', index=dict(fields=['document_type']))
        self.assertEqual(data['result'], 'created')
        self.assertEqual(data['ddoc'], 'ddoc1')
        self.assertEqual(data['name'], 'index1')
        self._check_index1(self._raw_index1(), ddoc='_design/ddoc1')
        # Recreating with different fields replaces the definition.
        data = self.db.create_index(ddoc='ddoc1', name='index1', index=dict(fields=['_id']))
        self.assertEqual(data['result'], 'created')
        self.assertEqual(data['ddoc'], 'ddoc1')
        self.assertEqual(data['name'], 'index1')
        self._check_index1(self._raw_index1(), ddoc='_design/ddoc1', fields=[dict(_id='asc')])

    def test_create_index_unchanged(self):
        data = self.db.create_index(ddoc='ddoc1', name='index1', index=dict(fields=['document_type'], partial_filter_selector=dict()))
        self.assertEqual(data['result'], 'created')
        self.assertEqual(data['ddoc'], 'ddoc1')
        self.assertEqual(data['name'], 'index1')
        self._check_index1(self._raw_index1(), ddoc='_design/ddoc1')
        # Recreating with an equivalent (already-normalized) definition is a no-op.
        data = self.db.create_index(ddoc='ddoc1', name='index1', index=dict(fields=dict(document_type='asc'), partial_filter_selector=dict()))
        self.assertEqual(data['result'], 'unchanged')
        self.assertEqual(data['ddoc'], 'ddoc1')
        self.assertEqual(data['name'], 'index1')
        self._check_index1(self._raw_index1(), ddoc='_design/ddoc1')

    def test_delete_index(self):
        self.db.create_index(ddoc='ddoc1', name='index1', index=dict(fields=['document_type']))
        index = self.db.get_index(ddoc='ddoc1', name='index1')
        self.assertEqual(index['ddoc'], 'ddoc1')
        self.assertEqual(index['name'], 'index1')
        # Delete, then verify the index is gone.
        result = self.db.delete_index('ddoc1', 'index1')
        self.assertEqual(result['ok'], True)
        self.assertEqual(self.db.get_index(ddoc='ddoc1', name='index1'), None)
| 50.889561
| 175
| 0.649979
| 4,226
| 33,638
| 5.038097
| 0.050402
| 0.152882
| 0.07792
| 0.017566
| 0.899159
| 0.858532
| 0.816589
| 0.770654
| 0.754544
| 0.714339
| 0
| 0.014655
| 0.184523
| 33,638
| 660
| 176
| 50.966667
| 0.761511
| 0.022802
| 0
| 0.551852
| 0
| 0.009259
| 0.172515
| 0.002345
| 0
| 0
| 0
| 0
| 0.485185
| 1
| 0.125926
| false
| 0.005556
| 0.016667
| 0
| 0.17037
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
924261f6c842c99c2be927500ea03d9452f39f6e
| 5,658
|
py
|
Python
|
youwol_utils/clients/stories/stories.py
|
youwol/py-youwol
|
85a8877e302c9da1aea168bf1d964d19036c1134
|
[
"MIT"
] | null | null | null |
youwol_utils/clients/stories/stories.py
|
youwol/py-youwol
|
85a8877e302c9da1aea168bf1d964d19036c1134
|
[
"MIT"
] | 1
|
2022-03-14T09:40:15.000Z
|
2022-03-14T09:40:15.000Z
|
youwol_utils/clients/stories/stories.py
|
youwol/py-youwol
|
85a8877e302c9da1aea168bf1d964d19036c1134
|
[
"MIT"
] | null | null | null |
from dataclasses import dataclass, field
from typing import Dict
import aiohttp
from youwol_utils.clients.utils import raise_exception_from_response
@dataclass(frozen=True)
class StoriesClient:
    """Async HTTP client for the stories service.

    Every call opens a fresh ``aiohttp.ClientSession`` carrying the configured
    headers, performs one request, and on HTTP 200 returns the decoded body
    (JSON for most endpoints, text for content endpoints). Any other status is
    delegated to ``raise_exception_from_response``.
    """

    # Base URL of the stories service, without a trailing slash.
    url_base: str
    # Headers attached to every request (e.g. authorization).
    headers: Dict[str, str] = field(default_factory=lambda: {})
    # NOTE(review): built once at class-definition time and never passed to any
    # ClientSession below, so it is effectively unused; kept for backward
    # compatibility with code that may reference StoriesClient.connector.
    connector = aiohttp.TCPConnector(verify_ssl=False)

    async def create_story(self, body, **kwargs):
        """PUT /stories — create a story from *body*; return the JSON response."""
        url = f"{self.url_base}/stories"
        async with aiohttp.ClientSession(headers=self.headers) as session:
            async with await session.put(url=url, json=body, **kwargs) as resp:
                if resp.status == 200:
                    return await resp.json()
                await raise_exception_from_response(resp, **kwargs)

    async def publish_story(self, data, **kwargs):
        """POST /stories — publish a story from raw *data* (e.g. multipart/zip)."""
        url = f"{self.url_base}/stories"
        async with aiohttp.ClientSession(headers=self.headers) as session:
            async with await session.post(url=url, data=data, **kwargs) as resp:
                if resp.status == 200:
                    return await resp.json()
                await raise_exception_from_response(resp, **kwargs)

    async def get_story(self, story_id: str, **kwargs):
        """GET /stories/{story_id} — return the story's JSON representation."""
        url = f"{self.url_base}/stories/{story_id}"
        async with aiohttp.ClientSession(headers=self.headers) as session:
            async with await session.get(url=url, **kwargs) as resp:
                if resp.status == 200:
                    return await resp.json()
                await raise_exception_from_response(resp, **kwargs)

    async def update_story(self, story_id: str, body, **kwargs):
        """POST /stories/{story_id} — update a story with *body*; return JSON."""
        url = f"{self.url_base}/stories/{story_id}"
        async with aiohttp.ClientSession(headers=self.headers) as session:
            async with await session.post(url=url, json=body, **kwargs) as resp:
                if resp.status == 200:
                    return await resp.json()
                await raise_exception_from_response(resp, **kwargs)

    async def delete_story(self, story_id: str, **kwargs):
        """DELETE /stories/{story_id} — delete a story; return JSON."""
        url = f"{self.url_base}/stories/{story_id}"
        async with aiohttp.ClientSession(headers=self.headers) as session:
            async with await session.delete(url=url, **kwargs) as resp:
                if resp.status == 200:
                    return await resp.json()
                await raise_exception_from_response(resp, **kwargs)

    async def get_children(self, story_id: str, parent_document_id: str, from_index=None, count=None, **kwargs):
        """GET the children of a document, with optional paging.

        Bug fix: the previous signature used ``from_index=float, count=int`` —
        the *type objects* as default values — which cannot be serialized as
        query parameters. Both parameters are now optional and are only sent
        to the server when explicitly provided.
        """
        params = {}
        if from_index is not None:
            params["from-index"] = from_index
        if count is not None:
            params["count"] = count
        url = f"{self.url_base}/stories/{story_id}/documents/{parent_document_id}/children"
        async with aiohttp.ClientSession(headers=self.headers) as session:
            async with await session.get(url=url, params=params, **kwargs) as resp:
                if resp.status == 200:
                    return await resp.json()
                await raise_exception_from_response(resp, **kwargs)

    async def get_content(self, story_id: str, content_id: str, **kwargs):
        """GET a content blob; return it as utf8-decoded text."""
        url = f"{self.url_base}/stories/{story_id}/contents/{content_id}"
        async with aiohttp.ClientSession(headers=self.headers) as session:
            async with await session.get(url=url, **kwargs) as resp:
                if resp.status == 200:
                    return await resp.text(encoding='utf8')
                await raise_exception_from_response(resp, **kwargs)

    async def set_content(self, story_id: str, content_id: str, body, **kwargs):
        """POST a content *body*; return the response text."""
        url = f"{self.url_base}/stories/{story_id}/contents/{content_id}"
        async with aiohttp.ClientSession(headers=self.headers) as session:
            async with await session.post(url=url, json=body, **kwargs) as resp:
                if resp.status == 200:
                    return await resp.text()
                await raise_exception_from_response(resp, **kwargs)

    async def create_document(self, story_id: str, body, **kwargs):
        """PUT /stories/{story_id}/documents — create a document; return JSON."""
        url = f"{self.url_base}/stories/{story_id}/documents"
        async with aiohttp.ClientSession(headers=self.headers) as session:
            async with await session.put(url=url, json=body, **kwargs) as resp:
                if resp.status == 200:
                    return await resp.json()
                await raise_exception_from_response(resp, **kwargs)

    async def delete_document(self, story_id: str, document_id: str, **kwargs):
        """DELETE a document of a story; return JSON."""
        url = f"{self.url_base}/stories/{story_id}/documents/{document_id}"
        async with aiohttp.ClientSession(headers=self.headers) as session:
            async with await session.delete(url=url, **kwargs) as resp:
                if resp.status == 200:
                    return await resp.json()
                await raise_exception_from_response(resp, **kwargs)

    async def update_document(self, story_id: str, document_id: str, body, **kwargs):
        """POST an update *body* to a document of a story; return JSON."""
        url = f"{self.url_base}/stories/{story_id}/documents/{document_id}"
        async with aiohttp.ClientSession(headers=self.headers) as session:
            async with await session.post(url=url, json=body, **kwargs) as resp:
                if resp.status == 200:
                    return await resp.json()
                await raise_exception_from_response(resp, **kwargs)
| 40.414286
| 112
| 0.602156
| 684
| 5,658
| 4.843567
| 0.106725
| 0.059765
| 0.065198
| 0.094174
| 0.858436
| 0.854513
| 0.854513
| 0.854513
| 0.818292
| 0.806822
| 0
| 0.008528
| 0.295334
| 5,658
| 139
| 113
| 40.705036
| 0.822423
| 0
| 0
| 0.70297
| 0
| 0
| 0.090668
| 0.08731
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.039604
| 0
| 0.188119
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
924ffff22c32059ed5f1c2e5cd248cf1f1318d1d
| 1,415
|
py
|
Python
|
gdz/serialisers.py
|
audiua/shkolyar_django
|
a2de2dc0a42e6bdd51321f857c0aa1106c51ba80
|
[
"MIT"
] | 1
|
2017-04-22T11:00:07.000Z
|
2017-04-22T11:00:07.000Z
|
gdz/serialisers.py
|
audiua/shkolyar_django
|
a2de2dc0a42e6bdd51321f857c0aa1106c51ba80
|
[
"MIT"
] | 6
|
2017-04-20T17:49:39.000Z
|
2017-04-22T11:55:07.000Z
|
gdz/serialisers.py
|
audiua/shkolyar_django
|
a2de2dc0a42e6bdd51321f857c0aa1106c51ba80
|
[
"MIT"
] | null | null | null |
from django.core.urlresolvers import reverse
from .models import GdzClas, GdzBook, GdzSubject
from rest_framework import serializers
class GdzClasSerializer(serializers.HyperlinkedModelSerializer):
    """Hyperlinked REST serializer for :class:`GdzClas`."""

    # Self-link to this object's detail endpoint in the ``gdz_api`` namespace.
    url = serializers.HyperlinkedIdentityField(view_name="gdz_api:gdzclas-detail")

    class Meta:
        model = GdzClas
        fields = ('id', 'title', 'slug', 'create_time', 'update_time',
                  'description', 'is_promote', 'uri', 'url')
class GdzSubjectSerializer(serializers.HyperlinkedModelSerializer):
    """Hyperlinked REST serializer for :class:`GdzSubject`.

    Fix: the original declared ``gdz_clas`` as a ``HyperlinkedIdentityField``,
    which builds the URL from the *subject* instance's own pk and therefore
    linked to the wrong object.  A ``HyperlinkedRelatedField`` resolves the
    relation's pk instead.
    """

    # Self-link to this subject's detail endpoint.
    url = serializers.HyperlinkedIdentityField(view_name="gdz_api:gdzsubject-detail")
    # Read-only link to the related GdzClas, addressed by its own pk.
    gdz_clas = serializers.HyperlinkedRelatedField(view_name="gdz_api:gdzclas-detail", read_only=True)

    class Meta:
        model = GdzSubject
        fields = ('id', 'title', 'slug', 'create_time', 'update_time',
                  'description', 'is_promote', 'uri', 'url', 'gdz_clas')
class GdzBookSerializer(serializers.HyperlinkedModelSerializer):
    """Hyperlinked REST serializer for :class:`GdzBook`.

    Fix: the original declared ``gdz_clas`` and ``gdz_subject`` as
    ``HyperlinkedIdentityField``s, which build URLs from the *book*
    instance's own pk and therefore linked to the wrong objects.
    ``HyperlinkedRelatedField`` resolves each relation's pk instead.
    """

    # Self-link to this book's detail endpoint.
    url = serializers.HyperlinkedIdentityField(view_name="gdz_api:gdzbook-detail")
    # Read-only links to the related class and subject, addressed by their own pks.
    gdz_clas = serializers.HyperlinkedRelatedField(view_name="gdz_api:gdzclas-detail", read_only=True)
    gdz_subject = serializers.HyperlinkedRelatedField(view_name="gdz_api:gdzsubject-detail", read_only=True)

    class Meta:
        model = GdzBook
        fields = ('id', 'author', 'slug', 'create_time', 'update_time',
                  'description', 'is_promote', 'uri', 'url', 'gdz_clas', 'gdz_subject')
| 50.535714
| 93
| 0.715194
| 143
| 1,415
| 6.881119
| 0.286713
| 0.213415
| 0.237805
| 0.262195
| 0.734756
| 0.734756
| 0.734756
| 0.734756
| 0.658537
| 0.436992
| 0
| 0
| 0.162544
| 1,415
| 28
| 94
| 50.535714
| 0.83038
| 0
| 0
| 0.291667
| 0
| 0
| 0.24435
| 0.097458
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.125
| 0
| 0.625
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
92b17384927222ff91a45c297f642b1ed6d28333
| 32,037
|
py
|
Python
|
sdk/iothub/azure-mgmt-iothubprovisioningservices/azure/mgmt/iothubprovisioningservices/operations/dps_certificate_operations.py
|
iscai-msft/azure-sdk-for-python
|
83715b95c41e519d5be7f1180195e2fba136fc0f
|
[
"MIT"
] | 8
|
2021-01-13T23:44:08.000Z
|
2021-03-17T10:13:36.000Z
|
sdk/iothub/azure-mgmt-iothubprovisioningservices/azure/mgmt/iothubprovisioningservices/operations/dps_certificate_operations.py
|
iscai-msft/azure-sdk-for-python
|
83715b95c41e519d5be7f1180195e2fba136fc0f
|
[
"MIT"
] | 226
|
2019-07-24T07:57:21.000Z
|
2019-10-15T01:07:24.000Z
|
sdk/iothub/azure-mgmt-iothubprovisioningservices/azure/mgmt/iothubprovisioningservices/operations/dps_certificate_operations.py
|
iscai-msft/azure-sdk-for-python
|
83715b95c41e519d5be7f1180195e2fba136fc0f
|
[
"MIT"
] | 2
|
2021-05-23T16:46:31.000Z
|
2021-05-26T23:51:09.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from .. import models
class DpsCertificateOperations(object):
    """DpsCertificateOperations operations.

    NOTE(review): AutoRest-generated client code — do not hand-edit logic;
    changes are lost on regeneration.

    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    :ivar api_version: The version of the API. Constant value: "2018-01-22".
    """

    # Exposed so callers can reach the generated model classes via the
    # operations object.
    models = models

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        # Pinned REST API version sent as the ``api-version`` query parameter.
        self.api_version = "2018-01-22"
        self.config = config

    def get(
            self, certificate_name, resource_group_name, provisioning_service_name, if_match=None, custom_headers=None, raw=False, **operation_config):
        """Get the certificate from the provisioning service.

        :param certificate_name: Name of the certificate to retrieve.
        :type certificate_name: str
        :param resource_group_name: Resource group identifier.
        :type resource_group_name: str
        :param provisioning_service_name: Name of the provisioning service the
         certificate is associated with.
        :type provisioning_service_name: str
        :param if_match: ETag of the certificate.
        :type if_match: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: CertificateResponse or ClientRawResponse if raw=true
        :rtype:
         ~azure.mgmt.iothubprovisioningservices.models.CertificateResponse or
         ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`ErrorDetailsException<azure.mgmt.iothubprovisioningservices.models.ErrorDetailsException>`
        """
        # Construct URL
        url = self.get.metadata['url']
        path_format_arguments = {
            'certificateName': self._serialize.url("certificate_name", certificate_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'provisioningServiceName': self._serialize.url("provisioning_service_name", provisioning_service_name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        # If-Match is optional for GET (no concurrency check required).
        if if_match is not None:
            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)

        if response.status_code not in [200]:
            raise models.ErrorDetailsException(self._deserialize, response)

        deserialized = None

        if response.status_code == 200:
            deserialized = self._deserialize('CertificateResponse', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    # Metadata attached to the bound function so the URL template travels
    # with the operation (AutoRest convention).
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/provisioningServices/{provisioningServiceName}/certificates/{certificateName}'}

    def create_or_update(
            self, resource_group_name, provisioning_service_name, certificate_name, if_match=None, certificate=None, custom_headers=None, raw=False, **operation_config):
        """Upload the certificate to the provisioning service.

        Add new certificate or update an existing certificate.

        :param resource_group_name: Resource group identifier.
        :type resource_group_name: str
        :param provisioning_service_name: The name of the provisioning
         service.
        :type provisioning_service_name: str
        :param certificate_name: The name of the certificate create or update.
        :type certificate_name: str
        :param if_match: ETag of the certificate. This is required to update
         an existing certificate, and ignored while creating a brand new
         certificate.
        :type if_match: str
        :param certificate: Base-64 representation of the X509 leaf
         certificate .cer file or just .pem file content.
        :type certificate: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: CertificateResponse or ClientRawResponse if raw=true
        :rtype:
         ~azure.mgmt.iothubprovisioningservices.models.CertificateResponse or
         ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`ErrorDetailsException<azure.mgmt.iothubprovisioningservices.models.ErrorDetailsException>`
        """
        # Wrap the raw certificate string in the generated request model.
        certificate_description = models.CertificateBodyDescription(certificate=certificate)

        # Construct URL
        url = self.create_or_update.metadata['url']
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'provisioningServiceName': self._serialize.url("provisioning_service_name", provisioning_service_name, 'str'),
            'certificateName': self._serialize.url("certificate_name", certificate_name, 'str', max_length=256)
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        # If-Match only sent for updates; omitted when creating a new cert.
        if if_match is not None:
            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct body
        body_content = self._serialize.body(certificate_description, 'CertificateBodyDescription')

        # Construct and send request
        request = self._client.put(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, stream=False, **operation_config)

        if response.status_code not in [200]:
            raise models.ErrorDetailsException(self._deserialize, response)

        deserialized = None

        if response.status_code == 200:
            deserialized = self._deserialize('CertificateResponse', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/provisioningServices/{provisioningServiceName}/certificates/{certificateName}'}

    def delete(
            self, resource_group_name, if_match, provisioning_service_name, certificate_name, certificatename=None, certificateraw_bytes=None, certificateis_verified=None, certificatepurpose=None, certificatecreated=None, certificatelast_updated=None, certificatehas_private_key=None, certificatenonce=None, custom_headers=None, raw=False, **operation_config):
        """Delete the Provisioning Service Certificate.

        Deletes the specified certificate assosciated with the Provisioning
        Service.

        :param resource_group_name: Resource group identifier.
        :type resource_group_name: str
        :param if_match: ETag of the certificate
        :type if_match: str
        :param provisioning_service_name: The name of the provisioning
         service.
        :type provisioning_service_name: str
        :param certificate_name: This is a mandatory field, and is the logical
         name of the certificate that the provisioning service will access by.
        :type certificate_name: str
        :param certificatename: This is optional, and it is the Common Name of
         the certificate.
        :type certificatename: str
        :param certificateraw_bytes: Raw data within the certificate.
        :type certificateraw_bytes: bytearray
        :param certificateis_verified: Indicates if certificate has been
         verified by owner of the private key.
        :type certificateis_verified: bool
        :param certificatepurpose: A description that mentions the purpose of
         the certificate. Possible values include: 'clientAuthentication',
         'serverAuthentication'
        :type certificatepurpose: str or
         ~azure.mgmt.iothubprovisioningservices.models.CertificatePurpose
        :param certificatecreated: Time the certificate is created.
        :type certificatecreated: datetime
        :param certificatelast_updated: Time the certificate is last updated.
        :type certificatelast_updated: datetime
        :param certificatehas_private_key: Indicates if the certificate
         contains a private key.
        :type certificatehas_private_key: bool
        :param certificatenonce: Random number generated to indicate Proof of
         Possession.
        :type certificatenonce: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: None or ClientRawResponse if raw=true
        :rtype: None or ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`ErrorDetailsException<azure.mgmt.iothubprovisioningservices.models.ErrorDetailsException>`
        """
        # Construct URL
        url = self.delete.metadata['url']
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'provisioningServiceName': self._serialize.url("provisioning_service_name", provisioning_service_name, 'str'),
            'certificateName': self._serialize.url("certificate_name", certificate_name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        # The ``certificate*`` arguments are the flattened ``certificate.*``
        # properties of the Swagger definition, sent as query parameters.
        query_parameters = {}
        if certificatename is not None:
            query_parameters['certificate.name'] = self._serialize.query("certificatename", certificatename, 'str')
        if certificateraw_bytes is not None:
            query_parameters['certificate.rawBytes'] = self._serialize.query("certificateraw_bytes", certificateraw_bytes, 'bytearray')
        if certificateis_verified is not None:
            query_parameters['certificate.isVerified'] = self._serialize.query("certificateis_verified", certificateis_verified, 'bool')
        if certificatepurpose is not None:
            query_parameters['certificate.purpose'] = self._serialize.query("certificatepurpose", certificatepurpose, 'str')
        if certificatecreated is not None:
            query_parameters['certificate.created'] = self._serialize.query("certificatecreated", certificatecreated, 'iso-8601')
        if certificatelast_updated is not None:
            query_parameters['certificate.lastUpdated'] = self._serialize.query("certificatelast_updated", certificatelast_updated, 'iso-8601')
        if certificatehas_private_key is not None:
            query_parameters['certificate.hasPrivateKey'] = self._serialize.query("certificatehas_private_key", certificatehas_private_key, 'bool')
        if certificatenonce is not None:
            query_parameters['certificate.nonce'] = self._serialize.query("certificatenonce", certificatenonce, 'str')
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        # If-Match is mandatory for delete (positional parameter, no None check).
        header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.delete(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)

        if response.status_code not in [200, 204]:
            raise models.ErrorDetailsException(self._deserialize, response)

        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
    delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/provisioningServices/{provisioningServiceName}/certificates/{certificateName}'}

    def list(
            self, resource_group_name, provisioning_service_name, custom_headers=None, raw=False, **operation_config):
        """Get all the certificates tied to the provisioning service.

        :param resource_group_name: Name of resource group.
        :type resource_group_name: str
        :param provisioning_service_name: Name of provisioning service to
         retrieve certificates for.
        :type provisioning_service_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: CertificateListDescription or ClientRawResponse if raw=true
        :rtype:
         ~azure.mgmt.iothubprovisioningservices.models.CertificateListDescription
         or ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`ErrorDetailsException<azure.mgmt.iothubprovisioningservices.models.ErrorDetailsException>`
        """
        # Construct URL
        url = self.list.metadata['url']
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'provisioningServiceName': self._serialize.url("provisioning_service_name", provisioning_service_name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)

        if response.status_code not in [200]:
            raise models.ErrorDetailsException(self._deserialize, response)

        deserialized = None

        if response.status_code == 200:
            deserialized = self._deserialize('CertificateListDescription', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/provisioningServices/{provisioningServiceName}/certificates'}

    def generate_verification_code(
            self, certificate_name, if_match, resource_group_name, provisioning_service_name, certificatename=None, certificateraw_bytes=None, certificateis_verified=None, certificatepurpose=None, certificatecreated=None, certificatelast_updated=None, certificatehas_private_key=None, certificatenonce=None, custom_headers=None, raw=False, **operation_config):
        """Generate verification code for Proof of Possession.

        :param certificate_name: The mandatory logical name of the
         certificate, that the provisioning service uses to access.
        :type certificate_name: str
        :param if_match: ETag of the certificate. This is required to update
         an existing certificate, and ignored while creating a brand new
         certificate.
        :type if_match: str
        :param resource_group_name: name of resource group.
        :type resource_group_name: str
        :param provisioning_service_name: Name of provisioning service.
        :type provisioning_service_name: str
        :param certificatename: Common Name for the certificate.
        :type certificatename: str
        :param certificateraw_bytes: Raw data of certificate.
        :type certificateraw_bytes: bytearray
        :param certificateis_verified: Indicates if the certificate has been
         verified by owner of the private key.
        :type certificateis_verified: bool
        :param certificatepurpose: Description mentioning the purpose of the
         certificate. Possible values include: 'clientAuthentication',
         'serverAuthentication'
        :type certificatepurpose: str or
         ~azure.mgmt.iothubprovisioningservices.models.CertificatePurpose
        :param certificatecreated: Certificate creation time.
        :type certificatecreated: datetime
        :param certificatelast_updated: Certificate last updated time.
        :type certificatelast_updated: datetime
        :param certificatehas_private_key: Indicates if the certificate
         contains private key.
        :type certificatehas_private_key: bool
        :param certificatenonce: Random number generated to indicate Proof of
         Possession.
        :type certificatenonce: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: VerificationCodeResponse or ClientRawResponse if raw=true
        :rtype:
         ~azure.mgmt.iothubprovisioningservices.models.VerificationCodeResponse
         or ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`ErrorDetailsException<azure.mgmt.iothubprovisioningservices.models.ErrorDetailsException>`
        """
        # Construct URL
        url = self.generate_verification_code.metadata['url']
        path_format_arguments = {
            'certificateName': self._serialize.url("certificate_name", certificate_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'provisioningServiceName': self._serialize.url("provisioning_service_name", provisioning_service_name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        # Flattened ``certificate.*`` Swagger properties, sent as query params.
        query_parameters = {}
        if certificatename is not None:
            query_parameters['certificate.name'] = self._serialize.query("certificatename", certificatename, 'str')
        if certificateraw_bytes is not None:
            query_parameters['certificate.rawBytes'] = self._serialize.query("certificateraw_bytes", certificateraw_bytes, 'bytearray')
        if certificateis_verified is not None:
            query_parameters['certificate.isVerified'] = self._serialize.query("certificateis_verified", certificateis_verified, 'bool')
        if certificatepurpose is not None:
            query_parameters['certificate.purpose'] = self._serialize.query("certificatepurpose", certificatepurpose, 'str')
        if certificatecreated is not None:
            query_parameters['certificate.created'] = self._serialize.query("certificatecreated", certificatecreated, 'iso-8601')
        if certificatelast_updated is not None:
            query_parameters['certificate.lastUpdated'] = self._serialize.query("certificatelast_updated", certificatelast_updated, 'iso-8601')
        if certificatehas_private_key is not None:
            query_parameters['certificate.hasPrivateKey'] = self._serialize.query("certificatehas_private_key", certificatehas_private_key, 'bool')
        if certificatenonce is not None:
            query_parameters['certificate.nonce'] = self._serialize.query("certificatenonce", certificatenonce, 'str')
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        # If-Match is mandatory here (positional parameter, no None check).
        header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.post(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)

        if response.status_code not in [200]:
            raise models.ErrorDetailsException(self._deserialize, response)

        deserialized = None

        if response.status_code == 200:
            deserialized = self._deserialize('VerificationCodeResponse', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    generate_verification_code.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/provisioningServices/{provisioningServiceName}/certificates/{certificateName}/generateVerificationCode'}

    def verify_certificate(
            self, certificate_name, if_match, resource_group_name, provisioning_service_name, certificatename=None, certificateraw_bytes=None, certificateis_verified=None, certificatepurpose=None, certificatecreated=None, certificatelast_updated=None, certificatehas_private_key=None, certificatenonce=None, certificate=None, custom_headers=None, raw=False, **operation_config):
        """Verify certificate's private key possession.

        Verifies the certificate's private key possession by providing the leaf
        cert issued by the verifying pre uploaded certificate.

        :param certificate_name: The mandatory logical name of the
         certificate, that the provisioning service uses to access.
        :type certificate_name: str
        :param if_match: ETag of the certificate.
        :type if_match: str
        :param resource_group_name: Resource group name.
        :type resource_group_name: str
        :param provisioning_service_name: Provisioning service name.
        :type provisioning_service_name: str
        :param certificatename: Common Name for the certificate.
        :type certificatename: str
        :param certificateraw_bytes: Raw data of certificate.
        :type certificateraw_bytes: bytearray
        :param certificateis_verified: Indicates if the certificate has been
         verified by owner of the private key.
        :type certificateis_verified: bool
        :param certificatepurpose: Describe the purpose of the certificate.
         Possible values include: 'clientAuthentication',
         'serverAuthentication'
        :type certificatepurpose: str or
         ~azure.mgmt.iothubprovisioningservices.models.CertificatePurpose
        :param certificatecreated: Certificate creation time.
        :type certificatecreated: datetime
        :param certificatelast_updated: Certificate last updated time.
        :type certificatelast_updated: datetime
        :param certificatehas_private_key: Indicates if the certificate
         contains private key.
        :type certificatehas_private_key: bool
        :param certificatenonce: Random number generated to indicate Proof of
         Possession.
        :type certificatenonce: str
        :param certificate: base-64 representation of X509 certificate .cer
         file or just .pem file content.
        :type certificate: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: CertificateResponse or ClientRawResponse if raw=true
        :rtype:
         ~azure.mgmt.iothubprovisioningservices.models.CertificateResponse or
         ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`ErrorDetailsException<azure.mgmt.iothubprovisioningservices.models.ErrorDetailsException>`
        """
        # Wrap the leaf certificate in the generated request model.
        request = models.VerificationCodeRequest(certificate=certificate)

        # Construct URL
        url = self.verify_certificate.metadata['url']
        path_format_arguments = {
            'certificateName': self._serialize.url("certificate_name", certificate_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'provisioningServiceName': self._serialize.url("provisioning_service_name", provisioning_service_name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        # Flattened ``certificate.*`` Swagger properties, sent as query params.
        query_parameters = {}
        if certificatename is not None:
            query_parameters['certificate.name'] = self._serialize.query("certificatename", certificatename, 'str')
        if certificateraw_bytes is not None:
            query_parameters['certificate.rawBytes'] = self._serialize.query("certificateraw_bytes", certificateraw_bytes, 'bytearray')
        if certificateis_verified is not None:
            query_parameters['certificate.isVerified'] = self._serialize.query("certificateis_verified", certificateis_verified, 'bool')
        if certificatepurpose is not None:
            query_parameters['certificate.purpose'] = self._serialize.query("certificatepurpose", certificatepurpose, 'str')
        if certificatecreated is not None:
            query_parameters['certificate.created'] = self._serialize.query("certificatecreated", certificatecreated, 'iso-8601')
        if certificatelast_updated is not None:
            query_parameters['certificate.lastUpdated'] = self._serialize.query("certificatelast_updated", certificatelast_updated, 'iso-8601')
        if certificatehas_private_key is not None:
            query_parameters['certificate.hasPrivateKey'] = self._serialize.query("certificatehas_private_key", certificatehas_private_key, 'bool')
        if certificatenonce is not None:
            query_parameters['certificate.nonce'] = self._serialize.query("certificatenonce", certificatenonce, 'str')
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        # If-Match is mandatory here (positional parameter, no None check).
        header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct body
        body_content = self._serialize.body(request, 'VerificationCodeRequest')

        # Construct and send request
        # NOTE: ``request`` is rebound here from the model object to the
        # HTTP request (generated-code quirk).
        request = self._client.post(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, stream=False, **operation_config)

        if response.status_code not in [200]:
            raise models.ErrorDetailsException(self._deserialize, response)

        deserialized = None

        if response.status_code == 200:
            deserialized = self._deserialize('CertificateResponse', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    verify_certificate.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/provisioningServices/{provisioningServiceName}/certificates/{certificateName}/verify'}
| 54.764103
| 378
| 0.707401
| 3,284
| 32,037
| 6.70676
| 0.079476
| 0.039546
| 0.013076
| 0.015255
| 0.906788
| 0.893167
| 0.877911
| 0.864245
| 0.860159
| 0.85462
| 0
| 0.004003
| 0.204607
| 32,037
| 584
| 379
| 54.857877
| 0.860333
| 0.332834
| 0
| 0.795367
| 0
| 0.019305
| 0.215798
| 0.117671
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027027
| false
| 0
| 0.011583
| 0
| 0.088803
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2bde0c94c43c9aa79f99db627285e95051414000
| 99
|
py
|
Python
|
safe_agents/__init__.py
|
dmelichar/thesis-poc
|
c78eb56bd6b43ff51e17c870ba2bba22958a4668
|
[
"MIT"
] | null | null | null |
safe_agents/__init__.py
|
dmelichar/thesis-poc
|
c78eb56bd6b43ff51e17c870ba2bba22958a4668
|
[
"MIT"
] | null | null | null |
safe_agents/__init__.py
|
dmelichar/thesis-poc
|
c78eb56bd6b43ff51e17c870ba2bba22958a4668
|
[
"MIT"
] | null | null | null |
import safe_agents.envs.lunar
import safe_agents.agents
from safe_agents.utils import plot_visuals
| 24.75
| 42
| 0.878788
| 16
| 99
| 5.1875
| 0.5625
| 0.361446
| 0.385542
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.080808
| 99
| 3
| 43
| 33
| 0.912088
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
920de5b6b22c4c7a91b64358261b5264984b1937
| 104
|
py
|
Python
|
tests/test_embed.py
|
thomasballinger/observable-jupyter
|
d86e3213611c78fb2f5b61a04fb22ea21d0b0904
|
[
"ISC"
] | 22
|
2021-09-24T17:03:28.000Z
|
2022-03-22T14:35:31.000Z
|
tests/test_embed.py
|
thomasballinger/observable-jupyter
|
d86e3213611c78fb2f5b61a04fb22ea21d0b0904
|
[
"ISC"
] | 7
|
2021-09-24T16:24:50.000Z
|
2021-11-28T21:53:16.000Z
|
tests/test_embed.py
|
thomasballinger/observable-jupyter
|
d86e3213611c78fb2f5b61a04fb22ea21d0b0904
|
[
"ISC"
] | 1
|
2022-01-26T08:39:55.000Z
|
2022-01-26T08:39:55.000Z
|
from observable_jupyter import embed
def test_observable_jupyter_embed():
    """Smoke test: the ``embed`` symbol was importable and is bound."""
    imported = embed
    assert imported is not None
| 20.8
| 36
| 0.817308
| 15
| 104
| 5.4
| 0.733333
| 0.419753
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 104
| 4
| 37
| 26
| 0.920455
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
a60625ede8bbdc74b79a81f7246b6224dca3d2e6
| 41,455
|
py
|
Python
|
src/neurons_engine/mbuild_modules/starter_shield.py
|
FFtust/k210_scripts
|
ef2e9afea4b2344a425c750d1d3bb31e9e389c03
|
[
"MIT"
] | null | null | null |
src/neurons_engine/mbuild_modules/starter_shield.py
|
FFtust/k210_scripts
|
ef2e9afea4b2344a425c750d1d3bb31e9e389c03
|
[
"MIT"
] | null | null | null |
src/neurons_engine/mbuild_modules/starter_shield.py
|
FFtust/k210_scripts
|
ef2e9afea4b2344a425c750d1d3bb31e9e389c03
|
[
"MIT"
] | null | null | null |
from common import num_range_scale
from neurons_engine import neurons_request, neurons_blocking_read, neurons_async_read, neurons_get_block_index
import time
##############################################################################################################
######################### car motion API API ###################################################################
##############################################################################################################
__MAX_SPEED = 240 # max car speed, unit: rpm (300 rpm = 1800°/s)
__MAX_RUN_TIME = 3600000 # max run time, unit: ms, 3600000 ms = 1 hour
__MAX_ACCEL_TIME = 10000 # max acceleration time, unit: ms, 10000 ms = 10 s
__MAX_DECEL_TIME = 10000 # max deceleration time, unit: ms, 10000 ms = 10 s
__MAX_DISTANCE = 10000 # max straight-line distance, unit: mm, 10000 mm = 10 m
__MAX_ANGLE = 10000 # max wheel rotation angle, unit: degree(°) — original comment said "mm", presumably a copy/paste slip; TODO confirm
__DEFAULT_SPEED = 50 # default car speed, unit: rpm
__DEFAULT_DISTANCE = 100 # default straight-line distance, unit: mm
__DEFAULT_ANGLE = 360 # default wheel rotation, unit: degree(°), 360° = 1 turn
__MS_TO_SECOND_FACTOR = 0.001 # milliseconds -> seconds conversion factor
__MM_TO_MS_FACTOR = 2.921 # default run-time estimate per unit of distance (ms per mm) — TODO confirm units
__DEFAULT_DELAY_COMPENSATION = 0.02 # extra sleep (seconds) compensating for transfer delay
def _car_spd_mode(command, speed, run_time, accel_time, decel_time, device_index):
    """Shared implementation of the four speed-mode car motions.

    Validates argument types (silently returning on a mismatch, per the
    module-wide convention), clamps every value to its allowed range, sends
    the request, then blocks for ``run_time`` milliseconds so the motion
    completes before the caller continues.
    """
    if not isinstance(speed, (int, float)):
        return
    # The three time arguments must be integers (milliseconds).
    if not isinstance(run_time, int):
        return
    if not isinstance(accel_time, int):
        return
    if not isinstance(decel_time, int):
        return
    speed = num_range_scale(speed, -__MAX_SPEED, __MAX_SPEED)
    run_time = num_range_scale(run_time, 0, __MAX_RUN_TIME)
    accel_time = num_range_scale(accel_time, 0, __MAX_ACCEL_TIME)
    decel_time = num_range_scale(decel_time, 0, __MAX_DECEL_TIME)
    neurons_request("m_starter_shield", command, (speed, run_time, accel_time, decel_time), device_index)
    time.sleep(run_time * __MS_TO_SECOND_FACTOR)

def car_spd_mode_forward(speed = __DEFAULT_SPEED, run_time = 0, accel_time = 1, decel_time = 1, device_index = 1):
    """Drive the car forward at ``speed`` rpm for ``run_time`` ms (blocking)."""
    _car_spd_mode("car_spd_mode_forward", speed, run_time, accel_time, decel_time, device_index)

def car_spd_mode_backward(speed = __DEFAULT_SPEED, run_time = 0, accel_time = 1, decel_time = 1, device_index = 1):
    """Drive the car backward at ``speed`` rpm for ``run_time`` ms (blocking)."""
    _car_spd_mode("car_spd_mode_backward", speed, run_time, accel_time, decel_time, device_index)

def car_spd_mode_turn_left(speed = __DEFAULT_SPEED, run_time = 0, accel_time = 1, decel_time = 1, device_index = 1):
    """Turn the car left at ``speed`` rpm for ``run_time`` ms (blocking)."""
    _car_spd_mode("car_spd_mode_turn_left", speed, run_time, accel_time, decel_time, device_index)

def car_spd_mode_turn_right(speed = __DEFAULT_SPEED, run_time = 0, accel_time = 1, decel_time = 1, device_index = 1):
    """Turn the car right at ``speed`` rpm for ``run_time`` ms (blocking)."""
    _car_spd_mode("car_spd_mode_turn_right", speed, run_time, accel_time, decel_time, device_index)
def car_spd_mode_apiece(left_speed = __DEFAULT_SPEED, right_speed = __DEFAULT_SPEED, run_time = 0, accel_time = 1, decel_time = 1, device_index = 1):
    """Drive each side of the car at its own speed (rpm) for ``run_time`` ms.

    Silently returns on any argument of the wrong type; every value is
    clamped to the module-wide limits before the request is sent, and the
    call then blocks for the (clamped) run time.
    """
    if not (isinstance(left_speed, (int, float)) and isinstance(right_speed, (int, float))):
        return
    if not (isinstance(run_time, int) and isinstance(accel_time, int) and isinstance(decel_time, int)):
        return
    left_speed = num_range_scale(left_speed, -__MAX_SPEED, __MAX_SPEED)
    right_speed = num_range_scale(right_speed, -__MAX_SPEED, __MAX_SPEED)
    run_time = num_range_scale(run_time, 0, __MAX_RUN_TIME)
    accel_time = num_range_scale(accel_time, 0, __MAX_ACCEL_TIME)
    decel_time = num_range_scale(decel_time, 0, __MAX_DECEL_TIME)
    neurons_request("m_starter_shield", "car_spd_mode_apiece", (left_speed, right_speed, run_time, accel_time, decel_time), device_index)
    time.sleep(run_time * __MS_TO_SECOND_FACTOR)
def _car_pos_args_ok(wheel_angle, run_time, accel_time, decel_time):
    """Return True when every position-mode argument is an int (module convention)."""
    return (isinstance(wheel_angle, int) and isinstance(run_time, int)
            and isinstance(accel_time, int) and isinstance(decel_time, int))

def car_pos_mode_straight(wheel_angle = __DEFAULT_ANGLE, run_time = 0, accel_time = 1, decel_time = 1, device_index = 1):
    """Drive straight until each wheel has turned ``wheel_angle`` degrees.

    Blocks until the motion should be complete.  When ``run_time`` is 0 the
    duration is estimated from the angle via ``__MM_TO_MS_FACTOR``.
    """
    if not _car_pos_args_ok(wheel_angle, run_time, accel_time, decel_time):
        return
    wheel_angle = num_range_scale(wheel_angle, -__MAX_ANGLE, __MAX_ANGLE)
    run_time = num_range_scale(run_time, 0, __MAX_RUN_TIME)
    accel_time = num_range_scale(accel_time, 0, __MAX_ACCEL_TIME)
    decel_time = num_range_scale(decel_time, 0, __MAX_DECEL_TIME)
    neurons_request("m_starter_shield", "car_pos_mode_straight", (wheel_angle, run_time, accel_time, decel_time), device_index)
    if run_time == 0:
        # Fixed: abs() guards against a negative estimated duration (and a
        # ValueError from time.sleep) when wheel_angle is negative.
        run_time = abs(wheel_angle) * __MM_TO_MS_FACTOR + 50
    time.sleep(run_time * __MS_TO_SECOND_FACTOR + __DEFAULT_DELAY_COMPENSATION)

def car_pos_mode_wheel_clockwise(wheel_angle = __DEFAULT_ANGLE, run_time = 0, accel_time = 1, decel_time = 1, device_index = 1):
    """Turn the wheels clockwise by ``wheel_angle`` degrees (blocking)."""
    if not _car_pos_args_ok(wheel_angle, run_time, accel_time, decel_time):
        return
    wheel_angle = num_range_scale(wheel_angle, -__MAX_ANGLE, __MAX_ANGLE)
    run_time = num_range_scale(run_time, 0, __MAX_RUN_TIME)
    accel_time = num_range_scale(accel_time, 0, __MAX_ACCEL_TIME)
    decel_time = num_range_scale(decel_time, 0, __MAX_DECEL_TIME)
    neurons_request("m_starter_shield", "car_pos_mode_wheel_clockwise", (wheel_angle, run_time, accel_time, decel_time), device_index)
    time.sleep(run_time * __MS_TO_SECOND_FACTOR + __DEFAULT_DELAY_COMPENSATION)

def car_pos_mode_wheel_anticlockwise(angle = __DEFAULT_ANGLE, run_time = 0, accel_time = 1, decel_time = 1, device_index = 1):
    """Turn the wheels anticlockwise: delegates with the angle negated."""
    if isinstance(angle, int):
        angle = -angle
    car_pos_mode_wheel_clockwise(angle, run_time, accel_time, decel_time, device_index)

def car_stop(decel_time = 1, device_index = 1):
    """Stop the car, decelerating over ``decel_time`` ms."""
    if not isinstance(decel_time, int):
        return
    decel_time = num_range_scale(decel_time, 0, __MAX_DECEL_TIME)
    # NOTE(review): (decel_time) is a plain value, not a 1-tuple; kept as-is
    # to match the wire format used throughout this module.
    neurons_request("m_starter_shield", "car_stop", (decel_time), device_index)
##############################################################################################################
######################### car motion API ##################################################################
##############################################################################################################
##############################################################################################################
######################### encoder motor API ##############################################################
##############################################################################################################
def encoder_motor_set_power(motor_index, power, device_index = 1):
    """Set the output power (clamped to -100..100) of one encoder motor."""
    for arg in (motor_index, power, device_index):
        if not isinstance(arg, (int, float)):
            return
    motor_index = num_range_scale(motor_index, 0, 127)
    power = num_range_scale(power, -100, 100)
    neurons_request("m_starter_shield", "encoder_motor_set_power", (motor_index, power), device_index)

def encoder_motor_set_power_both(power_1, power_2, device_index = 1):
    """Set the output power (clamped to -100..100) of both encoder motors at once."""
    for arg in (power_1, power_2, device_index):
        if not isinstance(arg, (int, float)):
            return
    clamped_1 = num_range_scale(power_1, -100, 100)
    clamped_2 = num_range_scale(power_2, -100, 100)
    neurons_request("m_starter_shield", "encoder_motor_set_power_both", (clamped_1, clamped_2), device_index)

def encoder_motor_set_speed(motor_index, speed, device_index = 1):
    """Set the target speed of one encoder motor (rpm, clamped to the module max)."""
    for arg in (motor_index, speed, device_index):
        if not isinstance(arg, (int, float)):
            return
    motor_index = num_range_scale(motor_index, 0, 127)
    speed = num_range_scale(speed, -__MAX_SPEED, __MAX_SPEED)
    neurons_request("m_starter_shield", "encoder_motor_set_speed", (motor_index, speed), device_index)
def encoder_motor_set_angle(port = 0, angle = __DEFAULT_ANGLE, run_time = 0, accel_time = 1, decel_time = 1, device_index = 1):
    """Rotate the encoder motor on ``port`` (0..2) by ``angle`` degrees, then
    block until the move should be finished.

    Silently returns on any argument of the wrong type, matching the rest
    of the module.
    """
    # Fixed: ``port`` was the only parameter that was never type-checked.
    if not isinstance(port, (int, float)):
        return
    if not isinstance(angle, int):
        return
    if not isinstance(run_time, int):
        return
    if not isinstance(accel_time, int):
        return
    if not isinstance(decel_time, int):
        return
    port = num_range_scale(port, 0, 2)
    angle = num_range_scale(angle, -__MAX_ANGLE, __MAX_ANGLE)
    run_time = num_range_scale(run_time, 0, __MAX_RUN_TIME)
    accel_time = num_range_scale(accel_time, 0, __MAX_ACCEL_TIME)
    decel_time = num_range_scale(decel_time, 0, __MAX_DECEL_TIME)
    neurons_request("m_starter_shield", "encoder_motor_set_angle", (port, angle, run_time, accel_time, decel_time), device_index)
    time.sleep(run_time * __MS_TO_SECOND_FACTOR + __DEFAULT_DELAY_COMPENSATION)
def encoder_motor_release(motor_index = 1, device_index = 1):
    """Release (de-energise) an encoder motor so it can spin freely."""
    for arg in (motor_index, device_index):
        if not isinstance(arg, (int, float)):
            return
    motor_index = num_range_scale(motor_index, 0, 127)
    neurons_request("m_starter_shield", "encoder_motor_release", (motor_index), device_index)

def encoder_motor_stop(motor_index = 1, device_index = 1):
    """Stop an encoder motor; implemented as a release."""
    encoder_motor_release(motor_index, device_index)
def encoder_motor_lock(motor_index = 1, lock_state = 0, device_index = 1):
    """Engage or release the position lock of an encoder motor (``lock_state`` 0/1)."""
    for arg in (motor_index, device_index, lock_state):
        if not isinstance(arg, (int, float)):
            return
    motor_index = num_range_scale(motor_index, 0, 127)
    lock_state = num_range_scale(lock_state, 0, 1)
    neurons_request("m_starter_shield", "encoder_motor_lock", (motor_index, lock_state), device_index)

def encoder_motor_lock_power(motor_index = 1, lock_power = 30, device_index = 1):
    """Set the holding power (0..127) applied while an encoder motor is locked."""
    for arg in (motor_index, device_index, lock_power):
        if not isinstance(arg, (int, float)):
            return
    motor_index = num_range_scale(motor_index, 0, 127)
    lock_power = num_range_scale(lock_power, 0, 127)
    neurons_request("m_starter_shield", "encoder_motor_lock_power", (motor_index, lock_power), device_index)
def encoder_motor_reset_position(motor_index, device_index = 1):
    """Zero the position counter of an encoder motor."""
    for arg in (motor_index, device_index):
        if not isinstance(arg, (int, float)):
            return
    neurons_request("m_starter_shield", "encoder_motor_reset_position", (num_range_scale(motor_index, 0, 127)), device_index)
def _encoder_motor_read(command, motor_index, device_index):
    """Blocking-read helper shared by the three encoder-motor getters.

    Returns -1 on invalid arguments, the second element of the reply on
    success, or 0 when no reply arrives.
    """
    if not isinstance(motor_index, (int, float)):
        return -1
    if not isinstance(device_index, (int, float)):
        return -1
    if motor_index not in [1, 2]:
        return -1
    ret = neurons_blocking_read("m_starter_shield", command, (motor_index), device_index)
    if ret:
        return ret[1]
    return 0

def encoder_motor_get_positon(motor_index = 1, device_index = 1):
    """Read the accumulated position of encoder motor 1 or 2.

    NOTE(review): "positon" is a typo, kept because it is the public name
    and the wire command string.
    """
    return _encoder_motor_read("encoder_motor_get_positon", motor_index, device_index)

def encoder_motor_get_speed(motor_index = 1, device_index = 1):
    """Read the current speed of encoder motor 1 or 2."""
    return _encoder_motor_read("encoder_motor_get_speed", motor_index, device_index)

def encoder_motor_get_power(motor_index = 1, device_index = 1):
    """Read the current power of encoder motor 1 or 2."""
    return _encoder_motor_read("encoder_motor_get_power", motor_index, device_index)
##############################################################################################################
######################### encoder motor API ##############################################################
##############################################################################################################
##############################################################################################################
######################### DC motor API ###################################################################
##############################################################################################################
def dc_motor_set_power(motor_index, power, device_index = 1):
    """Set the output power (-100..100) of one DC motor."""
    if not isinstance(motor_index, (int, float)):
        return
    if not isinstance(power, (int, float)):
        return
    if not isinstance(device_index, (int, float)):
        return
    motor_index = num_range_scale(motor_index, 0, 127)
    power = num_range_scale(power, -100, 100)
    neurons_request("m_starter_shield", "dc_motor_set_power", (motor_index, power), device_index)

def dc_motor_set_power_both(power_1, power_2, device_index = 1):
    """Set the output power (-100..100) of both DC motors at once."""
    if not isinstance(power_1, (int, float)):
        return
    if not isinstance(power_2, (int, float)):
        return
    if not isinstance(device_index, (int, float)):
        return
    power_1 = num_range_scale(power_1, -100, 100)
    power_2 = num_range_scale(power_2, -100, 100)
    neurons_request("m_starter_shield", "dc_motor_set_power_both", (power_1, power_2), device_index)

def dc_motor_change_power(motor_index, power, device_index = 1):
    """Adjust a DC motor's power by a relative amount (-200..200)."""
    if not isinstance(motor_index, (int, float)):
        return
    if not isinstance(power, (int, float)):
        return
    if not isinstance(device_index, (int, float)):
        return
    motor_index = num_range_scale(motor_index, 0, 127)
    power = num_range_scale(power, -200, 200)
    neurons_request("m_starter_shield", "dc_motor_change_power", (motor_index, power), device_index)

def dc_motor_stop(motor_index = 1, device_index = 1):
    """Stop one DC motor."""
    if not isinstance(motor_index, (int, float)):
        return
    if not isinstance(device_index, (int, float)):
        return
    motor_index = num_range_scale(motor_index, 0, 127)
    # Fixed: removed an unused local (``power = 0``) left over from an
    # earlier revision; it was never sent.
    neurons_request("m_starter_shield", "dc_motor_stop", (motor_index), device_index)
def dc_motor_get_power(motor_index = 1, device_index = 1):
    """Read the current power of DC motor 1 or 2.

    Returns -1 on invalid arguments, the reply's second element on success,
    or 0 when no reply arrives.
    """
    for arg in (motor_index, device_index):
        if not isinstance(arg, (int, float)):
            return -1
    if motor_index not in [1, 2]:
        return -1
    reply = neurons_blocking_read("m_starter_shield", "dc_motor_get_power", (motor_index), device_index)
    return reply[1] if reply else 0
##############################################################################################################
######################### DC motor API ###################################################################
##############################################################################################################
##############################################################################################################
######################### AC servo API ###################################################################
##############################################################################################################
# Wire values accepted as ``mode`` by servo_set_report_mode below.
__INQUIRE_REPORT_MODE = 0x00
__CHANGE_REPORT_MODE = 0x01
__PERIOD_REPORT_MODE = 0x02
def _servo_single(command, servo_index, angle, angle_min, device_index):
    """Shared implementation for the single-servo angle setters.

    Validates types (silently returning on a mismatch), clamps the servo
    index to 0..127 and the angle to ``angle_min``..180, and sends the
    request.
    """
    if not isinstance(servo_index, (int, float)):
        return
    if not isinstance(device_index, (int, float)):
        return
    if not isinstance(angle, (int, float)):
        return
    servo_index = num_range_scale(servo_index, 0, 127)
    angle = num_range_scale(angle, angle_min, 180)
    neurons_request("m_starter_shield", command, (servo_index, angle), device_index)

def _servo_all(command, angles, angle_min, device_index):
    """Shared implementation for the four-servo angle setters."""
    if not isinstance(device_index, (int, float)):
        return
    for a in angles:
        if not isinstance(a, (int, float)):
            return
    scaled = tuple(num_range_scale(a, angle_min, 180) for a in angles)
    neurons_request("m_starter_shield", command, scaled, device_index)

def servo_set_angle(servo_index, angle, device_index = 1):
    """Move one servo to an absolute angle (0..180 degrees)."""
    _servo_single("servo_set_angle", servo_index, angle, 0, device_index)

def servo_set_angle_all(angle_1, angle_2, angle_3, angle_4, device_index = 1):
    """Move all four servos to absolute angles (each clamped to 0..180)."""
    _servo_all("servo_set_angle_all", (angle_1, angle_2, angle_3, angle_4), 0, device_index)

def servo_change_angle(servo_index, angle, device_index = 1):
    """Adjust one servo's angle by a relative amount (-180..180 degrees)."""
    _servo_single("servo_change_angle", servo_index, angle, -180, device_index)

def servo_change_angle_all(angle_1, angle_2, angle_3, angle_4, device_index = 1):
    """Adjust all four servo angles by relative amounts (each clamped to -180..180)."""
    _servo_all("servo_change_angle_all", (angle_1, angle_2, angle_3, angle_4), -180, device_index)
def servo_release(servo_index = 1, device_index = 1):
    """Release a servo so it no longer holds its position."""
    for arg in (servo_index, device_index):
        if not isinstance(arg, (int, float)):
            return
    neurons_request("m_starter_shield", "servo_release", (num_range_scale(servo_index, 0, 127)), device_index)
def servo_set_pulse_width(servo_index, pulse_width, device_index = 1):
    """Drive a servo by raw pulse width (clamped to 0..16383) instead of an angle."""
    for arg in (servo_index, device_index, pulse_width):
        if not isinstance(arg, (int, float)):
            return
    servo_index = num_range_scale(servo_index, 0, 127)
    pulse_width = num_range_scale(pulse_width, 0, 16383)
    neurons_request("m_starter_shield", "servo_set_pulse_width", (servo_index, pulse_width), device_index)
def servo_get_angle(servo_index = 1, device_index = 1):
    """Read the current angle of servo 1..4.

    Returns -1 on invalid arguments, the reply's second element on success,
    or 0 when no reply arrives.
    """
    if not isinstance(servo_index, (int, float)):
        return -1
    if not isinstance(device_index, (int, float)):
        return -1
    if servo_index not in [1, 2, 3, 4]:
        return -1
    value = neurons_blocking_read("m_starter_shield", "servo_get_angle", (servo_index), device_index)
    # Fixed: compare to None with ``is not`` rather than ``!=`` (PEP 8).
    if value is not None:
        return value[1]
    return 0

def servo_get_angle_all(device_index = 1):
    """Read the angles of all four servos; returns [0,0,0,0] when no reply arrives."""
    if not isinstance(device_index, (int, float)):
        return -1
    value = neurons_blocking_read("m_starter_shield", "servo_get_angle_all", (), device_index)
    if value is not None:
        return value
    return [0,0,0,0]

def servo_get_load(servo_index = 1, device_index = 1):
    """Read the load of servo 1 or 2.

    NOTE(review): this returns ``value[0]`` while servo_get_angle returns
    ``value[1]`` — confirm which reply field actually carries the payload.
    """
    if not isinstance(servo_index, (int, float)):
        return -1
    if not isinstance(device_index, (int, float)):
        return -1
    if servo_index not in [1, 2]:
        return -1
    value = neurons_blocking_read("m_starter_shield", "servo_get_load", (servo_index), device_index)
    if value is not None:
        return value[0]
    return 0
def servo_reset(servo_index = 1, device_index = 1):
    """Reset a servo to its default state (the index is sent unclamped)."""
    for arg in (servo_index, device_index):
        if not isinstance(arg, (int, float)):
            return
    neurons_request("m_starter_shield", "servo_reset", (servo_index), device_index)
def servo_set_report_mode(servo_index, mode, timestamp, device_index = 1):
    """Select how a servo reports its state (inquire / on-change / periodic).

    ``timestamp`` is clamped to a minimum of 10; the upper bound is passed
    as None — presumably num_range_scale treats that as "no maximum"
    (TODO confirm).  Unknown modes are ignored.
    """
    if not isinstance(servo_index, (int, float)):
        return
    if not isinstance(timestamp, (int, float)):
        return
    timestamp = num_range_scale(timestamp, 10, None)
    if mode not in (__INQUIRE_REPORT_MODE, __CHANGE_REPORT_MODE, __PERIOD_REPORT_MODE):
        return
    neurons_request("m_starter_shield", "servo_set_report_mode", (servo_index, mode, timestamp), device_index)
##############################################################################################################
######################### AC servo API ###################################################################
##############################################################################################################
##############################################################################################################
######################### LED strip API ###################################################################
##############################################################################################################
def led_strip_set_single(strip_index, led_index, red_value, green_value, blue_value, device_index = 1):
    """Set one LED on a strip to the given RGB colour (each component 0..255)."""
    for arg in (strip_index, led_index, red_value, green_value, blue_value, device_index):
        if not isinstance(arg, (int, float)):
            return
    payload = (num_range_scale(strip_index, 0, 127),
               num_range_scale(led_index, 0, 127),
               num_range_scale(red_value, 0, 255),
               num_range_scale(green_value, 0, 255),
               num_range_scale(blue_value, 0, 255))
    neurons_request("m_starter_shield", "led_strip_set_single", payload, device_index)
def led_strip_set_all(strip_index, red_value, green_value, blue_value, device_index = 1):
    """Set every LED on the strip — LED index 0 presumably addresses all LEDs (TODO confirm)."""
    led_strip_set_single(strip_index, 0, red_value, green_value, blue_value, device_index)
def led_strip_off_all(strip_index, device_index = 1):
    """Turn off every LED on the strip."""
    for arg in (strip_index, device_index):
        if not isinstance(arg, (int, float)):
            return
    neurons_request("m_starter_shield", "led_strip_off_all", (num_range_scale(strip_index, 0, 127)), device_index)
def _led_strip_channel(command, strip_index, led_index, value, value_min, device_index):
    """Shared implementation for the six per-channel LED helpers.

    ``led_index`` may be a number (clamped to 0..127) or the string "all",
    which is sent as index 0.  ``value`` is clamped to ``value_min``..255
    (0 for the absolute setters, -255 for the relative changers).  Silently
    returns on any argument of the wrong type.
    """
    if not isinstance(strip_index, (int, float)):
        return
    if not isinstance(device_index, (int, float)):
        return
    if not (isinstance(led_index, (int, float)) or (led_index == "all")):
        return
    if not isinstance(value, (int, float)):
        return
    strip_index = num_range_scale(strip_index, 0, 127)
    if led_index == "all":
        led_index = 0
    else:
        led_index = num_range_scale(led_index, 0, 127)
    value = num_range_scale(value, value_min, 255)
    neurons_request("m_starter_shield", command, (strip_index, led_index, value), device_index)

def led_strip_set_red(strip_index, led_index, value, device_index = 1):
    """Set the red channel (0..255) of one LED, or of "all"."""
    _led_strip_channel("led_strip_set_red", strip_index, led_index, value, 0, device_index)

def led_strip_set_green(strip_index, led_index, value, device_index = 1):
    """Set the green channel (0..255) of one LED, or of "all"."""
    _led_strip_channel("led_strip_set_green", strip_index, led_index, value, 0, device_index)

def led_strip_set_blue(strip_index, led_index, value, device_index = 1):
    """Set the blue channel (0..255) of one LED, or of "all"."""
    _led_strip_channel("led_strip_set_blue", strip_index, led_index, value, 0, device_index)

def led_strip_change_red(strip_index, led_index, value, device_index = 1):
    """Adjust the red channel by a relative amount (-255..255) of one LED, or of "all"."""
    _led_strip_channel("led_strip_change_red", strip_index, led_index, value, -255, device_index)

def led_strip_change_green(strip_index, led_index, value, device_index = 1):
    """Adjust the green channel by a relative amount (-255..255) of one LED, or of "all"."""
    _led_strip_channel("led_strip_change_green", strip_index, led_index, value, -255, device_index)

def led_strip_change_blue(strip_index, led_index, value, device_index = 1):
    """Adjust the blue channel by a relative amount (-255..255) of one LED, or of "all"."""
    _led_strip_channel("led_strip_change_blue", strip_index, led_index, value, -255, device_index)
def led_strip_set_mode(strip_index, mode, device_index = 1):
    """Select the strip's animation mode: "static", "marquee" or "breathe".

    Unknown mode strings are ignored.
    """
    if not isinstance(strip_index, (int, float)):
        return
    if not isinstance(device_index, (int, float)):
        return
    if not isinstance(mode, (str,)):
        return
    codes = {"static": 0x00, "marquee": 0x03, "breathe": 0x04}
    if mode not in codes:
        return
    neurons_request("m_starter_shield", "led_strip_set_mode", (num_range_scale(strip_index, 0, 127), codes[mode]), device_index)
def led_strip_set_block(strip_index, led_num, data, device_index = 1):
    """Write colour data for the first ``led_num`` LEDs of a strip.

    ``data`` is truncated to ``led_num`` entries when longer; when shorter,
    the whole sequence is sent.  Nothing is sent for ``led_num`` <= 0 or any
    badly-typed argument.
    """
    if not isinstance(strip_index, (int, float)):
        return
    if not isinstance(device_index, (int, float)):
        return
    if not isinstance(led_num, (int, float)):
        return
    if not isinstance(data, (list, tuple)):
        return
    if led_num <= 0:
        return
    strip_index = num_range_scale(strip_index, 0, 127)
    payload = list(data[0 : led_num]) if led_num < len(data) else list(data)
    neurons_request("m_starter_shield", "led_strip_set_block", (strip_index, payload), device_index)
def led_strip_set_brightness(strip_index = 1, brightness = 30, device_index = 1):
    """Set the strip brightness as a percentage (clamped to 0..100)."""
    for arg in (strip_index, device_index, brightness):
        if not isinstance(arg, (int, float)):
            return
    strip_index = num_range_scale(strip_index, 0, 127)
    brightness = num_range_scale(brightness, 0, 100)
    neurons_request("m_starter_shield", "led_strip_set_brightness", (strip_index, brightness), device_index)
def led_strip_change_brightness(strip_index = 1, brightness = 50, device_index = 1):
    """Adjust the strip brightness by a relative percentage (clamped to -100..100)."""
    for arg in (strip_index, device_index, brightness):
        if not isinstance(arg, (int, float)):
            return
    strip_index = num_range_scale(strip_index, 0, 127)
    brightness = num_range_scale(brightness, -100, 100)
    neurons_request("m_starter_shield", "led_strip_change_brightness", (strip_index, brightness), device_index)
def led_strip_get_brightness(strip_index = 1, device_index = 1):
    """Read the brightness of strip 1 or 2; -1 on bad arguments or no reply."""
    for arg in (strip_index, device_index):
        if not isinstance(arg, (int, float)):
            return -1
    if strip_index not in [1, 2]:
        return -1
    reply = neurons_blocking_read("m_starter_shield", "led_strip_get_brightness", (strip_index), device_index)
    return reply[1] if reply else -1
def led_strip_set_move(strip_index = 1, move_step = 1, move_cycle = 1, device_index = 1):
    """Shift the strip pattern by ``move_step`` (may be negative) for ``move_cycle`` cycles."""
    for arg in (strip_index, device_index, move_step, move_cycle):
        if not isinstance(arg, (int, float)):
            return
    strip_index = num_range_scale(strip_index, 0, 127)
    move_step = num_range_scale(move_step, -128, 127)
    move_cycle = num_range_scale(move_cycle, 0, 127)
    neurons_request("m_starter_shield", "led_strip_set_move", (strip_index, move_step, move_cycle), device_index)
'''
def led_strip_set_effect(strip_index, mode, speed, data, device_index = 1):
if not isinstance(strip_index, (int, float)):
return
if not isinstance(device_index, (int, float)):
return
if not isinstance(speed, (int, float)):
return
if not isinstance(data, (list, tuple)):
return
if mode == "static" or mode == "steady":
mode = 0x00
elif mode == "marquee":
mode = 0x03
elif mode == "breathe":
mode = 0x04
else:
return
speed = num_range_scale(speed, 0, 8)
list_data = list()
list_data.append(speed)
list_data.extend(data)
neurons_request("m_starter_shield", "led_strip_set_mode", (strip_index, mode), device_index)
neurons_request("m_starter_shield", "led_strip_set_block", (strip_index, data), device_index)
def led_strip_show(strip_index, color, device_index = 1):
if not isinstance(strip_index, (int, float)):
return
if not isinstance(device_index, (int, float)):
return
if not isinstance(color, (list, tuple)):
return
for i in range(len(color)):
if isinstance(color[i], str) and (color[i] in mbuild_color_table):
color[i] = mbuild_color_table[color[i]]
elif isinstance(color[i], (int, float)):
pass
else:
color[i] = 0
neurons_request("m_starter_shield", "led_strip_set_block", (strip_index, color), device_index)
'''
##############################################################################################################
######################### LED strip API ###################################################################
##############################################################################################################
##############################################################################################################
######################### power management API ############################################################
##############################################################################################################
def power_get_battery_voltag(device_index = 1):
if not isinstance(device_index, (int, float)):
return 0
ret = neurons_blocking_read("m_starter_shield", "power_get_battery_voltag", (), device_index)
if ret:
return ret[0]
else:
return 0
def power_set_battery_voltag_mode(mode, period, device_index = 1):
if not isinstance(device_index, (int, float)):
return 0
if not isinstance(mode, (int, float)):
return 0
if not isinstance(period, (int, float)):
return 0
neurons_request("m_starter_shield", "power_set_battery_voltag_mode", (mode, period), device_index)
def power_get_usb_state(device_index = 1):
    """Blocking read of the USB connection state.

    Returns the first value of the reply, or 0 on a bad index / no reply.
    """
    if not isinstance(device_index, (int, float)):
        return 0
    reply = neurons_blocking_read("m_starter_shield", "power_get_usb_state", (), device_index)
    return reply[0] if reply else 0
def power_set_usb_state_mode(mode, period, device_index = 1):
    """Configure the USB-state report mode/period; returns 0 on bad args."""
    for arg in (device_index, mode, period):
        if not isinstance(arg, (int, float)):
            return 0
    neurons_request("m_starter_shield", "power_set_usb_state_mode", (mode, period), device_index)
def power_get_switch_state(device_index = 1):
    """Blocking read of the power-switch state.

    Returns the first value of the reply, or 0 on a bad index / no reply.
    """
    if not isinstance(device_index, (int, float)):
        return 0
    reply = neurons_blocking_read("m_starter_shield", "power_get_switch_state", (), device_index)
    return reply[0] if reply else 0
def power_set_switch_state_mode(mode, period, device_index = 1):
    """Configure the switch-state report mode/period; returns 0 on bad args."""
    for arg in (device_index, mode, period):
        if not isinstance(arg, (int, float)):
            return 0
    neurons_request("m_starter_shield", "power_set_switch_state_mode", (mode, period), device_index)
def power_get_charge_state(device_index = 1):
    """Blocking read of the battery charging state.

    Returns the first value of the reply, or 0 on a bad index / no reply.
    """
    if not isinstance(device_index, (int, float)):
        return 0
    reply = neurons_blocking_read("m_starter_shield", "power_get_charge_state", (), device_index)
    return reply[0] if reply else 0
def power_set_charge_state_mode(mode, period, device_index = 1):
    """Configure the charge-state report mode/period; returns 0 on bad args."""
    for arg in (device_index, mode, period):
        if not isinstance(arg, (int, float)):
            return 0
    neurons_request("m_starter_shield", "power_set_charge_state_mode", (mode, period), device_index)
def power_get_battery_level(device_index = 1):
    """Blocking read of the battery level.

    Returns the first value of the reply, or 0 on a bad index / no reply.
    """
    if not isinstance(device_index, (int, float)):
        return 0
    reply = neurons_blocking_read("m_starter_shield", "power_get_battery_level", (), device_index)
    return reply[0] if reply else 0
def power_set_battery_level_mode(mode, period, device_index = 1):
    """Configure the battery-level report mode/period; returns 0 on bad args."""
    for arg in (device_index, mode, period):
        if not isinstance(arg, (int, float)):
            return 0
    neurons_request("m_starter_shield", "power_set_battery_level_mode", (mode, period), device_index)
def power_get_power_all_state(device_index = 1):
    """Blocking read of the combined power status.

    Unlike the other getters this returns the raw reply (not just its
    first element), or 0 on a bad index / no reply.
    """
    if not isinstance(device_index, (int, float)):
        return 0
    reply = neurons_blocking_read("m_starter_shield", "power_get_power_all_state", (), device_index)
    return reply if reply else 0
def power_set_power_all_state_mode(mode, period, device_index = 1):
    """Configure the combined power-status report mode/period; returns 0 on bad args."""
    for arg in (device_index, mode, period):
        if not isinstance(arg, (int, float)):
            return 0
    neurons_request("m_starter_shield", "power_set_power_all_state_mode", (mode, period), device_index)
def power_respond_power_all_state(device_index = 1):
    """Non-blocking (async) read of the combined power status.

    Returns the raw reply when one is available, otherwise False.
    """
    if not isinstance(device_index, (int, float)):
        return False
    reply = neurons_async_read("m_starter_shield", "power_get_power_all_state", (), device_index)
    return reply if reply else False
##############################################################################################################
######################### power management API ############################################################
##############################################################################################################
##############################################################################################################
######################### multifunction port API ##########################################################
##############################################################################################################
def multifunction_digital_write(port, value, device_index=1):
    """Drive a multifunction-port pin.

    ``port`` is clamped to 0..127 and ``value`` to 0..1 before sending.
    Silently returns on non-numeric arguments.
    """
    for arg in (port, device_index, value):
        if not isinstance(arg, (int, float)):
            return
    neurons_request(
        "m_starter_shield",
        "multifunction_digital_write",
        (num_range_scale(port, 0, 127), num_range_scale(value, 0, 1)),
        device_index,
    )
def multifunction_digital_read(port, device_index =1):
    """Blocking read of a digital multifunction port.

    :param port: port number; only 1 and 2 are valid.
    :param device_index: index of the shield on the bus (default 1).
    :return: the level reported by the shield (second reply element,
        matching the original code — TODO confirm against firmware), or
        -1 on invalid arguments / no reply.
    """
    if not isinstance(port, (int, float)):
        return -1
    if not isinstance(device_index, (int, float)):
        return -1
    if port not in [1, 2]:
        return -1
    # Fix: the argument pack must be a tuple, as in every other neurons_*
    # call in this module; the original "(port)" is just a parenthesized
    # int, not a 1-tuple.
    ret = neurons_blocking_read("m_starter_shield", "multifunction_digital_read", (port,), device_index)
    if ret:
        return ret[1]
    else:
        return -1
def multifunction_analog_read(port, device_index =1):
    """Blocking read of an analog multifunction port.

    :param port: port number; only 1 and 2 are valid.
    :param device_index: index of the shield on the bus (default 1).
    :return: the analog value reported by the shield (second reply
        element, matching the original code — TODO confirm against
        firmware), or -1 on invalid arguments / no reply.
    """
    if not isinstance(port, (int, float)):
        return -1
    if not isinstance(device_index, (int, float)):
        return -1
    if port not in [1, 2]:
        return -1
    # Fix: the argument pack must be a tuple, as in every other neurons_*
    # call in this module; the original "(port)" is just a parenthesized
    # int, not a 1-tuple.
    ret = neurons_blocking_read("m_starter_shield", "multifunction_analog_read", (port,), device_index)
    if ret:
        return ret[1]
    else:
        return -1
def multifunction_pwm_set(port, duty, frequency, device_index=1):
    """Configure PWM output on a multifunction port.

    ``port`` is clamped to 0..127, ``duty`` to 0..100 (percent) and
    ``frequency`` to 0..2000 before sending.  Silently returns on
    non-numeric arguments.
    """
    for arg in (port, device_index, duty, frequency):
        if not isinstance(arg, (int, float)):
            return
    neurons_request(
        "m_starter_shield",
        "multifunction_pwm_set",
        (
            num_range_scale(port, 0, 127),
            num_range_scale(duty, 0, 100),
            num_range_scale(frequency, 0, 2000),
        ),
        device_index,
    )
##############################################################################################################
######################### multifunction port API ##########################################################
##############################################################################################################
| 40.762045
| 149
| 0.602123
| 5,214
| 41,455
| 4.4229
| 0.036057
| 0.095876
| 0.127488
| 0.097437
| 0.915485
| 0.897836
| 0.865444
| 0.83626
| 0.804735
| 0.762586
| 0
| 0.023931
| 0.202653
| 41,455
| 1,016
| 150
| 40.802165
| 0.673625
| 0.020215
| 0
| 0.707851
| 0
| 0
| 0.072471
| 0.028362
| 0
| 0
| 0.00069
| 0
| 0
| 1
| 0.088803
| false
| 0
| 0.003861
| 0
| 0.395109
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a685213c4c54d14d92b3365d02fdca203076981e
| 154
|
py
|
Python
|
dev/app/admin.py
|
johncmacy/excel-tracker-to-django
|
12e65dc92f4f18b64f43442f61a38fa0b2e8ce95
|
[
"MIT"
] | null | null | null |
dev/app/admin.py
|
johncmacy/excel-tracker-to-django
|
12e65dc92f4f18b64f43442f61a38fa0b2e8ce95
|
[
"MIT"
] | 3
|
2021-09-20T12:48:15.000Z
|
2021-10-01T15:43:51.000Z
|
dev/app/admin.py
|
johncmacy/excel-tracker-to-django
|
12e65dc92f4f18b64f43442f61a38fa0b2e8ce95
|
[
"MIT"
] | null | null | null |
# Created by django-from-excel at 2021-09-20 08:24:05.503356
from django.contrib import admin
from .models import *
# Register the generated model so it appears in the Django admin site.
admin.site.register(ConvertedModel)
| 154
| 154
| 0.785714
| 25
| 154
| 4.84
| 0.8
| 0.181818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.147059
| 0.116883
| 154
| 1
| 154
| 154
| 0.742647
| 0.980519
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
471093040caa7381082a19feeb7dcdd8d00814df
| 118
|
py
|
Python
|
tests/_base2.py
|
garywu/pipedream
|
d89a4031d5ee78c05c6845341607a59528f0bd75
|
[
"BSD-3-Clause"
] | 8
|
2018-02-21T04:13:25.000Z
|
2020-04-24T20:05:47.000Z
|
tests/_base2.py
|
garywu/pipedream
|
d89a4031d5ee78c05c6845341607a59528f0bd75
|
[
"BSD-3-Clause"
] | 1
|
2019-05-13T13:14:32.000Z
|
2019-05-13T13:14:32.000Z
|
tests/_base2.py
|
garywu/pypedream
|
d89a4031d5ee78c05c6845341607a59528f0bd75
|
[
"BSD-3-Clause"
] | null | null | null |
import unittest_rand_gen_state
class RandStateSaverBase(object):
    # Base class wired to the unittest_rand_gen_state.Saver metaclass —
    # presumably it snapshots/restores the random-generator state around
    # tests (verify in unittest_rand_gen_state).
    # NOTE(review): ``__metaclass__`` only takes effect on Python 2; on
    # Python 3 this attribute is ignored and the metaclass is not applied.
    __metaclass__ = unittest_rand_gen_state.Saver
| 16.857143
| 49
| 0.838983
| 14
| 118
| 6.357143
| 0.714286
| 0.269663
| 0.337079
| 0.449438
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.118644
| 118
| 6
| 50
| 19.666667
| 0.855769
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
5b30b8b09b5f58be39d2655f4b7e3bcc5d3125c3
| 197
|
py
|
Python
|
TCConfig/fields/__init__.py
|
nightfire2/BiomeEdit
|
e009eb5ecdca063abfbb1a708495176ac5f7defc
|
[
"FSFAP"
] | 1
|
2016-02-08T16:10:53.000Z
|
2016-02-08T16:10:53.000Z
|
TCConfig/fields/__init__.py
|
nightfire2/BiomeEdit
|
e009eb5ecdca063abfbb1a708495176ac5f7defc
|
[
"FSFAP"
] | null | null | null |
TCConfig/fields/__init__.py
|
nightfire2/BiomeEdit
|
e009eb5ecdca063abfbb1a708495176ac5f7defc
|
[
"FSFAP"
] | null | null | null |
from ConfigFieldAttribute import ConfigFieldAttribute
from ConfigFieldAttribute import ConfigFieldText
from ConfigFieldFunc import ConfigFieldFunc
from ConfigFieldUnknown import ConfigFieldUnknown
| 39.4
| 53
| 0.918782
| 16
| 197
| 11.3125
| 0.375
| 0.265193
| 0.331492
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.081218
| 197
| 4
| 54
| 49.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
5b46f3d2e241fcbf93edb1a28442185eb10c400f
| 41
|
py
|
Python
|
gee_gateway/web/__init__.py
|
jdilger/gee-gateway
|
2e4d11f01e785359e6f213fe7d647032f1212b76
|
[
"MIT"
] | 5
|
2016-12-08T15:40:27.000Z
|
2021-09-10T16:01:36.000Z
|
gee_gateway/web/__init__.py
|
jdilger/gee-gateway
|
2e4d11f01e785359e6f213fe7d647032f1212b76
|
[
"MIT"
] | 1
|
2020-02-03T22:11:09.000Z
|
2020-02-03T22:11:09.000Z
|
gee_gateway/web/__init__.py
|
jdilger/gee-gateway
|
2e4d11f01e785359e6f213fe7d647032f1212b76
|
[
"MIT"
] | 8
|
2017-05-12T20:49:38.000Z
|
2020-05-14T19:22:21.000Z
|
from . import routes
from . import errors
| 20.5
| 20
| 0.780488
| 6
| 41
| 5.333333
| 0.666667
| 0.625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.170732
| 41
| 2
| 21
| 20.5
| 0.941176
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
5b6c27edfc853310c86450ac4d88ab7595608441
| 15,993
|
py
|
Python
|
pybind/slxos/v16r_1_00b/brocade_mpls_rpc/clear_mpls_statistics_ldp_transit/input/__init__.py
|
shivharis/pybind
|
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
|
[
"Apache-2.0"
] | null | null | null |
pybind/slxos/v16r_1_00b/brocade_mpls_rpc/clear_mpls_statistics_ldp_transit/input/__init__.py
|
shivharis/pybind
|
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
|
[
"Apache-2.0"
] | null | null | null |
pybind/slxos/v16r_1_00b/brocade_mpls_rpc/clear_mpls_statistics_ldp_transit/input/__init__.py
|
shivharis/pybind
|
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
|
[
"Apache-2.0"
] | 1
|
2021-11-05T22:15:42.000Z
|
2021-11-05T22:15:42.000Z
|
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class input(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module brocade-mpls - based on the path /brocade_mpls_rpc/clear-mpls-statistics-ldp-transit/input. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.

    NOTE(review): generated Python 2 code (``unicode``, ``__builtin__``) —
    do not hand-edit; regenerate from the YANG model instead.
    """
    # __slots__ pins the instance layout; the double-underscore entries are
    # name-mangled leaf holders backing the properties declared at the end.
    __slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__clear_statistics_ldp_transit_fec_prefix','__clear_statistics_ldp_transit_fec_prefix_address','__clear_statistics_ldp_transit_fec_prefix_mask',)
    _yang_name = 'input'
    _rest_name = 'input'
    _pybind_generated_by = 'container'
    def __init__(self, *args, **kwargs):
        # Resolve the XPath helper: explicit kwarg wins, then the parent's,
        # else disabled.
        path_helper_ = kwargs.pop("path_helper", None)
        if path_helper_ is False:
            self._path_helper = False
        elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
            self._path_helper = path_helper_
        elif hasattr(self, "_parent"):
            path_helper_ = getattr(self._parent, "_path_helper", False)
            self._path_helper = path_helper_
        else:
            self._path_helper = False
        # Resolve extmethods the same way: kwarg, then parent, else disabled.
        extmethods = kwargs.pop("extmethods", None)
        if extmethods is False:
            self._extmethods = False
        elif extmethods is not None and isinstance(extmethods, dict):
            self._extmethods = extmethods
        elif hasattr(self, "_parent"):
            extmethods = getattr(self._parent, "_extmethods", None)
            self._extmethods = extmethods
        else:
            self._extmethods = False
        # Instantiate each leaf with its YANG-derived pattern restriction.
        self.__clear_statistics_ldp_transit_fec_prefix_mask = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="clear-statistics-ldp-transit-fec-prefix-mask", rest_name="clear-statistics-ldp-transit-fec-prefix-mask", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='mpls-ipv4-subnet-mask', is_config=True)
        self.__clear_statistics_ldp_transit_fec_prefix_address = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="clear-statistics-ldp-transit-fec-prefix-address", rest_name="clear-statistics-ldp-transit-fec-prefix-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='mpls-ipv4-address', is_config=True)
        self.__clear_statistics_ldp_transit_fec_prefix = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/(([0-9])|([1-2][0-9])|(3[0-2]))'}), is_leaf=True, yang_name="clear-statistics-ldp-transit-fec-prefix", rest_name="clear-statistics-ldp-transit-fec-prefix", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='mpls-ipv4-prefix', is_config=True)
        # Optional copy-construction from a single object carrying the same
        # pyangbind elements; ``load`` is forwarded to each setter.
        load = kwargs.pop("load", None)
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)
    def _path(self):
        # YANG path of this container relative to the model root.
        if hasattr(self, "_parent"):
            return self._parent._path()+[self._yang_name]
        else:
            return [u'brocade_mpls_rpc', u'clear-mpls-statistics-ldp-transit', u'input']
    def _rest_path(self):
        # REST path; empty _rest_name segments are skipped.
        if hasattr(self, "_parent"):
            if self._rest_name:
                return self._parent._rest_path()+[self._rest_name]
            else:
                return self._parent._rest_path()
        else:
            return [u'clear-mpls-statistics-ldp-transit', u'input']
    def _get_clear_statistics_ldp_transit_fec_prefix(self):
        """
        Getter method for clear_statistics_ldp_transit_fec_prefix, mapped from YANG variable /brocade_mpls_rpc/clear_mpls_statistics_ldp_transit/input/clear_statistics_ldp_transit_fec_prefix (mpls-ipv4-prefix)

        YANG Description: Transit fec prefix
        """
        return self.__clear_statistics_ldp_transit_fec_prefix
    def _set_clear_statistics_ldp_transit_fec_prefix(self, v, load=False):
        """
        Setter method for clear_statistics_ldp_transit_fec_prefix, mapped from YANG variable /brocade_mpls_rpc/clear_mpls_statistics_ldp_transit/input/clear_statistics_ldp_transit_fec_prefix (mpls-ipv4-prefix)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_clear_statistics_ldp_transit_fec_prefix is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_clear_statistics_ldp_transit_fec_prefix() directly.

        YANG Description: Transit fec prefix
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            # Re-wrap the value; the RestrictedClassType raises if the
            # string does not match the mpls-ipv4-prefix pattern.
            t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/(([0-9])|([1-2][0-9])|(3[0-2]))'}), is_leaf=True, yang_name="clear-statistics-ldp-transit-fec-prefix", rest_name="clear-statistics-ldp-transit-fec-prefix", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='mpls-ipv4-prefix', is_config=True)
        except (TypeError, ValueError):
            raise ValueError({
                'error-string': """clear_statistics_ldp_transit_fec_prefix must be of a type compatible with mpls-ipv4-prefix""",
                'defined-type': "brocade-mpls:mpls-ipv4-prefix",
                'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/(([0-9])|([1-2][0-9])|(3[0-2]))'}), is_leaf=True, yang_name="clear-statistics-ldp-transit-fec-prefix", rest_name="clear-statistics-ldp-transit-fec-prefix", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='mpls-ipv4-prefix', is_config=True)""",
            })
        self.__clear_statistics_ldp_transit_fec_prefix = t
        if hasattr(self, '_set'):
            self._set()
    def _unset_clear_statistics_ldp_transit_fec_prefix(self):
        # Reset the leaf to a fresh (unset) instance.
        self.__clear_statistics_ldp_transit_fec_prefix = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/(([0-9])|([1-2][0-9])|(3[0-2]))'}), is_leaf=True, yang_name="clear-statistics-ldp-transit-fec-prefix", rest_name="clear-statistics-ldp-transit-fec-prefix", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='mpls-ipv4-prefix', is_config=True)
    def _get_clear_statistics_ldp_transit_fec_prefix_address(self):
        """
        Getter method for clear_statistics_ldp_transit_fec_prefix_address, mapped from YANG variable /brocade_mpls_rpc/clear_mpls_statistics_ldp_transit/input/clear_statistics_ldp_transit_fec_prefix_address (mpls-ipv4-address)

        YANG Description: Fec prefix address
        """
        return self.__clear_statistics_ldp_transit_fec_prefix_address
    def _set_clear_statistics_ldp_transit_fec_prefix_address(self, v, load=False):
        """
        Setter method for clear_statistics_ldp_transit_fec_prefix_address, mapped from YANG variable /brocade_mpls_rpc/clear_mpls_statistics_ldp_transit/input/clear_statistics_ldp_transit_fec_prefix_address (mpls-ipv4-address)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_clear_statistics_ldp_transit_fec_prefix_address is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_clear_statistics_ldp_transit_fec_prefix_address() directly.

        YANG Description: Fec prefix address
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            # Re-wrap the value; raises if it does not match the
            # mpls-ipv4-address pattern.
            t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="clear-statistics-ldp-transit-fec-prefix-address", rest_name="clear-statistics-ldp-transit-fec-prefix-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='mpls-ipv4-address', is_config=True)
        except (TypeError, ValueError):
            raise ValueError({
                'error-string': """clear_statistics_ldp_transit_fec_prefix_address must be of a type compatible with mpls-ipv4-address""",
                'defined-type': "brocade-mpls:mpls-ipv4-address",
                'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="clear-statistics-ldp-transit-fec-prefix-address", rest_name="clear-statistics-ldp-transit-fec-prefix-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='mpls-ipv4-address', is_config=True)""",
            })
        self.__clear_statistics_ldp_transit_fec_prefix_address = t
        if hasattr(self, '_set'):
            self._set()
    def _unset_clear_statistics_ldp_transit_fec_prefix_address(self):
        # Reset the leaf to a fresh (unset) instance.
        self.__clear_statistics_ldp_transit_fec_prefix_address = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="clear-statistics-ldp-transit-fec-prefix-address", rest_name="clear-statistics-ldp-transit-fec-prefix-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='mpls-ipv4-address', is_config=True)
    def _get_clear_statistics_ldp_transit_fec_prefix_mask(self):
        """
        Getter method for clear_statistics_ldp_transit_fec_prefix_mask, mapped from YANG variable /brocade_mpls_rpc/clear_mpls_statistics_ldp_transit/input/clear_statistics_ldp_transit_fec_prefix_mask (mpls-ipv4-subnet-mask)

        YANG Description: Fec prefix address mask
        """
        return self.__clear_statistics_ldp_transit_fec_prefix_mask
    def _set_clear_statistics_ldp_transit_fec_prefix_mask(self, v, load=False):
        """
        Setter method for clear_statistics_ldp_transit_fec_prefix_mask, mapped from YANG variable /brocade_mpls_rpc/clear_mpls_statistics_ldp_transit/input/clear_statistics_ldp_transit_fec_prefix_mask (mpls-ipv4-subnet-mask)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_clear_statistics_ldp_transit_fec_prefix_mask is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_clear_statistics_ldp_transit_fec_prefix_mask() directly.

        YANG Description: Fec prefix address mask
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            # Re-wrap the value; raises if it does not match the
            # mpls-ipv4-subnet-mask pattern.
            t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="clear-statistics-ldp-transit-fec-prefix-mask", rest_name="clear-statistics-ldp-transit-fec-prefix-mask", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='mpls-ipv4-subnet-mask', is_config=True)
        except (TypeError, ValueError):
            raise ValueError({
                'error-string': """clear_statistics_ldp_transit_fec_prefix_mask must be of a type compatible with mpls-ipv4-subnet-mask""",
                'defined-type': "brocade-mpls:mpls-ipv4-subnet-mask",
                'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="clear-statistics-ldp-transit-fec-prefix-mask", rest_name="clear-statistics-ldp-transit-fec-prefix-mask", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='mpls-ipv4-subnet-mask', is_config=True)""",
            })
        self.__clear_statistics_ldp_transit_fec_prefix_mask = t
        if hasattr(self, '_set'):
            self._set()
    def _unset_clear_statistics_ldp_transit_fec_prefix_mask(self):
        # Reset the leaf to a fresh (unset) instance.
        self.__clear_statistics_ldp_transit_fec_prefix_mask = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="clear-statistics-ldp-transit-fec-prefix-mask", rest_name="clear-statistics-ldp-transit-fec-prefix-mask", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='mpls-ipv4-subnet-mask', is_config=True)

    # Public property façade over the private getter/setter pairs
    # (``__builtin__.property`` because ``property`` may be shadowed).
    clear_statistics_ldp_transit_fec_prefix = __builtin__.property(_get_clear_statistics_ldp_transit_fec_prefix, _set_clear_statistics_ldp_transit_fec_prefix)
    clear_statistics_ldp_transit_fec_prefix_address = __builtin__.property(_get_clear_statistics_ldp_transit_fec_prefix_address, _set_clear_statistics_ldp_transit_fec_prefix_address)
    clear_statistics_ldp_transit_fec_prefix_mask = __builtin__.property(_get_clear_statistics_ldp_transit_fec_prefix_mask, _set_clear_statistics_ldp_transit_fec_prefix_mask)

    _pyangbind_elements = {'clear_statistics_ldp_transit_fec_prefix': clear_statistics_ldp_transit_fec_prefix, 'clear_statistics_ldp_transit_fec_prefix_address': clear_statistics_ldp_transit_fec_prefix_address, 'clear_statistics_ldp_transit_fec_prefix_mask': clear_statistics_ldp_transit_fec_prefix_mask, }
| 78.014634
| 637
| 0.73751
| 2,512
| 15,993
| 4.406449
| 0.070462
| 0.023128
| 0.168037
| 0.189719
| 0.863041
| 0.832415
| 0.800885
| 0.796188
| 0.757702
| 0.722739
| 0
| 0.038926
| 0.10367
| 15,993
| 204
| 638
| 78.397059
| 0.73324
| 0.167698
| 0
| 0.390625
| 0
| 0.09375
| 0.415981
| 0.339808
| 0
| 0
| 0
| 0
| 0
| 1
| 0.09375
| false
| 0
| 0.0625
| 0
| 0.289063
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5bb4accd0ad572de26dcbca1f7c401d5d129f198
| 3,709
|
py
|
Python
|
regexlib/2021-5-15/python_re2_test_file/regexlib_1959.py
|
yetingli/ReDoS-Benchmarks
|
f5b5094d835649e957bf3fec6b8bd4f6efdb35fc
|
[
"MIT"
] | 1
|
2022-01-24T14:43:23.000Z
|
2022-01-24T14:43:23.000Z
|
regexlib/python_re2_test_file/regexlib_1959.py
|
yetingli/ReDoS-Benchmarks
|
f5b5094d835649e957bf3fec6b8bd4f6efdb35fc
|
[
"MIT"
] | null | null | null |
regexlib/python_re2_test_file/regexlib_1959.py
|
yetingli/ReDoS-Benchmarks
|
f5b5094d835649e957bf3fec6b8bd4f6efdb35fc
|
[
"MIT"
] | null | null | null |
# 1959
# ^((http|https|ftp|ftps)+(:\/\/))?(www\.)?(([a-z0-9\.-]{2,})\.(ac|ad|ae|af|ag|ai|al|am|an|ao|aq|ar|as|at|au|aw|ax|az|ba|bb|bd|be|bf|bg|bh|bi|bj|bm|bn|bo|br|bs|bt|bv|bw|by|bz|ca|cc|cd|cf|cg|ch|ci|ck|cl|cm|cn|co|cr|cs|cu|cv|cx|cy|cz|dd|de|dj|dk|dm|do|dz|ec|ee|eg|eh|er|es|et|eu|fi|fj|fk|fm|fo|fr|fx|ga|gb|gd|ge|gf|gg|gh|gi|gl|gm|gn|gp|gq|gr|gs|gt|gu|gw|gy|hk|hm|hn|hr|ht|hu|id|ie|il|im|in|io|iq|ir|is|it|je|jm|jo|jp|ke|kg|kh|ki|km|kn|kp|kr|kw|ky|kz|la|lb|lc|li|lk|lr|ls|lt|lu|lv|ly|ma|mc|md|me|mg|mh|mk|ml|mm|mn|mo|mp|mq|mr|ms|mt|mu|mv|mw|mx|my|mz|na|nc|ne|nf|ng|ni|nl|no|np|nr|nu|nz|om|pa|pe|pf|pg|ph|pk|pl|pm|pn|pr|ps|pt|pw|py|qa|re|ro|rs|ru|rw|sa|sb|sc|sd|se|sg|sh|si|sj|sk|sl|sm|sn|so|sr|st|su|sv|sy|sz|tc|td|tf|tg|th|tj|tk|tl|tm|tn|to|tp|tr|tt|tv|tw|tz|ua|ug|uk|um|us|uy|uz|va|vc|ve|vg|vi|vn|vu|wf|ws|ye|yt|yu|za|zm|aero|asia|cat|coop|edu|gov|jobs|mil|mobi|museum|tel|travel|pro|post|biz|com|info|int|name|net|org|pro|arpa)|((25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9])\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[0-9])))(:([1-9][0-9]?[0-9]?[0-9]?|[1-5][0-9][0-9][0-9][0-9]|6[0-4][0-9][0-9][0-9]|65[0-4][0-9][0-9]|655[0-2][0-9]|6553[0-5]|))?(((\/(([a-zA-Z0-9_\-\%\~\+\&\;]{1,})+)*)*)|\/$)?(\.(php|html|htm|zip$|arj$|rar$|sit$|pdf$|gif$|jpg$|jpeg$|jpe$|tif$|tiff$))?(\?([a-zA-Z0-9_\-]+\=[a-z-A-Z0-9_\-\%\~\+]+)?(\&([a-zA-Z0-9_\-]+\=[a-z-A-Z0-9_\-\%\~\+]+))*)?(\=\?([a-zA-Z0-9_\-])*)?(((\+([a-zA-Z0-9_])*)?(\-([a-zA-Z0-9_])*)?)*)?(\#([a-z-A-Z0-9_\-\%\~\+\&\;]*$))?$
# EXPONENT
# nums:5
# EXPONENT AttackString:"aa.ac"+"+0-"*32+"! _1_EOA(i or ii)"
import re2 as re
from time import perf_counter
# URL-validation regex (regexlib entry #1959) being benchmarked for
# catastrophic backtracking (ReDoS); see the attack string in the header
# comment above.
regex = """^((http|https|ftp|ftps)+(:\/\/))?(www\.)?(([a-z0-9\.-]{2,})\.(ac|ad|ae|af|ag|ai|al|am|an|ao|aq|ar|as|at|au|aw|ax|az|ba|bb|bd|be|bf|bg|bh|bi|bj|bm|bn|bo|br|bs|bt|bv|bw|by|bz|ca|cc|cd|cf|cg|ch|ci|ck|cl|cm|cn|co|cr|cs|cu|cv|cx|cy|cz|dd|de|dj|dk|dm|do|dz|ec|ee|eg|eh|er|es|et|eu|fi|fj|fk|fm|fo|fr|fx|ga|gb|gd|ge|gf|gg|gh|gi|gl|gm|gn|gp|gq|gr|gs|gt|gu|gw|gy|hk|hm|hn|hr|ht|hu|id|ie|il|im|in|io|iq|ir|is|it|je|jm|jo|jp|ke|kg|kh|ki|km|kn|kp|kr|kw|ky|kz|la|lb|lc|li|lk|lr|ls|lt|lu|lv|ly|ma|mc|md|me|mg|mh|mk|ml|mm|mn|mo|mp|mq|mr|ms|mt|mu|mv|mw|mx|my|mz|na|nc|ne|nf|ng|ni|nl|no|np|nr|nu|nz|om|pa|pe|pf|pg|ph|pk|pl|pm|pn|pr|ps|pt|pw|py|qa|re|ro|rs|ru|rw|sa|sb|sc|sd|se|sg|sh|si|sj|sk|sl|sm|sn|so|sr|st|su|sv|sy|sz|tc|td|tf|tg|th|tj|tk|tl|tm|tn|to|tp|tr|tt|tv|tw|tz|ua|ug|uk|um|us|uy|uz|va|vc|ve|vg|vi|vn|vu|wf|ws|ye|yt|yu|za|zm|aero|asia|cat|coop|edu|gov|jobs|mil|mobi|museum|tel|travel|pro|post|biz|com|info|int|name|net|org|pro|arpa)|((25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9])\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[0-9])))(:([1-9][0-9]?[0-9]?[0-9]?|[1-5][0-9][0-9][0-9][0-9]|6[0-4][0-9][0-9][0-9]|65[0-4][0-9][0-9]|655[0-2][0-9]|6553[0-5]|))?(((\/(([a-zA-Z0-9_\-\%\~\+\&\;]{1,})+)*)*)|\/$)?(\.(php|html|htm|zip$|arj$|rar$|sit$|pdf$|gif$|jpg$|jpeg$|jpe$|tif$|tiff$))?(\?([a-zA-Z0-9_\-]+\=[a-z-A-Z0-9_\-\%\~\+]+)?(\&([a-zA-Z0-9_\-]+\=[a-z-A-Z0-9_\-\%\~\+]+))*)?(\=\?([a-zA-Z0-9_\-])*)?(((\+([a-zA-Z0-9_])*)?(\-([a-zA-Z0-9_])*)?)*)?(\#([a-z-A-Z0-9_\-\%\~\+\&\;]*$))?$"""
# Compile once outside the loop so only search time is measured.
REGEX = re.compile(regex)
for i in range(0, 150000):
    # Attack input grows linearly with i ("+0-" repeated i times).
    ATTACK = "aa.ac" + "+0-" * i * 1 + "! _1_EOA(i or ii)"
    LEN = len(ATTACK)
    # Time a single search over the attack string.
    BEGIN = perf_counter()
    m = REGEX.search(ATTACK)
    # m = REGEX.match(ATTACK)
    DURATION = perf_counter() - BEGIN
    print(f"{i *1}: took {DURATION} seconds!")
| 195.210526
| 1,639
| 0.539768
| 1,011
| 3,709
| 1.95549
| 0.335312
| 0.052605
| 0.036419
| 0.032372
| 0.87304
| 0.863935
| 0.863935
| 0.863935
| 0.863935
| 0.863935
| 0
| 0.096676
| 0.026692
| 3,709
| 19
| 1,640
| 195.210526
| 0.45097
| 0.465894
| 0
| 0
| 0
| 0.090909
| 0.852077
| 0.823202
| 0.090909
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.181818
| 0
| 0.181818
| 0.090909
| 0
| 0
| 1
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 12
|
5bcffa842952b4b690e857ea7913b1d5f3dec6fa
| 255
|
py
|
Python
|
email2slack/compat.py
|
mikoim/email2slack
|
b5b176776791ad3895b94aece40f864323d5c105
|
[
"MIT"
] | 5
|
2017-07-27T15:53:41.000Z
|
2021-04-16T00:29:40.000Z
|
email2slack/compat.py
|
mikoim/email2slack
|
b5b176776791ad3895b94aece40f864323d5c105
|
[
"MIT"
] | 14
|
2016-11-12T15:56:47.000Z
|
2019-09-23T10:01:26.000Z
|
email2slack/compat.py
|
mikoim/email2slack
|
b5b176776791ad3895b94aece40f864323d5c105
|
[
"MIT"
] | 4
|
2017-06-15T09:17:53.000Z
|
2021-04-16T01:08:02.000Z
|
from __future__ import unicode_literals
# Compatibility shim: expose a single name for the stdlib ConfigParser class
# that works on both Python 3 ("configparser") and Python 2 ("ConfigParser").
try:
    from configparser import ConfigParser as compat_configparser # Python 3
except ImportError:
    # Python 2 names the module with a capitalized "ConfigParser".
    from ConfigParser import ConfigParser as compat_configparser # Python 2
__all__ = ['compat_configparser']
| 28.333333
| 76
| 0.807843
| 29
| 255
| 6.689655
| 0.517241
| 0.278351
| 0.226804
| 0.350515
| 0.618557
| 0.618557
| 0.618557
| 0.618557
| 0
| 0
| 0
| 0.009302
| 0.156863
| 255
| 8
| 77
| 31.875
| 0.893023
| 0.066667
| 0
| 0
| 0
| 0
| 0.080851
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
752d024287434e3a5fa6a23e949ec2cd446f2519
| 60,350
|
py
|
Python
|
netforce/tests/unit/napalm/test_nxosnc.py
|
eBay/pynetforce
|
599fbfd4d2dc23c0d70a730c80a0e63a4f461b2f
|
[
"Apache-2.0"
] | 16
|
2018-01-26T19:29:50.000Z
|
2020-07-31T04:50:37.000Z
|
netforce/tests/unit/napalm/test_nxosnc.py
|
eBay/pynetforce
|
599fbfd4d2dc23c0d70a730c80a0e63a4f461b2f
|
[
"Apache-2.0"
] | 3
|
2018-02-02T21:45:00.000Z
|
2019-09-13T15:31:50.000Z
|
netforce/tests/unit/napalm/test_nxosnc.py
|
eBay/pynetforce
|
599fbfd4d2dc23c0d70a730c80a0e63a4f461b2f
|
[
"Apache-2.0"
] | 7
|
2018-01-27T01:08:49.000Z
|
2021-01-15T11:03:59.000Z
|
# Copyright 2018 eBay Inc.
# Copyright 2012 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from napalm_base import get_network_driver
from napalm_baseebay import ebay_exceptions
from netforce.tests.unit.napalm import base
class NexusOSTestSuite(base.DietTestCase):
    """Nexus OS Test Suite
    This test suite performs setup and teardown functions for this file's
    unit tests. Each unit test class should inherit from this class, and
    implement a single "runTest" function.
    """
    def setUp(self):
        """Perform setup activities
        """
        super(NexusOSTestSuite, self).setUp()
        # Instantiate the 'ebaynxos' napalm driver against loopback creds;
        # no real connection happens — the netconf manager is mocked below.
        driver = get_network_driver('ebaynxos')
        self.driver = driver(
            hostname='127.0.0.1',
            username='cisco',
            password='cisco'
        )
        # Interface names consumed by the label-validation tests' fake data.
        self.interface_names = ["Ethernet1", "Ethernet2"]
        # Replace the driver's manager with a Mock so no I/O is performed.
        mock_mgr = mock.Mock()
        self.driver.manager = mock_mgr
        self.stdout = None
    def tearDown(self):
        """Perform teardown activities
        """
        super(NexusOSTestSuite, self).tearDown()
class test_update_switch_port_on_interface_vlan_suspended(NexusOSTestSuite):
    """update_switch_port_vlans must raise EntityInSuspendedModeException
    when the requested access VLAN is reported as suspended.
    """
    def mock_get_vlan(self, *args, **kwargs):
        # Stand-in for driver.get_vlan: reports the VLAN in 'suspend' state.
        return {
            'name': 'test-vlan-2',
            'status': 'suspend'
        }
    def runTest(self):
        with mock.patch.object(self.driver, 'get_vlan') as get_vlan_mocks:
            with mock.patch.object(self.driver, 'compare_vlan_config') \
                    as compare_config:
                with mock.patch.object(self.driver, 'get_vlans_on_interface') \
                        as get_vlan_interface_mock:
                    with mock.patch.object(self.driver, '_exec_command') \
                            as push_changes:
                        with mock.patch.object(
                                self.driver, '_check_if_connected') \
                                as check_connected:
                            check_connected.return_value = None
                            push_changes.return_value = None
                            # BUG FIX: was "return_vlaue" (typo), which only
                            # created a junk attribute on the Mock instead of
                            # configuring its return value.
                            get_vlan_interface_mock.return_value = {
                                'switch_port_mode': u'trunk',
                                'native_vlan': u'3', 'trunk_vlans': u'3-4'
                            }
                            compare_config.side_effect = [False, True]
                            get_vlan_mocks.side_effect = self.mock_get_vlan
                            port = {
                                "switch_port_mode": "access",
                                "admin_status": "SUSPENDED",
                                "vlans": [
                                    {
                                        "vlan": {
                                            "tag": "2"
                                        }
                                    }
                                ]
                            }
                            # The suspended VLAN must abort the update.
                            self.assertRaises(
                                ebay_exceptions.EntityInSuspendedModeException,
                                self.driver.update_switch_port_vlans,
                                'Ethernet1', port)
class test_update_switch_port_on_int_multi_vlan_not_support_acc_mode(
        NexusOSTestSuite):
    """update_switch_port_vlans must raise MoreThanOneAccessVlan when an
    access-mode port request carries more than one VLAN.
    """
    def mock_get_vlan(self, *args, **kwargs):
        # Stand-in for driver.get_vlan: the VLAN itself is fine (active),
        # so only the two-VLANs-in-access-mode condition can fail.
        return {
            'name': 'test-vlan-2',
            'status': 'active'
        }
    def runTest(self):
        with mock.patch.object(self.driver, 'get_vlan') as get_vlan_mocks:
            with mock.patch.object(self.driver, 'compare_vlan_config') \
                    as compare_config:
                with mock.patch.object(self.driver, 'get_vlans_on_interface') \
                        as get_vlan_interface_mock:
                    with mock.patch.object(self.driver, '_exec_command') \
                            as push_changes:
                        with mock.patch.object(
                                self.driver, '_check_if_connected') \
                                as check_connected:
                            check_connected.return_value = None
                            push_changes.return_value = None
                            # BUG FIX: was "return_vlaue" (typo), which only
                            # created a junk attribute on the Mock instead of
                            # configuring its return value.
                            get_vlan_interface_mock.return_value = {
                                'switch_port_mode': u'trunk',
                                'native_vlan': u'3', 'trunk_vlans': u'3-4'
                            }
                            compare_config.side_effect = [False, True]
                            get_vlan_mocks.side_effect = self.mock_get_vlan
                            # Two VLANs on an "access" mode port is invalid.
                            port = {
                                "switch_port_mode": "access",
                                "admin_status": "SUSPENDED",
                                "vlans": [
                                    {
                                        "vlan": {
                                            "tag": "2"
                                        }
                                    },
                                    {
                                        "vlan": {
                                            "tag": "3"
                                        }
                                    }
                                ]
                            }
                            self.assertRaises(
                                ebay_exceptions.MoreThanOneAccessVlan,
                                self.driver.update_switch_port_vlans,
                                'Ethernet1', port)
class test_update_switch_port_on_interface_invalid_switch_port_mode(
        NexusOSTestSuite):
    """update_switch_port_vlans must reject an unknown switch_port_mode
    with InvalidValueForParameterException.
    """
    def mock_get_vlan(self, *args, **kwargs):
        # Both referenced VLANs exist and are active, so only the bogus
        # "invalid" mode below can cause the failure.
        if kwargs['number'] == 2:
            return {
                'name': 'test-vlan-2',
                'status': 'active'
            }
        else:
            return {
                'name': 'test-vlan-3',
                'status': 'active'
            }
    def runTest(self):
        with mock.patch.object(self.driver, 'get_vlan') as get_vlan_mocks:
            get_vlan_mocks.side_effect = self.mock_get_vlan
            with mock.patch.object(self.driver, '_exec_command') \
                    as push_changes:
                with mock.patch.object(
                        self.driver, '_check_if_connected') \
                        as check_connected:
                    check_connected.return_value = None
                    push_changes.return_value = None
                    port = {
                        "switch_port_mode": "invalid",
                        "admin_status": "ACTIVE",
                        "vlans": [
                            {
                                "vlan": {
                                    "tag": "2",
                                    "is_native": False
                                },
                            },
                            {
                                "vlan": {
                                    "tag": "3",
                                    "is_native": True
                                }
                            }
                        ]
                    }
                    self.assertRaises(ebay_exceptions.
                                      InvalidValueForParameterException,
                                      self.driver.update_switch_port_vlans,
                                      'Et1', port)
class test_update_switch_port_on_interface_access_mode_success(
        NexusOSTestSuite):
    """Happy path: an access-mode port update completes without raising
    once post-change validation sees the expected access VLAN.
    """
    def validate_commands(self, config):
        self.assertIs(5, len(config))
    def mock_get_vlan(self, *args, **kwargs):
        # VLAN exists and is active.
        return {
            'name': 'test-vlan-2',
            'status': 'active'
        }
    def mock_get_vlans_on_interfaces(self, interfaces):
        # Post-change view of the interface: access VLAN 2 configured.
        return {
            'access_vlan': '2'
        }
    def runTest(self):
        with mock.patch.object(self.driver, 'get_vlan') as get_vlan_mocks:
            get_vlan_mocks.side_effect = self.mock_get_vlan
            with mock.patch.object(self.driver, '_exec_command') \
                    as push_changes:
                with mock.patch.object(
                        self.driver, '_check_if_connected') \
                        as check_connected:
                    check_connected.return_value = None
                    push_changes.exec_command.return_value = None
                    port = {
                        "switch_port_mode": "access",
                        "admin_status": "ACTIVE",
                        "vlans": [
                            {
                                "vlan": {
                                    "tag": "2"
                                }
                            }
                        ]
                    }
                    with mock.patch.object(self.driver,
                                           'get_vlans_on_interface')\
                            as vlan_if_mock:
                        with mock.patch.object(self.driver, 'compare_vlan_config') \
                                as compare_config:
                            # Pre-check False (change needed), post-check True
                            # (change applied successfully).
                            compare_config.side_effect = [False, True]
                            vlan_if_mock.side_effect = \
                                self.mock_get_vlans_on_interfaces
                            self.driver.update_switch_port_vlans('Ethernet1',
                                                                 port)
class test_update_switch_port_prevalidation_success(
        NexusOSTestSuite):
    """If the pre-change comparison already matches ([True, ...]), the
    update is a no-op and must complete without raising.
    """
    def validate_commands(self, config):
        self.assertIs(5, len(config))
    def mock_get_vlan(self, *args, **kwargs):
        return {
            'name': 'test-vlan-2',
            'status': 'active'
        }
    def mock_get_vlans_on_interfaces(self, interfaces):
        return {
            'access_vlan': '2'
        }
    def runTest(self):
        with mock.patch.object(self.driver, 'get_vlan') as get_vlan_mocks:
            get_vlan_mocks.side_effect = self.mock_get_vlan
            with mock.patch.object(self.driver, '_exec_command') \
                    as push_changes:
                with mock.patch.object(
                        self.driver, '_check_if_connected') \
                        as check_connected:
                    check_connected.return_value = None
                    push_changes.return_value = None
                    port = {
                        "switch_port_mode": "access",
                        "admin_status": "ACTIVE",
                        "vlans": [
                            {
                                "vlan": {
                                    "tag": "2"
                                }
                            }
                        ]
                    }
                    with mock.patch.object(self.driver,
                                           'get_vlans_on_interface')\
                            as vlan_if_mock:
                        with mock.patch.object(self.driver, 'compare_vlan_config') \
                                as compare_config:
                            # Pre-check True: config already as desired.
                            compare_config.side_effect = [True, True]
                            vlan_if_mock.side_effect = \
                                self.mock_get_vlans_on_interfaces
                            self.driver.update_switch_port_vlans('Ethernet1',
                                                                 port)
class test_update_switch_port_postvalidation_failure(
        NexusOSTestSuite):
    """If the post-change comparison still fails ([False, False]), the
    driver must raise PostChangeValidationException.
    """
    def validate_commands(self, config):
        self.assertIs(5, len(config))
    def mock_get_vlan(self, *args, **kwargs):
        return {
            'name': 'test-vlan-2',
            'status': 'active'
        }
    def mock_get_vlans_on_interfaces(self, interfaces):
        return {
            'access_vlan': '2'
        }
    def runTest(self):
        with mock.patch.object(self.driver, 'get_vlan') as get_vlan_mocks:
            get_vlan_mocks.side_effect = self.mock_get_vlan
            port = {
                "switch_port_mode": "access",
                "admin_status": "ACTIVE",
                "vlans": [
                    {
                        "vlan": {
                            "tag": "2"
                        }
                    }
                ]
            }
            with mock.patch.object(self.driver,
                                   'get_vlans_on_interface')\
                    as vlan_if_mock:
                with mock.patch.object(self.driver, 'compare_vlan_config') \
                        as compare_config:
                    with mock.patch.object(self.driver, '_exec_command') \
                            as push_changes:
                        with mock.patch.object(
                                self.driver, '_check_if_connected') \
                                as check_connected:
                            check_connected.return_value = None
                            push_changes.exec_command.return_value = None
                            # Both pre- and post-change comparisons fail.
                            compare_config.side_effect = [False, False]
                            vlan_if_mock.side_effect = \
                                self.mock_get_vlans_on_interfaces
                            self.assertRaises(ebay_exceptions.
                                              PostChangeValidationException,
                                              self.driver.
                                              update_switch_port_vlans,
                                              'Ethernet1', port)
class test_interface_label_validation_success(NexusOSTestSuite):
    """update_interface_label succeeds when the fetched interface
    description matches the requested label ('test-label').
    """
    def validate_commands(self, config):
        self.assertIs(2, len(config))
    def mock_interfaces(self, interfaces):
        # Fake inventory: every known interface already carries the
        # requested description, so post-change validation passes.
        ifdict = {}
        for ifname in self.interface_names:
            ifdict[ifname] = {
                "name": ifname,
                "description": 'test-label',
            }
        return ifdict
    def runTest(self):
        with mock.patch.object(self.driver, 'load_merge_candidate') \
                as load_merge_candidate_mock:
            load_merge_candidate_mock.side_effect = self.validate_commands
            with mock.patch.object(self.driver, 'commit_config') \
                    as commit_config_mock:
                commit_config_mock.return_value = None
                with mock.patch.object(self.driver, 'get_interfaces_by_name') \
                        as \
                        get_interfaces_mock:
                    with mock.patch.object(self.driver, '_exec_command') \
                            as push_changes:
                        with mock.patch.object(
                                self.driver, '_check_if_connected') \
                                as check_connected:
                            check_connected.return_value = None
                            push_changes.return_value = None
                            get_interfaces_mock.side_effect = \
                                self.mock_interfaces
                            self.driver.update_interface_label('Ethernet1',
                                                               'test-label')
class test_interface_label_post_change_validation_failure(NexusOSTestSuite):
    """update_interface_label must raise PostChangeValidationException when
    the description read back ('test-label-1') differs from the requested
    label ('test-label').
    """
    def validate_commands(self, config):
        self.assertIs(2, len(config))
    def mock_interfaces(self, interfaces):
        # Deliberate mismatch: description is 'test-label-1', not the
        # 'test-label' the test requests below.
        ifdict = {}
        for ifname in self.interface_names:
            ifdict[ifname] = {
                "name": ifname,
                "description": 'test-label-1',
            }
        return ifdict
    def runTest(self):
        with mock.patch.object(self.driver, 'get_interfaces_by_name') \
                as \
                get_interfaces_mock:
            with mock.patch.object(self.driver, '_exec_command') \
                    as push_changes:
                with mock.patch.object(
                        self.driver, '_check_if_connected') \
                        as check_connected:
                    check_connected.return_value = None
                    push_changes.return_value = None
                    get_interfaces_mock.side_effect = self.mock_interfaces
                    self.assertRaises(ebay_exceptions.
                                      PostChangeValidationException,
                                      self.driver.update_interface_label,
                                      'Ethernet1', 'test-label')
class test_create_subnet_success(NexusOSTestSuite):
    """create_subnet succeeds: the subnet is absent before the change and
    present afterwards (get_ip_addrs_on_interface side_effect [[], [...]]).
    """
    def runTest(self):
        with mock.patch.object(self.driver, 'get_vlan') as vlan_mock:
            vlan_mock.return_value = {
                'name': 'test-vlan',
                'status': 'active'
            }
            with mock.patch.object(self.driver,
                                   'get_ip_addrs_on_interface') \
                    as get_subnets_mock:
                with mock.patch.object(self.driver, '_exec_command') \
                        as push_changes:
                    with mock.patch.object(
                            self.driver, '_check_if_connected') \
                            as check_connected:
                        # NOTE: the as-names 'open'/'close' shadow the
                        # builtins within this block (kept as-is).
                        with mock.patch.object(self.driver, 'open') \
                                as open:
                            with mock.patch.object(self.driver, 'close') \
                                    as close:
                                open.return_value = None
                                close.return_value = None
                                check_connected.return_value = None
                                push_changes.return_value = None
                                # 1st call: no subnets; 2nd call: subnet
                                # visible -> post-change validation passes.
                                get_subnets_mock.side_effect = \
                                    [[], ['1.1.1.1/24']]
                                commands = self.driver.create_subnet(
                                    '1.1.1.1/24', 2)
                                self.assertIsNotNone(commands)
class test_create_subnet_failure_vlan_not_found(NexusOSTestSuite):
    """create_subnet must raise EntityDoesNotExistsException when the
    target VLAN lookup returns None.
    """
    def runTest(self):
        with mock.patch.object(self.driver, 'get_vlan') as vlan_mock:
            # VLAN does not exist on the device.
            vlan_mock.return_value = None
            with mock.patch.object(self.driver,
                                   'get_ip_addrs_on_interface') \
                    as get_subnets_mock:
                with mock.patch.object(self.driver, '_exec_command') \
                        as push_changes:
                    with mock.patch.object(
                            self.driver, '_check_if_connected') \
                            as check_connected:
                        check_connected.return_value = None
                        push_changes.return_value = None
                        get_subnets_mock.side_effect = [[], ['1.1.1.1/24']]
                        self.assertRaises(ebay_exceptions.
                                          EntityDoesNotExistsException,
                                          self.driver.create_subnet,
                                          '1.1.1.1/24',
                                          2)
class test_create_subnet_failure_in_config_push(NexusOSTestSuite):
    """create_subnet must raise PostChangeValidationException when the
    subnet is still absent after the config push (side_effect [[], []]).
    """
    def runTest(self):
        with mock.patch.object(self.driver, 'get_vlan') as vlan_mock:
            vlan_mock.return_value = {
                'name': 'test-vlan',
                'status': 'active'
            }
            with mock.patch.object(self.driver,
                                   'get_ip_addrs_on_interface') \
                    as get_subnets_mock:
                with mock.patch.object(self.driver, '_exec_command') \
                        as push_changes:
                    with mock.patch.object(
                            self.driver, '_check_if_connected') \
                            as check_connected:
                        # NOTE: 'open'/'close' as-names shadow the builtins
                        # inside this block (kept as-is).
                        with mock.patch.object(self.driver, 'open') \
                                as open:
                            with mock.patch.object(self.driver, 'close') \
                                    as close:
                                open.return_value = None
                                close.return_value = None
                                check_connected.return_value = None
                                push_changes.return_value = None
                                # Subnet never appears -> validation fails.
                                get_subnets_mock.side_effect = [[], []]
                                self.assertRaises(
                                    ebay_exceptions.
                                    PostChangeValidationException,
                                    self.driver.create_subnet,
                                    '1.1.1.1/24', 2)
class test_create_subnet_failure_subnet_already_exists(NexusOSTestSuite):
    """create_subnet must raise SubnetAlreadyConfiguredException when the
    subnet is already present before the change.
    """
    def runTest(self):
        with mock.patch.object(self.driver, 'get_vlan') as vlan_mock:
            vlan_mock.return_value = {
                'name': 'test-vlan',
                'status': 'active'
            }
            with mock.patch.object(self.driver,
                                   'get_ip_addrs_on_interface') \
                    as get_subnets_mock:
                with mock.patch.object(self.driver, '_exec_command') \
                        as push_changes:
                    with mock.patch.object(
                            self.driver, '_check_if_connected') \
                            as check_connected:
                        check_connected.return_value = None
                        push_changes.return_value = None
                        # Pre-change lookup already contains the subnet.
                        get_subnets_mock.side_effect = [['1.1.1.1/24']]
                        self.assertRaises(ebay_exceptions.
                                          SubnetAlreadyConfiguredException,
                                          self.driver.create_subnet,
                                          '1.1.1.1/24',
                                          2)
class test_get_mac_addresses_on_interface(NexusOSTestSuite):
    """get_mac_addresses_on_interface parses a single ROW_mac_address entry
    out of a canned NX-OS netconf reply.
    """
    def runTest(self):
        with mock.patch.object(self.driver, '_exec_command') \
                as push_changes:
            with mock.patch.object(
                    self.driver, '_check_if_connected') \
                    as check_connected:
                check_connected.return_value = None
                push_changes.return_value \
                    = '''<?xml version="1.0" encoding="ISO-8859-1"?>
<nf:rpc-reply>
    <nf:data>
        <show>
            <mac>
                <address-table>
                    <__XML__OPT_Cmd_show_mac_addr_tbl_static>
                        <__XML__OPT_Cmd_show_mac_addr_tbl_address>
                            <__XML__OPT_Cmd_show_mac_addr_tbl___readonly__>
                                <__readonly__>
                                    <TABLE_mac_address>
                                        <ROW_mac_address>
                                            <disp_mac_addr>dead.dead.dead</disp_mac_addr>
                                            <disp_type>* </disp_type>
                                            <disp_vlan>2</disp_vlan>
                                            <disp_is_static>disabled</disp_is_static>
                                            <disp_age>0</disp_age>
                                            <disp_is_secure>disabled</disp_is_secure>
                                            <disp_is_ntfy>disabled</disp_is_ntfy>
                                            <disp_port>Ethernet1/1</disp_port>
                                        </ROW_mac_address>
                                    </TABLE_mac_address>
                                </__readonly__>
                            </__XML__OPT_Cmd_show_mac_addr_tbl___readonly__>
                        </__XML__OPT_Cmd_show_mac_addr_tbl_address>
                    </__XML__OPT_Cmd_show_mac_addr_tbl_static>
                </address-table>
            </mac>
        </show>
    </nf:data>
</nf:rpc-reply>
]]>]]>
'''
                ret = self.driver.get_mac_addresses_on_interface(
                    'Ethernet1/1', 2)
                expected = [{'mac_address': 'dead.dead.dead', 'vlan': 2}]
                self.assertEqual(expected, ret)
                #cmd = 'show mac address-table interface %s vlan %s' % ('Ethernet1/1',
                #                                                       2)
                #self.driver.manager.exec_command.assert_called_once_with(cmd)
class test_get_mac_addresses_on_interface_more_than_one_row(NexusOSTestSuite):
    """get_mac_addresses_on_interface must return every ROW_mac_address
    entry (two here) from the netconf reply, in document order.
    """
    def runTest(self):
        with mock.patch.object(self.driver, '_exec_command') \
                as push_changes:
            with mock.patch.object(
                    self.driver, '_check_if_connected') \
                    as check_connected:
                check_connected.return_value = None
                push_changes.return_value \
                    = '''<?xml version="1.0" encoding="ISO-8859-1"?>
<nf:rpc-reply>
    <nf:data>
        <show>
            <mac>
                <address-table>
                    <__XML__OPT_Cmd_show_mac_addr_tbl_static>
                        <__XML__OPT_Cmd_show_mac_addr_tbl_address>
                            <__XML__OPT_Cmd_show_mac_addr_tbl___readonly__>
                                <__readonly__>
                                    <TABLE_mac_address>
                                        <ROW_mac_address>
                                            <disp_mac_addr>dead.dead.dead</disp_mac_addr>
                                            <disp_type>* </disp_type>
                                            <disp_vlan>2</disp_vlan>
                                            <disp_is_static>disabled</disp_is_static>
                                            <disp_age>0</disp_age>
                                            <disp_is_secure>disabled</disp_is_secure>
                                            <disp_is_ntfy>disabled</disp_is_ntfy>
                                            <disp_port>Ethernet1/1</disp_port>
                                        </ROW_mac_address>
                                        <ROW_mac_address>
                                            <disp_mac_addr>beef.beef.beef</disp_mac_addr>
                                            <disp_type>* </disp_type>
                                            <disp_vlan>2</disp_vlan>
                                            <disp_is_static>disabled</disp_is_static>
                                            <disp_age>0</disp_age>
                                            <disp_is_secure>disabled</disp_is_secure>
                                            <disp_is_ntfy>disabled</disp_is_ntfy>
                                            <disp_port>Ethernet1/1</disp_port>
                                        </ROW_mac_address>
                                    </TABLE_mac_address>
                                </__readonly__>
                            </__XML__OPT_Cmd_show_mac_addr_tbl___readonly__>
                        </__XML__OPT_Cmd_show_mac_addr_tbl_address>
                    </__XML__OPT_Cmd_show_mac_addr_tbl_static>
                </address-table>
            </mac>
        </show>
    </nf:data>
</nf:rpc-reply>
]]>]]>
'''
                ret = self.driver.get_mac_addresses_on_interface(
                    'Ethernet1/1', 2)
                expected = [{'mac_address': 'dead.dead.dead', 'vlan': 2},
                            {'mac_address': 'beef.beef.beef', 'vlan': 2}]
                self.assertEqual(expected, ret)
                #cmd = 'show mac address-table interface %s vlan %s' % ('Ethernet1/1',
                #                                                       2)
                #self.driver.manager.exec_command.assert_called_once_with([cmd])
class test_get_mac_addresses_on_interface_empty(NexusOSTestSuite):
    """get_mac_addresses_on_interface returns [] when the reply carries no
    TABLE_mac_address rows (only a header element).
    """
    def runTest(self):
        with mock.patch.object(self.driver, '_exec_command') \
                as push_changes:
            with mock.patch.object(
                    self.driver, '_check_if_connected') \
                    as check_connected:
                check_connected.return_value = None
                push_changes.return_value \
                    = '''<?xml version="1.0" encoding="ISO-8859-1"?>
<nf:rpc-reply>
    <nf:data>
        <show>
            <mac>
                <address-table>
                    <__XML__OPT_Cmd_show_mac_addr_tbl_static>
                        <__XML__OPT_Cmd_show_mac_addr_tbl_address>
                            <__XML__OPT_Cmd_show_mac_addr_tbl___readonly__>
                                <__readonly__>
                                    <header>fake header</header>
                                </__readonly__>
                            </__XML__OPT_Cmd_show_mac_addr_tbl___readonly__>
                        </__XML__OPT_Cmd_show_mac_addr_tbl_address>
                    </__XML__OPT_Cmd_show_mac_addr_tbl_static>
                </address-table>
            </mac>
        </show>
    </nf:data>
</nf:rpc-reply>
]]>]]>
'''
                ret = self.driver.get_mac_addresses_on_interface(
                    'Ethernet1/1', 2)
                expected = []
                self.assertEqual(expected, ret)
                #cmd = 'show mac address-table interface %s vlan %s' % ('Ethernet1/1',
                #                                                       2)
                #self.driver.manager.exec_command.assert_called_once_with([cmd])
class test_get_traffic_on_interface(NexusOSTestSuite):
    """get_traffic_on_interface parses "input rate ... output rate ..."
    CLI output into a (rx, tx) tuple.
    """
    def runTest(self):
        with mock.patch.object(self.driver, '_exec_command') \
                as push_changes:
            with mock.patch.object(
                    self.driver, '_check_if_connected') \
                    as check_connected:
                check_connected.return_value = None
                push_changes.return_value \
                    = " input rate 3.27 Kbps, 0 pps; output rate 3.78 Kbps," \
                      " 1 pps\n"
                # 3.27 Kbps -> 3000, 3.78 Kbps -> 4000: apparently rounded
                # to the nearest 1000 bps — confirm against driver impl.
                data = self.driver.get_traffic_on_interface('Ethernet1/1')
                expected = (3000, 4000)
                self.assertEqual(expected, data)
routes_string = '''<?xml version="1.0" encoding="ISO-8859-1"?>
<nf:rpc-reply>
<nf:data>
<show>
<__XML__BLK_Cmd_urib_show_routing_command_routing>
<__XML__OPT_Cmd_urib_show_routing_command_vrf>
<__XML__OPT_Cmd_urib_show_routing_command_ip>
<ip/>
</__XML__OPT_Cmd_urib_show_routing_command_ip>
</__XML__OPT_Cmd_urib_show_routing_command_vrf>
<ip>
<route/>
</ip>
<__XML__OPT_Cmd_urib_show_routing_command_vrf>
<__XML__OPT_Cmd_urib_show_routing_command_ip>
<__XML__OPT_Cmd_urib_show_routing_command_unicast>
<__XML__OPT_Cmd_urib_show_routing_command_topology>
<__XML__OPT_Cmd_urib_show_routing_command_l3vm-info>
<__XML__OPT_Cmd_urib_show_routing_command_rpf>
<__XML__OPT_Cmd_urib_show_routing_command_ip-addr>
<__XML__OPT_Cmd_urib_show_routing_command_protocol>
<__XML__OPT_Cmd_urib_show_routing_command_summary>
<__XML__OPT_Cmd_urib_show_routing_command_vrf>
<__XML__OPT_Cmd_urib_show_routing_command___readonly__>
<__readonly__>
<TABLE_vrf>
<ROW_vrf>
<vrf-name-out>default</vrf-name-out>
<TABLE_addrf>
<ROW_addrf>
<addrf>ipv4</addrf>
<TABLE_prefix>
<ROW_prefix>
<ipprefix>192.168.12.0/24</ipprefix>
<ucast-nhops>1</ucast-nhops>
<mcast-nhops>0</mcast-nhops>
<attached>false</attached>
<TABLE_path>
<ROW_path>
<uptime>P19DT37M39S</uptime>
<pref>20</pref>
<metric>0</metric>
<clientname>bgp-65001</clientname>
<type>external</type>
<tag>65002</tag>
<ubest>true</ubest>
</ROW_path>
</TABLE_path>
</ROW_prefix>
<ROW_prefix>
<ipprefix>10.1.0.0/24</ipprefix>
<ucast-nhops>1</ucast-nhops>
<mcast-nhops>0</mcast-nhops>
<attached>false</attached>
<TABLE_path>
<ROW_path>
<uptime>P19DT37M39S</uptime>
<pref>20</pref>
<metric>0</metric>
<clientname>bgp-65001</clientname>
<type>external</type>
<tag>65002</tag>
<ubest>true</ubest>
</ROW_path>
</TABLE_path>
</ROW_prefix>
</TABLE_prefix>
</ROW_addrf>
</TABLE_addrf>
</ROW_vrf>
</TABLE_vrf>
</__readonly__>
</__XML__OPT_Cmd_urib_show_routing_command___readonly__>
</__XML__OPT_Cmd_urib_show_routing_command_vrf>
</__XML__OPT_Cmd_urib_show_routing_command_summary>
</__XML__OPT_Cmd_urib_show_routing_command_protocol>
</__XML__OPT_Cmd_urib_show_routing_command_ip-addr>
</__XML__OPT_Cmd_urib_show_routing_command_rpf>
</__XML__OPT_Cmd_urib_show_routing_command_l3vm-info>
</__XML__OPT_Cmd_urib_show_routing_command_topology>
</__XML__OPT_Cmd_urib_show_routing_command_unicast>
</__XML__OPT_Cmd_urib_show_routing_command_ip>
</__XML__OPT_Cmd_urib_show_routing_command_vrf>
</__XML__BLK_Cmd_urib_show_routing_command_routing>
</show>
</nf:data>
</nf:rpc-reply>
]]>]]>
'''
class test_get_routes(NexusOSTestSuite):
    """get_routes returns both prefixes from routes_string when aggregates
    are empty, and [] when called without a vrf.
    """
    def runTest(self):
        with mock.patch.object(self.driver, '_get_vrfs') as vrf_mock:
            with mock.patch.object(self.driver, '_exec_command') \
                    as push_changes:
                with mock.patch.object(
                        self.driver, '_check_if_connected') \
                        as check_connected:
                    check_connected.return_value = None
                    push_changes.return_value \
                        = routes_string
                    with mock.patch.object(self.driver,
                                           'get_routes_aggregate') \
                            as routes_aggregate:
                        # No aggregates: every prefix should be reported.
                        routes_aggregate.return_value = []
                        vrf_mock.return_value = ['test']
                        data = self.driver.get_routes('test')
                        expected = [u'192.168.12.0/24', u'10.1.0.0/24']
                        self.assertEqual(sorted(expected),
                                         sorted(data))
                        # test with no-vrf
                        data = self.driver.get_routes()
                        expected = []
                        self.assertEqual(sorted(expected),
                                         sorted(data))
class test_get_routes_aggregates_exist(NexusOSTestSuite):
    """get_routes excludes prefixes that are already covered by an
    aggregate (192.168.12.0/24 here), leaving only 10.1.0.0/24.
    """
    def runTest(self):
        with mock.patch.object(self.driver, '_get_vrfs') as vrf_mock:
            with mock.patch.object(self.driver, '_exec_command') \
                    as push_changes:
                with mock.patch.object(
                        self.driver, '_check_if_connected') \
                        as check_connected:
                    check_connected.return_value = None
                    push_changes.return_value \
                        = routes_string
                    with mock.patch.object(self.driver,
                                           'get_routes_aggregate') \
                            as routes_aggregate:
                        vrf_mock.return_value = ['test']
                        # 1st call reports one aggregate, 2nd call none.
                        routes_aggregate.side_effect = [[u'192.168.12.0/24'],
                                                        []]
                        data = self.driver.get_routes('test')
                        expected = [u'10.1.0.0/24']
                        # vrf_list =['lab1-10', 'lab1-20', 'lab1-30']
                        self.assertEqual(expected, data)
                        # test with no-vrf
                        data = self.driver.get_routes()
                        expected = []
                        # vrf_list =['lab1-10', 'lab1-20', 'lab1-30']
                        self.assertEqual(sorted(expected),
                                         sorted(data))
class test_get_vrfs(NexusOSTestSuite):
    """_get_vrfs extracts vrf_name values ('default', 'test1') from a
    canned "show vrf" netconf reply.
    """
    vrf_string = '''<?xml version="1.0" encoding="ISO-8859-1"?>
<nf:rpc-reply xmlns:if="http://">
    <nf:data>
        <show>
            <vrf>
                <__XML__OPT_Cmd_l3vm_show_vrf_cmd_vrf-name>
                    <__XML__OPT_Cmd_l3vm_show_vrf_cmd_detail>
                        <__XML__OPT_Cmd_l3vm_show_vrf_cmd___readonly__>
                            <__readonly__>
                                <TABLE_vrf>
                                    <ROW_vrf>
                                        <vrf_name>default</vrf_name>
                                        <vrf_id>1</vrf_id>
                                        <vrf_state>Up</vrf_state>
                                        <vrf_reason>--</vrf_reason>
                                    </ROW_vrf>
                                    <ROW_vrf>
                                        <vrf_name>test1</vrf_name>
                                        <vrf_id>1</vrf_id>
                                        <vrf_state>Up</vrf_state>
                                        <vrf_reason>--</vrf_reason>
                                    </ROW_vrf>
                                </TABLE_vrf>
                            </__readonly__>
                        </__XML__OPT_Cmd_l3vm_show_vrf_cmd___readonly__>
                    </__XML__OPT_Cmd_l3vm_show_vrf_cmd_detail>
                </__XML__OPT_Cmd_l3vm_show_vrf_cmd_vrf-name>
            </vrf>
        </show>
    </nf:data>
</nf:rpc-reply>
]]>]]>
'''
    def runTest(self):
        with mock.patch.object(self.driver, '_exec_command') \
                as push_changes:
            push_changes.return_value = self.vrf_string
            with mock.patch.object(
                    self.driver, '_check_if_connected') \
                    as check_connected:
                check_connected.return_value = None
                expected = [u'default', u'test1']
                data = self.driver._get_vrfs()
                self.assertEqual(expected, data)
class test_get_ip_addrs_on_interface(NexusOSTestSuite):
    """get_ip_addrs_on_interface parses primary and secondary 'ip address'
    lines from CLI output.
    """
    def runTest(self):
        with mock.patch.object(self.driver, '_exec_command') \
                as push_changes:
            with mock.patch.object(
                    self.driver, '_check_if_connected') \
                    as check_connected:
                check_connected.return_value = None
                push_changes.return_value \
                    = """ip address 2.2.2.2/24\nip address 1.1.1.1/24 secondary\n
"""
                data = self.driver.get_ip_addrs_on_interface('vlan2')
                expected = [u'2.2.2.2/24', u'1.1.1.1/24']
                self.assertEqual(sorted(expected), sorted(data))
class test_get_ip_addrs_on_interface_no_subnet(NexusOSTestSuite):
    """get_ip_addrs_on_interface returns [] when the command output
    contains no 'ip address' lines.
    """
    def runTest(self):
        with mock.patch.object(self.driver, '_exec_command') \
                as push_changes:
            with mock.patch.object(
                    self.driver, '_check_if_connected') \
                    as check_connected:
                check_connected.return_value = None
                push_changes.return_value = " "
                data = self.driver.get_ip_addrs_on_interface('vlan2')
                expected = []
                self.assertEqual(expected, data)
class test_get_ip_addrs_on_interface_ip_addr_in_description(
        NexusOSTestSuite):
    """A 'description' line containing the words 'ip address' must not be
    mistaken for a real address assignment.
    """
    def runTest(self):
        with mock.patch.object(self.driver, '_exec_command') \
                as push_changes:
            with mock.patch.object(
                    self.driver, '_check_if_connected') \
                    as check_connected:
                check_connected.return_value = None
                push_changes.return_value = """description ip address\n
                ip address 2.2.2.2/24\n
                ip address 1.1.1.1/24 secondary\n
                """
                data = self.driver.get_ip_addrs_on_interface('vlan2')
                expected = [u'2.2.2.2/24', u'1.1.1.1/24']
                self.assertEqual(expected, data)
class test_get_routes_aggregates_no_vrf(NexusOSTestSuite):
    """get_routes_aggregate() without a vrf argument returns [] even though
    the canned reply lists static-protocol prefixes under vrf "default".
    """
    # Canned reply for a static-route query; holds 0.0.0.0/0 and
    # 1.1.1.0/24 under vrf "default".
    aggregates = '''<?xml version="1.0" encoding="ISO-8859-1"?>
<nf:rpc-reply xmlns:nf="urn:ietf:params:xml:ns:netconf:base:1.0">
    <nf:data>
        <show>
            <__XML__BLK_Cmd_urib_show_routing_command_routing>
                <__XML__OPT_Cmd_urib_show_routing_command_vrf>
                    <__XML__OPT_Cmd_urib_show_routing_command_ip>
                        <ip/>
                    </__XML__OPT_Cmd_urib_show_routing_command_ip>
                </__XML__OPT_Cmd_urib_show_routing_command_vrf>
                <ip>
                    <route/>
                </ip>
                <__XML__OPT_Cmd_urib_show_routing_command_vrf>
                    <__XML__OPT_Cmd_urib_show_routing_command_ip>
                        <__XML__OPT_Cmd_urib_show_routing_command_unicast>
                            <__XML__OPT_Cmd_urib_show_routing_command_topology>
                                <__XML__OPT_Cmd_urib_show_routing_command_l3vm-info>
                                    <__XML__OPT_Cmd_urib_show_routing_command_rpf>
                                        <__XML__OPT_Cmd_urib_show_routing_command_ip-addr>
                                            <__XML__OPT_Cmd_urib_show_routing_command_protocol>
                                                <__XML__ALL_og_Cmd_urib_show_routing_command_protocol>
                                                    <protocol>
                                                        <__XML__PARAM_value>static</__XML__PARAM_value>
                                                    </protocol>
                                                </__XML__ALL_og_Cmd_urib_show_routing_command_protocol>
                                                <__XML__OPT_Cmd_urib_show_routing_command_summary>
                                                    <__XML__OPT_Cmd_urib_show_routing_command_vrf>
                                                        <__XML__OPT_Cmd_urib_show_routing_command___readonly__>
                                                            <__readonly__>
                                                                <TABLE_vrf>
                                                                    <ROW_vrf>
                                                                        <vrf-name-out>default</vrf-name-out>
                                                                        <TABLE_addrf>
                                                                            <ROW_addrf>
                                                                                <addrf>ipv4</addrf>
                                                                                <TABLE_prefix>
                                                                                    <ROW_prefix>
                                                                                        <ipprefix>0.0.0.0/0</ipprefix>
                                                                                        <ucast-nhops>1</ucast-nhops>
                                                                                        <mcast-nhops>0</mcast-nhops>
                                                                                        <attached>false</attached>
                                                                                        <TABLE_path>
                                                                                            <ROW_path>
                                                                                                <uptime>PT8H19M19S</uptime>
                                                                                                <pref>20</pref>
                                                                                                <metric>101</metric>
                                                                                                <clientname>bgp-65001</clientname>
                                                                                                <type>external</type>
                                                                                                <tag>65000</tag>
                                                                                                <ubest>true</ubest>
                                                                                            </ROW_path>
                                                                                        </TABLE_path>
                                                                                    </ROW_prefix>
                                                                                    <ROW_prefix>
                                                                                        <ipprefix>1.1.1.0/24</ipprefix>
                                                                                        <ucast-nhops>1</ucast-nhops>
                                                                                        <mcast-nhops>0</mcast-nhops>
                                                                                        <attached>false</attached>
                                                                                        <TABLE_path>
                                                                                            <ROW_path>
                                                                                                <uptime>P3M10DT6H30M4S</uptime>
                                                                                                <pref>20</pref>
                                                                                                <metric>0</metric>
                                                                                                <clientname>bgp-65001</clientname>
                                                                                                <type>external</type>
                                                                                                <tag>65002</tag>
                                                                                                <ubest>true</ubest>
                                                                                            </ROW_path>
                                                                                        </TABLE_path>
                                                                                    </ROW_prefix>
                                                                                </TABLE_prefix>
                                                                            </ROW_addrf>
                                                                        </TABLE_addrf>
                                                                    </ROW_vrf>
                                                                </TABLE_vrf>
                                                            </__readonly__>
                                                        </__XML__OPT_Cmd_urib_show_routing_command___readonly__>
                                                    </__XML__OPT_Cmd_urib_show_routing_command_vrf>
                                                </__XML__OPT_Cmd_urib_show_routing_command_summary>
                                            </__XML__OPT_Cmd_urib_show_routing_command_protocol>
                                        </__XML__OPT_Cmd_urib_show_routing_command_ip-addr>
                                    </__XML__OPT_Cmd_urib_show_routing_command_rpf>
                                </__XML__OPT_Cmd_urib_show_routing_command_l3vm-info>
                            </__XML__OPT_Cmd_urib_show_routing_command_topology>
                        </__XML__OPT_Cmd_urib_show_routing_command_unicast>
                    </__XML__OPT_Cmd_urib_show_routing_command_ip>
                </__XML__OPT_Cmd_urib_show_routing_command_vrf>
            </__XML__BLK_Cmd_urib_show_routing_command_routing>
        </show>
    </nf:data>
</nf:rpc-reply>
]]>]]>
'''
    def runTest(self):
        with mock.patch.object(self.driver, '_get_vrfs') as vrf_mock:
            with mock.patch.object(self.driver, '_exec_command') \
                    as push_changes:
                with mock.patch.object(
                        self.driver, '_check_if_connected') \
                        as check_connected:
                    check_connected.return_value = None
                    push_changes.return_value \
                        = self.aggregates
                    vrf_mock.return_value = ['test']
                    # No vrf argument -> empty result expected.
                    data = self.driver.get_routes_aggregate()
                    expected = []
                    # vrf_list =['lab1-10', 'lab1-20', 'lab1-30']
                    self.assertEqual(expected, data)
class test_get_routes_aggregates_with_vrf(NexusOSTestSuite):
    """get_routes_aggregate('test') returns the discard-route prefix
    10.167.128.0/18 parsed from the vrf-scoped reply.
    """
    # Canned vrf-scoped reply containing one Null0/discard prefix.
    vrf_aggregates = '''<nf:rpc-reply xmlns:nf="urn:ietf:">
    <nf:data>
        <show>
            <__XML__BLK_Cmd_urib_show_routing_command_routing>
                <__XML__OPT_Cmd_urib_show_routing_command_vrf>
                    <__XML__OPT_Cmd_urib_show_routing_command_ip>
                        <ip/>
                    </__XML__OPT_Cmd_urib_show_routing_command_ip>
                </__XML__OPT_Cmd_urib_show_routing_command_vrf>
                <ip>
                    <route/>
                </ip>
                <__XML__OPT_Cmd_urib_show_routing_command_vrf>
                    <__XML__OPT_Cmd_urib_show_routing_command_ip>
                        <__XML__OPT_Cmd_urib_show_routing_command_unicast>
                            <__XML__OPT_Cmd_urib_show_routing_command_topology>
                                <__XML__OPT_Cmd_urib_show_routing_command_l3vm-info>
                                    <__XML__OPT_Cmd_urib_show_routing_command_rpf>
                                        <__XML__OPT_Cmd_urib_show_routing_command_ip-addr>
                                            <__XML__OPT_Cmd_urib_show_routing_command_protocol>
                                                <__XML__OPT_Cmd_urib_show_routing_command_summary>
                                                    <__XML__OPT_Cmd_urib_show_routing_command_vrf>
                                                        <vrf>
                                                            <__XML__BLK_Cmd_urib_show_routing_command_vrf-name>
                                                                <vrf-known-name>fake-20</vrf-known-name>
                                                            </__XML__BLK_Cmd_urib_show_routing_command_vrf-name>
                                                        </vrf>
                                                        <__XML__OPT_Cmd_urib_show_routing_command___readonly__>
                                                            <__readonly__>
                                                                <TABLE_vrf>
                                                                    <ROW_vrf>
                                                                        <vrf-name-out>fake-20</vrf-name-out>
                                                                        <TABLE_addrf>
                                                                            <ROW_addrf>
                                                                                <addrf>ipv4</addrf>
                                                                                <TABLE_prefix>
                                                                                    <ROW_prefix>
                                                                                        <ipprefix>10.167.128.0/18</ipprefix>
                                                                                        <ucast-nhops>1</ucast-nhops>
                                                                                        <mcast-nhops>0</mcast-nhops>
                                                                                        <attached>FALSE</attached>
                                                                                        <TABLE_path>
                                                                                            <ROW_path>
                                                                                                <ifname>Null0</ifname>
                                                                                                <uptime>P4M29DT3H35M12S</uptime>
                                                                                                <pref>220</pref>
                                                                                                <metric>0</metric>
                                                                                                <clientname>bgp-64615</clientname>
                                                                                                <type>discard</type>
                                                                                                <tag>64615</tag>
                                                                                                <ubest>TRUE</ubest>
                                                                                            </ROW_path>
                                                                                        </TABLE_path>
                                                                                    </ROW_prefix>
                                                                                </TABLE_prefix>
                                                                            </ROW_addrf>
                                                                        </TABLE_addrf>
                                                                    </ROW_vrf>
                                                                </TABLE_vrf>
                                                            </__readonly__>
                                                        </__XML__OPT_Cmd_urib_show_routing_command___readonly__>
                                                    </__XML__OPT_Cmd_urib_show_routing_command_vrf>
                                                </__XML__OPT_Cmd_urib_show_routing_command_summary>
                                            </__XML__OPT_Cmd_urib_show_routing_command_protocol>
                                        </__XML__OPT_Cmd_urib_show_routing_command_ip-addr>
                                    </__XML__OPT_Cmd_urib_show_routing_command_rpf>
                                </__XML__OPT_Cmd_urib_show_routing_command_l3vm-info>
                            </__XML__OPT_Cmd_urib_show_routing_command_topology>
                        </__XML__OPT_Cmd_urib_show_routing_command_unicast>
                    </__XML__OPT_Cmd_urib_show_routing_command_ip>
                </__XML__OPT_Cmd_urib_show_routing_command_vrf>
            </__XML__BLK_Cmd_urib_show_routing_command_routing>
        </show>
    </nf:data>
</nf:rpc-reply>
]]>]]>
'''
    def runTest(self):
        with mock.patch.object(self.driver, '_get_vrfs') as vrf_mock:
            with mock.patch.object(self.driver, '_exec_command') \
                    as push_changes:
                with mock.patch.object(
                        self.driver, '_check_if_connected') \
                        as check_connected:
                    check_connected.return_value = None
                    push_changes.return_value \
                        = self.vrf_aggregates
                    vrf_mock.return_value = ['test']
                    data = self.driver.get_routes_aggregate('test')
                    expected = [u'10.167.128.0/18']
                    # vrf_list =['lab1-10', 'lab1-20', 'lab1-30']
                    self.assertEqual(expected, data)
class test_get_routes_aggregates_wrong_vrf(NexusOSTestSuite):
    """get_routes_aggregate must raise EntityDoesNotExistsException when
    the requested vrf ('fake') is not in the device's vrf list.
    """
    def runTest(self):
        with mock.patch.object(self.driver, '_get_vrfs') \
                as vrfs:
            with mock.patch.object(
                    self.driver, '_check_if_connected') \
                    as check_connected:
                check_connected.return_value = None
                # Only 'lab1-10' exists; 'fake' must be rejected.
                vrfs.return_value = ['lab1-10']
                vrf_name = 'fake'
                self.assertRaises(ebay_exceptions.
                                  EntityDoesNotExistsException,
                                  self.driver.get_routes_aggregate,
                                  vrf_name)
class test_delete_subnet_success(NexusOSTestSuite):
def runTest(self):
with mock.patch.object(self.driver, 'get_vlan') as vlan_mock:
vlan_mock.return_value = {
'name': 'test-vlan',
'status': 'active'
}
with mock.patch.object(self.driver,
'get_ip_addrs_on_interface') \
as get_subnets_mock:
with mock.patch.object(
self.driver, '_check_if_connected') \
as check_connected:
with mock.patch.object(self.driver, '_exec_command') \
as push_changes:
push_changes.return_value = None
check_connected.return_value = None
get_subnets_mock.side_effect = [['1.1.1.1/24'], []]
commands = self.driver.delete_subnet_on_device(
'1.1.1.1/24', 2)
self.assertIsNotNone(commands)
class test_get_routes_aggregates_with_vrf_other_type(NexusOSTestSuite):
vrf_aggregates = '''<nf:rpc-reply xmlns:nf="urn:ietf:">
<nf:data>
<show>
<ip>
<route>
<vrf>
<__XML__PARAM__vrf-known-name'>
<__readonly__>
<TABLE_vrf>
<ROW_vrf>
<vrf-name-out>fake-20</vrf-name-out>
<TABLE_addrf>
<ROW_addrf>
<addrf>ipv4</addrf>
<TABLE_prefix>
<ROW_prefix>
<ipprefix>10.167.128.0/18</ipprefix>
<ucast-nhops>1</ucast-nhops>
<mcast-nhops>0</mcast-nhops>
<attached>FALSE</attached>
<TABLE_path>
<ROW_path>
<ifname>Null0</ifname>
<uptime>P4M29DT3H35M12S</uptime>
<pref>220</pref>
<metric>0</metric>
<clientname>bgp-64615</clientname>
<type>discard</type>
<tag>64615</tag>
<ubest>TRUE</ubest>
</ROW_path>
</TABLE_path>
</ROW_prefix>
</TABLE_prefix>
</ROW_addrf>
</TABLE_addrf>
</ROW_vrf>
</TABLE_vrf>
</__readonly__>
</__XML__PARAM__vrf-known-name'>
</vrf>
</route>
</ip>
<__XML__PARAM__vrf-known-name'>
</show>
</nf:data>
</nf:rpc-reply>
]]>]]>
'''
class test_check_hidden_routes_aggregates_with_vrf(NexusOSTestSuite):
hidden_aggregates = '''*>a10.166.0.0/16 0.0.0.0 100 32768 i\n
a10.173.160.0/20 0.0.0.0 100 32768 i\n'''
def runTest(self):
with mock.patch.object(self.driver, '_get_vrfs') \
as vrfs:
with mock.patch.object(self.driver, '_exec_command') \
as push_changes:
with mock.patch.object(
self.driver, '_check_if_connected') \
as check_connected:
check_connected.return_value = None
push_changes.return_value \
= self.hidden_aggregates
vrfs.return_value = ['lab04-native']
data = self.driver.check_hidden_routes_aggregates(
'lab04-native')
expected = ['10.173.160.0/20', '10.166.0.0/16']
self.assertEqual(sorted(expected), sorted(data))
class test_update_switch_port_on_interface_trunk_mode_allowed_vlans(
NexusOSTestSuite):
def validate_commands(self, config):
self.assertIs(5, len(config))
def mock_get_vlan(self, *args, **kwargs):
return {
'name': 'test-vlan-2',
'status': 'active'
}
def mock_get_vlans_on_interfaces(self, interfaces):
return {
'native_vlan': '5'
}
def runTest(self):
with mock.patch.object(self.driver, 'get_vlan') as get_vlan_mocks:
get_vlan_mocks.side_effect = self.mock_get_vlan
with mock.patch.object(self.driver, '_exec_command') \
as push_changes:
with mock.patch.object(
self.driver, '_check_if_connected') \
as check_connected:
check_connected.return_value = None
push_changes.exec_command.return_value = None
port = {
"switch_port_mode": "trunk",
"admin_status": "ACTIVE",
"vlans": [
{
"vlan": {
"tag": "2",
'is_native': False
}
},
{
"vlan": {
"tag": "5",
'is_native': True
}
},
{
"vlan": {
"tag": "3",
'is_native': True
}
}
]
}
with mock.patch.object(self.driver,
'get_vlans_on_interface')\
as vlan_if_mock:
with mock.patch.object(self.driver, 'compare_vlan_config') \
as compare_config:
compare_config.side_effect = [False, True]
vlan_if_mock.side_effect = \
self.mock_get_vlans_on_interfaces
commands = self.driver.update_switch_port_vlans(
'Ethernet1', port)
self.assertIn('2', commands)
| 44.115497
| 101
| 0.451069
| 5,277
| 60,350
| 4.698124
| 0.067463
| 0.054453
| 0.037028
| 0.076638
| 0.891981
| 0.881534
| 0.875282
| 0.859995
| 0.84374
| 0.826154
| 0
| 0.019951
| 0.469279
| 60,350
| 1,367
| 102
| 44.147769
| 0.754098
| 0.027705
| 0
| 0.803043
| 0
| 0.003381
| 0.43407
| 0.142557
| 0
| 0
| 0
| 0
| 0.027895
| 1
| 0.04142
| false
| 0.000845
| 0.003381
| 0.008453
| 0.086221
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f34b13f7d93d7c19e623aafc6d5f7aa82ce6933e
| 9,623
|
py
|
Python
|
test/test_action_templates_api.py
|
cvent/octopus-deploy-api-client
|
0e03e842e1beb29b132776aee077df570b88366a
|
[
"Apache-2.0"
] | null | null | null |
test/test_action_templates_api.py
|
cvent/octopus-deploy-api-client
|
0e03e842e1beb29b132776aee077df570b88366a
|
[
"Apache-2.0"
] | null | null | null |
test/test_action_templates_api.py
|
cvent/octopus-deploy-api-client
|
0e03e842e1beb29b132776aee077df570b88366a
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Octopus Server API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 2019.6.7+Branch.tags-2019.6.7.Sha.aa18dc6809953218c66f57eff7d26481d9b23d6a
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import octopus_deploy_swagger_client
from octopus_deploy_client.action_templates_api import ActionTemplatesApi # noqa: E501
from octopus_deploy_swagger_client.rest import ApiException
class TestActionTemplatesApi(unittest.TestCase):
"""ActionTemplatesApi unit test stubs"""
def setUp(self):
self.api = octopus_deploy_client.action_templates_api.ActionTemplatesApi() # noqa: E501
def tearDown(self):
pass
def test_create_response_descriptor_projects_action_template_action_template_resource(self):
"""Test case for create_response_descriptor_projects_action_template_action_template_resource
Create a ActionTemplateResource # noqa: E501
"""
pass
def test_create_response_descriptor_projects_action_template_action_template_resource_spaces(self):
"""Test case for create_response_descriptor_projects_action_template_action_template_resource_spaces
Create a ActionTemplateResource # noqa: E501
"""
pass
def test_custom_action_response_descriptor_octopus_server_web_api_actions_action_template_logo_get_responder(self):
"""Test case for custom_action_response_descriptor_octopus_server_web_api_actions_action_template_logo_get_responder
"""
pass
def test_custom_action_response_descriptor_octopus_server_web_api_actions_action_template_logo_get_responder_0(self):
"""Test case for custom_action_response_descriptor_octopus_server_web_api_actions_action_template_logo_get_responder_0
"""
pass
def test_custom_action_response_descriptor_octopus_server_web_api_actions_action_template_logo_get_responder_spaces(self):
"""Test case for custom_action_response_descriptor_octopus_server_web_api_actions_action_template_logo_get_responder_spaces
"""
pass
def test_custom_action_response_descriptor_octopus_server_web_api_actions_action_template_logo_get_responder_spaces_0(self):
"""Test case for custom_action_response_descriptor_octopus_server_web_api_actions_action_template_logo_get_responder_spaces_0
"""
pass
def test_custom_action_response_descriptor_octopus_server_web_api_actions_action_template_logo_put_responder(self):
"""Test case for custom_action_response_descriptor_octopus_server_web_api_actions_action_template_logo_put_responder
"""
pass
def test_custom_action_response_descriptor_octopus_server_web_api_actions_action_template_logo_put_responder_0(self):
"""Test case for custom_action_response_descriptor_octopus_server_web_api_actions_action_template_logo_put_responder_0
"""
pass
def test_custom_action_response_descriptor_octopus_server_web_api_actions_action_template_logo_put_responder_spaces(self):
"""Test case for custom_action_response_descriptor_octopus_server_web_api_actions_action_template_logo_put_responder_spaces
"""
pass
def test_custom_action_response_descriptor_octopus_server_web_api_actions_action_template_logo_put_responder_spaces_0(self):
"""Test case for custom_action_response_descriptor_octopus_server_web_api_actions_action_template_logo_put_responder_spaces_0
"""
pass
def test_custom_action_response_descriptor_octopus_server_web_api_actions_action_template_usage_responder(self):
"""Test case for custom_action_response_descriptor_octopus_server_web_api_actions_action_template_usage_responder
"""
pass
def test_custom_action_response_descriptor_octopus_server_web_api_actions_action_template_usage_responder_spaces(self):
"""Test case for custom_action_response_descriptor_octopus_server_web_api_actions_action_template_usage_responder_spaces
"""
pass
def test_custom_action_response_descriptor_octopus_server_web_api_actions_action_template_version_get_responder(self):
"""Test case for custom_action_response_descriptor_octopus_server_web_api_actions_action_template_version_get_responder
"""
pass
def test_custom_action_response_descriptor_octopus_server_web_api_actions_action_template_version_get_responder_spaces(self):
"""Test case for custom_action_response_descriptor_octopus_server_web_api_actions_action_template_version_get_responder_spaces
"""
pass
def test_custom_action_response_descriptor_octopus_server_web_api_actions_action_templates_search_responder(self):
"""Test case for custom_action_response_descriptor_octopus_server_web_api_actions_action_templates_search_responder
"""
pass
def test_custom_action_response_descriptor_octopus_server_web_api_actions_action_templates_search_responder_spaces(self):
"""Test case for custom_action_response_descriptor_octopus_server_web_api_actions_action_templates_search_responder_spaces
"""
pass
def test_custom_action_response_descriptor_octopus_server_web_api_actions_deployment_action_update_action_template_actions_update_responder(self):
"""Test case for custom_action_response_descriptor_octopus_server_web_api_actions_deployment_action_update_action_template_actions_update_responder
"""
pass
def test_custom_action_response_descriptor_octopus_server_web_api_actions_deployment_action_update_action_template_actions_update_responder_spaces(self):
"""Test case for custom_action_response_descriptor_octopus_server_web_api_actions_deployment_action_update_action_template_actions_update_responder_spaces
"""
pass
def test_custom_query_response_descriptor_octopus_server_web_api_actions_list_action_template_categories_responder(self):
"""Test case for custom_query_response_descriptor_octopus_server_web_api_actions_list_action_template_categories_responder
"""
pass
def test_custom_query_response_descriptor_octopus_server_web_api_actions_list_action_template_categories_responder_spaces(self):
"""Test case for custom_query_response_descriptor_octopus_server_web_api_actions_list_action_template_categories_responder_spaces
"""
pass
def test_delete_on_background_response_descriptor_projects_action_template_action_template_resource(self):
"""Test case for delete_on_background_response_descriptor_projects_action_template_action_template_resource
Delete a ActionTemplateResource by ID # noqa: E501
"""
pass
def test_delete_on_background_response_descriptor_projects_action_template_action_template_resource_spaces(self):
"""Test case for delete_on_background_response_descriptor_projects_action_template_action_template_resource_spaces
Delete a ActionTemplateResource by ID # noqa: E501
"""
pass
def test_index_response_descriptor_projects_action_template_action_template_resource(self):
"""Test case for index_response_descriptor_projects_action_template_action_template_resource
Get a list of ActionTemplateResources # noqa: E501
"""
pass
def test_index_response_descriptor_projects_action_template_action_template_resource_spaces(self):
"""Test case for index_response_descriptor_projects_action_template_action_template_resource_spaces
Get a list of ActionTemplateResources # noqa: E501
"""
pass
def test_list_all_response_descriptor_projects_action_template_action_template_resource(self):
"""Test case for list_all_response_descriptor_projects_action_template_action_template_resource
Get a list of ActionTemplateResources # noqa: E501
"""
pass
def test_list_all_response_descriptor_projects_action_template_action_template_resource_spaces(self):
"""Test case for list_all_response_descriptor_projects_action_template_action_template_resource_spaces
Get a list of ActionTemplateResources # noqa: E501
"""
pass
def test_load_response_descriptor_projects_action_template_action_template_resource(self):
"""Test case for load_response_descriptor_projects_action_template_action_template_resource
Get a ActionTemplateResource by ID # noqa: E501
"""
pass
def test_load_response_descriptor_projects_action_template_action_template_resource_spaces(self):
"""Test case for load_response_descriptor_projects_action_template_action_template_resource_spaces
Get a ActionTemplateResource by ID # noqa: E501
"""
pass
def test_modify_response_descriptor_projects_action_template_action_template_resource(self):
"""Test case for modify_response_descriptor_projects_action_template_action_template_resource
Modify a ActionTemplateResource by ID # noqa: E501
"""
pass
def test_modify_response_descriptor_projects_action_template_action_template_resource_spaces(self):
"""Test case for modify_response_descriptor_projects_action_template_action_template_resource_spaces
Modify a ActionTemplateResource by ID # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
| 42.39207
| 162
| 0.808064
| 1,171
| 9,623
| 5.982067
| 0.077711
| 0.159886
| 0.12848
| 0.159315
| 0.929051
| 0.929051
| 0.918487
| 0.907637
| 0.889793
| 0.889793
| 0
| 0.011293
| 0.153383
| 9,623
| 226
| 163
| 42.579646
| 0.848533
| 0.470124
| 0
| 0.430556
| 1
| 0
| 0.001699
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.444444
| false
| 0.430556
| 0.069444
| 0
| 0.527778
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 10
|
f373bcdfc1753164eb38b128cd8de5bd35fb3b61
| 42
|
py
|
Python
|
pymonero/bitmonerod/__init__.py
|
Monero-Monitor/pymonero
|
fc3a0c6ab507d79e9ec77378ec5817f5f0127b52
|
[
"BSD-3-Clause"
] | 10
|
2016-03-24T08:09:55.000Z
|
2019-01-15T11:03:30.000Z
|
pymonero/bitmonerod/__init__.py
|
Monero-Monitor/pymonero
|
fc3a0c6ab507d79e9ec77378ec5817f5f0127b52
|
[
"BSD-3-Clause"
] | 2
|
2016-06-19T19:10:59.000Z
|
2016-09-19T17:19:46.000Z
|
pymonero/bitmonerod/__init__.py
|
Monero-Monitor/pymonero
|
fc3a0c6ab507d79e9ec77378ec5817f5f0127b52
|
[
"BSD-3-Clause"
] | 7
|
2016-09-19T16:37:29.000Z
|
2020-02-16T07:21:32.000Z
|
from . import rpc
from . import classes
| 14
| 22
| 0.714286
| 6
| 42
| 5
| 0.666667
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.238095
| 42
| 2
| 23
| 21
| 0.9375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
f3a32eaf5fbfd33cfbbce7d986e54a0da6d81428
| 1,909
|
py
|
Python
|
plenum/test/monitoring/test_throughput_median_avg.py
|
andkononykhin/plenum
|
28dc1719f4b7e80d31dafbadb38cfec4da949886
|
[
"Apache-2.0"
] | 148
|
2017-07-11T19:05:25.000Z
|
2022-03-16T21:31:20.000Z
|
plenum/test/monitoring/test_throughput_median_avg.py
|
andkononykhin/plenum
|
28dc1719f4b7e80d31dafbadb38cfec4da949886
|
[
"Apache-2.0"
] | 561
|
2017-06-29T17:59:56.000Z
|
2022-03-09T15:47:14.000Z
|
plenum/test/monitoring/test_throughput_median_avg.py
|
andkononykhin/plenum
|
28dc1719f4b7e80d31dafbadb38cfec4da949886
|
[
"Apache-2.0"
] | 378
|
2017-06-29T17:45:27.000Z
|
2022-03-26T07:27:59.000Z
|
def test_low_median(fake_monitor, tconf):
monitor = fake_monitor
# Filling for case, when the most of backup throughputs are small and several is very big
# Case is [100, 120, 120, 120, 10000]
for i in monitor.instances.ids:
monitor.throughputs[i].throughput = 120
monitor.throughputs[0].throughput = 100
monitor.throughputs[monitor.instances.count - 1].throughput = 10000
assert monitor.instance_throughput_ratio(0) > tconf.DELTA
def test_medium_median(fake_monitor, tconf):
monitor = fake_monitor
# Filling for case, when the most of backup throughputs a similar with master,
# but there is a very small and a very big
# Case is [100, 1, 120, 120, 10000]
for i in monitor.instances.ids:
monitor.throughputs[i].throughput = 120
monitor.throughputs[0].throughput = 100
monitor.throughputs[1].throughput = 1
monitor.throughputs[monitor.instances.count - 1].throughput = 10000
assert monitor.instance_throughput_ratio(0) > tconf.DELTA
def test_high_median(fake_monitor, tconf):
monitor = fake_monitor
# Filling for case, when the most of backup throughputs a similar with master,
# but there is a some very big values
# Case is [100, 1, 120, 120, 1]
for i in monitor.instances.ids:
monitor.throughputs[i].throughput = 120
monitor.throughputs[0].throughput = 100
monitor.throughputs[1].throughput = 1
monitor.throughputs[monitor.instances.count - 1].throughput = 1
assert monitor.instance_throughput_ratio(0) > tconf.DELTA
def test_triggering_view_change(fake_monitor, tconf):
monitor = fake_monitor
# Filling for case, when the all of backup throughputs are higher, then for master
for i in monitor.instances.ids:
monitor.throughputs[i].throughput = 1001
monitor.throughputs[0].throughput = 100
assert monitor.instance_throughput_ratio(0) < tconf.DELTA
| 42.422222
| 93
| 0.726035
| 266
| 1,909
| 5.116541
| 0.206767
| 0.171932
| 0.047024
| 0.067597
| 0.89493
| 0.854519
| 0.835415
| 0.835415
| 0.800882
| 0.800882
| 0
| 0.060802
| 0.190152
| 1,909
| 44
| 94
| 43.386364
| 0.819534
| 0.261393
| 0
| 0.758621
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137931
| 1
| 0.137931
| false
| 0
| 0
| 0
| 0.137931
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f3c6a5a135f9c3ac0a7903b2e792a192026d0f69
| 245
|
py
|
Python
|
ikev2-node/sentinel/node/__init__.py
|
sentinel-official/sentinel-go
|
d0238f4ff21d0a7f2d684f645e47cff85550f0d9
|
[
"MIT"
] | 342
|
2017-08-21T20:12:56.000Z
|
2022-03-19T17:58:25.000Z
|
ikev2-node/sentinel/node/__init__.py
|
kninezinho/sentinel
|
012425258c3bc24bd41b2347624ddcadb208adf3
|
[
"MIT"
] | 57
|
2017-11-13T11:16:47.000Z
|
2022-03-01T13:54:31.000Z
|
ikev2-node/sentinel/node/__init__.py
|
kninezinho/sentinel
|
012425258c3bc24bd41b2347624ddcadb208adf3
|
[
"MIT"
] | 72
|
2017-11-23T05:13:24.000Z
|
2022-02-25T14:18:33.000Z
|
# coding=utf-8
from .controllers import create_account
from .controllers import deregister_node
from .controllers import register_node
from .controllers import send_connections_info
from .controllers import send_node_info
from .node import Node
| 30.625
| 46
| 0.857143
| 34
| 245
| 5.970588
| 0.411765
| 0.369458
| 0.517241
| 0.246305
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004566
| 0.106122
| 245
| 7
| 47
| 35
| 0.922374
| 0.04898
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
3405a722a343eba6b9b4b4c6a5ae255905cf3ef0
| 9,729
|
py
|
Python
|
tests/test_app.py
|
davidzyx/ca-wildfire-risk
|
e0affe4471cfd0656616faad5d0701c7ed56095d
|
[
"MIT"
] | 2
|
2021-06-12T23:15:19.000Z
|
2021-06-12T23:15:24.000Z
|
tests/test_app.py
|
nlpathak/California-Wildfire-Risk-Analysis
|
d0215739fa23a423eb0575db07edc2626b465469
|
[
"MIT"
] | null | null | null |
tests/test_app.py
|
nlpathak/California-Wildfire-Risk-Analysis
|
d0215739fa23a423eb0575db07edc2626b465469
|
[
"MIT"
] | 2
|
2021-05-21T16:40:40.000Z
|
2021-06-12T23:14:26.000Z
|
from dash.testing.application_runners import import_app
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
import pandas as pd
import dash
import dash_core_components as dcc
import dash_html_components as html
import plotly.express as px
import numpy as np
import dash_table
from datetime import datetime, time
import calendar
import json
from os import path, environ
TIMEOUT_TIME = 30
def test_header(dash_duo):
app = import_app('src.app')
dash_duo.start_server(app)
dash_duo.driver.maximize_window()
dash_duo.wait_for_element('#header', timeout=TIMEOUT_TIME)
assert dash_duo.find_element('#header').text == 'California Wildfire Interactive Dashboard'
def test_tab_cal(dash_duo):
app = import_app('src.app')
dash_duo.start_server(app)
dash_duo.driver.maximize_window()
# navigate to cali map page
dash_duo.wait_for_page(url=None, timeout=TIMEOUT_TIME)
dash_duo.multiple_click('#cali-map-nav', 1)
dash_duo.wait_for_element('#calmap', timeout=TIMEOUT_TIME)
assert "Note that a small date range is required for full functionality with the select and hovering tools" in dash_duo.find_element('#calmap').text
def test_tab_county(dash_duo):
app = import_app('src.app')
dash_duo.start_server(app)
dash_duo.driver.maximize_window()
# navigate to county map page
dash_duo.wait_for_page(url=None, timeout=TIMEOUT_TIME)
dash_duo.multiple_click('#county-map-nav', 1)
dash_duo.wait_for_element('#app2-div', timeout=TIMEOUT_TIME)
print(dash_duo.find_element('#app2-div').text)
assert "Please choose your preferred date range between 02-28-2013 and 01-22-2021\nFilter" in dash_duo.find_element('#app2-div').text
def test_cal_date_update(dash_duo):
app = import_app('src.app')
dash_duo.start_server(app)
dash_duo.driver.maximize_window()
# navigate to cali map page
dash_duo.wait_for_page(url=None, timeout=TIMEOUT_TIME)
dash_duo.multiple_click('#cali-map-nav', 1)
dash_duo.wait_for_element('#calmap', timeout=TIMEOUT_TIME)
#start date is updated to 11/01/2020
start_date_element = dash_duo.find_element('html > body > div > div > div:nth-of-type(3) > div > div > div:nth-of-type(2) > div:nth-of-type(2) > div > div > div > div:nth-of-type(1) > input')
start_date_element.click()
start_date_element.send_keys(Keys.ARROW_RIGHT)
start_date_element.send_keys(Keys.ARROW_RIGHT)
start_date_element.send_keys(Keys.ARROW_RIGHT)
start_date_element.send_keys(Keys.ARROW_RIGHT)
start_date_element.send_keys(Keys.ARROW_RIGHT)
start_date_element.send_keys(Keys.BACKSPACE)
start_date_element.send_keys(Keys.NUMPAD0)
start_date_element.send_keys(Keys.ARROW_LEFT)
start_date_element.send_keys(Keys.ARROW_LEFT)
start_date_element.send_keys(Keys.ARROW_LEFT)
start_date_element.send_keys(Keys.ARROW_LEFT)
start_date_element.send_keys(Keys.ARROW_LEFT)
start_date_element.send_keys(Keys.ARROW_LEFT)
start_date_element.send_keys(Keys.ARROW_LEFT)
start_date_element.send_keys(Keys.ARROW_LEFT)
start_date_element.send_keys(Keys.ARROW_LEFT)
start_date_element.send_keys(Keys.BACKSPACE)
start_date_element.send_keys(Keys.NUMPAD1)
# click away from the date widget
dash_duo.multiple_click('#header', 1)
# see the graphic update
dash_duo.wait_for_element('#cali_map', timeout=TIMEOUT_TIME)
def test_cal_select(dash_duo):
app = import_app('src.app')
dash_duo.start_server(app)
# navigate to cali map page
dash_duo.wait_for_page(url=None, timeout=TIMEOUT_TIME)
dash_duo.multiple_click('#cali-map-nav', 1)
dash_duo.wait_for_element('#calmap', timeout=TIMEOUT_TIME)
dash_duo.driver.maximize_window()
action = ActionChains(dash_duo.driver)
map_element = dash_duo.driver.find_element_by_class_name('js-plotly-plot')
action.pause(2).move_to_element(map_element).perform()
elements = dash_duo.driver.find_elements_by_class_name('modebar-btn')
for element in elements:
if element.get_attribute('data-title') == 'Box Select':
# click on the box select element of the map
action.click(element).perform()
# create bounding box around some incidents
action.click_and_hold(map_element).move_by_offset(-80, 80).release().pause(5).perform()
dash_duo.wait_for_element('#cali_map_table', timeout=TIMEOUT_TIME)
def test_county_map_date_update(dash_duo):
app = import_app('src.app')
dash_duo.start_server(app)
dash_duo.driver.maximize_window()
# navigate to cali map page
dash_duo.wait_for_page(url=None, timeout=TIMEOUT_TIME)
dash_duo.multiple_click('#county-map-nav', 1)
dash_duo.wait_for_element('#county_map_div', timeout=TIMEOUT_TIME)
#start date is updated to 11/01/2020
start_date_element = dash_duo.find_element('html > body > div > div > div:nth-of-type(3) > div > div > div:nth-of-type(2) > div:nth-of-type(2) > div > div > div > div:nth-of-type(1) > input')
start_date_element.click()
start_date_element.send_keys(Keys.ARROW_RIGHT)
start_date_element.send_keys(Keys.ARROW_RIGHT)
start_date_element.send_keys(Keys.ARROW_RIGHT)
start_date_element.send_keys(Keys.ARROW_RIGHT)
start_date_element.send_keys(Keys.ARROW_RIGHT)
start_date_element.send_keys(Keys.BACKSPACE)
start_date_element.send_keys(Keys.NUMPAD0)
start_date_element.send_keys(Keys.ARROW_LEFT)
start_date_element.send_keys(Keys.ARROW_LEFT)
start_date_element.send_keys(Keys.ARROW_LEFT)
start_date_element.send_keys(Keys.ARROW_LEFT)
start_date_element.send_keys(Keys.ARROW_LEFT)
start_date_element.send_keys(Keys.ARROW_LEFT)
start_date_element.send_keys(Keys.ARROW_LEFT)
start_date_element.send_keys(Keys.ARROW_LEFT)
start_date_element.send_keys(Keys.ARROW_LEFT)
start_date_element.send_keys(Keys.BACKSPACE)
start_date_element.send_keys(Keys.NUMPAD1)
# click away from the date widget
dash_duo.multiple_click('#header', 1)
# see the graphic update
dash_duo.wait_for_element('#county_map_div', timeout=TIMEOUT_TIME)
def test_county_pie_date_update(dash_duo):
app = import_app('src.app')
dash_duo.start_server(app)
dash_duo.driver.maximize_window()
# navigate to county map page
dash_duo.wait_for_page(url=None, timeout=TIMEOUT_TIME)
dash_duo.multiple_click('#county-map-nav', 1)
dash_duo.wait_for_element('#county_pie_div', timeout=TIMEOUT_TIME)
#start date is updated to 11/01/2020
start_date_element = dash_duo.find_element('html > body > div > div > div:nth-of-type(3) > div > div > div:nth-of-type(2) > div:nth-of-type(2) > div > div > div > div:nth-of-type(1) > input')
start_date_element.click()
start_date_element.send_keys(Keys.ARROW_RIGHT)
start_date_element.send_keys(Keys.ARROW_RIGHT)
start_date_element.send_keys(Keys.ARROW_RIGHT)
start_date_element.send_keys(Keys.ARROW_RIGHT)
start_date_element.send_keys(Keys.ARROW_RIGHT)
start_date_element.send_keys(Keys.BACKSPACE)
start_date_element.send_keys(Keys.NUMPAD0)
start_date_element.send_keys(Keys.ARROW_LEFT)
start_date_element.send_keys(Keys.ARROW_LEFT)
start_date_element.send_keys(Keys.ARROW_LEFT)
start_date_element.send_keys(Keys.ARROW_LEFT)
start_date_element.send_keys(Keys.ARROW_LEFT)
start_date_element.send_keys(Keys.ARROW_LEFT)
start_date_element.send_keys(Keys.ARROW_LEFT)
start_date_element.send_keys(Keys.ARROW_LEFT)
start_date_element.send_keys(Keys.ARROW_LEFT)
start_date_element.send_keys(Keys.BACKSPACE)
start_date_element.send_keys(Keys.NUMPAD1)
# click away from the date widget
dash_duo.multiple_click('#header', 1)
# see the graphic update
dash_duo.wait_for_element('#county_pie_div', timeout=TIMEOUT_TIME)
def test_county_prediction(dash_duo):
app = import_app('src.app')
dash_duo.start_server(app)
dash_duo.driver.maximize_window()
# load page and navigate to page
dash_duo.wait_for_page(url=None, timeout=TIMEOUT_TIME)
dash_duo.multiple_click('#county-based-pred-nav', 1)
dash_duo.wait_for_element('#pred', timeout=TIMEOUT_TIME)
# find the elements to interact with
month_picker_element = dash_duo.find_element('#month_slider')
county_picker_element = dash_duo.find_element('#county_dropdown')
# click month
dash_duo.click_at_coord_fractions(month_picker_element, fx=0.3, fy=0.1)
# click multiple counties
action = ActionChains(dash_duo.driver)
action.move_to_element(county_picker_element).click().move_by_offset(0, 50).click().pause(1).perform()
action.move_to_element(county_picker_element).click().move_by_offset(0, 50).click().pause(1).perform()
dash_duo.multiple_click('#header', 1)
# see the graphic update
dash_duo.wait_for_element('#pred_table', timeout=TIMEOUT_TIME)
def test_location_prediction(dash_duo):
app = import_app('src.app')
dash_duo.start_server(app)
dash_duo.driver.maximize_window()
# load page and navigate to page
dash_duo.wait_for_page(url=None, timeout=TIMEOUT_TIME)
dash_duo.multiple_click('#geo-based-pred-nav', 1)
dash_duo.wait_for_element('#map-id', timeout=TIMEOUT_TIME)
# find the elements to interact with
month_picker_element = dash_duo.find_element('#month_slider2')
# click month
dash_duo.click_at_coord_fractions(month_picker_element, fx=0.3, fy=0.1)
# click multiple counties
dash_duo.multiple_click('#map-id', 1)
dash_duo.multiple_click('#header', 1)
# see the graphic update
dash_duo.wait_for_element('#th', timeout=TIMEOUT_TIME)
| 38.760956
| 195
| 0.754445
| 1,512
| 9,729
| 4.518519
| 0.116402
| 0.081967
| 0.140515
| 0.15808
| 0.82377
| 0.80079
| 0.785422
| 0.768004
| 0.768004
| 0.75805
| 0
| 0.011977
| 0.141844
| 9,729
| 251
| 196
| 38.760956
| 0.806324
| 0.079248
| 0
| 0.708333
| 0
| 0.017857
| 0.128989
| 0.002463
| 0
| 0
| 0
| 0
| 0.017857
| 1
| 0.053571
| false
| 0
| 0.136905
| 0
| 0.190476
| 0.005952
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
34259140541b99caec7d326d04e00f9361254840
| 33,409
|
py
|
Python
|
sdk/python/pulumi_google_native/cloudscheduler/v1beta1/_inputs.py
|
AaronFriel/pulumi-google-native
|
75d1cda425e33d4610348972cd70bddf35f1770d
|
[
"Apache-2.0"
] | 44
|
2021-04-18T23:00:48.000Z
|
2022-02-14T17:43:15.000Z
|
sdk/python/pulumi_google_native/cloudscheduler/v1beta1/_inputs.py
|
AaronFriel/pulumi-google-native
|
75d1cda425e33d4610348972cd70bddf35f1770d
|
[
"Apache-2.0"
] | 354
|
2021-04-16T16:48:39.000Z
|
2022-03-31T17:16:39.000Z
|
sdk/python/pulumi_google_native/cloudscheduler/v1beta1/_inputs.py
|
AaronFriel/pulumi-google-native
|
75d1cda425e33d4610348972cd70bddf35f1770d
|
[
"Apache-2.0"
] | 8
|
2021-04-24T17:46:51.000Z
|
2022-01-05T10:40:21.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from ._enums import *
__all__ = [
'AppEngineHttpTargetArgs',
'AppEngineRoutingArgs',
'HttpTargetArgs',
'OAuthTokenArgs',
'OidcTokenArgs',
'PubsubTargetArgs',
'RetryConfigArgs',
]
@pulumi.input_type
class AppEngineHttpTargetArgs:
    def __init__(__self__, *,
                 app_engine_routing: Optional[pulumi.Input['AppEngineRoutingArgs']] = None,
                 body: Optional[pulumi.Input[str]] = None,
                 headers: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 http_method: Optional[pulumi.Input['AppEngineHttpTargetHttpMethod']] = None,
                 relative_uri: Optional[pulumi.Input[str]] = None):
        """
        App Engine target. The job is pushed to a job handler via an HTTP request
        using http_method. A response code in the range [200 - 299] acknowledges the
        job. Error 503 counts as an App Engine system error (not an application
        error) and is retried regardless of the retry configuration, without
        counting against retry counts. Any other response code, or no response
        before the deadline, is a failed attempt.

        :param app_engine_routing: App Engine Routing setting for the job.
        :param body: HTTP request body; allowed only when the HTTP method is POST
               or PUT, otherwise setting it is an invalid-argument error.
        :param headers: HTTP request headers (field name -> value). Cloud Scheduler
               applies defaults (`User-Agent`, `X-CloudScheduler`, and
               `Content-Type` when a body is present), computes `Content-Length`,
               and reserves `X-Google-*` / `X-AppEngine-*` as output-only.
        :param http_method: HTTP method for the request; PATCH and OPTIONS are not
               permitted.
        :param relative_uri: Relative URI. Must begin with "/" and be a valid HTTP
               relative URL; may contain a path, query string arguments, and `#`
               fragments. Empty means the root path "/". No spaces; maximum length
               2083 characters.
        """
        # Forward only the values the caller actually supplied.
        optional_fields = (
            ("app_engine_routing", app_engine_routing),
            ("body", body),
            ("headers", headers),
            ("http_method", http_method),
            ("relative_uri", relative_uri),
        )
        for field_name, field_value in optional_fields:
            if field_value is not None:
                pulumi.set(__self__, field_name, field_value)

    @property
    @pulumi.getter(name="appEngineRouting")
    def app_engine_routing(self) -> Optional[pulumi.Input['AppEngineRoutingArgs']]:
        """App Engine Routing setting for the job."""
        return pulumi.get(self, "app_engine_routing")

    @app_engine_routing.setter
    def app_engine_routing(self, new_value: Optional[pulumi.Input['AppEngineRoutingArgs']]):
        pulumi.set(self, "app_engine_routing", new_value)

    @property
    @pulumi.getter
    def body(self) -> Optional[pulumi.Input[str]]:
        """HTTP request body; only valid with POST or PUT, otherwise an invalid-argument error."""
        return pulumi.get(self, "body")

    @body.setter
    def body(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "body", new_value)

    @property
    @pulumi.getter
    def headers(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        HTTP request headers (field name -> value). Cloud Scheduler applies
        defaults (`User-Agent`, `X-CloudScheduler`, `Content-Type` when a body is
        present), computes `Content-Length`, and treats `X-Google-*` /
        `X-AppEngine-*` as output-only.
        """
        return pulumi.get(self, "headers")

    @headers.setter
    def headers(self, new_value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "headers", new_value)

    @property
    @pulumi.getter(name="httpMethod")
    def http_method(self) -> Optional[pulumi.Input['AppEngineHttpTargetHttpMethod']]:
        """HTTP method to use for the request; PATCH and OPTIONS are not permitted."""
        return pulumi.get(self, "http_method")

    @http_method.setter
    def http_method(self, new_value: Optional[pulumi.Input['AppEngineHttpTargetHttpMethod']]):
        pulumi.set(self, "http_method", new_value)

    @property
    @pulumi.getter(name="relativeUri")
    def relative_uri(self) -> Optional[pulumi.Input[str]]:
        """
        Relative URI: begins with "/", may contain path, query string, and `#`
        fragments; empty means "/"; no spaces; max length 2083 characters.
        """
        return pulumi.get(self, "relative_uri")

    @relative_uri.setter
    def relative_uri(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "relative_uri", new_value)
@pulumi.input_type
class AppEngineRoutingArgs:
    def __init__(__self__, *,
                 instance: Optional[pulumi.Input[str]] = None,
                 service: Optional[pulumi.Input[str]] = None,
                 version: Optional[pulumi.Input[str]] = None):
        """
        App Engine Routing. For background on services, versions, and instances see
        the App Engine overview, the microservices-architecture guide, and the
        Standard/Flex request-routing documentation on cloud.google.com.

        :param instance: App instance. By default the job is sent to whichever
               instance is available when attempted; targeting a specific instance
               requires manual scaling in App Engine Standard (Flex does not
               support instances).
        :param service: App service. Defaults to the app's default service when
               the job is attempted.
        :param version: App version. Defaults to the service's default version
               when the job is attempted.
        """
        # Forward only the values the caller actually supplied.
        for field_name, field_value in (
                ("instance", instance),
                ("service", service),
                ("version", version)):
            if field_value is not None:
                pulumi.set(__self__, field_name, field_value)

    @property
    @pulumi.getter
    def instance(self) -> Optional[pulumi.Input[str]]:
        """
        App instance. Defaults to any available instance; a specific instance can
        only be targeted with manual scaling in App Engine Standard (Flex does not
        support instances).
        """
        return pulumi.get(self, "instance")

    @instance.setter
    def instance(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "instance", new_value)

    @property
    @pulumi.getter
    def service(self) -> Optional[pulumi.Input[str]]:
        """App service; defaults to the app's default service when the job is attempted."""
        return pulumi.get(self, "service")

    @service.setter
    def service(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "service", new_value)

    @property
    @pulumi.getter
    def version(self) -> Optional[pulumi.Input[str]]:
        """App version; defaults to the service's default version when the job is attempted."""
        return pulumi.get(self, "version")

    @version.setter
    def version(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "version", new_value)
@pulumi.input_type
class HttpTargetArgs:
    def __init__(__self__, *,
                 uri: pulumi.Input[str],
                 body: Optional[pulumi.Input[str]] = None,
                 headers: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 http_method: Optional[pulumi.Input['HttpTargetHttpMethod']] = None,
                 oauth_token: Optional[pulumi.Input['OAuthTokenArgs']] = None,
                 oidc_token: Optional[pulumi.Input['OidcTokenArgs']] = None):
        """
        Http target. The job is pushed to the job handler via an HTTP request
        using http_method. A response code in the range [200 - 299] acknowledges
        the job; no response is a failed execution. For a redirected request, the
        redirected request's response is the one considered.

        :param uri: Full URI the request is sent to; must begin with "http://" or
               "https://" (e.g. `http://acme.com`, `https://acme.com/sales:8080`).
               Cloud Scheduler encodes some characters for safety; maximum URL
               length is 2083 characters after encoding.
        :param body: HTTP request body; allowed only for POST, PUT, or PATCH,
               otherwise setting it is an error.
        :param headers: User-supplied HTTP request headers (field name -> value).
               Repeated headers are not supported (values may contain commas).
               Some headers are ignored or replaced: Host, `Content-Length`
               (computed), `User-Agent` (set to `"Google-Cloud-Scheduler"`),
               `X-Google-*` / `X-AppEngine-*` (internal). Total header size must
               be under 80KB.
        :param http_method: Which HTTP method to use for the request.
        :param oauth_token: If set, an OAuth token is generated and attached as an
               `Authorization` header; generally only for Google APIs hosted on
               *.googleapis.com.
        :param oidc_token: If set, an OIDC token is generated and attached as an
               `Authorization` header; usable for many scenarios, e.g. Cloud Run
               or endpoints where you validate the token yourself.
        """
        # uri is the only required field; everything else is optional.
        pulumi.set(__self__, "uri", uri)
        optional_fields = (
            ("body", body),
            ("headers", headers),
            ("http_method", http_method),
            ("oauth_token", oauth_token),
            ("oidc_token", oidc_token),
        )
        for field_name, field_value in optional_fields:
            if field_value is not None:
                pulumi.set(__self__, field_name, field_value)

    @property
    @pulumi.getter
    def uri(self) -> pulumi.Input[str]:
        """
        Full URI the request is sent to. Must begin with "http://" or "https://";
        Cloud Scheduler encodes some characters; max 2083 characters after encoding.
        """
        return pulumi.get(self, "uri")

    @uri.setter
    def uri(self, new_value: pulumi.Input[str]):
        pulumi.set(self, "uri", new_value)

    @property
    @pulumi.getter
    def body(self) -> Optional[pulumi.Input[str]]:
        """HTTP request body; only valid with POST, PUT, or PATCH, otherwise an error."""
        return pulumi.get(self, "body")

    @body.setter
    def body(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "body", new_value)

    @property
    @pulumi.getter
    def headers(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        User-supplied HTTP request headers. Repeated headers unsupported; Host,
        `Content-Length`, `User-Agent`, `X-Google-*`, and `X-AppEngine-*` are
        ignored or replaced by Cloud Scheduler; total size must be under 80KB.
        """
        return pulumi.get(self, "headers")

    @headers.setter
    def headers(self, new_value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "headers", new_value)

    @property
    @pulumi.getter(name="httpMethod")
    def http_method(self) -> Optional[pulumi.Input['HttpTargetHttpMethod']]:
        """Which HTTP method to use for the request."""
        return pulumi.get(self, "http_method")

    @http_method.setter
    def http_method(self, new_value: Optional[pulumi.Input['HttpTargetHttpMethod']]):
        pulumi.set(self, "http_method", new_value)

    @property
    @pulumi.getter(name="oauthToken")
    def oauth_token(self) -> Optional[pulumi.Input['OAuthTokenArgs']]:
        """
        Optional OAuth token attached as an `Authorization` header; generally only
        for Google APIs hosted on *.googleapis.com.
        """
        return pulumi.get(self, "oauth_token")

    @oauth_token.setter
    def oauth_token(self, new_value: Optional[pulumi.Input['OAuthTokenArgs']]):
        pulumi.set(self, "oauth_token", new_value)

    @property
    @pulumi.getter(name="oidcToken")
    def oidc_token(self) -> Optional[pulumi.Input['OidcTokenArgs']]:
        """
        Optional OIDC token attached as an `Authorization` header; usable for many
        scenarios, including Cloud Run or endpoints that validate the token
        themselves.
        """
        return pulumi.get(self, "oidc_token")

    @oidc_token.setter
    def oidc_token(self, new_value: Optional[pulumi.Input['OidcTokenArgs']]):
        pulumi.set(self, "oidc_token", new_value)
@pulumi.input_type
class OAuthTokenArgs:
    def __init__(__self__, *,
                 scope: Optional[pulumi.Input[str]] = None,
                 service_account_email: Optional[pulumi.Input[str]] = None):
        """
        Information needed to generate an OAuth token. This type of authorization
        should generally only be used when calling Google APIs hosted on
        *.googleapis.com.

        :param scope: OAuth scope for the generated access token; defaults to
               "https://www.googleapis.com/auth/cloud-platform" when unset.
        :param service_account_email: Service account email used to generate the
               OAuth token. Must be in the same project as the job; the caller
               needs iam.serviceAccounts.actAs on that account.
        """
        # Forward only the values the caller actually supplied.
        for field_name, field_value in (
                ("scope", scope),
                ("service_account_email", service_account_email)):
            if field_value is not None:
                pulumi.set(__self__, field_name, field_value)

    @property
    @pulumi.getter
    def scope(self) -> Optional[pulumi.Input[str]]:
        """
        OAuth scope for the generated access token; defaults to
        "https://www.googleapis.com/auth/cloud-platform" when unset.
        """
        return pulumi.get(self, "scope")

    @scope.setter
    def scope(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "scope", new_value)

    @property
    @pulumi.getter(name="serviceAccountEmail")
    def service_account_email(self) -> Optional[pulumi.Input[str]]:
        """
        Service account email used to generate the OAuth token. Must be within the
        same project as the job; the caller needs iam.serviceAccounts.actAs
        permission for the account.
        """
        return pulumi.get(self, "service_account_email")

    @service_account_email.setter
    def service_account_email(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "service_account_email", new_value)
@pulumi.input_type
class OidcTokenArgs:
    def __init__(__self__, *,
                 audience: Optional[pulumi.Input[str]] = None,
                 service_account_email: Optional[pulumi.Input[str]] = None):
        """
        Information needed to generate an OpenID Connect token. This type of
        authorization can be used for many scenarios, including calling Cloud Run,
        or endpoints where you intend to validate the token yourself.

        :param audience: Audience for the generated OIDC token; defaults to the
               URI specified in target when unset.
        :param service_account_email: Service account email used to generate the
               OIDC token. Must be in the same project as the job; the caller
               needs iam.serviceAccounts.actAs on that account.
        """
        # Forward only the values the caller actually supplied.
        for field_name, field_value in (
                ("audience", audience),
                ("service_account_email", service_account_email)):
            if field_value is not None:
                pulumi.set(__self__, field_name, field_value)

    @property
    @pulumi.getter
    def audience(self) -> Optional[pulumi.Input[str]]:
        """Audience for the generated OIDC token; defaults to the target URI when unset."""
        return pulumi.get(self, "audience")

    @audience.setter
    def audience(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "audience", new_value)

    @property
    @pulumi.getter(name="serviceAccountEmail")
    def service_account_email(self) -> Optional[pulumi.Input[str]]:
        """
        Service account email used to generate the OIDC token. Must be within the
        same project as the job; the caller needs iam.serviceAccounts.actAs
        permission for the account.
        """
        return pulumi.get(self, "service_account_email")

    @service_account_email.setter
    def service_account_email(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "service_account_email", new_value)
@pulumi.input_type
class PubsubTargetArgs:
    def __init__(__self__, *,
                 topic_name: pulumi.Input[str],
                 attributes: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 data: Optional[pulumi.Input[str]] = None):
        """
        Pub/Sub target. The job is delivered by publishing a message to the given
        Pub/Sub topic.

        :param topic_name: Name of the Cloud Pub/Sub topic messages are published
               to when the job is delivered, in PubSub PublishRequest.name format,
               e.g. `projects/PROJECT_ID/topics/TOPIC_ID`. Must be in the same
               project as the Cloud Scheduler job.
        :param attributes: Attributes for the PubsubMessage. The message must
               contain either non-empty data or at least one attribute.
        :param data: Message payload for the PubsubMessage. The message must
               contain either non-empty data or at least one attribute.
        """
        # topic_name is the only required field; everything else is optional.
        pulumi.set(__self__, "topic_name", topic_name)
        for field_name, field_value in (
                ("attributes", attributes),
                ("data", data)):
            if field_value is not None:
                pulumi.set(__self__, field_name, field_value)

    @property
    @pulumi.getter(name="topicName")
    def topic_name(self) -> pulumi.Input[str]:
        """
        Cloud Pub/Sub topic messages are published to, in PubSub
        PublishRequest.name format (e.g. `projects/PROJECT_ID/topics/TOPIC_ID`);
        must be in the same project as the Cloud Scheduler job.
        """
        return pulumi.get(self, "topic_name")

    @topic_name.setter
    def topic_name(self, new_value: pulumi.Input[str]):
        pulumi.set(self, "topic_name", new_value)

    @property
    @pulumi.getter
    def attributes(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Attributes for the PubsubMessage; the message must contain either
        non-empty data or at least one attribute.
        """
        return pulumi.get(self, "attributes")

    @attributes.setter
    def attributes(self, new_value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "attributes", new_value)

    @property
    @pulumi.getter
    def data(self) -> Optional[pulumi.Input[str]]:
        """
        Message payload for the PubsubMessage; the message must contain either
        non-empty data or at least one attribute.
        """
        return pulumi.get(self, "data")

    @data.setter
    def data(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "data", new_value)
@pulumi.input_type
class RetryConfigArgs:
    def __init__(__self__, *,
                 max_backoff_duration: Optional[pulumi.Input[str]] = None,
                 max_doublings: Optional[pulumi.Input[int]] = None,
                 max_retry_duration: Optional[pulumi.Input[str]] = None,
                 min_backoff_duration: Optional[pulumi.Input[str]] = None,
                 retry_count: Optional[pulumi.Input[int]] = None):
        """
        Settings that determine the retry behavior. By default, a job that does
        not complete successfully (no acknowledgement received from the handler)
        is retried with exponential backoff according to these settings.

        :param max_backoff_duration: Maximum wait before retrying a failed job;
               defaults to 1 hour.
        :param max_doublings: Number of times the retry interval doubles. The
               interval starts at min_backoff_duration, doubles `max_doublings`
               times, then grows linearly, and finally retries at
               max_backoff_duration intervals up to retry_count times (e.g. with
               10s/300s/3: 10s, 20s, 40s, 80s, 160s, 240s, 300s, 300s, ...).
               Defaults to 5.
        :param max_retry_duration: Time limit for retrying a failed job, measured
               from the first attempt; combined with retry_count, retries stop
               when both limits are reached. Defaults to zero (unlimited).
        :param min_backoff_duration: Minimum wait before retrying a failed job;
               defaults to 5 seconds.
        :param retry_count: Number of retry attempts using the exponential backoff
               described by max_doublings. Defaults to zero (no retries; the next
               scheduled execution is awaited instead). Non-zero values retry up
               to retry_count times or until the next scheduled execution,
               whichever comes first. Values greater than 5 and negative values
               are not allowed.
        """
        # Forward only the values the caller actually supplied.
        optional_fields = (
            ("max_backoff_duration", max_backoff_duration),
            ("max_doublings", max_doublings),
            ("max_retry_duration", max_retry_duration),
            ("min_backoff_duration", min_backoff_duration),
            ("retry_count", retry_count),
        )
        for field_name, field_value in optional_fields:
            if field_value is not None:
                pulumi.set(__self__, field_name, field_value)

    @property
    @pulumi.getter(name="maxBackoffDuration")
    def max_backoff_duration(self) -> Optional[pulumi.Input[str]]:
        """Maximum wait before retrying a failed job; defaults to 1 hour."""
        return pulumi.get(self, "max_backoff_duration")

    @max_backoff_duration.setter
    def max_backoff_duration(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "max_backoff_duration", new_value)

    @property
    @pulumi.getter(name="maxDoublings")
    def max_doublings(self) -> Optional[pulumi.Input[int]]:
        """
        Number of times the retry interval doubles before growing linearly and
        capping at max_backoff_duration (e.g. with 10s/300s/3: 10s, 20s, 40s,
        80s, 160s, 240s, 300s, 300s, ...). Defaults to 5.
        """
        return pulumi.get(self, "max_doublings")

    @max_doublings.setter
    def max_doublings(self, new_value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "max_doublings", new_value)

    @property
    @pulumi.getter(name="maxRetryDuration")
    def max_retry_duration(self) -> Optional[pulumi.Input[str]]:
        """
        Time limit for retrying a failed job, measured from the first attempt;
        with retry_count set, retries continue until both limits are reached.
        Defaults to zero (unlimited).
        """
        return pulumi.get(self, "max_retry_duration")

    @max_retry_duration.setter
    def max_retry_duration(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "max_retry_duration", new_value)

    @property
    @pulumi.getter(name="minBackoffDuration")
    def min_backoff_duration(self) -> Optional[pulumi.Input[str]]:
        """Minimum wait before retrying a failed job; defaults to 5 seconds."""
        return pulumi.get(self, "min_backoff_duration")

    @min_backoff_duration.setter
    def min_backoff_duration(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "min_backoff_duration", new_value)

    @property
    @pulumi.getter(name="retryCount")
    def retry_count(self) -> Optional[pulumi.Input[int]]:
        """
        Number of retry attempts using the exponential backoff described by
        max_doublings. Zero (the default) means no retries — the next scheduled
        execution is awaited. Values greater than 5 and negative values are not
        allowed.
        """
        return pulumi.get(self, "retry_count")

    @retry_count.setter
    def retry_count(self, new_value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "retry_count", new_value)
| 67.904472
| 1,278
| 0.708881
| 4,686
| 33,409
| 4.961374
| 0.093683
| 0.058196
| 0.045765
| 0.039744
| 0.852467
| 0.801067
| 0.770399
| 0.732505
| 0.71616
| 0.692933
| 0
| 0.004628
| 0.19809
| 33,409
| 491
| 1,279
| 68.04277
| 0.863163
| 0.575773
| 0
| 0.387755
| 1
| 0
| 0.108552
| 0.017877
| 0
| 0
| 0
| 0
| 0
| 1
| 0.20068
| false
| 0
| 0.020408
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
ca9ea30d247f62cd5038b66f0c5e1d3632c6246e
| 76
|
py
|
Python
|
datasets/__init__.py
|
Lmy0217/PyTorch-GAN
|
fa6317f8ecffddeff88caccfbe9d581b2b0c342c
|
[
"MIT"
] | null | null | null |
datasets/__init__.py
|
Lmy0217/PyTorch-GAN
|
fa6317f8ecffddeff88caccfbe9d581b2b0c342c
|
[
"MIT"
] | null | null | null |
datasets/__init__.py
|
Lmy0217/PyTorch-GAN
|
fa6317f8ecffddeff88caccfbe9d581b2b0c342c
|
[
"MIT"
] | null | null | null |
from .MI import *
def forName(dataset_name):
    """Resolve a dataset object exported by this package by its name.

    Looks the name up in this module's globals (populated by ``from .MI import *``)
    instead of calling ``eval``, so an arbitrary expression passed as
    *dataset_name* is never executed.

    :param dataset_name: name of an object exported from ``datasets.MI``.
    :return: the object bound to that name at module level.
    :raises NameError: if no module-level object with that name exists
            (mirrors ``eval``'s failure mode for unknown names, so existing
            callers' error handling is unchanged).
    """
    try:
        # NOTE(review): unlike eval(), this does not fall back to builtins —
        # intentional, since only names exported by .MI are valid datasets.
        return globals()[dataset_name]
    except KeyError:
        raise NameError(f"name '{dataset_name}' is not defined") from None
| 15.2
| 29
| 0.736842
| 11
| 76
| 4.909091
| 0.818182
| 0.407407
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.171053
| 76
| 5
| 29
| 15.2
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
cae490f322f49127364451169c7ff079994e66dd
| 97
|
py
|
Python
|
profit/utils/training_utils/__init__.py
|
ayushkarnawat/profit
|
f3c4d601078b52513af6832c3faf75ddafc59ac5
|
[
"MIT"
] | null | null | null |
profit/utils/training_utils/__init__.py
|
ayushkarnawat/profit
|
f3c4d601078b52513af6832c3faf75ddafc59ac5
|
[
"MIT"
] | 1
|
2021-09-15T13:13:12.000Z
|
2021-09-15T13:13:12.000Z
|
profit/utils/training_utils/__init__.py
|
ayushkarnawat/profit
|
f3c4d601078b52513af6832c3faf75ddafc59ac5
|
[
"MIT"
] | null | null | null |
from profit.utils.training_utils import tensorflow
from profit.utils.training_utils import torch
| 32.333333
| 50
| 0.876289
| 14
| 97
| 5.928571
| 0.5
| 0.240964
| 0.361446
| 0.554217
| 0.819277
| 0.819277
| 0
| 0
| 0
| 0
| 0
| 0
| 0.082474
| 97
| 2
| 51
| 48.5
| 0.932584
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 10
|
1b05357d379f2a04273ff96594a44ff475853501
| 362,591
|
py
|
Python
|
parser/team07/Proyecto/comprobadorTipos/nodoPosicion.py
|
webdev188/tytus
|
847071edb17b218f51bb969d335a8ec093d13f94
|
[
"MIT"
] | 35
|
2020-12-07T03:11:43.000Z
|
2021-04-15T17:38:16.000Z
|
parser/team07/Proyecto/comprobadorTipos/nodoPosicion.py
|
webdev188/tytus
|
847071edb17b218f51bb969d335a8ec093d13f94
|
[
"MIT"
] | 47
|
2020-12-09T01:29:09.000Z
|
2021-01-13T05:37:50.000Z
|
parser/team07/Proyecto/comprobadorTipos/nodoPosicion.py
|
webdev188/tytus
|
847071edb17b218f51bb969d335a8ec093d13f94
|
[
"MIT"
] | 556
|
2020-12-07T03:13:31.000Z
|
2021-06-17T17:41:10.000Z
|
from tabla_Simbolos import simbolo
from tabla_Simbolos import tipoSimbolo
from tabla_Simbolos import simboloColumna
class NodoPosicion():
def __init__(self,fila,columna):
self.fila = fila
self.columna = columna
self.tipoDatoResultado = None
self.tipoOperacion = None
def operar(self,tipoOperacion):
if (self.fila == 0 and self.columna == 0):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores smallInt")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.smallInt)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.DIVISION:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.decimal)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.smallInt)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.smallInt)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.smallInt)
return simb
else:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.smallInt)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.smallInt)
return simb
elif (self.fila == 0 and self.columna == 1):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores smallInt - Integer")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.integer)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.DIVISION:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.decimal)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.integer)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo Integer no puede ser contenido dentro de una columna tipo smallInt")
return simb
else:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.integer)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.integer)
return simb
elif (self.fila == 0 and self.columna == 2):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores smallInt - bigInt")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.bigInit)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.DIVISION:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.decimal)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.bigInit)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo bigInt no puede ser contenido dentro de una columna tipo smallInt")
return simb
else:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.bigInit)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.bigInit)
return simb
elif (self.fila == 0 and self.columna == 3):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores smallInt - decimal")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.decimal)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo decimal no puede ser contenido dentro de una columna tipo smallInt")
return simb
else:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.decimal)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.decimal)
return simb
elif (self.fila == 0 and self.columna == 4):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores smallInt - numeric")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.numeric)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo numeric no puede ser contenido dentro de una columna tipo smallInt")
return simb
else:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.numeric)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.numeric)
return simb
elif (self.fila == 0 and self.columna == 5):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores smallInt - real")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.real)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo real no puede ser contenido dentro de una columna tipo smallInt")
return simb
else:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.real)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.real)
return simb
elif (self.fila == 0 and self.columna == 6):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores smallInt - double")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.double_precision)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo double no puede ser contenido dentro de una columna tipo smallInt")
return simb
else:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.double_precision)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.double_precision)
return simb
elif (self.fila == 0 and self.columna == 7):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores smallInt - money")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.money)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo money no puede ser contenido dentro de una columna tipo smallInt")
return simb
else:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.money)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.money)
return simb
elif (self.fila == 0 and self.columna == 8):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores smallInt - varchar")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.SUMA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.varchar)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.varchar)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.varchar)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo varchar no puede ser contenido dentro de una columna tipo smallInt")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: resta, multiplicacion, division, potencia, modulo no pueden operar valores smallInt - varchar")
return simb
elif (self.fila == 0 and self.columna == 9):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores smallInt - char")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.SUMA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.character)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.character)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.character)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo char no puede ser contenido dentro de una columna tipo smallInt")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: resta, multiplicacion, division, potencia, modulo no pueden operar valores smallInt - char")
return simb
elif (self.fila == 0 and self.columna == 10):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores smallInt - text")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.SUMA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.text)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.text)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.text)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo text no puede ser contenido dentro de una columna tipo smallInt")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: resta, multiplicacion, division, potencia, modulo no pueden operar valores smallInt - text")
return simb
elif (self.fila == 0 and self.columna == 11):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores smallInt - date")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.SUMA or tipoOperacion == tipoSimbolo.TipoSimbolo.RESTA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.date)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.date)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: >, >=, <, <=, == y <> no pueden operar valores smallInt - date")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo date no puede ser contenido dentro de una columna tipo smallInt")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: multiplicacion, division, potencia, modulo no pueden operar valores smallInt - date")
return simb
elif (self.fila == 0 and self.columna == 12):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores smallInt - Time withOut time zone")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.SUMA or tipoOperacion == tipoSimbolo.TipoSimbolo.RESTA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.time_No_zone)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.time_No_zone)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: >, >=, <, <=, == y <> no pueden operar valores smallInt - withOut time zone")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo time withOut time zone no puede ser contenido dentro de una columna tipo smallInt")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: multiplicacion, division, potencia, modulo no pueden operar valores smallInt - withOut time zone")
return simb
elif (self.fila == 0 and self.columna == 13):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores smallInt - time with time zone")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.SUMA or tipoOperacion == tipoSimbolo.TipoSimbolo.RESTA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.time_si_zone)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.time_si_zone)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: >, >=, <, <=, == y <> no pueden operar valores smallInt - time with time zone")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo time with time zone no puede ser contenido dentro de una columna tipo smallInt")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: multiplicacion, division, potencia, modulo no pueden operar valores smallInt - time with time zone")
return simb
elif (self.fila == 0 and self.columna == 14):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores smallInt - boolean")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: >, >=, <, <=, == y <> no pueden operar valores smallInt - boolean")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo boolean no puede ser contenido dentro de una columna tipo smallInt")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones:suma, resta, multiplicacion, division, potencia, modulo no pueden operar valores smallInt - boolean")
return simb
# ********************************************************************************************
# ************************************* FILA 1 ***********************************************
# #*******************************************************************************************
if (self.fila == 1 and self.columna == 0):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores Integer - smallInt")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.integer)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.DIVISION:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.decimal)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.integer)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.integer)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.integer)
return simb
else:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.integer)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.integer)
return simb
elif (self.fila == 1 and self.columna == 1):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores Integer")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.integer)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.DIVISION:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.decimal)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.integer)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.integer)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.integer)
return simb
else:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.integer)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.integer)
return simb
elif (self.fila == 1 and self.columna == 2):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores integer - bigInt")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.bigInit)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.DIVISION:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.decimal)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.bigInit)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo bigInt no puede ser contenido dentro de una columna tipo Integer")
return simb
else:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.bigInit)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.bigInit)
return simb
elif (self.fila == 1 and self.columna == 3):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores Integer - decimal")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.decimal)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo decimal no puede ser contenido dentro de una columna tipo Integer")
return simb
else:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.decimal)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.decimal)
return simb
elif (self.fila == 1 and self.columna == 4):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores Integer - numeric")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.numeric)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo numeric no puede ser contenido dentro de una columna tipo Integer")
return simb
else:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.numeric)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.numeric)
return simb
elif (self.fila == 1 and self.columna == 5):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores Integer - real")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.real)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo real no puede ser contenido dentro de una columna tipo Integer")
return simb
else:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.real)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.real)
return simb
elif (self.fila == 1 and self.columna == 6):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores Integer - double")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.double_precision)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo double no puede ser contenido dentro de una columna tipo Integer")
return simb
else:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.double_precision)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.double_precision)
return simb
elif (self.fila == 1 and self.columna == 7):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores Integer - money")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.money)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo money no puede ser contenido dentro de una columna tipo Integer")
return simb
else:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.money)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.money)
return simb
elif (self.fila == 1 and self.columna == 8):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores integer - varchar")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.SUMA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.varchar)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.varchar)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.varchar)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo varchar no puede ser contenido dentro de una columna tipo integer")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: resta, multiplicacion, division, potencia, modulo no pueden operar valores integer - varchar")
return simb
elif (self.fila == 1 and self.columna == 9):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores integer - char")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.SUMA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.character)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.character)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.character)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo char no puede ser contenido dentro de una columna tipo Integer")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: resta, multiplicacion, division, potencia, modulo no pueden operar valores integer - char")
return simb
elif (self.fila == 1 and self.columna == 10):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores integer - text")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.SUMA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.text)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.text)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.text)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo text no puede ser contenido dentro de una columna tipo integer")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: resta, multiplicacion, division, potencia, modulo no pueden operar valores integer - text")
return simb
elif (self.fila == 1 and self.columna == 11):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores integer - date")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.SUMA or tipoOperacion == tipoSimbolo.TipoSimbolo.RESTA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.date)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.date)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: >, >=, <, <=, == y <> no pueden operar valores integer - date")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo date no puede ser contenido dentro de una columna tipo integer")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: multiplicacion, division, potencia, modulo no pueden operar valores integer - date")
return simb
elif (self.fila == 1 and self.columna == 12):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores integer - Time withOut time zone")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.SUMA or tipoOperacion == tipoSimbolo.TipoSimbolo.RESTA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.time_No_zone)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.time_No_zone)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: >, >=, <, <=, == y <> no pueden operar valores integer - withOut time zone")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo time withOut time zone no puede ser contenido dentro de una columna tipo integer")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: multiplicacion, division, potencia, modulo no pueden operar valores integer - withOut time zone")
return simb
elif (self.fila == 1 and self.columna == 13):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores integer - time with time zone")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.SUMA or tipoOperacion == tipoSimbolo.TipoSimbolo.RESTA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.time_si_zone)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.time_si_zone)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: >, >=, <, <=, == y <> no pueden operar valores integer - time with time zone")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo time with time zone no puede ser contenido dentro de una columna tipo integer")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: multiplicacion, division, potencia, modulo no pueden operar valores integer - time with time zone")
return simb
elif (self.fila == 1 and self.columna == 14):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores integer - boolean")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: >, >=, <, <=, == y <> no pueden operar valores integer - boolean")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo boolean no puede ser contenido dentro de una columna tipo integer")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones:suma, resta, multiplicacion, division, potencia, modulo no pueden operar valores integer - boolean")
return simb
# ********************************************************************************************
# ************************************* FILA 2 ***********************************************
# #*******************************************************************************************
if (self.fila == 2 and self.columna == 0):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores bigInt - smallInt")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.bigInit)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.DIVISION:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.decimal)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.bigInit)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.bigInit)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.bigInit)
return simb
else:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.bigInit)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.bigInit)
return simb
elif (self.fila == 2 and self.columna == 1):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores bigInt - Integer")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.bigInit)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.DIVISION:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.decimal)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.bigInit)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.bigInit)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.bigInit)
return simb
else:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.bigInit)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.bigInit)
return simb
elif (self.fila == 2 and self.columna == 2):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores bigInt - bigInt")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.bigInit)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.DIVISION:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.decimal)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.bigInit)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.bigInit)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.bigInit)
return simb
else:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.bigInit)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.bigInit)
return simb
elif (self.fila == 2 and self.columna == 3):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores bigInt - decimal")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.decimal)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo decimal no puede ser contenido dentro de una columna tipo bigInt")
return simb
else:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.decimal)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.decimal)
return simb
elif (self.fila == 2 and self.columna == 4):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores bigInt - numeric")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.numeric)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo numeric no puede ser contenido dentro de una columna tipo bigInt")
return simb
else:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.numeric)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.numeric)
return simb
elif (self.fila == 2 and self.columna == 5):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores bigInt - real")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.real)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo real no puede ser contenido dentro de una columna tipo bigInt")
return simb
else:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.real)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.real)
return simb
elif (self.fila == 2 and self.columna == 6):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores bigInit - double")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.double_precision)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo double no puede ser contenido dentro de una columna tipo bigInt")
return simb
else:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.double_precision)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.double_precision)
return simb
elif (self.fila == 2 and self.columna == 7):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores bigInit - money")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.money)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo money no puede ser contenido dentro de una columna tipo bigInt")
return simb
else:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.money)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.money)
return simb
elif (self.fila == 2 and self.columna == 8):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores bigInt - varchar")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.SUMA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.varchar)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.varchar)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.varchar)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo varchar no puede ser contenido dentro de una columna tipo bigInt")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: resta, multiplicacion, division, potencia, modulo no pueden operar valores bigInt - varchar")
return simb
elif (self.fila == 2 and self.columna == 9):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores bigInt - char")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.SUMA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.character)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.character)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.character)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo char no puede ser contenido dentro de una columna tipo bigInt")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: resta, multiplicacion, division, potencia, modulo no pueden operar valores bigInt - char")
return simb
elif (self.fila == 2 and self.columna == 10):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores bigInt - text")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.SUMA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.text)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.text)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.text)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo text no puede ser contenido dentro de una columna tipo bigInt")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: resta, multiplicacion, division, potencia, modulo no pueden operar valores bigInt - text")
return simb
elif (self.fila == 2 and self.columna == 11):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores bigInt - date")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.SUMA or tipoOperacion == tipoSimbolo.TipoSimbolo.RESTA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.date)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.date)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: >, >=, <, <=, == y <> no pueden operar valores bigInt - date")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo date no puede ser contenido dentro de una columna tipo bigInt")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: multiplicacion, division, potencia, modulo no pueden operar valores bigInt - date")
return simb
elif (self.fila == 2 and self.columna == 12):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores bigInt - Time withOut time zone")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.SUMA or tipoOperacion == tipoSimbolo.TipoSimbolo.RESTA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.time_No_zone)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.time_No_zone)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: >, >=, <, <=, == y <> no pueden operar valores bigInt - withOut time zone")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo time withOut time zone no puede ser contenido dentro de una columna tipo bigInt")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: multiplicacion, division, potencia, modulo no pueden operar valores bigInt - withOut time zone")
return simb
elif (self.fila == 2 and self.columna == 13):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores bigInt - time with time zone")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.SUMA or tipoOperacion == tipoSimbolo.TipoSimbolo.RESTA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.time_si_zone)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.time_si_zone)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: >, >=, <, <=, == y <> no pueden operar valores bigInt - time with time zone")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo time with time zone no puede ser contenido dentro de una columna tipo bigInt")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: multiplicacion, division, potencia, modulo no pueden operar valores bigInt - time with time zone")
return simb
elif (self.fila == 2 and self.columna == 14):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores bigInt - boolean")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: >, >=, <, <=, == y <> no pueden operar valores bigInt - boolean")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo boolean no puede ser contenido dentro de una columna tipo bigInt")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones:suma, resta, multiplicacion, division, potencia, modulo no pueden operar valores bigInt - boolean")
return simb
# ********************************************************************************************
# ************************************* FILA 3 ***********************************************
# #*******************************************************************************************
if (self.fila == 3 and self.columna == 0):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores decimal - smallInt")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.decimal)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.DIVISION:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.decimal)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.decimal)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.decimal)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.decimal)
return simb
else:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.decimal)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.decimal)
return simb
elif (self.fila == 3 and self.columna == 1):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores decimal - Integer")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.decimal)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.DIVISION:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.decimal)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.decimal)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.decimal)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.decimal)
return simb
else:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.decimal)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.decimal)
return simb
elif (self.fila == 3 and self.columna == 2):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores decimal - bigInt")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.decimal)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.DIVISION:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.decimal)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.decimal)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.decimal)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.decimal)
return simb
else:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.decimal)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.decimal)
return simb
elif (self.fila == 3 and self.columna == 3):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores decimal - decimal")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.decimal)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.decimal)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.decimal)
return simb
else:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.decimal)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.decimal)
return simb
elif (self.fila == 3 and self.columna == 4):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores decimal - numeric")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.numeric)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.decimal)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.decimal)
return simb
else:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.numeric)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.numeric)
return simb
elif (self.fila == 3 and self.columna == 5):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores decimal - real")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.decimal)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.decimal)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.decimal)
return simb
else:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.decimal)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.decimal)
return simb
elif (self.fila == 3 and self.columna == 6):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores decimal - double")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.double_precision)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo double no puede ser contenido dentro de una columna tipo decimal")
return simb
else:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.double_precision)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.double_precision)
return simb
elif (self.fila == 3 and self.columna == 7):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores decimal - money")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.money)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo money no puede ser contenido dentro de una columna tipo decimal")
return simb
else:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.money)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.money)
return simb
elif (self.fila == 3 and self.columna == 8):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores decimal - varchar")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.SUMA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.varchar)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.varchar)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.varchar)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo varchar no puede ser contenido dentro de una columna tipo decimal")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: resta, multiplicacion, division, potencia, modulo no pueden operar valores decimal - varchar")
return simb
elif (self.fila == 3 and self.columna == 9):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores decimal - char")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.SUMA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.character)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.character)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.character)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo char no puede ser contenido dentro de una columna tipo decimal")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: resta, multiplicacion, division, potencia, modulo no pueden operar valores decimal - char")
return simb
elif (self.fila == 3 and self.columna == 10):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores decimal - text")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.SUMA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.text)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.text)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.text)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo text no puede ser contenido dentro de una columna tipo decimal")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: resta, multiplicacion, division, potencia, modulo no pueden operar valores decimal - text")
return simb
elif (self.fila == 3 and self.columna == 11):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores decimal - date")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.SUMA or tipoOperacion == tipoSimbolo.TipoSimbolo.RESTA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.date)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.date)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: >, >=, <, <=, == y <> no pueden operar valores decimal - date")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo date no puede ser contenido dentro de una columna tipo decimal")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: multiplicacion, division, potencia, modulo no pueden operar valores decimal - date")
return simb
elif (self.fila == 3 and self.columna == 12):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores decimal - Time withOut time zone")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.SUMA or tipoOperacion == tipoSimbolo.TipoSimbolo.RESTA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.time_No_zone)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.time_No_zone)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: >, >=, <, <=, == y <> no pueden operar valores decimal - withOut time zone")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo time withOut time zone no puede ser contenido dentro de una columna tipo decimal")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: multiplicacion, division, potencia, modulo no pueden operar valores decimal - withOut time zone")
return simb
elif (self.fila == 3 and self.columna == 13):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores decimal - time with time zone")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.SUMA or tipoOperacion == tipoSimbolo.TipoSimbolo.RESTA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.time_si_zone)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.time_si_zone)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: >, >=, <, <=, == y <> no pueden operar valores decimal - time with time zone")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo time with time zone no puede ser contenido dentro de una columna tipo decimal")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: multiplicacion, division, potencia, modulo no pueden operar valores decimal - time with time zone")
return simb
elif (self.fila == 3 and self.columna == 14):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores decimal - boolean")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: >, >=, <, <=, == y <> no pueden operar valores decimal - boolean")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo boolean no puede ser contenido dentro de una columna tipo decimal")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones:suma, resta, multiplicacion, division, potencia, modulo no pueden operar valores decimal - boolean")
return simb
# ********************************************************************************************
# ************************************* FILA 4 ***********************************************
# ********************************************************************************************
if (self.fila == 4 and self.columna == 0):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores numeric - smallInt")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.numeric)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.DIVISION:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.numeric)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.numeric)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.numeric)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.numeric)
return simb
else:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.numeric)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.numeric)
return simb
elif (self.fila == 4 and self.columna == 1):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores numeric - Integer")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.numeric)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.DIVISION:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.numeric)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.numeric)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.numeric)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.numeric)
return simb
else:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.numeric)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.numeric)
return simb
elif (self.fila == 4 and self.columna == 2):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores numeric - bigInt")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.numeric)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.DIVISION:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.numeric)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.numeric)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.numeric)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.numeric)
return simb
else:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.numeric)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.numeric)
return simb
elif (self.fila == 4 and self.columna == 3):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores numeric - decimal")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.numeric)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.numeric)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.numeric)
return simb
else:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.numeric)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.numeric)
return simb
elif (self.fila == 4 and self.columna == 4):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores numeric - numeric")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.numeric)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.numeric)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.numeric)
return simb
else:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.numeric)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.numeric)
return simb
elif (self.fila == 4 and self.columna == 5):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores numeric - real")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.real)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo real no puede ser contenido dentro de una columna tipo numeric")
return simb
else:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.real)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.real)
return simb
elif (self.fila == 4 and self.columna == 6):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores numeric - double")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.double_precision)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo double no puede ser contenido dentro de una columna tipo numeric")
return simb
else:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.double_precision)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.double_precision)
return simb
elif (self.fila == 4 and self.columna == 7):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores numeric - money")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.money)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo money no puede ser contenido dentro de una columna tipo numeric")
return simb
else:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.money)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.money)
return simb
elif (self.fila == 4 and self.columna == 8):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores numeric - varchar")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.SUMA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.varchar)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.varchar)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.varchar)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo varchar no puede ser contenido dentro de una columna tipo numeric")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: resta, multiplicacion, division, potencia, modulo no pueden operar valores numeric - varchar")
return simb
elif (self.fila == 4 and self.columna == 9):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores numeric - char")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.SUMA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.character)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.character)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.character)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo char no puede ser contenido dentro de una columna tipo numeric")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: resta, multiplicacion, division, potencia, modulo no pueden operar valores numeric - char")
return simb
elif (self.fila == 4 and self.columna == 10):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores numeric - text")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.SUMA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.text)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.text)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.text)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo text no puede ser contenido dentro de una columna tipo numeric")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: resta, multiplicacion, division, potencia, modulo no pueden operar valores numeric - text")
return simb
elif (self.fila == 4 and self.columna == 11):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores numeric - date")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.SUMA or tipoOperacion == tipoSimbolo.TipoSimbolo.RESTA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.date)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.date)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: >, >=, <, <=, == y <> no pueden operar valores numeric - date")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo date no puede ser contenido dentro de una columna tipo numeric")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: multiplicacion, division, potencia, modulo no pueden operar valores numeric - date")
return simb
elif (self.fila == 4 and self.columna == 12):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores numeric - Time withOut time zone")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.SUMA or tipoOperacion == tipoSimbolo.TipoSimbolo.RESTA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.time_No_zone)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.time_No_zone)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: >, >=, <, <=, == y <> no pueden operar valores numeric - withOut time zone")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo time withOut time zone no puede ser contenido dentro de una columna tipo numeric")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: multiplicacion, division, potencia, modulo no pueden operar valores numeric - withOut time zone")
return simb
elif (self.fila == 4 and self.columna == 13):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores numeric - time with time zone")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.SUMA or tipoOperacion == tipoSimbolo.TipoSimbolo.RESTA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.time_si_zone)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.time_si_zone)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: >, >=, <, <=, == y <> no pueden operar valores bigInt - numeric with time zone")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo time with time zone no puede ser contenido dentro de una columna tipo numeric")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: multiplicacion, division, potencia, modulo no pueden operar valores numeric - time with time zone")
return simb
elif (self.fila == 4 and self.columna == 14):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores numeric - boolean")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: >, >=, <, <=, == y <> no pueden operar valores numeric - boolean")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo boolean no puede ser contenido dentro de una columna tipo numeric")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones:suma, resta, multiplicacion, division, potencia, modulo no pueden operar valores numeric - boolean")
return simb
# ********************************************************************************************
# ************************************* FILA 5 ***********************************************
# #*******************************************************************************************
if (self.fila == 5 and self.columna == 0):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores real - smallInt")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.real)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.DIVISION:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.real)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.real)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.real)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.real)
return simb
else:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.real)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.real)
return simb
elif (self.fila == 5 and self.columna == 1):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores real - Integer")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.real)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.DIVISION:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.real)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.real)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.real)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.real)
return simb
else:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.real)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.real)
return simb
elif (self.fila == 5 and self.columna == 2):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores real - bigInt")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.real)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.DIVISION:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.real)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.real)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.real)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.real)
return simb
else:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.real)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.real)
return simb
elif (self.fila == 5 and self.columna == 3):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores real - decimal")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.real)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.real)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.real)
return simb
else:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.real)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.real)
return simb
elif (self.fila == 5 and self.columna == 4):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores real - numeric")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.real)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.real)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.real)
return simb
else:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.real)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.real)
return simb
elif (self.fila == 5 and self.columna == 5):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores real - real")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.real)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.real)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.real)
return simb
else:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.real)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.real)
return simb
elif (self.fila == 5 and self.columna == 6):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores real - double")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.double_precision)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo double no puede ser contenido dentro de una columna tipo real")
return simb
else:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.double_precision)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.double_precision)
return simb
elif (self.fila == 5 and self.columna == 7):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores real - money")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.money)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo money no puede ser contenido dentro de una columna tipo real")
return simb
else:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.money)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.money)
return simb
elif (self.fila == 5 and self.columna == 8):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores real - varchar")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.SUMA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.varchar)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.varchar)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.varchar)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo varchar no puede ser contenido dentro de una columna tipo real")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: resta, multiplicacion, division, potencia, modulo no pueden operar valores real - varchar")
return simb
elif (self.fila == 5 and self.columna == 9):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores real - char")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.SUMA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.character)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.character)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.character)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo char no puede ser contenido dentro de una columna tipo real")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: resta, multiplicacion, division, potencia, modulo no pueden operar valores real - char")
return simb
elif (self.fila == 5 and self.columna == 10):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores real - text")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.SUMA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.text)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.text)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.text)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo text no puede ser contenido dentro de una columna tipo real")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: resta, multiplicacion, division, potencia, modulo no pueden operar valores real - text")
return simb
elif (self.fila == 5 and self.columna == 11):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores real - date")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.SUMA or tipoOperacion == tipoSimbolo.TipoSimbolo.RESTA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.date)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.date)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: >, >=, <, <=, == y <> no pueden operar valores real - date")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo date no puede ser contenido dentro de una columna tipo real")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: multiplicacion, division, potencia, modulo no pueden operar valores real - date")
return simb
elif (self.fila == 5 and self.columna == 12):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores real - Time withOut time zone")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.SUMA or tipoOperacion == tipoSimbolo.TipoSimbolo.RESTA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.time_No_zone)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.time_No_zone)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: >, >=, <, <=, == y <> no pueden operar valores real - withOut time zone")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo time withOut time zone no puede ser contenido dentro de una columna tipo real")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: multiplicacion, division, potencia, modulo no pueden operar valores real - withOut time zone")
return simb
elif (self.fila == 5 and self.columna == 13):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores real - time with time zone")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.SUMA or tipoOperacion == tipoSimbolo.TipoSimbolo.RESTA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.time_si_zone)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.time_si_zone)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: >, >=, <, <=, == y <> no pueden operar valores real - numeric with time zone")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo time with time zone no puede ser contenido dentro de una columna tipo real")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: multiplicacion, division, potencia, modulo no pueden operar valores real - time with time zone")
return simb
elif (self.fila == 5 and self.columna == 14):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores real - boolean")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: >, >=, <, <=, == y <> no pueden operar valores real - boolean")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo boolean no puede ser contenido dentro de una columna real numeric")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones:suma, resta, multiplicacion, division, potencia, modulo no pueden operar valores real - boolean")
return simb
# ********************************************************************************************
# ************************************* FILA 6 ***********************************************
# #*******************************************************************************************
if (self.fila == 6 and self.columna == 0):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores double - smallInt")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.double_precision)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.DIVISION:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.double_precision)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.double_precision)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.double_precision)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.double_precision)
return simb
else:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.double_precision)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.double_precision)
return simb
elif (self.fila == 6 and self.columna == 1):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores double - Integer")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.double_precision)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.DIVISION:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.double_precision)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.double_precision)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.double_precision)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.double_precision)
return simb
else:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.double_precision)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.double_precision)
return simb
elif (self.fila == 6 and self.columna == 2):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores dobule - bigInt")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.double_precision)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.DIVISION:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.double_precision)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.double_precision)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.double_precision)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.double_precision)
return simb
else:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.double_precision)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.double_precision)
return simb
elif (self.fila == 6 and self.columna == 3):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores double - decimal")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.double_precision)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.double_precision)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.double_precision)
return simb
else:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.double_precision)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.double_precision)
return simb
elif (self.fila == 6 and self.columna == 4):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores double - numeric")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.double_precision)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.double_precision)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.double_precision)
return simb
else:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.double_precision)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.double_precision)
return simb
elif (self.fila == 6 and self.columna == 5):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores double - real")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.double_precision)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.double_precision)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.double_precision)
return simb
else:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.double_precision)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.double_precision)
return simb
elif (self.fila == 6 and self.columna == 6):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores double - double")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.double_precision)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.double_precision)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.double_precision)
return simb
else:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.double_precision)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.double_precision)
return simb
elif (self.fila == 6 and self.columna == 7):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores double - money")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.money)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo money no puede ser contenido dentro de una columna tipo double")
return simb
else:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.money)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.money)
return simb
elif (self.fila == 6 and self.columna == 8):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores double - varchar")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.SUMA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.varchar)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.varchar)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.varchar)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo varchar no puede ser contenido dentro de una columna tipo double")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: resta, multiplicacion, division, potencia, modulo no pueden operar valores double - varchar")
return simb
elif (self.fila == 6 and self.columna == 9):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores double - char")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.SUMA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.character)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.character)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.character)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo char no puede ser contenido dentro de una columna tipo double")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: resta, multiplicacion, division, potencia, modulo no pueden operar valores double - char")
return simb
elif (self.fila == 6 and self.columna == 10):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores double - text")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.SUMA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.text)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.text)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.text)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo text no puede ser contenido dentro de una columna tipo double")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: resta, multiplicacion, division, potencia, modulo no pueden operar valores double - text")
return simb
elif (self.fila == 6 and self.columna == 11):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores double - date")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.SUMA or tipoOperacion == tipoSimbolo.TipoSimbolo.RESTA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.date)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.date)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: >, >=, <, <=, == y <> no pueden operar valores double - date")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo date no puede ser contenido dentro de una columna tipo double")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: multiplicacion, division, potencia, modulo no pueden operar valores double - date")
return simb
elif (self.fila == 6 and self.columna == 12):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores double - Time withOut time zone")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.SUMA or tipoOperacion == tipoSimbolo.TipoSimbolo.RESTA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.time_No_zone)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.time_No_zone)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: >, >=, <, <=, == y <> no pueden operar valores double - withOut time zone")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo time withOut time zone no puede ser contenido dentro de una columna tipo double")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: multiplicacion, division, potencia, modulo no pueden operar valores double - withOut time zone")
return simb
elif (self.fila == 6 and self.columna == 13):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores double - time with time zone")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.SUMA or tipoOperacion == tipoSimbolo.TipoSimbolo.RESTA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.time_si_zone)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.time_si_zone)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: >, >=, <, <=, == y <> no pueden operar valores double - numeric with time zone")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo time with time zone no puede ser contenido dentro de una columna tipo double")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: multiplicacion, division, potencia, modulo no pueden operar valores double - time with time zone")
return simb
elif (self.fila == 6 and self.columna == 14):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores double - boolean")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: >, >=, <, <=, == y <> no pueden operar valores double - boolean")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo boolean no puede ser contenido dentro de una columna tipo double")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones:suma, resta, multiplicacion, division, potencia, modulo no pueden operar valores double - boolean")
return simb
# ********************************************************************************************
# ************************************* FILA 7 ***********************************************
# #*******************************************************************************************
if (self.fila == 7 and self.columna == 0):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores money - smallInt")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.money)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.DIVISION:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.money)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.money)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.money)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.money)
return simb
else:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.money)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.money)
return simb
elif (self.fila == 7 and self.columna == 1):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores money - Integer")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.money)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.DIVISION:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.money)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.money)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.money)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.money)
return simb
else:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.money)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.money)
return simb
elif (self.fila == 7 and self.columna == 2):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores money - bigInt")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.money)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.DIVISION:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.money)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.money)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.money)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.money)
return simb
else:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.money)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.money)
return simb
elif (self.fila == 7 and self.columna == 3):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores money - decimal")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.money)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.money)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.money)
return simb
else:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.money)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.money)
return simb
elif (self.fila == 7 and self.columna == 4):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores money - numeric")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.money)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.money)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.money)
return simb
else:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.money)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.money)
return simb
elif (self.fila == 7 and self.columna == 5):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores money - real")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.money)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.money)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.money)
return simb
else:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.money)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.money)
return simb
elif (self.fila == 7 and self.columna == 6):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores money - double")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.money)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.money)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.money)
return simb
else:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.money)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.money)
return simb
elif (self.fila == 7 and self.columna == 7):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores money - money")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.money)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.money)
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.money)
return simb
else:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.money)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.money)
return simb
elif (self.fila == 7 and self.columna == 8):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores money - varchar")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.SUMA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.varchar)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.varchar)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.varchar)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo varchar no puede ser contenido dentro de una columna tipo money")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: resta, multiplicacion, division, potencia, modulo no pueden operar valores money - varchar")
return simb
elif (self.fila == 7 and self.columna == 9):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores money - char")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.SUMA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.character)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.character)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.character)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo char no puede ser contenido dentro de una columna tipo money")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: resta, multiplicacion, division, potencia, modulo no pueden operar valores money - char")
return simb
elif (self.fila == 7 and self.columna == 10):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores money - text")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.SUMA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.text)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.text)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.text)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo text no puede ser contenido dentro de una columna tipo money")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: resta, multiplicacion, division, potencia, modulo no pueden operar valores money - text")
return simb
elif (self.fila == 7 and self.columna == 11):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores money - date")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: >, >=, <, <=, == y <> no pueden operar valores money - date")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo date no puede ser contenido dentro de una columna tipo money")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: suma, resta, multiplicacion, division, potencia, modulo no pueden operar valores money - date")
return simb
elif (self.fila == 7 and self.columna == 12):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores money - Time withOut time zone")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: >, >=, <, <=, == y <> no pueden operar valores money - withOut time zone")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo time withOut time zone no puede ser contenido dentro de una columna tipo money")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: suma,resta, multiplicacion, division, potencia, modulo no pueden operar valores money - withOut time zone")
return simb
elif (self.fila == 7 and self.columna == 13):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores money - time with time zone")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: >, >=, <, <=, == y <> no pueden operar valores money - numeric with time zone")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo time with time zone no puede ser contenido dentro de una columna tipo money")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: suma,resta, multiplicacion, division, potencia, modulo no pueden operar valores money - time with time zone")
return simb
elif (self.fila == 7 and self.columna == 14):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores money - boolean")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: >, >=, <, <=, == y <> no pueden operar valores money - boolean")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo boolean no puede ser contenido dentro de una columna tipo money")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones:suma, resta, multiplicacion, division, potencia, modulo no pueden operar valores money - boolean")
return simb
# ********************************************************************************************
# ************************************* FILA 8 ***********************************************
# #*******************************************************************************************
if (self.fila == 8 and self.columna == 0):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores varchar - smallInt")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.varchar)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.SUMA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.varchar)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.varchar)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo smallInt no puede ser contenido dentro de una columna tipo varchar")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: resta, multiplicacion, division, potencia, modulo no pueden operar valores varchar - smallInt")
return simb
elif (self.fila == 8 and self.columna == 1):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores varchar - integer")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.varchar)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.SUMA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.varchar)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.varchar)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo integer no puede ser contenido dentro de una columna tipo varchar")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: resta, multiplicacion, division, potencia, modulo no pueden operar valores varchar - integer")
return simb
elif (self.fila == 8 and self.columna == 2):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores varchar - bigInt")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.varchar)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.SUMA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.varchar)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.varchar)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo bitInt no puede ser contenido dentro de una columna tipo varchar")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: resta, multiplicacion, division, potencia, modulo no pueden operar valores bigInt - smallInt")
return simb
elif (self.fila == 8 and self.columna == 3):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores varchar - decimal")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.varchar)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.SUMA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.varchar)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.varchar)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo decimal no puede ser contenido dentro de una columna tipo varchar")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: resta, multiplicacion, division, potencia, modulo no pueden operar valores varchar - decimal")
return simb
elif (self.fila == 8 and self.columna == 4):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores varchar - numeric")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.varchar)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.SUMA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.varchar)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.varchar)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo numeric no puede ser contenido dentro de una columna tipo varchar")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: resta, multiplicacion, division, potencia, modulo no pueden operar valores varchar - numeric")
return simb
elif (self.fila == 8 and self.columna == 5):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores varchar - real")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.varchar)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.SUMA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.varchar)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.varchar)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo real no puede ser contenido dentro de una columna tipo varchar")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: resta, multiplicacion, division, potencia, modulo no pueden operar valores varchar - real")
return simb
elif (self.fila == 8 and self.columna == 6):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores varchar - double")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.varchar)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.SUMA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.varchar)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.varchar)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo double no puede ser contenido dentro de una columna tipo varchar")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: resta, multiplicacion, division, potencia, modulo no pueden operar valores varchar - double")
return simb
elif (self.fila == 8 and self.columna == 7):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores varchar - money")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones: >, >=, <, <=, ==, <> no puede operar varlos varchar - money")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.SUMA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.varchar)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.varchar)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo money no puede ser contenido dentro de una columna tipo varchar")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: resta, multiplicacion, division, potencia, modulo no pueden operar valores varchar - money")
return simb
elif (self.fila == 8 and self.columna == 8):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores varchar - varchar")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.varchar)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.SUMA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.varchar)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.varchar)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.varchar)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.varchar)
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: resta, multiplicacion, division, potencia, modulo no pueden operar valores varchar - varchar")
return simb
elif (self.fila == 8 and self.columna == 9):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores varchar - char")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.varchar)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.SUMA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.varchar)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.varchar)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.varchar)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.varchar)
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: resta, multiplicacion, division, potencia, modulo no pueden operar valores varchar - char")
return simb
elif (self.fila == 8 and self.columna == 10):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores varchar - text")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.SUMA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.text)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.text)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.text)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.varchar)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.varchar)
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: resta, multiplicacion, division, potencia, modulo no pueden operar valores varchar - text")
return simb
elif (self.fila == 8 and self.columna == 11):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores varchar - date")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones: <, <=, >, >=, ==, <> no pueden operar valures varchar - date")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo date no puede ser contenido dentro de una columna tipo varchar")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: suma, resta, multiplicacion, division, potencia, modulo no pueden operar valores varchar - date")
return simb
elif (self.fila == 8 and self.columna == 12):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores varchar - time withOut time zone")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones: <, <=, >, >=, ==, <> no pueden operar valures varchar - time withOut time zone")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo time withOut time no puede ser contenido dentro de una columna tipo varchar")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: suma, resta, multiplicacion, division, potencia, modulo no pueden operar valores varchar - time withOut time zone")
return simb
elif (self.fila == 8 and self.columna == 13):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores varchar - time with time zone")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones: <, <=, >, >=, ==, <> no pueden operar valures varchar - time with time zone")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo time with time zone no puede ser contenido dentro de una columna tipo varchar")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: suma, resta, multiplicacion, division, potencia, modulo no pueden operar valores time with time zone - varchar")
return simb
elif (self.fila == 8 and self.columna == 14):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores varchar - boolean")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones: <, <=, >, >=, ==, <> no pueden operar valures varchar - boolean")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo boolean no puede ser contenido dentro de una columna tipo varchar")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: suma, resta, multiplicacion, division, potencia, modulo no pueden operar valores varchar - boolean")
return simb
# ********************************************************************************************
# ************************************* FILA 9 ***********************************************
# ********************************************************************************************
if (self.fila == 9 and self.columna == 0):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores char - smallInt")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.character)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.SUMA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.character)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.character)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo smallInt no puede ser contenido dentro de una columna tipo char")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: resta, multiplicacion, division, potencia, modulo no pueden operar valores char - smallInt")
return simb
elif (self.fila == 9 and self.columna == 1):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores char - integer")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.character)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.SUMA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.character)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.character)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo integer no puede ser contenido dentro de una columna tipo char")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: resta, multiplicacion, division, potencia, modulo no pueden operar valores char - integer")
return simb
elif (self.fila == 9 and self.columna == 2):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores char - bigInt")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.character)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.SUMA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.character)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.character)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo bitInt no puede ser contenido dentro de una columna tipo char")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: resta, multiplicacion, division, potencia, modulo no pueden operar valores char - smallInt")
return simb
elif (self.fila == 9 and self.columna == 3):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores char - decimal")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.character)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.SUMA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.character)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.character)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo decimal no puede ser contenido dentro de una columna tipo char")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: resta, multiplicacion, division, potencia, modulo no pueden operar valores char - decimal")
return simb
elif (self.fila == 9 and self.columna == 4):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores char - numeric")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.character)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.SUMA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.character)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.character)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo numeric no puede ser contenido dentro de una columna tipo char")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: resta, multiplicacion, division, potencia, modulo no pueden operar valores char - numeric")
return simb
elif (self.fila == 9 and self.columna == 5):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores char - real")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.character)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.SUMA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.character)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.character)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo real no puede ser contenido dentro de una columna tipo char")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: resta, multiplicacion, division, potencia, modulo no pueden operar valores char - real")
return simb
elif (self.fila == 9 and self.columna == 6):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores char - double")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.character)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.SUMA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.character)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.character)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo double no puede ser contenido dentro de una columna tipo char")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: resta, multiplicacion, division, potencia, modulo no pueden operar valores char - double")
return simb
elif (self.fila == 9 and self.columna == 7):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores char - money")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones: >, >=, <, <=, ==, <> no puede operar varlos char - money")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.SUMA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.character)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.character)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo money no puede ser contenido dentro de una columna tipo char")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: resta, multiplicacion, division, potencia, modulo no pueden operar valores char - money")
return simb
elif (self.fila == 9 and self.columna == 8):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores char - varchar")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.character)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.SUMA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.character)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.character)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.character)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.character)
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: resta, multiplicacion, division, potencia, modulo no pueden operar valores char - varchar")
return simb
elif (self.fila == 9 and self.columna == 9):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores char - char")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.character)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.SUMA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.character)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.character)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.character)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.character)
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: resta, multiplicacion, division, potencia, modulo no pueden operar valores char - char")
return simb
elif (self.fila == 9 and self.columna == 10):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores char - text")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.SUMA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.character)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.character)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.character)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.character)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.character)
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: resta, multiplicacion, division, potencia, modulo no pueden operar valores char - text")
return simb
elif (self.fila == 9 and self.columna == 11):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores char - date")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones: <, <=, >, >=, ==, <> no pueden operar valures char - date")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo date no puede ser contenido dentro de una columna tipo char")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: suma, resta, multiplicacion, division, potencia, modulo no pueden operar valores char - date")
return simb
elif (self.fila == 9 and self.columna == 12):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores char - time withOut time zone")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones: <, <=, >, >=, ==, <> no pueden operar valures char - time withOut time zone")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo time withOut time no puede ser contenido dentro de una columna tipo char")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: suma, resta, multiplicacion, division, potencia, modulo no pueden operar valores char - time withOut time zone")
return simb
elif (self.fila == 9 and self.columna == 13):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores char - time with time zone")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones: <, <=, >, >=, ==, <> no pueden operar valures char - time with time zone")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo time with time zone no puede ser contenido dentro de una columna tipo char")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: suma, resta, multiplicacion, division, potencia, modulo no pueden operar valores time with time zone - char")
return simb
elif (self.fila == 9 and self.columna == 14):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores char - boolean")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones: <, <=, >, >=, ==, <> no pueden operar valures char - boolean")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo boolean no puede ser contenido dentro de una columna tipo char")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: suma, resta, multiplicacion, division, potencia, modulo no pueden operar valores char - boolean")
return simb
# ********************************************************************************************
# ************************************* FILA 10 ***********************************************
# ********************************************************************************************
if (self.fila == 10 and self.columna == 0):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores text - smallInt")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.text)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.SUMA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.text)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.text)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo smallInt no puede ser contenido dentro de una columna tipo text")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: resta, multiplicacion, division, potencia, modulo no pueden operar valores text - smallInt")
return simb
elif (self.fila == 10 and self.columna == 1):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores text - integer")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.text)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.SUMA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.text)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.text)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo integer no puede ser contenido dentro de una columna tipo text")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: resta, multiplicacion, division, potencia, modulo no pueden operar valores text - integer")
return simb
elif (self.fila == 10 and self.columna == 2):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores text - bigInt")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.text)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.SUMA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.text)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.text)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo bitInt no puede ser contenido dentro de una columna tipo text")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: resta, multiplicacion, division, potencia, modulo no pueden operar valores text - smallInt")
return simb
elif (self.fila == 10 and self.columna == 3):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores text - decimal")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.text)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.SUMA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.text)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.text)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo decimal no puede ser contenido dentro de una columna tipo text")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: resta, multiplicacion, division, potencia, modulo no pueden operar valores text - decimal")
return simb
elif (self.fila == 10 and self.columna == 4):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores text - numeric")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.text)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.SUMA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.text)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.text)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo numeric no puede ser contenido dentro de una columna tipo text")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: resta, multiplicacion, division, potencia, modulo no pueden operar valores text - numeric")
return simb
elif (self.fila == 10 and self.columna == 5):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores text - real")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.text)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.SUMA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.text)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.text)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo real no puede ser contenido dentro de una columna tipo text")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: resta, multiplicacion, division, potencia, modulo no pueden operar valores text - real")
return simb
elif (self.fila == 10 and self.columna == 6):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores text - double")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.text)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.SUMA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.text)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.text)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo double no puede ser contenido dentro de una columna tipo text")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: resta, multiplicacion, division, potencia, modulo no pueden operar valores text - double")
return simb
elif (self.fila == 10 and self.columna == 7):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores text - money")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones: >, >=, <, <=, ==, <> no puede operar varlos text - money")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.SUMA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.text)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.text)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo money no puede ser contenido dentro de una columna tipo text")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: resta, multiplicacion, division, potencia, modulo no pueden operar valores text - money")
return simb
elif (self.fila == 10 and self.columna == 8):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores text - varchar")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.text)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.SUMA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.text)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.text)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.text)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.text)
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: resta, multiplicacion, division, potencia, modulo no pueden operar valores text - varchar")
return simb
elif (self.fila == 10 and self.columna == 9):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores text - char")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.text)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.SUMA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.text)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.text)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.text)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.text)
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: resta, multiplicacion, division, potencia, modulo no pueden operar valores text - char")
return simb
elif (self.fila == 10 and self.columna == 10):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores text - text")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.SUMA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.text)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.text)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.text)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.text)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.text)
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: resta, multiplicacion, division, potencia, modulo no pueden operar valores text - text")
return simb
elif (self.fila == 10 and self.columna == 11):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores text - date")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones: <, <=, >, >=, ==, <> no pueden operar valures text - date")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo date no puede ser contenido dentro de una columna tipo text")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: suma, resta, multiplicacion, division, potencia, modulo no pueden operar valores text - date")
return simb
elif (self.fila == 10 and self.columna == 12):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores text - time withOut time zone")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones: <, <=, >, >=, ==, <> no pueden operar valures text - time withOut time zone")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo time withOut time no puede ser contenido dentro de una columna tipo text")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: suma, resta, multiplicacion, division, potencia, modulo no pueden operar valores text - time withOut time zone")
return simb
elif (self.fila == 10 and self.columna == 13):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores text - time with time zone")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones: <, <=, >, >=, ==, <> no pueden operar valures text - time with time zone")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo time with time zone no puede ser contenido dentro de una columna tipo text")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: suma, resta, multiplicacion, division, potencia, modulo no pueden operar valores time text - with time zone ")
return simb
elif (self.fila == 10 and self.columna == 14):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores text - boolean")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones: <, <=, >, >=, ==, <> no pueden operar valures text - boolean")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo boolean no puede ser contenido dentro de una columna tipo text")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: suma, resta, multiplicacion, division, potencia, modulo no pueden operar valores text - boolean")
return simb
# ********************************************************************************************
# ************************************* FILA 11 ***********************************************
# #*******************************************************************************************
if (self.fila == 11 and self.columna == 0):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores date - smallInt")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operacion: <, <=, >, >=, ==, <> no puede operar valores date - smallInt")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.SUMA or tipoOperacion==tipoSimbolo.TipoSimbolo.RESTA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.date)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.date)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo smallInt no puede ser contenido dentro de una columna tipo date")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: multiplicacion, division, potencia, modulo no pueden operar valores date - smallInt")
return simb
elif (self.fila == 11 and self.columna == 1):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores date - integer")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operacion: <, <=, >, >=, ==, <> no puede operar valores date - integer")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.SUMA or tipoOperacion==tipoSimbolo.TipoSimbolo.RESTA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.date)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.date)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo integer no puede ser contenido dentro de una columna tipo date")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: multiplicacion, division, potencia, modulo no pueden operar valores date - integer")
return simb
elif (self.fila == 11 and self.columna == 2):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores date - bigInit")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operacion: <, <=, >, >=, ==, <> no puede operar valores date - bigInit")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.SUMA or tipoOperacion==tipoSimbolo.TipoSimbolo.RESTA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.date)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.date)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo bigInit no puede ser contenido dentro de una columna tipo date")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: multiplicacion, division, potencia, modulo no pueden operar valores date - bigInit")
return simb
elif (self.fila == 11 and self.columna == 3):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores date - decimal")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operacion: <, <=, >, >=, ==, <> no puede operar valores date - decimal")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.SUMA or tipoOperacion==tipoSimbolo.TipoSimbolo.RESTA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.date)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.date)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo decimal no puede ser contenido dentro de una columna tipo date")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: multiplicacion, division, potencia, modulo no pueden operar valores date - decimal")
return simb
elif (self.fila == 11 and self.columna == 4):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores date - numeric")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operacion: <, <=, >, >=, ==, <> no puede operar valores date - numeric")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.SUMA or tipoOperacion==tipoSimbolo.TipoSimbolo.RESTA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.date)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.date)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo numeric no puede ser contenido dentro de una columna tipo date")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: multiplicacion, division, potencia, modulo no pueden operar valores date - numeric")
return simb
elif (self.fila == 11 and self.columna == 5):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores date - real")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operacion: <, <=, >, >=, ==, <> no puede operar valores date - real")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.SUMA or tipoOperacion==tipoSimbolo.TipoSimbolo.RESTA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.date)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.date)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo real no puede ser contenido dentro de una columna tipo date")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: multiplicacion, division, potencia, modulo no pueden operar valores date - real")
return simb
elif (self.fila == 11 and self.columna == 6):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores date - double")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operacion: <, <=, >, >=, ==, <> no puede operar valores date - double")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.SUMA or tipoOperacion==tipoSimbolo.TipoSimbolo.RESTA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.date)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.date)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo double no puede ser contenido dentro de una columna tipo date")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: multiplicacion, division, potencia, modulo no pueden operar valores date - double")
return simb
elif (self.fila == 11 and self.columna == 7):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores date - money")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operacion: <, <=, >, >=, ==, <> no puede operar valores date - money")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo money no puede ser contenido dentro de una columna tipo date")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: suma, resta, multiplicacion, division, potencia, modulo no pueden operar valores date - money")
return simb
elif (self.fila == 11 and self.columna == 8):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores date - varchar")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones: <, <=, >, >=, ==, <> no pueden operar valores date - varchar")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo varchar no puede ser contenido dentro de una columna tipo date")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: suma, resta, multiplicacion, division, potencia, modulo no pueden operar valores date - varchar")
return simb
elif (self.fila == 11 and self.columna == 9):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores date - char")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones: <, <=, >, >=, ==, <> no pueden operar valores date - char")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo char no puede ser contenido dentro de una columna tipo date")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: suma, resta, multiplicacion, division, potencia, modulo no pueden operar valores date - char")
return simb
elif (self.fila == 11 and self.columna == 10):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores date - text")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones: <, <=, >, >=, ==, <> no pueden operar valores date - text")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo text no puede ser contenido dentro de una columna tipo date")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: suma, resta, multiplicacion, division, potencia, modulo no pueden operar valores date - text")
return simb
elif (self.fila == 11 and self.columna == 11):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores date - date")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.date)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.date)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.date)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.date)
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: suma, resta, multiplicacion, division, potencia, modulo no pueden operar valores date - date")
return simb
elif (self.fila == 11 and self.columna == 12):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores date - time withOut time zone")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones: <, <=, >, >=, ==, <> no pueden operar valures date - time withOut time zone")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo time withOut time no puede ser contenido dentro de una columna tipo date")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: suma, resta, multiplicacion, division, potencia, modulo no pueden operar valores date - time withOut time zone")
return simb
elif (self.fila == 11 and self.columna == 13):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores date - time with time zone")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones: <, <=, >, >=, ==, <> no pueden operar valores date - time with time zone")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo time with time zone no puede ser contenido dentro de una columna tipo date")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: suma, resta, multiplicacion, division, potencia, modulo no pueden operar valores time date - with time zone ")
return simb
elif (self.fila == 11 and self.columna == 14):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores date - boolean")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones: <, <=, >, >=, ==, <> no pueden operar valores date - boolean")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo boolean no puede ser contenido dentro de una columna tipo date")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: suma, resta, multiplicacion, division, potencia, modulo no pueden operar valores date - boolean")
return simb
        # ********************************************************************************************
        # ************************************* FILA 12 **********************************************
        # ********************************************************************************************
if (self.fila == 12 and self.columna == 0):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores time withOut time zone - smallInt")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operacion: <, <=, >, >=, ==, <> no puede operar valores time withOut time zone - smallInt")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.SUMA or tipoOperacion==tipoSimbolo.TipoSimbolo.RESTA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.time_No_zone)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.time_No_zone)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo smallInt no puede ser contenido dentro de una columna tipo time withOut time zone")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: multiplicacion, division, potencia, modulo no pueden operar valores time withOut time zone - smallInt")
return simb
elif (self.fila == 12 and self.columna == 1):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores time withOut time zone - integer")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operacion: <, <=, >, >=, ==, <> no puede operar valores time withOut time zone - integer")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.SUMA or tipoOperacion==tipoSimbolo.TipoSimbolo.RESTA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.time_No_zone)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.time_No_zone)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo integer no puede ser contenido dentro de una columna tipo time withOut time zone")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: multiplicacion, division, potencia, modulo no pueden operar valores time withOut time zone - integer")
return simb
elif (self.fila == 12 and self.columna == 2):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores time withOut time zone - bigInit")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operacion: <, <=, >, >=, ==, <> no puede operar valores time withOut time zone - bigInit")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.SUMA or tipoOperacion==tipoSimbolo.TipoSimbolo.RESTA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.time_No_zone)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.time_No_zone)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo bigInit no puede ser contenido dentro de una columna tipo time withOut time zone")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: multiplicacion, division, potencia, modulo no pueden operar valores time withOut time zone - bigInit")
return simb
elif (self.fila == 12 and self.columna == 3):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores time withOut time zone - decimal")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operacion: <, <=, >, >=, ==, <> no puede operar valores time withOut time zone - decimal")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.SUMA or tipoOperacion==tipoSimbolo.TipoSimbolo.RESTA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.time_No_zone)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.time_No_zone)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo decimal no puede ser contenido dentro de una columna tipo time withOut time zone")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: multiplicacion, division, potencia, modulo no pueden operar valores time withOut time zone - decimal")
return simb
elif (self.fila == 12 and self.columna == 4):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores time withOut time zone - numeric")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operacion: <, <=, >, >=, ==, <> no puede operar valores time withOut time zone - numeric")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.SUMA or tipoOperacion==tipoSimbolo.TipoSimbolo.RESTA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.time_No_zone)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.time_No_zone)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo numeric no puede ser contenido dentro de una columna tipo time withOut time zone")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: multiplicacion, division, potencia, modulo no pueden operar valores time withOut time zone - numeric")
return simb
elif (self.fila == 12 and self.columna == 5):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores time withOut time zone - real")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operacion: <, <=, >, >=, ==, <> no puede operar valores time withOut time zone - real")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.SUMA or tipoOperacion==tipoSimbolo.TipoSimbolo.RESTA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.time_No_zone)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.time_No_zone)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo real no puede ser contenido dentro de una columna tipo time withOut time zone")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: multiplicacion, division, potencia, modulo no pueden operar valores time withOut time zone - real")
return simb
elif (self.fila == 12 and self.columna == 6):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores time withOut time zone - double")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operacion: <, <=, >, >=, ==, <> no puede operar valores time withOut time zone - double")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.SUMA or tipoOperacion==tipoSimbolo.TipoSimbolo.RESTA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.time_No_zone)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.time_No_zone)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo double no puede ser contenido dentro de una columna tipo time withOut time zone")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: multiplicacion, division, potencia, modulo no pueden operar valores time withOut time zone - double")
return simb
elif (self.fila == 12 and self.columna == 7):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores time withOut time zone - money")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operacion: <, <=, >, >=, ==, <> no puede operar valores time withOut time zone - money")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo money no puede ser contenido dentro de una columna tipo time withOut time zone")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: suma, resta, multiplicacion, division, potencia, modulo no pueden operar valores time withOut time zone - money")
return simb
elif (self.fila == 12 and self.columna == 8):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores time withOut time zone - varchar")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones: <, <=, >, >=, ==, <> no pueden operar valores time withOut time out - varchar")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo varchar no puede ser contenido dentro de una columna tipo time withOut time zone")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: suma, resta, multiplicacion, division, potencia, modulo no pueden operar valores time withOut time zone - varchar")
return simb
elif (self.fila == 12 and self.columna == 9):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores time withOut time zone - char")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones: <, <=, >, >=, ==, <> no pueden operar valores time withOut time zone - char")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo char no puede ser contenido dentro de una columna tipo time withOut time zone")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: suma, resta, multiplicacion, division, potencia, modulo no pueden operar valores time withOut time zone - char")
return simb
elif (self.fila == 12 and self.columna == 10):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores time withOut - text")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones: <, <=, >, >=, ==, <> no pueden operar valores time withOut time zone - text")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo text no puede ser contenido dentro de una columna tipo time withOut time zone")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: suma, resta, multiplicacion, division, potencia, modulo no pueden operar valores time withOut time zone - text")
return simb
elif (self.fila == 12 and self.columna == 11):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores time withOut time zone - date")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones: <, <=, >, >=, ==, <> no pueden operar valures time withOut time zone - date")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo date no puede ser contenido dentro de una columna tipo time withOut time zone")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: suma, resta, multiplicacion, division, potencia, modulo no pueden operar valores time withOut time zone - date")
return simb
elif (self.fila == 12 and self.columna == 12):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores time withOut time zone - time withOut time zone")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.time_No_zone)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.time_No_zone)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.time_No_zone)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.time_No_zone)
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: suma, resta, multiplicacion, division, potencia, modulo no pueden operar valores time withOut time zone - time withOut time zone")
return simb
elif (self.fila == 12 and self.columna == 13):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores time withOut time zone - time with time zone")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.time_No_zone)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.time_No_zone)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo time with time zone no puede ser contenido dentro de una columna tipo time withOut time zone")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: suma, resta, multiplicacion, division, potencia, modulo no pueden operar valores time withOut time zone - time with time zone")
return simb
elif (self.fila == 12 and self.columna == 14):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores time wtihOut time zone - boolean")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones: <, <=, >, >=, ==, <> no pueden operar valores time withOut time zone - boolean")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo boolean no puede ser contenido dentro de una columna tipo time withOut time zone")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: suma, resta, multiplicacion, division, potencia, modulo no pueden operar valores time withOut time zone - boolean")
return simb
        # ********************************************************************************************
        # ************************************* FILA 13 **********************************************
        # ********************************************************************************************
if (self.fila == 13 and self.columna == 0):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores time with time zone - smallInt")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operacion: <, <=, >, >=, ==, <> no puede operar valores time with time zone - smallInt")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.SUMA or tipoOperacion==tipoSimbolo.TipoSimbolo.RESTA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.time_si_zone)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.time_si_zone)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo smallInt no puede ser contenido dentro de una columna tipo time with time zone")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: multiplicacion, division, potencia, modulo no pueden operar valores time with time zone - smallInt")
return simb
elif (self.fila == 13 and self.columna == 1):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores time with time zone - integer")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operacion: <, <=, >, >=, ==, <> no puede operar valores time with time zone - integer")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.SUMA or tipoOperacion==tipoSimbolo.TipoSimbolo.RESTA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.time_si_zone)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.time_si_zone)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo integer no puede ser contenido dentro de una columna tipo time with time zone")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: multiplicacion, division, potencia, modulo no pueden operar valores time with time zone - integer")
return simb
elif (self.fila == 13 and self.columna == 2):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores time with time zone - bigInit")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operacion: <, <=, >, >=, ==, <> no puede operar valores time with time zone - bigInit")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.SUMA or tipoOperacion==tipoSimbolo.TipoSimbolo.RESTA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.time_si_zone)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.time_si_zone)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo bigInit no puede ser contenido dentro de una columna tipo time with time zone")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: multiplicacion, division, potencia, modulo no pueden operar valores time with time zone - bigInit")
return simb
elif (self.fila == 13 and self.columna == 3):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores time with time zone - decimal")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operacion: <, <=, >, >=, ==, <> no puede operar valores time with time zone - decimal")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.SUMA or tipoOperacion==tipoSimbolo.TipoSimbolo.RESTA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.time_si_zone)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.time_si_zone)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo decimal no puede ser contenido dentro de una columna tipo time with time zone")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: multiplicacion, division, potencia, modulo no pueden operar valores time with time zone - decimal")
return simb
elif (self.fila == 13 and self.columna == 4):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores time with time zone - numeric")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operacion: <, <=, >, >=, ==, <> no puede operar valores time with time zone - numeric")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.SUMA or tipoOperacion==tipoSimbolo.TipoSimbolo.RESTA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.time_si_zone)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.time_si_zone)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo numeric no puede ser contenido dentro de una columna tipo time with time zone")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: multiplicacion, division, potencia, modulo no pueden operar valores time with time zone - numeric")
return simb
elif (self.fila == 13 and self.columna == 5):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores time with time zone - real")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operacion: <, <=, >, >=, ==, <> no puede operar valores time with time zone - real")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.SUMA or tipoOperacion==tipoSimbolo.TipoSimbolo.RESTA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.time_si_zone)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.time_si_zone)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo real no puede ser contenido dentro de una columna tipo time with time zone")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: multiplicacion, division, potencia, modulo no pueden operar valores time with time zone - real")
return simb
elif (self.fila == 13 and self.columna == 6):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores time with time zone - double")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operacion: <, <=, >, >=, ==, <> no puede operar valores time with time zone - double")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.SUMA or tipoOperacion==tipoSimbolo.TipoSimbolo.RESTA:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.time_si_zone)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.time_si_zone)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo double no puede ser contenido dentro de una columna tipo time with time zone")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: multiplicacion, division, potencia, modulo no pueden operar valores time with time zone - double")
return simb
elif (self.fila == 13 and self.columna == 7):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores time with time zone - money")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operacion: <, <=, >, >=, ==, <> no puede operar valores time with time zone - money")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo money no puede ser contenido dentro de una columna tipo time with time zone")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: suma, resta, multiplicacion, division, potencia, modulo no pueden operar valores time with time zone - money")
return simb
elif (self.fila == 13 and self.columna == 8):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores time with time zone - varchar")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones: <, <=, >, >=, ==, <> no pueden operar valores time with time out - varchar")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo varchar no puede ser contenido dentro de una columna tipo time with time zone")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: suma, resta, multiplicacion, division, potencia, modulo no pueden operar valores time with time zone - varchar")
return simb
elif (self.fila == 13 and self.columna == 9):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores time with time zone - char")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones: <, <=, >, >=, ==, <> no pueden operar valores time with time zone - char")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo char no puede ser contenido dentro de una columna tipo time with time zone")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: suma, resta, multiplicacion, division, potencia, modulo no pueden operar valores time with time zone - char")
return simb
elif (self.fila == 13 and self.columna == 10):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores time with - text")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones: <, <=, >, >=, ==, <> no pueden operar valores time with time zone - text")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo text no puede ser contenido dentro de una columna tipo time with time zone")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: suma, resta, multiplicacion, division, potencia, modulo no pueden operar valores time with time zone - text")
return simb
elif (self.fila == 13 and self.columna == 11):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores time with time zone - date")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones: <, <=, >, >=, ==, <> no pueden operar valures time with time zone - date")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo date no puede ser contenido dentro de una columna tipo time with time zone")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: suma, resta, multiplicacion, division, potencia, modulo no pueden operar valores time with time zone - date")
return simb
elif (self.fila == 13 and self.columna == 12):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores time with time zone - time with time zone")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones <, <=, >, >=, == , <> no pueden operar valores time with time zone - time withOut time zone")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo time withOut time zone no puede ser contenido dentro de una columna time with time zone")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: suma, resta, multiplicacion, division, potencia, modulo no pueden operar valores time with time zone - time withOut time zone")
return simb
elif (self.fila == 13 and self.columna == 13):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores time with time zone - time with time zone")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.time_si_zone)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.time_si_zone)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.time_si_zone)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.time_si_zone)
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: suma, resta, multiplicacion, division, potencia, modulo no pueden operar valores time with time zone - time with time zone")
return simb
elif (self.fila == 13 and self.columna == 14):
if(tipoSimbolo==tipoSimbolo.TipoSimbolo.AND or tipoSimbolo==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores time with time zone - boolean")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones: <, <=, >, >=, ==, <> no pueden operar valores time with time zone - boolean")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo boolean no puede ser contenido dentro de una columna tipo time with time zone")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: suma, resta, multiplicacion, division, potencia, modulo no pueden operar valores time with time zone - boolean")
return simb
# ********************************************************************************************
# ************************************* FILA 14 ***********************************************
# ********************************************************************************************
if (self.fila == 14 and self.columna == 0):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores time with time boolean - smallInt")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operacion: <, <=, >, >=, ==, <> no puede operar valores time with time boolean - smallInt")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo smallInt no puede ser contenido dentro de una columna tipo time with time boolean")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: suma, resta, multiplicacion, division, potencia, modulo no pueden operar valores boolean - smallInt")
return simb
elif (self.fila == 14 and self.columna == 1):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores time with time boolean - integer")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operacion: <, <=, >, >=, ==, <> no puede operar valores time with time boolean - integer")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo integer no puede ser contenido dentro de una columna tipo time with time boolean")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: suma, resta, multiplicacion, division, potencia, modulo no pueden operar valores boolean - integer")
return simb
elif (self.fila == 14 and self.columna == 2):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores time with time boolean - bigInt")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operacion: <, <=, >, >=, ==, <> no puede operar valores time with time boolean - bigInt")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo bigInit no puede ser contenido dentro de una columna tipo time with time boolean")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: suma, resta, multiplicacion, division, potencia, modulo no pueden operar valores boolean - bigInt")
return simb
elif (self.fila == 14 and self.columna == 3):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores time with time boolean - decimal")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operacion: <, <=, >, >=, ==, <> no puede operar valores time with time boolean - decimal")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo decimal no puede ser contenido dentro de una columna tipo time with time boolean")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: suma, resta, multiplicacion, division, potencia, modulo no pueden operar valores boolean - decimal")
return simb
elif (self.fila == 14 and self.columna == 4):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores time with time boolean - numeric")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operacion: <, <=, >, >=, ==, <> no puede operar valores time with time boolean - numericc")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo numeric no puede ser contenido dentro de una columna tipo time with time boolean")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: suma, resta, multiplicacion, division, potencia, modulo no pueden operar valores boolean - numeric")
return simb
elif (self.fila == 14 and self.columna == 5):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores time with time boolean - real")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operacion: <, <=, >, >=, ==, <> no puede operar valores time with time boolean - real")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo real no puede ser contenido dentro de una columna tipo time with time boolean")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: suma, resta, multiplicacion, division, potencia, modulo no pueden operar valores boolean - real")
return simb
elif (self.fila == 14 and self.columna == 6):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores time with time boolean - double")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operacion: <, <=, >, >=, ==, <> no puede operar valores time with time boolean - double")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo double no puede ser contenido dentro de una columna tipo time with time boolean")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: suma, resta, multiplicacion, division, potencia, modulo no pueden operar valores boolean - double")
return simb
elif (self.fila == 14 and self.columna == 7):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores time with time boolean - money")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operacion: <, <=, >, >=, ==, <> no puede operar valores time with time boolean - money")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo money no puede ser contenido dentro de una columna tipo time with time boolean")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: suma, resta, multiplicacion, division, potencia, modulo no pueden operar valores boolean - money")
return simb
elif (self.fila == 14 and self.columna == 8):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores time with time boolean - varchar")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operacion: <, <=, >, >=, ==, <> no puede operar valores time with time boolean - varchar")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo varchar no puede ser contenido dentro de una columna tipo time with time boolean")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: suma, resta, multiplicacion, division, potencia, modulo no pueden operar valores boolean- varchar")
return simb
elif (self.fila == 14 and self.columna == 9):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores time with time boolean - char")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operacion: <, <=, >, >=, ==, <> no puede operar valores time with time boolean - char")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo char no puede ser contenido dentro de una columna tipo time with time boolean")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: suma, resta, multiplicacion, division, potencia, modulo no pueden operar valores boolean - char")
return simb
elif (self.fila == 14 and self.columna == 10):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores time with time boolean - text")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operacion: <, <=, >, >=, ==, <> no puede operar valores time with time boolean - text")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo text no puede ser contenido dentro de una columna tipo time with time boolean")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: suma, resta, multiplicacion, division, potencia, modulo no pueden operar valores boolean - text")
return simb
elif (self.fila == 14 and self.columna == 11):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores time with time boolean - date")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operacion: <, <=, >, >=, ==, <> no puede operar valores time with time boolean - date")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo date no puede ser contenido dentro de una columna tipo time with time boolean")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: suma, resta, multiplicacion, division, potencia, modulo no pueden operar valores boolean - date")
return simb
elif (self.fila == 14 and self.columna == 12):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores time with time boolean - time withOut time zone")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operacion: <, <=, >, >=, ==, <> no puede operar valores time with time boolean - time withOut time zone")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo time withOut time zone no puede ser contenido dentro de una columna tipo time with time boolean")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: suma, resta, multiplicacion, division, potencia, modulo no pueden operar valores boolean - time withOut time zone")
return simb
elif (self.fila == 14 and self.columna == 13):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setDescripcionError("Las operaciones And, Or no pueden operar valores time with time boolean - time with time zone")
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operacion: <, <=, >, >=, ==, <> no puede operar valores time with time boolean - time with time zone")
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setDescripcionError("Valor tipo time with time zone no puede ser contenido dentro de una columna tipo time with time boolean")
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: suma, resta, multiplicacion, division, potencia, modulo no pueden operar valores boolean - time with time zone")
return simb
elif (self.fila == 14 and self.columna == 14):
if(tipoOperacion==tipoSimbolo.TipoSimbolo.AND or tipoOperacion==tipoSimbolo.TipoSimbolo.OR):
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.boolean)
return simb
elif tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.MAYOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_QUE or tipoOperacion==tipoSimbolo.TipoSimbolo.MENOR_IGUAL or tipoOperacion==tipoSimbolo.TipoSimbolo.IGUALACION or tipoOperacion==tipoSimbolo.TipoSimbolo.DISTINTO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.boolean)
return simb
elif tipoOperacion == tipoSimbolo.TipoSimbolo.COLUMNA_DATO:
simb = simbolo.Simbolo()
simb.setTipoDatoRetorno(simboloColumna.TiposDatos.boolean)
simb.setTipoDatosCasteo(simboloColumna.TiposDatos.boolean)
return simb
else:
simb = simbolo.Simbolo()
simb.setDescripcionError("La operaciones: suma, resta, multiplicacion, division, potencia, modulo no pueden operar valores boolean - boolean")
return simb
else:
return None
| 74.119174
| 330
| 0.644202
| 32,669
| 362,591
| 7.109462
| 0.003092
| 0.230742
| 0.292949
| 0.202158
| 0.998265
| 0.998127
| 0.997219
| 0.997219
| 0.996241
| 0.984776
| 0
| 0.002346
| 0.272445
| 362,591
| 4,891
| 331
| 74.134328
| 0.878073
| 0.010745
| 0
| 0.810915
| 0
| 0.013856
| 0.155419
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.000426
| false
| 0
| 0.00064
| 0
| 0.219569
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
1b0e520c702425e966de423964e3f4834ea7de32
| 53,896
|
py
|
Python
|
notebooks/Compare results_physiology.py
|
patrickmineault/brain-scorer
|
5e882bafb323ff58028ade2394d18176e6c02e80
|
[
"MIT"
] | 7
|
2021-07-22T02:19:14.000Z
|
2022-02-21T15:07:35.000Z
|
notebooks/Compare results_physiology.py
|
patrickmineault/your-head-is-there-to-move-you-around
|
5e882bafb323ff58028ade2394d18176e6c02e80
|
[
"MIT"
] | null | null | null |
notebooks/Compare results_physiology.py
|
patrickmineault/your-head-is-there-to-move-you-around
|
5e882bafb323ff58028ade2394d18176e6c02e80
|
[
"MIT"
] | 2
|
2021-07-22T02:27:17.000Z
|
2022-03-21T02:08:42.000Z
|
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.13.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown]
# # Core table of results
# %%
import collections
import numpy as np
import pickle
import torch
from tqdm import tqdm
import wandb
import sys
sys.path.append('../')
import models
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from python_dict_wrapper import wrap
import torch
import seaborn as sns
import matplotlib
matplotlib.rcParams["font.family"] = "Arial"
matplotlib.rcParams['font.sans-serif'] = "Arial"
sns.set(font="Arial", style="ticks")
# %%
# Fetch all finished ridge-regression runs (PCA to 500 dims, downsample
# aggregator) for the five physiology datasets from the wandb project.
api = wandb.Api()
runs = api.runs("pmin/train_fmri_convex.py",
                {"$and":
                 [
                     {"config.dataset": {"$in": ["pvc1-repeats", "pvc4", "mt1_norm_neutralbg", "mt2", "mst_norm_neutralbg"]}},
                     {"config.aggregator": "downsample"},
                     {"config.pca": 500},
                     {"state": "finished"},
                 ]
                 }
                )
archives = {}
# Obtained using Sahani and Linden (2001) estimator
# Per-cell noise ceilings (max explainable R^2) for the repeat datasets;
# datasets absent from this dict fall back to a ceiling of 1.
maxr2 = {'pvc1-repeats': np.array([0.5744896820822633, 0.402217859192537, 0.0724421977946134, 0.6354586579714803, 0.9058806806864625, 0.8336628618135512, 0.17459746284283278, 0.06161188996050254, 0.8174059944033387, 0.06870710384497566, 0.42338794910123256, -0.018763251412587834, 0.5083599168053451, 0.015472472397394084, 0.3707145547472778, 0.37212756865521673, 0.20308082812019865, 0.6502611134346646, 0.5865097550973541, 0.8000969058326385, 0.19490149480742838, 0.16097380666220693, 0.35043086612449575]),
         'mst_norm_neutralbg': np.array([0.9238747684974108, 0.6333767821204601, 0.7837951303681188, 0.5308424287543246, 0.6260003702050283, 0.1227297633207262, -0.008784291339639153, 0.969760707867131, 0.969760707867131, 0.934727465911976, 0.7529227717733953, 0.5552180843492884, 0.9697541232379692, 0.9697541232379692, 0.9568011354583447, 0.8665498339085808, 0.9760604082608981, 0.8886188901104971, 0.939593861110838, 0.7750084758128198, 0.8745374754975368, 0.31508570581933376, 0.7633260217238106, 0.42445345198910983, 0.37880214350624575, 0.7589317052993544, 0.8804914772347142, 0.6289593364765851, 0.8682772295743525, 0.953022447487612, 0.6817736539099156, 0.7436142187949378, 0.7436142187949378, 0.6583185135218004, -0.24862120717043004, 0.7360928676249001])}
print("Found %i" % len(runs))
for run in tqdm(runs):
    # SlowFast logs both pathways under one running 'layer' index; split it
    # into two pseudo-networks (layers >= 17 belong to the Fast pathway).
    if run.config['features'] == 'SlowFast':
        if run.config['layer'] > 16:
            run.config['features'] = 'SlowFast_Fast'
            run.config['layer'] = run.config['layer'] - 17
        else:
            run.config['features'] = 'SlowFast_Slow'
    unique_name = f"{run.config['features']}_layer{int(run.config['layer']):02}_{run.config['dataset']}_{(run.config['subset'])}"
    # Keep only the first run seen per (features, layer, dataset, subset).
    if unique_name in archives:
        continue
    if 'corrs_report' in run.summary:
        maxr2_ = [1]
        if run.config['dataset'] in maxr2.keys():
            maxr2_ = [maxr2[run.config['dataset']][int(run.summary['subset'])]]
        archives[unique_name] = {'corrs_report': [run.summary['corrs_report']],
                                 'maxr2': maxr2_,
                                 'config': run.config,
                                 }
# %% [markdown]
# Normalize names.
# %%
# Model family for each raw run feature name; used to group related models.
# Fix: the original literal listed the 'Slow' key twice (identical value);
# the duplicate is removed.
families = {'I3D': 'SlowFast',
            'MotionNet': 'control',
            'SlowFast_Fast': 'SlowFast',
            'SlowFast_Slow': 'SlowFast',
            'Slow': 'SlowFast',
            'gaborpyramid3d': 'shallow',
            'gaborpyramid3d_motionless': 'control',
            'r3d_18': 'r3d',
            'mc3_18': 'r3d',
            'r2plus1d_18': 'r3d',
            'ShallowMonkeyNet_pvc1': 'dorsalnets',
            'ShallowMonkeyNet_pvc4': 'dorsalnets',
            'resnet18': 'control',
            'ShiftNet': 'control',
            'V1Net': 'dorsalnets',
            'dorsalnet': 'dorsalnets',
            'airsim_00': 'dorsalnets',
            'airsim_02': 'dorsalnets',
            'airsim_03': 'dorsalnets',
            'airsim_04': 'dorsalnets',
            'cpc': 'control',
            'cpc_01': 'control',
            'cpc_02': 'control'}
# Map raw run feature names to the shorter names used in figures/tables.
friendly_names = {'I3D': 'i3d',
                  'MotionNet': 'motionnet',
                  'SlowFast_Fast': 'slowfast',
                  'SlowFast_Slow': 'slowfast_slow',
                  'Slow': 'slow',
                  'gaborpyramid3d': 'gaborpyramid3d',
                  'gaborpyramid3d_motionless': 'gaborpyramid3d_motionless',
                  'r3d_18': 'r3d_18',
                  'mc3_18': 'mc3_18',
                  'r2plus1d_18': 'r2plus1d_18',
                  'ShallowMonkeyNet_pvc1': 'ShallowMonkeyNet_pvc1',
                  'ShallowMonkeyNet_pvc4': 'ShallowMonkeyNet_pvc4',
                  'resnet18': 'resnet18',
                  'ShiftNet': 'ShiftNet',
                  'V1Net': 'V1Net',
                  # NOTE(review): only 'airsim_04' maps to 'dorsalnet' (singular),
                  # the sole dorsal entry in the whitelist below; the other dorsal
                  # variants map to 'dorsalnets' and so get filtered out of the
                  # tables. Looks intentional (airsim_04 is "the" DorsalNet) —
                  # confirm.
                  'dorsalnet': 'dorsalnets',
                  'airsim_00': 'dorsalnets',
                  'airsim_02': 'dorsalnets',
                  'airsim_03': 'dorsalnets',
                  'airsim_04': 'dorsalnet',
                  'cpc': 'cpc',
                  'cpc_01': 'cpc_airsim',
                  'cpc_02': 'cpc_ucf'}
# Layers retained per (friendly-named) model; models absent here are dropped.
whitelist = {
    'i3d': [0, 1, 2, 4, 6, 8],
    'slowfast': [0, 1, 2, 4, 6, 8],
    'motionnet': [0, 1],
    'gaborpyramid3d_motionless': [0],
    'gaborpyramid3d': [0],
    'mc3_18': [0, 1, 2, 4, 6, 8, 10, 12],
    'r2plus1d_18': [0, 1, 2, 4, 6, 8, 10, 12],
    'r3d_18': [0, 1, 2, 4, 6, 8, 10, 12],
    'cpc_airsim': [0, 1, 2, 4, 6, 8, 10, 12],
    'cpc_ucf': [0, 1, 2, 4, 6, 8, 10, 12],
    'dorsalnet': [0, 1, 2, 3, 4, 5],
}
# Short dataset labels used in figures/tables.
dataset_names = {
    "pvc1-repeats": "pvc1",
    "pvc4": "pvc4",
    "mt1_norm_neutralbg": "mt1",
    "mt2": "mt2",
    "mst_norm_neutralbg": "mst"
}
# Flatten archives into one row per (run, cell) and build the main DataFrame.
results = []
for k, v in archives.items():
    fam = families[v['config']['features']]
    try:
        # Prefer the numeric suffix of layer_name when present.
        layer_num = int(v['config']['layer_name'][-2:])
    except (KeyError, ValueError):
        layer_num = int(v['config']['layer'])
    friendly_name = friendly_names[v['config']['features']]
    if friendly_name not in whitelist.keys() or layer_num not in whitelist[friendly_name]:
        continue
    # rnorm = reported correlation normalized by the noise ceiling, with the
    # max R^2 floored at .1 so low-ceiling cells don't blow up.
    # subset + i*100 builds a unique per-cell id.
    results += [{'key': k,
                 'family': fam,
                 'dataset': dataset_names[v['config']['dataset']],
                 'subset': int(v['config']['subset']) + i*100,
                 'features': friendly_name,
                 'layer': layer_num,
                 'features_layer': f"{friendly_name}_{layer_num:02}",
                 'report_corr': x,
                 'max_r2': np.sqrt(max([m, .1])),
                 'rnorm': x / np.sqrt(max([m, .1]))} for i, (x, m) in enumerate(zip(v['corrs_report'], v['maxr2']))]
df = pd.DataFrame(results)
df = df.sort_values('key')
# Keep only (dataset, features, layer) groups with the expected full number
# of cells per dataset (23/25/36/44/84).
nums = df.groupby(['dataset', 'features', 'layer']).subset.count().reset_index()
df = df.merge(nums, left_on=('dataset', 'features', 'layer'), right_on=('dataset', 'features', 'layer'), suffixes=('', '_overall'))
df = df[df.subset_overall.isin([23, 25, 36, 44, 84])]
df.groupby(['dataset', 'subset_overall']).first()
# %% [markdown]
# # Key measurements table
# %%
#df_ = df.groupby(['dataset', 'features', 'layer']).rnorm.agg(['mean', 'sem', 'count'])
#df_.reset_index().groupby(['dataset', 'features'])['mean'].max()
datasets = df.dataset.unique()
perf = []
for dataset in datasets:
    df_ = df.query(f'dataset == "{dataset}"')
    # Mean and sem of rnorm per (features, layer); pick each model's best layer.
    df_m = df_.pivot_table('rnorm', 'features', 'layer')
    df_s = df_.pivot_table('rnorm', 'features', 'layer', aggfunc='sem')
    m = np.diag(df_m.loc[df_m.index, df_m.idxmax(axis=1)].values)
    s = np.diag(df_s.loc[df_s.index, df_m.idxmax(axis=1)].values)
    # Overall best (features, layer) combo by mean reported correlation.
    series = df_.groupby(['features_layer']).mean().report_corr
    best = series.index[series.argmax()]
    # Paired per-cell difference to the best combo -> z score per model.
    df_ = pd.merge(df_, df_.query(f'features_layer == "{best}"')[['subset', 'rnorm']], left_on='subset', right_on='subset', suffixes=('', '_best'))
    df_['rnorm_delta'] = df_['rnorm'] - df_['rnorm_best']
    df_m_delta = df_.pivot_table('rnorm_delta', 'features', 'layer')
    df_s_delta = df_.pivot_table('rnorm_delta', 'features', 'layer', aggfunc='sem') + 1e-6
    df_ = (df_m_delta / df_s_delta).max(axis=1)
    perf += [{'dataset': dataset,
              'features': f,
              'z': df_.loc[f],
              'm': mm,
              's': ss} for f, mm, ss in zip(df_m.index, m, s)]
df_m = pd.DataFrame(perf).pivot('features', 'dataset', 'm')
df_s = pd.DataFrame(perf).pivot('features', 'dataset', 's')
df_z = pd.DataFrame(perf).pivot('features', 'dataset', 'z')
models = ['slowfast', 'i3d', 'r3d_18', 'r2plus1d_18', 'mc3_18', 'cpc_ucf', 'cpc_airsim', 'gaborpyramid3d_motionless', 'gaborpyramid3d', 'motionnet', 'dorsalnet']
datasets = ['pvc1', 'pvc4', 'mt1', 'mt2', 'mst']
df_m = df_m[datasets].loc[models]
df_s = df_s[datasets].loc[models]
df_z = df_z[datasets].loc[models]
# Emit a LaTeX tabular; entries are bolded unless significantly worse than
# the best model (z < -1.96).
the_str = f"\\begin{{tabular}}{{{'l' * (df_m.shape[1] + 1)}}}\n\\toprule \n"
the_str += '{} & ' + ' & '.join(df_m.columns.tolist()).replace('\\', '\\\\').replace('_', '\_') + '\\\\\n'
the_str += '\midrule \n'
for idx, row in df_m.iterrows():
    vals = [idx.replace('_', '\_')]
    for feature, v in row.items():
        if np.isnan(v):
            vals.append('-')
        else:
            if df_z.loc[idx, feature].item() < -1.96:
                # [1:] strips the leading '0' from '0.xxx'.
                vals.append(f"{v:.3f} ({df_s.loc[idx, feature]:.3f})"[1:])
            else:
                vals.append(f"\\textbf{{{v:.3f}}} ({df_s.loc[idx, feature]:.3f})")
    the_str += ' & '.join(vals) + '\\\\\n'
the_str += "\end{tabular}\n"
print(the_str)
# %%
import scipy.stats
# Two-sided p-values from the one-sided z scores.
zvals = pd.DataFrame(2*scipy.stats.norm.cdf(df_z))
zvals.columns = df_z.columns
zvals.index = df_z.index
zvals
# %%
# Per-dataset scatter of each cell's rnorm: motionnet (x) vs dorsalnet (y),
# each at its own best layer.
datasets = df.dataset.unique()
perf = []
for dataset in datasets:
    df_ = df.query(f'dataset == "{dataset}"')
    df_m = df_.pivot_table('rnorm', 'features', 'layer')
    best_layer = df_m.idxmax(axis=1)
    good_combos = [f'{x}_{y:02}' for x, y in zip(best_layer.index, best_layer.values)]
    df_ = df_[df_.features_layer.isin(good_combos)]
    df_ = df_.pivot_table('rnorm', 'subset', 'features')
    plt.figure()
    g = sns.scatterplot(x='motionnet', y='dorsalnet', data=df_, ax=plt.gca())
    g.plot([-1, 1], [-1, 1], 'k-')  # identity line
    plt.axis('square')
    g.set_xlim([-.05, .8])
    g.set_ylim([-.05, .8])
    plt.title(dataset)
    nbetter = (df_.dorsalnet > df_.motionnet).sum()
    ntotal = df_.shape[0]
    print(f"{dataset}, cells better {nbetter}/{ntotal}")
# %% [markdown]
# # Bump plot
# Show the alignment of each area to dorsalnet layers.
# %%
df.dataset.unique()
# %%
# Collapse datasets into brain areas.
dmap = {'mst': 'mst', 'mt1': 'mt', 'mt2': 'mt', 'pvc1': 'v1', 'pvc4': 'v1'}
df['area'] = df.dataset.map(lambda x: dmap[x])
df_ = df.query('features == "dorsalnet"')
df_.groupby(['area', 'layer']).rnorm.mean()
def bs(df):
    """One bootstrap draw: resample the rows of `df` with replacement, then
    return the mean rnorm per (area, layer), normalized by each area's best
    layer (so the maximum within every area is 1)."""
    n_rows = df.shape[0]
    sample = df.copy().iloc[np.random.randint(0, n_rows, n_rows)]
    per_layer = sample.groupby(['area', 'layer']).rnorm.mean()
    return per_layer / per_layer.groupby('area').max()
# Bootstrap bump plot: layer-by-layer alignment of DorsalNet to each area,
# with 5-95% bootstrap bands. Also bootstraps each cell's best layer and
# draws the 95% CI of the mean-argmax per area.
dmap = {'pvc1': 'v1', 'pvc4': 'v1', 'mt1': 'mt', 'mt2': 'mt', 'mst': 'mst'}
df['area'] = df.dataset.map(lambda x: dmap[x])
df_ = df.query('features == "dorsalnet"')
df_ = df_.sort_values('area', ascending=True)
df_['rnorm_max'] = df_.rnorm
# 500 bootstrap draws of the per-(area, layer) normalized mean rnorm.
A = np.array([bs(df_).values for x in range(500)])
df__ = pd.DataFrame(A.T)
df__.index = bs(df_).index
# Fix: plt.figure was called twice here, leaving a stray empty figure.
plt.figure(figsize=(4, 3))
for area in ['v1', 'mt', 'mst']:
    # 5%/95% band and median across bootstrap draws.
    vals = np.quantile(df__.loc[area], [.05, .95, .5], axis=1).T
    plt.fill_between(df__.loc[area].index, vals[:, 0], vals[:, 1], alpha=.2)
    plt.plot(df__.loc[area].index, vals[:, 2])
sns.despine()
plt.xlabel('layer')
plt.ylabel('R relative to maximum for area')
plt.yticks([0, .2, .4, .6, .8, 1.0])
cols = sns.color_palette(as_cmap=True)
for i, area in enumerate(['v1', 'mt', 'mst']):
    df_ = df.query(f'report_corr > .01 and features == "dorsalnet" and area == "{area}"')
    # Fix: np.str was removed in NumPy 1.24; builtin str is equivalent here.
    df_['dataset_subset'] = df_.dataset + '_' + df_.subset.astype(str)
    v = df_.pivot('dataset_subset', 'layer', 'rnorm').values
    # Bootstrap the mean best-layer (argmax over layers) across cells.
    vals = []
    for n in range(10000):
        v_ = v[np.random.randint(low=0, high=v.shape[0], size=v.shape[0]), :].argmax(axis=1)
        vals.append(v_.mean())
    print(np.mean(vals))
    print(np.std(vals))
    q = np.quantile(vals, [.025, .975])
    # NOTE(review): `g` is whatever axes-level object an earlier cell left
    # behind; the area/color pairing via g.lines[2-i] depends on that —
    # confirm intended.
    plt.plot(q, (.2 + .1*i) * np.array([1, 1]), color=g.lines[2-i].get_color())
    plt.text((q[0] + q[1])/2, .225 + .1*i, area, {'ha': 'center'})
    print(np.quantile(vals, [.025, .5, .975]))
plt.ylim([0, 1.05])
plt.savefig('/home/pmin/paper-assets/figure-2-layer-alignment.pdf', bbox_inches='tight')
# %%
# Same bump plot, but rnorm rescaled by hand-picked per-area maxima and
# drawn with a seaborn lineplot instead of bootstrap bands.
dmap = {'pvc1': 'v1', 'pvc4': 'v1', 'mt1': 'mt', 'mt2': 'mt', 'mst': 'mst'}
df['area'] = df.dataset.map(lambda x: dmap[x])
df_ = df.query('features == "dorsalnet"')
df_ = df_.sort_values('area', ascending=True)
df_['rnorm_max'] = df_.rnorm
# Hand-measured per-area ceilings — presumably the max mean rnorm over
# layers for each area; TODO confirm where these constants come from.
df_.loc[df_.area == 'v1', 'rnorm_max'] /= .3618
df_.loc[df_.area == 'mt', 'rnorm_max'] /= .2954
df_.loc[df_.area == 'mst', 'rnorm_max'] /= .4540
plt.figure(figsize=(4, 3))
g = sns.lineplot(x='layer',
                 y='rnorm_max',
                 hue='area',
                 palette="Set2",
                 data=df_, legend=False, ax=plt.gca())
sns.despine()
plt.ylabel('R relative to maximum for area')
plt.yticks([0, .2, .4, .6, .8, 1.0])
cols = sns.color_palette(as_cmap=True)
for i, area in enumerate(['v1', 'mt', 'mst']):
    df_ = df.query(f'report_corr > .01 and features == "dorsalnet" and area == "{area}"')
    # Fix: np.str was removed in NumPy 1.24; builtin str is equivalent here.
    df_['dataset_subset'] = df_.dataset + '_' + df_.subset.astype(str)
    v = df_.pivot('dataset_subset', 'layer', 'rnorm').values
    # Bootstrap the mean best-layer (argmax over layers) across cells.
    vals = []
    for n in range(10000):
        v_ = v[np.random.randint(low=0, high=v.shape[0], size=v.shape[0]), :].argmax(axis=1)
        vals.append(v_.mean())
    print(np.mean(vals))
    print(np.std(vals))
    q = np.quantile(vals, [.025, .975])
    plt.plot(q, (.2 + .1*i) * np.array([1, 1]), color=g.lines[2-i].get_color())
    plt.text((q[0] + q[1])/2, .225 + .1*i, area, {'ha': 'center'})
    print(np.quantile(vals, [.025, .5, .975]))
plt.ylim([0, 1.2])
# %% [markdown]
# # Sparse regression
# %%
# Same fetch as above, but for boosting runs without PCA (pca == -1).
api = wandb.Api()
runs = api.runs("pmin/train_fmri_convex.py",
                {"$and":
                 [
                     {"config.dataset": {"$in": ["pvc1-repeats", "pvc4", "mt1_norm_neutralbg", "mt2", "mst_norm_neutralbg"]}},
                     {"config.aggregator": "downsample"},
                     {"config.pca": -1},
                     {"config.method": "boosting"},
                     {"state": "finished"},
                 ]
                 }
                )
archives = {}
print("Found %i" % len(runs))
for run in tqdm(runs):
    # Split SlowFast's single layer index into its two pathways (see above).
    if run.config['features'] == 'SlowFast':
        if run.config['layer'] > 16:
            run.config['features'] = 'SlowFast_Fast'
            run.config['layer'] = run.config['layer'] - 17
        else:
            run.config['features'] = 'SlowFast_Slow'
    unique_name = f"{run.config['features']}_layer{int(run.config['layer']):02}_{run.config['dataset']}_{(run.config['subset'])}"
    if unique_name in archives:
        continue
    if 'corrs_report' in run.summary:
        maxr2_ = [1]
        if run.config['dataset'] in maxr2.keys():
            maxr2_ = [maxr2[run.config['dataset']][int(run.summary['subset'])]]
        archives[unique_name] = {'corrs_report': [run.summary['corrs_report']],
                                 'maxr2': maxr2_,
                                 'config': run.config,
                                 }
# %%
# Rebuild the results DataFrame from the boosting archives.
results = []
for k, v in archives.items():
    fam = families[v['config']['features']]
    try:
        layer_num = int(v['config']['layer_name'][-2:])
    except (KeyError, ValueError):
        layer_num = int(v['config']['layer'])
    friendly_name = friendly_names[v['config']['features']]
    if friendly_name not in whitelist.keys() or layer_num not in whitelist[friendly_name]:
        continue
    if v['corrs_report'][0] is None:
        # This happens when boosting chooses no features for the cell —
        # treat it as zero correlation.
        v['corrs_report'][0] = 0.0
    results += [{'key': k,
                 'family': fam,
                 'dataset': dataset_names[v['config']['dataset']],
                 'subset': int(v['config']['subset']) + i*100,
                 'features': friendly_name,
                 'layer': layer_num,
                 'features_layer': f"{friendly_name}_{layer_num:02}",
                 'report_corr': x,
                 'max_r2': np.sqrt(max([m, .1])),
                 'rnorm': x / np.sqrt(max([m, .1]))} for i, (x, m) in enumerate(zip(v['corrs_report'], v['maxr2']))]
df = pd.DataFrame(results)
df = df.sort_values('key')
nums = df.groupby(['dataset', 'features', 'layer']).subset.count().reset_index()
df = df.merge(nums, left_on=('dataset', 'features', 'layer'), right_on=('dataset', 'features', 'layer'), suffixes=('', '_overall'))
df = df[df.subset_overall.isin([23, 25, 36, 44, 84])]
#df.query('dataset == "mst"').groupby('features_layer').mean()
#df.query('features == "dorsalnet"')
nums
# %%
#df_ = df.groupby(['dataset', 'features', 'layer']).rnorm.agg(['mean', 'sem', 'count'])
#df_.reset_index().groupby(['dataset', 'features'])['mean'].max()
# Same key-measurements table as above, restricted to motionnet/dorsalnet
# on the MT/MST datasets.
datasets = df.dataset.unique()
perf = []
for dataset in datasets:
    df_ = df.query(f'dataset == "{dataset}"')
    df_m = df_.pivot_table('rnorm', 'features', 'layer')
    df_s = df_.pivot_table('rnorm', 'features', 'layer', aggfunc='sem')
    # Mean and sem at each model's best layer.
    m = np.diag(df_m.loc[df_m.index, df_m.idxmax(axis=1)].values)
    s = np.diag(df_s.loc[df_s.index, df_m.idxmax(axis=1)].values)
    series = df_.groupby(['features_layer']).mean().report_corr
    best = series.index[series.argmax()]
    df_ = pd.merge(df_, df_.query(f'features_layer == "{best}"')[['subset', 'rnorm']], left_on='subset', right_on='subset', suffixes=('', '_best'))
    df_['rnorm_delta'] = df_['rnorm'] - df_['rnorm_best']
    df_m_delta = df_.pivot_table('rnorm_delta', 'features', 'layer')
    df_s_delta = df_.pivot_table('rnorm_delta', 'features', 'layer', aggfunc='sem') + 1e-6
    df_ = (df_m_delta / df_s_delta).max(axis=1)
    perf += [{'dataset': dataset,
              'features': f,
              'z': df_.loc[f],
              'm': mm,
              's': ss} for f, mm, ss in zip(df_m.index, m, s)]
df_m = pd.DataFrame(perf).pivot('features', 'dataset', 'm')
df_s = pd.DataFrame(perf).pivot('features', 'dataset', 's')
df_z = pd.DataFrame(perf).pivot('features', 'dataset', 'z')
models = ['motionnet', 'dorsalnet']
datasets = ['mt1', 'mt2', 'mst']
df_m = df_m[datasets].loc[models]
df_s = df_s[datasets].loc[models]
df_z = df_z[datasets].loc[models]
the_str = f"\\begin{{tabular}}{{{'l' * (df_m.shape[1] + 1)}}}\n\\toprule \n"
the_str += '{} & ' + ' & '.join(df_m.columns.tolist()).replace('\\', '\\\\').replace('_', '\_') + '\\\\\n'
the_str += '\midrule \n'
for idx, row in df_m.iterrows():
    vals = [idx.replace('_', '\_')]
    for feature, v in row.items():
        if np.isnan(v):
            vals.append('-')
        else:
            if df_z.loc[idx, feature].item() < -1.96:
                # [1:] strips the leading '0' from '0.xxx'.
                vals.append(f"{v:.3f} ({df_s.loc[idx, feature]:.3f})"[1:])
            else:
                vals.append(f"\\textbf{{{v:.3f}}} ({df_s.loc[idx, feature]:.3f})")
    the_str += ' & '.join(vals) + '\\\\\n'
the_str += "\end{tabular}\n"
print(the_str)
# %%
import scipy.stats
# Two-sided p-values from the z scores.
zvals = pd.DataFrame(2*scipy.stats.norm.cdf(df_z))
zvals.columns = df_z.columns
zvals.index = df_z.index
zvals
# %%
# Per-dataset cell-level scatter for the boosting results (same layout as
# the regression version above).
datasets = df.dataset.unique()
perf = []
for dataset in datasets:
    df_ = df.query(f'dataset == "{dataset}"')
    df_m = df_.pivot_table('rnorm', 'features', 'layer')
    best_layer = df_m.idxmax(axis=1)
    good_combos = [f'{x}_{y:02}' for x, y in zip(best_layer.index, best_layer.values)]
    df_ = df_[df_.features_layer.isin(good_combos)]
    df_ = df_.pivot_table('rnorm', 'subset', 'features')
    plt.figure()
    g = sns.scatterplot(x='motionnet', y='dorsalnet', data=df_, ax=plt.gca())
    g.plot([-1, 1], [-1, 1], 'k-')  # identity line
    plt.axis('square')
    g.set_xlim([-.05, .8])
    g.set_ylim([-.05, .8])
    plt.title(dataset)
    nbetter = (df_.dorsalnet > df_.motionnet).sum()
    ntotal = df_.shape[0]
    print(f"{dataset}, cells better {nbetter}/{ntotal}")
# %% [markdown]
# # Present CKA results
# %%
# Plot the precomputed CKA similarity matrix between DorsalNet layers.
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
A = np.load('../cka.npy')
plt.figure(figsize=(5, 4))
# Drop the last row/column of the matrix from the display.
f = sns.heatmap(A[:-1, :-1], annot=True)
plt.xlabel('layer')
plt.ylabel('layer')
plt.title('CKA across layers of DorsalNet')
plt.ylim([0, 6])
plt.savefig('../revision/cka.pdf')
# %% [markdown] tags=[]
# # Bump plots
# # Calculate bump plots for alignment between area and layer across all relative scalings of the data
# %%
# Re-import everything so this section can be run standalone.
import collections
import numpy as np
import pickle
import torch
from tqdm import tqdm
import wandb
import sys
sys.path.append('../')
import models
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from python_dict_wrapper import wrap
import torch
import seaborn as sns
import matplotlib
matplotlib.rcParams["font.family"] = "Arial"
matplotlib.rcParams['font.sans-serif'] = "Arial"
sns.set(font="Arial", style="ticks")
# %%
# Fetch ridge runs for airsim_04 (DorsalNet) and MotionNet only; these runs
# may carry a 'resize' config entry (input resolution, default 112).
api = wandb.Api()
runs = api.runs("pmin/train_fmri_convex.py",
                {"$and":
                 [
                     {"config.dataset": {"$in": ["pvc1-repeats", "pvc4", "mt1_norm_neutralbg", "mt2", "mst_norm_neutralbg"]}},
                     {"config.aggregator": "downsample"},
                     {"config.pca": 500},
                     {"config.features": {"$in": ["airsim_04", "MotionNet"]}},
                     {"state": "finished"},
                 ]
                 }
                )
archives = {}
print("Found %i" % len(runs))
for run in tqdm(runs):
    if run.config['features'] == 'SlowFast':
        if run.config['layer'] > 16:
            run.config['features'] = 'SlowFast_Fast'
            run.config['layer'] = run.config['layer'] - 17
        else:
            run.config['features'] = 'SlowFast_Slow'
    # Input size; 112 px is the unscaled default.
    sz = 112
    if 'resize' in run.config:
        sz = run.config['resize']
    unique_name = f"{run.config['features']}_layer{int(run.config['layer']):02}_{run.config['dataset']}_{(run.config['subset'])}_{sz}"
    if unique_name in archives:
        continue
    if 'corrs_report' in run.summary:
        maxr2_ = [1]
        if run.config['dataset'] in maxr2.keys():
            maxr2_ = [maxr2[run.config['dataset']][int(run.summary['subset'])]]
        archives[unique_name] = {'corrs_report': [run.summary['corrs_report']],
                                 'maxr2': maxr2_,
                                 'config': run.config,
                                 }
# Build the results DataFrame, now carrying a numeric 'scale' column
# (resize / 112, rounded to 2 decimals).
results = []
for k, v in archives.items():
    fam = families[v['config']['features']]
    try:
        layer_num = int(v['config']['layer_name'][-2:])
    except (KeyError, ValueError):
        layer_num = int(v['config']['layer'])
    friendly_name = friendly_names[v['config']['features']]
    if friendly_name not in whitelist.keys() or layer_num not in whitelist[friendly_name]:
        continue
    if v['corrs_report'][0] is None:
        # This happens when boosting chooses no features for the cell.
        v['corrs_report'][0] = 0.0
    sz = 112
    if 'resize' in v['config']:
        sz = v['config']['resize']
    results += [{'key': k,
                 'family': fam,
                 'dataset': dataset_names[v['config']['dataset']],
                 'subset': int(v['config']['subset']) + i*100,
                 'features': friendly_name,
                 'layer': layer_num,
                 'features_layer': f"{friendly_name}_{layer_num:02}",
                 'report_corr': x,
                 'max_r2': np.sqrt(max([m, .1])),
                 'scale': round((sz / 112) * 100) / 100,
                 'rnorm': x / np.sqrt(max([m, .1]))} for i, (x, m) in enumerate(zip(v['corrs_report'], v['maxr2']))]
df = pd.DataFrame(results)
df = df.sort_values('key')
# Completeness filter now also keyed on scale.
nums = df.groupby(['dataset', 'features', 'layer', 'scale']).subset.count().reset_index()
df = df.merge(nums, left_on=('dataset', 'features', 'layer', 'scale'), right_on=('dataset', 'features', 'layer', 'scale'), suffixes=('', '_overall'))
df = df[df.subset_overall.isin([23, 25, 36, 44, 84])]
#df.query('dataset == "mst"').groupby('features_layer').mean()
#df.query('features == "dorsalnet"')
nums
# One bootstrap bump plot per spatial rescaling ratio (0.66x, 1x, 1.5x).
# Fix: `bs` was redefined (identically) on every loop iteration — hoisted.
def bs(df):
    """One bootstrap draw: resample rows with replacement, then return mean
    rnorm per (area, layer) normalized by each area's best layer."""
    df = df.copy()
    idx = np.random.randint(0, df.shape[0], df.shape[0])
    df_ = df.iloc[idx]
    the_max = df_.groupby(['area', 'layer']).rnorm.mean().groupby('area').max()
    df_ = df_.groupby(['area', 'layer']).rnorm.mean() / the_max
    return df_
for ratio in [0.66, 1.0, 1.5]:
    dmap = {'pvc1': 'v1', 'pvc4': 'v1', 'mt1': 'mt', 'mt2': 'mt', 'mst': 'mst'}
    df['area'] = df.dataset.map(lambda x: dmap[x])
    df_ = df.query(f'features == "dorsalnet" and scale == {ratio}')
    df_ = df_.sort_values('area', ascending=True)
    df_['rnorm_max'] = df_.rnorm
    # 500 bootstrap draws of the per-(area, layer) normalized mean rnorm.
    A = np.array([bs(df_).values for x in range(500)])
    df__ = pd.DataFrame(A.T)
    df__.index = bs(df_).index
    # Fix: plt.figure was called twice per ratio, leaving an empty figure.
    plt.figure(figsize=(4, 3))
    lines = []
    for area in ['v1', 'mt', 'mst']:
        # 5%/95% band and median across bootstrap draws.
        vals = np.quantile(df__.loc[area], [.05, .95, .5], axis=1).T
        plt.fill_between(df__.loc[area].index, vals[:, 0], vals[:, 1], alpha=.2)
        lines.append(plt.plot(df__.loc[area].index, vals[:, 2]))
    sns.despine()
    plt.xlabel('layer')
    plt.ylabel('R relative to maximum for area')
    plt.yticks([0, .2, .4, .6, .8, 1.0])
    cols = sns.color_palette(as_cmap=True)
    for i, area in enumerate(['v1', 'mt', 'mst']):
        df_ = df.query(f'report_corr > .01 and features == "dorsalnet" and area == "{area}" and scale == {ratio}')
        # Fix: np.str was removed in NumPy 1.24; builtin str is equivalent.
        df_['dataset_subset'] = df_.dataset + '_' + df_.subset.astype(str)
        v = df_.pivot('dataset_subset', 'layer', 'rnorm').values
        # Bootstrap the mean best-layer (argmax over layers) across cells.
        vals = []
        for n in range(10000):
            v_ = v[np.random.randint(low=0, high=v.shape[0], size=v.shape[0]), :].argmax(axis=1)
            vals.append(v_.mean())
        print(np.mean(vals))
        print(np.std(vals))
        q = np.quantile(vals, [.025, .975])
        plt.plot(q, (.2 + .1*i) * np.array([1, 1]), color=lines[i][0].get_color())
        plt.text((q[0] + q[1])/2, .225 + .1*i, area, {'ha': 'center'})
        print(np.quantile(vals, [.025, .5, .975]))
    plt.ylim([0, 1.05])
    plt.title(f'Scaled {ratio}X')
    plt.savefig(f'/home/pmin/paper-assets/figure-2-layer-alignment-rescaled-{ratio}.pdf', bbox_inches='tight')
    plt.savefig(f'/home/pmin/paper-assets/figure-2-layer-alignment-rescaled-{ratio}.png', bbox_inches='tight')
# %%
#df_ = df.groupby(['dataset', 'features', 'layer']).rnorm.agg(['mean', 'sem', 'count'])
#df_.reset_index().groupby(['dataset', 'features'])['mean'].max()
# One motionnet/dorsalnet table per rescaling ratio.
datasets = df.dataset.unique()
for ratio in [0.66, 1.0, 1.5]:
    # NOTE(review): `perf` is reset each ratio, but `datasets` is reassigned
    # inside the loop below and reused on the next iteration — verify the two
    # orderings agree.
    perf = []
    for dataset in datasets:
        df_ = df.query(f'dataset == "{dataset}" and scale == {ratio}')
        df_m = df_.pivot_table('rnorm', 'features', 'layer')
        df_s = df_.pivot_table('rnorm', 'features', 'layer', aggfunc='sem')
        # Mean and sem at each model's best layer.
        m = np.diag(df_m.loc[df_m.index, df_m.idxmax(axis=1)].values)
        s = np.diag(df_s.loc[df_s.index, df_m.idxmax(axis=1)].values)
        series = df_.groupby(['features_layer']).mean().report_corr
        best = series.index[series.argmax()]
        df_ = pd.merge(df_, df_.query(f'features_layer == "{best}"')[['subset', 'rnorm']], left_on='subset', right_on='subset', suffixes=('', '_best'))
        df_['rnorm_delta'] = df_['rnorm'] - df_['rnorm_best']
        df_m_delta = df_.pivot_table('rnorm_delta', 'features', 'layer')
        df_s_delta = df_.pivot_table('rnorm_delta', 'features', 'layer', aggfunc='sem') + 1e-6
        df_ = (df_m_delta / df_s_delta).max(axis=1)
        perf += [{'dataset': dataset,
                  'features': f,
                  'z': df_.loc[f],
                  'm': mm,
                  's': ss} for f, mm, ss in zip(df_m.index, m, s)]
    df_m = pd.DataFrame(perf).pivot('features', 'dataset', 'm')
    df_s = pd.DataFrame(perf).pivot('features', 'dataset', 's')
    df_z = pd.DataFrame(perf).pivot('features', 'dataset', 'z')
    models = ['motionnet', 'dorsalnet']
    datasets = ['pvc1', 'pvc4', 'mt1', 'mt2', 'mst']
    df_m = df_m[datasets].loc[models]
    df_s = df_s[datasets].loc[models]
    df_z = df_z[datasets].loc[models]
    the_str = f"\\begin{{tabular}}{{{'l' * (df_m.shape[1] + 1)}}}\n\\toprule \n"
    the_str += '{} & ' + ' & '.join(df_m.columns.tolist()).replace('\\', '\\\\').replace('_', '\_') + '\\\\\n'
    the_str += '\midrule \n'
    for idx, row in df_m.iterrows():
        vals = [idx.replace('_', '\_')]
        for feature, v in row.items():
            if np.isnan(v):
                vals.append('-')
            else:
                if df_z.loc[idx, feature].item() < -1.96:
                    vals.append(f"{v:.3f} ({df_s.loc[idx, feature]:.3f})")
                else:
                    vals.append(f"\\textbf{{{v:.3f}}} ({df_s.loc[idx, feature]:.3f})")
        the_str += ' & '.join([x.replace('0.', '.') for x in vals]) + '\\\\\n'
    the_str += "\end{tabular}\n"
    print(the_str)
# %% [markdown]
# # Resizing, with regression
# %%
# Same fetch as the rescaling section (ridge, PCA=500, DorsalNet/MotionNet).
api = wandb.Api()
runs = api.runs("pmin/train_fmri_convex.py",
                {"$and":
                 [
                     {"config.dataset": {"$in": ["pvc1-repeats", "pvc4", "mt1_norm_neutralbg", "mt2", "mst_norm_neutralbg"]}},
                     {"config.aggregator": "downsample"},
                     {"config.pca": 500},
                     {"config.features": {"$in": ["airsim_04", "MotionNet"]}},
                     {"state": "finished"},
                 ]
                 }
                )
archives = {}
print("Found %i" % len(runs))
for run in tqdm(runs):
    if run.config['features'] == 'SlowFast':
        if run.config['layer'] > 16:
            run.config['features'] = 'SlowFast_Fast'
            run.config['layer'] = run.config['layer'] - 17
        else:
            run.config['features'] = 'SlowFast_Slow'
    # Input size; 112 px is the unscaled default.
    sz = 112
    if 'resize' in run.config:
        sz = run.config['resize']
    unique_name = f"{run.config['features']}_layer{int(run.config['layer']):02}_{run.config['dataset']}_{(run.config['subset'])}_{sz}"
    if unique_name in archives:
        continue
    if 'corrs_report' in run.summary:
        maxr2_ = [1]
        if run.config['dataset'] in maxr2.keys():
            maxr2_ = [maxr2[run.config['dataset']][int(run.summary['subset'])]]
        archives[unique_name] = {'corrs_report': [run.summary['corrs_report']],
                                 'maxr2': maxr2_,
                                 'config': run.config,
                                 }
# %%
# Here the scale is folded into the features name ("<model>_<scale>") so the
# downstream pivots treat every (model, scale) pair as its own model.
results = []
for k, v in archives.items():
    fam = families[v['config']['features']]
    try:
        layer_num = int(v['config']['layer_name'][-2:])
    except (KeyError, ValueError):
        layer_num = int(v['config']['layer'])
    friendly_name = friendly_names[v['config']['features']]
    if friendly_name not in whitelist.keys() or layer_num not in whitelist[friendly_name]:
        continue
    if v['corrs_report'][0] is None:
        # This happens when boosting chooses no features for the cell.
        v['corrs_report'][0] = 0.0
    sz = 112
    if 'resize' in v['config']:
        sz = v['config']['resize']
    scale = f"{round((sz / 112) * 100) / 100:.2f}"
    results += [{'key': k,
                 'family': fam,
                 'dataset': dataset_names[v['config']['dataset']],
                 'subset': int(v['config']['subset']) + i*100,
                 'features': f"{friendly_name}_{scale}",
                 'layer': layer_num,
                 'features_layer': f"{friendly_name}_{scale}_{layer_num:02}",
                 'report_corr': x,
                 'max_r2': np.sqrt(max([m, .1])),
                 'rnorm': x / np.sqrt(max([m, .1]))} for i, (x, m) in enumerate(zip(v['corrs_report'], v['maxr2']))]
df = pd.DataFrame(results)
df = df.sort_values('key')
nums = df.groupby(['dataset', 'features', 'layer']).subset.count().reset_index()
df = df.merge(nums, left_on=('dataset', 'features', 'layer'), right_on=('dataset', 'features', 'layer'), suffixes=('', '_overall'))
df = df[df.subset_overall.isin([23, 25, 36, 44, 84])]
#df.query('dataset == "mst"').groupby('features_layer').mean()
#df.query('features == "dorsalnet"')
nums
# %%
# Per-(model, scale) LaTeX table of best-layer rnorm per dataset, with
# bootstrap-z bolding relative to the best (features, layer) combo.
# Fixes: removed unused df_ms/df_ss/df_zs accumulators and dead code; the
# table rows now print scale before model, matching the header order.
datasets = df.dataset.unique()
datasets = ['pvc1', 'pvc4', 'mt1', 'mt2', 'mst']
perf = []
for dataset in datasets:
    df_ = df.query(f'dataset == "{dataset}"')
    df_m = df_.pivot_table('rnorm', 'features', 'layer')
    df_s = df_.pivot_table('rnorm', 'features', 'layer', aggfunc='sem')
    # Mean and sem at each (model, scale)'s best layer.
    m = np.diag(df_m.loc[df_m.index, df_m.idxmax(axis=1)].values)
    s = np.diag(df_s.loc[df_s.index, df_m.idxmax(axis=1)].values)
    series = df_.groupby(['features_layer']).mean().report_corr
    best = series.index[series.argmax()]
    # Paired per-cell difference to the overall best combo -> z score.
    df_ = pd.merge(df_, df_.query(f'features_layer == "{best}"')[['subset', 'rnorm']], left_on='subset', right_on='subset', suffixes=('', '_best'))
    df_['rnorm_delta'] = df_['rnorm'] - df_['rnorm_best']
    df_m_delta = df_.pivot_table('rnorm_delta', 'features', 'layer')
    df_s_delta = df_.pivot_table('rnorm_delta', 'features', 'layer', aggfunc='sem') + 1e-6
    df_ = (df_m_delta / df_s_delta).max(axis=1)
    perf += [{'dataset': dataset,
              'features': f,
              'z': df_.loc[f],
              'm': mm,
              's': ss} for f, mm, ss in zip(df_m.index, m, s)]
df_m = pd.DataFrame(perf).pivot('features', 'dataset', 'm')
df_s = pd.DataFrame(perf).pivot('features', 'dataset', 's')
df_z = pd.DataFrame(perf).pivot('features', 'dataset', 'z')
df_m = df_m[datasets]
df_s = df_s[datasets]
df_z = df_z[datasets]
# %%
the_str = f"\\begin{{tabular}}{{{'l' * (df_m.shape[1] + 2)}}}\n\\toprule \n"
the_str += '{scaling} & {model} & ' + ' & '.join(df_m.columns.tolist()).replace('\\', '\\\\').replace('_', '\_') + '\\\\\n'
the_str += '\midrule \n'
for idx, row in df_m.iterrows():
    # idx is "<model>_<scale>"; emit scale first to match the header order
    # ({scaling} & {model}) — the original printed them swapped.
    vals = [idx.split('_')[1], idx.split('_')[0]]
    for feature, v in row.items():
        if np.isnan(v):
            vals.append('-')
        else:
            if df_z.loc[idx, feature].item() < -1.96:
                vals.append(f"{v:.3f} ({df_s.loc[idx, feature]:.3f})")
            else:
                vals.append(f"\\textbf{{{v:.3f}}} ({df_s.loc[idx, feature]:.3f})")
    the_str += ' & '.join([x.replace('0.', '.') for x in vals]) + '\\\\\n'
the_str += "\end{tabular}\n"
print(the_str)
# %% [markdown]
# # Resizing, with boosting
# %%
# Same as the regression variant above, but selecting boosting runs (note:
# no PCA filter here).
api = wandb.Api()
runs = api.runs("pmin/train_fmri_convex.py",
                {"$and":
                 [
                     {"config.dataset": {"$in": ["pvc1-repeats", "pvc4", "mt1_norm_neutralbg", "mt2", "mst_norm_neutralbg"]}},
                     {"config.aggregator": "downsample"},
                     {"config.method": "boosting"},
                     {"config.features": {"$in": ["airsim_04", "MotionNet"]}},
                     {"state": "finished"},
                 ]
                 }
                )
archives = {}
print("Found %i" % len(runs))
for run in tqdm(runs):
    if run.config['features'] == 'SlowFast':
        if run.config['layer'] > 16:
            run.config['features'] = 'SlowFast_Fast'
            run.config['layer'] = run.config['layer'] - 17
        else:
            run.config['features'] = 'SlowFast_Slow'
    # Input size; 112 px is the unscaled default.
    sz = 112
    if 'resize' in run.config:
        sz = run.config['resize']
    unique_name = f"{run.config['features']}_layer{int(run.config['layer']):02}_{run.config['dataset']}_{(run.config['subset'])}_{sz}"
    if unique_name in archives:
        continue
    if 'corrs_report' in run.summary:
        maxr2_ = [1]
        if run.config['dataset'] in maxr2.keys():
            maxr2_ = [maxr2[run.config['dataset']][int(run.summary['subset'])]]
        archives[unique_name] = {'corrs_report': [run.summary['corrs_report']],
                                 'maxr2': maxr2_,
                                 'config': run.config,
                                 }
# %%
# Scale folded into the features name, as in the regression variant.
results = []
for k, v in archives.items():
    fam = families[v['config']['features']]
    try:
        layer_num = int(v['config']['layer_name'][-2:])
    except (KeyError, ValueError):
        layer_num = int(v['config']['layer'])
    friendly_name = friendly_names[v['config']['features']]
    if friendly_name not in whitelist.keys() or layer_num not in whitelist[friendly_name]:
        continue
    if v['corrs_report'][0] is None:
        # This happens when boosting chooses no features for the cell.
        v['corrs_report'][0] = 0.0
    sz = 112
    if 'resize' in v['config']:
        sz = v['config']['resize']
    scale = f"{round((sz / 112) * 100) / 100:.2f}"
    results += [{'key': k,
                 'family': fam,
                 'dataset': dataset_names[v['config']['dataset']],
                 'subset': int(v['config']['subset']) + i*100,
                 'features': f"{friendly_name}_{scale}",
                 'layer': layer_num,
                 'features_layer': f"{friendly_name}_{scale}_{layer_num:02}",
                 'report_corr': x,
                 'max_r2': np.sqrt(max([m, .1])),
                 'rnorm': x / np.sqrt(max([m, .1]))} for i, (x, m) in enumerate(zip(v['corrs_report'], v['maxr2']))]
df = pd.DataFrame(results)
df = df.sort_values('key')
nums = df.groupby(['dataset', 'features', 'layer']).subset.count().reset_index()
df = df.merge(nums, left_on=('dataset', 'features', 'layer'), right_on=('dataset', 'features', 'layer'), suffixes=('', '_overall'))
df = df[df.subset_overall.isin([23, 25, 36, 44, 84])]
#df.query('dataset == "mst"').groupby('features_layer').mean()
#df.query('features == "dorsalnet"')
nums
# %%
#df_ = df.groupby(['dataset', 'features', 'layer']).rnorm.agg(['mean', 'sem', 'count'])
#df_.reset_index().groupby(['dataset', 'features'])['mean'].max()
datasets = df.dataset.unique()
datasets = ['mt1', 'mt2', 'mst']
df_ms = []
df_ss = []
df_zs = []
perf = []
for dataset in datasets:
df_ = df.query(f'dataset == "{dataset}"')
df_m = df_.pivot_table('rnorm', 'features', 'layer')
df_s = df_.pivot_table('rnorm', 'features', 'layer', aggfunc='sem')
m = np.diag(df_m.loc[df_m.index, df_m.idxmax(axis=1)].values)
s = np.diag(df_s.loc[df_s.index, df_m.idxmax(axis=1)].values)
series = df_.groupby(['features_layer']).mean().report_corr
best = series.index[series.argmax()]
df_ = pd.merge(df_, df_.query(f'features_layer == "{best}"')[['subset', 'rnorm']], left_on='subset', right_on='subset', suffixes=('', '_best'))
df_['rnorm_delta'] = df_['rnorm'] - df_['rnorm_best']
df_m_delta = df_.pivot_table('rnorm_delta', 'features', 'layer')
df_s_delta = df_.pivot_table('rnorm_delta', 'features', 'layer', aggfunc='sem') + 1e-6
df_ = (df_m_delta / df_s_delta).max(axis=1)
perf += [{'dataset': dataset,
'features': f,
'z': df_.loc[f],
'm': mm,
's': ss} for f, mm, ss in zip(df_m.index, m, s)]
df_m = pd.DataFrame(perf).pivot('features', 'dataset', 'm')
df_s = pd.DataFrame(perf).pivot('features', 'dataset', 's')
df_z = pd.DataFrame(perf).pivot('features', 'dataset', 'z')
#models = ['motionnet', 'dorsalnet']
#datasets = ['pvc1', 'pvc4', 'mt1', 'mt2', 'mst']
df_m = df_m[datasets]#.loc[models]
df_s = df_s[datasets]#.loc[models]
df_z = df_z[datasets]#.loc[models]
# %%
the_str = f"\\begin{{tabular}}{{{'l' * (df_m.shape[1] + 2)}}}\n\\toprule \n"
the_str += '{scaling} & {model} & ' + ' & '.join(df_m.columns.tolist()).replace('\\', '\\\\').replace('_', '\_') + '\\\\\n'
the_str += '\midrule \n'
for idx, row in df_m.iterrows():
vals = [idx.split('_')[0], idx.split('_')[1]]
for feature, v in row.items():
if np.isnan(v):
vals.append('-')
else:
if df_z.loc[idx, feature].item() < -1.96:
vals.append(f"{v:.3f} ({df_s.loc[idx, feature]:.3f})")
else:
vals.append(f"\\textbf{{{v:.3f}}} ({df_s.loc[idx, feature]:.3f})")
the_str += ' & '.join([x.replace('0.', '.') for x in vals]) + '\\\\\n'
the_str += "\end{tabular}\n"
#print(f"Ratio: {ratio}")
print(the_str)
# %%
# %% [markdown]
# # Resizing, with boosting
# %%
# Pull finished boosting runs (downsample aggregator) for the airsim_04 and
# MotionNet feature sets from wandb.
api = wandb.Api()
runs = api.runs("pmin/train_fmri_convex.py",
                {"$and":
                 [
                     {"config.dataset": {"$in": ["pvc1-repeats", "pvc4", "mt1_norm_neutralbg", "mt2", "mst_norm_neutralbg"]}},
                     {"config.aggregator": "downsample"},
                     {"config.method": "boosting"},
                     {"config.features": {"$in": ["airsim_04", "MotionNet"]}},
                     {"state": "finished"},
                 ]
                 }
                )
archives = {}
print("Found %i" % len(runs))
for run in tqdm(runs):
    # SlowFast logs both pathways under one name; split them by layer index.
    if run.config['features'] == 'SlowFast':
        if run.config['layer'] > 16:
            run.config['features'] = 'SlowFast_Fast'
            run.config['layer'] = run.config['layer'] - 17
        else:
            run.config['features'] = 'SlowFast_Slow'
    sz = 112  # default resolution when no resize was logged
    if 'resize' in run.config:
        sz = run.config['resize']
    unique_name = f"{run.config['features']}_layer{int(run.config['layer']):02}_{run.config['dataset']}_{(run.config['subset'])}_{sz}"
    if unique_name in archives:
        # Deduplicate runs that share the same configuration.
        continue
    if 'corrs_report' in run.summary:
        maxr2_ = [1]  # default noise ceiling when none is tabulated
        if run.config['dataset'] in maxr2.keys():
            maxr2_ = [maxr2[run.config['dataset']][int(run.summary['subset'])]]
        archives[unique_name] = {'corrs_report': [run.summary['corrs_report']],
                                 'maxr2': maxr2_,
                                 'config': run.config,
                                 }
# %%
# Flatten the archived runs into rows (same pipeline as the cell above).
results = []
for k, v in archives.items():
    fam = families[v['config']['features']]
    try:
        # Prefer the trailing two digits of an explicit layer_name when present.
        layer_num = int(v['config']['layer_name'][-2:])
    except (KeyError, ValueError):
        layer_num = int(v['config']['layer'])
    friendly_name = friendly_names[v['config']['features']]
    if friendly_name not in whitelist.keys() or layer_num not in whitelist[friendly_name]:
        continue
    if v['corrs_report'][0] is None:
        # This happens when boosting chooses nothing; treat as zero correlation.
        v['corrs_report'][0] = 0.0
    sz = 112  # default input resolution when no explicit resize was logged
    if 'resize' in v['config']:
        sz = v['config']['resize']
    # Spatial scale relative to the native 112 px, formatted to 2 decimals.
    scale = f"{round((sz / 112) * 100) / 100:.2f}"
    results += [{'key': k,
                 'family': fam,
                 'dataset': dataset_names[v['config']['dataset']],
                 # i*100 offsets entries from different report slots so they
                 # do not collide -- presumably one subset per slot (verify).
                 'subset': int(v['config']['subset']) + i*100,
                 'features': f"{friendly_name}_{scale}",
                 'layer': layer_num,
                 'features_layer': f"{friendly_name}_{scale}_{layer_num:02}",
                 'report_corr': x,
                 # Noise ceiling floored at .1 to avoid dividing by tiny values.
                 'max_r2': np.sqrt(max([m, .1])),
                 'rnorm': x / np.sqrt(max([m, .1]))} for i, (x, m) in enumerate(zip(v['corrs_report'], v['maxr2']))]
df = pd.DataFrame(results)
df = df.sort_values('key')
# Keep only (dataset, features, layer) groups whose row counts match the
# expected neuron counts. NOTE(review): magic numbers -- confirm against data.
nums = df.groupby(['dataset', 'features', 'layer']).subset.count().reset_index()
df = df.merge(nums, left_on=('dataset', 'features', 'layer'), right_on=('dataset', 'features', 'layer'), suffixes=('', '_overall'))
df = df[df.subset_overall.isin([23, 25, 36, 44, 84])]
#df.query('dataset == "mst"').groupby('features_layer').mean()
#df.query('features == "dorsalnet"')
nums
# %%
#df_ = df.groupby(['dataset', 'features', 'layer']).rnorm.agg(['mean', 'sem', 'count'])
#df_.reset_index().groupby(['dataset', 'features'])['mean'].max()
datasets = df.dataset.unique()
datasets = ['mt1', 'mt2', 'mst']  # restrict to the motion datasets here
df_ms = []
df_ss = []
df_zs = []
perf = []
for dataset in datasets:
    df_ = df.query(f'dataset == "{dataset}"')
    # Mean and SEM of normalized correlation per (features, layer).
    df_m = df_.pivot_table('rnorm', 'features', 'layer')
    df_s = df_.pivot_table('rnorm', 'features', 'layer', aggfunc='sem')
    # For each feature row, take the best layer's mean/SEM (diagonal trick).
    m = np.diag(df_m.loc[df_m.index, df_m.idxmax(axis=1)].values)
    s = np.diag(df_s.loc[df_s.index, df_m.idxmax(axis=1)].values)
    # Best overall features_layer by mean report correlation.
    series = df_.groupby(['features_layer']).mean().report_corr
    best = series.index[series.argmax()]
    # Paired comparison against the best model, matched on subset.
    df_ = pd.merge(df_, df_.query(f'features_layer == "{best}"')[['subset', 'rnorm']], left_on='subset', right_on='subset', suffixes=('', '_best'))
    df_['rnorm_delta'] = df_['rnorm'] - df_['rnorm_best']
    df_m_delta = df_.pivot_table('rnorm_delta', 'features', 'layer')
    # +1e-6 guards against division by a zero SEM below.
    df_s_delta = df_.pivot_table('rnorm_delta', 'features', 'layer', aggfunc='sem') + 1e-6
    # z-like score of each model's best layer relative to the best model.
    df_ = (df_m_delta / df_s_delta).max(axis=1)
    perf += [{'dataset': dataset,
              'features': f,
              'z': df_.loc[f],
              'm': mm,
              's': ss} for f, mm, ss in zip(df_m.index, m, s)]
df_m = pd.DataFrame(perf).pivot('features', 'dataset', 'm')
df_s = pd.DataFrame(perf).pivot('features', 'dataset', 's')
df_z = pd.DataFrame(perf).pivot('features', 'dataset', 'z')
#models = ['motionnet', 'dorsalnet']
#datasets = ['pvc1', 'pvc4', 'mt1', 'mt2', 'mst']
df_m = df_m[datasets]#.loc[models]
df_s = df_s[datasets]#.loc[models]
df_z = df_z[datasets]#.loc[models]
# %%
# Emit a LaTeX tabular: one row per features index ("<scaling>_<model>"), one
# column per dataset; cells are "mean (sem)", bolded unless significantly
# worse than the best model (z < -1.96).
the_str = f"\\begin{{tabular}}{{{'l' * (df_m.shape[1] + 2)}}}\n\\toprule \n"
the_str += '{scaling} & {model} & ' + ' & '.join(df_m.columns.tolist()).replace('\\', '\\\\').replace('_', '\_') + '\\\\\n'
the_str += '\midrule \n'
for idx, row in df_m.iterrows():
    # Split the "<scaling>_<model>" index into the two leading columns.
    vals = [idx.split('_')[0], idx.split('_')[1]]
    for feature, v in row.items():
        if np.isnan(v):
            vals.append('-')
        else:
            if df_z.loc[idx, feature].item() < -1.96:
                vals.append(f"{v:.3f} ({df_s.loc[idx, feature]:.3f})")
            else:
                vals.append(f"\\textbf{{{v:.3f}}} ({df_s.loc[idx, feature]:.3f})")
    # Drop leading zeros ("0.123" -> ".123") for compact table entries.
    the_str += ' & '.join([x.replace('0.', '.') for x in vals]) + '\\\\\n'
the_str += "\end{tabular}\n"
#print(f"Ratio: {ratio}")
print(the_str)
# %%
# NOTE(review): df built above has no 'scale' column (the scale is folded into
# the 'features' string), so this groupby likely raises KeyError -- confirm.
df.query('dataset == "mt2"').groupby(['features', 'layer', 'scale']).first()
# %%
# Boosting results, version 2
# %%
# Same query as above but with the temporal-downsampling aggregator.
api = wandb.Api()
runs = api.runs("pmin/train_fmri_convex.py",
                {"$and":
                 [
                     {"config.dataset": {"$in": ["pvc1-repeats", "pvc4", "mt1_norm_neutralbg", "mt2", "mst_norm_neutralbg"]}},
                     {"config.aggregator": "downsample_t"},
                     {"config.method": "boosting"},
                     {"config.features": {"$in": ["airsim_04", "MotionNet"]}},
                     {"state": "finished"},
                 ]
                 }
                )
archives = {}
print("Found %i" % len(runs))
for run in tqdm(runs):
    # SlowFast logs both pathways under one name; split them by layer index.
    if run.config['features'] == 'SlowFast':
        if run.config['layer'] > 16:
            run.config['features'] = 'SlowFast_Fast'
            run.config['layer'] = run.config['layer'] - 17
        else:
            run.config['features'] = 'SlowFast_Slow'
    sz = 112  # default resolution when no resize was logged
    if 'resize' in run.config:
        sz = run.config['resize']
    unique_name = f"{run.config['features']}_layer{int(run.config['layer']):02}_{run.config['dataset']}_{(run.config['subset'])}_{sz}"
    if unique_name in archives:
        # Deduplicate runs that share the same configuration.
        continue
    if 'corrs_report' in run.summary:
        maxr2_ = [1]  # default noise ceiling when none is tabulated
        if run.config['dataset'] in maxr2.keys():
            maxr2_ = [maxr2[run.config['dataset']][int(run.summary['subset'])]]
        archives[unique_name] = {'corrs_report': [run.summary['corrs_report']],
                                 'maxr2': maxr2_,
                                 'config': run.config,
                                 }
# %%
# Flatten the archived runs into rows (same pipeline as the sections above).
results = []
for k, v in archives.items():
    fam = families[v['config']['features']]
    try:
        # Prefer the trailing two digits of an explicit layer_name when present.
        layer_num = int(v['config']['layer_name'][-2:])
    except (KeyError, ValueError):
        layer_num = int(v['config']['layer'])
    friendly_name = friendly_names[v['config']['features']]
    if friendly_name not in whitelist.keys() or layer_num not in whitelist[friendly_name]:
        continue
    if v['corrs_report'][0] is None:
        # This happens when boosting chooses nothing
        v['corrs_report'][0] = 0.0
    sz = 112  # default input resolution when no explicit resize was logged
    if 'resize' in v['config']:
        sz = v['config']['resize']
    # Spatial scale relative to the native 112 px, formatted to 2 decimals.
    scale = f"{round((sz / 112) * 100) / 100:.2f}"
    results += [{'key': k,
                 'family': fam,
                 'dataset': dataset_names[v['config']['dataset']],
                 # i*100 offsets entries from different report slots so they
                 # do not collide -- presumably one subset per slot (verify).
                 'subset': int(v['config']['subset']) + i*100,
                 'features': f"{friendly_name}_{scale}",
                 'layer': layer_num,
                 'features_layer': f"{friendly_name}_{scale}_{layer_num:02}",
                 'report_corr': x,
                 # Noise ceiling floored at .1 to avoid dividing by tiny values.
                 'max_r2': np.sqrt(max([m, .1])),
                 'rnorm': x / np.sqrt(max([m, .1]))} for i, (x, m) in enumerate(zip(v['corrs_report'], v['maxr2']))]
df = pd.DataFrame(results)
df = df.sort_values('key')
nums = df.groupby(['dataset', 'features', 'layer']).subset.count().reset_index()
df = df.merge(nums, left_on=('dataset', 'features', 'layer'), right_on=('dataset', 'features', 'layer'), suffixes=('', '_overall'))
# Unlike the earlier sections, the neuron-count filter is disabled here.
#df = df[df.subset_overall.isin([23, 25, 36, 44, 84])]
#df.query('dataset == "mst"').groupby('features_layer').mean()
#df.query('features == "dorsalnet"')
#nums.query('dataset == "mst"')
#df.query('dataset == "mt2" and layer == 1 and features == "motionnet" and scale == 1.5')
# %%
#df_ = df.groupby(['dataset', 'features', 'layer']).rnorm.agg(['mean', 'sem', 'count'])
#df_.reset_index().groupby(['dataset', 'features'])['mean'].max()
datasets = df.dataset.unique()
datasets = ['pvc1', 'pvc4', 'mt1', 'mt2', 'mst']  # all five datasets here
df_ms = []
df_ss = []
df_zs = []
perf = []
for dataset in datasets:
    df_ = df.query(f'dataset == "{dataset}"')
    # Mean and SEM of normalized correlation per (features, layer).
    df_m = df_.pivot_table('rnorm', 'features', 'layer')
    df_s = df_.pivot_table('rnorm', 'features', 'layer', aggfunc='sem')
    # For each feature row, take the best layer's mean/SEM (diagonal trick).
    m = np.diag(df_m.loc[df_m.index, df_m.idxmax(axis=1)].values)
    s = np.diag(df_s.loc[df_s.index, df_m.idxmax(axis=1)].values)
    # Best overall features_layer by mean report correlation.
    series = df_.groupby(['features_layer']).mean().report_corr
    best = series.index[series.argmax()]
    # Paired comparison against the best model, matched on subset.
    df_ = pd.merge(df_, df_.query(f'features_layer == "{best}"')[['subset', 'rnorm']], left_on='subset', right_on='subset', suffixes=('', '_best'))
    df_['rnorm_delta'] = df_['rnorm'] - df_['rnorm_best']
    df_m_delta = df_.pivot_table('rnorm_delta', 'features', 'layer')
    # +1e-6 guards against division by a zero SEM below.
    df_s_delta = df_.pivot_table('rnorm_delta', 'features', 'layer', aggfunc='sem') + 1e-6
    # z-like score of each model's best layer relative to the best model.
    df_ = (df_m_delta / df_s_delta).max(axis=1)
    perf += [{'dataset': dataset,
              'features': f,
              'z': df_.loc[f],
              'm': mm,
              's': ss} for f, mm, ss in zip(df_m.index, m, s)]
df_m = pd.DataFrame(perf).pivot('features', 'dataset', 'm')
df_s = pd.DataFrame(perf).pivot('features', 'dataset', 's')
df_z = pd.DataFrame(perf).pivot('features', 'dataset', 'z')
#models = ['motionnet', 'dorsalnet']
#datasets = ['pvc1', 'pvc4', 'mt1', 'mt2', 'mst']
df_m = df_m[datasets]#.loc[models]
df_s = df_s[datasets]#.loc[models]
df_z = df_z[datasets]#.loc[models]
# %%
# Display the per-dataset best-layer means (notebook cell output).
df_m
# %%
# Emit a LaTeX tabular: one row per features index ("<scaling>_<model>"), one
# column per dataset; cells are "mean (sem)", bolded unless significantly
# worse than the best model (z < -1.96).
the_str = f"\\begin{{tabular}}{{{'l' * (df_m.shape[1] + 2)}}}\n\\toprule \n"
the_str += '{scaling} & {model} & ' + ' & '.join(df_m.columns.tolist()).replace('\\', '\\\\').replace('_', '\_') + '\\\\\n'
the_str += '\midrule \n'
for idx, row in df_m.iterrows():
    # Split the "<scaling>_<model>" index into the two leading columns.
    vals = [idx.split('_')[0], idx.split('_')[1]]
    for feature, v in row.items():
        if np.isnan(v):
            vals.append('-')
        else:
            if df_z.loc[idx, feature].item() < -1.96:
                vals.append(f"{v:.3f} ({df_s.loc[idx, feature]:.3f})")
            else:
                vals.append(f"\\textbf{{{v:.3f}}} ({df_s.loc[idx, feature]:.3f})")
    # Drop leading zeros ("0.123" -> ".123") for compact table entries.
    the_str += ' & '.join([x.replace('0.', '.') for x in vals]) + '\\\\\n'
the_str += "\end{tabular}\n"
#print(f"Ratio: {ratio}")
print(the_str)
| 36.763984
| 765
| 0.550523
| 6,946
| 53,896
| 4.110855
| 0.064354
| 0.010822
| 0.013448
| 0.019052
| 0.890908
| 0.885445
| 0.882293
| 0.879211
| 0.878861
| 0.875849
| 0
| 0.05046
| 0.241428
| 53,896
| 1,465
| 766
| 36.789079
| 0.647955
| 0.077316
| 0
| 0.874647
| 0
| 0.016023
| 0.242485
| 0.038313
| 0.006598
| 0
| 0
| 0
| 0
| 1
| 0.001885
| false
| 0
| 0.032988
| 0
| 0.036758
| 0.023563
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1b212412751f51cf3dec0f2f9cfa56ec33e668a5
| 217
|
py
|
Python
|
Trees/DFS_Preorder_traversal.py
|
IshGill/Leetcode-Guides
|
90b0f8e69e558926b3d47c988c663b9a4d1c845c
|
[
"Unlicense"
] | 6
|
2021-02-08T08:00:45.000Z
|
2021-09-29T11:08:40.000Z
|
Trees/DFS_Preorder_traversal.py
|
IshGill/Leetcode-Guides
|
90b0f8e69e558926b3d47c988c663b9a4d1c845c
|
[
"Unlicense"
] | 11
|
2021-02-19T08:56:32.000Z
|
2021-03-22T04:52:33.000Z
|
Trees/DFS_Preorder_traversal.py
|
IshGill/Leetcode-Guides
|
90b0f8e69e558926b3d47c988c663b9a4d1c845c
|
[
"Unlicense"
] | 3
|
2021-02-20T12:03:36.000Z
|
2021-03-22T13:19:30.000Z
|
# Preorder = print, left, right. DFS = moving down the tree.
def preorderTraversal(self, root):
    """Return the preorder (root, left, right) traversal of a binary tree.

    Args:
        root: a tree node exposing .val, .left and .right, or None.

    Returns:
        List of node values in preorder; [] for an empty tree.
    """
    # PEP 8: compare to None with `is`/`is not`, not `!=`.
    if root is None:
        return []
    return [root.val] + self.preorderTraversal(root.left) + self.preorderTraversal(root.right)
| 72.333333
| 118
| 0.718894
| 28
| 217
| 5.571429
| 0.642857
| 0.269231
| 0.320513
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.152074
| 217
| 3
| 118
| 72.333333
| 0.847826
| 0.267281
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
1b33f43fb4c3a536d2aa211aeac1445b37448b93
| 182
|
py
|
Python
|
count.py
|
oskopek/udi-05
|
aca88136150c977327324fc65a1a32bcdcd9bb95
|
[
"MIT"
] | null | null | null |
count.py
|
oskopek/udi-05
|
aca88136150c977327324fc65a1a32bcdcd9bb95
|
[
"MIT"
] | null | null | null |
count.py
|
oskopek/udi-05
|
aca88136150c977327324fc65a1a32bcdcd9bb95
|
[
"MIT"
] | null | null | null |
def count(matrix, element):
    """Return how many times `element` occurs in `matrix` (a list of rows).

    The original stub always returned 0, which only satisfied the
    "not in matrix" tests; this implements the actual count while remaining
    backward compatible (absent elements and empty matrices still yield 0).
    """
    # list.count does the per-row work in C; sum the rows.
    return sum(row.count(element) for row in matrix)
def test_count_not_in_matrix():
    """Absent elements (and empty matrices) must yield a count of zero."""
    cases = [
        ([], 0),
        ([[1], [2]], 3),
        ([[1, 2], [3, 4]], 5),
    ]
    for matrix, element in cases:
        assert 0 == count(matrix, element)
| 20.222222
| 41
| 0.549451
| 30
| 182
| 3.2
| 0.5
| 0.21875
| 0.375
| 0.270833
| 0.3125
| 0.3125
| 0
| 0
| 0
| 0
| 0
| 0.092857
| 0.230769
| 182
| 8
| 42
| 22.75
| 0.592857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0.333333
| false
| 0
| 0
| 0.166667
| 0.5
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 7
|
1b423da721ae20bb1d17ec2d41f63aa0cd766fcc
| 1,811
|
py
|
Python
|
day9-1.py
|
vicyyn/AdventOfCode
|
f5980ecbd958dc979a305992c5e4927f40dd5daf
|
[
"MIT"
] | null | null | null |
day9-1.py
|
vicyyn/AdventOfCode
|
f5980ecbd958dc979a305992c5e4927f40dd5daf
|
[
"MIT"
] | null | null | null |
day9-1.py
|
vicyyn/AdventOfCode
|
f5980ecbd958dc979a305992c5e4927f40dd5daf
|
[
"MIT"
] | null | null | null |
def low_point_risk_sum(grid):
    """Return the sum of risk levels (height + 1) over all low points.

    A low point is a cell strictly lower than every orthogonal neighbour
    (Advent of Code 2021, day 9 part 1).

    Args:
        grid: list of equal-length strings of digit characters.

    Returns:
        Total risk as an int (0 for an empty grid).
    """
    total = 0
    rows = len(grid)
    for j, line in enumerate(grid):
        cols = len(line)
        for i, ch in enumerate(line):
            height = int(ch)
            # Collect in-bounds orthogonal neighbours. This replaces the
            # original's nine duplicated corner/edge branches and fixes the
            # IndexError those branches raised on 1-row or 1-column grids.
            neighbours = []
            if i > 0:
                neighbours.append(int(line[i - 1]))
            if i < cols - 1:
                neighbours.append(int(line[i + 1]))
            if j > 0:
                neighbours.append(int(grid[j - 1][i]))
            if j < rows - 1:
                neighbours.append(int(grid[j + 1][i]))
            if all(height < n for n in neighbours):
                total += height + 1
    return total


# Guarded so importing this module no longer performs file I/O; running the
# script still reads input.txt and prints the answer exactly as before.
if __name__ == "__main__":
    with open("input.txt") as f:
        dat = [line.strip() for line in f.readlines()]
    print(low_point_risk_sum(dat))
| 32.339286
| 118
| 0.363335
| 323
| 1,811
| 2.037152
| 0.080495
| 0.200608
| 0.255319
| 0.227964
| 0.857143
| 0.857143
| 0.854103
| 0.854103
| 0.816109
| 0.808511
| 0
| 0.05138
| 0.419658
| 1,811
| 55
| 119
| 32.927273
| 0.574691
| 0
| 0
| 0.377778
| 0
| 0
| 0.004972
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.022222
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
1b755da5d25b7cfb3ea0072bb07dd25e472e42d5
| 20,735
|
py
|
Python
|
rev/brodyrev/solve.py
|
vidner/codepwnda-ctf
|
7e086044b753fe555b44395b79827d2f5b89da1d
|
[
"Unlicense"
] | 6
|
2021-02-18T15:07:55.000Z
|
2022-02-04T01:38:10.000Z
|
rev/brodyrev/solve.py
|
vidner/codepwnda-ctf
|
7e086044b753fe555b44395b79827d2f5b89da1d
|
[
"Unlicense"
] | null | null | null |
rev/brodyrev/solve.py
|
vidner/codepwnda-ctf
|
7e086044b753fe555b44395b79827d2f5b89da1d
|
[
"Unlicense"
] | null | null | null |
from z3 import *
def brute(x):
    """Placeholder for a brute-force attempt at offset *x*; does nothing yet."""
    return None
# Number of unknown flag characters to solve for.
LEN = 66
# z3 constraint solver instance that will hold the equations below.
s = Solver()
# One symbolic 32-bit value per flag character (z3 accepts an int as a symbol name).
a1 = [BitVec(i, 32) for i in range(LEN)]
# ========================================
a = [121, 123, 113, 131, 113, 57, 59, 123, 131, 105, 105, 131, 123, 131, 121, 57, 129, 131, 59, 131, 131, 121, 121, 57, 113, 121, 113, 123, 131, 57, 113, 113, 121, 131, 123, 59, 123, 113, 57, 123, 131, 121, 57, 129, 131, 107, 113, 131, 59, 121, 107, 131, 113, 121, 107, 123, 105, 123, 57, 121, 131, 57, 113, 131, 59, 131, 57, 91, 42, 60, 41, 89, 92, 91, 60, 74, 73, 60, 91, 60, 90, 89, 90, 60, 92, 59, 60, 89, 90, 89, 73, 89, 74, 92, 60, 90, 74, 73, 89, 60, 92, 92, 91, 73, 89, 92, 60, 90, 89, 90, 60, 75, 74, 60, 92, 90, 75, 60, 73, 90, 75, 91, 74, 92, 89, 89, 60, 90, 74, 60, 92, 59, 121, 127, 118, 136, 117, 61, 60, 123, 136, 106, 109, 136, 123, 136, 126, 57, 130, 136, 60, 131, 136, 125, 126, 57, 113, 125, 114, 128, 136, 62, 114, 113, 125, 136, 124, 60, 127, 117, 57, 124, 136, 126, 57, 130, 136, 111, 114, 136, 64, 126, 107, 136, 117, 126, 111, 123, 110, 128, 57, 121, 136, 62, 114, 136, 60, 131, 105, 137, 114, 114, 113, 73, 74, 137, 114, 138, 137, 114, 137, 114, 138, 73, 146, 114, 74, 113, 114, 137, 138, 73, 145, 137, 146, 138, 114, 74, 146, 145, 137, 114, 138, 74, 137, 145, 73, 138, 114, 138, 73, 146, 114, 137, 146, 114, 74, 138, 137, 114, 145, 138, 137, 137, 138, 138, 73, 137, 114, 74, 146, 114, 74, 113, 57, 93, 46, 62, 45, 93, 90, 89, 62, 74, 77, 62, 89, 62, 94, 89, 90, 62, 90, 57, 62, 93, 94, 89, 73, 93, 74, 94, 62, 94, 74, 73, 93, 62, 90, 90, 93, 77, 89, 90, 62, 94, 89, 90, 62, 77, 74, 62, 94, 94, 73, 62, 77, 94, 77, 89, 78, 94, 89, 89, 62, 94, 74, 62, 90, 57, 121, 127, 118, 136, 117, 61, 60, 123, 136, 106, 109, 136, 123, 136, 126, 57, 130, 136, 60, 131, 136, 125, 126, 57, 113, 125, 114, 128, 136, 62, 114, 113, 125, 136, 124, 60, 127, 117, 57, 124, 136, 126, 57, 130, 136, 111, 114, 136, 64, 126, 107, 136, 117, 126, 111, 123, 110, 128, 57, 121, 136, 62, 114, 136, 60, 131, 121, 153, 105, 121, 105, 89, 89, 153, 121, 137, 137, 121, 153, 121, 153, 89, 153, 121, 89, 121, 121, 153, 153, 89, 137, 153, 137, 153, 121, 89, 137, 137, 153, 121, 153, 89, 153, 137, 89, 153, 121, 
153, 89, 153, 121, 137, 137, 121, 89, 153, 137, 121, 137, 153, 137, 153, 137, 153, 89, 153, 121, 89, 137, 121, 89, 121, 57, 89, 41, 57, 41, 89, 89, 89, 57, 73, 73, 57, 89, 57, 89, 89, 89, 57, 89, 57, 57, 89, 89, 89, 73, 89, 73, 89, 57, 89, 73, 73, 89, 57, 89, 89, 89, 73, 89, 89, 57, 89, 89, 89, 57, 73, 73, 57, 89, 89, 73, 57, 73, 89, 73, 89, 73, 89, 89, 89, 57, 89, 73, 57, 89, 57, 121, 159, 110, 128, 109, 93, 92, 155, 128, 138, 141, 128, 155, 128, 158, 89, 154, 128, 92, 123, 128, 157, 158, 89, 137, 157, 138, 160, 128, 94, 138, 137, 157, 128, 156, 92, 159, 141, 89, 156, 128, 158, 89, 154, 128, 143, 138, 128, 96, 158, 139, 128, 141, 158, 143, 155, 142, 160, 89, 153, 128, 94, 138, 128, 92, 123, 105, 141, 110, 110, 109, 77, 74, 137, 110, 138, 141, 110, 137, 110, 142, 73, 138, 110, 74, 105, 110, 141, 142, 73, 137, 141, 138, 142, 110, 78, 138, 137, 141, 110, 138, 74, 141, 141, 73, 138, 110, 142, 73, 138, 110, 141, 138, 110, 78, 142, 137, 110, 141, 142, 141, 137, 142, 142, 73, 137, 110, 78, 138, 110, 74, 105, 121, 155, 105, 123, 105, 89, 91, 155, 123, 137, 137, 123, 155, 123, 153, 89, 153, 123, 91, 123, 123, 153, 153, 89, 137, 153, 137, 155, 123, 89, 137, 137, 153, 123, 155, 91, 155, 137, 89, 155, 123, 153, 89, 153, 123, 139, 137, 123, 91, 153, 139, 123, 137, 153, 139, 155, 137, 155, 89, 153, 123, 89, 137, 123, 91, 123, 105, 143, 109, 111, 109, 77, 75, 139, 111, 137, 141, 111, 139, 111, 141, 73, 137, 111, 75, 107, 111, 141, 141, 73, 137, 141, 137, 143, 111, 77, 137, 137, 141, 111, 139, 75, 143, 141, 73, 139, 111, 141, 73, 137, 111, 143, 137, 111, 79, 141, 139, 111, 141, 141, 143, 139, 141, 143, 73, 137, 111, 77, 137, 111, 75, 107, 121, 157, 110, 126, 109, 93, 90, 153, 126, 138, 141, 126, 153, 126, 158, 89, 154, 126, 90, 121, 126, 157, 158, 89, 137, 157, 138, 158, 126, 94, 138, 137, 157, 126, 154, 90, 157, 141, 89, 154, 126, 158, 89, 154, 126, 141, 138, 126, 94, 158, 137, 126, 141, 158, 141, 153, 142, 158, 89, 153, 126, 94, 138, 126, 90, 121, 105, 141, 117, 117, 117, 77, 
73, 137, 117, 137, 141, 117, 137, 117, 141, 73, 145, 117, 73, 113, 117, 141, 141, 73, 145, 141, 145, 141, 117, 77, 145, 145, 141, 117, 137, 73, 141, 149, 73, 137, 117, 141, 73, 145, 117, 141, 145, 117, 77, 141, 137, 117, 149, 141, 141, 137, 141, 141, 73, 137, 117, 77, 145, 117, 73, 113, 121, 127, 118, 136, 117, 61, 60, 123, 136, 106, 109, 136, 123, 136, 126, 57, 130, 136, 60, 131, 136, 125, 126, 57, 113, 125, 114, 128, 136, 62, 114, 113, 125, 136, 124, 60, 127, 117, 57, 124, 136, 126, 57, 130, 136, 111, 114, 136, 64, 126, 107, 136, 117, 126, 111, 123, 110, 128, 57, 121, 136, 62, 114, 136, 60, 131, 105, 139, 105, 107, 105, 73, 75, 139, 107, 137, 137, 107, 139, 107, 137, 73, 137, 107, 75, 107, 107, 137, 137, 73, 137, 137, 137, 139, 107, 73, 137, 137, 137, 107, 139, 75, 139, 137, 73, 139, 107, 137, 73, 137, 107, 139, 137, 107, 75, 137, 139, 107, 137, 137, 139, 139, 137, 139, 73, 137, 107, 73, 137, 107, 75, 107, 121, 157, 110, 126, 109, 93, 90, 153, 126, 138, 141, 126, 153, 126, 158, 89, 154, 126, 90, 121, 126, 157, 158, 89, 137, 157, 138, 158, 126, 94, 138, 137, 157, 126, 154, 90, 157, 141, 89, 154, 126, 158, 89, 154, 126, 141, 138, 126, 94, 158, 137, 126, 141, 158, 141, 153, 142, 158, 89, 153, 126, 94, 138, 126, 90, 121, 57, 95, 46, 64, 45, 93, 92, 91, 64, 74, 77, 64, 91, 64, 94, 89, 90, 64, 92, 59, 64, 93, 94, 89, 73, 93, 74, 96, 64, 94, 74, 73, 93, 64, 92, 92, 95, 77, 89, 92, 64, 94, 89, 90, 64, 79, 74, 64, 96, 94, 75, 64, 77, 94, 79, 91, 78, 96, 89, 89, 64, 94, 74, 64, 92, 59, 121, 127, 118, 136, 117, 61, 60, 123, 136, 106, 109, 136, 123, 136, 126, 57, 130, 136, 60, 131, 136, 125, 126, 57, 113, 125, 114, 128, 136, 62, 114, 113, 125, 136, 124, 60, 127, 117, 57, 124, 136, 126, 57, 130, 136, 111, 114, 136, 64, 126, 107, 136, 117, 126, 111, 123, 110, 128, 57, 121, 136, 62, 114, 136, 60, 131, 105, 137, 114, 114, 113, 73, 74, 137, 114, 138, 137, 114, 137, 114, 138, 73, 146, 114, 74, 113, 114, 137, 138, 73, 145, 137, 146, 138, 114, 74, 146, 145, 137, 114, 138, 74, 137, 
145, 73, 138, 114, 138, 73, 146, 114, 137, 146, 114, 74, 138, 137, 114, 145, 138, 137, 137, 138, 138, 73, 137, 114, 74, 146, 114, 74, 113, 105, 143, 109, 111, 109, 77, 75, 139, 111, 137, 141, 111, 139, 111, 141, 73, 137, 111, 75, 107, 111, 141, 141, 73, 137, 141, 137, 143, 111, 77, 137, 137, 141, 111, 139, 75, 143, 141, 73, 139, 111, 141, 73, 137, 111, 143, 137, 111, 79, 141, 139, 111, 141, 141, 143, 139, 141, 143, 73, 137, 111, 77, 137, 111, 75, 107, 121, 127, 118, 136, 117, 61, 60, 123, 136, 106, 109, 136, 123, 136, 126, 57, 130, 136, 60, 131, 136, 125, 126, 57, 113, 125, 114, 128, 136, 62, 114, 113, 125, 136, 124, 60, 127, 117, 57, 124, 136, 126, 57, 130, 136, 111, 114, 136, 64, 126, 107, 136, 117, 126, 111, 123, 110, 128, 57, 121, 136, 62, 114, 136, 60, 131, 121, 153, 114, 130, 113, 89, 90, 153, 130, 138, 137, 130, 153, 130, 154, 89, 162, 130, 90, 129, 130, 153, 154, 89, 145, 153, 146, 154, 130, 90, 146, 145, 153, 130, 154, 90, 153, 145, 89, 154, 130, 154, 89, 162, 130, 137, 146, 130, 90, 154, 137, 130, 145, 154, 137, 153, 138, 154, 89, 153, 130, 90, 146, 130, 90, 129, 57, 89, 41, 57, 41, 89, 89, 89, 57, 73, 73, 57, 89, 57, 89, 89, 89, 57, 89, 57, 57, 89, 89, 89, 73, 89, 73, 89, 57, 89, 73, 73, 89, 57, 89, 89, 89, 73, 89, 89, 57, 89, 89, 89, 57, 73, 73, 57, 89, 89, 73, 57, 73, 89, 73, 89, 73, 89, 89, 89, 57, 89, 73, 57, 89, 57, 121, 157, 110, 126, 109, 93, 90, 153, 126, 138, 141, 126, 153, 126, 158, 89, 154, 126, 90, 121, 126, 157, 158, 89, 137, 157, 138, 158, 126, 94, 138, 137, 157, 126, 154, 90, 157, 141, 89, 154, 126, 158, 89, 154, 126, 141, 138, 126, 94, 158, 137, 126, 141, 158, 141, 153, 142, 158, 89, 153, 126, 94, 138, 126, 90, 121, 121, 127, 118, 136, 117, 61, 60, 123, 136, 106, 109, 136, 123, 136, 126, 57, 130, 136, 60, 131, 136, 125, 126, 57, 113, 125, 114, 128, 136, 62, 114, 113, 125, 136, 124, 60, 127, 117, 57, 124, 136, 126, 57, 130, 136, 111, 114, 136, 64, 126, 107, 136, 117, 126, 111, 123, 110, 128, 57, 121, 136, 62, 114, 136, 60, 131, 121, 155, 
106, 124, 105, 89, 92, 155, 124, 138, 137, 124, 155, 124, 154, 89, 154, 124, 92, 123, 124, 153, 154, 89, 137, 153, 138, 156, 124, 90, 138, 137, 153, 124, 156, 92, 155, 137, 89, 156, 124, 154, 89, 154, 124, 139, 138, 124, 92, 154, 139, 124, 137, 154, 139, 155, 138, 156, 89, 153, 124, 90, 138, 124, 92, 123, 57, 89, 41, 57, 41, 89, 89, 89, 57, 73, 73, 57, 89, 57, 89, 89, 89, 57, 89, 57, 57, 89, 89, 89, 73, 89, 73, 89, 57, 89, 73, 73, 89, 57, 89, 89, 89, 73, 89, 89, 57, 89, 89, 89, 57, 73, 73, 57, 89, 89, 73, 57, 73, 89, 73, 89, 73, 89, 89, 89, 57, 89, 73, 57, 89, 57, 105, 141, 117, 117, 117, 77, 73, 137, 117, 137, 141, 117, 137, 117, 141, 73, 145, 117, 73, 113, 117, 141, 141, 73, 145, 141, 145, 141, 117, 77, 145, 145, 141, 117, 137, 73, 141, 149, 73, 137, 117, 141, 73, 145, 117, 141, 145, 117, 77, 141, 137, 117, 149, 141, 141, 137, 141, 141, 73, 137, 117, 77, 145, 117, 73, 113, 121, 159, 109, 127, 109, 93, 91, 155, 127, 137, 141, 127, 155, 127, 157, 89, 153, 127, 91, 123, 127, 157, 157, 89, 137, 157, 137, 159, 127, 93, 137, 137, 157, 127, 155, 91, 159, 141, 89, 155, 127, 157, 89, 153, 127, 143, 137, 127, 95, 157, 139, 127, 141, 157, 143, 155, 141, 159, 89, 153, 127, 93, 137, 127, 91, 123, 57, 91, 42, 60, 41, 89, 92, 91, 60, 74, 73, 60, 91, 60, 90, 89, 90, 60, 92, 59, 60, 89, 90, 89, 73, 89, 74, 92, 60, 90, 74, 73, 89, 60, 92, 92, 91, 73, 89, 92, 60, 90, 89, 90, 60, 75, 74, 60, 92, 90, 75, 60, 73, 90, 75, 91, 74, 92, 89, 89, 60, 90, 74, 60, 92, 59, 121, 155, 106, 124, 105, 89, 92, 155, 124, 138, 137, 124, 155, 124, 154, 89, 154, 124, 92, 123, 124, 153, 154, 89, 137, 153, 138, 156, 124, 90, 138, 137, 153, 124, 156, 92, 155, 137, 89, 156, 124, 154, 89, 154, 124, 139, 138, 124, 92, 154, 139, 124, 137, 154, 139, 155, 138, 156, 89, 153, 124, 90, 138, 124, 92, 123, 121, 127, 118, 136, 117, 61, 60, 123, 136, 106, 109, 136, 123, 136, 126, 57, 130, 136, 60, 131, 136, 125, 126, 57, 113, 125, 114, 128, 136, 62, 114, 113, 125, 136, 124, 60, 127, 117, 57, 124, 136, 126, 57, 130, 
136, 111, 114, 136, 64, 126, 107, 136, 117, 126, 111, 123, 110, 128, 57, 121, 136, 62, 114, 136, 60, 131, 121, 157, 109, 125, 109, 93, 89, 153, 125, 137, 141, 125, 153, 125, 157, 89, 153, 125, 89, 121, 125, 157, 157, 89, 137, 157, 137, 157, 125, 93, 137, 137, 157, 125, 153, 89, 157, 141, 89, 153, 125, 157, 89, 153, 125, 141, 137, 125, 93, 157, 137, 125, 141, 157, 141, 153, 141, 157, 89, 153, 125, 93, 137, 125, 89, 121, 105, 137, 113, 113, 113, 73, 73, 137, 113, 137, 137, 113, 137, 113, 137, 73, 145, 113, 73, 113, 113, 137, 137, 73, 145, 137, 145, 137, 113, 73, 145, 145, 137, 113, 137, 73, 137, 145, 73, 137, 113, 137, 73, 145, 113, 137, 145, 113, 73, 137, 137, 113, 145, 137, 137, 137, 137, 137, 73, 137, 113, 73, 145, 113, 73, 113, 105, 137, 114, 114, 113, 73, 74, 137, 114, 138, 137, 114, 137, 114, 138, 73, 146, 114, 74, 113, 114, 137, 138, 73, 145, 137, 146, 138, 114, 74, 146, 145, 137, 114, 138, 74, 137, 145, 73, 138, 114, 138, 73, 146, 114, 137, 146, 114, 74, 138, 137, 114, 145, 138, 137, 137, 138, 138, 73, 137, 114, 74, 146, 114, 74, 113, 57, 93, 46, 62, 45, 93, 90, 89, 62, 74, 77, 62, 89, 62, 94, 89, 90, 62, 90, 57, 62, 93, 94, 89, 73, 93, 74, 94, 62, 94, 74, 73, 93, 62, 90, 90, 93, 77, 89, 90, 62, 94, 89, 90, 62, 77, 74, 62, 94, 94, 73, 62, 77, 94, 77, 89, 78, 94, 89, 89, 62, 94, 74, 62, 90, 57, 121, 127, 118, 136, 117, 61, 60, 123, 136, 106, 109, 136, 123, 136, 126, 57, 130, 136, 60, 131, 136, 125, 126, 57, 113, 125, 114, 128, 136, 62, 114, 113, 125, 136, 124, 60, 127, 117, 57, 124, 136, 126, 57, 130, 136, 111, 114, 136, 64, 126, 107, 136, 117, 126, 111, 123, 110, 128, 57, 121, 136, 62, 114, 136, 60, 131, 121, 159, 110, 128, 109, 93, 92, 155, 128, 138, 141, 128, 155, 128, 158, 89, 154, 128, 92, 123, 128, 157, 158, 89, 137, 157, 138, 160, 128, 94, 138, 137, 157, 128, 156, 92, 159, 141, 89, 156, 128, 158, 89, 154, 128, 143, 138, 128, 96, 158, 139, 128, 141, 158, 143, 155, 142, 160, 89, 153, 128, 94, 138, 128, 92, 123, 105, 137, 114, 114, 113, 73, 74, 137, 114, 
138, 137, 114, 137, 114, 138, 73, 146, 114, 74, 113, 114, 137, 138, 73, 145, 137, 146, 138, 114, 74, 146, 145, 137, 114, 138, 74, 137, 145, 73, 138, 114, 138, 73, 146, 114, 137, 146, 114, 74, 138, 137, 114, 145, 138, 137, 137, 138, 138, 73, 137, 114, 74, 146, 114, 74, 113, 121, 157, 109, 125, 109, 93, 89, 153, 125, 137, 141, 125, 153, 125, 157, 89, 153, 125, 89, 121, 125, 157, 157, 89, 137, 157, 137, 157, 125, 93, 137, 137, 157, 125, 153, 89, 157, 141, 89, 153, 125, 157, 89, 153, 125, 141, 137, 125, 93, 157, 137, 125, 141, 157, 141, 153, 141, 157, 89, 153, 125, 93, 137, 125, 89, 121, 105, 137, 113, 113, 113, 73, 73, 137, 113, 137, 137, 113, 137, 113, 137, 73, 145, 113, 73, 113, 113, 137, 137, 73, 145, 137, 145, 137, 113, 73, 145, 145, 137, 113, 137, 73, 137, 145, 73, 137, 113, 137, 73, 145, 113, 137, 145, 113, 73, 137, 137, 113, 145, 137, 137, 137, 137, 137, 73, 137, 113, 73, 145, 113, 73, 113, 57, 89, 41, 57, 41, 89, 89, 89, 57, 73, 73, 57, 89, 57, 89, 89, 89, 57, 89, 57, 57, 89, 89, 89, 73, 89, 73, 89, 57, 89, 73, 73, 89, 57, 89, 89, 89, 73, 89, 89, 57, 89, 89, 89, 57, 73, 73, 57, 89, 89, 73, 57, 73, 89, 73, 89, 73, 89, 89, 89, 57, 89, 73, 57, 89, 57, 121, 157, 110, 126, 109, 93, 90, 153, 126, 138, 141, 126, 153, 126, 158, 89, 154, 126, 90, 121, 126, 157, 158, 89, 137, 157, 138, 158, 126, 94, 138, 137, 157, 126, 154, 90, 157, 141, 89, 154, 126, 158, 89, 154, 126, 141, 138, 126, 94, 158, 137, 126, 141, 158, 141, 153, 142, 158, 89, 153, 126, 94, 138, 126, 90, 121, 121, 157, 109, 125, 109, 93, 89, 153, 125, 137, 141, 125, 153, 125, 157, 89, 153, 125, 89, 121, 125, 157, 157, 89, 137, 157, 137, 157, 125, 93, 137, 137, 157, 125, 153, 89, 157, 141, 89, 153, 125, 157, 89, 153, 125, 141, 137, 125, 93, 157, 137, 125, 141, 157, 141, 153, 141, 157, 89, 153, 125, 93, 137, 125, 89, 121, 121, 127, 118, 136, 117, 61, 60, 123, 136, 106, 109, 136, 123, 136, 126, 57, 130, 136, 60, 131, 136, 125, 126, 57, 113, 125, 114, 128, 136, 62, 114, 113, 125, 136, 124, 60, 127, 117, 57, 124, 
136, 126, 57, 130, 136, 111, 114, 136, 64, 126, 107, 136, 117, 126, 111, 123, 110, 128, 57, 121, 136, 62, 114, 136, 60, 131, 121, 123, 113, 131, 113, 57, 59, 123, 131, 105, 105, 131, 123, 131, 121, 57, 129, 131, 59, 131, 131, 121, 121, 57, 113, 121, 113, 123, 131, 57, 113, 113, 121, 131, 123, 59, 123, 113, 57, 123, 131, 121, 57, 129, 131, 107, 113, 131, 59, 121, 107, 131, 113, 121, 107, 123, 105, 123, 57, 121, 131, 57, 113, 131, 59, 131, 57, 91, 42, 60, 41, 89, 92, 91, 60, 74, 73, 60, 91, 60, 90, 89, 90, 60, 92, 59, 60, 89, 90, 89, 73, 89, 74, 92, 60, 90, 74, 73, 89, 60, 92, 92, 91, 73, 89, 92, 60, 90, 89, 90, 60, 75, 74, 60, 92, 90, 75, 60, 73, 90, 75, 91, 74, 92, 89, 89, 60, 90, 74, 60, 92, 59, 121, 127, 118, 136, 117, 61, 60, 123, 136, 106, 109, 136, 123, 136, 126, 57, 130, 136, 60, 131, 136, 125, 126, 57, 113, 125, 114, 128, 136, 62, 114, 113, 125, 136, 124, 60, 127, 117, 57, 124, 136, 126, 57, 130, 136, 111, 114, 136, 64, 126, 107, 136, 117, 126, 111, 123, 110, 128, 57, 121, 136, 62, 114, 136, 60, 131, 121, 153, 114, 130, 113, 89, 90, 153, 130, 138, 137, 130, 153, 130, 154, 89, 162, 130, 90, 129, 130, 153, 154, 89, 145, 153, 146, 154, 130, 90, 146, 145, 153, 130, 154, 90, 153, 145, 89, 154, 130, 154, 89, 162, 130, 137, 146, 130, 90, 154, 137, 130, 145, 154, 137, 153, 138, 154, 89, 153, 130, 90, 146, 130, 90, 129, 57, 89, 41, 57, 41, 89, 89, 89, 57, 73, 73, 57, 89, 57, 89, 89, 89, 57, 89, 57, 57, 89, 89, 89, 73, 89, 73, 89, 57, 89, 73, 73, 89, 57, 89, 89, 89, 73, 89, 89, 57, 89, 89, 89, 57, 73, 73, 57, 89, 89, 73, 57, 73, 89, 73, 89, 73, 89, 89, 89, 57, 89, 73, 57, 89, 57, 121, 157, 110, 126, 109, 93, 90, 153, 126, 138, 141, 126, 153, 126, 158, 89, 154, 126, 90, 121, 126, 157, 158, 89, 137, 157, 138, 158, 126, 94, 138, 137, 157, 126, 154, 90, 157, 141, 89, 154, 126, 158, 89, 154, 126, 141, 138, 126, 94, 158, 137, 126, 141, 158, 141, 153, 142, 158, 89, 153, 126, 94, 138, 126, 90, 121, 121, 127, 118, 136, 117, 61, 60, 123, 136, 106, 109, 136, 123, 136, 126, 57, 
130, 136, 60, 131, 136, 125, 126, 57, 113, 125, 114, 128, 136, 62, 114, 113, 125, 136, 124, 60, 127, 117, 57, 124, 136, 126, 57, 130, 136, 111, 114, 136, 64, 126, 107, 136, 117, 126, 111, 123, 110, 128, 57, 121, 136, 62, 114, 136, 60, 131, 121, 155, 105, 123, 105, 89, 91, 155, 123, 137, 137, 123, 155, 123, 153, 89, 153, 123, 91, 123, 123, 153, 153, 89, 137, 153, 137, 155, 123, 89, 137, 137, 153, 123, 155, 91, 155, 137, 89, 155, 123, 153, 89, 153, 123, 139, 137, 123, 91, 153, 139, 123, 137, 153, 139, 155, 137, 155, 89, 153, 123, 89, 137, 123, 91, 123, 121, 127, 118, 136, 117, 61, 60, 123, 136, 106, 109, 136, 123, 136, 126, 57, 130, 136, 60, 131, 136, 125, 126, 57, 113, 125, 114, 128, 136, 62, 114, 113, 125, 136, 124, 60, 127, 117, 57, 124, 136, 126, 57, 130, 136, 111, 114, 136, 64, 126, 107, 136, 117, 126, 111, 123, 110, 128, 57, 121, 136, 62, 114, 136, 60, 131, 105, 141, 109, 109, 109, 77, 73, 137, 109, 137, 141, 109, 137, 109, 141, 73, 137, 109, 73, 105, 109, 141, 141, 73, 137, 141, 137, 141, 109, 77, 137, 137, 141, 109, 137, 73, 141, 141, 73, 137, 109, 141, 73, 137, 109, 141, 137, 109, 77, 141, 137, 109, 141, 141, 141, 137, 141, 141, 73, 137, 109, 77, 137, 109, 73, 105, 105, 137, 106, 106, 105, 73, 74, 137, 106, 138, 137, 106, 137, 106, 138, 73, 138, 106, 74, 105, 106, 137, 138, 73, 137, 137, 138, 138, 106, 74, 138, 137, 137, 106, 138, 74, 137, 137, 73, 138, 106, 138, 73, 138, 106, 137, 138, 106, 74, 138, 137, 106, 137, 138, 137, 137, 138, 138, 73, 137, 106, 74, 138, 106, 74, 105, 121, 127, 118, 136, 117, 61, 60, 123, 136, 106, 109, 136, 123, 136, 126, 57, 130, 136, 60, 131, 136, 125, 126, 57, 113, 125, 114, 128, 136, 62, 114, 113, 125, 136, 124, 60, 127, 117, 57, 124, 136, 126, 57, 130, 136, 111, 114, 136, 64, 126, 107, 136, 117, 126, 111, 123, 110, 128, 57, 121, 136, 62, 114, 136, 60, 131, 121, 155, 105, 123, 105, 89, 91, 155, 123, 137, 137, 123, 155, 123, 153, 89, 153, 123, 91, 123, 123, 153, 153, 89, 137, 153, 137, 155, 123, 89, 137, 137, 153, 123, 155, 91, 
155, 137, 89, 155, 123, 153, 89, 153, 123, 139, 137, 123, 91, 153, 139, 123, 137, 153, 139, 155, 137, 155, 89, 153, 123, 89, 137, 123, 91, 123, 57, 91, 42, 60, 41, 89, 92, 91, 60, 74, 73, 60, 91, 60, 90, 89, 90, 60, 92, 59, 60, 89, 90, 89, 73, 89, 74, 92, 60, 90, 74, 73, 89, 60, 92, 92, 91, 73, 89, 92, 60, 90, 89, 90, 60, 75, 74, 60, 92, 90, 75, 60, 73, 90, 75, 91, 74, 92, 89, 89, 60, 90, 74, 60, 92, 59, 57, 93, 45, 61, 45, 93, 89, 89, 61, 73, 77, 61, 89, 61, 93, 89, 89, 61, 89, 57, 61, 93, 93, 89, 73, 93, 73, 93, 61, 93, 73, 73, 93, 61, 89, 89, 93, 77, 89, 89, 61, 93, 89, 89, 61, 77, 73, 61, 93, 93, 73, 61, 77, 93, 77, 89, 77, 93, 89, 89, 61, 93, 73, 61, 89, 57, 105, 109, 117, 117, 117, 45, 41, 105, 117, 105, 109, 117, 105, 117, 109, 41, 113, 117, 41, 113, 117, 109, 109, 41, 113, 109, 113, 109, 117, 45, 113, 113, 109, 117, 105, 41, 109, 117, 41, 105, 117, 109, 41, 113, 117, 109, 113, 117, 45, 109, 105, 117, 117, 109, 109, 105, 109, 109, 41, 105, 117, 45, 113, 117, 41, 113, 121, 127, 118, 136, 117, 61, 60, 123, 136, 106, 109, 136, 123, 136, 126, 57, 130, 136, 60, 131, 136, 125, 126, 57, 113, 125, 114, 128, 136, 62, 114, 113, 125, 136, 124, 60, 127, 117, 57, 124, 136, 126, 57, 130, 136, 111, 114, 136, 64, 126, 107, 136, 117, 126, 111, 123, 110, 128, 57, 121, 136, 62, 114, 136, 60, 131, 105, 109, 118, 118, 117, 45, 42, 105, 118, 106, 109, 118, 105, 118, 110, 41, 114, 118, 42, 113, 118, 109, 110, 41, 113, 109, 114, 110, 118, 46, 114, 113, 109, 118, 106, 42, 109, 117, 41, 106, 118, 110, 41, 114, 118, 109, 114, 118, 46, 110, 105, 118, 117, 110, 109, 105, 110, 110, 41, 105, 118, 46, 114, 118, 42, 113, 121, 159, 109, 127, 109, 93, 91, 155, 127, 137, 141, 127, 155, 127, 157, 89, 153, 127, 91, 123, 127, 157, 157, 89, 137, 157, 137, 159, 127, 93, 137, 137, 157, 127, 155, 91, 159, 141, 89, 155, 127, 157, 89, 153, 127, 143, 137, 127, 95, 157, 139, 127, 141, 157, 143, 155, 141, 159, 89, 153, 127, 93, 137, 127, 91, 123, 121, 121, 105, 121, 105, 57, 57, 121, 121, 105, 105, 121, 
121, 121, 121, 57, 121, 121, 57, 121, 121, 121, 121, 57, 105, 121, 105, 121, 121, 57, 105, 105, 121, 121, 121, 57, 121, 105, 57, 121, 121, 121, 57, 121, 121, 105, 105, 121, 57, 121, 105, 121, 105, 121, 105, 121, 105, 121, 57, 121, 121, 57, 105, 121, 57, 121]
# Build one constraint per ordered pair (i, j): the bitwise AND of two
# symbolic flag characters, plus an offset, must equal the next captured
# value from the `a` table.
# NOTE(review): `x` is not defined at this point in the file — presumably it
# was meant to be the brute-forced offset that `brute()` / the final
# `for i in range(256)` loop supplies; verify before running.
# NOTE(review): source indentation was lost in extraction; the two `for`
# headers and the two body lines below need re-indenting to be valid Python.
counter = 0
for i in range(LEN):
for j in range(LEN-1,-1,-1):
s.add((a1[i]&a1[j])+x==a[counter])
counter+=1
# ========================================
# If the constraint system is satisfiable, decode each symbolic character
# from the model and print the recovered string, then stop.
if s.check() == sat:
    solution = s.model()
    print("".join(chr(solution[sym].as_long()) for sym in a1))
    exit()
# ========================================
# Try every possible single-byte offset.
for offset in range(256):
    brute(offset)
| 797.5
| 20,238
| 0.565662
| 4,429
| 20,735
| 2.648002
| 0.022127
| 0.028649
| 0.015348
| 0.026262
| 0.890433
| 0.86707
| 0.842173
| 0.83646
| 0.83646
| 0.83646
| 0
| 0.708374
| 0.214468
| 20,735
| 26
| 20,239
| 797.5
| 0.011665
| 0.005884
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0.055556
| 0.055556
| 0
| 0.111111
| 0.055556
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 11
|
1b81cf99ed85dccb1a5c581d4ed8fccab75890b2
| 137
|
py
|
Python
|
harrison/__init__.py
|
metabolize/harrison
|
0d0f26fda1947785ee7a00a8a7bf5b6a95e06372
|
[
"BSD-2-Clause"
] | 4
|
2019-10-02T03:23:04.000Z
|
2021-01-26T04:25:06.000Z
|
harrison/__init__.py
|
metabolize/harrison
|
0d0f26fda1947785ee7a00a8a7bf5b6a95e06372
|
[
"BSD-2-Clause"
] | 31
|
2019-08-29T17:13:06.000Z
|
2021-06-25T15:25:18.000Z
|
harrison/__init__.py
|
metabolize/harrison
|
0d0f26fda1947785ee7a00a8a7bf5b6a95e06372
|
[
"BSD-2-Clause"
] | 1
|
2017-10-24T23:24:48.000Z
|
2017-10-24T23:24:48.000Z
|
from .package_version import __version__ # noqa: F401
from .profile import profile # noqa: F401
from .timer import Timer # noqa: F401
| 34.25
| 54
| 0.759124
| 19
| 137
| 5.210526
| 0.421053
| 0.242424
| 0.242424
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.079646
| 0.175182
| 137
| 3
| 55
| 45.666667
| 0.79646
| 0.233577
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
1b837656f3afd006d4c614dbf7de8e8cb482f25d
| 127
|
py
|
Python
|
omnilearn/community/__init__.py
|
fleeb24/foundation
|
18c4179cfe2988267827e532f8d8cd0726ef8709
|
[
"MIT"
] | 1
|
2020-10-08T21:33:58.000Z
|
2020-10-08T21:33:58.000Z
|
omnilearn/community/__init__.py
|
felixludos/foundation
|
62ac096e6c53e12f2e29480506687c652c399d50
|
[
"MIT"
] | null | null | null |
omnilearn/community/__init__.py
|
felixludos/foundation
|
62ac096e6c53e12f2e29480506687c652c399d50
|
[
"MIT"
] | null | null | null |
# from . import fid
# from .metric import Metric, MetricBase
# from .stats import elbo, bits_per_dim
from . import fid
| 18.142857
| 41
| 0.700787
| 18
| 127
| 4.833333
| 0.555556
| 0.229885
| 0.298851
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.228346
| 127
| 6
| 42
| 21.166667
| 0.887755
| 0.740157
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
1b9b0127dee117ed12cf11b16d297add3cb88938
| 91
|
py
|
Python
|
autofit/example/__init__.py
|
caoxiaoyue/PyAutoFit
|
819cd2acc8d4069497a161c3bb6048128e44d828
|
[
"MIT"
] | null | null | null |
autofit/example/__init__.py
|
caoxiaoyue/PyAutoFit
|
819cd2acc8d4069497a161c3bb6048128e44d828
|
[
"MIT"
] | null | null | null |
autofit/example/__init__.py
|
caoxiaoyue/PyAutoFit
|
819cd2acc8d4069497a161c3bb6048128e44d828
|
[
"MIT"
] | null | null | null |
from .analysis import Analysis
from .model import Gaussian
from .model import Exponential
| 30.333333
| 31
| 0.824176
| 12
| 91
| 6.25
| 0.5
| 0.24
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 91
| 3
| 32
| 30.333333
| 0.961538
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
9413ea11150afe22d7de2df1677a408eec7fccc7
| 47
|
py
|
Python
|
Basics/ecommerce/shipping.py
|
caseysalvador/Python
|
19bc762a123e98ebbac427c69ce58925e507b045
|
[
"MIT"
] | null | null | null |
Basics/ecommerce/shipping.py
|
caseysalvador/Python
|
19bc762a123e98ebbac427c69ce58925e507b045
|
[
"MIT"
] | 9
|
2021-11-03T18:57:45.000Z
|
2022-03-26T06:29:38.000Z
|
Basics/ecommerce/shipping.py
|
caseysalvador/Python
|
19bc762a123e98ebbac427c69ce58925e507b045
|
[
"MIT"
] | null | null | null |
def calc_shipping():
    """Announce the shipping-cost step by printing this function's name."""
    print("calc_shipping")
| 23.5
| 26
| 0.723404
| 6
| 47
| 5.333333
| 0.666667
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12766
| 47
| 2
| 26
| 23.5
| 0.780488
| 0
| 0
| 0
| 0
| 0
| 0.270833
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0
| 0.5
| 0.5
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
944e272b9a8e376d4baa821091666f98a198af30
| 33,154
|
py
|
Python
|
world_building.py
|
Orange-Joe/Python_RPG_Engine
|
956dea075fe8cc050928831f645ae9ae6c316e47
|
[
"MIT"
] | null | null | null |
world_building.py
|
Orange-Joe/Python_RPG_Engine
|
956dea075fe8cc050928831f645ae9ae6c316e47
|
[
"MIT"
] | null | null | null |
world_building.py
|
Orange-Joe/Python_RPG_Engine
|
956dea075fe8cc050928831f645ae9ae6c316e47
|
[
"MIT"
] | null | null | null |
from dialog import *
import os
import sys
import readchar
import time
__author__ = "0range-j0e"
__version__ = "1.0"
"""
This module is a major work in progress for an interactive map based on Unicode characters. Currently working on refactoring this code to improve
the ease with which new maps can be created and interacted with by creating a standard grid structure. Running this program will put the player in
a test map in which the player can move around and add new objects to the world by using the build function. Please see the building function below
for more information. Please note you will likely need to zoom into the terminal to see the map properly.
"""
# Player inventory; items are appended as they are picked up.
inv = []
# Tracks the most recently used door, so a return trip can be resolved.
last_door = None
# Main-loop sentinel; set True to end the game.
game_over = False
# List of map objects starting with the player icon.
# NOTE(review): `green`, `dim`, `red`, etc. are presumably ANSI escape
# prefixes star-imported from `dialog` — confirm against that module.
p = (green + dim + 'π')
# Wizard icon
z = (green + dim + 'W')
# Wall
w = (normal + "|")
# First map door
d = (red+bright+"D")
# Second map door
d1 = black + bright + "D"
# Second map return door
d2 = white + dim + "D"
# Generic floor tile rendered bright white.
q = white + bright + "X"
# Alternate wall segment (vertical) for the second map's palette.
w1 = black + bright + "|"
# Alternate wall segment (horizontal) for the second map's palette.
c1 = black + bright + "-"
# Key item: [display glyph, inventory name].
k = [blue + 'K', 'BLUE KEY']
# Village Square Map
# NOTE(review): `map` shadows the `map` builtin; renaming would be safer but
# would require updating every consumer of this grid.
map = [w,'X', 'O', 'X','X','X','X','X','X','X', w,
w,'X','X','X','X','X','X','X','X','X',w,
w,'X','X','X','X','X','X','X','X','X',w,
w,'X','X','X','X','X','X','X','X','X',w,
w,'X','X','X','X','X', z, 'X', d,'X', w]
# The map the player came from, used when stepping back through a door.
previous_map = None
# WIP maps
map1 = [' ', ' ', ' ', '\x1b[34m\x1b[2m_', '\x1b[34m\x1b[2m_', '\x1b[34m\x1b[2m_', '\x1b[34m\x1b[2m_', '\x1b[34m\x1b[2m_', '\x1b[34m\x1b[2m_', '\x1b[34m\x1b[2m_', ' ', '\x1b[34m\x1b[2m_', '\x1b[34m\x1b[2m_', '\x1b[34m\x1b[2m_', '\x1b[34m\x1b[2m_', '\x1b[34m\x1b[2m_', '\x1b[34m\x1b[2m_', '\x1b[34m\x1b[2m_', '\x1b[34m\x1b[2m_', '\x1b[34m\x1b[2m_', '\x1b[34m\x1b[2m_', ' ', '\x1b[34m\x1b[2m_', '\x1b[34m\x1b[2m_', '\x1b[34m\x1b[2m_', '\x1b[34m\x1b[2m_', '\x1b[34m\x1b[2m_', '\x1b[34m\x1b[2m_', '\x1b[34m\x1b[2m_', ' ', ' ', '\x1b[34m|', '\x1b[34m\x1b[2m⍿', '\x1b[34m\x1b[1m⏣', '\x1b[34m\x1b[2m⍿', '\x1b[33m\x1b[1m\x1b[34m\x1b[1m∞', '\x1b[34m\x1b[2m⍿', '\x1b[34m\x1b[1m⏣', '\x1b[34m\x1b[2m⍿', '\x1b[34m|', '\x1b[30m\x1b[1m⚱', '\x1b[30m\x1b[1m⚱', '\x1b[33m\x1b[1m⚚', '\x1b[34m|', '\x1b[31m\x1b[1m♨', '\x1b[31m\x1b[1m♨', '\x1b[34m|', '\x1b[33m\x1b[1m☤', '\x1b[30m\x1b[1m⚱', '\x1b[30m\x1b[1m⚱', '\x1b[34m\x1b[2m╠', '\x1b[34m\x1b[2m╬', '\x1b[34m\x1b[2m╬', '\x1b[34m\x1b[34m\x1b[1m⏣', '\x1b[34m\x1b[34m\x1b[1m⏣', '\x1b[34m\x1b[2m╬', '\x1b[34m\x1b[2m╬', '\x1b[34m\x1b[2m╣', ' ', ' ', '\x1b[34m|', '\x1b[34m\x1b[2m⍿', '\x1b[32m\x1b[2m⬖', '\x1b[34m\x1b[2m⍿', '\x1b[31m\x1b[2m⌔', '\x1b[34m\x1b[2m⍿', '\x1b[32m\x1b[2m⬗', '\x1b[34m\x1b[2m⍿', '\x1b[34m|', ' ', ' ', ' ', ' ', '\x1b[31m\x1b[2m‾', '\x1b[31m\x1b[2m‾', ' ', ' ', ' ', ' ', '\x1b[34m╠', '\x1b[34m╬', '\x1b[34m╬', '\x1b[34m╬', '\x1b[34m╬', '\x1b[34m╬', '\x1b[34m╬', '\x1b[34m╣', ' ', ' ', '\x1b[34m|', '\x1b[34m\x1b[2m⍿', ' ', ' ', ' ', ' ', ' ', ' ', '\x1b[34m|', ' ', ' ', '\x1b[32m☘', '\x1b[32m\x1b[2m☘', '\x1b[31m\x1b[2m☘', '\x1b[34m☘', '\x1b[32m☘', '\x1b[32m\x1b[2m☘', ' ', ' ', '\x1b[34m\x1b[2m╚', '\x1b[34m\x1b[2m╩', '\x1b[34m\x1b[2m╬', '\x1b[34m\x1b[2m╬', '\x1b[34m\x1b[2m╬', '\x1b[34m\x1b[2m╬', '\x1b[34m\x1b[2m╩', '\x1b[34m\x1b[2m╣', ' ', ' ', '\x1b[34m|', '\x1b[34m\x1b[1m▩', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '\x1b[32m☘', '\x1b[32m\x1b[2m☘', '\x1b[34m☘', '\x1b[34m\x1b[2m☘', '\x1b[31m\x1b[2m☘', '\x1b[35m\x1b[2m☘', '\x1b[32m\x1b[2m☘', 
'\x1b[32m☘', ' ', ' ', ' ', '\x1b[34m\x1b[2m╚', '\x1b[34m\x1b[1m▩', '\x1b[34m\x1b[1m▩', '\x1b[34m\x1b[2m╝', ' ', '\x1b[34m\x1b[2m║', ' ', ' ', '\x1b[34m|', '\x1b[34m\x1b[1m▩', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '\x1b[31m☘', '\x1b[32m☘', '\x1b[31m\x1b[2m☘', '\x1b[33m\x1b[2m☘', '\x1b[35m\x1b[2m☘', '\x1b[31m☘', '\x1b[32m\x1b[2m☘', '\x1b[31m\x1b[2m☘', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '\x1b[34m\x1b[2m║', ' ', ' ', '\x1b[34m|', '\x1b[34m\x1b[2m⍿', ' ', ' ', ' ', ' ', ' ', ' ', '\x1b[34m|', ' ', ' ', '\x1b[33m\x1b[2m☘', '\x1b[32m☘', '\x1b[32m\x1b[2m☘', '\x1b[34m\x1b[2m☘', '\x1b[32m\x1b[2m☘', '\x1b[32m☘', ' ', ' ', '\x1b[34m\x1b[2m╔', '\x1b[34m\x1b[2m╦', '\x1b[34m\x1b[2m╦', '\x1b[34m▩', '\x1b[34m▩', '\x1b[34m\x1b[2m╦', '\x1b[34m\x1b[2m╦', '\x1b[34m\x1b[2m╣', ' ', ' ', '\x1b[34m|', '\x1b[34m\x1b[2m⍿', '\x1b[32m\x1b[2m⬖', '\x1b[34m\x1b[2m⍿', '\x1b[31m\x1b[2m▵', '\x1b[34m\x1b[2m⍿', '\x1b[32m\x1b[2m⬗', '\x1b[34m\x1b[2m⍿', '\x1b[34m|', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '\x1b[34m\x1b[2m╠', '\x1b[34m\x1b[2m╬', '\x1b[34m\x1b[2m╬', '\x1b[34m\x1b[2m╬', '\x1b[34m\x1b[2m╬', '\x1b[34m\x1b[2m╬', '\x1b[34m\x1b[2m╬', '\x1b[34m\x1b[2m╣', ' ', ' ', '\x1b[34m|', '\x1b[34m\x1b[2m⍿', '\x1b[34m\x1b[1m⏣', '\x1b[34m\x1b[2m⍿', '\x1b[33m\x1b[1m\x1b[34m\x1b[1m∞', '\x1b[34m\x1b[2m⍿', '\x1b[34m\x1b[1m⏣', '\x1b[34m\x1b[2m⍿', '\x1b[34m|', ' ', '\x1b[30m\x1b[1m⚰', '\x1b[30m\x1b[1m⚰', '\x1b[30m\x1b[1m⚰', '\x1b[30m\x1b[1m⚰', '\x1b[30m\x1b[1m⚰', '\x1b[30m\x1b[1m⚰', '\x1b[30m\x1b[1m⚰', '\x1b[30m\x1b[1m⚰', ' ', '\x1b[34m╠', '\x1b[34m╬', '\x1b[34m╬', '\x1b[34m\x1b[34m\x1b[1m⏣', '\x1b[34m\x1b[34m\x1b[1m⏣', '\x1b[34m╬', '\x1b[34m╬', '\x1b[34m╣', ' ', ' ', ' ', '\x1b[34m\x1b[2m\x1b[34m\x1b[2m⎺', '\x1b[34m\x1b[2m\x1b[34m\x1b[2m⎺', '\x1b[34m\x1b[2m\x1b[34m\x1b[2m⎺', '\x1b[34m\x1b[2m\x1b[34m\x1b[2m⎺', '\x1b[34m\x1b[2m\x1b[34m\x1b[2m⎺', '\x1b[34m\x1b[2m\x1b[34m\x1b[2m⎺', '\x1b[34m\x1b[2m\x1b[34m\x1b[2m⎺', ' ', '\x1b[34m\x1b[2m\x1b[34m\x1b[2m⎺', '\x1b[34m\x1b[2m\x1b[34m\x1b[2m⎺', 
'\x1b[34m\x1b[2m\x1b[34m\x1b[2m⎺', '\x1b[34m\x1b[2m\x1b[34m\x1b[2m⎺', '\x1b[34m\x1b[2m\x1b[34m\x1b[2m⎺', '\x1b[34m\x1b[2m\x1b[34m\x1b[2m⎺', '\x1b[34m\x1b[2m\x1b[34m\x1b[2m⎺', '\x1b[34m\x1b[2m\x1b[34m\x1b[2m⎺', '\x1b[34m\x1b[2m\x1b[34m\x1b[2m⎺', '\x1b[34m\x1b[2m\x1b[34m\x1b[2m⎺', ' ', '\x1b[34m\x1b[2m\x1b[34m\x1b[2m⎺', '\x1b[34m\x1b[2m\x1b[34m\x1b[2m⎺', '\x1b[34m\x1b[2m\x1b[34m\x1b[2m⎺', '\x1b[34m\x1b[2m\x1b[34m\x1b[2m⎺', '\x1b[34m\x1b[2m\x1b[34m\x1b[2m⎺', '\x1b[34m\x1b[2m\x1b[34m\x1b[2m⎺', '\x1b[34m\x1b[2m⎺']
# Open World Map
map2 = ['┌', '-', '-', '-', '-', '-', '-', '-', '-', '-', '-', '-', '-', '-', '-', '-', '-', '-', '┐', ' ', ' ', ' ', '◬', ' ', ' ', '◬', ' ', ' ', ' ', '\x1b[22m|', '\x1b[37m\x1b[2mD', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '/', '*', '\\', '/', '*', '\\', ' ', ' ', '\x1b[22m|', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '☠', ' ', ' ', ' ', ' ', ' ', '-', '-', '/', '/', ' ', '\\', '/', ' ', '\\', '\\', ' ', '\x1b[22m|', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '⚕', ' ', ' ', ' ', ' ', ' ', '└', '/', '/', ' ', ' ', ' ', ' ', ' ', ' ', '\\', '\\', '\x1b[22m|', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ⚱', ' ', ' ', ' ', '/', '/', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '☺', ' ', ' ', ' ', '/', '/', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' |', '├', '\x1b[30m\x1b[1m-', '\x1b[30m\x1b[1m-', '\x1b[30m\x1b[1m-', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' /', '/', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', blue + '|', blue + '|', '\x1b[22m|', ' ', ' ', ' ', ' ', '\x1b[30m\x1b[1m|', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '/', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' |', blue + '|', '\x1b[22m|', ' ', ' ', ' ', ' ', '\x1b[30m\x1b[1m|', '\x1b[32m\x1b[2mW', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' |', blue + '|', '└', '-', '-', '-', '-', '-', '-', '-', '-', '-', '-', '-', '-', '-', '-', '-', '-', '-', '-', '-', '-', '-', '-', '-', '-', '-', '-', '-', ' ']
# WIP maps
# map3 = [' ', ' ', ' ', blue + dim + '_', blue + dim + '_', blue + dim + '_', '⍙', blue + dim + '_', blue + dim + '_', blue + dim + '_', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', blue + '|', blue + dim + '⍿', blue + bright + '⏣', blue + dim + '⍿', yellow + bright + blue + bright + '∞', blue + dim + '⍿', blue + bright + '⏣', blue + dim + '⍿', blue + '|', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', blue + '|', blue + dim + '⍿', green + dim + '⬖', blue + dim + '⍿', red + dim + '⌔', blue + dim + '⍿', green + dim + '⬗', blue + dim + '⍿', blue + '|', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', blue + '|', ' ', ' ', ' ', ' ', ' ', ' ', ' ', blue + '|', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', blue + '|', ' ', ' ', ' ', ' ', ' ', ' ', ' ', blue + '|', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', blue + '|', yellow + bright + '☤', ' ', ' ', ' ', '♾', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', blue + '|', ' ', ' ', ' ', ' ', ' ', ' ', ' ', blue + '|', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', blue + '|', blue + dim + '⍿', green + dim + '⬖', blue + dim + '⍿', red + dim + '▵', blue + dim + '⍿', green + dim + '⬗', blue + dim + '⍿', blue + '|', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', blue + '|', blue + dim + '⍿', blue + bright + '⏣', blue + dim + '⍿', yellow + bright + blue + bright + '∞', blue + dim + '⍿', blue + bright + '⏣', blue + dim + '⍿', blue + '|', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', blue + dim + blue + dim + '⎺', blue + dim + blue 
+ dim + '⎺', blue + dim + blue + dim + '⎺', blue + dim + blue + dim + '⎺', blue + dim + blue + dim + '⎺', blue + dim + blue + dim + '⎺', blue + dim + blue + dim + '⎺', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ']
# Blank grid for the WIP grid() location.  grid() renders rows with
# output(map3, 0, 100) ... output(map3, 1900, 2000), i.e. cells 0-1999,
# so the map is exactly 20 rows x 100 columns = 2000 cells.  Building it
# by multiplication replaces the original 2000-element hand-written literal.
map3 = ['X'] * 2000
# Name of the location the player currently occupies.
location = 'Village Square'
"""
The build function allows the player to add their own Unicode characters to a map. To call the function, press 'b'.
Once you've pressed 'b', you can choose to add an object above, below, left, or right. Example: press 'b' > type: 'w ⌨' (no quotes, and there
must be a space between 'b' and '⌨'). This will add a keyboard object above the player. This is a work in progress, but can theoretically be
used to create fully fleshed-out Unicode maps. To add colors to the Unicode character you're adding, type color + character, ex:
'w blue + ⌨'. In the future I want the build function to identify custom Python objects and add them into the game intelligently, such as doors
that lead to a certain location or a sword with special properties. If you want to save a map you have modified, exit into the interpreter, print
the map object, and copy and paste it into the code.
"""
def build(map, w, a, s, d):
    """Let the player place a Unicode glyph on a cell adjacent to them.

    ``map`` is the current map list (the name shadows the builtin, kept for
    interface compatibility); ``w``/``a``/``s``/``d`` are the plot indices
    above, left of, below, and right of the player.  The player types a
    command such as ``w ⌨`` or ``w blue + ⌨``: the first letter picks the
    cell, the last character is the glyph, and up to two ``+``-joined
    ``Dialog.colors`` names may prefix it.
    """
    print("w = build above\nd = build to right\ns = build below\na = build to left")
    x = input(":: ")
    indices = []
    if len(x) > 2:
        # Compact the command by dropping every space the player typed.
        x = x.replace(' ', '')
        # Strip the '+' separators one at a time, recording where each sat
        # so the color-name slices below line up with the compacted string.
        while '+' in x:
            check = x.find('+')
            indices.append(check)
            chars = list(x)
            chars.pop(check)
            x = ''.join(chars)
    # Map the leading command letter to the plot index it targets; this
    # replaces four byte-identical if/elif branches from the original.
    targets = {'w': w, 'a': a, 's': s, 'd': d}
    slot = targets.get(x[:1].lower())
    if slot is not None:
        if len(x) < 3:
            # Bare "<direction><glyph>" command: place the glyph uncolored.
            map[slot] = x[1:]
        if len(indices) == 1:
            # One color prefix, e.g. "wblue⌨" after compaction.
            try:
                map[slot] = Dialog.colors[x[1:indices[0]]] + x[-1]
            except (KeyError, IndexError):
                pass  # unknown color name or missing glyph: leave cell alone
        elif len(indices) == 2:
            # Two stacked prefixes, e.g. a color plus a style modifier.
            try:
                map[slot] = (Dialog.colors[x[1:indices[0]]]
                             + Dialog.colors[x[indices[0]:indices[1]]]
                             + x[-1])
            except (KeyError, IndexError):
                pass  # unknown color name: leave cell alone
    # NOTE(review): the original 'u' (undo) branch referenced an undefined
    # `previous_map` and only rebound the local name, so it could never
    # restore anything; it was removed pending a real undo feature.  The
    # debug print()s and the dead `x[-1] == ' '` branch (unreachable once
    # all spaces are stripped) were removed as well.
def output(plots, check, end):
    """Render map cells ``plots[check:end]`` as one terminal row.

    Hides the terminal cursor while drawing, writes each cell without a
    trailing newline, then restores the cursor and ends the row.
    """
    cursor.hide()
    for cell in plots[check:end]:
        if isinstance(cell, list):
            # A list cell wraps its displayable glyph in slot 0.
            print(cell[0], end="", flush=True)
        else:
            sys.stdout.write(cell)
    cursor.show()
    print("")
def point_check(plots, point):
    """Return True when the wizard glyph 'W' occupies ``plots[point]``."""
    return 'W' in plots[point]
def door_check(plots, point):
    """Return True when the door glyph 'D' occupies ``plots[point]``."""
    return 'D' in plots[point]
def block_check(plots, point):
    """Return True when ``plots[point]`` contains an impassable glyph."""
    cell = plots[point]
    # Walls and corners the player may not walk onto.
    return any(glyph in cell for glyph in ('╝', blue + '|', '-', '╚', '╗', '╔'))
def village_square():
    # Game loop for the Village Square location: renders the 11-cell-wide,
    # 5-row map and moves the player glyph with w/a/s/d until a door tile
    # is stepped on.  NOTE(review): `map` here is presumably a module-level
    # map list defined earlier in the file (it shadows the builtin) — confirm.
    def wiz_chat():
        # One-off wizard greeting shown when the player steps onto a 'W' cell.
        Dialog.quick_chat("""\nWIZARD:\n\nWhy hello, Stranger! """, blue + dim)
        input("Continue... ")
    def checks(c, n, map, x):
        # Resolve a move from plot x to plot n, where c is the cell content
        # the player is currently standing on (restored when they leave).
        # Returns the new (position, saved-cell) pair; position becomes the
        # sentinel string 'door' when the player walks onto a door tile.
        block = block_check(map, n)
        if block is False:
            door = door_check(map, n)
            if door is True:
                map[(x)] = c            # put the old cell back
                last_door = map[n]      # NOTE(review): local only, never read
                return ('door', c)
            elif door is False:
                chat = point_check(map, n)
                if chat is False:
                    map[(x)] = c        # restore the vacated cell
                    c = map[n]          # remember what we're stepping onto
                    x = n
                    first_round = False  # NOTE(review): local only, no effect
                    return (x, c)
                elif chat is True:
                    wiz_chat()
                    return (x, c)       # chat tiles are not entered
        elif block is True:
            return (x, c)               # blocked: stay put
        else:
            # Unreachable: block_check always returns a bool.
            c = map[n]
            return (x, c)
    # In Village Square, player starts off on map plot map[1].
    # If the player were returning from another location, there should be a bool to determine if that's the case
    # and put the player at the map plot point next to the door instead of the first spawn point.
    x = 1
    # current plot point
    c = map[x]
    first_round = True
    chat = False
    block = False
    door = False
    while True:
        # Draw the player glyph `p` (file-level constant — confirm), then
        # repaint the whole 5x11 map one row per output() call.
        map[x] = p
        os.system('clear')
        Dialog.quick_chat("\n-- Village Square --", blue + dim)
        # print(f"LAST DOOR: {last_door}")
        output(map, 0, 11)
        output(map, 11, 22)
        output(map, 22, 33)
        output(map, 33, 44)
        output(map, 44, 55)
        if chat is False:
            # readchar gives one raw keypress; repr() wraps it in quotes,
            # hence the "'w'"-style comparisons below.
            inp = repr(readchar.readchar())
            try:
                if inp == "'w'":
                    n = x - 11          # one row up (row width is 11)
                    x, c = checks(c, n, map, x)
                elif inp == "'a'":
                    n = x - 1
                    x, c = checks(c, n, map, x)
                elif inp == "'s'":
                    n = x + 11
                    x, c = checks(c, n, map, x)
                elif inp == "'d'":
                    n = x + 1
                    x, c = checks(c, n, map, x)
                elif inp == "'c'":
                    a = input("Enter command: ")
                    if a == 'quit':
                        # NOTE(review): 'quit' is not a shell command; this
                        # call does nothing useful — the break exits instead.
                        os.system('quit')
                        break
                elif inp == "'b'":
                    # Build mode: pass the four plot indices around the player.
                    build(map, x - 11, x - 1, x + 11, x + 1)
                elif inp == "'q'":
                    sys.exit()
                else:
                    pass
                if x == 'door':
                    # checks() returned the door sentinel: leave this location.
                    break
                block = False
            except:
                # NOTE(review): bare except silently swallows all errors
                # (including bugs in build()); consider narrowing.
                pass
def open_world():
    # Game loop for the open-world location: same structure as
    # village_square(), but on the 29-cell-wide `map1` (10 rows).
    # NOTE(review): `map1` is presumably defined earlier in the file — confirm.
    def wiz_chat():
        # One-off wizard greeting shown when the player steps onto a 'W' cell.
        Dialog.quick_chat("""\nWIZARD:\n\nWhy hello, Stranger!""", blue + dim)
        input("Continue... ")
    def checks(c, n, map, x):
        # Resolve a move from plot x to plot n (duplicated verbatim from
        # village_square.checks; see notes there).  Returns the new
        # (position, saved-cell) pair, or ('door', c) on a door tile.
        block = block_check(map, n)
        if block is False:
            door = door_check(map, n)
            if door is True:
                map[(x)] = c
                last_door = map[n]      # NOTE(review): local only, never read
                return ('door', c)
            elif door is False:
                chat = point_check(map, n)
                if chat is False:
                    map[(x)] = c
                    c = map[n]
                    x = n
                    first_round = False  # NOTE(review): local only, no effect
                    return (x, c)
                elif chat is True:
                    wiz_chat()
                    return (x, c)
        elif block is True:
            return (x, c)
        else:
            # Unreachable: block_check always returns a bool.
            c = map[n]
            return (x, c)
    # In Village Square, player starts off on map plot map[1].
    # If the player were returning from another location, there should be a bool to determine if that's the case
    # and put the player at the map plot point next to the door instead of the first spawn point.
    x = 32
    c = map1[x]
    first_round = True
    chat = False
    block = False
    door = False
    while True:
        # Draw the player glyph, then repaint all ten 29-cell rows.
        map1[x] = p
        os.system('clear')
        output(map1, 0, 29)
        output(map1, 29, 58)
        output(map1, 58, 87)
        output(map1, 87, 116)
        output(map1, 116, 145)
        output(map1, 145, 174)
        output(map1, 174, 203)
        output(map1, 203, 232)
        output(map1, 232, 261)
        output(map1, 261, 290)
        if chat is False:
            # One raw keypress, repr()-wrapped (hence the quoted comparisons).
            inp = repr(readchar.readchar())
            try:
                if inp == "'w'":
                    n = x - 29          # one row up (row width is 29)
                    x, c = checks(c, n, map1, x)
                elif inp == "'a'":
                    n = x - 1
                    x, c = checks(c, n, map1, x)
                elif inp == "'s'":
                    n = x + 29
                    x, c = checks(c, n, map1, x)
                elif inp == "'d'":
                    n = x + 1
                    x, c = checks(c, n, map1, x)
                elif inp == "'c'":
                    a = input(": ")
                    if a == 'quit':
                        # NOTE(review): 'quit' is not a shell command; the
                        # break below is what actually exits.
                        os.system('quit')
                        break
                    else:
                        try:
                            # NOTE(review): security — this runs arbitrary
                            # player input through the shell (os.system).
                            os.system(f'{a}')
                        except:
                            pass
                elif inp == "'b'":
                    build(map1, x - 29, x - 1, x + 29, x + 1)
                if x == 'door':
                    break
                block = False
            except:
                # NOTE(review): bare except silently swallows all errors.
                pass
def grid():
    # Game loop for the WIP grid location: same structure as
    # village_square(), but on the 100-cell-wide, 20-row `map3`.
    def wiz_chat():
        # One-off wizard greeting shown when the player steps onto a 'W' cell.
        Dialog.quick_chat("""\nWIZARD:\n\nWhy hello, Stranger! """, blue + dim)
        input("Continue... ")
    def checks(c, n, map, x):
        # Resolve a move from plot x to plot n (duplicated verbatim from
        # village_square.checks; see notes there).  Returns the new
        # (position, saved-cell) pair, or ('door', c) on a door tile.
        block = block_check(map, n)
        if block is False:
            door = door_check(map, n)
            if door is True:
                map[(x)] = c
                last_door = map[n]      # NOTE(review): local only, never read
                return ('door', c)
            elif door is False:
                chat = point_check(map, n)
                if chat is False:
                    map[(x)] = c
                    c = map[n]
                    x = n
                    first_round = False  # NOTE(review): local only, no effect
                    return (x, c)
                elif chat is True:
                    wiz_chat()
                    return (x, c)
        elif block is True:
            return (x, c)
        else:
            # Unreachable: block_check always returns a bool.
            c = map[n]
            return (x, c)
    # In Village Square, player starts off on map plot map[1].
    # If the player were returning from another location, there should be a bool to determine if that's the case
    # and put the player at the map plot point next to the door instead of the first spawn point.
    x = 500
    c = map3[x]
    first_round = True
    chat = False
    block = False
    door = False
    while True:
        # Draw the player glyph, then repaint all twenty 100-cell rows.
        map3[x] = p
        os.system('clear')
        output(map3, 0, 100)
        output(map3, 100, 200)
        output(map3, 200, 300)
        output(map3, 300, 400)
        output(map3, 400, 500)
        output(map3, 500, 600)
        output(map3, 600, 700)
        output(map3, 700, 800)
        output(map3, 800, 900)
        output(map3, 900, 1000)
        output(map3, 1000, 1100)
        output(map3, 1100, 1200)
        output(map3, 1200, 1300)
        output(map3, 1300, 1400)
        output(map3, 1400, 1500)
        output(map3, 1500, 1600)
        output(map3, 1600, 1700)
        output(map3, 1700, 1800)
        output(map3, 1800, 1900)
        output(map3, 1900, 2000)
        if chat is False:
            # One raw keypress, repr()-wrapped (hence the quoted comparisons).
            inp = repr(readchar.readchar())
            try:
                if inp == "'w'":
                    n = x - 100         # one row up (row width is 100)
                    x, c = checks(c, n, map3, x)
                elif inp == "'a'":
                    n = x - 1
                    x, c = checks(c, n, map3, x)
                elif inp == "'s'":
                    n = x + 100
                    x, c = checks(c, n, map3, x)
                elif inp == "'d'":
                    n = x + 1
                    x, c = checks(c, n, map3, x)
                elif inp == "'c'":
                    a = input(": ")
                    if a == 'quit':
                        # NOTE(review): 'quit' is not a shell command; the
                        # break below is what actually exits.
                        os.system('quit')
                        break
                    else:
                        try:
                            # NOTE(review): security — this runs arbitrary
                            # player input through the shell (os.system).
                            os.system(f'{a}')
                        except:
                            pass
                elif inp == "'b'":
                    build(map3, x - 100, x - 1, x + 100, x + 1)
                if x == 'door':
                    break
                block = False
            except:
                # NOTE(review): bare except silently swallows all errors.
                pass
open_world()
| 60.170599
| 10,006
| 0.33824
| 5,030
| 33,154
| 2.256064
| 0.070577
| 0.35883
| 0.535865
| 0.712372
| 0.735108
| 0.721361
| 0.708319
| 0.697127
| 0.686024
| 0.662231
| 0
| 0.063216
| 0.30482
| 33,154
| 550
| 10,007
| 60.28
| 0.419776
| 0.109006
| 0
| 0.62234
| 0
| 0.00266
| 0.223778
| 0.031032
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037234
| false
| 0.037234
| 0.013298
| 0
| 0.106383
| 0.013298
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
945c5dc3a25429f711f782303f7797e3bc6ad453
| 81,410
|
py
|
Python
|
presalytics_story/api/default_api.py
|
presalytics/story-python-client
|
48ac7830b85d65b94a9f6bbfc0c7ee8344327084
|
[
"MIT"
] | null | null | null |
presalytics_story/api/default_api.py
|
presalytics/story-python-client
|
48ac7830b85d65b94a9f6bbfc0c7ee8344327084
|
[
"MIT"
] | null | null | null |
presalytics_story/api/default_api.py
|
presalytics/story-python-client
|
48ac7830b85d65b94a9f6bbfc0c7ee8344327084
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Communications
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: 0.1
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from presalytics_story.api_client import ApiClient
from presalytics_story.exceptions import (
ApiTypeError,
ApiValueError
)
class DefaultApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
    # Fall back to a default-configured ApiClient when none is injected.
    if api_client is None:
        api_client = ApiClient()
    self.api_client = api_client
def story_get(self, **kwargs):  # noqa: E501
    """story_get  # noqa: E501

    Returns a list of stories for this user  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.story_get(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: list[Story]
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info variant, asking for only the
    # deserialized body rather than the (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    return self.story_get_with_http_info(**kwargs)  # noqa: E501
def story_get_with_http_info(self, **kwargs):  # noqa: E501
    """story_get  # noqa: E501

    Returns a list of stories for this user  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.story_get_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(list[Story], status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # Generated-code idiom: snapshot the arguments via locals() (so this
    # must run before any other local is bound), then validate kwargs.
    local_var_params = locals()

    all_params = []  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Reject unknown keyword arguments, then flatten kwargs into the
    # params dict so call_api sees one flat mapping.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method story_get" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    collection_formats = {}

    path_params = {}

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    # Perform GET / and deserialize the response into list[Story].
    return self.api_client.call_api(
        '/', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='list[Story]',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def story_id_collaborators_get(self, id, **kwargs):  # noqa: E501
    """story_id_collaborators_get  # noqa: E501

    Returns a list of that collaborators on the story  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.story_id_collaborators_get(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str id: the id from the story object (required)
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: list[StoryCollaborator]
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info variant, asking for only the
    # deserialized body rather than the (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    return self.story_id_collaborators_get_with_http_info(id, **kwargs)  # noqa: E501
def story_id_collaborators_get_with_http_info(self, id, **kwargs):  # noqa: E501
    """story_id_collaborators_get  # noqa: E501

    Returns a list of that collaborators on the story  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.story_id_collaborators_get_with_http_info(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str id: the id from the story object (required)
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(list[StoryCollaborator], status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # Generated-code idiom: snapshot the arguments via locals() (so this
    # must run before any other local is bound), then validate kwargs.
    local_var_params = locals()

    all_params = ['id']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Reject unknown keyword arguments, then flatten kwargs into the
    # params dict so call_api sees one flat mapping.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method story_id_collaborators_get" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'id' is set
    if ('id' not in local_var_params or
            local_var_params['id'] is None):
        raise ApiValueError("Missing the required parameter `id` when calling `story_id_collaborators_get`")  # noqa: E501

    collection_formats = {}

    path_params = {}
    if 'id' in local_var_params:
        path_params['id'] = local_var_params['id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    # Perform GET /{id}/collaborators and deserialize into
    # list[StoryCollaborator].
    return self.api_client.call_api(
        '/{id}/collaborators', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='list[StoryCollaborator]',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def story_id_collaborators_post(self, id, unknown_base_type, **kwargs):  # noqa: E501
    """story_id_collaborators_post  # noqa: E501

    Add a colloborator to this story  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.story_id_collaborators_post(id, unknown_base_type, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str id: the id from the story object (required)
    :param UNKNOWN_BASE_TYPE unknown_base_type: Collaborator user id and permission type (required)
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: StoryCollaborator
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info variant, asking for only the
    # deserialized body rather than the (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    return self.story_id_collaborators_post_with_http_info(id, unknown_base_type, **kwargs)  # noqa: E501
def story_id_collaborators_post_with_http_info(self, id, unknown_base_type, **kwargs):  # noqa: E501
    """story_id_collaborators_post  # noqa: E501

    Add a collaborator to this story.

    Synchronous by default; pass ``async_req=True`` to get a request
    thread instead of the result.

    >>> thread = api.story_id_collaborators_post_with_http_info(id, unknown_base_type, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str id: the id from the story object (required)
    :param UNKNOWN_BASE_TYPE unknown_base_type: Collaborator user id and permission type (required)
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. A single
                             number is a total timeout; a (connection, read)
                             tuple sets each phase separately.
    :return: tuple(StoryCollaborator, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously,
        returns the request thread.
    """
    # Gather named parameters, then merge in only the recognised kwargs.
    params = {'id': id, 'unknown_base_type': unknown_base_type}
    allowed = ('id', 'unknown_base_type', 'async_req',
               '_return_http_data_only', '_preload_content',
               '_request_timeout')
    for key, val in six.iteritems(kwargs):
        if key not in allowed:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method story_id_collaborators_post" % key
            )
        params[key] = val
    # Required parameters must be present and non-None.
    if params.get('id') is None:
        raise ApiValueError("Missing the required parameter `id` when calling `story_id_collaborators_post`")  # noqa: E501
    if params.get('unknown_base_type') is None:
        raise ApiValueError("Missing the required parameter `unknown_base_type` when calling `story_id_collaborators_post`")  # noqa: E501

    path_params = {'id': params['id']}
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json']),  # noqa: E501
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json']),  # noqa: E501
    }
    # The request body is the raw collaborator payload.
    body_params = params['unknown_base_type']
    return self.api_client.call_api(
        '/{id}/collaborators', 'POST',
        path_params,
        [],  # query params
        header_params,
        body=body_params,
        post_params=[],
        files={},
        response_type='StoryCollaborator',  # noqa: E501
        auth_settings=[],  # no auth configured for this endpoint
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def story_id_collaborators_userid_delete(self, id, story_collaborator_userid, **kwargs):  # noqa: E501
    """story_id_collaborators_userid_delete  # noqa: E501

    Remove a collaborator from this story.

    Synchronous by default; pass ``async_req=True`` to get a request
    thread instead of the result.

    >>> thread = api.story_id_collaborators_userid_delete(id, story_collaborator_userid, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str id: the id from the story object (required)
    :param str story_collaborator_userid: The presalytics userid (NOT the Id of the story_collaborator object) (required)
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. A single
                             number is a total timeout; a (connection, read)
                             tuple sets each phase separately.
    :return: None
        If the method is called asynchronously,
        returns the request thread.
    """
    # Plain variant: callers only want the deserialized body, so strip
    # the status code and headers from the lower-level helper's result.
    kwargs['_return_http_data_only'] = True
    return self.story_id_collaborators_userid_delete_with_http_info(id, story_collaborator_userid, **kwargs)  # noqa: E501
def story_id_collaborators_userid_delete_with_http_info(self, id, story_collaborator_userid, **kwargs):  # noqa: E501
    """story_id_collaborators_userid_delete  # noqa: E501

    Remove a collaborator from this story.

    Synchronous by default; pass ``async_req=True`` to get a request
    thread instead of the result.

    >>> thread = api.story_id_collaborators_userid_delete_with_http_info(id, story_collaborator_userid, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str id: the id from the story object (required)
    :param str story_collaborator_userid: The presalytics userid (NOT the Id of the story_collaborator object) (required)
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. A single
                             number is a total timeout; a (connection, read)
                             tuple sets each phase separately.
    :return: None
        If the method is called asynchronously,
        returns the request thread.
    """
    # Gather named parameters, then merge in only the recognised kwargs.
    params = {'id': id, 'story_collaborator_userid': story_collaborator_userid}
    allowed = ('id', 'story_collaborator_userid', 'async_req',
               '_return_http_data_only', '_preload_content',
               '_request_timeout')
    for key, val in six.iteritems(kwargs):
        if key not in allowed:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method story_id_collaborators_userid_delete" % key
            )
        params[key] = val
    # Required parameters must be present and non-None.
    if params.get('id') is None:
        raise ApiValueError("Missing the required parameter `id` when calling `story_id_collaborators_userid_delete`")  # noqa: E501
    if params.get('story_collaborator_userid') is None:
        raise ApiValueError("Missing the required parameter `story_collaborator_userid` when calling `story_id_collaborators_userid_delete`")  # noqa: E501

    path_params = {
        'id': params['id'],
        'story_collaborator_userid': params['story_collaborator_userid'],
    }
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json']),  # noqa: E501
    }
    return self.api_client.call_api(
        '/{id}/collaborators/{story_collaborator_userid}', 'DELETE',
        path_params,
        [],  # query params
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type=None,  # noqa: E501
        auth_settings=[],  # no auth configured for this endpoint
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def story_id_collaborators_userid_get(self, id, story_collaborator_userid, **kwargs):  # noqa: E501
    """story_id_collaborators_userid_get  # noqa: E501

    Get a collaborator's permissions.

    Synchronous by default; pass ``async_req=True`` to get a request
    thread instead of the result.

    >>> thread = api.story_id_collaborators_userid_get(id, story_collaborator_userid, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str id: the id from the story object (required)
    :param str story_collaborator_userid: The presalytics userid (NOT the Id of the story_collaborator object) (required)
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. A single
                             number is a total timeout; a (connection, read)
                             tuple sets each phase separately.
    :return: StoryCollaborator
        If the method is called asynchronously,
        returns the request thread.
    """
    # Plain variant: callers only want the deserialized body, so strip
    # the status code and headers from the lower-level helper's result.
    kwargs['_return_http_data_only'] = True
    return self.story_id_collaborators_userid_get_with_http_info(id, story_collaborator_userid, **kwargs)  # noqa: E501
def story_id_collaborators_userid_get_with_http_info(self, id, story_collaborator_userid, **kwargs):  # noqa: E501
    """story_id_collaborators_userid_get  # noqa: E501

    Get a collaborator's permissions.

    Synchronous by default; pass ``async_req=True`` to get a request
    thread instead of the result.

    >>> thread = api.story_id_collaborators_userid_get_with_http_info(id, story_collaborator_userid, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str id: the id from the story object (required)
    :param str story_collaborator_userid: The presalytics userid (NOT the Id of the story_collaborator object) (required)
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. A single
                             number is a total timeout; a (connection, read)
                             tuple sets each phase separately.
    :return: tuple(StoryCollaborator, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously,
        returns the request thread.
    """
    # Gather named parameters, then merge in only the recognised kwargs.
    params = {'id': id, 'story_collaborator_userid': story_collaborator_userid}
    allowed = ('id', 'story_collaborator_userid', 'async_req',
               '_return_http_data_only', '_preload_content',
               '_request_timeout')
    for key, val in six.iteritems(kwargs):
        if key not in allowed:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method story_id_collaborators_userid_get" % key
            )
        params[key] = val
    # Required parameters must be present and non-None.
    if params.get('id') is None:
        raise ApiValueError("Missing the required parameter `id` when calling `story_id_collaborators_userid_get`")  # noqa: E501
    if params.get('story_collaborator_userid') is None:
        raise ApiValueError("Missing the required parameter `story_collaborator_userid` when calling `story_id_collaborators_userid_get`")  # noqa: E501

    path_params = {
        'id': params['id'],
        'story_collaborator_userid': params['story_collaborator_userid'],
    }
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json']),  # noqa: E501
    }
    return self.api_client.call_api(
        '/{id}/collaborators/{story_collaborator_userid}', 'GET',
        path_params,
        [],  # query params
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='StoryCollaborator',  # noqa: E501
        auth_settings=[],  # no auth configured for this endpoint
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def story_id_collaborators_userid_permissiontype_get(self, id, story_collaborator_userid, permissiontype, **kwargs):  # noqa: E501
    """story_id_collaborators_userid_permissiontype_get  # noqa: E501

    Returns a status code response indicating whether a user has the
    requested permission: 204 = granted, 403 = forbidden.

    Synchronous by default; pass ``async_req=True`` to get a request
    thread instead of the result.

    >>> thread = api.story_id_collaborators_userid_permissiontype_get(id, story_collaborator_userid, permissiontype, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str id: the id from the story object (required)
    :param str story_collaborator_userid: The presalytics userid (NOT the Id of the story_collaborator object) (required)
    :param str permissiontype: the type of permission requested. can be a permission_type object name (e.g., owner, editor, create, viewer, admin) or a permission type field (e.g., can_edit, can_view, can_add_collaborators, can_delete) (required)
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. A single
                             number is a total timeout; a (connection, read)
                             tuple sets each phase separately.
    :return: None
        If the method is called asynchronously,
        returns the request thread.
    """
    # Plain variant: callers only want the deserialized body, so strip
    # the status code and headers from the lower-level helper's result.
    kwargs['_return_http_data_only'] = True
    return self.story_id_collaborators_userid_permissiontype_get_with_http_info(id, story_collaborator_userid, permissiontype, **kwargs)  # noqa: E501
def story_id_collaborators_userid_permissiontype_get_with_http_info(self, id, story_collaborator_userid, permissiontype, **kwargs):  # noqa: E501
    """story_id_collaborators_userid_permissiontype_get  # noqa: E501

    Returns a status code response indicating whether a user has the
    requested permission: 204 = granted, 403 = forbidden.

    Synchronous by default; pass ``async_req=True`` to get a request
    thread instead of the result.

    >>> thread = api.story_id_collaborators_userid_permissiontype_get_with_http_info(id, story_collaborator_userid, permissiontype, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str id: the id from the story object (required)
    :param str story_collaborator_userid: The presalytics userid (NOT the Id of the story_collaborator object) (required)
    :param str permissiontype: the type of permission requested. can be a permission_type object name (e.g., owner, editor, create, viewer, admin) or a permission type field (e.g., can_edit, can_view, can_add_collaborators, can_delete) (required)
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. A single
                             number is a total timeout; a (connection, read)
                             tuple sets each phase separately.
    :return: None
        If the method is called asynchronously,
        returns the request thread.
    """
    # Gather named parameters, then merge in only the recognised kwargs.
    params = {
        'id': id,
        'story_collaborator_userid': story_collaborator_userid,
        'permissiontype': permissiontype,
    }
    allowed = ('id', 'story_collaborator_userid', 'permissiontype',
               'async_req', '_return_http_data_only', '_preload_content',
               '_request_timeout')
    for key, val in six.iteritems(kwargs):
        if key not in allowed:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method story_id_collaborators_userid_permissiontype_get" % key
            )
        params[key] = val
    # Required parameters must be present and non-None.
    if params.get('id') is None:
        raise ApiValueError("Missing the required parameter `id` when calling `story_id_collaborators_userid_permissiontype_get`")  # noqa: E501
    if params.get('story_collaborator_userid') is None:
        raise ApiValueError("Missing the required parameter `story_collaborator_userid` when calling `story_id_collaborators_userid_permissiontype_get`")  # noqa: E501
    if params.get('permissiontype') is None:
        raise ApiValueError("Missing the required parameter `permissiontype` when calling `story_id_collaborators_userid_permissiontype_get`")  # noqa: E501

    path_params = {
        'id': params['id'],
        'story_collaborator_userid': params['story_collaborator_userid'],
        'permissiontype': params['permissiontype'],
    }
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json']),  # noqa: E501
    }
    return self.api_client.call_api(
        '/{id}/collaborators/authorize/{story_collaborator_userid}/{permissiontype}', 'GET',
        path_params,
        [],  # query params
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type=None,  # noqa: E501
        auth_settings=[],  # no auth configured for this endpoint
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def story_id_collaborators_userid_put(self, id, story_collaborator_userid, story_collaborator, **kwargs):  # noqa: E501
    """story_id_collaborators_userid_put  # noqa: E501

    Modify a collaborator's permissions.

    Synchronous by default; pass ``async_req=True`` to get a request
    thread instead of the result.

    >>> thread = api.story_id_collaborators_userid_put(id, story_collaborator_userid, story_collaborator, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str id: the id from the story object (required)
    :param str story_collaborator_userid: The presalytics userid (NOT the Id of the story_collaborator object) (required)
    :param StoryCollaborator story_collaborator: Collaborator user id (presalytics userid) and permission type (required)
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. A single
                             number is a total timeout; a (connection, read)
                             tuple sets each phase separately.
    :return: StoryCollaborator
        If the method is called asynchronously,
        returns the request thread.
    """
    # Plain variant: callers only want the deserialized body, so strip
    # the status code and headers from the lower-level helper's result.
    kwargs['_return_http_data_only'] = True
    return self.story_id_collaborators_userid_put_with_http_info(id, story_collaborator_userid, story_collaborator, **kwargs)  # noqa: E501
def story_id_collaborators_userid_put_with_http_info(self, id, story_collaborator_userid, story_collaborator, **kwargs):  # noqa: E501
    """story_id_collaborators_userid_put  # noqa: E501

    Modify a collaborator's permissions.

    Synchronous by default; pass ``async_req=True`` to get a request
    thread instead of the result.

    >>> thread = api.story_id_collaborators_userid_put_with_http_info(id, story_collaborator_userid, story_collaborator, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str id: the id from the story object (required)
    :param str story_collaborator_userid: The presalytics userid (NOT the Id of the story_collaborator object) (required)
    :param StoryCollaborator story_collaborator: Collaborator user id (presalytics userid) and permission type (required)
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. A single
                             number is a total timeout; a (connection, read)
                             tuple sets each phase separately.
    :return: tuple(StoryCollaborator, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously,
        returns the request thread.
    """
    # Gather named parameters, then merge in only the recognised kwargs.
    params = {
        'id': id,
        'story_collaborator_userid': story_collaborator_userid,
        'story_collaborator': story_collaborator,
    }
    allowed = ('id', 'story_collaborator_userid', 'story_collaborator',
               'async_req', '_return_http_data_only', '_preload_content',
               '_request_timeout')
    for key, val in six.iteritems(kwargs):
        if key not in allowed:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method story_id_collaborators_userid_put" % key
            )
        params[key] = val
    # Required parameters must be present and non-None.
    if params.get('id') is None:
        raise ApiValueError("Missing the required parameter `id` when calling `story_id_collaborators_userid_put`")  # noqa: E501
    if params.get('story_collaborator_userid') is None:
        raise ApiValueError("Missing the required parameter `story_collaborator_userid` when calling `story_id_collaborators_userid_put`")  # noqa: E501
    if params.get('story_collaborator') is None:
        raise ApiValueError("Missing the required parameter `story_collaborator` when calling `story_id_collaborators_userid_put`")  # noqa: E501

    path_params = {
        'id': params['id'],
        'story_collaborator_userid': params['story_collaborator_userid'],
    }
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json']),  # noqa: E501
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json']),  # noqa: E501
    }
    # The request body is the updated collaborator record.
    body_params = params['story_collaborator']
    return self.api_client.call_api(
        '/{id}/collaborators/{story_collaborator_userid}', 'PUT',
        path_params,
        [],  # query params
        header_params,
        body=body_params,
        post_params=[],
        files={},
        response_type='StoryCollaborator',  # noqa: E501
        auth_settings=[],  # no auth configured for this endpoint
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def story_id_delete(self, id, **kwargs):  # noqa: E501
    """Remove story  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to get a request
    thread instead of the result.

    >>> thread = api.story_id_delete(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str id: the id from the story object (required)
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. A single
                             number is a total timeout; a (connection, read)
                             tuple sets each phase separately.
    :return: None
        If the method is called asynchronously,
        returns the request thread.
    """
    # Plain variant: callers only want the deserialized body, so strip
    # the status code and headers from the lower-level helper's result.
    kwargs['_return_http_data_only'] = True
    return self.story_id_delete_with_http_info(id, **kwargs)  # noqa: E501
def story_id_delete_with_http_info(self, id, **kwargs):  # noqa: E501
    """Remove story  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to get a request
    thread instead of the result.

    >>> thread = api.story_id_delete_with_http_info(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str id: the id from the story object (required)
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. A single
                             number is a total timeout; a (connection, read)
                             tuple sets each phase separately.
    :return: None
        If the method is called asynchronously,
        returns the request thread.
    """
    # Gather named parameters, then merge in only the recognised kwargs.
    params = {'id': id}
    allowed = ('id', 'async_req', '_return_http_data_only',
               '_preload_content', '_request_timeout')
    for key, val in six.iteritems(kwargs):
        if key not in allowed:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method story_id_delete" % key
            )
        params[key] = val
    # The story id is mandatory and must be non-None.
    if params.get('id') is None:
        raise ApiValueError("Missing the required parameter `id` when calling `story_id_delete`")  # noqa: E501

    path_params = {'id': params['id']}
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json']),  # noqa: E501
    }
    return self.api_client.call_api(
        '/{id}', 'DELETE',
        path_params,
        [],  # query params
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type=None,  # noqa: E501
        auth_settings=[],  # no auth configured for this endpoint
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def story_id_file_ooxmlautomationid_get(self, id, ooxml_automation_id, **kwargs):  # noqa: E501
    """story_id_file_ooxmlautomationid_get  # noqa: E501

    Get updated story as an Open Office XML file (e.g., .pptx, .docx, .xlsx).

    Synchronous by default; pass ``async_req=True`` to get a request
    thread instead of the result.

    >>> thread = api.story_id_file_ooxmlautomationid_get(id, ooxml_automation_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str id: the id from the story object (required)
    :param str ooxml_automation_id: the id of the ooxml_automation object (required)
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. A single
                             number is a total timeout; a (connection, read)
                             tuple sets each phase separately.
    :return: file
        If the method is called asynchronously,
        returns the request thread.
    """
    # Plain variant: callers only want the deserialized body, so strip
    # the status code and headers from the lower-level helper's result.
    kwargs['_return_http_data_only'] = True
    return self.story_id_file_ooxmlautomationid_get_with_http_info(id, ooxml_automation_id, **kwargs)  # noqa: E501
def story_id_file_ooxmlautomationid_get_with_http_info(self, id, ooxml_automation_id, **kwargs):  # noqa: E501
    """story_id_file_ooxmlautomationid_get  # noqa: E501

    Get updated story as an Open Office XML file (e.g., .pptx, .docx, .xlsx).

    Synchronous by default; pass ``async_req=True`` to get a request
    thread instead of the result.

    >>> thread = api.story_id_file_ooxmlautomationid_get_with_http_info(id, ooxml_automation_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str id: the id from the story object (required)
    :param str ooxml_automation_id: the id of the ooxml_automation object (required)
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. A single
                             number is a total timeout; a (connection, read)
                             tuple sets each phase separately.
    :return: tuple(file, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously,
        returns the request thread.
    """
    # Gather named parameters, then merge in only the recognised kwargs.
    params = {'id': id, 'ooxml_automation_id': ooxml_automation_id}
    allowed = ('id', 'ooxml_automation_id', 'async_req',
               '_return_http_data_only', '_preload_content',
               '_request_timeout')
    for key, val in six.iteritems(kwargs):
        if key not in allowed:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method story_id_file_ooxmlautomationid_get" % key
            )
        params[key] = val
    # Required parameters must be present and non-None.
    if params.get('id') is None:
        raise ApiValueError("Missing the required parameter `id` when calling `story_id_file_ooxmlautomationid_get`")  # noqa: E501
    if params.get('ooxml_automation_id') is None:
        raise ApiValueError("Missing the required parameter `ooxml_automation_id` when calling `story_id_file_ooxmlautomationid_get`")  # noqa: E501

    path_params = {
        'id': params['id'],
        'ooxml_automation_id': params['ooxml_automation_id'],
    }
    # Accept any of the OOXML document types, falling back to JSON (errors).
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/vnd.openxmlformats-officedocument.presentationml.presentation', 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet', 'application/vnd.openxmlformats-officedocument.wordprocessingml.document', 'application/json']),  # noqa: E501
    }
    return self.api_client.call_api(
        '/{id}/file/{ooxml_automation_id}', 'GET',
        path_params,
        [],  # query params
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='file',  # noqa: E501
        auth_settings=[],  # no auth configured for this endpoint
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def story_id_get(self, id, **kwargs):  # noqa: E501
    """Return story metadata, including a JSON object with the story outline.  # noqa: E501

    Synchronous by default; pass async_req=True for an asynchronous request.

    >>> thread = api.story_id_get(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str id: the id from the story object (required)
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             object without reading/decoding response data.
                             Default is True.
    :param _request_timeout: total-request timeout (single number) or a
                             (connection, read) tuple of timeouts.
    :return: Story
        If the method is called asynchronously, returns the request thread.
    """
    # Delegate to the *_with_http_info variant, forcing data-only returns.
    kwargs.update(_return_http_data_only=True)
    return self.story_id_get_with_http_info(id, **kwargs)  # noqa: E501
def story_id_get_with_http_info(self, id, **kwargs):  # noqa: E501
    """Return story metadata, including a JSON object with the story outline.  # noqa: E501

    Synchronous by default; pass async_req=True for an asynchronous request.

    >>> thread = api.story_id_get_with_http_info(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str id: the id from the story object (required)
    :param _return_http_data_only: response data without status code/headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             object without reading/decoding response data.
                             Default is True.
    :param _request_timeout: total-request timeout (single number) or a
                             (connection, read) tuple of timeouts.
    :return: tuple(Story, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously, returns the request thread.
    """
    allowed = ('id', 'async_req', '_return_http_data_only',
               '_preload_content', '_request_timeout')
    # Reject any keyword argument this endpoint does not understand.
    for name in kwargs:
        if name not in allowed:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method story_id_get" % name
            )
    params = dict(kwargs, id=id)
    if params.get('id') is None:
        raise ApiValueError("Missing the required parameter `id` when calling `story_id_get`")  # noqa: E501

    header_params = {
        'Accept': self.api_client.select_header_accept(['application/json']),  # noqa: E501
    }
    # No authentication required for this endpoint.
    return self.api_client.call_api(
        '/{id}', 'GET',
        {'id': params['id']},
        [],
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='Story',  # noqa: E501
        auth_settings=[],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def story_id_put(self, id, story, **kwargs):  # noqa: E501
    """Update story metadata, including the story outline.  # noqa: E501

    Synchronous by default; pass async_req=True for an asynchronous request.

    >>> thread = api.story_id_put(id, story, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str id: the id from the story object (required)
    :param Story story: The updated story object (required)
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             object without reading/decoding response data.
                             Default is True.
    :param _request_timeout: total-request timeout (single number) or a
                             (connection, read) tuple of timeouts.
    :return: Story
        If the method is called asynchronously, returns the request thread.
    """
    # Delegate to the *_with_http_info variant, forcing data-only returns.
    kwargs.update(_return_http_data_only=True)
    return self.story_id_put_with_http_info(id, story, **kwargs)  # noqa: E501
def story_id_put_with_http_info(self, id, story, **kwargs):  # noqa: E501
    """Update story metadata, including the story outline.  # noqa: E501

    Synchronous by default; pass async_req=True for an asynchronous request.

    >>> thread = api.story_id_put_with_http_info(id, story, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str id: the id from the story object (required)
    :param Story story: The updated story object (required)
    :param _return_http_data_only: response data without status code/headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             object without reading/decoding response data.
                             Default is True.
    :param _request_timeout: total-request timeout (single number) or a
                             (connection, read) tuple of timeouts.
    :return: tuple(Story, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously, returns the request thread.
    """
    allowed = ('id', 'story', 'async_req', '_return_http_data_only',
               '_preload_content', '_request_timeout')
    # Reject any keyword argument this endpoint does not understand.
    for name in kwargs:
        if name not in allowed:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method story_id_put" % name
            )
    params = dict(kwargs, id=id, story=story)
    if params.get('id') is None:
        raise ApiValueError("Missing the required parameter `id` when calling `story_id_put`")  # noqa: E501
    if params.get('story') is None:
        raise ApiValueError("Missing the required parameter `story` when calling `story_id_put`")  # noqa: E501

    header_params = {
        'Accept': self.api_client.select_header_accept(['application/json']),  # noqa: E501
        'Content-Type': self.api_client.select_header_content_type(['application/json']),  # noqa: E501
    }
    # The story object travels as the JSON request body.
    return self.api_client.call_api(
        '/{id}', 'PUT',
        {'id': params['id']},
        [],
        header_params,
        body=params['story'],
        post_params=[],
        files={},
        response_type='Story',  # noqa: E501
        auth_settings=[],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def story_permission_types_get(self, **kwargs):  # noqa: E501
    """Return a list of possible user permission types.  # noqa: E501

    Synchronous by default; pass async_req=True for an asynchronous request.

    >>> thread = api.story_permission_types_get(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             object without reading/decoding response data.
                             Default is True.
    :param _request_timeout: total-request timeout (single number) or a
                             (connection, read) tuple of timeouts.
    :return: list[PermissionType]
        If the method is called asynchronously, returns the request thread.
    """
    # Delegate to the *_with_http_info variant, forcing data-only returns.
    kwargs.update(_return_http_data_only=True)
    return self.story_permission_types_get_with_http_info(**kwargs)  # noqa: E501
def story_permission_types_get_with_http_info(self, **kwargs):  # noqa: E501
    """Return a list of possible user permission types.  # noqa: E501

    Synchronous by default; pass async_req=True for an asynchronous request.

    >>> thread = api.story_permission_types_get_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param _return_http_data_only: response data without status code/headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             object without reading/decoding response data.
                             Default is True.
    :param _request_timeout: total-request timeout (single number) or a
                             (connection, read) tuple of timeouts.
    :return: tuple(list[PermissionType], status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously, returns the request thread.
    """
    allowed = ('async_req', '_return_http_data_only',
               '_preload_content', '_request_timeout')
    # Reject any keyword argument this endpoint does not understand.
    for name in kwargs:
        if name not in allowed:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method story_permission_types_get" % name
            )

    header_params = {
        'Accept': self.api_client.select_header_accept(['application/json']),  # noqa: E501
    }
    # No parameters or body: a plain GET against the collection endpoint.
    return self.api_client.call_api(
        '/permission_types', 'GET',
        {},
        [],
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='list[PermissionType]',  # noqa: E501
        auth_settings=[],
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),  # noqa: E501
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def story_post(self, outline, **kwargs):  # noqa: E501
    """Upload a new story to the presalytics api.  # noqa: E501

    Synchronous by default; pass async_req=True for an asynchronous request.

    >>> thread = api.story_post(outline, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param Outline outline: A story outline json object (required)
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             object without reading/decoding response data.
                             Default is True.
    :param _request_timeout: total-request timeout (single number) or a
                             (connection, read) tuple of timeouts.
    :return: Story
        If the method is called asynchronously, returns the request thread.
    """
    # Delegate to the *_with_http_info variant, forcing data-only returns.
    kwargs.update(_return_http_data_only=True)
    return self.story_post_with_http_info(outline, **kwargs)  # noqa: E501
def story_post_with_http_info(self, outline, **kwargs):  # noqa: E501
    """Upload a new story to the presalytics api.  # noqa: E501

    Synchronous by default; pass async_req=True for an asynchronous request.

    >>> thread = api.story_post_with_http_info(outline, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param Outline outline: A story outline json object (required)
    :param _return_http_data_only: response data without status code/headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             object without reading/decoding response data.
                             Default is True.
    :param _request_timeout: total-request timeout (single number) or a
                             (connection, read) tuple of timeouts.
    :return: tuple(Story, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously, returns the request thread.
    """
    allowed = ('outline', 'async_req', '_return_http_data_only',
               '_preload_content', '_request_timeout')
    # Reject any keyword argument this endpoint does not understand.
    for name in kwargs:
        if name not in allowed:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method story_post" % name
            )
    params = dict(kwargs, outline=outline)
    if params.get('outline') is None:
        raise ApiValueError("Missing the required parameter `outline` when calling `story_post`")  # noqa: E501

    header_params = {
        'Accept': self.api_client.select_header_accept(['application/json']),  # noqa: E501
        'Content-Type': self.api_client.select_header_content_type(['application/json']),  # noqa: E501
    }
    # The outline object travels as the JSON request body.
    return self.api_client.call_api(
        '/', 'POST',
        {},
        [],
        header_params,
        body=params['outline'],
        post_params=[],
        files={},
        response_type='Story',  # noqa: E501
        auth_settings=[],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def story_post_file(self, **kwargs):  # noqa: E501
    """Upload a new story to the presalytics api via an Open Office Xml file.  # noqa: E501

    Synchronous by default; pass async_req=True for an asynchronous request.

    >>> thread = api.story_post_file(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param list[file] file:
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             object without reading/decoding response data.
                             Default is True.
    :param _request_timeout: total-request timeout (single number) or a
                             (connection, read) tuple of timeouts.
    :return: Story
        If the method is called asynchronously, returns the request thread.
    """
    # Delegate to the *_with_http_info variant, forcing data-only returns.
    kwargs.update(_return_http_data_only=True)
    return self.story_post_file_with_http_info(**kwargs)  # noqa: E501
def story_post_file_with_http_info(self, **kwargs):  # noqa: E501
    """Upload a new story to the presalytics api via an Open Office Xml file.  # noqa: E501

    Synchronous by default; pass async_req=True for an asynchronous request.

    >>> thread = api.story_post_file_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param list[file] file:
    :param _return_http_data_only: response data without status code/headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             object without reading/decoding response data.
                             Default is True.
    :param _request_timeout: total-request timeout (single number) or a
                             (connection, read) tuple of timeouts.
    :return: tuple(Story, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously, returns the request thread.
    """
    allowed = ('file', 'async_req', '_return_http_data_only',
               '_preload_content', '_request_timeout')
    # Reject any keyword argument this endpoint does not understand.
    for name in kwargs:
        if name not in allowed:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method story_post_file" % name
            )

    files = {}
    collection_formats = {}
    if 'file' in kwargs:
        # Multipart upload; 'csv' is the collection format for multiple files.
        files['file'] = kwargs['file']  # noqa: E501
        collection_formats['file'] = 'csv'  # noqa: E501

    header_params = {
        'Accept': self.api_client.select_header_accept(['application/json']),  # noqa: E501
        'Content-Type': self.api_client.select_header_content_type(['multipart/form-data']),  # noqa: E501
    }
    return self.api_client.call_api(
        '/file', 'POST',
        {},
        [],
        header_params,
        body=None,
        post_params=[],
        files=files,
        response_type='Story',  # noqa: E501
        auth_settings=[],
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),  # noqa: E501
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats=collection_formats)
def story_teller_id_get(self, id, **kwargs):  # noqa: E501
    """Render a story as a reveal.js web document.  # noqa: E501

    Synchronous by default; pass async_req=True for an asynchronous request.

    >>> thread = api.story_teller_id_get(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str id: the id from the story object (required)
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             object without reading/decoding response data.
                             Default is True.
    :param _request_timeout: total-request timeout (single number) or a
                             (connection, read) tuple of timeouts.
    :return: str
        If the method is called asynchronously, returns the request thread.
    """
    # Delegate to the *_with_http_info variant, forcing data-only returns.
    kwargs.update(_return_http_data_only=True)
    return self.story_teller_id_get_with_http_info(id, **kwargs)  # noqa: E501
def story_teller_id_get_with_http_info(self, id, **kwargs):  # noqa: E501
    """Render a story as a reveal.js web document.  # noqa: E501

    Synchronous by default; pass async_req=True for an asynchronous request.

    >>> thread = api.story_teller_id_get_with_http_info(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str id: the id from the story object (required)
    :param _return_http_data_only: response data without status code/headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             object without reading/decoding response data.
                             Default is True.
    :param _request_timeout: total-request timeout (single number) or a
                             (connection, read) tuple of timeouts.
    :return: tuple(str, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously, returns the request thread.
    """
    allowed = ('id', 'async_req', '_return_http_data_only',
               '_preload_content', '_request_timeout')
    # Reject any keyword argument this endpoint does not understand.
    for name in kwargs:
        if name not in allowed:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method story_teller_id_get" % name
            )
    params = dict(kwargs, id=id)
    if params.get('id') is None:
        raise ApiValueError("Missing the required parameter `id` when calling `story_teller_id_get`")  # noqa: E501

    header_params = {
        'Accept': self.api_client.select_header_accept(['text/html', 'application/json']),  # noqa: E501
    }
    # Response is the rendered HTML document (or JSON on error).
    return self.api_client.call_api(
        '/teller/{id}', 'GET',
        {'id': params['id']},
        [],
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='str',  # noqa: E501
        auth_settings=[],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
| 47.303893
| 268
| 0.615711
| 9,302
| 81,410
| 5.120189
| 0.026553
| 0.039809
| 0.060553
| 0.028345
| 0.970228
| 0.967645
| 0.959961
| 0.954418
| 0.945221
| 0.93445
| 0
| 0.012722
| 0.311559
| 81,410
| 1,720
| 269
| 47.331395
| 0.83708
| 0.463555
| 0
| 0.786082
| 1
| 0
| 0.200595
| 0.078799
| 0
| 0
| 0
| 0
| 0
| 1
| 0.039948
| false
| 0
| 0.006443
| 0
| 0.08634
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
946f1bb1863c937e2374d221702e27c26cb0224e
| 115
|
py
|
Python
|
h_transformer_1d/__init__.py
|
stefaj/h-transformer-1d
|
2cbb266aa4f4d9480a093bcd9229c28019c35dbb
|
[
"MIT"
] | null | null | null |
h_transformer_1d/__init__.py
|
stefaj/h-transformer-1d
|
2cbb266aa4f4d9480a093bcd9229c28019c35dbb
|
[
"MIT"
] | null | null | null |
h_transformer_1d/__init__.py
|
stefaj/h-transformer-1d
|
2cbb266aa4f4d9480a093bcd9229c28019c35dbb
|
[
"MIT"
] | null | null | null |
from h_transformer_1d.h_transformer_1d import HTransformer1D,CausalHAttention1D,HAttention1D, FeedForward, PreNorm
| 57.5
| 114
| 0.904348
| 13
| 115
| 7.692308
| 0.769231
| 0.24
| 0.28
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.045872
| 0.052174
| 115
| 1
| 115
| 115
| 0.87156
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
84ab71dfc0e276124b989a8f43209ded0fa70ed3
| 8,862
|
py
|
Python
|
pyspark/local/classification/src/train.py
|
cipriancus/FakeTwitterDetection
|
24226473ecce07eae2f3bcce3b11d8b3b0741752
|
[
"Apache-2.0"
] | 3
|
2017-10-16T17:47:01.000Z
|
2018-03-05T07:54:56.000Z
|
pyspark/local/classification/src/train.py
|
cocageorgiana/FakeTwitterDetection
|
24226473ecce07eae2f3bcce3b11d8b3b0741752
|
[
"Apache-2.0"
] | 13
|
2017-10-25T17:06:21.000Z
|
2017-12-28T16:17:28.000Z
|
pyspark/local/classification/src/train.py
|
cocageorgiana/FakeTwitterDetection
|
24226473ecce07eae2f3bcce3b11d8b3b0741752
|
[
"Apache-2.0"
] | 2
|
2018-03-31T17:45:38.000Z
|
2019-05-21T08:06:31.000Z
|
from pyspark import SparkContext
import src.config as cfg
import src.processing as prp
import src.algorithms.SVM as svm
import src.algorithms.NB as nb
import src.algorithms.RandomForest as rf
import time
import warnings
warnings.filterwarnings("ignore")

testing_file_location = 'Test_feature_extracted.csv'
training_file_location = 'Training_feature_extracted.csv'


def _run_svm(spark_context, maxIter, regParam, tol, threshold, aggregationDepth):
    """Train one SVM configuration, print its confusion matrix and timing."""
    svm_time = time.time()
    settings = [('maxIter', maxIter), ('regParam', regParam), ('tol', tol), ('threshold', threshold),
                ('aggregationDepth', aggregationDepth)]
    svm_classifier = svm.SVMClassifier(training_file_location, spark_context, maxIter, regParam, tol,
                                       threshold, aggregationDepth)
    svm_predict_test_data_class = svm_classifier.classify_testdata(testing_file_location)
    svm_classifier.confusion_matrix(svm_predict_test_data_class)
    print("SVM EXECUTION TIME " + str(time.time() - svm_time) + "for settings " + str(settings))


def _run_rf(spark_context, maxDepth, maxBins, minInstancesPerNode, minInfoGain,
            maxMemoryInMB, impurity, numTrees):
    """Train one Random Forest configuration, print its confusion matrix and timing."""
    rf_time = time.time()
    settings = [('maxDepth', maxDepth), ('maxBins', maxBins), ('minInstancesPerNode', minInstancesPerNode),
                ('minInfoGain', minInfoGain),
                ('maxMemoryInMB', maxMemoryInMB), ('impurity', impurity), ('numTrees', numTrees)]
    rf_classifier = rf.RFClassifier(training_file_location, spark_context, maxDepth, maxBins, minInstancesPerNode,
                                    minInfoGain, maxMemoryInMB, impurity, numTrees)
    rf_predict_test_data_class = rf_classifier.classify_testdata(testing_file_location)
    rf_classifier.confusion_matrix(rf_predict_test_data_class)
    print("RF EXECUTION TIME " + str(time.time() - rf_time) + "for settings " + str(settings))


def _run_nb(spark_context, smoothing, modelType):
    """Train one Naive Bayes configuration, print its confusion matrix and timing."""
    nb_time = time.time()
    settings = [('smoothing', smoothing), ('modelType', modelType)]
    nb_classifier = nb.NbClassifier(training_file_location, spark_context, smoothing, modelType)
    nb_predict_test_data_class = nb_classifier.classify_testdata(testing_file_location)
    nb_classifier.confusion_matrix(nb_predict_test_data_class)
    print("NB EXECUTION TIME " + str(time.time() - nb_time) + "for settings " + str(settings))


if __name__ == "__main__":
    import os
    print(os.getcwd())
    spark_context = SparkContext("local", "Twitter")
    start_time = time.time()

    # Feature-extract both datasets before any classifier runs.
    training_data = prp.PreProcessing('../data/training_dataset.csv', 'Training')
    training_data.process()
    test_data = prp.PreProcessing('../data/test_dataset.csv', 'Test')
    test_data.process()

    # Each tuple is (maxIter, regParam, tol, threshold, aggregationDepth).
    for cfg in [(200, 0.0, 1e-6, 0.0, 2),
                (100, 0.0, 1e-7, 0.1, 3),
                (50, 0.1, 1e-7, 0.1, 5)]:
        _run_svm(spark_context, *cfg)

    # Each tuple is (maxDepth, maxBins, minInstancesPerNode, minInfoGain,
    # maxMemoryInMB, impurity, numTrees).
    for cfg in [(5, 32, 1, 0.0, 256, "gini", 50),
                (10, 32, 1, 0.0, 256, "gini", 100),
                (10, 32, 1, 0.0, 1024, "gini", 200)]:
        _run_rf(spark_context, *cfg)

    # Naive Bayes varies only the smoothing parameter.
    for smoothing in (1.0, 3.0, 4.0):
        _run_nb(spark_context, smoothing, "multinomial")

    print("Total Execution time is " + str(time.time() - start_time))
| 43.871287
| 114
| 0.565109
| 790
| 8,862
| 6.065823
| 0.11519
| 0.050083
| 0.056344
| 0.075125
| 0.882304
| 0.877087
| 0.87187
| 0.858097
| 0.858097
| 0.858097
| 0
| 0.009947
| 0.171857
| 8,862
| 201
| 115
| 44.089552
| 0.643003
| 0.00237
| 0
| 0.731343
| 0
| 0
| 0.131449
| 0.022503
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.067164
| 0
| 0.067164
| 0.08209
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
84b6de6f5aa72be9c3df47ea20307b20ea77b410
| 168
|
py
|
Python
|
app_server/app/api/dependencies/file.py
|
dumaevrinat/lung_diseases
|
caa24a0c263e82106f585b7bcb0c57417fc9a06a
|
[
"MIT"
] | 3
|
2021-05-09T01:50:41.000Z
|
2022-01-06T08:07:48.000Z
|
app_server/app/api/dependencies/file.py
|
dumaevrinat/lung_diseases
|
caa24a0c263e82106f585b7bcb0c57417fc9a06a
|
[
"MIT"
] | null | null | null |
app_server/app/api/dependencies/file.py
|
dumaevrinat/lung_diseases
|
caa24a0c263e82106f585b7bcb0c57417fc9a06a
|
[
"MIT"
] | null | null | null |
from fastapi import UploadFile, File
from app.service.file import get_extension
def get_file_extension(file: UploadFile = File(...)):
    """FastAPI dependency: delegate to ``get_extension`` for the uploaded file."""
    extension = get_extension(file)
    return extension
| 21
| 53
| 0.77381
| 23
| 168
| 5.478261
| 0.478261
| 0.222222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.136905
| 168
| 7
| 54
| 24
| 0.868966
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.5
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
84d31487a0a92c188adfbfc38735fed7b9d7cc8f
| 32,059
|
py
|
Python
|
ch3/length_of_longest_substring.py
|
qiuhuachuan/leetcode
|
d5d4f123728093e3528180b64781719d608b61fa
|
[
"MIT"
] | null | null | null |
ch3/length_of_longest_substring.py
|
qiuhuachuan/leetcode
|
d5d4f123728093e3528180b64781719d608b61fa
|
[
"MIT"
] | null | null | null |
ch3/length_of_longest_substring.py
|
qiuhuachuan/leetcode
|
d5d4f123728093e3528180b64781719d608b61fa
|
[
"MIT"
] | null | null | null |
'''
Given a string, find the length of the longest substring that contains no
repeated characters.
'''
class Solution:
    def length_of_longest_substring(self, s: str) -> int:
        """Sliding-window scan: track each character's last position + 1 and
        keep the window start past any repeat seen inside the window."""
        last_pos = {}
        window_start, best = 0, 0
        for idx, ch in enumerate(s):
            if ch in last_pos:
                window_start = max(last_pos[ch], window_start)
            best = max(best, idx - window_start + 1)
            last_pos[ch] = idx + 1
        return best
import string

# The original source hard-coded a ~30 KB string literal that was simply
# the same 95-character pattern — ASCII letters, digits, punctuation and
# a trailing space — repeated hundreds of times.  Building it
# programmatically is equivalent for this exercise: once every distinct
# character has appeared at least twice, the longest duplicate-free
# window is pinned at 95 (the size of the distinct-character set), so
# the printed result does not depend on the exact repetition count.
_PATTERN = string.ascii_letters + string.digits + string.punctuation + " "
s_str = _PATTERN * 200  # any repeat count >= 2 yields the same answer

s = Solution()
# Expected output: 95 — one full pass over the distinct character set.
print(s.length_of_longest_substring(s_str))
| 1,781.055556
| 31,663
| 0.647837
| 389
| 32,059
| 52.532134
| 0.079692
| 1.972107
| 2.949058
| 3.919941
| 0.989087
| 0.989087
| 0.989087
| 0.989087
| 0.989087
| 0.989087
| 0
| 0.10333
| 0.014692
| 32,059
| 18
| 31,664
| 1,781.055556
| 0.543593
| 0.000998
| 0
| 0
| 0
| 0
| 0.652592
| 0.645815
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0
| 0
| 0.230769
| 0.076923
| 0
| 0
| 1
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 14
|
ca8c7312a69347b4be39f111ca257b7bf9560e5c
| 222
|
py
|
Python
|
chainerui/__init__.py
|
chainer/chainerui
|
91c5c26d9154a008079dbb0bcbf69b5590d105f7
|
[
"MIT"
] | 185
|
2017-12-15T09:24:07.000Z
|
2022-01-20T11:20:13.000Z
|
chainerui/__init__.py
|
chainer/chainerui
|
91c5c26d9154a008079dbb0bcbf69b5590d105f7
|
[
"MIT"
] | 191
|
2017-12-15T09:14:52.000Z
|
2022-02-17T14:09:19.000Z
|
chainerui/__init__.py
|
chainer/chainerui
|
91c5c26d9154a008079dbb0bcbf69b5590d105f7
|
[
"MIT"
] | 29
|
2017-12-15T09:40:45.000Z
|
2022-03-13T11:21:11.000Z
|
from chainerui import _version
from chainerui.client.client import init # NOQA
from chainerui.client.client import log # NOQA
from chainerui.client.client import log_reporter # NOQA
__version__ = _version.__version__
| 27.75
| 56
| 0.815315
| 29
| 222
| 5.862069
| 0.310345
| 0.305882
| 0.335294
| 0.441176
| 0.629412
| 0.447059
| 0.447059
| 0
| 0
| 0
| 0
| 0
| 0.135135
| 222
| 7
| 57
| 31.714286
| 0.885417
| 0.063063
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.8
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
04ab06be4c561f6cde5817bdcea2a271b22e838b
| 4,470
|
py
|
Python
|
Server/app/docs/user.py
|
JoMingyu/BookCheck-Backend
|
fbe71a39e385a3c739e7e40ab1153efbe7835576
|
[
"MIT"
] | 1
|
2018-04-12T10:51:49.000Z
|
2018-04-12T10:51:49.000Z
|
Server/app/docs/user.py
|
JoMingyu/BookCheck-Backend
|
fbe71a39e385a3c739e7e40ab1153efbe7835576
|
[
"MIT"
] | null | null | null |
Server/app/docs/user.py
|
JoMingyu/BookCheck-Backend
|
fbe71a39e385a3c739e7e40ab1153efbe7835576
|
[
"MIT"
] | null | null | null |
SIGNUP_POST = {
'tags': ['계정'],
'description': '일반 사용자 회원가입',
'parameters': [
{
'name': 'id',
'description': '사용자 ID',
'in': 'formData',
'type': 'str',
'required': True
},
{
'name': 'pw',
'description': '사용자 비밀번호',
'in': 'formData',
'type': 'str',
'required': True
}
],
'responses': {
'201': {
'description': '회원가입 성공'
},
'204': {
'description': '회원가입 실패(이미 가입된 ID)'
}
}
}
AUTH_COMMON_USER_POST = {
'tags': ['계정'],
'description': '일반 사용자 로그인. Access Token과 Refresh Token을 반환합니다.',
'parameters': [
{
'name': 'id',
'description': '사용자 ID',
'in': 'formData',
'type': 'str',
'required': True
},
{
'name': 'pw',
'description': '사용자 비밀번호',
'in': 'formData',
'type': 'str',
'required': True
}
],
'responses': {
'200': {
'description': '로그인 성공',
'examples': {
'application/json': {
"access_token": "eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJleHAiOjE1MTI2NDAzOTgsImlhdCI6MTUxMjM4MTE5OCwibmJmIjoxNTEyMzgxMTk4LCJqdGkiOiJiNGY5YzIxMS00Mzc4LTRhYmQtOTNlNi00ZjNmYTM1MGZiYWIiLCJpZGVudGl0eSI6ImNpdHk3MzEwQG5hdmVyLmNvbSIsImZyZXNoIjpmYWxzZSwidHlwZSI6ImFjY2VzcyJ9.ryXc6WsRutdBsZFFDZUtP9Cd_JV8w2fdsI4NE_XICYs",
"refresh_token": "eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJleHAiOjE1NDM5MTcxOTgsImlhdCI6MTUxMjM4MTE5OCwibmJmIjoxNTEyMzgxMTk4LCJqdGkiOiJiZDU0Nzk3Zi02MDU0LTQxZjQtOTc5ZC1mYzQ0ZjNjMTM5YzgiLCJpZGVudGl0eSI6IjcwMmRiNmI2LWE5NTEtNDJjZi1hOGRmLTc4MjdiYTRhNzhjYyIsInR5cGUiOiJyZWZyZXNoIn0.CyRz9KMKgWh0Fv1M7DVHTbntBG3uAPKre3fbFUk18eI"
}
}
},
'401': {
'description': '로그인 실패(올바르지 않은 ID 또는 PW)'
}
}
}
AUTH_ADMIN_POST = {
'tags': ['계정'],
'description': '관리자 로그인. Access Token과 Refresh Token을 반환합니다.',
'parameters': [
{
'name': 'id',
'description': '사용자 ID',
'in': 'formData',
'type': 'str',
'required': True
},
{
'name': 'pw',
'description': '사용자 비밀번호',
'in': 'formData',
'type': 'str',
'required': True
}
],
'responses': {
'200': {
'description': '로그인 성공',
'examples': {
'application/json': {
"access_token": "eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJleHAiOjE1MTI2NDAzOTgsImlhdCI6MTUxMjM4MTE5OCwibmJmIjoxNTEyMzgxMTk4LCJqdGkiOiJiNGY5YzIxMS00Mzc4LTRhYmQtOTNlNi00ZjNmYTM1MGZiYWIiLCJpZGVudGl0eSI6ImNpdHk3MzEwQG5hdmVyLmNvbSIsImZyZXNoIjpmYWxzZSwidHlwZSI6ImFjY2VzcyJ9.ryXc6WsRutdBsZFFDZUtP9Cd_JV8w2fdsI4NE_XICYs",
"refresh_token": "eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJleHAiOjE1NDM5MTcxOTgsImlhdCI6MTUxMjM4MTE5OCwibmJmIjoxNTEyMzgxMTk4LCJqdGkiOiJiZDU0Nzk3Zi02MDU0LTQxZjQtOTc5ZC1mYzQ0ZjNjMTM5YzgiLCJpZGVudGl0eSI6IjcwMmRiNmI2LWE5NTEtNDJjZi1hOGRmLTc4MjdiYTRhNzhjYyIsInR5cGUiOiJyZWZyZXNoIn0.CyRz9KMKgWh0Fv1M7DVHTbntBG3uAPKre3fbFUk18eI"
}
}
},
'401': {
'description': '로그인 실패(올바르지 않은 ID 또는 PW)'
}
}
}
REFRESH_POST = {
'tags': ['계정'],
'description': '새로운 Access Token 발급',
'parameters': [
{
'name': 'Authorization',
'description': 'JWT Refresh Token(JWT ***)',
'in': 'header',
'type': 'str',
'required': True
}
],
'responses': {
'200': {
'description': '인증 성공, 새로운 Access Token 발급',
'examples': {
'application/json': {
"access_token": "eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJleHAiOjE1MTI2NDAzOTgsImlhdCI6MTUxMjM4MTE5OCwibmJmIjoxNTEyMzgxMTk4LCJqdGkiOiJiNGY5YzIxMS00Mzc4LTRhYmQtOTNlNi00ZjNmYTM1MGZiYWIiLCJpZGVudGl0eSI6ImNpdHk3MzEwQG5hdmVyLmNvbSIsImZyZXNoIjpmYWxzZSwidHlwZSI6ImFjY2VzcyJ9.ryXc6WsRutdBsZFFDZUtP9Cd_JV8w2fdsI4NE_XICYs",
}
}
},
'205': {
'description': '다른 디바이스에서 비밀번호가 변경되어 재로그인 필요'
},
'403': {
'description': 'Refresh Token의 내부가 변조되어 재로그인 필요'
}
}
}
| 34.651163
| 339
| 0.569351
| 233
| 4,470
| 10.845494
| 0.300429
| 0.019391
| 0.041551
| 0.052632
| 0.877721
| 0.877721
| 0.857143
| 0.840522
| 0.840522
| 0.840522
| 0
| 0.060367
| 0.318121
| 4,470
| 128
| 340
| 34.921875
| 0.768701
| 0
| 0
| 0.544
| 0
| 0
| 0.569351
| 0.330872
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
04ba39da08c0e8bf893a2850e3ca8d73bd4d532f
| 60
|
py
|
Python
|
boost_python/lib/hello.py
|
red2901/sandbox
|
fae6c1624cc9957593d030f3b0306dbded29f0a2
|
[
"MIT"
] | 1
|
2016-05-16T02:27:46.000Z
|
2016-05-16T02:27:46.000Z
|
boost_python/lib/hello.py
|
red2901/sandbox
|
fae6c1624cc9957593d030f3b0306dbded29f0a2
|
[
"MIT"
] | null | null | null |
boost_python/lib/hello.py
|
red2901/sandbox
|
fae6c1624cc9957593d030f3b0306dbded29f0a2
|
[
"MIT"
] | null | null | null |
import boost_hello_world
print (boost_hello_world.greet())
| 15
| 33
| 0.833333
| 9
| 60
| 5.111111
| 0.666667
| 0.434783
| 0.652174
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 60
| 3
| 34
| 20
| 0.836364
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 7
|
8e0c58b247f68b218268f5e091e012eac7a0d665
| 18,596
|
py
|
Python
|
tests/integ/vlen_test.py
|
JonosGit/hsds
|
4abc4fc22c1e75cc9b15c879c8d00448a115fc92
|
[
"Apache-2.0"
] | 1
|
2020-03-12T12:26:26.000Z
|
2020-03-12T12:26:26.000Z
|
tests/integ/vlen_test.py
|
JonosGit/hsds
|
4abc4fc22c1e75cc9b15c879c8d00448a115fc92
|
[
"Apache-2.0"
] | null | null | null |
tests/integ/vlen_test.py
|
JonosGit/hsds
|
4abc4fc22c1e75cc9b15c879c8d00448a115fc92
|
[
"Apache-2.0"
] | null | null | null |
##############################################################################
# Copyright by The HDF Group. #
# All rights reserved. #
# #
# This file is part of HSDS (HDF5 Scalable Data Service), Libraries and #
# Utilities. The full HSDS copyright notice, including #
# terms governing use, modification, and redistribution, is contained in #
# the file COPYING, which can be found at the root of the source code #
# distribution tree. If you do not have access to this file, you may #
# request a copy from help@hdfgroup.org. #
##############################################################################
import unittest
import requests
import json
import helper
import numpy as np
import sys
sys.path.append('../../hsds/util')
from arrayUtil import arrayToBytes, bytesToArray
from hdf5dtype import createDataType
class VlenTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(VlenTest, self).__init__(*args, **kwargs)
self.base_domain = helper.getTestDomainName(self.__class__.__name__)
helper.setupDomain(self.base_domain)
self.endpoint = helper.getEndpoint()
# main
def testPutVLenInt(self):
# Test PUT value for 1d attribute with variable length int types
print("testPutVLenInt", self.base_domain)
headers = helper.getRequestHeaders(domain=self.base_domain)
req = self.endpoint + '/'
# Get root uuid
rsp = requests.get(req, headers=headers)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
root_uuid = rspJson["root"]
helper.validateId(root_uuid)
# create dataset
vlen_type = {"class": "H5T_VLEN", "base": { "class": "H5T_INTEGER", "base": "H5T_STD_I32LE"}}
payload = {'type': vlen_type, 'shape': [4,]}
req = self.endpoint + "/datasets"
rsp = requests.post(req, data=json.dumps(payload), headers=headers)
self.assertEqual(rsp.status_code, 201) # create dataset
rspJson = json.loads(rsp.text)
dset_uuid = rspJson['id']
self.assertTrue(helper.validateId(dset_uuid))
# link new dataset as 'dset'
name = 'dset'
req = self.endpoint + "/groups/" + root_uuid + "/links/" + name
payload = {"id": dset_uuid}
rsp = requests.put(req, data=json.dumps(payload), headers=headers)
self.assertEqual(rsp.status_code, 201)
# write values to dataset
data = [[1,], [1,2], [1,2,3], [1,2,3,4]]
req = self.endpoint + "/datasets/" + dset_uuid + "/value"
payload = { 'value': data }
rsp = requests.put(req, data=json.dumps(payload), headers=headers)
self.assertEqual(rsp.status_code, 200)
# read values from dataset
rsp = requests.get(req, headers=headers)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
self.assertTrue("hrefs" in rspJson)
self.assertTrue("value" in rspJson)
value = rspJson["value"]
self.assertEqual(len(value), 4)
for i in range(4):
self.assertEqual(value[i], data[i])
# read back a selection
params = {"select": "[2:3]"}
rsp = requests.get(req, headers=headers, params=params)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
self.assertTrue("hrefs" in rspJson)
self.assertTrue("value" in rspJson)
value = rspJson["value"]
self.assertEqual(len(value), 1)
self.assertEqual(value[0], data[2])
def testPutVLenIntBinary(self):
# Test PUT value for 1d attribute with variable length int types using binary transfer
print("testPutVLenIntBinary", self.base_domain)
count = 4
test_values = []
for i in range(count):
e = [1,]
for j in range(0,i):
e.append(j+2)
test_values.append(e)
# test_values == [[1], [1,2]]
headers = helper.getRequestHeaders(domain=self.base_domain)
headers_bin_req = helper.getRequestHeaders(domain=self.base_domain)
headers_bin_req["Content-Type"] = "application/octet-stream"
headers_bin_rsp = helper.getRequestHeaders(domain=self.base_domain)
headers_bin_rsp["accept"] = "application/octet-stream"
req = self.endpoint + '/'
# Get root uuid
rsp = requests.get(req, headers=headers)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
root_uuid = rspJson["root"]
helper.validateId(root_uuid)
# create dataset
vlen_type = {"class": "H5T_VLEN", "base": { "class": "H5T_INTEGER", "base": "H5T_STD_I32LE"}}
payload = {'type': vlen_type, 'shape': [count,]}
req = self.endpoint + "/datasets"
rsp = requests.post(req, data=json.dumps(payload), headers=headers)
self.assertEqual(rsp.status_code, 201) # create dataset
rspJson = json.loads(rsp.text)
dset_uuid = rspJson['id']
self.assertTrue(helper.validateId(dset_uuid))
# link new dataset as 'dset'
name = 'dset'
req = self.endpoint + "/groups/" + root_uuid + "/links/" + name
payload = {"id": dset_uuid}
rsp = requests.put(req, data=json.dumps(payload), headers=headers)
self.assertEqual(rsp.status_code, 201)
# create numpy vlen array
dt = np.dtype('O', metadata={'vlen': np.dtype('int32')})
arr = np.zeros((count,), dtype=dt)
for i in range(count):
arr[i] = np.int32(test_values[i])
# write as binary data
data = arrayToBytes(arr)
self.assertEqual(len(data), 56)
req = self.endpoint + "/datasets/" + dset_uuid + "/value"
rsp = requests.put(req, data=data, headers=headers_bin_req)
self.assertEqual(rsp.status_code, 200)
# read values from dataset with json
rsp = requests.get(req, headers=headers)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
self.assertTrue("hrefs" in rspJson)
self.assertTrue("value" in rspJson)
value = rspJson["value"]
self.assertEqual(len(value), count)
for i in range(count):
self.assertEqual(value[i], test_values[i])
# read as binary
rsp = requests.get(req, headers=headers_bin_rsp)
self.assertEqual(rsp.status_code, 200)
self.assertEqual(rsp.headers['Content-Type'], "application/octet-stream")
data = rsp.content
self.assertEqual(len(data), 56)
arr = bytesToArray(data, dt, [count,])
for i in range(count):
self.assertEqual(value[i], test_values[i])
# read back a selection
params = {"select": "[2:3]"}
rsp = requests.get(req, headers=headers, params=params)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
self.assertTrue("hrefs" in rspJson)
self.assertTrue("value" in rspJson)
value = rspJson["value"]
self.assertEqual(len(value), 1)
self.assertEqual(value[0], [1,2,3])
def testPutVLen2DInt(self):
# Test PUT value for 1d attribute with variable length int types
print("testPutVLen2DInt", self.base_domain)
nrow = 2
ncol = 2
headers = helper.getRequestHeaders(domain=self.base_domain)
req = self.endpoint + '/'
# Get root uuid
rsp = requests.get(req, headers=headers)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
root_uuid = rspJson["root"]
helper.validateId(root_uuid)
# create dataset
vlen_type = {"class": "H5T_VLEN", "base": { "class": "H5T_INTEGER", "base": "H5T_STD_I32LE"}}
payload = {'type': vlen_type, 'shape': [nrow,ncol]}
req = self.endpoint + "/datasets"
rsp = requests.post(req, data=json.dumps(payload), headers=headers)
self.assertEqual(rsp.status_code, 201) # create dataset
rspJson = json.loads(rsp.text)
dset_uuid = rspJson['id']
self.assertTrue(helper.validateId(dset_uuid))
# link new dataset as 'dset'
name = 'dset'
req = self.endpoint + "/groups/" + root_uuid + "/links/" + name
payload = {"id": dset_uuid}
rsp = requests.put(req, data=json.dumps(payload), headers=headers)
self.assertEqual(rsp.status_code, 201)
# write values to dataset
data = []
for i in range(nrow):
row = []
for j in range(ncol):
start = i+j
end = start+j+1
row.append(list(range(start,end)))
data.append(row)
req = self.endpoint + "/datasets/" + dset_uuid + "/value"
payload = { 'value': data }
rsp = requests.put(req, data=json.dumps(payload), headers=headers)
self.assertEqual(rsp.status_code, 200)
# read values from dataset
rsp = requests.get(req, headers=headers)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
self.assertTrue("hrefs" in rspJson)
self.assertTrue("value" in rspJson)
value = rspJson["value"]
self.assertEqual(len(value), nrow)
for i in range(nrow):
for j in range(ncol):
self.assertEqual(value[i][j], data[i][j])
# read values from dataset using selection
params = {"select": "[0:1,0:2]"}
rsp = requests.get(req, headers=headers, params=params)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
self.assertTrue("hrefs" in rspJson)
self.assertTrue("value" in rspJson)
value = rspJson["value"]
self.assertEqual(len(value), 1)
self.assertEqual(len(value[0]), 2)
self.assertEqual(value[0][0], [0])
self.assertEqual(value[0][1], [1,2])
def testPutVLenString(self):
# Test PUT value for 1d attribute with variable length string types
print("testPutVLenString", self.base_domain)
headers = helper.getRequestHeaders(domain=self.base_domain)
req = self.endpoint + '/'
# Get root uuid
rsp = requests.get(req, headers=headers)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
root_uuid = rspJson["root"]
helper.validateId(root_uuid)
# create dataset
vlen_type = {"class": "H5T_STRING", "charSet": "H5T_CSET_ASCII",
"strPad": "H5T_STR_NULLTERM", "length": "H5T_VARIABLE"}
payload = {'type': vlen_type, 'shape': [4,]}
req = self.endpoint + "/datasets"
rsp = requests.post(req, data=json.dumps(payload), headers=headers)
self.assertEqual(rsp.status_code, 201) # create dataset
rspJson = json.loads(rsp.text)
dset_uuid = rspJson['id']
self.assertTrue(helper.validateId(dset_uuid))
# link new dataset as 'dset'
name = 'dset'
req = self.endpoint + "/groups/" + root_uuid + "/links/" + name
payload = {"id": dset_uuid}
rsp = requests.put(req, data=json.dumps(payload), headers=headers)
self.assertEqual(rsp.status_code, 201)
# write values to dataset
data = ["This is", "a variable length", "string", "array"]
req = self.endpoint + "/datasets/" + dset_uuid + "/value"
payload = { 'value': data }
rsp = requests.put(req, data=json.dumps(payload), headers=headers)
self.assertEqual(rsp.status_code, 200)
# read values from dataset
rsp = requests.get(req, headers=headers)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
self.assertTrue("hrefs" in rspJson)
self.assertTrue("value" in rspJson)
value = rspJson["value"]
self.assertEqual(len(value), 4)
for i in range(4):
self.assertEqual(value[i], data[i])
def testPutVLenCompound(self):
# Test PUT value for 1d attribute with variable length int types
print("testPutVLenCompound", self.base_domain)
headers = helper.getRequestHeaders(domain=self.base_domain)
req = self.endpoint + '/'
# Get root uuid
rsp = requests.get(req, headers=headers)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
root_uuid = rspJson["root"]
helper.validateId(root_uuid)
count = 4
# create dataset
fixed_str8_type = {"charSet": "H5T_CSET_ASCII",
"class": "H5T_STRING",
"length": 8,
"strPad": "H5T_STR_NULLPAD" }
fields = [ {"type": {"class": "H5T_INTEGER", "base": "H5T_STD_U64BE"}, "name": "VALUE1"},
{"type": fixed_str8_type, "name": "VALUE2"},
{"type": {"class": "H5T_ARRAY", "dims": [2], "base":
{"class": "H5T_STRING", "charSet": "H5T_CSET_ASCII",
"strPad": "H5T_STR_NULLTERM", "length": "H5T_VARIABLE"}}, "name": "VALUE3"}]
datatype = {'class': 'H5T_COMPOUND', 'fields': fields }
payload = {'type': datatype, 'shape': [count,]}
req = self.endpoint + "/datasets"
rsp = requests.post(req, data=json.dumps(payload), headers=headers)
self.assertEqual(rsp.status_code, 201) # create dataset
rspJson = json.loads(rsp.text)
dset_uuid = rspJson['id']
self.assertTrue(helper.validateId(dset_uuid))
# link new dataset as 'dset'
name = 'dset'
req = self.endpoint + "/groups/" + root_uuid + "/links/" + name
payload = {"id": dset_uuid}
rsp = requests.put(req, data=json.dumps(payload), headers=headers)
self.assertEqual(rsp.status_code, 201)
# write values to dataset
data = []
for i in range(count):
s = ''
for j in range(i+5):
offset = (i + j)%256
s += chr(ord('A') + offset)
e = [i+1, s, ["Hi! "*(i+1), "Bye!" *(i+1)]]
data.append(e)
req = self.endpoint + "/datasets/" + dset_uuid + "/value"
payload = { 'value': data }
rsp = requests.put(req, data=json.dumps(payload), headers=headers)
self.assertEqual(rsp.status_code, 200)
# read values from dataset
rsp = requests.get(req, headers=headers)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
self.assertTrue("hrefs" in rspJson)
self.assertTrue("value" in rspJson)
value = rspJson["value"]
self.assertEqual(len(value), count)
def testPutVLenCompoundBinary(self):
# Test PUT value for 1d attribute with variable length int types
print("testPutVLenCompoundBinary", self.base_domain)
headers = helper.getRequestHeaders(domain=self.base_domain)
headers_bin_req = helper.getRequestHeaders(domain=self.base_domain)
headers_bin_req["Content-Type"] = "application/octet-stream"
headers_bin_rsp = helper.getRequestHeaders(domain=self.base_domain)
headers_bin_rsp["accept"] = "application/octet-stream"
req = self.endpoint + '/'
# Get root uuid
rsp = requests.get(req, headers=headers)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
root_uuid = rspJson["root"]
helper.validateId(root_uuid)
count = 4
# create dataset
fixed_str8_type = {"charSet": "H5T_CSET_ASCII",
"class": "H5T_STRING",
"length": 8,
"strPad": "H5T_STR_NULLPAD" }
fields = [ {"type": {"class": "H5T_INTEGER", "base": "H5T_STD_U64BE"}, "name": "VALUE1"},
{"type": fixed_str8_type, "name": "VALUE2"},
{"type": {"class": "H5T_ARRAY", "dims": [2], "base":
{"class": "H5T_STRING", "charSet": "H5T_CSET_ASCII",
"strPad": "H5T_STR_NULLTERM", "length": "H5T_VARIABLE"}}, "name": "VALUE3"}]
datatype = {'class': 'H5T_COMPOUND', 'fields': fields }
payload = {'type': datatype, 'shape': [count,]}
req = self.endpoint + "/datasets"
rsp = requests.post(req, data=json.dumps(payload), headers=headers)
self.assertEqual(rsp.status_code, 201) # create dataset
rspJson = json.loads(rsp.text)
dset_uuid = rspJson['id']
self.assertTrue(helper.validateId(dset_uuid))
# link new dataset as 'dset'
name = 'dset'
req = self.endpoint + "/groups/" + root_uuid + "/links/" + name
payload = {"id": dset_uuid}
rsp = requests.put(req, data=json.dumps(payload), headers=headers)
self.assertEqual(rsp.status_code, 201)
dt_compound = createDataType(datatype)
# create numpy vlen array
arr = np.zeros((count,), dtype=dt_compound)
for i in range(count):
e = arr[i]
e['VALUE1'] = i+1
s = ''
for j in range(i+5):
offset = (i + j)%26
s += chr(ord('A') + offset)
e['VALUE2'] = s
e['VALUE3'] = ["Hi! "*(i+1), "Bye!" *(i+1)]
# write as binary data
data = arrayToBytes(arr)
self.assertEqual(len(data), 192) # will vary based on count
req = self.endpoint + "/datasets/" + dset_uuid + "/value"
rsp = requests.put(req, data=data, headers=headers_bin_req)
self.assertEqual(rsp.status_code, 200)
# read values from dataset as json
rsp = requests.get(req, headers=headers)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
self.assertTrue("hrefs" in rspJson)
self.assertTrue("value" in rspJson)
value = rspJson["value"]
self.assertEqual(len(value), count)
# read as binary
rsp = requests.get(req, headers=headers_bin_rsp)
self.assertEqual(rsp.status_code, 200)
self.assertEqual(rsp.headers['Content-Type'], "application/octet-stream")
data = rsp.content
self.assertEqual(len(data), 192)
arr = bytesToArray(data, dt_compound, [count,])
if __name__ == '__main__':
#setup test files
unittest.main()
| 39.315011
| 102
| 0.582921
| 2,160
| 18,596
| 4.917593
| 0.102315
| 0.08473
| 0.0627
| 0.079081
| 0.848899
| 0.838637
| 0.826775
| 0.826775
| 0.826586
| 0.826586
| 0
| 0.019229
| 0.278501
| 18,596
| 472
| 103
| 39.398305
| 0.772453
| 0.110508
| 0
| 0.794118
| 0
| 0
| 0.105866
| 0.01036
| 0
| 0
| 0
| 0
| 0.247059
| 1
| 0.020588
| false
| 0
| 0.023529
| 0
| 0.047059
| 0.017647
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f3cbb3ec15185163a600d7b1b8bf7e1226bac2a9
| 29,334
|
py
|
Python
|
systran_resources_api/apis/dictionary_api.py
|
SYSTRAN/resources-api-python-client
|
c1640a6ff81f7774d3c0cf02ddb28a412f7a027b
|
[
"Apache-2.0"
] | 13
|
2016-01-26T09:10:22.000Z
|
2016-10-10T15:39:48.000Z
|
systran_resources_api/apis/dictionary_api.py
|
SYSTRAN/resources-api-python-client
|
c1640a6ff81f7774d3c0cf02ddb28a412f7a027b
|
[
"Apache-2.0"
] | 1
|
2016-08-08T17:16:32.000Z
|
2016-08-09T09:34:14.000Z
|
systran_resources_api/apis/dictionary_api.py
|
SYSTRAN/resources-api-python-client
|
c1640a6ff81f7774d3c0cf02ddb28a412f7a027b
|
[
"Apache-2.0"
] | 2
|
2016-05-13T23:53:44.000Z
|
2021-02-09T23:18:01.000Z
|
#!/usr/bin/env python
# coding: utf-8
"""
Copyright 2015 SYSTRAN Software, Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import sys
import os
# python 2 and python 3 compatibility library
from six import iteritems
from .. import configuration
from ..api_client import ApiClient
class DictionaryApi(object):
def __init__(self, api_client=None):
if api_client:
self.api_client = api_client
else:
if not configuration.api_client:
configuration.api_client = ApiClient('https://api-platform.systran.net')
self.api_client = configuration.api_client
def resources_dictionary_add_post(self, input, **kwargs):
"""
Add dictionary
Add a new dictionary.
:param DictionaryAddBody input: Input with dictionary information (required)
:return: DictionaryAddResponse
"""
# verify the required parameter 'input' is set
if input is None:
raise ValueError("Missing the required parameter `input` when calling `resources_dictionary_add_post`")
all_params = ['input']
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError("Got an unexpected keyword argument '%s' to method resources_dictionary_add_post" % key)
params[key] = val
del params['kwargs']
resource_path = '/resources/dictionary/add'.replace('{format}', 'json')
method = 'POST'
path_params = {}
query_params = {}
header_params = {}
form_params = {}
files = {}
body_params = None
if 'input' in params:
body_params = params['input']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type([])
# Authentication setting
auth_settings = ['accessToken', 'apiKey']
response = self.api_client.call_api(resource_path, method, path_params, query_params, header_params,
body=body_params, post_params=form_params, files=files,
response='DictionaryAddResponse', auth_settings=auth_settings)
return response
def resources_dictionary_delete_post(self, dictionary_id, **kwargs):
"""
Delete a dictionary
Delete an existing dictionary.
:param str dictionary_id: Dictionary Id (required)
:return: None
"""
# verify the required parameter 'dictionary_id' is set
if dictionary_id is None:
raise ValueError("Missing the required parameter `dictionary_id` when calling `resources_dictionary_delete_post`")
all_params = ['dictionary_id']
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError("Got an unexpected keyword argument '%s' to method resources_dictionary_delete_post" % key)
params[key] = val
del params['kwargs']
resource_path = '/resources/dictionary/delete'.replace('{format}', 'json')
method = 'POST'
path_params = {}
query_params = {}
if 'dictionary_id' in params:
query_params['dictionaryId'] = params['dictionary_id']
header_params = {}
form_params = {}
files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type([])
# Authentication setting
auth_settings = ['accessToken', 'apiKey']
response = self.api_client.call_api(resource_path, method, path_params, query_params, header_params,
body=body_params, post_params=form_params, files=files,
response=None, auth_settings=auth_settings)
def resources_dictionary_entry_add_post(self, dictionary_id, input, **kwargs):
"""
Add an entry
Add a new entry to an existing dictionary.
:param str dictionary_id: Dictionary Id (required)
:param EntryAddBody input: Input with dictionary id and entries information (required)
:return: EntryAddResponse
"""
# verify the required parameter 'dictionary_id' is set
if dictionary_id is None:
raise ValueError("Missing the required parameter `dictionary_id` when calling `resources_dictionary_entry_add_post`")
# verify the required parameter 'input' is set
if input is None:
raise ValueError("Missing the required parameter `input` when calling `resources_dictionary_entry_add_post`")
all_params = ['dictionary_id', 'input']
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError("Got an unexpected keyword argument '%s' to method resources_dictionary_entry_add_post" % key)
params[key] = val
del params['kwargs']
resource_path = '/resources/dictionary/entry/add'.replace('{format}', 'json')
method = 'POST'
path_params = {}
query_params = {}
if 'dictionary_id' in params:
query_params['dictionaryId'] = params['dictionary_id']
header_params = {}
form_params = {}
files = {}
body_params = None
if 'input' in params:
body_params = params['input']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type([])
# Authentication setting
auth_settings = ['accessToken', 'apiKey']
response = self.api_client.call_api(resource_path, method, path_params, query_params, header_params,
body=body_params, post_params=form_params, files=files,
response='EntryAddResponse', auth_settings=auth_settings)
return response
def resources_dictionary_entry_delete_post(self, dictionary_id, input, **kwargs):
"""
Delete an entry
Delete an entry in an existing dictionary.
:param str dictionary_id: Dictionary Id (required)
:param EntryDeleteBody input: Input with dictionary id + entry id (src or tgt) to delete (required)
:return: EntryDeleteResponse
"""
# verify the required parameter 'dictionary_id' is set
if dictionary_id is None:
raise ValueError("Missing the required parameter `dictionary_id` when calling `resources_dictionary_entry_delete_post`")
# verify the required parameter 'input' is set
if input is None:
raise ValueError("Missing the required parameter `input` when calling `resources_dictionary_entry_delete_post`")
all_params = ['dictionary_id', 'input']
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError("Got an unexpected keyword argument '%s' to method resources_dictionary_entry_delete_post" % key)
params[key] = val
del params['kwargs']
resource_path = '/resources/dictionary/entry/delete'.replace('{format}', 'json')
method = 'POST'
path_params = {}
query_params = {}
if 'dictionary_id' in params:
query_params['dictionaryId'] = params['dictionary_id']
header_params = {}
form_params = {}
files = {}
body_params = None
if 'input' in params:
body_params = params['input']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type([])
# Authentication setting
auth_settings = ['accessToken', 'apiKey']
response = self.api_client.call_api(resource_path, method, path_params, query_params, header_params,
body=body_params, post_params=form_params, files=files,
response='EntryDeleteResponse', auth_settings=auth_settings)
return response
def resources_dictionary_entry_import_post(self, dictionary_id, source_lang, input_file, **kwargs):
"""
Import entries
Import entries to an existing dictionary.
:param str dictionary_id: Id of the dictionary where to import entries (required)
:param str source_lang: Source lang of the entries to import (required)
:param File input_file: File with entries to import (required)
:return: DictionariesImportResponse
"""
# verify the required parameter 'dictionary_id' is set
if dictionary_id is None:
raise ValueError("Missing the required parameter `dictionary_id` when calling `resources_dictionary_entry_import_post`")
# verify the required parameter 'source_lang' is set
if source_lang is None:
raise ValueError("Missing the required parameter `source_lang` when calling `resources_dictionary_entry_import_post`")
# verify the required parameter 'input_file' is set
if input_file is None:
raise ValueError("Missing the required parameter `input_file` when calling `resources_dictionary_entry_import_post`")
all_params = ['dictionary_id', 'source_lang', 'input_file']
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError("Got an unexpected keyword argument '%s' to method resources_dictionary_entry_import_post" % key)
params[key] = val
del params['kwargs']
resource_path = '/resources/dictionary/entry/import'.replace('{format}', 'json')
method = 'POST'
path_params = {}
query_params = {}
if 'dictionary_id' in params:
query_params['dictionaryId'] = params['dictionary_id']
if 'source_lang' in params:
query_params['sourceLang'] = params['source_lang']
header_params = {}
form_params = {}
files = {}
if 'input_file' in params:
files['inputFile'] = params['input_file']
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(['multipart/form-data', 'application/x-www-form-urlencoded', '*/*'])
# Authentication setting
auth_settings = ['accessToken', 'apiKey']
response = self.api_client.call_api(resource_path, method, path_params, query_params, header_params,
body=body_params, post_params=form_params, files=files,
response='DictionariesImportResponse', auth_settings=auth_settings)
return response
def resources_dictionary_entry_list_post(self, dictionary_id, **kwargs):
    """
    List entries
    List entries for a specific dictionary.
    :param str dictionary_id: Dictionary Id (required)
    :param EntriesListFilters filters: Different filters that can be applied to the list functionality (skip/limit/sort/match)
    :return: EntriesListResponse
    """
    # The dictionary id is mandatory for this endpoint.
    if dictionary_id is None:
        raise ValueError("Missing the required parameter `dictionary_id` when calling `resources_dictionary_entry_list_post`")
    # Reject keyword arguments this endpoint does not understand.
    accepted = ('dictionary_id', 'filters')
    for name, _value in iteritems(kwargs):
        if name not in accepted:
            raise TypeError("Got an unexpected keyword argument '%s' to method resources_dictionary_entry_list_post" % name)
    # The dictionary id travels as a query parameter; filters (if any) as the body.
    query_params = {'dictionaryId': dictionary_id}
    body_params = kwargs.get('filters')
    header_params = {}
    accept = self.api_client.select_header_accept([])
    if accept:
        header_params['Accept'] = accept
    header_params['Content-Type'] = self.api_client.select_header_content_type([])
    return self.api_client.call_api(
        '/resources/dictionary/entry/list'.replace('{format}', 'json'),
        'POST', {}, query_params, header_params,
        body=body_params, post_params={}, files={},
        response='EntriesListResponse',
        auth_settings=['accessToken', 'apiKey'])
def resources_dictionary_entry_update_post(self, dictionary_id, input, **kwargs):
    """
    Update an entry
    Update an entry in an existing dictionary.
    :param str dictionary_id: Dictionary Id (required)
    :param EntryUpdateBody input: Input with dictionary id + entry id (src or tgt) to delete (required)
    :return: EntryUpdateResponse
    """
    # Both positional parameters are mandatory.
    if dictionary_id is None:
        raise ValueError("Missing the required parameter `dictionary_id` when calling `resources_dictionary_entry_update_post`")
    if input is None:
        raise ValueError("Missing the required parameter `input` when calling `resources_dictionary_entry_update_post`")
    # Reject keyword arguments this endpoint does not understand.
    accepted = ('dictionary_id', 'input')
    for name, _value in iteritems(kwargs):
        if name not in accepted:
            raise TypeError("Got an unexpected keyword argument '%s' to method resources_dictionary_entry_update_post" % name)
    # Query string carries the dictionary id; the update payload is the body.
    query_params = {'dictionaryId': dictionary_id}
    header_params = {}
    accept = self.api_client.select_header_accept([])
    if accept:
        header_params['Accept'] = accept
    header_params['Content-Type'] = self.api_client.select_header_content_type([])
    return self.api_client.call_api(
        '/resources/dictionary/entry/update'.replace('{format}', 'json'),
        'POST', {}, query_params, header_params,
        body=input, post_params={}, files={},
        response='EntryUpdateResponse',
        auth_settings=['accessToken', 'apiKey'])
def resources_dictionary_list_post(self, **kwargs):
    """
    List dictionaries
    List the dictionaries.
    :param DictionariesListFilters filters: Different filters that can be applied to the list functionality (skip/limit/sort/match)
    :return: DictionariesListResponse
    """
    # Reject keyword arguments this endpoint does not understand.
    for name, _value in iteritems(kwargs):
        if name not in ('filters',):
            raise TypeError("Got an unexpected keyword argument '%s' to method resources_dictionary_list_post" % name)
    # Optional filters object becomes the request body (None when omitted).
    body_params = kwargs.get('filters')
    header_params = {}
    accept = self.api_client.select_header_accept([])
    if accept:
        header_params['Accept'] = accept
    header_params['Content-Type'] = self.api_client.select_header_content_type([])
    return self.api_client.call_api(
        '/resources/dictionary/list'.replace('{format}', 'json'),
        'POST', {}, {}, header_params,
        body=body_params, post_params={}, files={},
        response='DictionariesListResponse',
        auth_settings=['accessToken', 'apiKey'])
def resources_dictionary_lookup_get(self, source, target, input, **kwargs):
    """
    Lookup
    Lookup words from a source language to a target language.
    :param str source: Language code of the source text\n (required)
    :param str target: Language code in which to lookup the source text\n (required)
    :param list[str] input: Input word (the 'input' parameter can be repeated)\n (required)
    :param bool autocomplete: With this option, if the input word is not found in the source language, it will be filled in with autocompletion to perform the lookup\n\nDefault: false\n
    :param str callback: Javascript callback function name for JSONP Support\n
    :return: LookupResponse
    """
    # All three positional parameters are mandatory.
    if source is None:
        raise ValueError("Missing the required parameter `source` when calling `resources_dictionary_lookup_get`")
    if target is None:
        raise ValueError("Missing the required parameter `target` when calling `resources_dictionary_lookup_get`")
    if input is None:
        raise ValueError("Missing the required parameter `input` when calling `resources_dictionary_lookup_get`")
    # Reject keyword arguments this endpoint does not understand.
    accepted = ('source', 'target', 'input', 'autocomplete', 'callback')
    for name, _value in iteritems(kwargs):
        if name not in accepted:
            raise TypeError("Got an unexpected keyword argument '%s' to method resources_dictionary_lookup_get" % name)
    # Everything is passed in the query string for this GET endpoint.
    query_params = {'source': source, 'target': target, 'input': input}
    for optional in ('autocomplete', 'callback'):
        if optional in kwargs:
            query_params[optional] = kwargs[optional]
    header_params = {}
    accept = self.api_client.select_header_accept([])
    if accept:
        header_params['Accept'] = accept
    header_params['Content-Type'] = self.api_client.select_header_content_type([])
    return self.api_client.call_api(
        '/resources/dictionary/lookup'.replace('{format}', 'json'),
        'GET', {}, query_params, header_params,
        body=None, post_params={}, files={},
        response='LookupResponse',
        auth_settings=['accessToken', 'apiKey'])
def resources_dictionary_lookup_supported_languages_get(self, **kwargs):
    """
    Lookup Supported Languages
    List of language pairs in which lookup is supported. This list can be limited to a specific source language or target language.\n
    :param str source: Language code of the source text\n
    :param str target: Language code into which to translate the source text\n
    :param str callback: Javascript callback function name for JSONP Support\n
    :return: LookupSupportedLanguageResponse
    """
    # Reject keyword arguments this endpoint does not understand.
    accepted = ('source', 'target', 'callback')
    for name, _value in iteritems(kwargs):
        if name not in accepted:
            raise TypeError("Got an unexpected keyword argument '%s' to method resources_dictionary_lookup_supported_languages_get" % name)
    # Copy over only the optional query parameters the caller supplied.
    query_params = dict((key, kwargs[key]) for key in accepted if key in kwargs)
    header_params = {}
    accept = self.api_client.select_header_accept([])
    if accept:
        header_params['Accept'] = accept
    header_params['Content-Type'] = self.api_client.select_header_content_type([])
    return self.api_client.call_api(
        '/resources/dictionary/lookup/supportedLanguages'.replace('{format}', 'json'),
        'GET', {}, query_params, header_params,
        body=None, post_params={}, files={},
        response='LookupSupportedLanguageResponse',
        auth_settings=['accessToken', 'apiKey'])
def resources_dictionary_supported_languages_get(self, **kwargs):
    """
    Supported Languages
    Get supported languages by dictionaries
    :return: SupportedLanguagesResponse
    """
    # This endpoint takes no arguments at all: any keyword is an error.
    for name, _value in iteritems(kwargs):
        raise TypeError("Got an unexpected keyword argument '%s' to method resources_dictionary_supported_languages_get" % name)
    header_params = {}
    accept = self.api_client.select_header_accept([])
    if accept:
        header_params['Accept'] = accept
    header_params['Content-Type'] = self.api_client.select_header_content_type([])
    return self.api_client.call_api(
        '/resources/dictionary/supportedLanguages'.replace('{format}', 'json'),
        'GET', {}, {}, header_params,
        body=None, post_params={}, files={},
        response='SupportedLanguagesResponse',
        auth_settings=['accessToken', 'apiKey'])
def resources_dictionary_update_post(self, dictionary_id, input, **kwargs):
    """
    Update a dictionary
    Update an existing dictionary.
    :param str dictionary_id: Dictionary Id (required)
    :param DictionaryUpdateBody input: Input with dictionary id (required)
    :return: DictionaryUpdateResponse
    """
    # Both positional parameters are mandatory.
    if dictionary_id is None:
        raise ValueError("Missing the required parameter `dictionary_id` when calling `resources_dictionary_update_post`")
    if input is None:
        raise ValueError("Missing the required parameter `input` when calling `resources_dictionary_update_post`")
    # Reject keyword arguments this endpoint does not understand.
    accepted = ('dictionary_id', 'input')
    for name, _value in iteritems(kwargs):
        if name not in accepted:
            raise TypeError("Got an unexpected keyword argument '%s' to method resources_dictionary_update_post" % name)
    # Query string carries the dictionary id; the update payload is the body.
    query_params = {'dictionaryId': dictionary_id}
    header_params = {}
    accept = self.api_client.select_header_accept([])
    if accept:
        header_params['Accept'] = accept
    header_params['Content-Type'] = self.api_client.select_header_content_type([])
    return self.api_client.call_api(
        '/resources/dictionary/update'.replace('{format}', 'json'),
        'POST', {}, query_params, header_params,
        body=input, post_params={}, files={},
        response='DictionaryUpdateResponse',
        auth_settings=['accessToken', 'apiKey'])
| 36.621723
| 190
| 0.597805
| 3,043
| 29,334
| 5.554387
| 0.077884
| 0.051118
| 0.040232
| 0.029819
| 0.826589
| 0.801503
| 0.784404
| 0.780677
| 0.770146
| 0.743166
| 0
| 0.000548
| 0.315368
| 29,334
| 800
| 191
| 36.6675
| 0.84106
| 0.187325
| 0
| 0.746835
| 0
| 0
| 0.217567
| 0.07063
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032911
| false
| 0
| 0.032911
| 0
| 0.096203
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6d0e598a91ef3aa9fee046b828a0c4c6eee03a76
| 7,172
|
py
|
Python
|
Normalize.py
|
WoodSugar/GSTNet
|
3c21cfc8a873d61336f257030a28fdee12dcee2f
|
[
"MIT"
] | 8
|
2020-12-24T08:18:09.000Z
|
2021-12-30T16:50:16.000Z
|
Normalize.py
|
WoodSugar/GSTNet
|
3c21cfc8a873d61336f257030a28fdee12dcee2f
|
[
"MIT"
] | 2
|
2020-12-24T08:21:06.000Z
|
2021-08-18T11:03:47.000Z
|
Normalize.py
|
WoodSugar/GSTNet
|
3c21cfc8a873d61336f257030a28fdee12dcee2f
|
[
"MIT"
] | 1
|
2021-01-31T07:52:52.000Z
|
2021-01-31T07:52:52.000Z
|
# -*- coding: utf-8 -*-
"""
@Time : 2019/04/16 19:07
@Author : Yuppie
"""
import torch
import torch.nn as nn
class Switch_Norm_1D(nn.Module):
    """Switchable Normalization for 3-D inputs shaped (batch, seq_len, channels).

    Blends instance-, layer- and (optionally) batch-normalization statistics
    with learned softmax weights, then applies a per-channel affine transform.
    """

    def __init__(self, in_channels, eps=1e-5, momentum=0.997, using_moving_average=True, using_bn=True,
                 last_gamma=False):
        super(Switch_Norm_1D, self).__init__()
        self.eps = eps
        self.momentum = momentum
        self.using_moving_average = using_moving_average
        self.using_bn = using_bn
        self.last_gamma = last_gamma
        # Per-channel affine parameters, broadcast over (batch, seq_len, channels).
        self.weight = nn.Parameter(torch.ones(1, 1, in_channels))
        self.bias = nn.Parameter(torch.zeros(1, 1, in_channels))
        if self.using_bn:
            # Three statistics to blend: IN, LN and BN.
            self.mean_weight = nn.Parameter(torch.ones(3))
            self.var_weight = nn.Parameter(torch.ones(3))
        else:
            # Only IN and LN statistics are available.
            self.mean_weight = nn.Parameter(torch.ones(2))
            self.var_weight = nn.Parameter(torch.ones(2))
        if self.using_bn:
            self.register_buffer('running_mean', torch.zeros(1, in_channels, 1))
            self.register_buffer('running_var', torch.zeros(1, in_channels, 1))
        # Bug fix: mirror Switch_Norm_2D (which calls this in __init__) and
        # actually apply the initialisation; previously reset_parameters() was
        # never invoked, so `last_gamma=True` silently had no effect.
        self.reset_parameters()

    def reset_parameters(self):
        """Zero the running statistics and (re)initialise the affine params."""
        if self.using_bn:
            self.running_mean.zero_()
            self.running_var.zero_()
        if self.last_gamma:
            # Zero-gamma init: start the block as an identity-ish residual.
            self.weight.data.fill_(0)
        else:
            self.weight.data.fill_(1)
        self.bias.data.zero_()

    @staticmethod
    def _check_input_dim(inputs):
        # Only (batch, seq_len, channels) tensors are supported.
        if inputs.dim() != 3:
            raise ValueError('expected 3D input (got {}D input)'
                             .format(inputs.dim()))

    def forward(self, inputs):
        Switch_Norm_1D._check_input_dim(inputs)
        # Work in (batch, channels, seq_len) so stats reduce over the last dim.
        inputs = inputs.transpose(1, 2)
        mean_in = inputs.mean(-1, keepdim=True)
        var_in = inputs.var(-1, keepdim=True)
        mean_ln = mean_in.mean(1, keepdim=True)
        # E[x^2] per channel, reused for LN and BN variance via Var = E[x^2] - E[x]^2.
        temp = var_in + mean_in ** 2
        var_ln = temp.mean(1, keepdim=True) - mean_ln ** 2
        if self.using_bn:
            if self.training:
                mean_bn = mean_in.mean(0, keepdim=True)
                var_bn = temp.mean(0, keepdim=True) - mean_bn ** 2
                if self.using_moving_average:
                    # Exponential moving average of the batch statistics.
                    self.running_mean.mul_(self.momentum)
                    self.running_mean.add_((1 - self.momentum) * mean_bn.data)
                    self.running_var.mul_(self.momentum)
                    self.running_var.add_((1 - self.momentum) * var_bn.data)
                else:
                    # Plain accumulation (caller is expected to average later).
                    self.running_mean.add_(mean_bn.data)
                    self.running_var.add_(mean_bn.data ** 2 + var_bn.data)
            else:
                # Eval mode: use the tracked statistics.
                mean_bn = torch.autograd.Variable(self.running_mean)
                var_bn = torch.autograd.Variable(self.running_var)
        # Softmax over the statistic axis turns the raw weights into a convex blend.
        softmax = nn.Softmax(0)
        mean_weight = softmax(self.mean_weight)
        var_weight = softmax(self.var_weight)
        if self.using_bn:
            mean = mean_weight[0] * mean_in + mean_weight[1] * mean_ln + mean_weight[2] * mean_bn
            var = var_weight[0] * var_in + var_weight[1] * var_ln + var_weight[2] * var_bn
        else:
            mean = mean_weight[0] * mean_in + mean_weight[1] * mean_ln
            var = var_weight[0] * var_in + var_weight[1] * var_ln
        inputs = (inputs - mean) / (var + self.eps).sqrt()
        inputs = inputs.transpose(1, 2)
        return self.weight * inputs + self.bias
class Switch_Norm_2D(nn.Module):
    """Switchable Normalization for 4-D inputs shaped (batch, stations, seq_len, channels).

    Blends instance-, layer- and (optionally) batch-normalization statistics
    with learned softmax weights, then applies a per-channel affine transform.
    """

    def __init__(self, in_channels, eps=1e-5, momentum=0.997, using_moving_average=True, using_bn=True,
                 last_gamma=False):
        super(Switch_Norm_2D, self).__init__()
        self.eps = eps
        self.momentum = momentum
        self.using_moving_average = using_moving_average
        self.using_bn = using_bn
        self.last_gamma = last_gamma
        # Per-channel affine parameters, broadcast over the 4-D input.
        self.weight = nn.Parameter(torch.ones(1, 1, 1, in_channels))
        self.bias = nn.Parameter(torch.zeros(1, 1, 1, in_channels))
        # IN + LN statistics always; BN adds a third blend component.
        n_stats = 3 if self.using_bn else 2
        self.mean_weight = nn.Parameter(torch.ones(n_stats))
        self.var_weight = nn.Parameter(torch.ones(n_stats))
        if self.using_bn:
            self.register_buffer('running_mean', torch.zeros(1, in_channels, 1))
            self.register_buffer('running_var', torch.zeros(1, in_channels, 1))
        self.reset_parameters()

    def reset_parameters(self):
        """Zero the running statistics and (re)initialise the affine params."""
        if self.using_bn:
            self.running_mean.zero_()
            self.running_var.zero_()
        # Zero-gamma init makes the block start as an identity-ish residual.
        self.weight.data.fill_(0 if self.last_gamma else 1)
        self.bias.data.zero_()

    @staticmethod
    def _check_input_dim(x):
        # Only (batch, stations, seq_len, channels) tensors are supported.
        if x.dim() != 4:
            raise ValueError('expected 4D input (got {}D input)'.format(x.dim()))

    def forward(self, x):
        self._check_input_dim(x)
        batch, stations, steps, channels = x.size()
        # Flatten to (batch, channels, stations*steps) so stats reduce over the last dim.
        x = x.transpose(-2, -1).transpose(-3, -2)
        x = x.contiguous().view(batch, channels, -1)
        mu_in = x.mean(-1, keepdim=True)
        v_in = x.var(-1, keepdim=True)
        mu_ln = mu_in.mean(1, keepdim=True)
        # E[x^2] per channel, reused for LN and BN variance via Var = E[x^2] - E[x]^2.
        second_moment = v_in + mu_in ** 2
        v_ln = second_moment.mean(1, keepdim=True) - mu_ln ** 2
        if self.using_bn:
            if self.training:
                mu_bn = mu_in.mean(0, keepdim=True)
                v_bn = second_moment.mean(0, keepdim=True) - mu_bn ** 2
                if self.using_moving_average:
                    # Exponential moving average of the batch statistics.
                    self.running_mean.mul_(self.momentum)
                    self.running_mean.add_((1 - self.momentum) * mu_bn.data)
                    self.running_var.mul_(self.momentum)
                    self.running_var.add_((1 - self.momentum) * v_bn.data)
                else:
                    # Plain accumulation (caller is expected to average later).
                    self.running_mean.add_(mu_bn.data)
                    self.running_var.add_(mu_bn.data ** 2 + v_bn.data)
            else:
                # Eval mode: use the tracked statistics.
                mu_bn = torch.autograd.Variable(self.running_mean)
                v_bn = torch.autograd.Variable(self.running_var)
        # Softmax over the statistic axis turns the raw weights into a convex blend.
        blend = nn.Softmax(0)
        mu_coeff = blend(self.mean_weight)
        var_coeff = blend(self.var_weight)
        if self.using_bn:
            mean = mu_coeff[0] * mu_in + mu_coeff[1] * mu_ln + mu_coeff[2] * mu_bn
            var = var_coeff[0] * v_in + var_coeff[1] * v_ln + var_coeff[2] * v_bn
        else:
            mean = mu_coeff[0] * mu_in + mu_coeff[1] * mu_ln
            var = var_coeff[0] * v_in + var_coeff[1] * v_ln
        x = (x - mean) / (var + self.eps).sqrt()
        # Restore the original (batch, stations, seq_len, channels) layout.
        x = x.contiguous().view(batch, channels, stations, steps)
        x = x.transpose(-3, -2).transpose(-2, -1)
        return self.weight * x + self.bias
| 39.844444
| 104
| 0.572504
| 938
| 7,172
| 4.116205
| 0.102345
| 0.05698
| 0.034188
| 0.05698
| 0.907537
| 0.898731
| 0.883191
| 0.882155
| 0.861953
| 0.861953
| 0
| 0.025015
| 0.314417
| 7,172
| 180
| 105
| 39.844444
| 0.76022
| 0.009342
| 0
| 0.847222
| 0
| 0
| 0.016187
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0.013889
| 0
| 0.097222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6d2cbd5a776d3e6fe744f531ed68453181c500e0
| 24,978
|
py
|
Python
|
sdk/python/pulumi_akamai/app_sec_configuration.py
|
pulumi/pulumi-akamai
|
85f933ccf2f61738b3074a13fa718132280f8364
|
[
"ECL-2.0",
"Apache-2.0"
] | 3
|
2021-01-21T15:22:12.000Z
|
2021-08-25T14:15:29.000Z
|
sdk/python/pulumi_akamai/app_sec_configuration.py
|
pulumi/pulumi-akamai
|
85f933ccf2f61738b3074a13fa718132280f8364
|
[
"ECL-2.0",
"Apache-2.0"
] | 59
|
2020-08-13T14:39:36.000Z
|
2022-03-31T15:19:48.000Z
|
sdk/python/pulumi_akamai/app_sec_configuration.py
|
pulumi/pulumi-akamai
|
85f933ccf2f61738b3074a13fa718132280f8364
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['AppSecConfigurationArgs', 'AppSecConfiguration']
@pulumi.input_type
class AppSecConfigurationArgs:
    def __init__(__self__, *,
                 contract_id: pulumi.Input[str],
                 description: pulumi.Input[str],
                 group_id: pulumi.Input[int],
                 host_names: pulumi.Input[Sequence[pulumi.Input[str]]],
                 create_from_config_id: Optional[pulumi.Input[int]] = None,
                 create_from_version: Optional[pulumi.Input[int]] = None,
                 name: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a AppSecConfiguration resource.
        :param pulumi.Input[str] contract_id: . Unique identifier of the Akamai contract t associated with the new configuration.
        :param pulumi.Input[str] description: . Brief description of the new configuration.
        :param pulumi.Input[int] group_id: . Unique identifier of the contract group associated with the new configuration.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] host_names: . JSON array containing the hostnames to be protected by the new configuration. You must specify at least one hostname in order to create a new configuration.
        :param pulumi.Input[int] create_from_config_id: . Unique identifier of the existing configuration being cloned in order to create the new configuration.
        :param pulumi.Input[int] create_from_version: . Version number of the security configuration being cloned.
        :param pulumi.Input[str] name: . Name of the new configuration.
        """
        # Required arguments are always recorded on the input type.
        for prop, value in (("contract_id", contract_id),
                            ("description", description),
                            ("group_id", group_id),
                            ("host_names", host_names)):
            pulumi.set(__self__, prop, value)
        # Optional arguments are recorded only when the caller supplied them.
        for prop, value in (("create_from_config_id", create_from_config_id),
                            ("create_from_version", create_from_version),
                            ("name", name)):
            if value is not None:
                pulumi.set(__self__, prop, value)

    @property
    @pulumi.getter(name="contractId")
    def contract_id(self) -> pulumi.Input[str]:
        """
        . Unique identifier of the Akamai contract t associated with the new configuration.
        """
        return pulumi.get(self, "contract_id")

    @contract_id.setter
    def contract_id(self, new_value: pulumi.Input[str]):
        pulumi.set(self, "contract_id", new_value)

    @property
    @pulumi.getter
    def description(self) -> pulumi.Input[str]:
        """
        . Brief description of the new configuration.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, new_value: pulumi.Input[str]):
        pulumi.set(self, "description", new_value)

    @property
    @pulumi.getter(name="groupId")
    def group_id(self) -> pulumi.Input[int]:
        """
        . Unique identifier of the contract group associated with the new configuration.
        """
        return pulumi.get(self, "group_id")

    @group_id.setter
    def group_id(self, new_value: pulumi.Input[int]):
        pulumi.set(self, "group_id", new_value)

    @property
    @pulumi.getter(name="hostNames")
    def host_names(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
        """
        . JSON array containing the hostnames to be protected by the new configuration. You must specify at least one hostname in order to create a new configuration.
        """
        return pulumi.get(self, "host_names")

    @host_names.setter
    def host_names(self, new_value: pulumi.Input[Sequence[pulumi.Input[str]]]):
        pulumi.set(self, "host_names", new_value)

    @property
    @pulumi.getter(name="createFromConfigId")
    def create_from_config_id(self) -> Optional[pulumi.Input[int]]:
        """
        . Unique identifier of the existing configuration being cloned in order to create the new configuration.
        """
        return pulumi.get(self, "create_from_config_id")

    @create_from_config_id.setter
    def create_from_config_id(self, new_value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "create_from_config_id", new_value)

    @property
    @pulumi.getter(name="createFromVersion")
    def create_from_version(self) -> Optional[pulumi.Input[int]]:
        """
        . Version number of the security configuration being cloned.
        """
        return pulumi.get(self, "create_from_version")

    @create_from_version.setter
    def create_from_version(self, new_value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "create_from_version", new_value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        . Name of the new configuration.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", new_value)
@pulumi.input_type
class _AppSecConfigurationState:
    def __init__(__self__, *,
                 config_id: Optional[pulumi.Input[int]] = None,
                 contract_id: Optional[pulumi.Input[str]] = None,
                 create_from_config_id: Optional[pulumi.Input[int]] = None,
                 create_from_version: Optional[pulumi.Input[int]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 group_id: Optional[pulumi.Input[int]] = None,
                 host_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 name: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering AppSecConfiguration resources.
        :param pulumi.Input[str] contract_id: . Unique identifier of the Akamai contract t associated with the new configuration.
        :param pulumi.Input[int] create_from_config_id: . Unique identifier of the existing configuration being cloned in order to create the new configuration.
        :param pulumi.Input[int] create_from_version: . Version number of the security configuration being cloned.
        :param pulumi.Input[str] description: . Brief description of the new configuration.
        :param pulumi.Input[int] group_id: . Unique identifier of the contract group associated with the new configuration.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] host_names: . JSON array containing the hostnames to be protected by the new configuration. You must specify at least one hostname in order to create a new configuration.
        :param pulumi.Input[str] name: . Name of the new configuration.
        """
        # Every state property is optional; record only those actually supplied.
        for prop, value in (("config_id", config_id),
                            ("contract_id", contract_id),
                            ("create_from_config_id", create_from_config_id),
                            ("create_from_version", create_from_version),
                            ("description", description),
                            ("group_id", group_id),
                            ("host_names", host_names),
                            ("name", name)):
            if value is not None:
                pulumi.set(__self__, prop, value)

    @property
    @pulumi.getter(name="configId")
    def config_id(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "config_id")

    @config_id.setter
    def config_id(self, new_value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "config_id", new_value)

    @property
    @pulumi.getter(name="contractId")
    def contract_id(self) -> Optional[pulumi.Input[str]]:
        """
        . Unique identifier of the Akamai contract t associated with the new configuration.
        """
        return pulumi.get(self, "contract_id")

    @contract_id.setter
    def contract_id(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "contract_id", new_value)

    @property
    @pulumi.getter(name="createFromConfigId")
    def create_from_config_id(self) -> Optional[pulumi.Input[int]]:
        """
        . Unique identifier of the existing configuration being cloned in order to create the new configuration.
        """
        return pulumi.get(self, "create_from_config_id")

    @create_from_config_id.setter
    def create_from_config_id(self, new_value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "create_from_config_id", new_value)

    @property
    @pulumi.getter(name="createFromVersion")
    def create_from_version(self) -> Optional[pulumi.Input[int]]:
        """
        . Version number of the security configuration being cloned.
        """
        return pulumi.get(self, "create_from_version")

    @create_from_version.setter
    def create_from_version(self, new_value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "create_from_version", new_value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        . Brief description of the new configuration.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", new_value)

    @property
    @pulumi.getter(name="groupId")
    def group_id(self) -> Optional[pulumi.Input[int]]:
        """
        . Unique identifier of the contract group associated with the new configuration.
        """
        return pulumi.get(self, "group_id")

    @group_id.setter
    def group_id(self, new_value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "group_id", new_value)

    @property
    @pulumi.getter(name="hostNames")
    def host_names(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        . JSON array containing the hostnames to be protected by the new configuration. You must specify at least one hostname in order to create a new configuration.
        """
        return pulumi.get(self, "host_names")

    @host_names.setter
    def host_names(self, new_value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "host_names", new_value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        . Name of the new configuration.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", new_value)
class AppSecConfiguration(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
contract_id: Optional[pulumi.Input[str]] = None,
create_from_config_id: Optional[pulumi.Input[int]] = None,
create_from_version: Optional[pulumi.Input[int]] = None,
description: Optional[pulumi.Input[str]] = None,
group_id: Optional[pulumi.Input[int]] = None,
host_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
**Scopes**: Contract and group
Creates a new WAP (Web Application Protector) or KSD (Kona Site Defender) security configuration. KSD security configurations start out empty (i.e., unconfigured), while WAP configurations are created using preset values. The contract referenced in the request body determines the type of configuration you can create.
In addition to manually creating a new configuration, you can use the `create_from_config_id` argument to clone an existing configuration.
**Related API Endpoint**: [/appsec/v1/configs](https://developer.akamai.com/api/cloud_security/application_security/v1.html#postconfigurations)
## Example Usage
Basic usage:
```python
import pulumi
import pulumi_akamai as akamai
selectable_hostnames = akamai.get_app_sec_selectable_hostnames(config_id="Documentation")
create_config = akamai.AppSecConfiguration("createConfig",
description="This configuration is used as a testing environment for the documentation team.",
contract_id="5-2WA382",
group_id=12198,
host_names=[
"documentation.akamai.com",
"training.akamai.com",
])
pulumi.export("createConfigId", create_config.config_id)
clone_config = akamai.AppSecConfiguration("cloneConfig",
description="This configuration is used as a testing environment for the documentation team.",
create_from_config_id=data["akamai_appsec_configuration"]["configuration"]["config_id"],
create_from_version=data["akamai_appsec_configuration"]["configuration"]["latest_version"],
contract_id="5-2WA382",
group_id=12198,
host_names=selectable_hostnames.hostnames)
pulumi.export("cloneConfigId", clone_config.config_id)
```
## Output Options
The following options can be used to determine the information returned, and how that returned information is formatted:
- `config_id`. ID of the new security configuration.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] contract_id: . Unique identifier of the Akamai contract t associated with the new configuration.
:param pulumi.Input[int] create_from_config_id: . Unique identifier of the existing configuration being cloned in order to create the new configuration.
:param pulumi.Input[int] create_from_version: . Version number of the security configuration being cloned.
:param pulumi.Input[str] description: . Brief description of the new configuration.
:param pulumi.Input[int] group_id: . Unique identifier of the contract group associated with the new configuration.
:param pulumi.Input[Sequence[pulumi.Input[str]]] host_names: . JSON array containing the hostnames to be protected by the new configuration. You must specify at least one hostname in order to create a new configuration.
:param pulumi.Input[str] name: . Name of the new configuration.
"""
...
@overload
def __init__(__self__,
             resource_name: str,
             args: AppSecConfigurationArgs,
             opts: Optional[pulumi.ResourceOptions] = None):
    """
    **Scopes**: Contract and group

    Creates a new WAP (Web Application Protector) or KSD (Kona Site Defender) security configuration. KSD security configurations start out empty (i.e., unconfigured), while WAP configurations are created using preset values. The contract referenced in the request body determines the type of configuration you can create.

    In addition to manually creating a new configuration, you can use the `create_from_config_id` argument to clone an existing configuration.

    **Related API Endpoint**: [/appsec/v1/configs](https://developer.akamai.com/api/cloud_security/application_security/v1.html#postconfigurations)

    ## Example Usage

    Basic usage:

    ```python
    import pulumi
    import pulumi_akamai as akamai

    selectable_hostnames = akamai.get_app_sec_selectable_hostnames(config_id="Documentation")
    create_config = akamai.AppSecConfiguration("createConfig",
        description="This configuration is used as a testing environment for the documentation team.",
        contract_id="5-2WA382",
        group_id=12198,
        host_names=[
            "documentation.akamai.com",
            "training.akamai.com",
        ])
    pulumi.export("createConfigId", create_config.config_id)
    clone_config = akamai.AppSecConfiguration("cloneConfig",
        description="This configuration is used as a testing environment for the documentation team.",
        create_from_config_id=data["akamai_appsec_configuration"]["configuration"]["config_id"],
        create_from_version=data["akamai_appsec_configuration"]["configuration"]["latest_version"],
        contract_id="5-2WA382",
        group_id=12198,
        host_names=selectable_hostnames.hostnames)
    pulumi.export("cloneConfigId", clone_config.config_id)
    ```

    ## Output Options

    The following options can be used to determine the information returned, and how that returned information is formatted:

    - `config_id`. ID of the new security configuration.

    :param str resource_name: The name of the resource.
    :param AppSecConfigurationArgs args: The arguments to use to populate this resource's properties.
    :param pulumi.ResourceOptions opts: Options for the resource.
    """
    ...
def __init__(__self__, resource_name: str, *args, **kwargs):
    """Dispatch to ``_internal_init`` from either overload form.

    Accepts either an ``AppSecConfigurationArgs`` object or the equivalent
    keyword arguments; ``get_resource_args_opts`` tells the two apart.
    """
    resource_args, opts = _utilities.get_resource_args_opts(AppSecConfigurationArgs, pulumi.ResourceOptions, *args, **kwargs)
    if resource_args is None:
        # Keyword-argument form: forward everything untouched.
        __self__._internal_init(resource_name, *args, **kwargs)
    else:
        # Args-object form: expand the args object into keyword arguments.
        __self__._internal_init(resource_name, opts, **resource_args.__dict__)
def _internal_init(__self__,
                   resource_name: str,
                   opts: Optional[pulumi.ResourceOptions] = None,
                   contract_id: Optional[pulumi.Input[str]] = None,
                   create_from_config_id: Optional[pulumi.Input[int]] = None,
                   create_from_version: Optional[pulumi.Input[int]] = None,
                   description: Optional[pulumi.Input[str]] = None,
                   group_id: Optional[pulumi.Input[int]] = None,
                   host_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                   name: Optional[pulumi.Input[str]] = None,
                   __props__=None):
    """Shared initializer backing both ``__init__`` overloads.

    Validates options, enforces required inputs (unless this is a URN-based
    lookup), builds the property bag and registers the resource.
    """
    if opts is None:
        opts = pulumi.ResourceOptions()
    if not isinstance(opts, pulumi.ResourceOptions):
        raise TypeError('Expected resource options to be a ResourceOptions instance')
    if opts.version is None:
        opts.version = _utilities.get_version()
    if opts.id is None:
        # Creating (not looking up) a resource: __props__ must not be supplied.
        if __props__ is not None:
            raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
        __props__ = AppSecConfigurationArgs.__new__(AppSecConfigurationArgs)
        # Check every required input first; a raise here discards the
        # partially-built __props__, so checking before assigning is
        # externally identical to the interleaved original.
        required = (
            ('contract_id', contract_id),
            ('description', description),
            ('group_id', group_id),
            ('host_names', host_names),
        )
        for prop_name, prop_value in required:
            if prop_value is None and not opts.urn:
                raise TypeError("Missing required property '%s'" % prop_name)
        __props__.__dict__["contract_id"] = contract_id
        __props__.__dict__["create_from_config_id"] = create_from_config_id
        __props__.__dict__["create_from_version"] = create_from_version
        __props__.__dict__["description"] = description
        __props__.__dict__["group_id"] = group_id
        __props__.__dict__["host_names"] = host_names
        __props__.__dict__["name"] = name
        # config_id is an output assigned by the provider.
        __props__.__dict__["config_id"] = None
    super(AppSecConfiguration, __self__).__init__(
        'akamai:index/appSecConfiguration:AppSecConfiguration',
        resource_name,
        __props__,
        opts)
@staticmethod
def get(resource_name: str,
        id: pulumi.Input[str],
        opts: Optional[pulumi.ResourceOptions] = None,
        config_id: Optional[pulumi.Input[int]] = None,
        contract_id: Optional[pulumi.Input[str]] = None,
        create_from_config_id: Optional[pulumi.Input[int]] = None,
        create_from_version: Optional[pulumi.Input[int]] = None,
        description: Optional[pulumi.Input[str]] = None,
        group_id: Optional[pulumi.Input[int]] = None,
        host_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
        name: Optional[pulumi.Input[str]] = None) -> 'AppSecConfiguration':
    """
    Get an existing AppSecConfiguration resource's state with the given name, id, and optional extra
    properties used to qualify the lookup.

    :param str resource_name: The unique name of the resulting resource.
    :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
    :param pulumi.ResourceOptions opts: Options for the resource.
    :param pulumi.Input[str] contract_id: . Unique identifier of the Akamai contract associated with the new configuration.
    :param pulumi.Input[int] create_from_config_id: . Unique identifier of the existing configuration being cloned in order to create the new configuration.
    :param pulumi.Input[int] create_from_version: . Version number of the security configuration being cloned.
    :param pulumi.Input[str] description: . Brief description of the new configuration.
    :param pulumi.Input[int] group_id: . Unique identifier of the contract group associated with the new configuration.
    :param pulumi.Input[Sequence[pulumi.Input[str]]] host_names: . JSON array containing the hostnames to be protected by the new configuration. You must specify at least one hostname in order to create a new configuration.
    :param pulumi.Input[str] name: . Name of the new configuration.
    """
    opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
    __props__ = _AppSecConfigurationState.__new__(_AppSecConfigurationState)
    # Copy all supplied state onto the bare state object in one pass.
    __props__.__dict__.update({
        "config_id": config_id,
        "contract_id": contract_id,
        "create_from_config_id": create_from_config_id,
        "create_from_version": create_from_version,
        "description": description,
        "group_id": group_id,
        "host_names": host_names,
        "name": name,
    })
    return AppSecConfiguration(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="configId")
def config_id(self) -> pulumi.Output[int]:
    """. ID of the new security configuration."""
    return pulumi.get(self, "config_id")
@property
@pulumi.getter(name="contractId")
def contract_id(self) -> pulumi.Output[str]:
    """. Unique identifier of the Akamai contract associated with the new configuration."""
    return pulumi.get(self, "contract_id")
@property
@pulumi.getter(name="createFromConfigId")
def create_from_config_id(self) -> pulumi.Output[Optional[int]]:
    """. Unique identifier of the existing configuration being cloned in order to create the new configuration."""
    value = pulumi.get(self, "create_from_config_id")
    return value
@property
@pulumi.getter(name="createFromVersion")
def create_from_version(self) -> pulumi.Output[Optional[int]]:
    """. Version number of the security configuration being cloned."""
    value = pulumi.get(self, "create_from_version")
    return value
@property
@pulumi.getter
def description(self) -> pulumi.Output[str]:
    """. Brief description of the new configuration."""
    value = pulumi.get(self, "description")
    return value
@property
@pulumi.getter(name="groupId")
def group_id(self) -> pulumi.Output[int]:
    """. Unique identifier of the contract group associated with the new configuration."""
    value = pulumi.get(self, "group_id")
    return value
@property
@pulumi.getter(name="hostNames")
def host_names(self) -> pulumi.Output[Sequence[str]]:
    """. JSON array containing the hostnames to be protected by the new configuration. You must specify at least one hostname in order to create a new configuration."""
    value = pulumi.get(self, "host_names")
    return value
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
    """. Name of the new configuration."""
    value = pulumi.get(self, "name")
    return value
| 46.951128
| 326
| 0.666707
| 2,953
| 24,978
| 5.411446
| 0.078903
| 0.077096
| 0.065394
| 0.039424
| 0.875594
| 0.858573
| 0.844743
| 0.818335
| 0.806133
| 0.795932
| 0
| 0.002365
| 0.23825
| 24,978
| 531
| 327
| 47.039548
| 0.837494
| 0.396349
| 0
| 0.686833
| 1
| 0
| 0.104453
| 0.01927
| 0
| 0
| 0
| 0
| 0
| 1
| 0.160142
| false
| 0.003559
| 0.017794
| 0.007117
| 0.274021
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6d30b3a26a9ba38e33beb1e38cf93ee305e7c88f
| 144
|
py
|
Python
|
MapDown-PyCli/_init_.py
|
shlllshlll/GPSLoc
|
dee129d6fa0bf89519a85ade5d3c220fdf2c67d6
|
[
"MIT"
] | 22
|
2017-11-25T09:34:46.000Z
|
2021-07-02T04:24:47.000Z
|
MapDown-PyCli/_init_.py
|
lgqhaha/GPSLoc
|
dee129d6fa0bf89519a85ade5d3c220fdf2c67d6
|
[
"MIT"
] | 1
|
2018-12-18T04:43:26.000Z
|
2018-12-18T04:43:26.000Z
|
MapDown-PyCli/_init_.py
|
lgqhaha/GPSLoc
|
dee129d6fa0bf89519a85ade5d3c220fdf2c67d6
|
[
"MIT"
] | 11
|
2019-05-12T12:43:41.000Z
|
2020-12-17T15:22:53.000Z
|
# -*- coding: utf-8 -*-
# @Author: SHLLL
# @Date: 2017-11-23 14:24:08
# @Last Modified by: SHLLL
# @Last Modified time: 2017-11-23 14:24:24
| 24
| 42
| 0.611111
| 25
| 144
| 3.52
| 0.64
| 0.136364
| 0.181818
| 0.227273
| 0.272727
| 0
| 0
| 0
| 0
| 0
| 0
| 0.247863
| 0.1875
| 144
| 5
| 43
| 28.8
| 0.504274
| 0.923611
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6d52273b7acc29132a02a17ee931c582b1c60888
| 55,448
|
py
|
Python
|
tests/skeu/test_runs.py
|
siq/flux
|
ca7563deb9ebef14840bbf0cb7bab4d9478b2470
|
[
"Linux-OpenIB"
] | null | null | null |
tests/skeu/test_runs.py
|
siq/flux
|
ca7563deb9ebef14840bbf0cb7bab4d9478b2470
|
[
"Linux-OpenIB"
] | null | null | null |
tests/skeu/test_runs.py
|
siq/flux
|
ca7563deb9ebef14840bbf0cb7bab4d9478b2470
|
[
"Linux-OpenIB"
] | null | null | null |
from time import sleep
from scheme import fields, Yaml
from spire.core import adhoc_configure, Unit
from spire.schema import SchemaDependency
from mesh.testing import MeshTestCase
from mesh.exceptions import InvalidError
from flux.bundles import API
from flux.models import Operation, Run, Workflow
# Wire up the spire test environment: the 'flux' database schema plus the
# mesh endpoints the tests talk to — the flux API itself and its
# docket/platoon/truss service bindings (all expected on localhost).
adhoc_configure({
    'schema:flux': {
        'url': 'postgresql://postgres@localhost/flux'
    },
    'mesh:flux': {
        'url': 'http://localhost:9997/',
        'bundle': 'flux.API',
    },
    'mesh:docket': {
        'url': 'http://localhost:9998/',
        'specification': 'flux.bindings.docket.specification',
    },
    'mesh:platoon': {
        'url': 'http://localhost:4321/',
        'specification': 'flux.bindings.platoon.specification',
    },
    'mesh:truss': {
        'url': 'http://localhost:9999/api',
        'specification': 'flux.bindings.truss.specification',
    },
})
class TestDependency(Unit):
    """Spire unit exposing the 'flux' schema so tests can reach its DB session."""
    # Declared dependency; resolved by spire at runtime.
    schema = SchemaDependency('flux')
class BaseTestCase(MeshTestCase):
    """Shared fixtures and helpers for flux run tests.

    Every workflow/run created through the ``_setup_*`` helpers is recorded
    so ``tearDown`` can delete it from the database afterwards.
    """
    bundle = API
    maxDiff = None
    config = TestDependency()

    def setUp(self):
        # Ids of objects created during the test, deleted in tearDown.
        self._workflows = []
        self._runs = []
        self._operations = []

    def tearDown(self):
        """Best-effort cleanup: delete everything the test created."""
        session = self.config.schema.session
        model_instances = (
            (Run, self._runs),
            (Workflow, self._workflows),
            (Operation, self._operations),
        )
        for model, instances in model_instances:
            for instance in instances:
                try:
                    session.delete(session.query(model).with_lockmode('update').get(instance))
                    session.commit()
                # Was a bare `except:` — narrowed so SystemExit and
                # KeyboardInterrupt are no longer swallowed. Cleanup stays
                # deliberately best-effort.
                except Exception:
                    session.rollback()
                    continue

    def _poll_run_status(self, client, run_id, status, include=None, limit=5, wait=6):
        """Poll the run until it reaches `status` and return its content.

        Polls up to `limit` times, sleeping `wait` seconds between polls;
        raises if the run never reaches the requested status.
        """
        run = None
        data = {'include': include} if include else None
        while limit:
            resp = client.execute('run', 'get', run_id, data=data)
            self.assertEqual('OK', resp.status)
            run = resp.content
            if run['status'] == status:
                break
            limit -= 1
            if limit:
                sleep(wait)
        else:
            raise Exception(
                'Status of Run(id=%s, status=%s) not updating to %s' % (
                    run_id, run['status'], status))
        return run

    def _setup_active_run(self, client, workflow_id,
                          steps=None, parameters=None, limit=5, wait=6):
        """Create a run and poll until all of `steps` have executions.

        Returns the run content (with executions) once every requested step
        has started; raises if that doesn't happen within `limit` polls.
        """
        data = {'workflow_id': workflow_id, 'parameters': parameters}
        resp = client.execute('run', 'create', data=data)
        self.assertEqual('OK', resp.status)
        run_id = resp.content['id']
        steps = list(steps) if steps else []
        run = None
        get_data = {'include': ['executions']}
        while limit:
            resp = client.execute('run', 'get', subject=run_id, data=get_data)
            self.assertEqual('OK', resp.status)
            run = resp.content
            executions = resp.content['executions']
            for e in executions:
                try:
                    steps.remove(e['step'])
                except ValueError:
                    continue
            if not steps:
                break
            limit -= 1
            if limit:
                sleep(wait)
        else:
            raise Exception(
                'Run(id=%s, status=%s) did not execute steps: %s' % (
                    run_id, run['status'], steps))
        return run

    def _setup_run(self, client, workflow_id, parameters=None, name=None):
        """Create a run for `workflow_id`, tracking its id for cleanup."""
        data = {'workflow_id': workflow_id}
        if name:
            data['name'] = name
        if parameters:
            data['parameters'] = parameters
        resp = client.execute('run', 'create', None, data=data)
        try:
            run_id = resp.content['id']
        except (AttributeError, KeyError):
            # Creation failed; nothing to track.
            pass
        else:
            self._runs.append(run_id)
        return resp

    def _setup_workflow(self, client, name, specification=None):
        """Create a workflow (default: single test-operation step).

        Tracks the workflow id for cleanup and returns the raw response.
        """
        if specification is None:
            specification = Yaml.serialize({
                'name': name,
                'entry': 'step-0',
                'steps': {
                    'step-0': {
                        'operation': 'flux:test-operation',
                    },
                },
            })
        self._workflow_spec = specification
        data = {'name': name, 'specification': specification}
        resp = client.execute('workflow', 'create', None, data=data)
        try:
            workflow_id = resp.content['id']
        except (AttributeError, KeyError):
            pass
        else:
            self._workflows.append(workflow_id)
        return resp
class TestSimpleRunCases(BaseTestCase):
    """Create/update/name-uniqueness and simple execution-cycle tests.

    ``assertEquals`` (a deprecated alias removed in Python 3.12) has been
    replaced with ``assertEqual`` throughout.
    """

    def test_duplicate_name_run_on_create1(self, client):
        """Tests creating a run with an existing run name"""
        workflow_name = 'test workflow'
        resp = self._setup_workflow(client, workflow_name)
        self.assertEqual('OK', resp.status)
        workflow_id = resp.content['id']
        duplicate_name = 'test duplicate name run 1'
        resp = self._setup_run(client, workflow_id, name=duplicate_name)
        self.assertEqual(resp.status, 'OK')
        with self.assertRaises(InvalidError):
            self._setup_run(client, workflow_id, name=duplicate_name)

    def test_duplicate_name_run_on_create2(self, client):
        """Tests creating multiple runs off one workflow without providing a name"""
        workflow_name = 'test workflow'
        resp = self._setup_workflow(client, workflow_name)
        self.assertEqual('OK', resp.status)
        workflow_id = resp.content['id']
        resp = self._setup_run(client, workflow_id)
        self.assertEqual(resp.status, 'OK')
        resp = self._setup_run(client, workflow_id)
        self.assertEqual(resp.status, 'OK')

    def test_duplicate_name_run_on_update1(self, client):
        """Tests updating a run with an existing run name"""
        workflow_name = 'test workflow'
        resp = self._setup_workflow(client, workflow_name)
        self.assertEqual('OK', resp.status)
        workflow_id = resp.content['id']
        duplicate_name = 'test duplicate name run 2'
        resp = self._setup_run(client, workflow_id, name=duplicate_name)
        self.assertEqual(resp.status, 'OK')
        # NOTE(review): this positional argument binds to _setup_run's
        # `parameters`, not `name` — probably intended as
        # name='test duplicate name run 2 a'. Left unchanged to avoid
        # altering what the server receives; confirm intent.
        resp = self._setup_run(client, workflow_id, 'test duplicate name run 2 a')
        self.assertEqual(resp.status, 'OK')
        run_id = resp.content['id']
        with self.assertRaises(InvalidError):
            client.execute('run', 'update', run_id, {'name': duplicate_name})

    def test_duplicate_name_run_on_update2(self, client):
        """Tests against false positive when updating a run without name change."""
        workflow_name = 'test workflow'
        resp = self._setup_workflow(client, workflow_name)
        self.assertEqual('OK', resp.status)
        workflow_id = resp.content['id']
        duplicate_name = 'test duplicate name run 3'
        resp = self._setup_run(client, workflow_id, name=duplicate_name)
        self.assertEqual(resp.status, 'OK')
        run_id = resp.content['id']
        resp = client.execute('run', 'update', run_id, {'name': duplicate_name})
        self.assertEqual(resp.status, 'OK')

    def test_run_workflow1(self, client):
        """Tests simple workflow run cycle"""
        workflow_name = 'test run workflow 1'
        resp1 = self._setup_workflow(client, workflow_name)
        self.assertEqual('OK', resp1.status)
        workflow_id = resp1.content['id']
        resp = self._setup_run(client, workflow_id)
        self.assertEqual('OK', resp.status)
        run_id = resp.content['id']
        result = self._poll_run_status(client, run_id, 'completed')
        # Timestamps are popped so the remaining dict can be compared exactly.
        self.assertTrue(result.pop('ended') >= result.pop('started'))
        expected = {
            'id': run_id,
            'name': workflow_name,
            'parameters': None,
            'workflow_id': workflow_id,
            'products': {},
            'status': 'completed',
        }
        self.assertEqual(expected, result)

    def test_run_workflow2(self, client):
        """Tests simple workflow run and execution cycle"""
        workflow_name = 'test run workflow 2'
        resp1 = self._setup_workflow(client, workflow_name)
        self.assertEqual('OK', resp1.status)
        workflow_id = resp1.content['id']
        resp = self._setup_run(client, workflow_id)
        self.assertEqual('OK', resp.status)
        run_id = resp.content['id']
        result = self._poll_run_status(client, run_id, 'completed', include=['executions'])
        run_ended = result.pop('ended')
        run_started = result.pop('started')
        result['executions'][0].pop('id')
        execution_ended = result['executions'][0].pop('ended')
        execution_started = result['executions'][0].pop('started')
        # The execution must fall entirely within the run's time window.
        self.assertTrue(run_ended >= run_started)
        self.assertTrue(execution_ended >= execution_started)
        self.assertTrue(execution_ended >= run_started)
        self.assertTrue(run_ended >= execution_started)
        expected = {
            'id': run_id,
            'name': workflow_name,
            'parameters': None,
            'workflow_id': workflow_id,
            'products': {},
            'status': 'completed',
            'executions': [{
                'execution_id': 1,
                'ancestor_id': None,
                'step': 'step-0',
                'name': 'Test Operation',
                'status': 'completed',
            }]
        }
        self.assertEqual(expected, result)

    def test_multi_step_run(self, client):
        """Tests for multistep workflow runs"""
        workflow_name = 'test multistep workflow run'
        # Three chained steps: step-0 -> step-1 -> step-2.
        specification = Yaml.serialize({
            'name': workflow_name,
            'entry': 'step-0',
            'steps': {
                'step-0': {
                    'operation': 'flux:test-operation',
                    'postoperation': [{
                        'actions': [{
                            'action': 'execute-step',
                            'step': 'step-1',
                        }],
                    }],
                },
                'step-1': {
                    'operation': 'flux:test-operation',
                    'postoperation': [{
                        'actions': [{
                            'action': 'execute-step',
                            'step': 'step-2',
                        }],
                    }],
                },
                'step-2': {
                    'operation': 'flux:test-operation',
                },
            },
        })
        resp1 = self._setup_workflow(client, workflow_name,
                                     specification=specification)
        self.assertEqual('OK', resp1.status)
        workflow_id = resp1.content['id']
        resp = self._setup_run(client, workflow_id)
        self.assertEqual('OK', resp.status)
        run_id = resp.content['id']
        result = self._poll_run_status(client, run_id, 'completed', include=['executions'])
        run_ended = result.pop('ended')
        run_started = result.pop('started')
        self.assertTrue(run_ended >= run_started)
        ancestor_ids = []
        for execution in result['executions']:
            ancestor_ids.append(execution.pop('id'))
            execution_ended = execution.pop('ended')
            execution_started = execution.pop('started')
            self.assertTrue(execution_ended >= execution_started)
            self.assertTrue(execution_ended >= run_started)
            self.assertTrue(run_ended >= execution_started)
        expected = {
            'id': run_id,
            'name': workflow_name,
            'parameters': None,
            'workflow_id': workflow_id,
            'products': {},
            'status': 'completed',
            'executions': [
                {
                    'execution_id': 1,
                    'ancestor_id': None,
                    'step': 'step-0',
                    'name': 'Test Operation',
                    'status': 'completed',
                },
                {
                    'execution_id': 2,
                    'ancestor_id': ancestor_ids[0],
                    'step': 'step-1',
                    'name': 'Test Operation',
                    'status': 'completed',
                },
                {
                    'execution_id': 3,
                    'ancestor_id': ancestor_ids[1],
                    'step': 'step-2',
                    'name': 'Test Operation',
                    'status': 'completed',
                },
            ]
        }
        self.assertEqual(expected, result)
class TestRunOutcomeCases(BaseTestCase):
"""Tests workflow runs interaction with different outcomes"""
def test_success_outcome(self, client):
'''Test successful run outcome'''
operation_id = 'flux:test-operation'
operation_name = 'Test Operation'
workflow_name = 'test sucess outcome'
specification = Yaml.serialize({
'name': workflow_name,
'entry': 'step-0',
'steps': {
'step-0': {
'operation': operation_id,
'parameters': {'outcome': 'completed'},
},
},
})
resp = self._setup_workflow(client, workflow_name, specification=specification)
self.assertEqual('OK', resp.status)
workflow_id = resp.content['id']
run = self._setup_active_run(client, workflow_id)
run_id = run['id']
result = self._poll_run_status(client, run_id, 'completed', ['executions'])
run_ended = result.pop('ended')
run_started = result.pop('started')
execution = result['executions'][-1]
execution_ended = execution.pop('ended')
execution_started = execution.pop('started')
self.assertTrue(run_ended >= run_started)
self.assertTrue(execution_ended >= execution_started)
self.assertTrue(execution_ended >= run_started)
self.assertTrue(run_ended >= execution_started)
expected = {
'id': run_id,
'name': workflow_name,
'parameters': None,
'workflow_id': workflow_id,
'products': {},
'status': 'completed',
'executions': [{
'id': execution['id'],
'execution_id': 1,
'ancestor_id': None,
'step': 'step-0',
'name': operation_name,
'status': 'completed',
}]
}
self.assertEquals(expected, result)
def test_failure_outcome(self, client):
'''Test failure run outcome'''
operation_id = 'flux:test-operation'
operation_name = 'Test Operation'
workflow_name = 'test failure outcome'
specification = Yaml.serialize({
'name': workflow_name,
'entry': 'step-0',
'steps': {
'step-0': {
'operation': operation_id,
'parameters': {'outcome': 'failed'},
},
},
})
resp = self._setup_workflow(client, workflow_name, specification=specification)
self.assertEqual('OK', resp.status)
workflow_id = resp.content['id']
run = self._setup_active_run(client, workflow_id)
run_id = run['id']
result = self._poll_run_status(client, run_id, 'failed', ['executions'])
run_ended = result.pop('ended')
run_started = result.pop('started')
execution = result['executions'][-1]
execution_ended = execution.pop('ended')
execution_started = execution.pop('started')
self.assertTrue(run_ended >= run_started)
self.assertTrue(execution_ended >= execution_started)
self.assertTrue(execution_ended >= run_started)
self.assertTrue(run_ended >= execution_started)
expected = {
'id': run_id,
'name': workflow_name,
'parameters': None,
'workflow_id': workflow_id,
'products': {},
'status': 'failed',
'executions': [{
'id': execution['id'],
'execution_id': 1,
'ancestor_id': None,
'step': 'step-0',
'name': None,
'name': operation_name,
'status': 'failed',
}]
}
self.assertEquals(expected, result)
def test_invalidated_run(self, client):
'''Test invalidated run outcome'''
operation_id = 'flux:test-operation'
operation_name = 'Test Operation'
workflow_name = 'test invalid run'
specification = Yaml.serialize({
'name': workflow_name,
'entry': 'step-0',
'steps': {
'step-0': {
'operation': operation_id,
'parameters': {'outcome': 'invalidated'},
},
},
})
resp = self._setup_workflow(client, workflow_name, specification=specification)
self.assertEqual('OK', resp.status)
workflow_id = resp.content['id']
run = self._setup_active_run(client, workflow_id)
run_id = run['id']
result = self._poll_run_status(client, run_id, 'invalidated', ['executions'])
run_ended = result.pop('ended')
run_started = result.pop('started')
execution = result['executions'][-1]
execution_ended = execution.pop('ended')
execution_started = execution.pop('started')
self.assertTrue(run_ended >= run_started)
self.assertTrue(execution_ended >= execution_started)
self.assertTrue(execution_ended >= run_started)
self.assertTrue(run_ended >= execution_started)
expected = {
'id': run_id,
'name': workflow_name,
'parameters': None,
'workflow_id': workflow_id,
'products': {},
'status': 'invalidated',
'executions': [{
'id': execution['id'],
'execution_id': 1,
'ancestor_id': None,
'step': 'step-0',
'name': operation_name,
'status': 'invalidated',
}]
}
self.assertEquals(expected, result)
def test_abort_run(self, client):
'''Test setting run status to aborted'''
operation_id = 'flux:test-operation'
workflow_name = 'test abort run'
specification = Yaml.serialize({
'name': workflow_name,
'entry': 'step-0',
'steps': {
'step-0': {
'operation': operation_id,
'parameters': {'duration': 30},
'postoperation': [{
'actions': [{
'action': 'execute-step',
'step': 'step-1',
}],
}],
},
'step-1': {
'operation': operation_id,
'parameters': {'duration': 30},
},
},
})
resp = self._setup_workflow(client, workflow_name, specification=specification)
self.assertEqual('OK', resp.status)
workflow_id = resp.content['id']
run = self._setup_active_run(client, workflow_id, ('step-0','step-1'))
run_id = run['id']
client.execute('run', 'update', subject=run_id, data={'status': 'aborting'})
result = self._poll_run_status(client, run_id, 'aborted', ['executions'])
run_ended = result.pop('ended')
run_started = result.pop('started')
self.assertTrue(run_ended >= run_started)
ancestor_ids = []
for execution in result['executions']:
ancestor_ids.append(execution.pop('id'))
execution_ended = execution.pop('ended')
execution_started = execution.pop('started')
self.assertTrue(execution_ended >= execution_started)
self.assertTrue(execution_ended >= run_started)
self.assertTrue(run_ended >= execution_started)
expected = {
'id': run_id,
'name': workflow_name,
'parameters': None,
'workflow_id': workflow_id,
'products': {},
'status': 'aborted',
'executions': [
{
'execution_id': 1,
'ancestor_id': None,
'step': 'step-0',
'name': 'Test Operation',
'status': 'completed',
},
{
'execution_id': 2,
'ancestor_id': ancestor_ids[0],
'step': 'step-1',
'name': 'Test Operation',
'status': 'aborted',
},
],
}
self.assertEquals(expected, result)
def test_abort_execution(self, client):
'''Test setting execution status to aborted'''
operation_id = 'flux:test-operation'
workflow_name = 'test abort execution'
specification = Yaml.serialize({
'name': workflow_name,
'entry': 'step-0',
'steps': {
'step-0': {
'operation': operation_id,
'parameters': {'duration': 60},
'postoperation': [{
'actions': [{
'action': 'execute-step',
'step': 'step-1',
}],
}],
},
'step-1': {
'operation': operation_id,
'parameters': {'duration': 30},
},
},
})
resp = self._setup_workflow(client, workflow_name, specification=specification)
self.assertEqual('OK', resp.status)
workflow_id = resp.content['id']
run = self._setup_active_run(client, workflow_id, ('step-0',))
run_id = run['id']
execution_id = run['executions'][-1]['id']
client.execute('execution', 'update', subject=execution_id, data={'status': 'aborting'})
result = self._poll_run_status(client, run_id, 'aborted', ['executions'])
run_ended = result.pop('ended')
run_started = result.pop('started')
self.assertTrue(run_ended >= run_started)
ancestor_ids = []
for execution in result['executions']:
ancestor_ids.append(execution.pop('id'))
execution_ended = execution.pop('ended')
execution_started = execution.pop('started')
self.assertTrue(execution_ended >= execution_started)
self.assertTrue(execution_ended >= run_started)
self.assertTrue(run_ended >= execution_started)
expected = {
'id': run_id,
'name': workflow_name,
'parameters': None,
'workflow_id': workflow_id,
'products': {},
'status': 'aborted',
'executions': [
{
'execution_id': 1,
'ancestor_id': None,
'step': 'step-0',
'name': 'Test Operation',
'status': 'aborted',
},
],
}
self.assertEquals(expected, result)
def test_success_outcome_with_concurrent_executions(self, client):
'''Test success run outcome with active concurrent executions'''
operation_id = 'flux:test-operation'
operation_name = 'Test Operation'
workflow_name = 'test success outcome with concurrent executions'
specification = Yaml.serialize({
'name': workflow_name,
'entry': 'step-0',
'steps': {
'step-0': {
'operation': operation_id,
'postoperation': [{
'actions': [
{
'action': 'execute-step',
'step': 'step-1',
},
{
'action': 'execute-step',
'step': 'step-1',
}
],
}],
},
'step-1': {
'operation': operation_id,
},
},
})
resp = self._setup_workflow(client, workflow_name, specification=specification)
self.assertEqual('OK', resp.status)
workflow_id = resp.content['id']
run = self._setup_active_run(client, workflow_id)
run_id = run['id']
result = self._poll_run_status(client, run_id, 'completed', ['executions'])
run_ended = result.pop('ended')
run_started = result.pop('started')
executions = result['executions']
for execution in executions:
execution_ended = execution.pop('ended')
execution_started = execution.pop('started')
self.assertTrue(execution_ended >= execution_started)
self.assertTrue(execution_ended >= run_started)
self.assertTrue(run_ended >= execution_started)
expected = {
'id': run_id,
'name': workflow_name,
'parameters': None,
'workflow_id': workflow_id,
'products': {},
'status': 'completed',
'executions': [
{
'id': executions[0]['id'],
'execution_id': 1,
'ancestor_id': None,
'step': 'step-0',
'name': operation_name,
'status': 'completed',
},
{
'id': executions[1]['id'],
'execution_id': 2,
'ancestor_id': executions[0]['id'],
'step': 'step-1',
'name': operation_name,
'status': 'completed',
},
{
'id': executions[2]['id'],
'execution_id': 3,
'ancestor_id': executions[0]['id'],
'step': 'step-1',
'name': operation_name,
'status': 'completed',
},
]
}
self.assertEquals(expected, result)
def test_failure_outcome_with_concurrent_executions(self, client):
'''Test failure run outcome with active concurrent executions'''
operation_id = 'flux:test-operation'
operation_name = 'Test Operation'
workflow_name = 'test failure outcome with concurrent executions'
specification = Yaml.serialize({
'name': workflow_name,
'entry': 'step-0',
'steps': {
'step-0': {
'operation': operation_id,
'postoperation': [{
'actions': [
{
'action': 'execute-step',
'step': 'step-1',
'parameters': {'duration': 60}
},
{
'action': 'execute-step',
'step': 'step-1',
'parameters': {'outcome': 'failed'}
}
],
}],
},
'step-1': {
'operation': operation_id,
},
},
})
resp = self._setup_workflow(client, workflow_name, specification=specification)
self.assertEqual('OK', resp.status)
workflow_id = resp.content['id']
run = self._setup_active_run(client, workflow_id)
run_id = run['id']
result = self._poll_run_status(client, run_id, 'failed', ['executions'])
run_ended = result.pop('ended')
run_started = result.pop('started')
executions = result['executions']
for execution in executions:
execution_ended = execution.pop('ended')
execution_started = execution.pop('started')
self.assertTrue(execution_ended >= execution_started)
self.assertTrue(execution_ended >= run_started)
self.assertTrue(run_ended >= execution_started)
expected = {
'id': run_id,
'name': workflow_name,
'parameters': None,
'workflow_id': workflow_id,
'products': {},
'status': 'failed',
'executions': [
{
'id': executions[0]['id'],
'execution_id': 1,
'ancestor_id': None,
'step': 'step-0',
'name': operation_name,
'status': 'completed',
},
{
'id': executions[1]['id'],
'execution_id': 2,
'ancestor_id': executions[0]['id'],
'step': 'step-1',
'name': operation_name,
'status': 'aborted',
},
{
'id': executions[2]['id'],
'execution_id': 3,
'ancestor_id': executions[0]['id'],
'step': 'step-1',
'name': operation_name,
'status': 'failed',
},
]
}
self.assertEquals(expected, result)
def test_invalidated_outcome_with_concurrent_executions(self, client):
    '''Test invalidated run outcome with active concurrent executions'''
    operation_id = 'flux:test-operation'
    operation_name = 'Test Operation'
    workflow_name = 'test invalidated outcome with concurrent executions'
    # step-0 fans out two concurrent executions of step-1: a long-running
    # one (duration 60) and one that is invalidated immediately; the
    # invalidation should abort the sibling and invalidate the run.
    specification = Yaml.serialize({
        'name': workflow_name,
        'entry': 'step-0',
        'steps': {
            'step-0': {
                'operation': operation_id,
                'postoperation': [{
                    'actions': [
                        {
                            'action': 'execute-step',
                            'step': 'step-1',
                            'parameters': {'duration': 60}
                        },
                        {
                            'action': 'execute-step',
                            'step': 'step-1',
                            'parameters': {'outcome': 'invalidated'}
                        }
                    ],
                }],
            },
            'step-1': {
                'operation': operation_id,
            },
        },
    })
    resp = self._setup_workflow(client, workflow_name, specification=specification)
    self.assertEqual('OK', resp.status)
    workflow_id = resp.content['id']
    run = self._setup_active_run(client, workflow_id)
    run_id = run['id']
    result = self._poll_run_status(client, run_id, 'invalidated', ['executions'])
    # Strip volatile timestamps, asserting the basic ordering invariants.
    run_ended = result.pop('ended')
    run_started = result.pop('started')
    executions = result['executions']
    for execution in executions:
        execution_ended = execution.pop('ended')
        execution_started = execution.pop('started')
        self.assertTrue(execution_ended >= execution_started)
        self.assertTrue(execution_ended >= run_started)
        self.assertTrue(run_ended >= execution_started)
    expected = {
        'id': run_id,
        'name': workflow_name,
        'parameters': None,
        'workflow_id': workflow_id,
        'products': {},
        'status': 'invalidated',
        'executions': [
            {
                'id': executions[0]['id'],
                'execution_id': 1,
                'ancestor_id': None,
                'step': 'step-0',
                'name': operation_name,
                'status': 'completed',
            },
            {
                'id': executions[1]['id'],
                'execution_id': 2,
                'ancestor_id': executions[0]['id'],
                'step': 'step-1',
                'name': operation_name,
                'status': 'aborted',
            },
            {
                'id': executions[2]['id'],
                'execution_id': 3,
                'ancestor_id': executions[0]['id'],
                'step': 'step-1',
                'name': operation_name,
                'status': 'invalidated',
            },
        ]
    }
    # assertEquals is a deprecated alias (removed in Python 3.12).
    self.assertEqual(expected, result)
class TestIgnoreStatusRuns(BaseTestCase):
    """Tests covering the ignore-step-failure action on failed steps.

    Uses assertEqual throughout (assertEquals is a deprecated alias that
    was removed in Python 3.12) and factors the shared workflow-setup /
    poll / normalize boilerplate into ``_run_and_normalize``.
    """

    def _run_and_normalize(self, client, name, specification, status):
        """Create a workflow from *specification*, run it, poll until the
        run reaches *status*, then strip the volatile fields (execution
        ids and all timestamps) from the result so callers can compare it
        for exact equality.

        Asserts the timing invariants along the way: the run ends after
        it starts, and every execution starts/ends within the run window.

        Returns a ``(result, run_id, workflow_id)`` tuple.
        """
        resp = self._setup_workflow(client, name, specification)
        self.assertEqual('OK', resp.status)
        workflow_id = resp.content['id']
        resp = self._setup_run(client, workflow_id)
        self.assertEqual('OK', resp.status)
        run_id = resp.content['id']
        result = self._poll_run_status(client, run_id, status, include=['executions'])
        run_started = result.pop('started')
        run_ended = result.pop('ended')
        self.assertTrue(run_ended >= run_started)
        for execution in result['executions']:
            execution.pop('id')
            execution.pop('ancestor_id')
            exec_started = execution.pop('started')
            exec_ended = execution.pop('ended')
            self.assertTrue(exec_ended >= exec_started)
            self.assertTrue(run_ended >= exec_ended)
            self.assertTrue(exec_started >= run_started)
        return result, run_id, workflow_id

    def test_failure(self, client):
        """Test failure run with failed step."""
        name = 'test failure'
        # Without ignore-step-failure, the failed step-0 terminates the
        # run: step-1 never executes.
        specification = Yaml.serialize({
            'name': name,
            'entry': 'step-0',
            'steps': {
                'step-0': {
                    'operation': 'flux:test-operation',
                    'description': 'test failed operation',
                    'parameters': {'outcome': 'failed'},
                    'postoperation': [{
                        'actions': [{
                            'action': 'execute-step',
                            'step': 'step-1',
                            'parameters': {'outcome': 'completed'},
                        }],
                    }],
                },
                'step-1': {
                    'operation': 'flux:test-operation',
                    'description': 'test completed operation',
                    'parameters': {'outcome': 'completed'},
                },
            },
        })
        result, run_id, workflow_id = self._run_and_normalize(
            client, name, specification, 'failed')
        expected = {
            'id': run_id,
            'name': name,
            'workflow_id': workflow_id,
            'status': 'failed',
            'parameters': None,
            'products': {},
            'executions': [{
                'execution_id': 1,
                'step': 'step-0',
                'name': 'test failed operation',
                'status': 'failed',
            }],
        }
        self.assertEqual(result, expected)

    def test_ignore_failure(self, client):
        """Test use of ignore failure of failed step."""
        name = 'test ignore failure'
        # ignore-step-failure precedes execute-step, so step-1 still runs
        # even though step-0 failed; the run status remains 'failed'.
        specification = Yaml.serialize({
            'name': name,
            'entry': 'step-0',
            'steps': {
                'step-0': {
                    'operation': 'flux:test-operation',
                    'description': 'test failed operation',
                    'parameters': {'outcome': 'failed'},
                    'postoperation': [{
                        'terminal': False,
                        'actions': [
                            {
                                'action': 'ignore-step-failure',
                            },
                            {
                                'action': 'execute-step',
                                'step': 'step-1',
                                'parameters': {'outcome': 'completed'},
                            }
                        ],
                    }],
                },
                'step-1': {
                    'operation': 'flux:test-operation',
                    'description': 'test operation',
                    'parameters': {'outcome': 'completed'},
                },
            },
        })
        result, run_id, workflow_id = self._run_and_normalize(
            client, name, specification, 'failed')
        expected = {
            'id': run_id,
            'name': name,
            'workflow_id': workflow_id,
            'status': 'failed',
            'parameters': None,
            'products': {},
            'executions': [
                {
                    'execution_id': 1,
                    'step': 'step-0',
                    'name': 'test failed operation',
                    'status': 'failed',
                },
                {
                    'execution_id': 2,
                    'step': 'step-1',
                    'name': 'test operation',
                    'status': 'completed',
                },
            ],
        }
        self.assertEqual(result, expected)

    def test_ignore_failure_incorrect_use(self, client):
        """Test failure with ignore step with incorrect use case."""
        name = 'test ignore failure bad case'
        # ignore-step-failure listed AFTER execute-step has no effect:
        # the run terminates at the failed step and step-1 never runs.
        specification = Yaml.serialize({
            'name': name,
            'entry': 'step-0',
            'steps': {
                'step-0': {
                    'operation': 'flux:test-operation',
                    'description': 'test failed operation',
                    'parameters': {'outcome': 'failed'},
                    'postoperation': [{
                        'terminal': False,
                        'actions': [
                            {
                                'action': 'execute-step',
                                'step': 'step-1',
                                'parameters': {'outcome': 'completed'},
                            },
                            {
                                'action': 'ignore-step-failure',
                            },
                        ],
                    }],
                },
                'step-1': {
                    'operation': 'flux:test-operation',
                    'description': 'test operation',
                    'parameters': {'outcome': 'completed'},
                },
            },
        })
        result, run_id, workflow_id = self._run_and_normalize(
            client, name, specification, 'failed')
        expected = {
            'id': run_id,
            'name': name,
            'workflow_id': workflow_id,
            'status': 'failed',
            'parameters': None,
            'products': {},
            'executions': [{
                'execution_id': 1,
                'step': 'step-0',
                'name': 'test failed operation',
                'status': 'failed',
            }],
        }
        self.assertEqual(result, expected)
class TestRunTimedoutCases(BaseTestCase):
    """Test run cases involving the timedout status.

    Uses assertEqual throughout (assertEquals is a deprecated alias that
    was removed in Python 3.12). The two single-run cases share the
    setup/poll/normalize boilerplate via ``_run_and_normalize``; the
    concurrent-execution case keeps its own normalization because it
    preserves execution ids for ancestor comparisons.
    """

    def _run_and_normalize(self, client, name, specification, status):
        """Create a workflow from *specification*, run it, poll until the
        run reaches *status*, then strip the volatile fields (execution
        ids and all timestamps), asserting timing invariants.

        Returns a ``(result, run_id, workflow_id)`` tuple ready for an
        exact equality comparison.
        """
        resp = self._setup_workflow(client, name, specification)
        self.assertEqual('OK', resp.status)
        workflow_id = resp.content['id']
        resp = self._setup_run(client, workflow_id)
        self.assertEqual('OK', resp.status)
        run_id = resp.content['id']
        result = self._poll_run_status(client, run_id, status, include=['executions'])
        run_started = result.pop('started')
        run_ended = result.pop('ended')
        self.assertTrue(run_ended >= run_started)
        for execution in result['executions']:
            execution.pop('id')
            execution.pop('ancestor_id')
            exec_started = execution.pop('started')
            exec_ended = execution.pop('ended')
            self.assertTrue(exec_ended >= exec_started)
            self.assertTrue(run_ended >= exec_ended)
            self.assertTrue(exec_started >= run_started)
        return result, run_id, workflow_id

    def test_timedout_case(self, client):
        """Test timedout with multi-step run."""
        name = 'test timedout run'
        # step-0's duration (100) exceeds its timeout (1), so it times
        # out and the follow-up step-1 is never executed.
        specification = Yaml.serialize({
            'name': name,
            'entry': 'step-0',
            'steps': {
                'step-0': {
                    'operation': 'flux:test-operation',
                    'description': 'test timedout operation',
                    'timeout': 1,
                    'parameters': {'duration': 100},
                    'postoperation': [{
                        'terminal': False,
                        'actions': [{
                            'action': 'execute-step',
                            'step': 'step-1',
                            'parameters': {'outcome': 'completed'},
                        }],
                    }],
                },
                'step-1': {
                    'operation': 'flux:test-operation',
                    'description': 'test completed operation',
                    'parameters': {'outcome': 'completed'},
                },
            },
        })
        result, run_id, workflow_id = self._run_and_normalize(
            client, name, specification, 'timedout')
        expected = {
            'id': run_id,
            'name': name,
            'workflow_id': workflow_id,
            'status': 'timedout',
            'parameters': None,
            'products': {},
            'executions': [
                {
                    'execution_id': 1,
                    'step': 'step-0',
                    'name': 'test timedout operation',
                    'status': 'timedout',
                },
            ],
        }
        self.assertEqual(result, expected)

    def test_timedout_run_with_concurrent_executions(self, client):
        '''Test timedout run with active concurrent executions'''
        operation_id = 'flux:test-operation'
        operation_name = 'Test Operation'
        workflow_name = 'test timedout run with concurrent executions'
        # step-0 fans out a long-running step-1 and a step-2 whose
        # timeout (1) is shorter than its duration (100); the timeout
        # should abort the sibling and time out the run.
        specification = Yaml.serialize({
            'name': workflow_name,
            'entry': 'step-0',
            'steps': {
                'step-0': {
                    'operation': operation_id,
                    'postoperation': [{
                        'actions': [
                            {
                                'action': 'execute-step',
                                'step': 'step-1',
                                'parameters': {'duration': 160}
                            },
                            {
                                'action': 'execute-step',
                                'step': 'step-2',
                                'parameters': {'duration': 100},
                            }
                        ],
                    }],
                },
                'step-1': {
                    'operation': operation_id,
                },
                'step-2': {
                    'timeout': 1,
                    'operation': operation_id,
                },
            },
        })
        resp = self._setup_workflow(client, workflow_name, specification=specification)
        self.assertEqual('OK', resp.status)
        workflow_id = resp.content['id']
        run = self._setup_active_run(client, workflow_id)
        run_id = run['id']
        result = self._poll_run_status(client, run_id, 'timedout', include=['executions'])
        # Execution ids are kept here (unlike _run_and_normalize) so the
        # ancestor_id links can be verified against executions[0].
        run_ended = result.pop('ended')
        run_started = result.pop('started')
        executions = result['executions']
        for execution in executions:
            execution_ended = execution.pop('ended')
            execution_started = execution.pop('started')
            self.assertTrue(execution_ended >= execution_started)
            self.assertTrue(execution_ended >= run_started)
            self.assertTrue(run_ended >= execution_started)
        expected = {
            'id': run_id,
            'name': workflow_name,
            'parameters': None,
            'workflow_id': workflow_id,
            'products': {},
            'status': 'timedout',
            'executions': [
                {
                    'id': executions[0]['id'],
                    'execution_id': 1,
                    'ancestor_id': None,
                    'step': 'step-0',
                    'name': operation_name,
                    'status': 'completed',
                },
                {
                    'id': executions[1]['id'],
                    'execution_id': 2,
                    'ancestor_id': executions[0]['id'],
                    'step': 'step-1',
                    'name': operation_name,
                    'status': 'aborted',
                },
                {
                    'id': executions[2]['id'],
                    'execution_id': 3,
                    'ancestor_id': executions[0]['id'],
                    'step': 'step-2',
                    'name': operation_name,
                    'status': 'timedout',
                },
            ]
        }
        self.assertEqual(expected, result)

    def test_ignore_timedout_case(self, client):
        """Test ignore-failure on timedout step."""
        name = 'test ignore timedout run'
        # ignore-step-failure lets step-1 run after step-0 times out; the
        # run status itself still ends up 'timedout'.
        specification = Yaml.serialize({
            'name': name,
            'entry': 'step-0',
            'steps': {
                'step-0': {
                    'operation': 'flux:test-operation',
                    'description': 'test timedout operation',
                    'timeout': 1,
                    'parameters': {'duration': 100},
                    'postoperation': [{
                        'terminal': False,
                        'actions': [
                            {
                                'action': 'ignore-step-failure',
                            },
                            {
                                'action': 'execute-step',
                                'step': 'step-1',
                                'parameters': {'outcome': 'completed'},
                            }
                        ],
                    }],
                },
                'step-1': {
                    'operation': 'flux:test-operation',
                    'description': 'test completed operation',
                    'parameters': {'outcome': 'completed'},
                },
            },
        })
        result, run_id, workflow_id = self._run_and_normalize(
            client, name, specification, 'timedout')
        expected = {
            'id': run_id,
            'name': name,
            'workflow_id': workflow_id,
            'status': 'timedout',
            'parameters': None,
            'products': {},
            'executions': [
                {
                    'execution_id': 1,
                    'step': 'step-0',
                    'name': 'test timedout operation',
                    'status': 'timedout',
                },
                {
                    'execution_id': 2,
                    'step': 'step-1',
                    'name': 'test completed operation',
                    'status': 'completed',
                },
            ],
        }
        self.assertEqual(result, expected)
class TestInvalidRunCase(BaseTestCase):
    """Test run cases involving invalidated status.

    Uses assertEqual throughout (assertEquals is a deprecated alias that
    was removed in Python 3.12) and factors the shared setup / poll /
    normalize boilerplate into ``_run_and_normalize``. Also removes the
    redundant ``run = ...; result = run`` aliasing the original
    test_invalidated_case carried.
    """

    def _run_and_normalize(self, client, name, specification, status):
        """Create a workflow from *specification*, run it, poll until the
        run reaches *status*, then strip the volatile fields (execution
        ids and all timestamps), asserting timing invariants.

        Returns a ``(result, run_id, workflow_id)`` tuple ready for an
        exact equality comparison.
        """
        resp = self._setup_workflow(client, name, specification)
        self.assertEqual('OK', resp.status)
        workflow_id = resp.content['id']
        resp = self._setup_run(client, workflow_id)
        self.assertEqual('OK', resp.status)
        run_id = resp.content['id']
        result = self._poll_run_status(client, run_id, status, include=['executions'])
        run_started = result.pop('started')
        run_ended = result.pop('ended')
        self.assertTrue(run_ended >= run_started)
        for execution in result['executions']:
            execution.pop('id')
            execution.pop('ancestor_id')
            exec_started = execution.pop('started')
            exec_ended = execution.pop('ended')
            self.assertTrue(exec_ended >= exec_started)
            self.assertTrue(run_ended >= exec_ended)
            self.assertTrue(exec_started >= run_started)
        return result, run_id, workflow_id

    def test_invalidated_case(self, client):
        """Test invalidated run with multi-step run."""
        name = 'test invalidated run'
        # The invalidated step-0 terminates the run; step-1 never runs.
        specification = Yaml.serialize({
            'name': name,
            'entry': 'step-0',
            'steps': {
                'step-0': {
                    'operation': 'flux:test-operation',
                    'description': 'test invalid operation',
                    'parameters': {'outcome': 'invalidated'},
                    'postoperation': [{
                        'actions': [{
                            'action': 'execute-step',
                            'step': 'step-1',
                            'parameters': {'outcome': 'completed'},
                        }],
                        'terminal': False,
                    }],
                },
                'step-1': {
                    'operation': 'flux:test-operation',
                    'description': 'test completed operation',
                    'parameters': {'outcome': 'completed'},
                },
            },
        })
        result, run_id, workflow_id = self._run_and_normalize(
            client, name, specification, 'invalidated')
        expected = {
            'id': run_id,
            'name': name,
            'workflow_id': workflow_id,
            'status': 'invalidated',
            'parameters': None,
            'products': {},
            'executions': [
                {
                    'execution_id': 1,
                    'step': 'step-0',
                    'name': 'test invalid operation',
                    'status': 'invalidated',
                },
            ],
        }
        self.assertEqual(result, expected)

    def test_ignore_invalidated_case(self, client):
        """Test that ignore-step-failure does not override an invalidated
        step: the run still ends invalidated and step-1 never executes
        (per the expected payload below)."""
        name = 'test ignore invalidated run'
        specification = Yaml.serialize({
            'name': name,
            'entry': 'step-0',
            'steps': {
                'step-0': {
                    'operation': 'flux:test-operation',
                    'description': 'test invalid operation',
                    'parameters': {'outcome': 'invalidated'},
                    'postoperation': [{
                        'actions': [
                            {
                                'action': 'ignore-step-failure',
                            },
                            {
                                'action': 'execute-step',
                                'step': 'step-1',
                                'parameters': {'outcome': 'completed'},
                            },
                        ],
                        'terminal': False,
                    }],
                },
                'step-1': {
                    'operation': 'flux:test-operation',
                    'description': 'test completed operation',
                    'parameters': {'outcome': 'completed'},
                },
            },
        })
        result, run_id, workflow_id = self._run_and_normalize(
            client, name, specification, 'invalidated')
        expected = {
            'id': run_id,
            'name': name,
            'workflow_id': workflow_id,
            'status': 'invalidated',
            'parameters': None,
            'products': {},
            'executions': [
                {
                    'execution_id': 1,
                    'step': 'step-0',
                    'name': 'test invalid operation',
                    'status': 'invalidated',
                },
            ],
        }
        self.assertEqual(result, expected)
| 35.98183
| 96
| 0.47311
| 4,617
| 55,448
| 5.50444
| 0.045051
| 0.037381
| 0.038837
| 0.020658
| 0.874557
| 0.853978
| 0.835327
| 0.819588
| 0.810695
| 0.793539
| 0
| 0.007136
| 0.403513
| 55,448
| 1,540
| 97
| 36.005195
| 0.761263
| 0.020416
| 0
| 0.764359
| 0
| 0
| 0.17243
| 0.002547
| 0
| 0
| 0
| 0
| 0.097938
| 1
| 0.021355
| false
| 0.001473
| 0.005891
| 0
| 0.038292
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6d600b62eac5a357f75e569366238d136eee669d
| 215,123
|
py
|
Python
|
tests/template_builder/conftest.py
|
ZackPashkin/toloka-kit
|
8f650e5d8cdded1949ca633cf78f9b851ce839bb
|
[
"Apache-2.0"
] | 153
|
2021-02-06T13:41:11.000Z
|
2022-03-19T17:51:01.000Z
|
tests/template_builder/conftest.py
|
ZackPashkin/toloka-kit
|
8f650e5d8cdded1949ca633cf78f9b851ce839bb
|
[
"Apache-2.0"
] | 29
|
2021-01-15T12:54:37.000Z
|
2022-02-07T07:45:32.000Z
|
tests/template_builder/conftest.py
|
ZackPashkin/toloka-kit
|
8f650e5d8cdded1949ca633cf78f9b851ce839bb
|
[
"Apache-2.0"
] | 17
|
2021-01-29T15:20:04.000Z
|
2022-01-30T07:21:03.000Z
|
# flake8: noqa
import pytest
EXAMPLES = [
{
"config": "{\n \"vars\": {\n \"author\": \"assessorlead\",\n \"task\": \"old\",\n \"yandex\": {\n \"type\": \"helper.search-query\",\n \"engine\": \"yandex\",\n \"query\": {\n \"type\": \"data.input\",\n \"path\": \"query\"\n }\n }\n },\n \"view\": {\n \"type\": \"view.list\",\n \"items\": [\n {\n \"type\": \"view.alert\",\n \"theme\": \"info\",\n \"content\": {\n \"type\": \"view.text\",\n \"content\": {\n \"type\": \"data.input\",\n \"path\": \"query\"\n }\n }\n },\n {\n \"type\": \"view.action-button\",\n \"label\": \"\u0423\u0442\u043e\u0447\u043d\u0438\u0442\u044c \u0432 \u042f\u043d\u0434\u0435\u043a\u0441\u0435\",\n \"action\": {\n \"type\": \"action.open-link\",\n \"payload\": {\n \"$ref\": \"vars.yandex\"\n }\n }\n },\n {\n \"type\": \"field.button-radio-group\",\n \"options\": [\n {\n \"label\": \"\u0414\u0430\",\n \"value\": \"yes\"\n },\n {\n \"label\": \"\u0412\u043e\u0437\u043c\u043e\u0436\u043d\u043e\",\n \"value\": \"maybe\"\n },\n {\n \"label\": \"\u041d\u0435\u0442\",\n \"value\": \"no\"\n }\n ],\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"result\"\n }\n }\n ],\n \"validation\": {\n \"type\": \"condition.required\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"result\"\n }\n }\n },\n \"plugins\": [\n {\n \"1\": {\n \"type\": \"action.set\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"result\"\n },\n \"payload\": \"yes\"\n },\n \"2\": {\n \"type\": \"action.set\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"result\"\n },\n \"payload\": \"maybe\"\n },\n \"3\": {\n \"type\": \"action.set\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"result\"\n },\n \"payload\": \"no\"\n },\n \"type\": \"plugin.hotkeys\"\n },\n {\n \"type\": \"plugin.toloka\",\n \"layout\": {\n \"kind\": \"scroll\",\n \"taskWidth\": 500\n }\n }\n ]\n}",
"lock": {
"action.open-link": "1.0.1",
"action.set": "1.0.1",
"condition.required": "1.1.0",
"core": "1.3.1",
"field.button-radio-group": "1.0.1",
"helper.search-query": "1.1.1",
"plugin.hotkeys": "1.0.2",
"plugin.toloka": "1.1.0",
"view.action-button": "1.0.0",
"view.alert": "1.0.0",
"view.list": "1.0.0",
"view.text": "1.0.0"
},
"settings": {
"showFinish": True,
"showFullscreen": True,
"showInstructions": True,
"showMessage": True,
"showReward": True,
"showSkip": True,
"showSubmit": True,
"showTimer": True,
"showTitle": True
},
"type": "tb",
"assets": {
"script_urls": [
"https://yastat.net/s3/tb/launcher.template/_/master.825664685.2020-11-19.6390a7cf.js"
],
"style_urls": [
"https://yastat.net/s3/tb/launcher.template/_/master.825664685.2020-11-19.6390a7cf.css"
]
},
"markup": "",
"styles": "",
"script": "window.configureTemplate({\n \"vars\": {\n \"author\": \"assessorlead\",\n \"task\": \"old\",\n \"yandex\": {\n \"type\": \"helper.search-query\",\n \"engine\": \"yandex\",\n \"query\": {\n \"type\": \"data.input\",\n \"path\": \"query\"\n }\n }\n },\n \"view\": {\n \"type\": \"view.list\",\n \"items\": [\n {\n \"type\": \"view.alert\",\n \"theme\": \"info\",\n \"content\": {\n \"type\": \"view.text\",\n \"content\": {\n \"type\": \"data.input\",\n \"path\": \"query\"\n }\n }\n },\n {\n \"type\": \"view.action-button\",\n \"label\": \"\u0423\u0442\u043e\u0447\u043d\u0438\u0442\u044c \u0432 \u042f\u043d\u0434\u0435\u043a\u0441\u0435\",\n \"action\": {\n \"type\": \"action.open-link\",\n \"payload\": {\n \"$ref\": \"vars.yandex\"\n }\n }\n },\n {\n \"type\": \"field.button-radio-group\",\n \"options\": [\n {\n \"label\": \"\u0414\u0430\",\n \"value\": \"yes\"\n },\n {\n \"label\": \"\u0412\u043e\u0437\u043c\u043e\u0436\u043d\u043e\",\n \"value\": \"maybe\"\n },\n {\n \"label\": \"\u041d\u0435\u0442\",\n \"value\": \"no\"\n }\n ],\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"result\"\n }\n }\n ],\n \"validation\": {\n \"type\": \"condition.required\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"result\"\n }\n }\n },\n \"plugins\": [\n {\n \"1\": {\n \"type\": \"action.set\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"result\"\n },\n \"payload\": \"yes\"\n },\n \"2\": {\n \"type\": \"action.set\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"result\"\n },\n \"payload\": \"maybe\"\n },\n \"3\": {\n \"type\": \"action.set\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"result\"\n },\n \"payload\": \"no\"\n },\n \"type\": \"plugin.hotkeys\"\n },\n {\n \"type\": \"plugin.toloka\",\n \"layout\": {\n \"kind\": \"scroll\",\n \"taskWidth\": 500\n }\n }\n ]\n}, 
{\"action.open-link\":\"1.0.1\",\"action.set\":\"1.0.1\",\"condition.required\":\"1.1.0\",\"core\":\"1.3.1\",\"field.button-radio-group\":\"1.0.1\",\"helper.search-query\":\"1.1.1\",\"plugin.hotkeys\":\"1.0.2\",\"plugin.toloka\":\"1.1.0\",\"view.action-button\":\"1.0.0\",\"view.alert\":\"1.0.0\",\"view.list\":\"1.0.0\",\"view.text\":\"1.0.0\"}, 'https://tb.yandex.net/registry2')"
},
{
"config": "{\n \"vars\": {\n \"author\": \"assessorlead\",\n \"task\": \"old\"\n },\n \"view\": {\n \"type\": \"layout.bars\",\n \"content\": {\n \"type\": \"view.image\",\n \"fullHeight\": true,\n \"scrollable\": true,\n \"url\": {\n \"type\": \"data.input\",\n \"path\": \"screenshot_url\"\n }\n },\n \"barBefore\": {\n \"type\": \"view.list\",\n \"items\": [\n {\n \"type\": \"view.labeled-list\",\n \"minWidth\": 150,\n \"items\": [\n {\n \"label\": \"\u0417\u0430\u043f\u0440\u043e\u0441:\",\n \"content\": {\n \"type\": \"view.link\",\n \"url\": {\n \"type\": \"helper.search-query\",\n \"query\": {\n \"type\": \"data.input\",\n \"path\": \"query\"\n },\n \"engine\": \"yandex\"\n },\n \"content\": {\n \"type\": \"data.input\",\n \"path\": \"query\"\n }\n }\n },\n {\n \"label\": \"\u0420\u0435\u0433\u0438\u043e\u043d:\",\n \"content\": {\n \"type\": \"view.text\",\n \"content\": {\n \"type\": \"data.input\",\n \"path\": \"region_name\"\n }\n }\n },\n {\n \"label\": \"\u0414\u0435\u0432\u0430\u0439\u0441:\",\n \"content\": {\n \"type\": \"view.text\",\n \"content\": {\n \"type\": \"data.input\",\n \"path\": \"device\"\n }\n }\n }\n ]\n },\n {\n \"type\": \"view.alert\",\n \"theme\": \"warning\",\n \"content\": {\n \"type\": \"view.link\",\n \"url\": {\n \"type\": \"data.input\",\n \"path\": \"url\"\n }\n }\n }\n ]\n },\n \"barAfter\": {\n \"type\": \"field.button-radio-group\",\n \"validation\": {\n \"type\": \"condition.required\"\n },\n \"options\": [\n {\n \"label\": \"\u0420\u0435\u043b\",\n \"value\": \"RELEVANT\"\n },\n {\n \"label\": \"\u041d\u0435\u0440\u0435\u043b\",\n \"value\": \"IRRELEVANT\"\n },\n {\n \"label\": \"404\",\n \"value\": \"_404\"\n }\n ],\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"relevance\"\n }\n }\n },\n \"plugins\": [\n {\n \"type\": \"plugin.toloka\",\n \"layout\": {\n \"kind\": \"pager\"\n }\n }\n ]\n}",
"lock": {
"condition.required": "1.0.0",
"core": "1.0.0",
"field.button-radio-group": "1.0.0",
"helper.search-query": "1.0.0",
"layout.bars": "1.0.0",
"plugin.toloka": "1.0.0",
"view.alert": "1.0.0",
"view.image": "1.0.0",
"view.labeled-list": "1.0.0",
"view.link": "1.0.0",
"view.list": "1.0.0",
"view.text": "1.0.0"
},
"settings": {
"showFinish": True,
"showFullscreen": True,
"showInstructions": True,
"showMessage": True,
"showReward": True,
"showSkip": True,
"showSubmit": True,
"showTimer": True,
"showTitle": True
},
"type": "tb",
"assets": {
"script_urls": [
"https://yastat.net/s3/tb/launcher.template/_/master.825664685.2020-11-19.6390a7cf.js"
],
"style_urls": [
"https://yastat.net/s3/tb/launcher.template/_/master.825664685.2020-11-19.6390a7cf.css"
]
},
"markup": "",
"styles": "",
"script": "window.configureTemplate({\n \"vars\": {\n \"author\": \"assessorlead\",\n \"task\": \"old\"\n },\n \"view\": {\n \"type\": \"layout.bars\",\n \"content\": {\n \"type\": \"view.image\",\n \"fullHeight\": true,\n \"scrollable\": true,\n \"url\": {\n \"type\": \"data.input\",\n \"path\": \"screenshot_url\"\n }\n },\n \"barBefore\": {\n \"type\": \"view.list\",\n \"items\": [\n {\n \"type\": \"view.labeled-list\",\n \"minWidth\": 150,\n \"items\": [\n {\n \"label\": \"\u0417\u0430\u043f\u0440\u043e\u0441:\",\n \"content\": {\n \"type\": \"view.link\",\n \"url\": {\n \"type\": \"helper.search-query\",\n \"query\": {\n \"type\": \"data.input\",\n \"path\": \"query\"\n },\n \"engine\": \"yandex\"\n },\n \"content\": {\n \"type\": \"data.input\",\n \"path\": \"query\"\n }\n }\n },\n {\n \"label\": \"\u0420\u0435\u0433\u0438\u043e\u043d:\",\n \"content\": {\n \"type\": \"view.text\",\n \"content\": {\n \"type\": \"data.input\",\n \"path\": \"region_name\"\n }\n }\n },\n {\n \"label\": \"\u0414\u0435\u0432\u0430\u0439\u0441:\",\n \"content\": {\n \"type\": \"view.text\",\n \"content\": {\n \"type\": \"data.input\",\n \"path\": \"device\"\n }\n }\n }\n ]\n },\n {\n \"type\": \"view.alert\",\n \"theme\": \"warning\",\n \"content\": {\n \"type\": \"view.link\",\n \"url\": {\n \"type\": \"data.input\",\n \"path\": \"url\"\n }\n }\n }\n ]\n },\n \"barAfter\": {\n \"type\": \"field.button-radio-group\",\n \"validation\": {\n \"type\": \"condition.required\"\n },\n \"options\": [\n {\n \"label\": \"\u0420\u0435\u043b\",\n \"value\": \"RELEVANT\"\n },\n {\n \"label\": \"\u041d\u0435\u0440\u0435\u043b\",\n \"value\": \"IRRELEVANT\"\n },\n {\n \"label\": \"404\",\n \"value\": \"_404\"\n }\n ],\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"relevance\"\n }\n }\n },\n \"plugins\": [\n {\n \"type\": \"plugin.toloka\",\n \"layout\": {\n \"kind\": \"pager\"\n }\n }\n ]\n}, 
{\"condition.required\":\"1.0.0\",\"core\":\"1.0.0\",\"field.button-radio-group\":\"1.0.0\",\"helper.search-query\":\"1.0.0\",\"layout.bars\":\"1.0.0\",\"plugin.toloka\":\"1.0.0\",\"view.alert\":\"1.0.0\",\"view.image\":\"1.0.0\",\"view.labeled-list\":\"1.0.0\",\"view.link\":\"1.0.0\",\"view.list\":\"1.0.0\",\"view.text\":\"1.0.0\"}, 'https://tb.yandex.net/registry2')"
},
{
"config": "{\n \"vars\": {\n \"author\": \"assessorlead\",\n \"task\": \"old\"\n },\n \"view\": {\n \"type\": \"layout.sidebar\",\n \"content\": {\n \"type\": \"view.list\",\n \"items\": [\n {\n \"type\": \"layout.columns\",\n \"items\": [\n {\n \"type\": \"view.image\",\n \"url\": {\n \"type\": \"data.input\",\n \"path\": \"Front\"\n },\n \"minWidth\": 350,\n \"rotatable\": true\n },\n {\n \"type\": \"view.image\",\n \"url\": {\n \"type\": \"data.input\",\n \"path\": \"Back\"\n },\n \"minWidth\": 350,\n \"rotatable\": true\n }\n ]\n },\n {\n \"type\": \"layout.columns\",\n \"items\": [\n {\n \"type\": \"view.image\",\n \"url\": {\n \"type\": \"data.input\",\n \"path\": \"Left\"\n },\n \"minWidth\": 350,\n \"rotatable\": true\n },\n {\n \"type\": \"view.image\",\n \"url\": {\n \"type\": \"data.input\",\n \"path\": \"Right\"\n },\n \"minWidth\": 350,\n \"rotatable\": true\n }\n ]\n }\n ]\n },\n \"controls\": {\n \"type\": \"field.radio-group\",\n \"label\": \"\u041a \u043a\u0430\u043a\u043e\u0439 \u0433\u0440\u0443\u043f\u043f\u0435 \u043f\u043e \u0443\u0440\u043e\u0432\u043d\u044e \u043f\u043e\u0432\u0440\u0435\u0436\u0434\u0435\u043d\u0438\u0439 \u043e\u0442\u043d\u043e\u0441\u0438\u0442\u0441\u044f \u0422\u0421 \u0432\u044b\u0448\u0435?\",\n \"options\": [\n {\n \"label\": \"\u041d\u0435\u0442 \u0438\u043b\u0438 \u043f\u043e\u0447\u0442\u0438 \u043d\u0435\u0442 \u043f\u043e\u0432\u0440\u0435\u0436\u0434\u0435\u043d\u0438\u0439\",\n \"value\": \"YES\"\n },\n {\n \"label\": \"\u0415\u0441\u0442\u044c \u043f\u043e\u0432\u0440\u0435\u0436\u0434\u0435\u043d\u0438\u044f\",\n \"value\": \"NO\"\n }\n ],\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"isDefect4\"\n },\n \"validation\": {\n \"type\": \"condition.required\",\n \"hint\": \"\u041d\u0443\u0436\u043d\u043e \u0432\u044b\u0431\u0440\u0430\u0442\u044c \u043e\u0434\u0438\u043d \u0438\u0437 \u0432\u0430\u0440\u0438\u0430\u043d\u0442\u043e\u0432\"\n }\n }\n },\n \"plugins\": [\n {\n \"1\": {\n \"type\": 
\"action.set\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"isDefect4\"\n },\n \"payload\": \"YES\"\n },\n \"2\": {\n \"type\": \"action.set\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"isDefect4\"\n },\n \"payload\": \"NO\"\n },\n \"type\": \"plugin.hotkeys\"\n }\n ]\n}",
"lock": {
"action.set": "1.0.0",
"condition.required": "1.0.1",
"core": "1.2.2",
"field.radio-group": "1.0.0",
"layout.columns": "1.0.0",
"layout.sidebar": "1.0.0",
"plugin.hotkeys": "1.0.1",
"view.image": "1.0.2",
"view.list": "1.0.0"
},
"settings": {
"showFinish": True,
"showFullscreen": True,
"showInstructions": True,
"showMessage": True,
"showReward": True,
"showSkip": True,
"showSubmit": True,
"showTimer": True,
"showTitle": True
},
"type": "tb",
"assets": {
"script_urls": [
"https://yastat.net/s3/tb/launcher.template/_/master.825664685.2020-11-19.6390a7cf.js"
],
"style_urls": [
"https://yastat.net/s3/tb/launcher.template/_/master.825664685.2020-11-19.6390a7cf.css"
]
},
"markup": "",
"styles": "",
"script": "window.configureTemplate({\n \"vars\": {\n \"author\": \"assessorlead\",\n \"task\": \"old\"\n },\n \"view\": {\n \"type\": \"layout.sidebar\",\n \"content\": {\n \"type\": \"view.list\",\n \"items\": [\n {\n \"type\": \"layout.columns\",\n \"items\": [\n {\n \"type\": \"view.image\",\n \"url\": {\n \"type\": \"data.input\",\n \"path\": \"Front\"\n },\n \"minWidth\": 350,\n \"rotatable\": true\n },\n {\n \"type\": \"view.image\",\n \"url\": {\n \"type\": \"data.input\",\n \"path\": \"Back\"\n },\n \"minWidth\": 350,\n \"rotatable\": true\n }\n ]\n },\n {\n \"type\": \"layout.columns\",\n \"items\": [\n {\n \"type\": \"view.image\",\n \"url\": {\n \"type\": \"data.input\",\n \"path\": \"Left\"\n },\n \"minWidth\": 350,\n \"rotatable\": true\n },\n {\n \"type\": \"view.image\",\n \"url\": {\n \"type\": \"data.input\",\n \"path\": \"Right\"\n },\n \"minWidth\": 350,\n \"rotatable\": true\n }\n ]\n }\n ]\n },\n \"controls\": {\n \"type\": \"field.radio-group\",\n \"label\": \"\u041a \u043a\u0430\u043a\u043e\u0439 \u0433\u0440\u0443\u043f\u043f\u0435 \u043f\u043e \u0443\u0440\u043e\u0432\u043d\u044e \u043f\u043e\u0432\u0440\u0435\u0436\u0434\u0435\u043d\u0438\u0439 \u043e\u0442\u043d\u043e\u0441\u0438\u0442\u0441\u044f \u0422\u0421 \u0432\u044b\u0448\u0435?\",\n \"options\": [\n {\n \"label\": \"\u041d\u0435\u0442 \u0438\u043b\u0438 \u043f\u043e\u0447\u0442\u0438 \u043d\u0435\u0442 \u043f\u043e\u0432\u0440\u0435\u0436\u0434\u0435\u043d\u0438\u0439\",\n \"value\": \"YES\"\n },\n {\n \"label\": \"\u0415\u0441\u0442\u044c \u043f\u043e\u0432\u0440\u0435\u0436\u0434\u0435\u043d\u0438\u044f\",\n \"value\": \"NO\"\n }\n ],\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"isDefect4\"\n },\n \"validation\": {\n \"type\": \"condition.required\",\n \"hint\": \"\u041d\u0443\u0436\u043d\u043e \u0432\u044b\u0431\u0440\u0430\u0442\u044c \u043e\u0434\u0438\u043d \u0438\u0437 \u0432\u0430\u0440\u0438\u0430\u043d\u0442\u043e\u0432\"\n }\n }\n },\n \"plugins\": [\n {\n 
\"1\": {\n \"type\": \"action.set\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"isDefect4\"\n },\n \"payload\": \"YES\"\n },\n \"2\": {\n \"type\": \"action.set\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"isDefect4\"\n },\n \"payload\": \"NO\"\n },\n \"type\": \"plugin.hotkeys\"\n }\n ]\n}, {\"action.set\":\"1.0.0\",\"condition.required\":\"1.0.1\",\"core\":\"1.2.2\",\"field.radio-group\":\"1.0.0\",\"layout.columns\":\"1.0.0\",\"layout.sidebar\":\"1.0.0\",\"plugin.hotkeys\":\"1.0.1\",\"view.image\":\"1.0.2\",\"view.list\":\"1.0.0\"}, 'https://tb.yandex.net/registry2')"
},
{
"config": "{\n \"vars\": {\n \"author\": \"assessorlead\",\n \"task\": \"old\"\n },\n \"view\": {\n \"type\": \"layout.sidebar\",\n \"content\": {\n \"type\": \"view.list\",\n \"items\": [\n {\n \"type\": \"layout.columns\",\n \"items\": [\n {\n \"type\": \"view.image\",\n \"url\": {\n \"type\": \"data.input\",\n \"path\": \"Front\"\n },\n \"minWidth\": 350,\n \"rotatable\": true\n },\n {\n \"type\": \"view.image\",\n \"url\": {\n \"type\": \"data.input\",\n \"path\": \"Back\"\n },\n \"minWidth\": 350,\n \"rotatable\": true\n }\n ]\n },\n {\n \"type\": \"layout.columns\",\n \"items\": [\n {\n \"type\": \"view.image\",\n \"url\": {\n \"type\": \"data.input\",\n \"path\": \"Left\"\n },\n \"minWidth\": 350,\n \"rotatable\": true\n },\n {\n \"type\": \"view.image\",\n \"url\": {\n \"type\": \"data.input\",\n \"path\": \"Right\"\n },\n \"minWidth\": 350,\n \"rotatable\": true\n }\n ]\n }\n ]\n },\n \"controls\": {\n \"type\": \"field.radio-group\",\n \"label\": \"\u041e\u0446\u0435\u043d\u0438\u0442\u0435 \u043d\u0430\u043b\u0438\u0447\u0438\u0435 \u0438 \u043a\u0430\u0447\u0435\u0441\u0442\u0432\u043e \u0444\u043e\u0442\u043e:\",\n \"options\": [\n {\n \"label\": \"\u0424\u043e\u0442\u043e \u0441 \u0443\u0441\u0442\u0440\u043e\u0439\u0441\u0442\u0432\",\n \"value\": \"SCREEN_PHOTO\"\n },\n {\n \"label\": \"\u041d\u0435 \u0432\u0441\u0451 \u0422\u0421 \u043f\u043e\u043f\u0430\u043b\u043e \u0432 \u043a\u0430\u0434\u0440 (\u043d\u0435\u0432\u043e\u0437\u043c\u043e\u0436\u043d\u043e \u0440\u0430\u0437\u043b\u0438\u0447\u0438\u0442\u044c \u0433\u043e\u0441\u043d\u043e\u043c\u0435\u0440, \u043c\u043e\u0434\u0435\u043b\u044c, \u0446\u0432\u0435\u0442 \u0438\u043b\u0438 \u0441\u043e\u0441\u0442\u043e\u044f\u043d\u0438\u0435 \u043a\u0443\u0437\u043e\u0432\u0430)\",\n \"value\": \"INCOMPLETE_CAPTURE\"\n },\n {\n \"label\": \"\u041d\u0435\u0442 \u043e\u0434\u043d\u043e\u0439, \u043d\u0435\u0441\u043a\u043e\u043b\u044c\u043a\u0438\u0445 \u0438\u043b\u0438 \u043d\u0438 
\u043e\u0434\u043d\u043e\u0439 \u0444\u043e\u0442\u043e\u0433\u0440\u0430\u0444\u0438\u0438 \u0422\u0421\",\n \"value\": \"LACK_OF_PHOTOS\"\n },\n {\n \"label\": \"\u041d\u0435\u0447\u0451\u0442\u043a\u043e\u0435 \u0444\u043e\u0442\u043e (\u043d\u0435\u0432\u043e\u0437\u043c\u043e\u0436\u043d\u043e \u0440\u0430\u0437\u043b\u0438\u0447\u0438\u0442\u044c \u0433\u043e\u0441\u043d\u043e\u043c\u0435\u0440, \u043c\u043e\u0434\u0435\u043b\u044c, \u0446\u0432\u0435\u0442 \u0438\u043b\u0438 \u0441\u043e\u0441\u0442\u043e\u044f\u043d\u0438\u0435 \u043a\u0443\u0437\u043e\u0432\u0430)\",\n \"value\": \"BLURRY_PHOTO\"\n },\n {\n \"label\": \"\u0422\u0451\u043c\u043d\u043e\u0435 \u0444\u043e\u0442\u043e (\u043d\u0435\u0432\u043e\u0437\u043c\u043e\u0436\u043d\u043e \u0440\u0430\u0437\u043b\u0438\u0447\u0438\u0442\u044c \u0433\u043e\u0441\u043d\u043e\u043c\u0435\u0440, \u043c\u043e\u0434\u0435\u043b\u044c, \u0446\u0432\u0435\u0442 \u0438\u043b\u0438 \u0441\u043e\u0441\u0442\u043e\u044f\u043d\u0438\u0435 \u043a\u0443\u0437\u043e\u0432\u0430)\",\n \"value\": \"DARK_PHOTO\"\n },\n {\n \"label\": \"\u0412\u0441\u0451 \u0432 \u043f\u043e\u0440\u044f\u0434\u043a\u0435\",\n \"value\": \"ALL_GOOD\"\n }\n ],\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"FraudType\"\n },\n \"validation\": {\n \"type\": \"condition.required\",\n \"hint\": \"\u041d\u0443\u0436\u043d\u043e \u0432\u044b\u0431\u0440\u0430\u0442\u044c \u043e\u0434\u0438\u043d \u0438\u0437 \u0432\u0430\u0440\u0438\u0430\u043d\u0442\u043e\u0432\"\n }\n }\n },\n \"plugins\": [\n {\n \"0\": {\n \"type\": \"action.set\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"FraudType\"\n },\n \"payload\": \"ALL_GOOD\"\n },\n \"1\": {\n \"type\": \"action.set\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"FraudType\"\n },\n \"payload\": \"SCREEN_PHOTO\"\n },\n \"2\": {\n \"type\": \"action.set\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"FraudType\"\n },\n \"payload\": \"INCOMPLETE_CAPTURE\"\n 
},\n \"3\": {\n \"type\": \"action.set\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"FraudType\"\n },\n \"payload\": \"LACK_OF_PHOTOS\"\n },\n \"4\": {\n \"type\": \"action.set\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"FraudType\"\n },\n \"payload\": \"BLURRY_PHOTO\"\n },\n \"5\": {\n \"type\": \"action.set\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"FraudType\"\n },\n \"payload\": \"DARK_PHOTO\"\n },\n \"type\": \"plugin.hotkeys\"\n }\n ]\n}",
"lock": {
"action.set": "1.0.0",
"condition.required": "1.0.1",
"core": "1.2.2",
"field.radio-group": "1.0.0",
"layout.columns": "1.0.0",
"layout.sidebar": "1.0.0",
"plugin.hotkeys": "1.0.1",
"view.image": "1.0.2",
"view.list": "1.0.0"
},
"settings": {
"showFinish": True,
"showFullscreen": True,
"showInstructions": True,
"showMessage": True,
"showReward": True,
"showSkip": True,
"showSubmit": True,
"showTimer": True,
"showTitle": True
},
"type": "tb",
"assets": {
"script_urls": [
"https://yastat.net/s3/tb/launcher.template/_/master.825664685.2020-11-19.6390a7cf.js"
],
"style_urls": [
"https://yastat.net/s3/tb/launcher.template/_/master.825664685.2020-11-19.6390a7cf.css"
]
},
"markup": "",
"styles": "",
"script": "window.configureTemplate({\n \"vars\": {\n \"author\": \"assessorlead\",\n \"task\": \"old\"\n },\n \"view\": {\n \"type\": \"layout.sidebar\",\n \"content\": {\n \"type\": \"view.list\",\n \"items\": [\n {\n \"type\": \"layout.columns\",\n \"items\": [\n {\n \"type\": \"view.image\",\n \"url\": {\n \"type\": \"data.input\",\n \"path\": \"Front\"\n },\n \"minWidth\": 350,\n \"rotatable\": true\n },\n {\n \"type\": \"view.image\",\n \"url\": {\n \"type\": \"data.input\",\n \"path\": \"Back\"\n },\n \"minWidth\": 350,\n \"rotatable\": true\n }\n ]\n },\n {\n \"type\": \"layout.columns\",\n \"items\": [\n {\n \"type\": \"view.image\",\n \"url\": {\n \"type\": \"data.input\",\n \"path\": \"Left\"\n },\n \"minWidth\": 350,\n \"rotatable\": true\n },\n {\n \"type\": \"view.image\",\n \"url\": {\n \"type\": \"data.input\",\n \"path\": \"Right\"\n },\n \"minWidth\": 350,\n \"rotatable\": true\n }\n ]\n }\n ]\n },\n \"controls\": {\n \"type\": \"field.radio-group\",\n \"label\": \"\u041e\u0446\u0435\u043d\u0438\u0442\u0435 \u043d\u0430\u043b\u0438\u0447\u0438\u0435 \u0438 \u043a\u0430\u0447\u0435\u0441\u0442\u0432\u043e \u0444\u043e\u0442\u043e:\",\n \"options\": [\n {\n \"label\": \"\u0424\u043e\u0442\u043e \u0441 \u0443\u0441\u0442\u0440\u043e\u0439\u0441\u0442\u0432\",\n \"value\": \"SCREEN_PHOTO\"\n },\n {\n \"label\": \"\u041d\u0435 \u0432\u0441\u0451 \u0422\u0421 \u043f\u043e\u043f\u0430\u043b\u043e \u0432 \u043a\u0430\u0434\u0440 (\u043d\u0435\u0432\u043e\u0437\u043c\u043e\u0436\u043d\u043e \u0440\u0430\u0437\u043b\u0438\u0447\u0438\u0442\u044c \u0433\u043e\u0441\u043d\u043e\u043c\u0435\u0440, \u043c\u043e\u0434\u0435\u043b\u044c, \u0446\u0432\u0435\u0442 \u0438\u043b\u0438 \u0441\u043e\u0441\u0442\u043e\u044f\u043d\u0438\u0435 \u043a\u0443\u0437\u043e\u0432\u0430)\",\n \"value\": \"INCOMPLETE_CAPTURE\"\n },\n {\n \"label\": \"\u041d\u0435\u0442 \u043e\u0434\u043d\u043e\u0439, \u043d\u0435\u0441\u043a\u043e\u043b\u044c\u043a\u0438\u0445 \u0438\u043b\u0438 
\u043d\u0438 \u043e\u0434\u043d\u043e\u0439 \u0444\u043e\u0442\u043e\u0433\u0440\u0430\u0444\u0438\u0438 \u0422\u0421\",\n \"value\": \"LACK_OF_PHOTOS\"\n },\n {\n \"label\": \"\u041d\u0435\u0447\u0451\u0442\u043a\u043e\u0435 \u0444\u043e\u0442\u043e (\u043d\u0435\u0432\u043e\u0437\u043c\u043e\u0436\u043d\u043e \u0440\u0430\u0437\u043b\u0438\u0447\u0438\u0442\u044c \u0433\u043e\u0441\u043d\u043e\u043c\u0435\u0440, \u043c\u043e\u0434\u0435\u043b\u044c, \u0446\u0432\u0435\u0442 \u0438\u043b\u0438 \u0441\u043e\u0441\u0442\u043e\u044f\u043d\u0438\u0435 \u043a\u0443\u0437\u043e\u0432\u0430)\",\n \"value\": \"BLURRY_PHOTO\"\n },\n {\n \"label\": \"\u0422\u0451\u043c\u043d\u043e\u0435 \u0444\u043e\u0442\u043e (\u043d\u0435\u0432\u043e\u0437\u043c\u043e\u0436\u043d\u043e \u0440\u0430\u0437\u043b\u0438\u0447\u0438\u0442\u044c \u0433\u043e\u0441\u043d\u043e\u043c\u0435\u0440, \u043c\u043e\u0434\u0435\u043b\u044c, \u0446\u0432\u0435\u0442 \u0438\u043b\u0438 \u0441\u043e\u0441\u0442\u043e\u044f\u043d\u0438\u0435 \u043a\u0443\u0437\u043e\u0432\u0430)\",\n \"value\": \"DARK_PHOTO\"\n },\n {\n \"label\": \"\u0412\u0441\u0451 \u0432 \u043f\u043e\u0440\u044f\u0434\u043a\u0435\",\n \"value\": \"ALL_GOOD\"\n }\n ],\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"FraudType\"\n },\n \"validation\": {\n \"type\": \"condition.required\",\n \"hint\": \"\u041d\u0443\u0436\u043d\u043e \u0432\u044b\u0431\u0440\u0430\u0442\u044c \u043e\u0434\u0438\u043d \u0438\u0437 \u0432\u0430\u0440\u0438\u0430\u043d\u0442\u043e\u0432\"\n }\n }\n },\n \"plugins\": [\n {\n \"0\": {\n \"type\": \"action.set\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"FraudType\"\n },\n \"payload\": \"ALL_GOOD\"\n },\n \"1\": {\n \"type\": \"action.set\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"FraudType\"\n },\n \"payload\": \"SCREEN_PHOTO\"\n },\n \"2\": {\n \"type\": \"action.set\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"FraudType\"\n },\n \"payload\": 
\"INCOMPLETE_CAPTURE\"\n },\n \"3\": {\n \"type\": \"action.set\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"FraudType\"\n },\n \"payload\": \"LACK_OF_PHOTOS\"\n },\n \"4\": {\n \"type\": \"action.set\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"FraudType\"\n },\n \"payload\": \"BLURRY_PHOTO\"\n },\n \"5\": {\n \"type\": \"action.set\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"FraudType\"\n },\n \"payload\": \"DARK_PHOTO\"\n },\n \"type\": \"plugin.hotkeys\"\n }\n ]\n}, {\"action.set\":\"1.0.0\",\"condition.required\":\"1.0.1\",\"core\":\"1.2.2\",\"field.radio-group\":\"1.0.0\",\"layout.columns\":\"1.0.0\",\"layout.sidebar\":\"1.0.0\",\"plugin.hotkeys\":\"1.0.1\",\"view.image\":\"1.0.2\",\"view.list\":\"1.0.0\"}, 'https://tb.yandex.net/registry2')"
},
{
"config": "{\n \"vars\": {\n \"author\": \"assessorlead\",\n \"task\": \"old\"\n },\n \"view\": {\n \"type\": \"layout.sidebar\",\n \"content\": {\n \"type\": \"view.list\",\n \"items\": [\n {\n \"type\": \"layout.columns\",\n \"items\": [\n {\n \"type\": \"view.image\",\n \"url\": {\n \"type\": \"data.input\",\n \"path\": \"Front\"\n },\n \"minWidth\": 350,\n \"rotatable\": true\n },\n {\n \"type\": \"view.image\",\n \"url\": {\n \"type\": \"data.input\",\n \"path\": \"Back\"\n },\n \"minWidth\": 350,\n \"rotatable\": true\n }\n ]\n },\n {\n \"type\": \"layout.columns\",\n \"items\": [\n {\n \"type\": \"view.image\",\n \"url\": {\n \"type\": \"data.input\",\n \"path\": \"Left\"\n },\n \"minWidth\": 350,\n \"rotatable\": true\n },\n {\n \"type\": \"view.image\",\n \"url\": {\n \"type\": \"data.input\",\n \"path\": \"Right\"\n },\n \"minWidth\": 350,\n \"rotatable\": true\n }\n ]\n }\n ]\n },\n \"controls\": {\n \"type\": \"field.radio-group\",\n \"label\": \"\u041e\u0446\u0435\u043d\u0438\u0442\u0435 \u043d\u0430\u043b\u0438\u0447\u0438\u0435 \u0431\u0440\u0435\u043d\u0434\u0438\u043d\u0433\u0430:\",\n \"options\": [\n {\n \"label\": \"\u042f\u043d\u0434\u0435\u043a\u0441.\u0422\u0430\u043a\u0441\u0438 (Yango)\",\n \"value\": \"YANDEX\"\n },\n {\n \"label\": \"\u0423\u0431\u0435\u0440\",\n \"value\": \"UBER\"\n },\n {\n \"label\": \"\u041d\u0435\u0432\u043e\u0437\u043c\u043e\u0436\u043d\u043e \u0440\u0430\u0437\u0433\u043b\u044f\u0434\u0435\u0442\u044c \u0445\u043e\u0442\u044f \u0431\u044b \u043d\u0430 \u043e\u0434\u043d\u043e\u043c \u0438\u0437 \u0444\u043e\u0442\u043e\",\n \"value\": \"UNKNOWN\"\n },\n {\n \"label\": \"\u0414\u0440\u0443\u0433\u043e\u0435 \u0431\u0440\u0435\u043d\u0434\u0438\u0440\u043e\u0432\u0430\u043d\u0438\u0435\",\n \"value\": \"OTHER_BRANDING\"\n },\n {\n \"label\": \"\u041f\u0440\u043e\u0447\u0430\u044f \u0440\u0435\u043a\u043b\u0430\u043c\u0430\",\n \"value\": \"ADS\"\n },\n {\n \"label\": \"\u041d\u0435\u0442 \u043d\u0438 
\u0431\u0440\u0435\u043d\u0434\u0438\u043d\u0433\u0430, \u043d\u0438 \u0440\u0435\u043a\u043b\u0430\u043c\u044b\",\n \"value\": \"BRANDING_FREE\"\n }\n ],\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"BrandingType\"\n },\n \"validation\": {\n \"type\": \"condition.required\",\n \"hint\": \"\u041d\u0443\u0436\u043d\u043e \u0432\u044b\u0431\u0440\u0430\u0442\u044c \u043e\u0434\u0438\u043d \u0438\u0437 \u0432\u0430\u0440\u0438\u0430\u043d\u0442\u043e\u0432\"\n }\n }\n },\n \"plugins\": [\n {\n \"y\": {\n \"type\": \"action.set\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"BrandingType\"\n },\n \"payload\": \"YANDEX\"\n },\n \"u\": {\n \"type\": \"action.set\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"BrandingType\"\n },\n \"payload\": \"UBER\"\n },\n \"v\": {\n \"type\": \"action.set\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"BrandingType\"\n },\n \"payload\": \"UNKNOWN\"\n },\n \"o\": {\n \"type\": \"action.set\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"BrandingType\"\n },\n \"payload\": \"OTHER_BRANDING\"\n },\n \"a\": {\n \"type\": \"action.set\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"BrandingType\"\n },\n \"payload\": \"ADS\"\n },\n \"n\": {\n \"type\": \"action.set\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"BrandingType\"\n },\n \"payload\": \"BRANDING_FREE\"\n },\n \"type\": \"plugin.hotkeys\"\n }\n ]\n}",
"lock": {
"action.set": "1.0.0",
"condition.required": "1.0.1",
"core": "1.2.2",
"field.radio-group": "1.0.0",
"layout.columns": "1.0.0",
"layout.sidebar": "1.0.0",
"plugin.hotkeys": "1.0.1",
"view.image": "1.0.2",
"view.list": "1.0.0"
},
"settings": {
"showFinish": True,
"showFullscreen": True,
"showInstructions": True,
"showMessage": True,
"showReward": True,
"showSkip": True,
"showSubmit": True,
"showTimer": True,
"showTitle": True
},
"type": "tb",
"assets": {
"script_urls": [
"https://yastat.net/s3/tb/launcher.template/_/master.825664685.2020-11-19.6390a7cf.js"
],
"style_urls": [
"https://yastat.net/s3/tb/launcher.template/_/master.825664685.2020-11-19.6390a7cf.css"
]
},
"markup": "",
"styles": "",
"script": "window.configureTemplate({\n \"vars\": {\n \"author\": \"assessorlead\",\n \"task\": \"old\"\n },\n \"view\": {\n \"type\": \"layout.sidebar\",\n \"content\": {\n \"type\": \"view.list\",\n \"items\": [\n {\n \"type\": \"layout.columns\",\n \"items\": [\n {\n \"type\": \"view.image\",\n \"url\": {\n \"type\": \"data.input\",\n \"path\": \"Front\"\n },\n \"minWidth\": 350,\n \"rotatable\": true\n },\n {\n \"type\": \"view.image\",\n \"url\": {\n \"type\": \"data.input\",\n \"path\": \"Back\"\n },\n \"minWidth\": 350,\n \"rotatable\": true\n }\n ]\n },\n {\n \"type\": \"layout.columns\",\n \"items\": [\n {\n \"type\": \"view.image\",\n \"url\": {\n \"type\": \"data.input\",\n \"path\": \"Left\"\n },\n \"minWidth\": 350,\n \"rotatable\": true\n },\n {\n \"type\": \"view.image\",\n \"url\": {\n \"type\": \"data.input\",\n \"path\": \"Right\"\n },\n \"minWidth\": 350,\n \"rotatable\": true\n }\n ]\n }\n ]\n },\n \"controls\": {\n \"type\": \"field.radio-group\",\n \"label\": \"\u041e\u0446\u0435\u043d\u0438\u0442\u0435 \u043d\u0430\u043b\u0438\u0447\u0438\u0435 \u0431\u0440\u0435\u043d\u0434\u0438\u043d\u0433\u0430:\",\n \"options\": [\n {\n \"label\": \"\u042f\u043d\u0434\u0435\u043a\u0441.\u0422\u0430\u043a\u0441\u0438 (Yango)\",\n \"value\": \"YANDEX\"\n },\n {\n \"label\": \"\u0423\u0431\u0435\u0440\",\n \"value\": \"UBER\"\n },\n {\n \"label\": \"\u041d\u0435\u0432\u043e\u0437\u043c\u043e\u0436\u043d\u043e \u0440\u0430\u0437\u0433\u043b\u044f\u0434\u0435\u0442\u044c \u0445\u043e\u0442\u044f \u0431\u044b \u043d\u0430 \u043e\u0434\u043d\u043e\u043c \u0438\u0437 \u0444\u043e\u0442\u043e\",\n \"value\": \"UNKNOWN\"\n },\n {\n \"label\": \"\u0414\u0440\u0443\u0433\u043e\u0435 \u0431\u0440\u0435\u043d\u0434\u0438\u0440\u043e\u0432\u0430\u043d\u0438\u0435\",\n \"value\": \"OTHER_BRANDING\"\n },\n {\n \"label\": \"\u041f\u0440\u043e\u0447\u0430\u044f \u0440\u0435\u043a\u043b\u0430\u043c\u0430\",\n \"value\": \"ADS\"\n },\n {\n \"label\": \"\u041d\u0435\u0442 
\u043d\u0438 \u0431\u0440\u0435\u043d\u0434\u0438\u043d\u0433\u0430, \u043d\u0438 \u0440\u0435\u043a\u043b\u0430\u043c\u044b\",\n \"value\": \"BRANDING_FREE\"\n }\n ],\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"BrandingType\"\n },\n \"validation\": {\n \"type\": \"condition.required\",\n \"hint\": \"\u041d\u0443\u0436\u043d\u043e \u0432\u044b\u0431\u0440\u0430\u0442\u044c \u043e\u0434\u0438\u043d \u0438\u0437 \u0432\u0430\u0440\u0438\u0430\u043d\u0442\u043e\u0432\"\n }\n }\n },\n \"plugins\": [\n {\n \"y\": {\n \"type\": \"action.set\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"BrandingType\"\n },\n \"payload\": \"YANDEX\"\n },\n \"u\": {\n \"type\": \"action.set\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"BrandingType\"\n },\n \"payload\": \"UBER\"\n },\n \"v\": {\n \"type\": \"action.set\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"BrandingType\"\n },\n \"payload\": \"UNKNOWN\"\n },\n \"o\": {\n \"type\": \"action.set\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"BrandingType\"\n },\n \"payload\": \"OTHER_BRANDING\"\n },\n \"a\": {\n \"type\": \"action.set\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"BrandingType\"\n },\n \"payload\": \"ADS\"\n },\n \"n\": {\n \"type\": \"action.set\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"BrandingType\"\n },\n \"payload\": \"BRANDING_FREE\"\n },\n \"type\": \"plugin.hotkeys\"\n }\n ]\n}, {\"action.set\":\"1.0.0\",\"condition.required\":\"1.0.1\",\"core\":\"1.2.2\",\"field.radio-group\":\"1.0.0\",\"layout.columns\":\"1.0.0\",\"layout.sidebar\":\"1.0.0\",\"plugin.hotkeys\":\"1.0.1\",\"view.image\":\"1.0.2\",\"view.list\":\"1.0.0\"}, 'https://tb.yandex.net/registry2')"
},
{
"config": "{\n \"vars\": {\n \"author\": \"assessorlead\",\n \"task\": \"old\",\n \"outputLinks\": {\n \"type\": \"action.set\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"links\"\n },\n \"payload\": {\n \"type\": \"helper.transform\",\n \"items\": {\n \"type\": \"data.internal\",\n \"path\": \"result\"\n },\n \"into\": {\n \"type\": \"data.local\",\n \"path\": \"item.url\"\n }\n }\n },\n \"outputScreenshots\": {\n \"type\": \"action.set\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"screenshots\"\n },\n \"payload\": {\n \"type\": \"helper.transform\",\n \"items\": {\n \"type\": \"data.internal\",\n \"path\": \"result\"\n },\n \"into\": {\n \"type\": \"data.local\",\n \"path\": \"item.screenshot\"\n }\n }\n },\n \"previewImage\": {\n \"type\": \"view.image\",\n \"url\": {\n \"type\": \"data.input\",\n \"path\": \"coverImg\",\n \"default\": \"\"\n }\n },\n \"incomingData\": {\n \"type\": \"view.labeled-list\",\n \"minWidth\": 600,\n \"items\": [\n {\n \"label\": \"\u0417\u0430\u043f\u0440\u043e\u0441:\",\n \"content\": {\n \"type\": \"view.text\",\n \"content\": {\n \"type\": \"data.input\",\n \"path\": \"request\",\n \"default\": \"\"\n }\n }\n },\n {\n \"label\": \"\u041d\u0430\u0437\u0432\u0430\u043d\u0438\u0435:\",\n \"content\": {\n \"type\": \"view.text\",\n \"content\": {\n \"type\": \"data.input\",\n \"path\": \"contentName\",\n \"default\": \"\"\n }\n }\n },\n {\n \"label\": \"\u041d\u0430\u0437\u0432\u0430\u043d\u0438\u0435 \u043d\u0430 \u044f\u0437\u044b\u043a\u0435 \u043e\u0440\u0438\u0433\u0438\u043d\u0430\u043b\u0430:\",\n \"content\": {\n \"type\": \"view.text\",\n \"content\": {\n \"type\": \"data.input\",\n \"path\": \"contentNameEng\",\n \"default\": \"\"\n }\n }\n },\n {\n \"label\": \"\u0421\u0442\u0440\u0430\u043d\u0430:\",\n \"content\": {\n \"type\": \"view.text\",\n \"content\": {\n \"type\": \"data.input\",\n \"path\": \"country\",\n \"default\": \"\"\n }\n }\n },\n {\n \"label\": \"\u0413\u043e\u0434\u044b 
\u0432\u044b\u043f\u0443\u0441\u043a\u0430\",\n \"content\": {\n \"type\": \"view.text\",\n \"content\": {\n \"type\": \"data.input\",\n \"path\": \"year\",\n \"default\": \"\"\n }\n }\n },\n {\n \"label\": \"\u041d\u0443\u0436\u043d\u044b\u0435 \u0441\u0435\u0437\u043e\u043d\u044b:\",\n \"content\": {\n \"type\": \"view.text\",\n \"content\": {\n \"type\": \"data.input\",\n \"path\": \"seasons\",\n \"default\": \"\"\n }\n }\n },\n {\n \"label\": \"\u0421\u0435\u0440\u0438\u0439 \u0432 \u0441\u0435\u0437\u043e\u043d\u0435 (\u0434\u043b\u044f \u0441\u0435\u0440\u0438\u0430\u043b\u043e\u0432):\",\n \"content\": {\n \"type\": \"view.text\",\n \"content\": {\n \"type\": \"data.input\",\n \"path\": \"episodes\",\n \"default\": \"\"\n }\n }\n },\n {\n \"label\": \"\u041f\u0440\u0438\u043c\u0435\u0440\u043d\u0430\u044f \u043f\u0440\u043e\u0434\u043e\u043b\u0436\u0438\u0442\u0435\u043b\u044c\u043d\u043e\u0441\u0442\u044c \u0437\u0430\u043f\u0438\u0441\u0438:\",\n \"content\": {\n \"type\": \"view.text\",\n \"content\": {\n \"type\": \"data.input\",\n \"path\": \"episodeDurationMin\",\n \"default\": \"\"\n }\n }\n },\n {\n \"label\": \"\u041a\u043e\u043c\u043c\u0435\u043d\u0442\u0430\u0440\u0438\u0439:\",\n \"content\": {\n \"type\": \"view.text\",\n \"content\": {\n \"type\": \"data.input\",\n \"path\": \"comment\",\n \"default\": \"\"\n }\n }\n }\n ]\n },\n \"incomingData2\": {\n \"type\": \"view.list\",\n \"items\": [\n {\n \"type\": \"view.action-button\",\n \"action\": {\n \"type\": \"action.open-link\",\n \"payload\": {\n \"type\": \"data.input\",\n \"path\": \"kinopoiskLink\",\n \"default\": \"\"\n }\n },\n \"label\": \"\u0421\u0442\u0440\u0430\u043d\u0438\u0446\u0430 \u0444\u0438\u043b\u044c\u043c\u0430 \u043d\u0430 \u041a\u0438\u043d\u043e\u043f\u043e\u0438\u0441\u043a\u0435\"\n },\n {\n \"type\": \"view.action-button\",\n \"action\": {\n \"type\": \"action.open-link\",\n \"payload\": {\n \"type\": \"data.input\",\n \"path\": \"trailer\",\n \"default\": \"\"\n }\n 
},\n \"label\": \"\u0422\u0440\u0435\u0439\u043b\u0435\u0440\"\n }\n ]\n },\n \"yandex\": {\n \"type\": \"helper.search-query\",\n \"query\": {\n \"type\": \"data.input\",\n \"path\": \"request\",\n \"default\": \"\"\n },\n \"engine\": \"yandex\"\n },\n \"vk\": {\n \"type\": \"helper.join\",\n \"by\": \"\",\n \"items\": [\n \"https://vk.com/video?len=2&q=\",\n {\n \"type\": \"data.input\",\n \"path\": \"contentName\",\n \"default\": \"\"\n }\n ]\n },\n \"mail\": {\n \"type\": \"helper.join\",\n \"by\": \"\",\n \"items\": [\n \"https://my.mail.ru/video/search?q=\",\n {\n \"type\": \"data.input\",\n \"path\": \"contentName\",\n \"default\": \"\"\n },\n \"&duration=medium&duration=long\"\n ]\n },\n \"rutube\": {\n \"type\": \"helper.join\",\n \"by\": \"\",\n \"items\": [\n \"https://rutube.ru/search/?query=\",\n {\n \"type\": \"data.input\",\n \"path\": \"contentName\",\n \"default\": \"\"\n },\n \"&duration=long\"\n ]\n },\n \"youtube\": {\n \"type\": \"helper.join\",\n \"by\": \"\",\n \"items\": [\n \"https://www.youtube.com/results?search_query=\",\n {\n \"type\": \"data.input\",\n \"path\": \"contentName\",\n \"default\": \"\"\n }\n ]\n },\n \"dzen\": {\n \"type\": \"helper.join\",\n \"by\": \"\",\n \"items\": [\n \"https://zen.yandex.ru/search?query=\",\n {\n \"type\": \"data.input\",\n \"path\": \"contentName\",\n \"default\": \"\"\n }\n ]\n }\n },\n \"view\": {\n \"type\": \"view.list\",\n \"items\": [\n {\n \"type\": \"view.group\",\n \"label\": {\n \"type\": \"helper.join\",\n \"by\": \" \",\n \"items\": [\n \"\u041a\u043e\u043d\u0442\u0435\u043d\u0442: \",\n {\n \"type\": \"data.input\",\n \"path\": \"contentType\",\n \"default\": \"\"\n },\n {\n \"type\": \"helper.join\",\n \"by\": \"\",\n \"items\": [\n \" \\\"\",\n {\n \"type\": \"data.input\",\n \"path\": \"contentName\",\n \"default\": \"\"\n },\n \"\\\"\"\n ]\n }\n ]\n },\n \"content\": {\n \"type\": \"layout.columns\",\n \"ratio\": [\n 2,\n 2\n ],\n \"items\": [\n {\n \"$ref\": \"vars.previewImage\"\n 
},\n {\n \"type\": \"view.list\",\n \"items\": [\n {\n \"$ref\": \"vars.incomingData\"\n },\n {\n \"$ref\": \"vars.incomingData2\"\n }\n ]\n }\n ]\n }\n },\n {\n \"type\": \"view.group\",\n \"label\": \"\u041f\u043e\u0438\u0441\u043a \u043f\u043e\u0434\u043e\u0437\u0440\u0438\u0442\u0435\u043b\u044c\u043d\u044b\u0445 \u0441\u0441\u044b\u043b\u043e\u043a\",\n \"content\": {\n \"type\": \"view.list\",\n \"items\": [\n {\n \"type\": \"view.list\",\n \"label\": \"\u041f\u043e\u0438\u0449\u0438\u0442\u0435 \u0441\u0441\u044b\u043b\u043a\u0438 \u0441 \u043a\u043e\u043d\u0442\u0435\u043d\u0442\u043e\u043c \u0432 \u0440\u0430\u0437\u043d\u044b\u0445 \u0441\u0438\u0441\u0442\u0435\u043c\u0430\u0445\",\n \"direction\": \"horizontal\",\n \"items\": [\n {\n \"type\": \"view.action-button\",\n \"label\": \"\u0412\u044b\u0434\u0430\u0447\u0430 \u042f\u043d\u0434\u0435\u043a\u0441\u0430\",\n \"action\": {\n \"type\": \"action.open-link\",\n \"payload\": {\n \"$ref\": \"vars.yandex\"\n }\n }\n },\n {\n \"type\": \"view.action-button\",\n \"label\": \"\u0412\u0438\u0434\u0435\u043e \u0412\u041a\",\n \"action\": {\n \"type\": \"action.open-link\",\n \"payload\": {\n \"$ref\": \"vars.vk\"\n }\n }\n },\n {\n \"type\": \"view.action-button\",\n \"label\": \"\u0412\u0438\u0434\u0435\u043e Mail.ru\",\n \"action\": {\n \"type\": \"action.open-link\",\n \"payload\": {\n \"$ref\": \"vars.mail\"\n }\n }\n },\n {\n \"type\": \"view.action-button\",\n \"label\": \"Rutube\",\n \"action\": {\n \"type\": \"action.open-link\",\n \"payload\": {\n \"$ref\": \"vars.rutube\"\n }\n }\n },\n {\n \"type\": \"view.action-button\",\n \"label\": \"YouTube\",\n \"action\": {\n \"type\": \"action.open-link\",\n \"payload\": {\n \"$ref\": \"vars.youtube\"\n }\n }\n },\n {\n \"type\": \"view.action-button\",\n \"label\": \"\u042f\u043d\u0434\u0435\u043a\u0441.\u0414\u0437\u0435\u043d\",\n \"action\": {\n \"type\": \"action.open-link\",\n \"payload\": {\n \"$ref\": \"vars.dzen\"\n }\n }\n },\n {\n \"type\": 
\"view.action-button\",\n \"label\": \"\u041e\u0434\u043d\u043e\u043a\u043b\u0430\u0441\u0441\u043d\u0438\u043a\u0438 (\u043d\u0430\u0434\u043e \u0432\u0431\u0438\u0442\u044c \u043d\u0430\u0437\u0432\u0430\u043d\u0438\u0435 \u0440\u0443\u043a\u0430\u043c\u0438)\",\n \"action\": {\n \"type\": \"action.open-link\",\n \"payload\": \"https://ok.ru/video\"\n }\n }\n ]\n },\n {\n \"type\": \"helper.if\",\n \"condition\": {\n \"type\": \"condition.equals\",\n \"to\": false,\n \"data\": {\n \"type\": \"data.internal\",\n \"path\": \"noLinks\"\n }\n },\n \"then\": {\n \"type\": \"field.list\",\n \"data\": {\n \"type\": \"data.internal\",\n \"path\": \"result\",\n \"default\": [\n \"\"\n ]\n },\n \"label\": \"\u041f\u0440\u043e\u0432\u0435\u0440\u044c\u0442\u0435 \u0441\u0441\u044b\u043b\u043a\u0438 \u043d\u0430 \u0432\u044b\u0434\u0430\u0447\u0435 \u043d\u0430 \u043f\u0440\u0435\u0434\u043c\u0435\u0442 \u043f\u0438\u0440\u0430\u0442\u0441\u0442\u0432\u0430 \u0438 \u0432\u0441\u0442\u0430\u0432\u044c\u0442\u0435 \u0441\u0441\u044b\u043b\u043a\u0438 \u0441 \u043a\u043e\u043d\u0442\u0435\u043d\u0442\u043e\u043c \u0432 \u043f\u043e\u043b\u044f \u043d\u0438\u0436\u0435\",\n \"buttonLabel\": \"\u0414\u043e\u0431\u0430\u0432\u0438\u0442\u044c \u0435\u0449\u0451\",\n \"render\": {\n \"type\": \"view.group\",\n \"content\": {\n \"type\": \"view.list\",\n \"items\": [\n {\n \"type\": \"field.text\",\n \"placeholder\": \"\u0421\u0441\u044b\u043b\u043a\u0430 \u0441 \u043a\u043e\u043d\u0442\u0435\u043d\u0442\u043e\u043c\",\n \"data\": {\n \"type\": \"data.relative\",\n \"path\": \"url\"\n },\n \"validation\": {\n \"type\": \"condition.all\",\n \"conditions\": [\n {\n \"type\": \"condition.all\",\n \"hint\": \"\u041d\u0443\u0436\u043d\u043e \u0441\u043a\u043e\u043f\u0438\u0440\u043e\u0432\u0430\u0442\u044c \u0441\u044e\u0434\u0430 \u0441\u0441\u044b\u043b\u043a\u0443\",\n \"conditions\": [\n {\n \"type\": \"condition.schema\",\n \"schema\": {\n \"type\": \"string\",\n \"pattern\": 
\"^https?://(.{1,63}\\\\.)+.{2,}(/.*)*$\"\n }\n },\n {\n \"type\": \"condition.not\",\n \"condition\": {\n \"type\": \"condition.empty\"\n }\n }\n ]\n },\n {\n \"type\": \"condition.less\",\n \"hint\": \"\u041d\u0443\u0436\u043d\u043e \u0443\u0431\u0440\u0430\u0442\u044c \u043f\u043e\u0432\u0442\u043e\u0440\u044f\u044e\u0449\u0438\u0435\u0441\u044f \u0441\u0441\u044b\u043b\u043a\u0438\",\n \"data\": {\n \"type\": \"helper.sum\",\n \"items\": {\n \"type\": \"helper.transform\",\n \"items\": {\n \"type\": \"data.internal\",\n \"path\": \"result\"\n },\n \"into\": {\n \"type\": \"helper.if\",\n \"condition\": {\n \"type\": \"condition.equals\",\n \"data\": {\n \"type\": \"helper.replace\",\n \"data\": {\n \"type\": \"data.local\",\n \"path\": \"item.url\"\n },\n \"find\": \"http://\",\n \"replace\": \"https://\"\n },\n \"to\": {\n \"type\": \"helper.replace\",\n \"data\": {\n \"type\": \"data.relative\",\n \"path\": \"url\"\n },\n \"find\": \"http://\",\n \"replace\": \"https://\"\n }\n },\n \"then\": 1,\n \"else\": 0\n }\n }\n },\n \"then\": 2\n },\n {\n \"hint\": \"\u042d\u0442\u0430 \u0441\u0441\u044b\u043b\u043a\u0430 \u0443\u0436\u0435 \u0431\u044b\u043b\u0430 \u043d\u0430\u0439\u0434\u0435\u043d\u0430\",\n \"type\": \"condition.not\",\n \"condition\": {\n \"type\": \"condition.contains\",\n \"data\": {\n \"type\": \"helper.replace\",\n \"data\": {\n \"type\": \"data.relative\",\n \"path\": \"url\"\n },\n \"find\": \"http://\",\n \"replace\": \"https://\"\n },\n \"in\": {\n \"type\": \"helper.transform\",\n \"items\": {\n \"type\": \"data.input\",\n \"path\": \"foundUrls\",\n \"default\": []\n },\n \"into\": {\n \"type\": \"helper.replace\",\n \"data\": {\n \"type\": \"data.local\",\n \"path\": \"item\"\n },\n \"find\": \"http://\",\n \"replace\": \"https://\"\n }\n }\n }\n },\n {\n \"type\": \"condition.not\",\n \"hint\": \"\u041d\u0430 \u044d\u0442\u043e\u043c \u0434\u043e\u043c\u0435\u043d\u0435 \u043d\u0435\u0442 
\u043f\u0438\u0440\u0430\u0442\u0441\u043a\u0438\u0445 \u0437\u0430\u043f\u0438\u0441\u0435\u0439. \u0415\u0441\u043b\u0438 \u044d\u0442\u043e \u043d\u0435 \u0442\u0430\u043a, \u043d\u0430\u043f\u0438\u0448\u0438\u0442\u0435 \u0437\u0430\u043a\u0430\u0437\u0447\u0438\u043a\u0443.\",\n \"condition\": {\n \"type\": \"condition.any\",\n \"conditions\": {\n \"type\": \"helper.transform\",\n \"items\": {\n \"type\": \"data.input\",\n \"path\": \"blackList\",\n \"default\": []\n },\n \"into\": {\n \"type\": \"condition.contains\",\n \"data\": {\n \"type\": \"helper.replace\",\n \"data\": {\n \"type\": \"data.local\",\n \"path\": \"item\"\n },\n \"find\": \"http://\",\n \"replace\": \"https://\"\n },\n \"in\": {\n \"type\": \"helper.join\",\n \"items\": [\n {\n \"type\": \"helper.replace\",\n \"data\": {\n \"type\": \"data.relative\",\n \"path\": \"url\"\n },\n \"find\": \"http://\",\n \"replace\": \"https://\"\n }\n ],\n \"by\": \"\"\n }\n }\n }\n }\n }\n ]\n }\n },\n {\n \"type\": \"field.file\",\n \"data\": {\n \"type\": \"data.relative\",\n \"path\": \"screenshot\"\n },\n \"accept\": [\n \"image/jpeg\",\n \"image/png\"\n ],\n \"validation\": {\n \"type\": \"condition.not\",\n \"condition\": {\n \"type\": \"condition.empty\",\n \"hint\": \"\u043f\u0440\u0438\u043a\u0440\u0435\u043f\u0438\u0442\u0435 \u0441\u043a\u0440\u0438\u043d\u0448\u043e\u0442\"\n }\n }\n }\n ]\n }\n }\n }\n },\n {\n \"type\": \"view.divider\"\n },\n {\n \"type\": \"field.checkbox\",\n \"preserveFalse\": true,\n \"label\": \"\u0421\u0441\u044b\u043b\u043e\u043a \u043d\u0435\u0442\",\n \"data\": {\n \"type\": \"data.internal\",\n \"path\": \"noLinks\"\n }\n }\n ]\n }\n }\n ]\n },\n \"plugins\": [\n {\n \"type\": \"plugin.trigger\",\n \"onChangeOf\": {\n \"type\": \"data.internal\",\n \"path\": \"result\"\n },\n \"action\": {\n \"type\": \"action.bulk\",\n \"payload\": [\n {\n \"$ref\": \"vars.outputLinks\"\n },\n {\n \"$ref\": \"vars.outputScreenshots\"\n }\n ]\n }\n },\n {\n \"type\": 
\"plugin.trigger\",\n \"onChangeOf\": {\n \"type\": \"data.internal\",\n \"path\": \"noLinks\"\n },\n \"condition\": {\n \"type\": \"condition.equals\",\n \"to\": true\n },\n \"action\": {\n \"type\": \"action.set\",\n \"data\": {\n \"type\": \"data.internal\",\n \"path\": \"result\"\n },\n \"payload\": {\n \"$empty\": true\n }\n }\n },\n {\n \"type\": \"plugin.trigger\",\n \"onChangeOf\": {\n \"type\": \"data.internal\",\n \"path\": \"result\"\n },\n \"condition\": {\n \"type\": \"condition.not\",\n \"condition\": {\n \"type\": \"condition.empty\"\n }\n },\n \"action\": {\n \"type\": \"action.set\",\n \"data\": {\n \"type\": \"data.internal\",\n \"path\": \"noLinks\"\n },\n \"payload\": false\n }\n },\n {\n \"1\": {\n \"type\": \"action.open-link\",\n \"payload\": {\n \"$ref\": \"vars.yandex\"\n }\n },\n \"2\": {\n \"type\": \"action.open-link\",\n \"payload\": {\n \"$ref\": \"vars.vk\"\n }\n },\n \"3\": {\n \"type\": \"action.open-link\",\n \"payload\": {\n \"$ref\": \"vars.mail\"\n }\n },\n \"4\": {\n \"type\": \"action.open-link\",\n \"payload\": {\n \"$ref\": \"vars.rutube\"\n }\n },\n \"5\": {\n \"type\": \"action.open-link\",\n \"payload\": {\n \"$ref\": \"vars.youtube\"\n }\n },\n \"6\": {\n \"type\": \"action.open-link\",\n \"payload\": {\n \"$ref\": \"vars.dzen\"\n }\n },\n \"7\": {\n \"type\": \"action.open-link\",\n \"payload\": \"https://ok.ru/video\"\n },\n \"up\": {\n \"type\": \"action.open-close\",\n \"view\": {\n \"$ref\": \"vars.previewImage\"\n }\n },\n \"down\": {\n \"type\": \"action.open-close\",\n \"view\": {\n \"$ref\": \"vars.previewImage\"\n }\n },\n \"type\": \"plugin.hotkeys\"\n },\n {\n \"type\": \"plugin.toloka\",\n \"layout\": {\n \"kind\": \"scroll\"\n }\n }\n ]\n}",
"lock": {
"action.bulk": "1.0.0",
"action.open-close": "1.0.0",
"action.open-link": "1.0.0",
"action.set": "1.0.0",
"condition.all": "1.0.1",
"condition.any": "1.0.1",
"condition.contains": "1.0.1",
"condition.empty": "1.0.1",
"condition.equals": "1.0.1",
"condition.less": "1.0.1",
"condition.not": "1.0.0",
"condition.schema": "1.0.1",
"core": "1.2.2",
"field.checkbox": "1.0.0",
"field.file": "1.1.0",
"field.list": "1.0.1",
"field.text": "1.0.0",
"helper.if": "1.0.0",
"helper.join": "1.0.0",
"helper.replace": "1.0.0",
"helper.search-query": "1.0.0",
"helper.sum": "1.0.1",
"helper.transform": "1.0.1",
"layout.columns": "1.0.0",
"plugin.hotkeys": "1.0.1",
"plugin.toloka": "1.0.5",
"plugin.trigger": "1.0.0",
"view.action-button": "1.0.0",
"view.divider": "1.0.0",
"view.group": "1.0.0",
"view.image": "1.0.2",
"view.labeled-list": "1.0.1",
"view.list": "1.0.0",
"view.text": "1.0.0"
},
"settings": {
"showFinish": True,
"showFullscreen": True,
"showInstructions": True,
"showMessage": True,
"showReward": True,
"showSkip": True,
"showSubmit": True,
"showTimer": True,
"showTitle": True
},
"type": "tb",
"assets": {
"script_urls": [
"https://yastat.net/s3/tb/launcher.template/_/master.825664685.2020-11-19.6390a7cf.js"
],
"style_urls": [
"https://yastat.net/s3/tb/launcher.template/_/master.825664685.2020-11-19.6390a7cf.css"
]
},
"markup": "",
"styles": "",
"script": "window.configureTemplate({\n \"vars\": {\n \"author\": \"assessorlead\",\n \"task\": \"old\",\n \"outputLinks\": {\n \"type\": \"action.set\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"links\"\n },\n \"payload\": {\n \"type\": \"helper.transform\",\n \"items\": {\n \"type\": \"data.internal\",\n \"path\": \"result\"\n },\n \"into\": {\n \"type\": \"data.local\",\n \"path\": \"item.url\"\n }\n }\n },\n \"outputScreenshots\": {\n \"type\": \"action.set\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"screenshots\"\n },\n \"payload\": {\n \"type\": \"helper.transform\",\n \"items\": {\n \"type\": \"data.internal\",\n \"path\": \"result\"\n },\n \"into\": {\n \"type\": \"data.local\",\n \"path\": \"item.screenshot\"\n }\n }\n },\n \"previewImage\": {\n \"type\": \"view.image\",\n \"url\": {\n \"type\": \"data.input\",\n \"path\": \"coverImg\",\n \"default\": \"\"\n }\n },\n \"incomingData\": {\n \"type\": \"view.labeled-list\",\n \"minWidth\": 600,\n \"items\": [\n {\n \"label\": \"\u0417\u0430\u043f\u0440\u043e\u0441:\",\n \"content\": {\n \"type\": \"view.text\",\n \"content\": {\n \"type\": \"data.input\",\n \"path\": \"request\",\n \"default\": \"\"\n }\n }\n },\n {\n \"label\": \"\u041d\u0430\u0437\u0432\u0430\u043d\u0438\u0435:\",\n \"content\": {\n \"type\": \"view.text\",\n \"content\": {\n \"type\": \"data.input\",\n \"path\": \"contentName\",\n \"default\": \"\"\n }\n }\n },\n {\n \"label\": \"\u041d\u0430\u0437\u0432\u0430\u043d\u0438\u0435 \u043d\u0430 \u044f\u0437\u044b\u043a\u0435 \u043e\u0440\u0438\u0433\u0438\u043d\u0430\u043b\u0430:\",\n \"content\": {\n \"type\": \"view.text\",\n \"content\": {\n \"type\": \"data.input\",\n \"path\": \"contentNameEng\",\n \"default\": \"\"\n }\n }\n },\n {\n \"label\": \"\u0421\u0442\u0440\u0430\u043d\u0430:\",\n \"content\": {\n \"type\": \"view.text\",\n \"content\": {\n \"type\": \"data.input\",\n \"path\": \"country\",\n \"default\": \"\"\n }\n }\n },\n {\n \"label\": 
\"\u0413\u043e\u0434\u044b \u0432\u044b\u043f\u0443\u0441\u043a\u0430\",\n \"content\": {\n \"type\": \"view.text\",\n \"content\": {\n \"type\": \"data.input\",\n \"path\": \"year\",\n \"default\": \"\"\n }\n }\n },\n {\n \"label\": \"\u041d\u0443\u0436\u043d\u044b\u0435 \u0441\u0435\u0437\u043e\u043d\u044b:\",\n \"content\": {\n \"type\": \"view.text\",\n \"content\": {\n \"type\": \"data.input\",\n \"path\": \"seasons\",\n \"default\": \"\"\n }\n }\n },\n {\n \"label\": \"\u0421\u0435\u0440\u0438\u0439 \u0432 \u0441\u0435\u0437\u043e\u043d\u0435 (\u0434\u043b\u044f \u0441\u0435\u0440\u0438\u0430\u043b\u043e\u0432):\",\n \"content\": {\n \"type\": \"view.text\",\n \"content\": {\n \"type\": \"data.input\",\n \"path\": \"episodes\",\n \"default\": \"\"\n }\n }\n },\n {\n \"label\": \"\u041f\u0440\u0438\u043c\u0435\u0440\u043d\u0430\u044f \u043f\u0440\u043e\u0434\u043e\u043b\u0436\u0438\u0442\u0435\u043b\u044c\u043d\u043e\u0441\u0442\u044c \u0437\u0430\u043f\u0438\u0441\u0438:\",\n \"content\": {\n \"type\": \"view.text\",\n \"content\": {\n \"type\": \"data.input\",\n \"path\": \"episodeDurationMin\",\n \"default\": \"\"\n }\n }\n },\n {\n \"label\": \"\u041a\u043e\u043c\u043c\u0435\u043d\u0442\u0430\u0440\u0438\u0439:\",\n \"content\": {\n \"type\": \"view.text\",\n \"content\": {\n \"type\": \"data.input\",\n \"path\": \"comment\",\n \"default\": \"\"\n }\n }\n }\n ]\n },\n \"incomingData2\": {\n \"type\": \"view.list\",\n \"items\": [\n {\n \"type\": \"view.action-button\",\n \"action\": {\n \"type\": \"action.open-link\",\n \"payload\": {\n \"type\": \"data.input\",\n \"path\": \"kinopoiskLink\",\n \"default\": \"\"\n }\n },\n \"label\": \"\u0421\u0442\u0440\u0430\u043d\u0438\u0446\u0430 \u0444\u0438\u043b\u044c\u043c\u0430 \u043d\u0430 \u041a\u0438\u043d\u043e\u043f\u043e\u0438\u0441\u043a\u0435\"\n },\n {\n \"type\": \"view.action-button\",\n \"action\": {\n \"type\": \"action.open-link\",\n \"payload\": {\n \"type\": \"data.input\",\n \"path\": 
\"trailer\",\n \"default\": \"\"\n }\n },\n \"label\": \"\u0422\u0440\u0435\u0439\u043b\u0435\u0440\"\n }\n ]\n },\n \"yandex\": {\n \"type\": \"helper.search-query\",\n \"query\": {\n \"type\": \"data.input\",\n \"path\": \"request\",\n \"default\": \"\"\n },\n \"engine\": \"yandex\"\n },\n \"vk\": {\n \"type\": \"helper.join\",\n \"by\": \"\",\n \"items\": [\n \"https://vk.com/video?len=2&q=\",\n {\n \"type\": \"data.input\",\n \"path\": \"contentName\",\n \"default\": \"\"\n }\n ]\n },\n \"mail\": {\n \"type\": \"helper.join\",\n \"by\": \"\",\n \"items\": [\n \"https://my.mail.ru/video/search?q=\",\n {\n \"type\": \"data.input\",\n \"path\": \"contentName\",\n \"default\": \"\"\n },\n \"&duration=medium&duration=long\"\n ]\n },\n \"rutube\": {\n \"type\": \"helper.join\",\n \"by\": \"\",\n \"items\": [\n \"https://rutube.ru/search/?query=\",\n {\n \"type\": \"data.input\",\n \"path\": \"contentName\",\n \"default\": \"\"\n },\n \"&duration=long\"\n ]\n },\n \"youtube\": {\n \"type\": \"helper.join\",\n \"by\": \"\",\n \"items\": [\n \"https://www.youtube.com/results?search_query=\",\n {\n \"type\": \"data.input\",\n \"path\": \"contentName\",\n \"default\": \"\"\n }\n ]\n },\n \"dzen\": {\n \"type\": \"helper.join\",\n \"by\": \"\",\n \"items\": [\n \"https://zen.yandex.ru/search?query=\",\n {\n \"type\": \"data.input\",\n \"path\": \"contentName\",\n \"default\": \"\"\n }\n ]\n }\n },\n \"view\": {\n \"type\": \"view.list\",\n \"items\": [\n {\n \"type\": \"view.group\",\n \"label\": {\n \"type\": \"helper.join\",\n \"by\": \" \",\n \"items\": [\n \"\u041a\u043e\u043d\u0442\u0435\u043d\u0442: \",\n {\n \"type\": \"data.input\",\n \"path\": \"contentType\",\n \"default\": \"\"\n },\n {\n \"type\": \"helper.join\",\n \"by\": \"\",\n \"items\": [\n \" \\\"\",\n {\n \"type\": \"data.input\",\n \"path\": \"contentName\",\n \"default\": \"\"\n },\n \"\\\"\"\n ]\n }\n ]\n },\n \"content\": {\n \"type\": \"layout.columns\",\n \"ratio\": [\n 2,\n 2\n ],\n \"items\": 
[\n {\n \"$ref\": \"vars.previewImage\"\n },\n {\n \"type\": \"view.list\",\n \"items\": [\n {\n \"$ref\": \"vars.incomingData\"\n },\n {\n \"$ref\": \"vars.incomingData2\"\n }\n ]\n }\n ]\n }\n },\n {\n \"type\": \"view.group\",\n \"label\": \"\u041f\u043e\u0438\u0441\u043a \u043f\u043e\u0434\u043e\u0437\u0440\u0438\u0442\u0435\u043b\u044c\u043d\u044b\u0445 \u0441\u0441\u044b\u043b\u043e\u043a\",\n \"content\": {\n \"type\": \"view.list\",\n \"items\": [\n {\n \"type\": \"view.list\",\n \"label\": \"\u041f\u043e\u0438\u0449\u0438\u0442\u0435 \u0441\u0441\u044b\u043b\u043a\u0438 \u0441 \u043a\u043e\u043d\u0442\u0435\u043d\u0442\u043e\u043c \u0432 \u0440\u0430\u0437\u043d\u044b\u0445 \u0441\u0438\u0441\u0442\u0435\u043c\u0430\u0445\",\n \"direction\": \"horizontal\",\n \"items\": [\n {\n \"type\": \"view.action-button\",\n \"label\": \"\u0412\u044b\u0434\u0430\u0447\u0430 \u042f\u043d\u0434\u0435\u043a\u0441\u0430\",\n \"action\": {\n \"type\": \"action.open-link\",\n \"payload\": {\n \"$ref\": \"vars.yandex\"\n }\n }\n },\n {\n \"type\": \"view.action-button\",\n \"label\": \"\u0412\u0438\u0434\u0435\u043e \u0412\u041a\",\n \"action\": {\n \"type\": \"action.open-link\",\n \"payload\": {\n \"$ref\": \"vars.vk\"\n }\n }\n },\n {\n \"type\": \"view.action-button\",\n \"label\": \"\u0412\u0438\u0434\u0435\u043e Mail.ru\",\n \"action\": {\n \"type\": \"action.open-link\",\n \"payload\": {\n \"$ref\": \"vars.mail\"\n }\n }\n },\n {\n \"type\": \"view.action-button\",\n \"label\": \"Rutube\",\n \"action\": {\n \"type\": \"action.open-link\",\n \"payload\": {\n \"$ref\": \"vars.rutube\"\n }\n }\n },\n {\n \"type\": \"view.action-button\",\n \"label\": \"YouTube\",\n \"action\": {\n \"type\": \"action.open-link\",\n \"payload\": {\n \"$ref\": \"vars.youtube\"\n }\n }\n },\n {\n \"type\": \"view.action-button\",\n \"label\": \"\u042f\u043d\u0434\u0435\u043a\u0441.\u0414\u0437\u0435\u043d\",\n \"action\": {\n \"type\": \"action.open-link\",\n \"payload\": {\n \"$ref\": 
\"vars.dzen\"\n }\n }\n },\n {\n \"type\": \"view.action-button\",\n \"label\": \"\u041e\u0434\u043d\u043e\u043a\u043b\u0430\u0441\u0441\u043d\u0438\u043a\u0438 (\u043d\u0430\u0434\u043e \u0432\u0431\u0438\u0442\u044c \u043d\u0430\u0437\u0432\u0430\u043d\u0438\u0435 \u0440\u0443\u043a\u0430\u043c\u0438)\",\n \"action\": {\n \"type\": \"action.open-link\",\n \"payload\": \"https://ok.ru/video\"\n }\n }\n ]\n },\n {\n \"type\": \"helper.if\",\n \"condition\": {\n \"type\": \"condition.equals\",\n \"to\": false,\n \"data\": {\n \"type\": \"data.internal\",\n \"path\": \"noLinks\"\n }\n },\n \"then\": {\n \"type\": \"field.list\",\n \"data\": {\n \"type\": \"data.internal\",\n \"path\": \"result\",\n \"default\": [\n \"\"\n ]\n },\n \"label\": \"\u041f\u0440\u043e\u0432\u0435\u0440\u044c\u0442\u0435 \u0441\u0441\u044b\u043b\u043a\u0438 \u043d\u0430 \u0432\u044b\u0434\u0430\u0447\u0435 \u043d\u0430 \u043f\u0440\u0435\u0434\u043c\u0435\u0442 \u043f\u0438\u0440\u0430\u0442\u0441\u0442\u0432\u0430 \u0438 \u0432\u0441\u0442\u0430\u0432\u044c\u0442\u0435 \u0441\u0441\u044b\u043b\u043a\u0438 \u0441 \u043a\u043e\u043d\u0442\u0435\u043d\u0442\u043e\u043c \u0432 \u043f\u043e\u043b\u044f \u043d\u0438\u0436\u0435\",\n \"buttonLabel\": \"\u0414\u043e\u0431\u0430\u0432\u0438\u0442\u044c \u0435\u0449\u0451\",\n \"render\": {\n \"type\": \"view.group\",\n \"content\": {\n \"type\": \"view.list\",\n \"items\": [\n {\n \"type\": \"field.text\",\n \"placeholder\": \"\u0421\u0441\u044b\u043b\u043a\u0430 \u0441 \u043a\u043e\u043d\u0442\u0435\u043d\u0442\u043e\u043c\",\n \"data\": {\n \"type\": \"data.relative\",\n \"path\": \"url\"\n },\n \"validation\": {\n \"type\": \"condition.all\",\n \"conditions\": [\n {\n \"type\": \"condition.all\",\n \"hint\": \"\u041d\u0443\u0436\u043d\u043e \u0441\u043a\u043e\u043f\u0438\u0440\u043e\u0432\u0430\u0442\u044c \u0441\u044e\u0434\u0430 \u0441\u0441\u044b\u043b\u043a\u0443\",\n \"conditions\": [\n {\n \"type\": \"condition.schema\",\n \"schema\": {\n 
\"type\": \"string\",\n \"pattern\": \"^https?://(.{1,63}\\\\.)+.{2,}(/.*)*$\"\n }\n },\n {\n \"type\": \"condition.not\",\n \"condition\": {\n \"type\": \"condition.empty\"\n }\n }\n ]\n },\n {\n \"type\": \"condition.less\",\n \"hint\": \"\u041d\u0443\u0436\u043d\u043e \u0443\u0431\u0440\u0430\u0442\u044c \u043f\u043e\u0432\u0442\u043e\u0440\u044f\u044e\u0449\u0438\u0435\u0441\u044f \u0441\u0441\u044b\u043b\u043a\u0438\",\n \"data\": {\n \"type\": \"helper.sum\",\n \"items\": {\n \"type\": \"helper.transform\",\n \"items\": {\n \"type\": \"data.internal\",\n \"path\": \"result\"\n },\n \"into\": {\n \"type\": \"helper.if\",\n \"condition\": {\n \"type\": \"condition.equals\",\n \"data\": {\n \"type\": \"helper.replace\",\n \"data\": {\n \"type\": \"data.local\",\n \"path\": \"item.url\"\n },\n \"find\": \"http://\",\n \"replace\": \"https://\"\n },\n \"to\": {\n \"type\": \"helper.replace\",\n \"data\": {\n \"type\": \"data.relative\",\n \"path\": \"url\"\n },\n \"find\": \"http://\",\n \"replace\": \"https://\"\n }\n },\n \"then\": 1,\n \"else\": 0\n }\n }\n },\n \"then\": 2\n },\n {\n \"hint\": \"\u042d\u0442\u0430 \u0441\u0441\u044b\u043b\u043a\u0430 \u0443\u0436\u0435 \u0431\u044b\u043b\u0430 \u043d\u0430\u0439\u0434\u0435\u043d\u0430\",\n \"type\": \"condition.not\",\n \"condition\": {\n \"type\": \"condition.contains\",\n \"data\": {\n \"type\": \"helper.replace\",\n \"data\": {\n \"type\": \"data.relative\",\n \"path\": \"url\"\n },\n \"find\": \"http://\",\n \"replace\": \"https://\"\n },\n \"in\": {\n \"type\": \"helper.transform\",\n \"items\": {\n \"type\": \"data.input\",\n \"path\": \"foundUrls\",\n \"default\": []\n },\n \"into\": {\n \"type\": \"helper.replace\",\n \"data\": {\n \"type\": \"data.local\",\n \"path\": \"item\"\n },\n \"find\": \"http://\",\n \"replace\": \"https://\"\n }\n }\n }\n },\n {\n \"type\": \"condition.not\",\n \"hint\": \"\u041d\u0430 \u044d\u0442\u043e\u043c \u0434\u043e\u043c\u0435\u043d\u0435 \u043d\u0435\u0442 
\u043f\u0438\u0440\u0430\u0442\u0441\u043a\u0438\u0445 \u0437\u0430\u043f\u0438\u0441\u0435\u0439. \u0415\u0441\u043b\u0438 \u044d\u0442\u043e \u043d\u0435 \u0442\u0430\u043a, \u043d\u0430\u043f\u0438\u0448\u0438\u0442\u0435 \u0437\u0430\u043a\u0430\u0437\u0447\u0438\u043a\u0443.\",\n \"condition\": {\n \"type\": \"condition.any\",\n \"conditions\": {\n \"type\": \"helper.transform\",\n \"items\": {\n \"type\": \"data.input\",\n \"path\": \"blackList\",\n \"default\": []\n },\n \"into\": {\n \"type\": \"condition.contains\",\n \"data\": {\n \"type\": \"helper.replace\",\n \"data\": {\n \"type\": \"data.local\",\n \"path\": \"item\"\n },\n \"find\": \"http://\",\n \"replace\": \"https://\"\n },\n \"in\": {\n \"type\": \"helper.join\",\n \"items\": [\n {\n \"type\": \"helper.replace\",\n \"data\": {\n \"type\": \"data.relative\",\n \"path\": \"url\"\n },\n \"find\": \"http://\",\n \"replace\": \"https://\"\n }\n ],\n \"by\": \"\"\n }\n }\n }\n }\n }\n ]\n }\n },\n {\n \"type\": \"field.file\",\n \"data\": {\n \"type\": \"data.relative\",\n \"path\": \"screenshot\"\n },\n \"accept\": [\n \"image/jpeg\",\n \"image/png\"\n ],\n \"validation\": {\n \"type\": \"condition.not\",\n \"condition\": {\n \"type\": \"condition.empty\",\n \"hint\": \"\u043f\u0440\u0438\u043a\u0440\u0435\u043f\u0438\u0442\u0435 \u0441\u043a\u0440\u0438\u043d\u0448\u043e\u0442\"\n }\n }\n }\n ]\n }\n }\n }\n },\n {\n \"type\": \"view.divider\"\n },\n {\n \"type\": \"field.checkbox\",\n \"preserveFalse\": true,\n \"label\": \"\u0421\u0441\u044b\u043b\u043e\u043a \u043d\u0435\u0442\",\n \"data\": {\n \"type\": \"data.internal\",\n \"path\": \"noLinks\"\n }\n }\n ]\n }\n }\n ]\n },\n \"plugins\": [\n {\n \"type\": \"plugin.trigger\",\n \"onChangeOf\": {\n \"type\": \"data.internal\",\n \"path\": \"result\"\n },\n \"action\": {\n \"type\": \"action.bulk\",\n \"payload\": [\n {\n \"$ref\": \"vars.outputLinks\"\n },\n {\n \"$ref\": \"vars.outputScreenshots\"\n }\n ]\n }\n },\n {\n \"type\": 
\"plugin.trigger\",\n \"onChangeOf\": {\n \"type\": \"data.internal\",\n \"path\": \"noLinks\"\n },\n \"condition\": {\n \"type\": \"condition.equals\",\n \"to\": true\n },\n \"action\": {\n \"type\": \"action.set\",\n \"data\": {\n \"type\": \"data.internal\",\n \"path\": \"result\"\n },\n \"payload\": {\n \"$empty\": true\n }\n }\n },\n {\n \"type\": \"plugin.trigger\",\n \"onChangeOf\": {\n \"type\": \"data.internal\",\n \"path\": \"result\"\n },\n \"condition\": {\n \"type\": \"condition.not\",\n \"condition\": {\n \"type\": \"condition.empty\"\n }\n },\n \"action\": {\n \"type\": \"action.set\",\n \"data\": {\n \"type\": \"data.internal\",\n \"path\": \"noLinks\"\n },\n \"payload\": false\n }\n },\n {\n \"1\": {\n \"type\": \"action.open-link\",\n \"payload\": {\n \"$ref\": \"vars.yandex\"\n }\n },\n \"2\": {\n \"type\": \"action.open-link\",\n \"payload\": {\n \"$ref\": \"vars.vk\"\n }\n },\n \"3\": {\n \"type\": \"action.open-link\",\n \"payload\": {\n \"$ref\": \"vars.mail\"\n }\n },\n \"4\": {\n \"type\": \"action.open-link\",\n \"payload\": {\n \"$ref\": \"vars.rutube\"\n }\n },\n \"5\": {\n \"type\": \"action.open-link\",\n \"payload\": {\n \"$ref\": \"vars.youtube\"\n }\n },\n \"6\": {\n \"type\": \"action.open-link\",\n \"payload\": {\n \"$ref\": \"vars.dzen\"\n }\n },\n \"7\": {\n \"type\": \"action.open-link\",\n \"payload\": \"https://ok.ru/video\"\n },\n \"up\": {\n \"type\": \"action.open-close\",\n \"view\": {\n \"$ref\": \"vars.previewImage\"\n }\n },\n \"down\": {\n \"type\": \"action.open-close\",\n \"view\": {\n \"$ref\": \"vars.previewImage\"\n }\n },\n \"type\": \"plugin.hotkeys\"\n },\n {\n \"type\": \"plugin.toloka\",\n \"layout\": {\n \"kind\": \"scroll\"\n }\n }\n ]\n}, 
{\"action.bulk\":\"1.0.0\",\"action.open-close\":\"1.0.0\",\"action.open-link\":\"1.0.0\",\"action.set\":\"1.0.0\",\"condition.all\":\"1.0.1\",\"condition.any\":\"1.0.1\",\"condition.contains\":\"1.0.1\",\"condition.empty\":\"1.0.1\",\"condition.equals\":\"1.0.1\",\"condition.less\":\"1.0.1\",\"condition.not\":\"1.0.0\",\"condition.schema\":\"1.0.1\",\"core\":\"1.2.2\",\"field.checkbox\":\"1.0.0\",\"field.file\":\"1.1.0\",\"field.list\":\"1.0.1\",\"field.text\":\"1.0.0\",\"helper.if\":\"1.0.0\",\"helper.join\":\"1.0.0\",\"helper.replace\":\"1.0.0\",\"helper.search-query\":\"1.0.0\",\"helper.sum\":\"1.0.1\",\"helper.transform\":\"1.0.1\",\"layout.columns\":\"1.0.0\",\"plugin.hotkeys\":\"1.0.1\",\"plugin.toloka\":\"1.0.5\",\"plugin.trigger\":\"1.0.0\",\"view.action-button\":\"1.0.0\",\"view.divider\":\"1.0.0\",\"view.group\":\"1.0.0\",\"view.image\":\"1.0.2\",\"view.labeled-list\":\"1.0.1\",\"view.list\":\"1.0.0\",\"view.text\":\"1.0.0\"}, 'https://tb.yandex.net/registry2')"
},
{
"config": "{\n \"vars\": {\n \"author\": \"assessorlead\",\n \"task\": \"old\"\n },\n \"view\": {\n \"type\": \"view.list\",\n \"items\": [\n {\n \"type\": \"view.group\",\n \"label\": \"\u041a\u043e\u043d\u0442\u0435\u043a\u0441\u0442\",\n \"content\": {\n \"type\": \"view.labeled-list\",\n \"items\": [\n {\n \"label\": \"\u041f\u043e\u043b\u044c\u0437\u043e\u0432\u0430\u0442\u0435\u043b\u044c\",\n \"content\": {\n \"type\": \"view.text\",\n \"content\": {\n \"type\": \"data.input\",\n \"path\": \"row.query\"\n }\n }\n },\n {\n \"label\": \"\u0410\u043b\u0438\u0441\u0430\",\n \"content\": {\n \"type\": \"view.text\",\n \"content\": {\n \"type\": \"data.input\",\n \"path\": \"row.reply\"\n }\n }\n }\n ]\n }\n },\n {\n \"type\": \"view.group\",\n \"label\": \"\u0417\u0430\u043f\u0440\u043e\u0441\",\n \"content\": {\n \"type\": \"view.labeled-list\",\n \"items\": [\n {\n \"label\": \"\u041f\u043e\u043b\u044c\u0437\u043e\u0432\u0430\u0442\u0435\u043b\u044c\",\n \"content\": {\n \"type\": \"view.text\",\n \"content\": {\n \"type\": \"data.input\",\n \"path\": \"row.next_query\"\n }\n }\n }\n ]\n }\n },\n {\n \"type\": \"view.group\",\n \"label\": \"\u041e\u0446\u0435\u043d\u043a\u0430\",\n \"content\": {\n \"type\": \"view.list\",\n \"size\": \"s\",\n \"items\": [\n {\n \"type\": \"field.checkbox-group\",\n \"label\": \"\u041a \u043a\u0430\u043a\u043e\u0439 \u0433\u0440\u0443\u043f\u043f\u0435 \u043e\u0442\u043d\u043e\u0441\u0438\u0442\u0441\u044f \u043f\u043e\u0441\u043b\u0435\u0434\u043d\u044f\u044f \u0444\u0440\u0430\u0437\u0430 \u043f\u043e\u043b\u044c\u0437\u043e\u0432\u0430\u0442\u0435\u043b\u044f?\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"result\"\n },\n \"options\": [\n {\n \"label\": \"\u041d\u0438\u0447\u0435\u0433\u043e \u043d\u0435 \u043f\u043e\u0434\u0445\u043e\u0434\u0438\u0442\",\n \"value\": \"nomatch\"\n }\n ]\n },\n {\n \"type\": \"field.checkbox-group\",\n \"disabled\": {\n \"type\": \"condition.equals\",\n \"to\": true,\n \"data\": 
{\n \"type\": \"data.output\",\n \"path\": \"result.nomatch\"\n }\n },\n \"options\": {\n \"type\": \"helper.transform\",\n \"items\": {\n \"type\": \"data.input\",\n \"path\": \"row.actions\"\n },\n \"into\": {\n \"type\": \"data.local\",\n \"path\": \"item\"\n }\n },\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"result\"\n },\n \"validation\": {\n \"type\": \"helper.if\",\n \"condition\": {\n \"type\": \"condition.any\",\n \"conditions\": [\n {\n \"type\": \"condition.equals\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"result.nomatch\"\n },\n \"to\": false\n },\n {\n \"type\": \"condition.empty\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"result.nomatch\"\n }\n }\n ]\n },\n \"then\": {\n \"type\": \"condition.required\"\n }\n }\n }\n ]\n }\n }\n ]\n },\n \"plugins\": [\n {\n \"type\": \"plugin.trigger\",\n \"onChangeOf\": {\n \"type\": \"data.output\",\n \"path\": \"result\"\n },\n \"condition\": {\n \"type\": \"condition.equals\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"result.nomatch\"\n },\n \"to\": true\n },\n \"action\": {\n \"type\": \"helper.if\",\n \"condition\": {\n \"type\": \"condition.all\",\n \"conditions\": [\n {\n \"type\": \"condition.equals\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"result.nomatch\"\n },\n \"to\": true\n },\n {\n \"type\": \"condition.not\",\n \"condition\": {\n \"type\": \"condition.equals\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"result\"\n },\n \"to\": {\n \"nomatch\": true\n }\n }\n }\n ]\n },\n \"then\": {\n \"type\": \"action.set\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"result\"\n },\n \"payload\": {\n \"nomatch\": true\n }\n }\n }\n },\n {\n \"1\": {\n \"type\": \"action.toggle\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"result.nomatch\"\n }\n },\n \"2\": {\n \"type\": \"helper.if\",\n \"condition\": {\n \"type\": \"condition.required\",\n \"data\": {\n \"type\": \"data.input\",\n \"path\": 
\"row.actions.0.value\"\n }\n },\n \"then\": {\n \"type\": \"action.toggle\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": {\n \"type\": \"helper.join\",\n \"items\": [\n \"result\",\n {\n \"type\": \"data.input\",\n \"path\": \"row.actions.0.value\"\n }\n ],\n \"by\": \".\"\n }\n }\n }\n },\n \"3\": {\n \"type\": \"helper.if\",\n \"condition\": {\n \"type\": \"condition.required\",\n \"data\": {\n \"type\": \"data.input\",\n \"path\": \"row.actions.1.value\"\n }\n },\n \"then\": {\n \"type\": \"action.toggle\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": {\n \"type\": \"helper.join\",\n \"items\": [\n \"result\",\n {\n \"type\": \"data.input\",\n \"path\": \"row.actions.1.value\"\n }\n ],\n \"by\": \".\"\n }\n }\n }\n },\n \"4\": {\n \"type\": \"helper.if\",\n \"condition\": {\n \"type\": \"condition.required\",\n \"data\": {\n \"type\": \"data.input\",\n \"path\": \"row.actions.2.value\"\n }\n },\n \"then\": {\n \"type\": \"action.toggle\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": {\n \"type\": \"helper.join\",\n \"items\": [\n \"result\",\n {\n \"type\": \"data.input\",\n \"path\": \"row.actions.2.value\"\n }\n ],\n \"by\": \".\"\n }\n }\n }\n },\n \"5\": {\n \"type\": \"helper.if\",\n \"condition\": {\n \"type\": \"condition.required\",\n \"data\": {\n \"type\": \"data.input\",\n \"path\": \"row.actions.3.value\"\n }\n },\n \"then\": {\n \"type\": \"action.toggle\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": {\n \"type\": \"helper.join\",\n \"items\": [\n \"result\",\n {\n \"type\": \"data.input\",\n \"path\": \"row.actions.3.value\"\n }\n ],\n \"by\": \".\"\n }\n }\n }\n },\n \"6\": {\n \"type\": \"helper.if\",\n \"condition\": {\n \"type\": \"condition.required\",\n \"data\": {\n \"type\": \"data.input\",\n \"path\": \"row.actions.4.value\"\n }\n },\n \"then\": {\n \"type\": \"action.toggle\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": {\n \"type\": \"helper.join\",\n \"items\": [\n \"result\",\n {\n 
\"type\": \"data.input\",\n \"path\": \"row.actions.4.value\"\n }\n ],\n \"by\": \".\"\n }\n }\n }\n },\n \"q\": {\n \"type\": \"helper.if\",\n \"condition\": {\n \"type\": \"condition.required\",\n \"data\": {\n \"type\": \"data.input\",\n \"path\": \"row.actions.5.value\"\n }\n },\n \"then\": {\n \"type\": \"action.toggle\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": {\n \"type\": \"helper.join\",\n \"items\": [\n \"result\",\n {\n \"type\": \"data.input\",\n \"path\": \"row.actions.5.value\"\n }\n ],\n \"by\": \".\"\n }\n }\n }\n },\n \"w\": {\n \"type\": \"helper.if\",\n \"condition\": {\n \"type\": \"condition.required\",\n \"data\": {\n \"type\": \"data.input\",\n \"path\": \"row.actions.6.value\"\n }\n },\n \"then\": {\n \"type\": \"action.toggle\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": {\n \"type\": \"helper.join\",\n \"items\": [\n \"result\",\n {\n \"type\": \"data.input\",\n \"path\": \"row.actions.6.value\"\n }\n ],\n \"by\": \".\"\n }\n }\n }\n },\n \"e\": {\n \"type\": \"helper.if\",\n \"condition\": {\n \"type\": \"condition.required\",\n \"data\": {\n \"type\": \"data.input\",\n \"path\": \"row.actions.7.value\"\n }\n },\n \"then\": {\n \"type\": \"action.toggle\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": {\n \"type\": \"helper.join\",\n \"items\": [\n \"result\",\n {\n \"type\": \"data.input\",\n \"path\": \"row.actions.7.value\"\n }\n ],\n \"by\": \".\"\n }\n }\n }\n },\n \"r\": {\n \"type\": \"helper.if\",\n \"condition\": {\n \"type\": \"condition.required\",\n \"data\": {\n \"type\": \"data.input\",\n \"path\": \"row.actions.8.value\"\n }\n },\n \"then\": {\n \"type\": \"action.toggle\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": {\n \"type\": \"helper.join\",\n \"items\": [\n \"result\",\n {\n \"type\": \"data.input\",\n \"path\": \"row.actions.8.value\"\n }\n ],\n \"by\": \".\"\n }\n }\n }\n },\n \"t\": {\n \"type\": \"helper.if\",\n \"condition\": {\n \"type\": \"condition.required\",\n 
\"data\": {\n \"type\": \"data.input\",\n \"path\": \"row.actions.9.value\"\n }\n },\n \"then\": {\n \"type\": \"action.toggle\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": {\n \"type\": \"helper.join\",\n \"items\": [\n \"result\",\n {\n \"type\": \"data.input\",\n \"path\": \"row.actions.9.value\"\n }\n ],\n \"by\": \".\"\n }\n }\n }\n },\n \"y\": {\n \"type\": \"helper.if\",\n \"condition\": {\n \"type\": \"condition.required\",\n \"data\": {\n \"type\": \"data.input\",\n \"path\": \"row.actions.10.value\"\n }\n },\n \"then\": {\n \"type\": \"action.toggle\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": {\n \"type\": \"helper.join\",\n \"items\": [\n \"result\",\n {\n \"type\": \"data.input\",\n \"path\": \"row.actions.10.value\"\n }\n ],\n \"by\": \".\"\n }\n }\n }\n },\n \"type\": \"plugin.hotkeys\"\n }\n ]\n}",
"lock": {
"action.set": "1.0.0",
"action.toggle": "1.0.0",
"condition.all": "1.0.1",
"condition.any": "1.0.1",
"condition.empty": "1.0.1",
"condition.equals": "1.0.1",
"condition.not": "1.0.0",
"condition.required": "1.0.1",
"core": "1.2.1",
"field.checkbox-group": "1.1.0",
"helper.if": "1.0.0",
"helper.join": "1.0.0",
"helper.transform": "1.0.1",
"plugin.hotkeys": "1.0.1",
"plugin.trigger": "1.0.0",
"view.group": "1.0.0",
"view.labeled-list": "1.0.1",
"view.list": "1.0.0",
"view.text": "1.0.0"
},
"settings": {
"showFinish": True,
"showFullscreen": True,
"showInstructions": True,
"showMessage": True,
"showReward": True,
"showSkip": True,
"showSubmit": True,
"showTimer": True,
"showTitle": True
},
"type": "tb",
"assets": {
"script_urls": [
"https://yastat.net/s3/tb/launcher.template/_/master.825664685.2020-11-19.6390a7cf.js"
],
"style_urls": [
"https://yastat.net/s3/tb/launcher.template/_/master.825664685.2020-11-19.6390a7cf.css"
]
},
"markup": "",
"styles": "",
"script": "window.configureTemplate({\n \"vars\": {\n \"author\": \"assessorlead\",\n \"task\": \"old\"\n },\n \"view\": {\n \"type\": \"view.list\",\n \"items\": [\n {\n \"type\": \"view.group\",\n \"label\": \"\u041a\u043e\u043d\u0442\u0435\u043a\u0441\u0442\",\n \"content\": {\n \"type\": \"view.labeled-list\",\n \"items\": [\n {\n \"label\": \"\u041f\u043e\u043b\u044c\u0437\u043e\u0432\u0430\u0442\u0435\u043b\u044c\",\n \"content\": {\n \"type\": \"view.text\",\n \"content\": {\n \"type\": \"data.input\",\n \"path\": \"row.query\"\n }\n }\n },\n {\n \"label\": \"\u0410\u043b\u0438\u0441\u0430\",\n \"content\": {\n \"type\": \"view.text\",\n \"content\": {\n \"type\": \"data.input\",\n \"path\": \"row.reply\"\n }\n }\n }\n ]\n }\n },\n {\n \"type\": \"view.group\",\n \"label\": \"\u0417\u0430\u043f\u0440\u043e\u0441\",\n \"content\": {\n \"type\": \"view.labeled-list\",\n \"items\": [\n {\n \"label\": \"\u041f\u043e\u043b\u044c\u0437\u043e\u0432\u0430\u0442\u0435\u043b\u044c\",\n \"content\": {\n \"type\": \"view.text\",\n \"content\": {\n \"type\": \"data.input\",\n \"path\": \"row.next_query\"\n }\n }\n }\n ]\n }\n },\n {\n \"type\": \"view.group\",\n \"label\": \"\u041e\u0446\u0435\u043d\u043a\u0430\",\n \"content\": {\n \"type\": \"view.list\",\n \"size\": \"s\",\n \"items\": [\n {\n \"type\": \"field.checkbox-group\",\n \"label\": \"\u041a \u043a\u0430\u043a\u043e\u0439 \u0433\u0440\u0443\u043f\u043f\u0435 \u043e\u0442\u043d\u043e\u0441\u0438\u0442\u0441\u044f \u043f\u043e\u0441\u043b\u0435\u0434\u043d\u044f\u044f \u0444\u0440\u0430\u0437\u0430 \u043f\u043e\u043b\u044c\u0437\u043e\u0432\u0430\u0442\u0435\u043b\u044f?\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"result\"\n },\n \"options\": [\n {\n \"label\": \"\u041d\u0438\u0447\u0435\u0433\u043e \u043d\u0435 \u043f\u043e\u0434\u0445\u043e\u0434\u0438\u0442\",\n \"value\": \"nomatch\"\n }\n ]\n },\n {\n \"type\": \"field.checkbox-group\",\n \"disabled\": {\n \"type\": \"condition.equals\",\n 
\"to\": true,\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"result.nomatch\"\n }\n },\n \"options\": {\n \"type\": \"helper.transform\",\n \"items\": {\n \"type\": \"data.input\",\n \"path\": \"row.actions\"\n },\n \"into\": {\n \"type\": \"data.local\",\n \"path\": \"item\"\n }\n },\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"result\"\n },\n \"validation\": {\n \"type\": \"helper.if\",\n \"condition\": {\n \"type\": \"condition.any\",\n \"conditions\": [\n {\n \"type\": \"condition.equals\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"result.nomatch\"\n },\n \"to\": false\n },\n {\n \"type\": \"condition.empty\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"result.nomatch\"\n }\n }\n ]\n },\n \"then\": {\n \"type\": \"condition.required\"\n }\n }\n }\n ]\n }\n }\n ]\n },\n \"plugins\": [\n {\n \"type\": \"plugin.trigger\",\n \"onChangeOf\": {\n \"type\": \"data.output\",\n \"path\": \"result\"\n },\n \"condition\": {\n \"type\": \"condition.equals\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"result.nomatch\"\n },\n \"to\": true\n },\n \"action\": {\n \"type\": \"helper.if\",\n \"condition\": {\n \"type\": \"condition.all\",\n \"conditions\": [\n {\n \"type\": \"condition.equals\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"result.nomatch\"\n },\n \"to\": true\n },\n {\n \"type\": \"condition.not\",\n \"condition\": {\n \"type\": \"condition.equals\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"result\"\n },\n \"to\": {\n \"nomatch\": true\n }\n }\n }\n ]\n },\n \"then\": {\n \"type\": \"action.set\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"result\"\n },\n \"payload\": {\n \"nomatch\": true\n }\n }\n }\n },\n {\n \"1\": {\n \"type\": \"action.toggle\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"result.nomatch\"\n }\n },\n \"2\": {\n \"type\": \"helper.if\",\n \"condition\": {\n \"type\": \"condition.required\",\n \"data\": {\n \"type\": 
\"data.input\",\n \"path\": \"row.actions.0.value\"\n }\n },\n \"then\": {\n \"type\": \"action.toggle\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": {\n \"type\": \"helper.join\",\n \"items\": [\n \"result\",\n {\n \"type\": \"data.input\",\n \"path\": \"row.actions.0.value\"\n }\n ],\n \"by\": \".\"\n }\n }\n }\n },\n \"3\": {\n \"type\": \"helper.if\",\n \"condition\": {\n \"type\": \"condition.required\",\n \"data\": {\n \"type\": \"data.input\",\n \"path\": \"row.actions.1.value\"\n }\n },\n \"then\": {\n \"type\": \"action.toggle\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": {\n \"type\": \"helper.join\",\n \"items\": [\n \"result\",\n {\n \"type\": \"data.input\",\n \"path\": \"row.actions.1.value\"\n }\n ],\n \"by\": \".\"\n }\n }\n }\n },\n \"4\": {\n \"type\": \"helper.if\",\n \"condition\": {\n \"type\": \"condition.required\",\n \"data\": {\n \"type\": \"data.input\",\n \"path\": \"row.actions.2.value\"\n }\n },\n \"then\": {\n \"type\": \"action.toggle\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": {\n \"type\": \"helper.join\",\n \"items\": [\n \"result\",\n {\n \"type\": \"data.input\",\n \"path\": \"row.actions.2.value\"\n }\n ],\n \"by\": \".\"\n }\n }\n }\n },\n \"5\": {\n \"type\": \"helper.if\",\n \"condition\": {\n \"type\": \"condition.required\",\n \"data\": {\n \"type\": \"data.input\",\n \"path\": \"row.actions.3.value\"\n }\n },\n \"then\": {\n \"type\": \"action.toggle\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": {\n \"type\": \"helper.join\",\n \"items\": [\n \"result\",\n {\n \"type\": \"data.input\",\n \"path\": \"row.actions.3.value\"\n }\n ],\n \"by\": \".\"\n }\n }\n }\n },\n \"6\": {\n \"type\": \"helper.if\",\n \"condition\": {\n \"type\": \"condition.required\",\n \"data\": {\n \"type\": \"data.input\",\n \"path\": \"row.actions.4.value\"\n }\n },\n \"then\": {\n \"type\": \"action.toggle\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": {\n \"type\": \"helper.join\",\n 
\"items\": [\n \"result\",\n {\n \"type\": \"data.input\",\n \"path\": \"row.actions.4.value\"\n }\n ],\n \"by\": \".\"\n }\n }\n }\n },\n \"q\": {\n \"type\": \"helper.if\",\n \"condition\": {\n \"type\": \"condition.required\",\n \"data\": {\n \"type\": \"data.input\",\n \"path\": \"row.actions.5.value\"\n }\n },\n \"then\": {\n \"type\": \"action.toggle\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": {\n \"type\": \"helper.join\",\n \"items\": [\n \"result\",\n {\n \"type\": \"data.input\",\n \"path\": \"row.actions.5.value\"\n }\n ],\n \"by\": \".\"\n }\n }\n }\n },\n \"w\": {\n \"type\": \"helper.if\",\n \"condition\": {\n \"type\": \"condition.required\",\n \"data\": {\n \"type\": \"data.input\",\n \"path\": \"row.actions.6.value\"\n }\n },\n \"then\": {\n \"type\": \"action.toggle\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": {\n \"type\": \"helper.join\",\n \"items\": [\n \"result\",\n {\n \"type\": \"data.input\",\n \"path\": \"row.actions.6.value\"\n }\n ],\n \"by\": \".\"\n }\n }\n }\n },\n \"e\": {\n \"type\": \"helper.if\",\n \"condition\": {\n \"type\": \"condition.required\",\n \"data\": {\n \"type\": \"data.input\",\n \"path\": \"row.actions.7.value\"\n }\n },\n \"then\": {\n \"type\": \"action.toggle\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": {\n \"type\": \"helper.join\",\n \"items\": [\n \"result\",\n {\n \"type\": \"data.input\",\n \"path\": \"row.actions.7.value\"\n }\n ],\n \"by\": \".\"\n }\n }\n }\n },\n \"r\": {\n \"type\": \"helper.if\",\n \"condition\": {\n \"type\": \"condition.required\",\n \"data\": {\n \"type\": \"data.input\",\n \"path\": \"row.actions.8.value\"\n }\n },\n \"then\": {\n \"type\": \"action.toggle\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": {\n \"type\": \"helper.join\",\n \"items\": [\n \"result\",\n {\n \"type\": \"data.input\",\n \"path\": \"row.actions.8.value\"\n }\n ],\n \"by\": \".\"\n }\n }\n }\n },\n \"t\": {\n \"type\": \"helper.if\",\n \"condition\": {\n 
\"type\": \"condition.required\",\n \"data\": {\n \"type\": \"data.input\",\n \"path\": \"row.actions.9.value\"\n }\n },\n \"then\": {\n \"type\": \"action.toggle\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": {\n \"type\": \"helper.join\",\n \"items\": [\n \"result\",\n {\n \"type\": \"data.input\",\n \"path\": \"row.actions.9.value\"\n }\n ],\n \"by\": \".\"\n }\n }\n }\n },\n \"y\": {\n \"type\": \"helper.if\",\n \"condition\": {\n \"type\": \"condition.required\",\n \"data\": {\n \"type\": \"data.input\",\n \"path\": \"row.actions.10.value\"\n }\n },\n \"then\": {\n \"type\": \"action.toggle\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": {\n \"type\": \"helper.join\",\n \"items\": [\n \"result\",\n {\n \"type\": \"data.input\",\n \"path\": \"row.actions.10.value\"\n }\n ],\n \"by\": \".\"\n }\n }\n }\n },\n \"type\": \"plugin.hotkeys\"\n }\n ]\n}, {\"action.set\":\"1.0.0\",\"action.toggle\":\"1.0.0\",\"condition.all\":\"1.0.1\",\"condition.any\":\"1.0.1\",\"condition.empty\":\"1.0.1\",\"condition.equals\":\"1.0.1\",\"condition.not\":\"1.0.0\",\"condition.required\":\"1.0.1\",\"core\":\"1.2.1\",\"field.checkbox-group\":\"1.1.0\",\"helper.if\":\"1.0.0\",\"helper.join\":\"1.0.0\",\"helper.transform\":\"1.0.1\",\"plugin.hotkeys\":\"1.0.1\",\"plugin.trigger\":\"1.0.0\",\"view.group\":\"1.0.0\",\"view.labeled-list\":\"1.0.1\",\"view.list\":\"1.0.0\",\"view.text\":\"1.0.0\"}, 'https://tb.yandex.net/registry2')"
},
{
"config": "{\n \"vars\": {\n \"product\": \"PRODUCT\",\n \"service\": \"SERVICE\",\n \"other\": \"OTHER\",\n \"product_not_available\": \"PRODUCT_NOT_AVAILABLE\",\n \"product_available\": \"PRODUCT_AVAILABLE\",\n \"service_not_available\": \"SERVICE_NOT_AVAILABLE\",\n \"service_available\": \"SERVICE_AVAILABLE\",\n \"other_not_available\": \"OTHER_NOT_AVAILABLE\",\n \"other_available\": \"OTHER_AVAILABLE\",\n \"tickets\": \"TICKETS\",\n \"non_purchase\": \"NON_PURCHASE\"\n },\n \"view\": {\n \"type\": \"layout.bars\",\n \"content\": {\n \"type\": \"layout.sidebar\",\n \"content\": {\n \"type\": \"layout.bars\",\n \"content\": {\n \"type\": \"view.image\",\n \"scrollable\": true,\n \"url\": {\n \"type\": \"data.input\",\n \"path\": \"takenScreen\"\n }\n },\n \"barAfter\": {\n \"type\": \"view.link\",\n \"url\": {\n \"type\": \"data.input\",\n \"path\": \"url\"\n }\n }\n },\n \"minWidth\": 1000,\n \"controlsWidth\": 350,\n \"controls\": {\n \"type\": \"view.list\",\n \"items\": [\n {\n \"type\": \"field.radio-group\",\n \"label\": \"\u0412\u044b\u0431\u0435\u0440\u0438\u0442\u0435 \u0442\u0438\u043f \u0437\u0430\u043f\u0440\u043e\u0441\u0430:\",\n \"options\": [\n {\n \"label\": \"\u0412 \u0437\u0430\u043f\u0440\u043e\u0441\u0435 \u0442\u043e\u0432\u0430\u0440\",\n \"value\": {\n \"$ref\": \"vars.product\"\n }\n },\n {\n \"label\": \"\u0412 \u0437\u0430\u043f\u0440\u043e\u0441\u0435 \u0443\u0441\u043b\u0443\u0433\u0430\",\n \"value\": {\n \"$ref\": \"vars.service\"\n }\n },\n {\n \"label\": \"\u0412 \u0437\u0430\u043f\u0440\u043e\u0441\u0435 \u043e\u0442\u0435\u043b\u044c/\u0431\u0438\u043b\u0435\u0442\u044b/\u0442\u0443\u0440/...\",\n \"value\": {\n \"$ref\": \"vars.tickets\"\n }\n },\n {\n \"label\": \"\u0414\u0440\u0443\u0433\u043e\u0439 \u0437\u0430\u043f\u0440\u043e\u0441\",\n \"value\": {\n \"$ref\": \"vars.non_purchase\"\n }\n }\n ],\n \"data\": {\n \"type\": \"data.internal\",\n \"path\": \"query_result\"\n },\n \"validation\": {\n \"type\": 
\"condition.required\",\n \"data\": \"data.output\"\n }\n },\n {\n \"type\": \"helper.if\",\n \"condition\": {\n \"type\": \"condition.equals\",\n \"data\": {\n \"type\": \"data.internal\",\n \"path\": \"query_result\"\n },\n \"to\": {\n \"$ref\": \"vars.product\"\n }\n },\n \"then\": {\n \"type\": \"field.radio-group\",\n \"label\": \"\u0412\u044b\u0431\u0435\u0440\u0438\u0442\u0435 \u0442\u0438\u043f \u0441\u0442\u0440\u0430\u043d\u0438\u0446\u044b:\",\n \"options\": [\n {\n \"label\": \"\u041d\u0435\u043b\u044c\u0437\u044f \u043a\u0443\u043f\u0438\u0442\u044c/\u043d\u0435\u0442 \u0432 \u043d\u0430\u043b\u0438\u0447\u0438\u0438/\u043f\u043e\u043a\u0443\u043f\u043a\u0430 - \u043f\u0435\u0440\u0435\u0445\u043e\u0434 \u043d\u0430 \u0441\u0442\u043e\u0440\u043e\u043d\u043d\u0438\u0439 \u0441\u0430\u0439\u0442/...\",\n \"value\": {\n \"$ref\": \"vars.product_not_available\"\n }\n },\n {\n \"label\": \"\u041c\u043e\u0436\u043d\u043e \u043a\u0443\u043f\u0438\u0442\u044c/\u0437\u0430\u043a\u0430\u0437\u0430\u0442\u044c/\u043f\u043e\u043b\u043e\u0436\u0438\u0442\u044c \u0432 \u043a\u043e\u0440\u0437\u0438\u043d\u0443/... 
\u043d\u0430 \u044d\u0442\u043e\u0439 \u0441\u0442\u0440\u0430\u043d\u0438\u0446\u0435\",\n \"value\": {\n \"$ref\": \"vars.product_available\"\n }\n }\n ],\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"url_result\"\n },\n \"validation\": {\n \"type\": \"condition.required\"\n }\n }\n },\n {\n \"type\": \"helper.if\",\n \"condition\": {\n \"type\": \"condition.equals\",\n \"data\": {\n \"type\": \"data.internal\",\n \"path\": \"query_result\"\n },\n \"to\": {\n \"$ref\": \"vars.service\"\n }\n },\n \"then\": {\n \"type\": \"field.radio-group\",\n \"label\": \"\u0412\u044b\u0431\u0435\u0440\u0438\u0442\u0435 \u0442\u0438\u043f \u0441\u0442\u0440\u0430\u043d\u0438\u0446\u044b:\",\n \"options\": [\n {\n \"label\": \"\u041d\u0435 \u0438\u043c\u0435\u0435\u0442 \u043e\u0442\u043d\u043e\u0448\u0435\u043d\u0438\u044f \u043a \u0437\u0430\u043a\u0430\u0437\u0443 \u0443\u0441\u043b\u0443\u0433\u0438\",\n \"value\": {\n \"$ref\": \"vars.service_not_available\"\n }\n },\n {\n \"label\": \"\u041c\u043e\u0436\u043d\u043e \u043f\u0435\u0440\u0435\u0439\u0442\u0438 \u043a \u0437\u0430\u043a\u0430\u0437\u0443 \u0443\u0441\u043b\u0443\u0433\u0438\",\n \"value\": {\n \"$ref\": \"vars.service_available\"\n }\n }\n ],\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"url_result\"\n },\n \"validation\": {\n \"type\": \"condition.required\"\n }\n }\n },\n {\n \"type\": \"helper.if\",\n \"condition\": {\n \"type\": \"condition.equals\",\n \"data\": {\n \"type\": \"data.internal\",\n \"path\": \"query_result\"\n },\n \"to\": {\n \"$ref\": \"vars.tickets\"\n }\n },\n \"then\": {\n \"type\": \"field.radio-group\",\n \"label\": \"\u0412\u044b\u0431\u0435\u0440\u0438\u0442\u0435 \u0442\u0438\u043f \u0441\u0442\u0440\u0430\u043d\u0438\u0446\u044b:\",\n \"options\": [\n {\n \"label\": \"\u041d\u0435 \u0438\u043c\u0435\u0435\u0442 \u043e\u0442\u043d\u043e\u0448\u0435\u043d\u0438\u044f \u043a \u0431\u0440\u043e\u043d\u0438\u0440\u043e\u0432\u0430\u043d\u0438\u044e 
\u043e\u0442\u0435\u043b\u044f/\u0431\u0438\u043b\u0435\u0442\u0430/\u0442\u0443\u0440\u0430/...\",\n \"value\": {\n \"$ref\": \"vars.other_not_available\"\n }\n },\n {\n \"label\": \"\u041c\u043e\u0436\u043d\u043e \u043f\u0435\u0440\u0435\u0439\u0442\u0438 \u043a \u0431\u0440\u043e\u043d\u0438\u0440\u043e\u0432\u0430\u043d\u0438\u044e \u043e\u0442\u0435\u043b\u044f/\u0431\u0438\u043b\u0435\u0442\u0430/\u0442\u0443\u0440\u0430/...\",\n \"value\": {\n \"$ref\": \"vars.other_available\"\n }\n }\n ],\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"url_result\"\n },\n \"validation\": {\n \"type\": \"condition.required\"\n }\n }\n },\n {\n \"type\": \"helper.if\",\n \"condition\": {\n \"type\": \"condition.equals\",\n \"data\": {\n \"type\": \"data.internal\",\n \"path\": \"query_result\"\n },\n \"to\": {\n \"$ref\": \"vars.non_purchase\"\n }\n },\n \"then\": {\n \"type\": \"field.radio-group\",\n \"label\": \"\u0412\u044b\u0431\u0435\u0440\u0438\u0442\u0435 \u0442\u0438\u043f \u0441\u0442\u0440\u0430\u043d\u0438\u0446\u044b:\",\n \"options\": [\n {\n \"label\": \"\u0417\u0430\u043f\u0440\u043e\u0441 \u043d\u0435 \u043f\u043e\u0434\u0440\u0430\u0437\u0443\u043c\u0435\u0432\u0430\u0435\u0442 \u043f\u043e\u043a\u0443\u043f\u043a\u0443/\u043d\u0435\u043b\u044c\u0437\u044f \u043a\u0443\u043f\u0438\u0442\u044c/...\",\n \"value\": {\n \"$ref\": \"vars.other_not_available\"\n }\n },\n {\n \"label\": \"\u041c\u043e\u0436\u043d\u043e \u043a\u0443\u043f\u0438\u0442\u044c\",\n \"value\": {\n \"$ref\": \"vars.other_available\"\n }\n }\n ],\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"url_result\"\n },\n \"validation\": {\n \"type\": \"condition.required\"\n }\n }\n }\n ]\n }\n },\n \"barBefore\": {\n \"type\": \"view.list\",\n \"items\": [\n {\n \"type\": \"view.list\",\n \"direction\": \"horizontal\",\n \"items\": [\n {\n \"type\": \"view.list\",\n \"direction\": \"horizontal\",\n \"items\": [\n {\n \"type\": \"view.text\",\n \"content\": 
\"\u0417\u0430\u043f\u0440\u043e\u0441:\"\n },\n {\n \"type\": \"view.markdown\",\n \"content\": {\n \"type\": \"helper.join\",\n \"items\": [\n \"**\",\n {\n \"type\": \"data.input\",\n \"path\": \"query.query_text\"\n },\n \"**\"\n ]\n }\n }\n ]\n },\n {\n \"type\": \"view.list\",\n \"direction\": \"horizontal\",\n \"items\": [\n {\n \"type\": \"view.text\",\n \"content\": \"\u0420\u0435\u0433\u0438\u043e\u043d:\"\n },\n {\n \"type\": \"view.markdown\",\n \"content\": {\n \"type\": \"helper.join\",\n \"items\": [\n \"**\",\n {\n \"type\": \"data.input\",\n \"path\": \"query.region_name\"\n },\n \"**\"\n ]\n }\n }\n ]\n },\n {\n \"type\": \"view.list\",\n \"direction\": \"horizontal\",\n \"items\": [\n {\n \"type\": \"view.text\",\n \"content\": \"\u0423\u0441\u0442\u0440\u043e\u0439\u0441\u0442\u0432\u043e:\"\n },\n {\n \"type\": \"view.markdown\",\n \"content\": {\n \"type\": \"helper.join\",\n \"items\": [\n \"**\",\n {\n \"type\": \"data.input\",\n \"path\": \"query.device\"\n },\n \"**\"\n ]\n }\n }\n ]\n }\n ]\n },\n {\n \"type\": \"view.list\",\n \"direction\": \"horizontal\",\n \"items\": [\n {\n \"type\": \"view.action-button\",\n \"label\": \"\u0418\u0441\u043a\u0430\u0442\u044c \u0432 \u042f\u043d\u0434\u0435\u043a\u0441\u0435\",\n \"action\": {\n \"type\": \"action.open-link\",\n \"payload\": {\n \"type\": \"helper.search-query\",\n \"query\": {\n \"type\": \"data.input\",\n \"path\": \"query.query_text\"\n },\n \"engine\": \"yandex\"\n }\n }\n },\n {\n \"type\": \"view.action-button\",\n \"label\": \"\u0418\u0441\u043a\u0430\u0442\u044c \u0432 Google\",\n \"action\": {\n \"type\": \"action.open-link\",\n \"payload\": {\n \"type\": \"helper.search-query\",\n \"query\": {\n \"type\": \"data.input\",\n \"path\": \"query.query_text\"\n },\n \"engine\": \"google\"\n }\n }\n }\n ]\n }\n ]\n }\n },\n \"plugins\": [\n {\n \"type\": \"plugin.trigger\",\n \"onChangeOf\": {\n \"type\": \"data.internal\",\n \"path\": \"query_result\"\n },\n \"action\": {\n 
\"type\": \"action.set\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"query_result\"\n },\n \"payload\": {\n \"type\": \"helper.switch\",\n \"cases\": [\n {\n \"condition\": {\n \"type\": \"condition.equals\",\n \"to\": {\n \"$ref\": \"vars.product\"\n },\n \"data\": {\n \"type\": \"data.internal\",\n \"path\": \"query_result\"\n }\n },\n \"result\": {\n \"$ref\": \"vars.product\"\n }\n },\n {\n \"condition\": {\n \"type\": \"condition.equals\",\n \"to\": {\n \"$ref\": \"vars.service\"\n },\n \"data\": {\n \"type\": \"data.internal\",\n \"path\": \"query_result\"\n }\n },\n \"result\": {\n \"$ref\": \"vars.service\"\n }\n }\n ],\n \"default\": {\n \"$ref\": \"vars.other\"\n }\n }\n }\n },\n {\n \"type\": \"plugin.trigger\",\n \"onChangeOf\": {\n \"type\": \"data.internal\",\n \"path\": \"query_result\"\n },\n \"action\": {\n \"type\": \"action.set\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"url_result\"\n },\n \"payload\": {\n \"$empty\": true\n }\n }\n },\n {\n \"q\": {\n \"type\": \"action.open-link\",\n \"payload\": {\n \"type\": \"helper.search-query\",\n \"query\": {\n \"type\": \"data.input\",\n \"path\": \"query.query_text\"\n },\n \"engine\": \"yandex\"\n }\n },\n \"w\": {\n \"type\": \"action.open-link\",\n \"payload\": {\n \"type\": \"helper.search-query\",\n \"query\": {\n \"type\": \"data.input\",\n \"path\": \"query.query_text\"\n },\n \"engine\": \"google\"\n }\n },\n \"type\": \"plugin.hotkeys\"\n }\n ]\n}",
"lock": {
"action.open-link": "1.0.1",
"action.set": "1.0.1",
"condition.equals": "1.1.0",
"condition.required": "1.1.0",
"core": "1.3.1",
"field.radio-group": "1.1.0",
"helper.if": "1.0.1",
"helper.join": "1.0.1",
"helper.search-query": "1.1.1",
"helper.switch": "1.0.2",
"layout.bars": "1.0.0",
"layout.sidebar": "1.0.0",
"plugin.hotkeys": "1.0.2",
"plugin.trigger": "1.0.0",
"view.action-button": "1.0.0",
"view.image": "1.1.1",
"view.link": "1.1.0",
"view.list": "1.0.0",
"view.markdown": "1.0.1",
"view.text": "1.0.0"
},
"settings": {
"showFinish": True,
"showFullscreen": True,
"showInstructions": True,
"showMessage": True,
"showReward": True,
"showSkip": True,
"showSubmit": True,
"showTimer": True,
"showTitle": True
},
"type": "tb",
"assets": {
"script_urls": [
"https://yastat.net/s3/tb/launcher.template/_/master.825664685.2020-11-19.6390a7cf.js"
],
"style_urls": [
"https://yastat.net/s3/tb/launcher.template/_/master.825664685.2020-11-19.6390a7cf.css"
]
},
"markup": "",
"styles": "",
"script": "window.configureTemplate({\n \"vars\": {\n \"product\": \"PRODUCT\",\n \"service\": \"SERVICE\",\n \"other\": \"OTHER\",\n \"product_not_available\": \"PRODUCT_NOT_AVAILABLE\",\n \"product_available\": \"PRODUCT_AVAILABLE\",\n \"service_not_available\": \"SERVICE_NOT_AVAILABLE\",\n \"service_available\": \"SERVICE_AVAILABLE\",\n \"other_not_available\": \"OTHER_NOT_AVAILABLE\",\n \"other_available\": \"OTHER_AVAILABLE\",\n \"tickets\": \"TICKETS\",\n \"non_purchase\": \"NON_PURCHASE\"\n },\n \"view\": {\n \"type\": \"layout.bars\",\n \"content\": {\n \"type\": \"layout.sidebar\",\n \"content\": {\n \"type\": \"layout.bars\",\n \"content\": {\n \"type\": \"view.image\",\n \"scrollable\": true,\n \"url\": {\n \"type\": \"data.input\",\n \"path\": \"takenScreen\"\n }\n },\n \"barAfter\": {\n \"type\": \"view.link\",\n \"url\": {\n \"type\": \"data.input\",\n \"path\": \"url\"\n }\n }\n },\n \"minWidth\": 1000,\n \"controlsWidth\": 350,\n \"controls\": {\n \"type\": \"view.list\",\n \"items\": [\n {\n \"type\": \"field.radio-group\",\n \"label\": \"\u0412\u044b\u0431\u0435\u0440\u0438\u0442\u0435 \u0442\u0438\u043f \u0437\u0430\u043f\u0440\u043e\u0441\u0430:\",\n \"options\": [\n {\n \"label\": \"\u0412 \u0437\u0430\u043f\u0440\u043e\u0441\u0435 \u0442\u043e\u0432\u0430\u0440\",\n \"value\": {\n \"$ref\": \"vars.product\"\n }\n },\n {\n \"label\": \"\u0412 \u0437\u0430\u043f\u0440\u043e\u0441\u0435 \u0443\u0441\u043b\u0443\u0433\u0430\",\n \"value\": {\n \"$ref\": \"vars.service\"\n }\n },\n {\n \"label\": \"\u0412 \u0437\u0430\u043f\u0440\u043e\u0441\u0435 \u043e\u0442\u0435\u043b\u044c/\u0431\u0438\u043b\u0435\u0442\u044b/\u0442\u0443\u0440/...\",\n \"value\": {\n \"$ref\": \"vars.tickets\"\n }\n },\n {\n \"label\": \"\u0414\u0440\u0443\u0433\u043e\u0439 \u0437\u0430\u043f\u0440\u043e\u0441\",\n \"value\": {\n \"$ref\": \"vars.non_purchase\"\n }\n }\n ],\n \"data\": {\n \"type\": \"data.internal\",\n \"path\": \"query_result\"\n },\n \"validation\": {\n 
\"type\": \"condition.required\",\n \"data\": \"data.output\"\n }\n },\n {\n \"type\": \"helper.if\",\n \"condition\": {\n \"type\": \"condition.equals\",\n \"data\": {\n \"type\": \"data.internal\",\n \"path\": \"query_result\"\n },\n \"to\": {\n \"$ref\": \"vars.product\"\n }\n },\n \"then\": {\n \"type\": \"field.radio-group\",\n \"label\": \"\u0412\u044b\u0431\u0435\u0440\u0438\u0442\u0435 \u0442\u0438\u043f \u0441\u0442\u0440\u0430\u043d\u0438\u0446\u044b:\",\n \"options\": [\n {\n \"label\": \"\u041d\u0435\u043b\u044c\u0437\u044f \u043a\u0443\u043f\u0438\u0442\u044c/\u043d\u0435\u0442 \u0432 \u043d\u0430\u043b\u0438\u0447\u0438\u0438/\u043f\u043e\u043a\u0443\u043f\u043a\u0430 - \u043f\u0435\u0440\u0435\u0445\u043e\u0434 \u043d\u0430 \u0441\u0442\u043e\u0440\u043e\u043d\u043d\u0438\u0439 \u0441\u0430\u0439\u0442/...\",\n \"value\": {\n \"$ref\": \"vars.product_not_available\"\n }\n },\n {\n \"label\": \"\u041c\u043e\u0436\u043d\u043e \u043a\u0443\u043f\u0438\u0442\u044c/\u0437\u0430\u043a\u0430\u0437\u0430\u0442\u044c/\u043f\u043e\u043b\u043e\u0436\u0438\u0442\u044c \u0432 \u043a\u043e\u0440\u0437\u0438\u043d\u0443/... 
\u043d\u0430 \u044d\u0442\u043e\u0439 \u0441\u0442\u0440\u0430\u043d\u0438\u0446\u0435\",\n \"value\": {\n \"$ref\": \"vars.product_available\"\n }\n }\n ],\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"url_result\"\n },\n \"validation\": {\n \"type\": \"condition.required\"\n }\n }\n },\n {\n \"type\": \"helper.if\",\n \"condition\": {\n \"type\": \"condition.equals\",\n \"data\": {\n \"type\": \"data.internal\",\n \"path\": \"query_result\"\n },\n \"to\": {\n \"$ref\": \"vars.service\"\n }\n },\n \"then\": {\n \"type\": \"field.radio-group\",\n \"label\": \"\u0412\u044b\u0431\u0435\u0440\u0438\u0442\u0435 \u0442\u0438\u043f \u0441\u0442\u0440\u0430\u043d\u0438\u0446\u044b:\",\n \"options\": [\n {\n \"label\": \"\u041d\u0435 \u0438\u043c\u0435\u0435\u0442 \u043e\u0442\u043d\u043e\u0448\u0435\u043d\u0438\u044f \u043a \u0437\u0430\u043a\u0430\u0437\u0443 \u0443\u0441\u043b\u0443\u0433\u0438\",\n \"value\": {\n \"$ref\": \"vars.service_not_available\"\n }\n },\n {\n \"label\": \"\u041c\u043e\u0436\u043d\u043e \u043f\u0435\u0440\u0435\u0439\u0442\u0438 \u043a \u0437\u0430\u043a\u0430\u0437\u0443 \u0443\u0441\u043b\u0443\u0433\u0438\",\n \"value\": {\n \"$ref\": \"vars.service_available\"\n }\n }\n ],\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"url_result\"\n },\n \"validation\": {\n \"type\": \"condition.required\"\n }\n }\n },\n {\n \"type\": \"helper.if\",\n \"condition\": {\n \"type\": \"condition.equals\",\n \"data\": {\n \"type\": \"data.internal\",\n \"path\": \"query_result\"\n },\n \"to\": {\n \"$ref\": \"vars.tickets\"\n }\n },\n \"then\": {\n \"type\": \"field.radio-group\",\n \"label\": \"\u0412\u044b\u0431\u0435\u0440\u0438\u0442\u0435 \u0442\u0438\u043f \u0441\u0442\u0440\u0430\u043d\u0438\u0446\u044b:\",\n \"options\": [\n {\n \"label\": \"\u041d\u0435 \u0438\u043c\u0435\u0435\u0442 \u043e\u0442\u043d\u043e\u0448\u0435\u043d\u0438\u044f \u043a \u0431\u0440\u043e\u043d\u0438\u0440\u043e\u0432\u0430\u043d\u0438\u044e 
\u043e\u0442\u0435\u043b\u044f/\u0431\u0438\u043b\u0435\u0442\u0430/\u0442\u0443\u0440\u0430/...\",\n \"value\": {\n \"$ref\": \"vars.other_not_available\"\n }\n },\n {\n \"label\": \"\u041c\u043e\u0436\u043d\u043e \u043f\u0435\u0440\u0435\u0439\u0442\u0438 \u043a \u0431\u0440\u043e\u043d\u0438\u0440\u043e\u0432\u0430\u043d\u0438\u044e \u043e\u0442\u0435\u043b\u044f/\u0431\u0438\u043b\u0435\u0442\u0430/\u0442\u0443\u0440\u0430/...\",\n \"value\": {\n \"$ref\": \"vars.other_available\"\n }\n }\n ],\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"url_result\"\n },\n \"validation\": {\n \"type\": \"condition.required\"\n }\n }\n },\n {\n \"type\": \"helper.if\",\n \"condition\": {\n \"type\": \"condition.equals\",\n \"data\": {\n \"type\": \"data.internal\",\n \"path\": \"query_result\"\n },\n \"to\": {\n \"$ref\": \"vars.non_purchase\"\n }\n },\n \"then\": {\n \"type\": \"field.radio-group\",\n \"label\": \"\u0412\u044b\u0431\u0435\u0440\u0438\u0442\u0435 \u0442\u0438\u043f \u0441\u0442\u0440\u0430\u043d\u0438\u0446\u044b:\",\n \"options\": [\n {\n \"label\": \"\u0417\u0430\u043f\u0440\u043e\u0441 \u043d\u0435 \u043f\u043e\u0434\u0440\u0430\u0437\u0443\u043c\u0435\u0432\u0430\u0435\u0442 \u043f\u043e\u043a\u0443\u043f\u043a\u0443/\u043d\u0435\u043b\u044c\u0437\u044f \u043a\u0443\u043f\u0438\u0442\u044c/...\",\n \"value\": {\n \"$ref\": \"vars.other_not_available\"\n }\n },\n {\n \"label\": \"\u041c\u043e\u0436\u043d\u043e \u043a\u0443\u043f\u0438\u0442\u044c\",\n \"value\": {\n \"$ref\": \"vars.other_available\"\n }\n }\n ],\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"url_result\"\n },\n \"validation\": {\n \"type\": \"condition.required\"\n }\n }\n }\n ]\n }\n },\n \"barBefore\": {\n \"type\": \"view.list\",\n \"items\": [\n {\n \"type\": \"view.list\",\n \"direction\": \"horizontal\",\n \"items\": [\n {\n \"type\": \"view.list\",\n \"direction\": \"horizontal\",\n \"items\": [\n {\n \"type\": \"view.text\",\n \"content\": 
\"\u0417\u0430\u043f\u0440\u043e\u0441:\"\n },\n {\n \"type\": \"view.markdown\",\n \"content\": {\n \"type\": \"helper.join\",\n \"items\": [\n \"**\",\n {\n \"type\": \"data.input\",\n \"path\": \"query.query_text\"\n },\n \"**\"\n ]\n }\n }\n ]\n },\n {\n \"type\": \"view.list\",\n \"direction\": \"horizontal\",\n \"items\": [\n {\n \"type\": \"view.text\",\n \"content\": \"\u0420\u0435\u0433\u0438\u043e\u043d:\"\n },\n {\n \"type\": \"view.markdown\",\n \"content\": {\n \"type\": \"helper.join\",\n \"items\": [\n \"**\",\n {\n \"type\": \"data.input\",\n \"path\": \"query.region_name\"\n },\n \"**\"\n ]\n }\n }\n ]\n },\n {\n \"type\": \"view.list\",\n \"direction\": \"horizontal\",\n \"items\": [\n {\n \"type\": \"view.text\",\n \"content\": \"\u0423\u0441\u0442\u0440\u043e\u0439\u0441\u0442\u0432\u043e:\"\n },\n {\n \"type\": \"view.markdown\",\n \"content\": {\n \"type\": \"helper.join\",\n \"items\": [\n \"**\",\n {\n \"type\": \"data.input\",\n \"path\": \"query.device\"\n },\n \"**\"\n ]\n }\n }\n ]\n }\n ]\n },\n {\n \"type\": \"view.list\",\n \"direction\": \"horizontal\",\n \"items\": [\n {\n \"type\": \"view.action-button\",\n \"label\": \"\u0418\u0441\u043a\u0430\u0442\u044c \u0432 \u042f\u043d\u0434\u0435\u043a\u0441\u0435\",\n \"action\": {\n \"type\": \"action.open-link\",\n \"payload\": {\n \"type\": \"helper.search-query\",\n \"query\": {\n \"type\": \"data.input\",\n \"path\": \"query.query_text\"\n },\n \"engine\": \"yandex\"\n }\n }\n },\n {\n \"type\": \"view.action-button\",\n \"label\": \"\u0418\u0441\u043a\u0430\u0442\u044c \u0432 Google\",\n \"action\": {\n \"type\": \"action.open-link\",\n \"payload\": {\n \"type\": \"helper.search-query\",\n \"query\": {\n \"type\": \"data.input\",\n \"path\": \"query.query_text\"\n },\n \"engine\": \"google\"\n }\n }\n }\n ]\n }\n ]\n }\n },\n \"plugins\": [\n {\n \"type\": \"plugin.trigger\",\n \"onChangeOf\": {\n \"type\": \"data.internal\",\n \"path\": \"query_result\"\n },\n \"action\": {\n 
\"type\": \"action.set\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"query_result\"\n },\n \"payload\": {\n \"type\": \"helper.switch\",\n \"cases\": [\n {\n \"condition\": {\n \"type\": \"condition.equals\",\n \"to\": {\n \"$ref\": \"vars.product\"\n },\n \"data\": {\n \"type\": \"data.internal\",\n \"path\": \"query_result\"\n }\n },\n \"result\": {\n \"$ref\": \"vars.product\"\n }\n },\n {\n \"condition\": {\n \"type\": \"condition.equals\",\n \"to\": {\n \"$ref\": \"vars.service\"\n },\n \"data\": {\n \"type\": \"data.internal\",\n \"path\": \"query_result\"\n }\n },\n \"result\": {\n \"$ref\": \"vars.service\"\n }\n }\n ],\n \"default\": {\n \"$ref\": \"vars.other\"\n }\n }\n }\n },\n {\n \"type\": \"plugin.trigger\",\n \"onChangeOf\": {\n \"type\": \"data.internal\",\n \"path\": \"query_result\"\n },\n \"action\": {\n \"type\": \"action.set\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"url_result\"\n },\n \"payload\": {\n \"$empty\": true\n }\n }\n },\n {\n \"q\": {\n \"type\": \"action.open-link\",\n \"payload\": {\n \"type\": \"helper.search-query\",\n \"query\": {\n \"type\": \"data.input\",\n \"path\": \"query.query_text\"\n },\n \"engine\": \"yandex\"\n }\n },\n \"w\": {\n \"type\": \"action.open-link\",\n \"payload\": {\n \"type\": \"helper.search-query\",\n \"query\": {\n \"type\": \"data.input\",\n \"path\": \"query.query_text\"\n },\n \"engine\": \"google\"\n }\n },\n \"type\": \"plugin.hotkeys\"\n }\n ]\n}, 
{\"action.open-link\":\"1.0.1\",\"action.set\":\"1.0.1\",\"condition.equals\":\"1.1.0\",\"condition.required\":\"1.1.0\",\"core\":\"1.3.1\",\"field.radio-group\":\"1.1.0\",\"helper.if\":\"1.0.1\",\"helper.join\":\"1.0.1\",\"helper.search-query\":\"1.1.1\",\"helper.switch\":\"1.0.2\",\"layout.bars\":\"1.0.0\",\"layout.sidebar\":\"1.0.0\",\"plugin.hotkeys\":\"1.0.2\",\"plugin.trigger\":\"1.0.0\",\"view.action-button\":\"1.0.0\",\"view.image\":\"1.1.1\",\"view.link\":\"1.1.0\",\"view.list\":\"1.0.0\",\"view.markdown\":\"1.0.1\",\"view.text\":\"1.0.0\"}, 'https://tb.yandex.net/registry2')"
},
{
"config": "{\n \"view\": {\n \"type\": \"view.list\",\n \"items\": [\n {\n \"type\": \"view.markdown\",\n \"content\": {\n \"type\": \"data.input\",\n \"path\": \"text\"\n }\n },\n {\n \"type\": \"view.list\",\n \"items\": {\n \"type\": \"helper.transform\",\n \"items\": {\n \"type\": \"data.input\",\n \"path\": \"quiz\"\n },\n \"into\": {\n \"type\": \"view.group\",\n \"content\": {\n \"type\": \"view.list\",\n \"items\": [\n {\n \"type\": \"view.markdown\",\n \"content\": {\n \"type\": \"data.local\",\n \"path\": \"item.question\"\n }\n },\n {\n \"type\": \"field.radio-group\",\n \"options\": {\n \"type\": \"data.local\",\n \"path\": \"item.options\"\n },\n \"validation\": {\n \"type\": \"condition.required\",\n \"hint\": \"\u0412\u044b\u0431\u0435\u0440\u0438\u0442\u0435 \u0432\u0430\u0440\u0438\u0430\u043d\u0442 \u043e\u0442\u0432\u0435\u0442\u0430\"\n },\n \"data\": {\n \"type\": \"data.output\",\n \"path\": {\n \"type\": \"helper.join\",\n \"items\": [\n \"answers\",\n {\n \"type\": \"data.local\",\n \"path\": \"index\"\n }\n ],\n \"by\": \".\"\n }\n }\n }\n ]\n }\n }\n }\n }\n ]\n },\n \"plugins\": [\n {\n \"type\": \"plugin.toloka\",\n \"layout\": {\n \"kind\": \"pager\",\n \"taskWidth\": 730\n }\n }\n ]\n}",
"lock": {
"condition.required": "1.0.1",
"core": "1.2.3",
"field.radio-group": "1.0.0",
"helper.join": "1.0.0",
"helper.transform": "1.0.1",
"plugin.toloka": "1.0.5",
"view.group": "1.0.0",
"view.list": "1.0.0",
"view.markdown": "1.0.1"
},
"settings": {
"showFinish": True,
"showFullscreen": True,
"showInstructions": True,
"showMessage": True,
"showReward": True,
"showSkip": True,
"showSubmit": True,
"showTimer": True,
"showTitle": True
},
"type": "tb",
"assets": {
"script_urls": [
"https://yastat.net/s3/tb/launcher.template/_/master.825664685.2020-11-19.6390a7cf.js"
],
"style_urls": [
"https://yastat.net/s3/tb/launcher.template/_/master.825664685.2020-11-19.6390a7cf.css"
]
},
"markup": "",
"styles": "",
"script": "window.configureTemplate({\n \"view\": {\n \"type\": \"view.list\",\n \"items\": [\n {\n \"type\": \"view.markdown\",\n \"content\": {\n \"type\": \"data.input\",\n \"path\": \"text\"\n }\n },\n {\n \"type\": \"view.list\",\n \"items\": {\n \"type\": \"helper.transform\",\n \"items\": {\n \"type\": \"data.input\",\n \"path\": \"quiz\"\n },\n \"into\": {\n \"type\": \"view.group\",\n \"content\": {\n \"type\": \"view.list\",\n \"items\": [\n {\n \"type\": \"view.markdown\",\n \"content\": {\n \"type\": \"data.local\",\n \"path\": \"item.question\"\n }\n },\n {\n \"type\": \"field.radio-group\",\n \"options\": {\n \"type\": \"data.local\",\n \"path\": \"item.options\"\n },\n \"validation\": {\n \"type\": \"condition.required\",\n \"hint\": \"\u0412\u044b\u0431\u0435\u0440\u0438\u0442\u0435 \u0432\u0430\u0440\u0438\u0430\u043d\u0442 \u043e\u0442\u0432\u0435\u0442\u0430\"\n },\n \"data\": {\n \"type\": \"data.output\",\n \"path\": {\n \"type\": \"helper.join\",\n \"items\": [\n \"answers\",\n {\n \"type\": \"data.local\",\n \"path\": \"index\"\n }\n ],\n \"by\": \".\"\n }\n }\n }\n ]\n }\n }\n }\n }\n ]\n },\n \"plugins\": [\n {\n \"type\": \"plugin.toloka\",\n \"layout\": {\n \"kind\": \"pager\",\n \"taskWidth\": 730\n }\n }\n ]\n}, {\"condition.required\":\"1.0.1\",\"core\":\"1.2.3\",\"field.radio-group\":\"1.0.0\",\"helper.join\":\"1.0.0\",\"helper.transform\":\"1.0.1\",\"plugin.toloka\":\"1.0.5\",\"view.group\":\"1.0.0\",\"view.list\":\"1.0.0\",\"view.markdown\":\"1.0.1\"}, 'https://tb.yandex.net/registry2')"
},
{
"config": "{\n \"vars\": {\n \"author\": \"sukhodolskaya\",\n \"task\": \"old\",\n \"top_text\": {\n \"show\": \"yes\",\n \"message\": \"\u0414\u043e\u0431\u0430\u0432\u043b\u0435\u043d hotkey 3 \u043d\u0430 \u0440\u0430\u0437\u0432\u0435\u0440\u0442\u044b\u0432\u0430\u043d\u0438\u0435/\u0441\u0432\u0435\u0440\u0442\u044b\u0432\u0430\u043d\u0438\u0435 \u043a\u0430\u0440\u0442\u0438\u043d\u043a\u0438!\"\n }\n },\n \"view\": {\n \"type\": \"layout.columns\",\n \"items\": [\n {\n \"type\": \"view.image\",\n \"url\": {\n \"type\": \"data.input\",\n \"path\": \"image\"\n }\n },\n {\n \"type\": \"layout.columns\",\n \"ratio\": [\n 1,\n 10\n ],\n \"minWidth\": 0,\n \"items\": [\n {\n \"type\": \"field.button-radio\",\n \"label\": \"\u0414\u0430\",\n \"validation\": {\n \"type\": \"condition.required\"\n },\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"estimation\"\n },\n \"valueToSet\": \"OK\"\n },\n {\n \"type\": \"view.list\",\n \"items\": [\n {\n \"type\": \"field.button-radio\",\n \"label\": \"\u041d\u0435\u0442\",\n \"validation\": {\n \"type\": \"condition.required\"\n },\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"estimation\"\n },\n \"valueToSet\": \"BAD\"\n },\n {\n \"type\": \"field.checkbox-group\",\n \"validation\": {\n \"type\": \"helper.if\",\n \"condition\": {\n \"type\": \"condition.equals\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"estimation\"\n },\n \"to\": \"BAD\"\n },\n \"then\": {\n \"type\": \"condition.required\"\n }\n },\n \"options\": [\n {\n \"label\": \"\u0422\u043e\u0432\u0430\u0440\u044b \u0434\u043b\u044f \u0432\u0437\u0440\u043e\u0441\u043b\u044b\u0445\",\n \"value\": \"porno\",\n \"hint\": \"- \u0421\u0435\u043a\u0441-\u0438\u0433\u0440\u0443\u0448\u043a\u0438 (\u0432\u0438\u0431\u0440\u0430\u0442\u043e\u0440\u044b, \u043d\u0430\u0440\u0443\u0447\u043d\u0438\u043a\u0438, \u0430\u0442\u0440\u0438\u0431\u0443\u0442\u0438\u043a\u0430 \u0411\u0414\u0421\u041c \u0438 \u0442.\u0434.)\\n\"\n },\n {\n 
\"label\": \"\u042d\u0440\u043e\u0442\u0438\u043a\u0430, \u043f\u043e\u0440\u043d\u043e\",\n \"value\": \"porno_grey_zone\",\n \"hint\": \"- \u041e\u0431\u043d\u0430\u0436\u0435\u043d\u043a\u0430/\u043f\u043e\u0441\u0442\u0435\u043b\u044c\u043d\u044b\u0435 \u0441\u0446\u0435\u043d\u044b\\n- \u041d\u0430 \u0438\u0437\u043e\u0431\u0440\u0430\u0436\u0435\u043d\u0438\u0438 \u0430\u043a\u0446\u0435\u043d\u0442 \u043d\u0430 \u0433\u0440\u0443\u0434\u044c/\u043f\u043e\u043f\u0443/\u0433\u0435\u043d\u0438\u0442\u0430\u043b\u0438\u0438\\n- \u041d\u0430\u0440\u0438\u0441\u043e\u0432\u0430\u043d\u043d\u044b\u0435 \u043f\u0435\u0440\u0441\u043e\u043d\u0430\u0436\u0438 (\u043d\u0430\u043f\u0440\u0438\u043c\u0435\u0440, \u0430\u043d\u0438\u043c\u044d) \u0441 \u043d\u0430\u043c\u0435\u043a\u043e\u043c \u043d\u0430 \u044d\u0440\u043e\u0442\u0438\u043a\u0443/\u043f\u043e\u0440\u043d\u043e\\n- \u042d\u0440\u043e\u0442\u0438\u0447\u0435\u0441\u043a\u043e\u0435 \u0431\u0435\u043b\u044c\u0435 \u043d\u0430 \u043c\u043e\u0434\u0435\u043b\u044f\u0445 \u043e\u0431\u043e\u0435\u0433\u043e \u043f\u043e\u043b\u0430 (\u0441\u0442\u0440\u0438\u043d\u0433\u0438, \u043a\u0440\u0443\u0436\u0435\u0432\u043d\u043e\u0435, \u043f\u0440\u043e\u0441\u0432\u0435\u0447\u0438\u0432\u0430\u044e\u0449\u0435\u0435)\"\n },\n {\n \"label\": \"\u0410\u043b\u043a\u043e\u0433\u043e\u043b\u044c, \u043a\u0443\u0440\u0435\u043d\u0438\u0435, \u043d\u0430\u0440\u043a\u043e\u0442\u0438\u043a\u0438\",\n \"value\": \"illegal\",\n \"hint\": \"- \u0420\u0435\u043a\u043b\u0430\u043c\u0438\u0440\u0443\u0435\u0442\u0441\u044f \u0430\u043b\u043a\u043e\u0433\u043e\u043b\u044c\u043d\u0430\u044f \u043f\u0440\u043e\u0434\u0443\u043a\u0446\u0438\u044f/\u0435\u0435 \u0443\u043f\u043e\u0442\u0440\u0435\u0431\u043b\u0435\u043d\u0438\u0435\\n- \u0420\u0435\u043a\u043b\u0430\u043c\u0438\u0440\u0443\u0435\u0442\u0441\u044f \u043a\u0443\u0440\u0435\u043d\u0438\u0435/\u043f\u0430\u0440\u0435\u043d\u0438\u0435 \u0418 
\u0441\u0432\u044f\u0437\u0430\u043d\u043d\u0430\u044f \u0441 \u043d\u0438\u043c\u0438 \u0430\u0442\u0440\u0438\u0431\u0443\u0442\u0438\u043a\u0430 (\u0442\u0430\u0431\u0430\u0447\u043d\u044b\u0435 \u0438\u0437\u0434\u0435\u043b\u0438\u044f, \u044d\u043b\u0435\u043a\u0442\u0440\u043e\u043d\u043d\u044b\u0435 \u0441\u0438\u0433\u0430\u0440\u0435\u0442\u044b, \u043a\u0430\u043b\u044c\u044f\u043d\u044b \u0438 \u0442.\u0434.)\\n- \u0420\u0435\u043a\u043b\u0430\u043c\u0438\u0440\u0443\u044e\u0442\u0441\u044f \u043d\u0430\u0440\u043a\u043e\u0442\u0438\u0447\u0435\u0441\u043a\u0438\u0435 \u0432\u0435\u0449\u0435\u0441\u0442\u0432\u0430/\u0438\u0445 \u0443\u043f\u043e\u0442\u0440\u0435\u0431\u043b\u0435\u043d\u0438\u0435\"\n },\n {\n \"label\": \"\u041a\u0430\u0437\u0438\u043d\u043e, \u0430\u0437\u0430\u0440\u0442\u043d\u044b\u0435 \u0438\u0433\u0440\u044b (\u043d\u0430 \u0434\u0435\u043d\u044c\u0433\u0438, \u0430 \u041d\u0415 \u043d\u0430\u0441\u0442\u043e\u043b\u044c\u043d\u044b\u0435 \u0438\u0433\u0440\u044b)\",\n \"value\": \"casino\",\n \"hint\": \"- \u041b\u043e\u0433\u043e\u0442\u0438\u043f\u044b \u043a\u0430\u0437\u0438\u043d\u043e (Vavada, \u0412\u0443\u043b\u043a\u0430\u043d, \u0410\u0437\u0438\u043d\u043e 777)\\n- \u0418\u0437\u043e\u0431\u0440\u0430\u0436\u0435\u043d\u044b \u0438\u0433\u043e\u0440\u043d\u044b\u0435 \u0441\u0442\u043e\u043b\u044b, \u0438\u0433\u0440\u043e\u0432\u044b\u0435 \u0430\u0432\u0442\u043e\u043c\u0430\u0442\u044b, \u043f\u0440\u0438\u043b\u043e\u0436\u0435\u043d\u0438\u044f \u0441 \u043e\u043d\u043b\u0430\u0439\u043d-\u043a\u0430\u0437\u0438\u043d\u043e/\u0430\u0437\u0430\u0440\u0442\u043d\u044b\u043c\u0438 \u0438\u0433\u0440\u0430\u043c\u0438 \u0438 \u0437\u0430\u0441\u0442\u0430\u0432\u043a\u0438 \u0441 \u0438\u0433\u0440\u043e\u0432\u044b\u0445 \u0430\u0432\u0442\u043e\u043c\u0430\u0442\u043e\u0432\\n- \u041f\u0440\u0438\u0437\u044b\u0432 \u043a \u0438\u0433\u0440\u0435 \u043d\u0430 \u0434\u0435\u043d\u044c\u0433\u0438\"\n },\n {\n 
\"label\": \"\u0414\u0435\u043d\u044c\u0433\u0438, \u0431\u0430\u043d\u043a\u043e\u0432\u0441\u043a\u0438\u0435 \u043a\u0430\u0440\u0442\u044b, \u0440\u0435\u043a\u043b\u0430\u043c\u0430 \u0437\u0430\u0440\u0430\u0431\u043e\u0442\u043a\u0430/\u0437\u0430\u0439\u043c\u043e\u0432\",\n \"value\": \"casino_grey_zone\",\n \"hint\": \"- \u0418\u0437\u043e\u0431\u0440\u0430\u0436\u0435\u043d\u0438\u0435 \u043a\u0443\u043f\u044e\u0440 \u0438\u043b\u0438 \u0431\u0430\u043d\u043a\u043e\u0432\u0441\u043a\u0438\u0445 \u043a\u0430\u0440\u0442 (\u043a\u043b\u044e\u0447\u0435\u0439 \u043e\u0442 \u043c\u0430\u0448\u0438\u043d \u0438 \u0442.\u0434.) \u0431\u0435\u0437 \u043f\u0440\u044f\u043c\u043e\u0439 \u0441\u0432\u044f\u0437\u0438 \u0441 \u043a\u0430\u0437\u0438\u043d\u043e\\n- \u0420\u0435\u043a\u043b\u0430\u043c\u0430 \u0437\u0430\u0440\u0430\u0431\u043e\u0442\u043a\u0430/\u0437\u0430\u0439\u043c\u043e\u0432\"\n },\n {\n \"label\": \"\u041e\u0440\u0443\u0436\u0438\u0435\",\n \"value\": \"guns\",\n \"hint\": \"- \u0425\u043e\u043b\u043e\u0434\u043d\u043e\u0435 \u043e\u0440\u0443\u0436\u0438\u0435, \u043d\u0430 \u0438\u0437\u043e\u0431\u0440\u0430\u0436\u0435\u043d\u0438\u0438 \u0412\u0418\u0414\u041d\u041e \u0440\u0435\u0436\u0443\u0449\u0443\u044e \u0447\u0430\u0441\u0442\u044c\\n- \u041e\u0433\u043d\u0435\u0441\u0442\u0440\u0435\u043b\u044c\u043d\u043e\u0435, \u043f\u043d\u0435\u0432\u043c\u0430\u0442\u0438\u0447\u0435\u0441\u043a\u043e\u0435 \u043e\u0440\u0443\u0436\u0438\u0435 \u0411\u0415\u0417 \u0443\u043a\u0430\u0437\u0430\u043d\u0438\u044f \u043d\u0430 \u0438\u0437\u043e\u0431\u0440\u0430\u0436\u0435\u043d\u0438\u0438 \u0435\u0433\u043e \u043c\u043e\u0434\u0435\u043b\u0438\\n- \u041c\u0435\u0442\u0430\u0442\u0435\u043b\u044c\u043d\u043e\u0435 \u043e\u0440\u0443\u0436\u0438\u0435\\n- \u041c\u0438\u043d\u044b \u0438 \u0433\u0440\u0430\u043d\u0430\u0442\u044b\\n- \u0420\u0430\u043a\u0435\u0442\u043d\u043e\u0435/\u0442\u043e\u0440\u043f\u0435\u0434\u043d\u043e\u0435 
\u043e\u0440\u0443\u0436\u0438\u0435\\n- \u042d\u043b\u0435\u043a\u0442\u0440\u043e\u0448\u043e\u043a\u0435\u0440\u044b\"\n },\n {\n \"label\": \"\u041f\u0443\u0433\u0430\u044e\u0449\u0435\u0435/\u0442\u0440\u0430\u0433\u0438\u0447\u0435\u0441\u043a\u043e\u0435/\u043e\u0442\u0432\u0440\u0430\u0442\u0438\u0442\u0435\u043b\u044c\u043d\u043e\u0435 \u0438\u043b\u0438 \u043e\u0441\u043a\u043e\u0440\u0431\u0438\u0442\u0435\u043b\u044c\u043d\u043e\u0435 \u0438\u0437\u043e\u0431\u0440\u0430\u0436\u0435\u043d\u0438\u0435\",\n \"value\": \"yellow\",\n \"hint\": \"- \u041f\u0443\u0433\u0430\u044e\u0449\u0438\u0435 \u0438\u0437\u043e\u0431\u0440\u0430\u0436\u0435\u043d\u0438\u044f: \u043d\u0435\u0447\u0438\u0441\u0442\u044c (\u0411\u0415\u0417 \u043a\u043e\u043d\u0442\u0435\u043a\u0441\u0442\u0430 \u0432 \u0432\u0438\u0434\u0435 \u0444\u0438\u043b\u044c\u043c\u0430/\u043a\u043d\u0438\u0433\u0438/\u043f\u043e\u0441\u0442\u0430\u043d\u043e\u0432\u043a\u0438), \u0441\u0446\u0435\u043d\u044b \u043d\u0430\u0441\u0438\u043b\u0438\u044f, \u0440\u0430\u0441\u0447\u043b\u0435\u043d\u0435\u043d\u043a\u0430, \u0438\u0441\u043a\u0440\u0438\u0432\u043b\u0435\u043d\u043d\u044b\u0435 \u043e\u0442 \u0443\u0436\u0430\u0441\u0430/\u0421\u0418\u041b\u042c\u041d\u041e\u0419 \u0431\u043e\u043b\u0438 \u043b\u0438\u0446\u0430 \u043b\u044e\u0434\u0435\u0439, \u043f\u0440\u0438\u0437\u044b\u0432\u044b \u043a \u0443\u0431\u0438\u0439\u0441\u0442\u0432\u0443/\u0441\u043c\u0435\u0440\u0442\u0438, \u0443\u0433\u0440\u043e\u0437\u044b\\n- \u0422\u0440\u0430\u0433\u0438\u0447\u0435\u0441\u043a\u0438\u0435 \u0438\u0437\u043e\u0431\u0440\u0430\u0436\u0435\u043d\u0438\u044f: \u043a\u0430\u0434\u0440\u044b \u0441 \u043c\u0435\u0441\u0442\u0430 \u0430\u0432\u0430\u0440\u0438\u0439, \u0441\u0432\u044f\u0437\u044c \u0441 \u0441\u0430\u043c\u043e\u0443\u0431\u0438\u0439\u0441\u0442\u0432\u043e\u043c/\u0430\u0431\u043e\u0440\u0442\u043e\u043c, \u0440\u0438\u0442\u0443\u0430\u043b\u044c\u043d\u044b\u0435 
\u0430\u0442\u0440\u0438\u0431\u0443\u0442\u044b\\n- \u041e\u0442\u0432\u0440\u0430\u0442\u0438\u0442\u0435\u043b\u044c\u043d\u044b\u0435 \u0438\u0437\u043e\u0431\u0440\u0430\u0436\u0435\u043d\u0438\u044f - \u0433\u043d\u043e\u0439, \u043f\u0440\u044b\u0449\u0438, \u0432\u043e\u0441\u043f\u0430\u043b\u0435\u043d\u0438\u044f, \u0433\u0440\u044f\u0437\u044c, \u0440\u0430\u0437\u043b\u043e\u0436\u0435\u043d\u0438\u0435, \u043e\u0431\u044a\u0435\u0434\u043a\u0438, \u0440\u0432\u043e\u0442\u0430\\n- \u041e\u0441\u043a\u043e\u0440\u0431\u0438\u0442\u0435\u043b\u044c\u043d\u044b\u0435 \u0438\u0437\u043e\u0431\u0440\u0430\u0436\u0435\u043d\u0438\u044f: \u0441\u043e\u0434\u0435\u0440\u0436\u0430\u0449\u0438\u0435 \u043c\u0430\u0442, \u0436\u0430\u0440\u0433\u043e\u043d, \u0440\u0435\u043a\u043b\u0430\u043c\u0438\u0440\u0443\u044e\u0449\u0438\u0435 \u0440\u0430\u0441\u0438\u0437\u043c/\u0441\u0435\u043a\u0441\u0438\u0437\u043c/\u043d\u0430\u0446\u0438\u0437\u043c, \u043e\u0441\u043a\u043e\u0440\u0431\u043b\u044f\u044e\u0449\u0438\u0435 \u0432\u0435\u0442\u0435\u0440\u0430\u043d\u043e\u0432/\u0438\u043d\u0432\u0430\u043b\u0438\u0434\u043e\u0432\"\n },\n {\n \"label\": \"\u042d\u043b\u0435\u043c\u0435\u043d\u0442\u044b \u043f\u043e\u043b\u044c\u0437\u043e\u0432\u0430\u0442\u0435\u043b\u044c\u0441\u043a\u043e\u0433\u043e \u0438\u043d\u0442\u0435\u0440\u0444\u0435\u0439\u0441\u0430\",\n \"value\": \"interface_elements\",\n \"hint\": \"- \u0418\u043c\u0438\u0442\u0430\u0446\u0438\u044f \u0441\u0442\u0430\u043d\u0434\u0430\u0440\u0442\u043d\u044b\u0445 \u044d\u043b\u0435\u043c\u0435\u043d\u0442\u043e\u0432 \u043f\u043e\u043b\u044c\u0437\u043e\u0432\u0430\u0442\u0435\u043b\u044c\u0441\u043a\u043e\u0433\u043e \u0438\u043d\u0442\u0435\u0440\u0444\u0435\u0439\u0441\u0430 (\u0441\u0442\u0430\u043d\u0434\u0430\u0440\u0442\u043d\u044b\u0435 \u043a\u043d\u043e\u043f\u043a\u0438, \u043a\u043e\u0442\u043e\u0440\u044b\u0435 \u043d\u0435\u043b\u044c\u0437\u044f 
\u043d\u0430\u0436\u0430\u0442\u044c; \u043a\u0443\u0440\u0441\u043e\u0440, \u043a\u043e\u0442\u043e\u0440\u044b\u0439 \u043d\u0435\u043b\u044c\u0437\u044f \u043f\u043e\u0434\u0432\u0438\u0433\u0430\u0442\u044c)\"\n },\n {\n \"label\": \"\u0420\u0435\u043a\u043b\u0430\u043c\u0430 \u043c\u0433\u043d\u043e\u0432\u0435\u043d\u043d\u043e\u0433\u043e/\\\"\u0447\u0443\u0434\u0435\u0441\u043d\u043e\u0433\u043e\\\" \u043f\u043e\u0445\u0443\u0434\u0435\u043d\u0438\u044f, \u0443\u043b\u0443\u0447\u0448\u0435\u043d\u0438\u044f \u043f\u043e\u0442\u0435\u043d\u0446\u0438\u0438/\u0432\u043d\u0435\u0448\u043d\u043e\u0441\u0442\u0438/\u0437\u0434\u043e\u0440\u043e\u0432\u044c\u044f\",\n \"value\": \"yellow_grey_zone\",\n \"hint\": \"- \u041d\u0435 \u043e\u0444\u0438\u0446\u0438\u0430\u043b\u044c\u043d\u044b\u0435 \u043c\u0435\u0434\u0438\u0446\u0438\u043d\u0441\u043a\u0438\u0435 \u0441\u0440\u0435\u0434\u0441\u0442\u0432\u0430, \u043e\u0431\u0435\u0449\u0430\u044e\u0449\u0438\u0435 \u043c\u0433\u043d\u043e\u0432\u0435\u043d\u043d\u043e\u0435/\\\"\u0447\u0443\u0434\u0435\u0441\u043d\u043e\u0435\\\" \u0438\u0441\u0446\u0435\u043b\u0435\u043d\u0438\u0435/\u043f\u043e\u0445\u0443\u0434\u0435\u043d\u0438\u0435/\u0443\u043b\u0443\u0447\u0448\u0435\u043d\u0438\u0435 \u0441\u0430\u043c\u043e\u0447\u0443\u0432\u0441\u0442\u0432\u0438\u044f \u0438\u043b\u0438 \u0432\u043d\u0435\u0448\u043d\u043e\u0441\u0442\u0438: \u0411\u0410\u0414\u044b, \u043d\u0430\u0440\u043e\u0434\u043d\u044b\u0435 \u0441\u0440\u0435\u0434\u0441\u0442\u0432\u0430\\n- \u0420\u0435\u0437\u0443\u043b\u044c\u0442\u0430\u0442\u044b \u0434\u043e \u0438/\u0438\u043b\u0438 \u043f\u043e\u0441\u043b\u0435 \u043c\u0433\u043d\u043e\u0432\u0435\u043d\u043d\u043e\u0433\u043e/\\\"\u0447\u0443\u0434\u0435\u0441\u043d\u043e\u0433\u043e\\\" \u043f\u043e\u0445\u0443\u0434\u0435\u043d\u0438\u044f/\u0443\u043b\u0443\u0447\u0448\u0435\u043d\u0438\u044f 
\u043f\u043e\u0442\u0435\u043d\u0446\u0438\u0438/\u0443\u043b\u0443\u0447\u0448\u0435\u043d\u0438\u044f \u0437\u0434\u043e\u0440\u043e\u0432\u044c\u044f/\u0432\u043d\u0435\u0448\u043d\u043e\u0441\u0442\u0438\"\n },\n {\n \"label\": \"\u041d\u0435\u044f\u0441\u043d\u043e, \u0447\u0442\u043e \u0438\u0437\u043e\u0431\u0440\u0430\u0436\u0435\u043d\u043e\",\n \"value\": \"undefined\",\n \"hint\": \"- \u041d\u0435\u0442 \u0432\u043e\u0437\u043c\u043e\u0436\u043d\u043e\u0441\u0442\u0438 \u0438\u0434\u0435\u043d\u0442\u0438\u0444\u0438\u0446\u0438\u0440\u043e\u0432\u0430\u0442\u044c \u043e\u0431\u044a\u0435\u043a\u0442, \u0438\u0437\u043e\u0431\u0440\u0430\u0436\u0435\u043d\u043d\u044b\u0439 \u043d\u0430 \u043a\u0430\u0440\u0442\u0438\u043d\u043a\u0435\"\n }\n ],\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"reasons\"\n }\n }\n ]\n }\n ]\n }\n ]\n },\n \"plugins\": [\n {\n \"type\": \"plugin.toloka\",\n \"layout\": {\n \"kind\": \"scroll\",\n \"taskWidth\": 690\n },\n \"notifications\": [\n {\n \"type\": \"helper.if\",\n \"condition\": {\n \"type\": \"condition.equals\",\n \"to\": \"yes\",\n \"data\": {\n \"$ref\": \"vars.top_text.show\"\n }\n },\n \"then\": {\n \"type\": \"view.text\",\n \"content\": {\n \"$ref\": \"vars.top_text.message\"\n }\n }\n }\n ]\n },\n {\n \"type\": \"plugin.trigger\",\n \"onChangeOf\": {\n \"type\": \"data.output\",\n \"path\": \"estimation\"\n },\n \"condition\": {\n \"type\": \"condition.equals\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"estimation\"\n },\n \"to\": \"OK\"\n },\n \"action\": {\n \"type\": \"action.set\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"reasons\"\n },\n \"payload\": {\n \"$empty\": true\n }\n }\n },\n {\n \"type\": \"plugin.trigger\",\n \"onChangeOf\": {\n \"type\": \"data.output\",\n \"path\": \"reasons\"\n },\n \"condition\": {\n \"type\": \"condition.not\",\n \"condition\": {\n \"type\": \"condition.empty\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": 
\"reasons\"\n }\n }\n },\n \"action\": {\n \"type\": \"action.set\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"estimation\"\n },\n \"payload\": \"BAD\"\n }\n },\n {\n \"1\": {\n \"type\": \"action.set\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"estimation\"\n },\n \"payload\": \"OK\"\n },\n \"2\": {\n \"type\": \"action.set\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"estimation\"\n },\n \"payload\": \"BAD\"\n },\n \"3\": {\n \"type\": \"action.open-close\",\n \"view\": {\n \"$ref\": \"view.items.0\"\n }\n },\n \"q\": {\n \"type\": \"action.toggle\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"reasons.porno\"\n }\n },\n \"w\": {\n \"type\": \"action.toggle\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"reasons.porno_grey_zone\"\n }\n },\n \"e\": {\n \"type\": \"action.toggle\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"reasons.illegal\"\n }\n },\n \"r\": {\n \"type\": \"action.toggle\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"reasons.casino\"\n }\n },\n \"t\": {\n \"type\": \"action.toggle\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"reasons.casino_grey_zone\"\n }\n },\n \"y\": {\n \"type\": \"action.toggle\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"reasons.guns\"\n }\n },\n \"a\": {\n \"type\": \"action.toggle\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"reasons.yellow\"\n }\n },\n \"s\": {\n \"type\": \"action.toggle\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"reasons.interface_elements\"\n }\n },\n \"d\": {\n \"type\": \"action.toggle\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"reasons.yellow_grey_zone\"\n }\n },\n \"f\": {\n \"type\": \"action.toggle\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"reasons.undefined\"\n }\n },\n \"type\": \"plugin.hotkeys\"\n }\n ]\n}",
"lock": {
"action.open-close": "1.0.0",
"action.set": "1.0.0",
"action.toggle": "1.0.0",
"condition.empty": "1.0.0",
"condition.equals": "1.0.0",
"condition.not": "1.0.0",
"condition.required": "1.0.0",
"core": "1.0.0",
"field.button-radio": "1.0.0",
"field.checkbox-group": "1.0.0",
"helper.if": "1.0.0",
"layout.columns": "1.0.0",
"plugin.hotkeys": "1.0.0",
"plugin.toloka": "1.0.0",
"plugin.trigger": "1.0.0",
"view.image": "1.0.0",
"view.list": "1.0.0",
"view.text": "1.0.0"
},
"settings": {
"showFinish": True,
"showFullscreen": True,
"showInstructions": True,
"showMessage": True,
"showReward": True,
"showSkip": True,
"showSubmit": True,
"showTimer": True,
"showTitle": True
},
"type": "tb",
"assets": {
"script_urls": [
"https://yastat.net/s3/tb/launcher.template/_/master.825664685.2020-11-19.6390a7cf.js"
],
"style_urls": [
"https://yastat.net/s3/tb/launcher.template/_/master.825664685.2020-11-19.6390a7cf.css"
]
},
"markup": "",
"styles": "",
"script": "window.configureTemplate({\n \"vars\": {\n \"author\": \"sukhodolskaya\",\n \"task\": \"old\",\n \"top_text\": {\n \"show\": \"yes\",\n \"message\": \"\u0414\u043e\u0431\u0430\u0432\u043b\u0435\u043d hotkey 3 \u043d\u0430 \u0440\u0430\u0437\u0432\u0435\u0440\u0442\u044b\u0432\u0430\u043d\u0438\u0435/\u0441\u0432\u0435\u0440\u0442\u044b\u0432\u0430\u043d\u0438\u0435 \u043a\u0430\u0440\u0442\u0438\u043d\u043a\u0438!\"\n }\n },\n \"view\": {\n \"type\": \"layout.columns\",\n \"items\": [\n {\n \"type\": \"view.image\",\n \"url\": {\n \"type\": \"data.input\",\n \"path\": \"image\"\n }\n },\n {\n \"type\": \"layout.columns\",\n \"ratio\": [\n 1,\n 10\n ],\n \"minWidth\": 0,\n \"items\": [\n {\n \"type\": \"field.button-radio\",\n \"label\": \"\u0414\u0430\",\n \"validation\": {\n \"type\": \"condition.required\"\n },\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"estimation\"\n },\n \"valueToSet\": \"OK\"\n },\n {\n \"type\": \"view.list\",\n \"items\": [\n {\n \"type\": \"field.button-radio\",\n \"label\": \"\u041d\u0435\u0442\",\n \"validation\": {\n \"type\": \"condition.required\"\n },\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"estimation\"\n },\n \"valueToSet\": \"BAD\"\n },\n {\n \"type\": \"field.checkbox-group\",\n \"validation\": {\n \"type\": \"helper.if\",\n \"condition\": {\n \"type\": \"condition.equals\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"estimation\"\n },\n \"to\": \"BAD\"\n },\n \"then\": {\n \"type\": \"condition.required\"\n }\n },\n \"options\": [\n {\n \"label\": \"\u0422\u043e\u0432\u0430\u0440\u044b \u0434\u043b\u044f \u0432\u0437\u0440\u043e\u0441\u043b\u044b\u0445\",\n \"value\": \"porno\",\n \"hint\": \"- \u0421\u0435\u043a\u0441-\u0438\u0433\u0440\u0443\u0448\u043a\u0438 (\u0432\u0438\u0431\u0440\u0430\u0442\u043e\u0440\u044b, \u043d\u0430\u0440\u0443\u0447\u043d\u0438\u043a\u0438, \u0430\u0442\u0440\u0438\u0431\u0443\u0442\u0438\u043a\u0430 \u0411\u0414\u0421\u041c \u0438 
\u0442.\u0434.)\\n\"\n },\n {\n \"label\": \"\u042d\u0440\u043e\u0442\u0438\u043a\u0430, \u043f\u043e\u0440\u043d\u043e\",\n \"value\": \"porno_grey_zone\",\n \"hint\": \"- \u041e\u0431\u043d\u0430\u0436\u0435\u043d\u043a\u0430/\u043f\u043e\u0441\u0442\u0435\u043b\u044c\u043d\u044b\u0435 \u0441\u0446\u0435\u043d\u044b\\n- \u041d\u0430 \u0438\u0437\u043e\u0431\u0440\u0430\u0436\u0435\u043d\u0438\u0438 \u0430\u043a\u0446\u0435\u043d\u0442 \u043d\u0430 \u0433\u0440\u0443\u0434\u044c/\u043f\u043e\u043f\u0443/\u0433\u0435\u043d\u0438\u0442\u0430\u043b\u0438\u0438\\n- \u041d\u0430\u0440\u0438\u0441\u043e\u0432\u0430\u043d\u043d\u044b\u0435 \u043f\u0435\u0440\u0441\u043e\u043d\u0430\u0436\u0438 (\u043d\u0430\u043f\u0440\u0438\u043c\u0435\u0440, \u0430\u043d\u0438\u043c\u044d) \u0441 \u043d\u0430\u043c\u0435\u043a\u043e\u043c \u043d\u0430 \u044d\u0440\u043e\u0442\u0438\u043a\u0443/\u043f\u043e\u0440\u043d\u043e\\n- \u042d\u0440\u043e\u0442\u0438\u0447\u0435\u0441\u043a\u043e\u0435 \u0431\u0435\u043b\u044c\u0435 \u043d\u0430 \u043c\u043e\u0434\u0435\u043b\u044f\u0445 \u043e\u0431\u043e\u0435\u0433\u043e \u043f\u043e\u043b\u0430 (\u0441\u0442\u0440\u0438\u043d\u0433\u0438, \u043a\u0440\u0443\u0436\u0435\u0432\u043d\u043e\u0435, \u043f\u0440\u043e\u0441\u0432\u0435\u0447\u0438\u0432\u0430\u044e\u0449\u0435\u0435)\"\n },\n {\n \"label\": \"\u0410\u043b\u043a\u043e\u0433\u043e\u043b\u044c, \u043a\u0443\u0440\u0435\u043d\u0438\u0435, \u043d\u0430\u0440\u043a\u043e\u0442\u0438\u043a\u0438\",\n \"value\": \"illegal\",\n \"hint\": \"- \u0420\u0435\u043a\u043b\u0430\u043c\u0438\u0440\u0443\u0435\u0442\u0441\u044f \u0430\u043b\u043a\u043e\u0433\u043e\u043b\u044c\u043d\u0430\u044f \u043f\u0440\u043e\u0434\u0443\u043a\u0446\u0438\u044f/\u0435\u0435 \u0443\u043f\u043e\u0442\u0440\u0435\u0431\u043b\u0435\u043d\u0438\u0435\\n- \u0420\u0435\u043a\u043b\u0430\u043c\u0438\u0440\u0443\u0435\u0442\u0441\u044f \u043a\u0443\u0440\u0435\u043d\u0438\u0435/\u043f\u0430\u0440\u0435\u043d\u0438\u0435 
\u0418 \u0441\u0432\u044f\u0437\u0430\u043d\u043d\u0430\u044f \u0441 \u043d\u0438\u043c\u0438 \u0430\u0442\u0440\u0438\u0431\u0443\u0442\u0438\u043a\u0430 (\u0442\u0430\u0431\u0430\u0447\u043d\u044b\u0435 \u0438\u0437\u0434\u0435\u043b\u0438\u044f, \u044d\u043b\u0435\u043a\u0442\u0440\u043e\u043d\u043d\u044b\u0435 \u0441\u0438\u0433\u0430\u0440\u0435\u0442\u044b, \u043a\u0430\u043b\u044c\u044f\u043d\u044b \u0438 \u0442.\u0434.)\\n- \u0420\u0435\u043a\u043b\u0430\u043c\u0438\u0440\u0443\u044e\u0442\u0441\u044f \u043d\u0430\u0440\u043a\u043e\u0442\u0438\u0447\u0435\u0441\u043a\u0438\u0435 \u0432\u0435\u0449\u0435\u0441\u0442\u0432\u0430/\u0438\u0445 \u0443\u043f\u043e\u0442\u0440\u0435\u0431\u043b\u0435\u043d\u0438\u0435\"\n },\n {\n \"label\": \"\u041a\u0430\u0437\u0438\u043d\u043e, \u0430\u0437\u0430\u0440\u0442\u043d\u044b\u0435 \u0438\u0433\u0440\u044b (\u043d\u0430 \u0434\u0435\u043d\u044c\u0433\u0438, \u0430 \u041d\u0415 \u043d\u0430\u0441\u0442\u043e\u043b\u044c\u043d\u044b\u0435 \u0438\u0433\u0440\u044b)\",\n \"value\": \"casino\",\n \"hint\": \"- \u041b\u043e\u0433\u043e\u0442\u0438\u043f\u044b \u043a\u0430\u0437\u0438\u043d\u043e (Vavada, \u0412\u0443\u043b\u043a\u0430\u043d, \u0410\u0437\u0438\u043d\u043e 777)\\n- \u0418\u0437\u043e\u0431\u0440\u0430\u0436\u0435\u043d\u044b \u0438\u0433\u043e\u0440\u043d\u044b\u0435 \u0441\u0442\u043e\u043b\u044b, \u0438\u0433\u0440\u043e\u0432\u044b\u0435 \u0430\u0432\u0442\u043e\u043c\u0430\u0442\u044b, \u043f\u0440\u0438\u043b\u043e\u0436\u0435\u043d\u0438\u044f \u0441 \u043e\u043d\u043b\u0430\u0439\u043d-\u043a\u0430\u0437\u0438\u043d\u043e/\u0430\u0437\u0430\u0440\u0442\u043d\u044b\u043c\u0438 \u0438\u0433\u0440\u0430\u043c\u0438 \u0438 \u0437\u0430\u0441\u0442\u0430\u0432\u043a\u0438 \u0441 \u0438\u0433\u0440\u043e\u0432\u044b\u0445 \u0430\u0432\u0442\u043e\u043c\u0430\u0442\u043e\u0432\\n- \u041f\u0440\u0438\u0437\u044b\u0432 \u043a \u0438\u0433\u0440\u0435 \u043d\u0430 \u0434\u0435\u043d\u044c\u0433\u0438\"\n },\n 
{\n \"label\": \"\u0414\u0435\u043d\u044c\u0433\u0438, \u0431\u0430\u043d\u043a\u043e\u0432\u0441\u043a\u0438\u0435 \u043a\u0430\u0440\u0442\u044b, \u0440\u0435\u043a\u043b\u0430\u043c\u0430 \u0437\u0430\u0440\u0430\u0431\u043e\u0442\u043a\u0430/\u0437\u0430\u0439\u043c\u043e\u0432\",\n \"value\": \"casino_grey_zone\",\n \"hint\": \"- \u0418\u0437\u043e\u0431\u0440\u0430\u0436\u0435\u043d\u0438\u0435 \u043a\u0443\u043f\u044e\u0440 \u0438\u043b\u0438 \u0431\u0430\u043d\u043a\u043e\u0432\u0441\u043a\u0438\u0445 \u043a\u0430\u0440\u0442 (\u043a\u043b\u044e\u0447\u0435\u0439 \u043e\u0442 \u043c\u0430\u0448\u0438\u043d \u0438 \u0442.\u0434.) \u0431\u0435\u0437 \u043f\u0440\u044f\u043c\u043e\u0439 \u0441\u0432\u044f\u0437\u0438 \u0441 \u043a\u0430\u0437\u0438\u043d\u043e\\n- \u0420\u0435\u043a\u043b\u0430\u043c\u0430 \u0437\u0430\u0440\u0430\u0431\u043e\u0442\u043a\u0430/\u0437\u0430\u0439\u043c\u043e\u0432\"\n },\n {\n \"label\": \"\u041e\u0440\u0443\u0436\u0438\u0435\",\n \"value\": \"guns\",\n \"hint\": \"- \u0425\u043e\u043b\u043e\u0434\u043d\u043e\u0435 \u043e\u0440\u0443\u0436\u0438\u0435, \u043d\u0430 \u0438\u0437\u043e\u0431\u0440\u0430\u0436\u0435\u043d\u0438\u0438 \u0412\u0418\u0414\u041d\u041e \u0440\u0435\u0436\u0443\u0449\u0443\u044e \u0447\u0430\u0441\u0442\u044c\\n- \u041e\u0433\u043d\u0435\u0441\u0442\u0440\u0435\u043b\u044c\u043d\u043e\u0435, \u043f\u043d\u0435\u0432\u043c\u0430\u0442\u0438\u0447\u0435\u0441\u043a\u043e\u0435 \u043e\u0440\u0443\u0436\u0438\u0435 \u0411\u0415\u0417 \u0443\u043a\u0430\u0437\u0430\u043d\u0438\u044f \u043d\u0430 \u0438\u0437\u043e\u0431\u0440\u0430\u0436\u0435\u043d\u0438\u0438 \u0435\u0433\u043e \u043c\u043e\u0434\u0435\u043b\u0438\\n- \u041c\u0435\u0442\u0430\u0442\u0435\u043b\u044c\u043d\u043e\u0435 \u043e\u0440\u0443\u0436\u0438\u0435\\n- \u041c\u0438\u043d\u044b \u0438 \u0433\u0440\u0430\u043d\u0430\u0442\u044b\\n- \u0420\u0430\u043a\u0435\u0442\u043d\u043e\u0435/\u0442\u043e\u0440\u043f\u0435\u0434\u043d\u043e\u0435 
\u043e\u0440\u0443\u0436\u0438\u0435\\n- \u042d\u043b\u0435\u043a\u0442\u0440\u043e\u0448\u043e\u043a\u0435\u0440\u044b\"\n },\n {\n \"label\": \"\u041f\u0443\u0433\u0430\u044e\u0449\u0435\u0435/\u0442\u0440\u0430\u0433\u0438\u0447\u0435\u0441\u043a\u043e\u0435/\u043e\u0442\u0432\u0440\u0430\u0442\u0438\u0442\u0435\u043b\u044c\u043d\u043e\u0435 \u0438\u043b\u0438 \u043e\u0441\u043a\u043e\u0440\u0431\u0438\u0442\u0435\u043b\u044c\u043d\u043e\u0435 \u0438\u0437\u043e\u0431\u0440\u0430\u0436\u0435\u043d\u0438\u0435\",\n \"value\": \"yellow\",\n \"hint\": \"- \u041f\u0443\u0433\u0430\u044e\u0449\u0438\u0435 \u0438\u0437\u043e\u0431\u0440\u0430\u0436\u0435\u043d\u0438\u044f: \u043d\u0435\u0447\u0438\u0441\u0442\u044c (\u0411\u0415\u0417 \u043a\u043e\u043d\u0442\u0435\u043a\u0441\u0442\u0430 \u0432 \u0432\u0438\u0434\u0435 \u0444\u0438\u043b\u044c\u043c\u0430/\u043a\u043d\u0438\u0433\u0438/\u043f\u043e\u0441\u0442\u0430\u043d\u043e\u0432\u043a\u0438), \u0441\u0446\u0435\u043d\u044b \u043d\u0430\u0441\u0438\u043b\u0438\u044f, \u0440\u0430\u0441\u0447\u043b\u0435\u043d\u0435\u043d\u043a\u0430, \u0438\u0441\u043a\u0440\u0438\u0432\u043b\u0435\u043d\u043d\u044b\u0435 \u043e\u0442 \u0443\u0436\u0430\u0441\u0430/\u0421\u0418\u041b\u042c\u041d\u041e\u0419 \u0431\u043e\u043b\u0438 \u043b\u0438\u0446\u0430 \u043b\u044e\u0434\u0435\u0439, \u043f\u0440\u0438\u0437\u044b\u0432\u044b \u043a \u0443\u0431\u0438\u0439\u0441\u0442\u0432\u0443/\u0441\u043c\u0435\u0440\u0442\u0438, \u0443\u0433\u0440\u043e\u0437\u044b\\n- \u0422\u0440\u0430\u0433\u0438\u0447\u0435\u0441\u043a\u0438\u0435 \u0438\u0437\u043e\u0431\u0440\u0430\u0436\u0435\u043d\u0438\u044f: \u043a\u0430\u0434\u0440\u044b \u0441 \u043c\u0435\u0441\u0442\u0430 \u0430\u0432\u0430\u0440\u0438\u0439, \u0441\u0432\u044f\u0437\u044c \u0441 \u0441\u0430\u043c\u043e\u0443\u0431\u0438\u0439\u0441\u0442\u0432\u043e\u043c/\u0430\u0431\u043e\u0440\u0442\u043e\u043c, \u0440\u0438\u0442\u0443\u0430\u043b\u044c\u043d\u044b\u0435 
\u0430\u0442\u0440\u0438\u0431\u0443\u0442\u044b\\n- \u041e\u0442\u0432\u0440\u0430\u0442\u0438\u0442\u0435\u043b\u044c\u043d\u044b\u0435 \u0438\u0437\u043e\u0431\u0440\u0430\u0436\u0435\u043d\u0438\u044f - \u0433\u043d\u043e\u0439, \u043f\u0440\u044b\u0449\u0438, \u0432\u043e\u0441\u043f\u0430\u043b\u0435\u043d\u0438\u044f, \u0433\u0440\u044f\u0437\u044c, \u0440\u0430\u0437\u043b\u043e\u0436\u0435\u043d\u0438\u0435, \u043e\u0431\u044a\u0435\u0434\u043a\u0438, \u0440\u0432\u043e\u0442\u0430\\n- \u041e\u0441\u043a\u043e\u0440\u0431\u0438\u0442\u0435\u043b\u044c\u043d\u044b\u0435 \u0438\u0437\u043e\u0431\u0440\u0430\u0436\u0435\u043d\u0438\u044f: \u0441\u043e\u0434\u0435\u0440\u0436\u0430\u0449\u0438\u0435 \u043c\u0430\u0442, \u0436\u0430\u0440\u0433\u043e\u043d, \u0440\u0435\u043a\u043b\u0430\u043c\u0438\u0440\u0443\u044e\u0449\u0438\u0435 \u0440\u0430\u0441\u0438\u0437\u043c/\u0441\u0435\u043a\u0441\u0438\u0437\u043c/\u043d\u0430\u0446\u0438\u0437\u043c, \u043e\u0441\u043a\u043e\u0440\u0431\u043b\u044f\u044e\u0449\u0438\u0435 \u0432\u0435\u0442\u0435\u0440\u0430\u043d\u043e\u0432/\u0438\u043d\u0432\u0430\u043b\u0438\u0434\u043e\u0432\"\n },\n {\n \"label\": \"\u042d\u043b\u0435\u043c\u0435\u043d\u0442\u044b \u043f\u043e\u043b\u044c\u0437\u043e\u0432\u0430\u0442\u0435\u043b\u044c\u0441\u043a\u043e\u0433\u043e \u0438\u043d\u0442\u0435\u0440\u0444\u0435\u0439\u0441\u0430\",\n \"value\": \"interface_elements\",\n \"hint\": \"- \u0418\u043c\u0438\u0442\u0430\u0446\u0438\u044f \u0441\u0442\u0430\u043d\u0434\u0430\u0440\u0442\u043d\u044b\u0445 \u044d\u043b\u0435\u043c\u0435\u043d\u0442\u043e\u0432 \u043f\u043e\u043b\u044c\u0437\u043e\u0432\u0430\u0442\u0435\u043b\u044c\u0441\u043a\u043e\u0433\u043e \u0438\u043d\u0442\u0435\u0440\u0444\u0435\u0439\u0441\u0430 (\u0441\u0442\u0430\u043d\u0434\u0430\u0440\u0442\u043d\u044b\u0435 \u043a\u043d\u043e\u043f\u043a\u0438, \u043a\u043e\u0442\u043e\u0440\u044b\u0435 \u043d\u0435\u043b\u044c\u0437\u044f 
\u043d\u0430\u0436\u0430\u0442\u044c; \u043a\u0443\u0440\u0441\u043e\u0440, \u043a\u043e\u0442\u043e\u0440\u044b\u0439 \u043d\u0435\u043b\u044c\u0437\u044f \u043f\u043e\u0434\u0432\u0438\u0433\u0430\u0442\u044c)\"\n },\n {\n \"label\": \"\u0420\u0435\u043a\u043b\u0430\u043c\u0430 \u043c\u0433\u043d\u043e\u0432\u0435\u043d\u043d\u043e\u0433\u043e/\\\"\u0447\u0443\u0434\u0435\u0441\u043d\u043e\u0433\u043e\\\" \u043f\u043e\u0445\u0443\u0434\u0435\u043d\u0438\u044f, \u0443\u043b\u0443\u0447\u0448\u0435\u043d\u0438\u044f \u043f\u043e\u0442\u0435\u043d\u0446\u0438\u0438/\u0432\u043d\u0435\u0448\u043d\u043e\u0441\u0442\u0438/\u0437\u0434\u043e\u0440\u043e\u0432\u044c\u044f\",\n \"value\": \"yellow_grey_zone\",\n \"hint\": \"- \u041d\u0435 \u043e\u0444\u0438\u0446\u0438\u0430\u043b\u044c\u043d\u044b\u0435 \u043c\u0435\u0434\u0438\u0446\u0438\u043d\u0441\u043a\u0438\u0435 \u0441\u0440\u0435\u0434\u0441\u0442\u0432\u0430, \u043e\u0431\u0435\u0449\u0430\u044e\u0449\u0438\u0435 \u043c\u0433\u043d\u043e\u0432\u0435\u043d\u043d\u043e\u0435/\\\"\u0447\u0443\u0434\u0435\u0441\u043d\u043e\u0435\\\" \u0438\u0441\u0446\u0435\u043b\u0435\u043d\u0438\u0435/\u043f\u043e\u0445\u0443\u0434\u0435\u043d\u0438\u0435/\u0443\u043b\u0443\u0447\u0448\u0435\u043d\u0438\u0435 \u0441\u0430\u043c\u043e\u0447\u0443\u0432\u0441\u0442\u0432\u0438\u044f \u0438\u043b\u0438 \u0432\u043d\u0435\u0448\u043d\u043e\u0441\u0442\u0438: \u0411\u0410\u0414\u044b, \u043d\u0430\u0440\u043e\u0434\u043d\u044b\u0435 \u0441\u0440\u0435\u0434\u0441\u0442\u0432\u0430\\n- \u0420\u0435\u0437\u0443\u043b\u044c\u0442\u0430\u0442\u044b \u0434\u043e \u0438/\u0438\u043b\u0438 \u043f\u043e\u0441\u043b\u0435 \u043c\u0433\u043d\u043e\u0432\u0435\u043d\u043d\u043e\u0433\u043e/\\\"\u0447\u0443\u0434\u0435\u0441\u043d\u043e\u0433\u043e\\\" \u043f\u043e\u0445\u0443\u0434\u0435\u043d\u0438\u044f/\u0443\u043b\u0443\u0447\u0448\u0435\u043d\u0438\u044f 
\u043f\u043e\u0442\u0435\u043d\u0446\u0438\u0438/\u0443\u043b\u0443\u0447\u0448\u0435\u043d\u0438\u044f \u0437\u0434\u043e\u0440\u043e\u0432\u044c\u044f/\u0432\u043d\u0435\u0448\u043d\u043e\u0441\u0442\u0438\"\n },\n {\n \"label\": \"\u041d\u0435\u044f\u0441\u043d\u043e, \u0447\u0442\u043e \u0438\u0437\u043e\u0431\u0440\u0430\u0436\u0435\u043d\u043e\",\n \"value\": \"undefined\",\n \"hint\": \"- \u041d\u0435\u0442 \u0432\u043e\u0437\u043c\u043e\u0436\u043d\u043e\u0441\u0442\u0438 \u0438\u0434\u0435\u043d\u0442\u0438\u0444\u0438\u0446\u0438\u0440\u043e\u0432\u0430\u0442\u044c \u043e\u0431\u044a\u0435\u043a\u0442, \u0438\u0437\u043e\u0431\u0440\u0430\u0436\u0435\u043d\u043d\u044b\u0439 \u043d\u0430 \u043a\u0430\u0440\u0442\u0438\u043d\u043a\u0435\"\n }\n ],\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"reasons\"\n }\n }\n ]\n }\n ]\n }\n ]\n },\n \"plugins\": [\n {\n \"type\": \"plugin.toloka\",\n \"layout\": {\n \"kind\": \"scroll\",\n \"taskWidth\": 690\n },\n \"notifications\": [\n {\n \"type\": \"helper.if\",\n \"condition\": {\n \"type\": \"condition.equals\",\n \"to\": \"yes\",\n \"data\": {\n \"$ref\": \"vars.top_text.show\"\n }\n },\n \"then\": {\n \"type\": \"view.text\",\n \"content\": {\n \"$ref\": \"vars.top_text.message\"\n }\n }\n }\n ]\n },\n {\n \"type\": \"plugin.trigger\",\n \"onChangeOf\": {\n \"type\": \"data.output\",\n \"path\": \"estimation\"\n },\n \"condition\": {\n \"type\": \"condition.equals\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"estimation\"\n },\n \"to\": \"OK\"\n },\n \"action\": {\n \"type\": \"action.set\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"reasons\"\n },\n \"payload\": {\n \"$empty\": true\n }\n }\n },\n {\n \"type\": \"plugin.trigger\",\n \"onChangeOf\": {\n \"type\": \"data.output\",\n \"path\": \"reasons\"\n },\n \"condition\": {\n \"type\": \"condition.not\",\n \"condition\": {\n \"type\": \"condition.empty\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": 
\"reasons\"\n }\n }\n },\n \"action\": {\n \"type\": \"action.set\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"estimation\"\n },\n \"payload\": \"BAD\"\n }\n },\n {\n \"1\": {\n \"type\": \"action.set\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"estimation\"\n },\n \"payload\": \"OK\"\n },\n \"2\": {\n \"type\": \"action.set\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"estimation\"\n },\n \"payload\": \"BAD\"\n },\n \"3\": {\n \"type\": \"action.open-close\",\n \"view\": {\n \"$ref\": \"view.items.0\"\n }\n },\n \"q\": {\n \"type\": \"action.toggle\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"reasons.porno\"\n }\n },\n \"w\": {\n \"type\": \"action.toggle\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"reasons.porno_grey_zone\"\n }\n },\n \"e\": {\n \"type\": \"action.toggle\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"reasons.illegal\"\n }\n },\n \"r\": {\n \"type\": \"action.toggle\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"reasons.casino\"\n }\n },\n \"t\": {\n \"type\": \"action.toggle\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"reasons.casino_grey_zone\"\n }\n },\n \"y\": {\n \"type\": \"action.toggle\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"reasons.guns\"\n }\n },\n \"a\": {\n \"type\": \"action.toggle\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"reasons.yellow\"\n }\n },\n \"s\": {\n \"type\": \"action.toggle\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"reasons.interface_elements\"\n }\n },\n \"d\": {\n \"type\": \"action.toggle\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"reasons.yellow_grey_zone\"\n }\n },\n \"f\": {\n \"type\": \"action.toggle\",\n \"data\": {\n \"type\": \"data.output\",\n \"path\": \"reasons.undefined\"\n }\n },\n \"type\": \"plugin.hotkeys\"\n }\n ]\n}, 
{\"action.open-close\":\"1.0.0\",\"action.set\":\"1.0.0\",\"action.toggle\":\"1.0.0\",\"condition.empty\":\"1.0.0\",\"condition.equals\":\"1.0.0\",\"condition.not\":\"1.0.0\",\"condition.required\":\"1.0.0\",\"core\":\"1.0.0\",\"field.button-radio\":\"1.0.0\",\"field.checkbox-group\":\"1.0.0\",\"helper.if\":\"1.0.0\",\"layout.columns\":\"1.0.0\",\"plugin.hotkeys\":\"1.0.0\",\"plugin.toloka\":\"1.0.0\",\"plugin.trigger\":\"1.0.0\",\"view.image\":\"1.0.0\",\"view.list\":\"1.0.0\",\"view.text\":\"1.0.0\"}, 'https://tb.yandex.net/registry2')"
}
]
@pytest.fixture
def example_view_spec(request):
    """Indirectly parametrized fixture: yield the EXAMPLES entry keyed by request.param."""
    example_key = request.param
    return EXAMPLES[example_key]
| 486.70362
| 27,383
| 0.410147
| 22,384
| 215,123
| 3.93138
| 0.016664
| 0.044045
| 0.034364
| 0.023727
| 0.998932
| 0.998932
| 0.998886
| 0.998886
| 0.998886
| 0.998795
| 0
| 0.214177
| 0.353198
| 215,123
| 441
| 27,384
| 487.807256
| 0.418271
| 0.000056
| 0
| 0.7254
| 0
| 0.059497
| 0.474894
| 0.003059
| 0
| 0
| 0
| 0
| 0
| 1
| 0.002288
| false
| 0
| 0.002288
| 0.002288
| 0.006865
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 11
|
eda2269d5816b8e86e8a13c6502789e2bafef128
| 769
|
py
|
Python
|
models/managment.py
|
AhmedAlaa2024/Heal-and-Cure
|
c5db8f0e87cfe2f7394bc7f84c9ef490ceedf7b4
|
[
"MIT"
] | 1
|
2022-01-16T16:37:53.000Z
|
2022-01-16T16:37:53.000Z
|
models/managment.py
|
AhmedAlaa2024/Heal-and-Cure
|
c5db8f0e87cfe2f7394bc7f84c9ef490ceedf7b4
|
[
"MIT"
] | null | null | null |
models/managment.py
|
AhmedAlaa2024/Heal-and-Cure
|
c5db8f0e87cfe2f7394bc7f84c9ef490ceedf7b4
|
[
"MIT"
] | null | null | null |
from ..app import cursor
class Department():
    """CRUD accessor stub for the Department table (implementations pending)."""

    def __init__(self):
        pass

    # NOTE(review): the original stubs omitted `self`, so calling them on an
    # instance raised TypeError; `self` restores the intended instance-method
    # interface.
    def get(self):
        """Fetch department rows; not implemented yet."""
        pass

    def insert(self):
        """Insert a department row; not implemented yet."""
        pass

    def update(self):
        """Update a department row; not implemented yet."""
        pass

    def delete(self):
        """Delete a department row; not implemented yet."""
        pass
class Room():
    """CRUD accessor stub for the Room table (implementations pending)."""

    def __init__(self):
        pass

    # NOTE(review): added the missing `self` parameter so these work as the
    # instance methods they were clearly intended to be.
    def get(self):
        """Fetch room rows; not implemented yet."""
        pass

    def insert(self):
        """Insert a room row; not implemented yet."""
        pass

    def update(self):
        """Update a room row; not implemented yet."""
        pass

    def delete(self):
        """Delete a room row; not implemented yet."""
        pass
class EmploymentContract():
    """CRUD accessor stub for the EmploymentContract table (implementations pending)."""

    def __init__(self):
        pass

    # NOTE(review): added the missing `self` parameter so these work as the
    # instance methods they were clearly intended to be.
    def get(self):
        """Fetch employment-contract rows; not implemented yet."""
        pass

    def insert(self):
        """Insert an employment-contract row; not implemented yet."""
        pass

    def update(self):
        """Update an employment-contract row; not implemented yet."""
        pass

    def delete(self):
        """Delete an employment-contract row; not implemented yet."""
        pass
class OperationContract():
    """CRUD accessor stub for the OperationContract table (implementations pending)."""

    def __init__(self):
        pass

    # NOTE(review): added the missing `self` parameter so these work as the
    # instance methods they were clearly intended to be.
    def get(self):
        """Fetch operation-contract rows; not implemented yet."""
        pass

    def insert(self):
        """Insert an operation-contract row; not implemented yet."""
        pass

    def update(self):
        """Update an operation-contract row; not implemented yet."""
        pass

    def delete(self):
        """Delete an operation-contract row; not implemented yet."""
        pass
| 11.144928
| 27
| 0.46814
| 76
| 769
| 4.526316
| 0.223684
| 0.325581
| 0.127907
| 0.174419
| 0.787791
| 0.787791
| 0.787791
| 0.787791
| 0.787791
| 0.787791
| 0
| 0
| 0.435631
| 769
| 69
| 28
| 11.144928
| 0.792627
| 0
| 0
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.444444
| false
| 0.444444
| 0.022222
| 0
| 0.555556
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 11
|
edad1a8e164ac1af4510970df1f716f3ae01dafd
| 7,023
|
py
|
Python
|
appengine-mapreduce/python/test/mapreduce/output_writers_end_to_end_test.py
|
bslatkin/8-bits
|
1608a53bdd5ff491519396212679dc79cc07fca4
|
[
"Apache-2.0"
] | 2
|
2015-02-18T08:12:23.000Z
|
2015-09-24T20:35:41.000Z
|
appengine-mapreduce/python/test/mapreduce/output_writers_end_to_end_test.py
|
bslatkin/8-bits
|
1608a53bdd5ff491519396212679dc79cc07fca4
|
[
"Apache-2.0"
] | null | null | null |
appengine-mapreduce/python/test/mapreduce/output_writers_end_to_end_test.py
|
bslatkin/8-bits
|
1608a53bdd5ff491519396212679dc79cc07fca4
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
#
# Copyright 2011 Google Inc. All Rights Reserved.
import unittest
from google.appengine.api import files
from google.appengine.ext import db
from mapreduce import control
from mapreduce import model
from mapreduce import output_writers
from mapreduce import test_support
from testlib import testutil
# Fully qualified writer class paths, handed to control.start_map as
# output_writer_spec.
BLOBSTORE_WRITER_NAME = "%s.%s" % (
    output_writers.__name__, output_writers.BlobstoreOutputWriter.__name__)
FILE_WRITER_NAME = "%s.%s" % (
    output_writers.__name__, output_writers.FileOutputWriter.__name__)
class TestEntity(db.Model):
  """Minimal datastore model used as the map-job input in these end-to-end tests."""
def test_handler_yield_key_str(entity):
  """Map handler: emit the entity's key as one newline-terminated line."""
  yield "%s\n" % entity.key()
class FileOutputWriterEndToEndTest(testutil.HandlerTestBase):
  """End-to-end tests for FileOutputWriter using googlestore."""

  def _run_map(self, mapper_params, output_writer_spec=FILE_WRITER_NAME):
    """Create 1000 TestEntity rows, run a map job over them to completion.

    Args:
      mapper_params: mapper parameters dict passed to control.start_map.
      output_writer_spec: dotted path of the output writer class to use.

    Returns:
      The finished job's MapreduceState.
    """
    for _ in range(1000):
      TestEntity().put()
    mapreduce_id = control.start_map(
        "test_map",
        __name__ + ".test_handler_yield_key_str",
        "mapreduce.input_readers.DatastoreInputReader",
        mapper_params,
        shard_count=4,
        base_path="/mapreduce_base_path",
        output_writer_spec=output_writer_spec)
    test_support.execute_until_empty(self.taskqueue)
    return model.MapreduceState.get_by_job_id(mapreduce_id)

  def _assert_single_gs_file(self, mapreduce_state):
    """Assert the job wrote exactly one /gs/bucket/ file holding 1000 lines."""
    filenames = output_writers.FileOutputWriter.get_filenames(mapreduce_state)
    self.assertEqual(1, len(filenames))
    self.assertTrue(filenames[0].startswith("/gs/bucket/"))
    with files.open(filenames[0], "r") as f:
      data = f.read(10000000)
      self.assertEqual(1000, len(data.strip().split("\n")))

  def testSingleShard(self):
    """All output lands in a single Google Storage file by default."""
    state = self._run_map({
        "entity_kind": __name__ + "." + TestEntity.__name__,
        "filesystem": "gs",
        "gs_bucket_name": "bucket"
    })
    self._assert_single_gs_file(state)

  def testDedicatedParams(self):
    """Reader/writer params may be nested under dedicated sub-dicts."""
    state = self._run_map({
        "input_reader": {
            "entity_kind": __name__ + "." + TestEntity.__name__,
        },
        "output_writer": {
            "filesystem": "gs",
            "gs_bucket_name": "bucket",
        },
    })
    self._assert_single_gs_file(state)

  def testMultipleShards(self):
    """With output_sharding=input, each of the 4 shards writes its own file."""
    state = self._run_map(
        {
            "entity_kind": __name__ + "." + TestEntity.__name__,
            "output_sharding": "input",
            "filesystem": "gs",
        },
        output_writer_spec=BLOBSTORE_WRITER_NAME)
    filenames = output_writers.BlobstoreOutputWriter.get_filenames(state)
    self.assertEqual(4, len(filenames))
    file_lengths = []
    for filename in filenames:
      self.assertTrue(filename.startswith("/blobstore/"))
      self.assertFalse(filename.startswith("/blobstore/writable:"))
      with files.open(filename, "r") as f:
        data = f.read(10000000)
        file_lengths.append(len(data.strip().split("\n")))
    # These per-shard counts depend on the (deterministic) sharding of the
    # 1000 generated keys.
    expected_lengths = [199, 210, 275, 316]
    self.assertEqual(1000, sum(expected_lengths))
    self.assertEqual(expected_lengths, file_lengths)
class BlobstoreOutputWriterEndToEndTest(testutil.HandlerTestBase):
  """End-to-end tests for BlobstoreOutputWriter.

  BlobstoreOutputWriter isn't complex enough yet to do extensive
  unit tests. Do end-to-end tests just to check that it works.
  """

  def _run_map(self, mapper_params):
    """Create 1000 TestEntity rows, run a blobstore-writing map job.

    Args:
      mapper_params: mapper parameters dict passed to control.start_map.

    Returns:
      The finished job's MapreduceState.
    """
    for _ in range(1000):
      TestEntity().put()
    mapreduce_id = control.start_map(
        "test_map",
        __name__ + ".test_handler_yield_key_str",
        "mapreduce.input_readers.DatastoreInputReader",
        mapper_params,
        shard_count=4,
        base_path="/mapreduce_base_path",
        output_writer_spec=BLOBSTORE_WRITER_NAME)
    test_support.execute_until_empty(self.taskqueue)
    return model.MapreduceState.get_by_job_id(mapreduce_id)

  def testSingleShard(self):
    """By default all output is collected into one blobstore file."""
    state = self._run_map({
        "entity_kind": __name__ + "." + TestEntity.__name__,
    })
    filenames = output_writers.BlobstoreOutputWriter.get_filenames(state)
    self.assertEqual(1, len(filenames))
    blob_name = filenames[0]
    self.assertTrue(blob_name.startswith("/blobstore/"))
    self.assertFalse(blob_name.startswith("/blobstore/writable:"))
    with files.open(blob_name, "r") as f:
      data = f.read(10000000)
      self.assertEqual(1000, len(data.strip().split("\n")))

  def testMultipleShards(self):
    """With output_sharding=input, each shard gets its own blobstore file."""
    state = self._run_map({
        "entity_kind": __name__ + "." + TestEntity.__name__,
        "output_sharding": "input",
    })
    filenames = output_writers.BlobstoreOutputWriter.get_filenames(state)
    self.assertEqual(4, len(filenames))
    file_lengths = []
    for filename in filenames:
      self.assertTrue(filename.startswith("/blobstore/"))
      self.assertFalse(filename.startswith("/blobstore/writable:"))
      with files.open(filename, "r") as f:
        data = f.read(10000000)
        file_lengths.append(len(data.strip().split("\n")))
    # These per-shard counts depend on the (deterministic) sharding of the
    # 1000 generated keys.
    expected_lengths = [199, 210, 275, 316]
    self.assertEqual(1000, sum(expected_lengths))
    self.assertEqual(expected_lengths, file_lengths)
# Allow running this test module directly from the command line.
if __name__ == "__main__":
  unittest.main()
| 31.075221
| 78
| 0.677488
| 787
| 7,023
| 5.693774
| 0.185515
| 0.029011
| 0.021424
| 0.025441
| 0.820353
| 0.815443
| 0.798929
| 0.763669
| 0.763669
| 0.760768
| 0
| 0.022214
| 0.21159
| 7,023
| 225
| 79
| 31.213333
| 0.787069
| 0.075893
| 0
| 0.745223
| 0
| 0
| 0.129802
| 0.054988
| 0
| 0
| 0
| 0
| 0.127389
| 1
| 0.038217
| false
| 0
| 0.050955
| 0
| 0.10828
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
edb43ce7aa8578f52842a2ecd0bd1d1222305d42
| 205
|
py
|
Python
|
skyflow/service_account/__init__.py
|
skyflowapi/skyflow-python
|
1c7220de6698fd6a807932d3d3846b7fe4c61a5c
|
[
"MIT"
] | 2
|
2022-03-08T22:08:34.000Z
|
2022-03-31T15:36:23.000Z
|
skyflow/service_account/__init__.py
|
skyflowapi/skyflow-python
|
1c7220de6698fd6a807932d3d3846b7fe4c61a5c
|
[
"MIT"
] | 1
|
2022-03-23T04:55:58.000Z
|
2022-03-23T04:55:58.000Z
|
skyflow/service_account/__init__.py
|
skyflowapi/skyflow-python
|
1c7220de6698fd6a807932d3d3846b7fe4c61a5c
|
[
"MIT"
] | 4
|
2022-01-04T10:38:36.000Z
|
2022-01-27T06:16:45.000Z
|
# Public service-account helpers re-exported at package level.
# (Removed a duplicated `generate_bearer_token` import present in the original.)
from ._token import ResponseToken
from ._token import generate_bearer_token
from ._token import generate_bearer_token_from_creds
from ._validity import is_expired
| 34.166667
| 52
| 0.878049
| 29
| 205
| 5.724138
| 0.344828
| 0.216867
| 0.361446
| 0.415663
| 0.728916
| 0.728916
| 0.728916
| 0.5
| 0.5
| 0
| 0
| 0
| 0.097561
| 205
| 5
| 53
| 41
| 0.897297
| 0
| 0
| 0.4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 11
|
6106119816964315880ef3fd38c2875cc83aa41d
| 1,850
|
py
|
Python
|
rdmo/questions/migrations/0037_rename_en_to_lang1.py
|
Raspeanut/rdmo
|
9f785010a499c372a2f8368ccf76d2ea4150adcb
|
[
"Apache-2.0"
] | 77
|
2016-08-09T11:40:20.000Z
|
2022-03-06T11:03:26.000Z
|
rdmo/questions/migrations/0037_rename_en_to_lang1.py
|
Raspeanut/rdmo
|
9f785010a499c372a2f8368ccf76d2ea4150adcb
|
[
"Apache-2.0"
] | 377
|
2016-07-01T13:59:36.000Z
|
2022-03-30T13:53:19.000Z
|
rdmo/questions/migrations/0037_rename_en_to_lang1.py
|
Raspeanut/rdmo
|
9f785010a499c372a2f8368ccf76d2ea4150adcb
|
[
"Apache-2.0"
] | 47
|
2016-06-23T11:32:19.000Z
|
2022-03-01T11:34:37.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-01-29 16:18
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Rename every ``*_en`` translation field to ``*_lang1`` on the questions models."""

    dependencies = [
        ('questions', '0036_remove_subsection'),
    ]

    # Data-driven: one RenameField per (model, field prefix) pair, in the
    # same order as the original hand-written list.
    operations = [
        migrations.RenameField(
            model_name=model,
            old_name=prefix + '_en',
            new_name=prefix + '_lang1',
        )
        for model, prefix in [
            ('catalog', 'title'),
            ('question', 'help'),
            ('question', 'text'),
            ('question', 'verbose_name'),
            ('question', 'verbose_name_plural'),
            ('questionset', 'help'),
            ('questionset', 'title'),
            ('questionset', 'verbose_name'),
            ('questionset', 'verbose_name_plural'),
            ('section', 'title'),
        ]
    ]
| 28.030303
| 49
| 0.553514
| 177
| 1,850
| 5.39548
| 0.254237
| 0.219895
| 0.272251
| 0.314136
| 0.73822
| 0.73822
| 0.73822
| 0.73822
| 0.658639
| 0.658639
| 0
| 0.026273
| 0.341622
| 1,850
| 65
| 50
| 28.461538
| 0.7578
| 0.037297
| 0
| 0.793103
| 1
| 0
| 0.218785
| 0.065242
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.034483
| 0
| 0.086207
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
61164bc77f140f27555395498ad7d63ea8af6103
| 30,212
|
py
|
Python
|
sdk/python/pulumi_azure/compute/snapshot.py
|
henriktao/pulumi-azure
|
f1cbcf100b42b916da36d8fe28be3a159abaf022
|
[
"ECL-2.0",
"Apache-2.0"
] | 109
|
2018-06-18T00:19:44.000Z
|
2022-02-20T05:32:57.000Z
|
sdk/python/pulumi_azure/compute/snapshot.py
|
henriktao/pulumi-azure
|
f1cbcf100b42b916da36d8fe28be3a159abaf022
|
[
"ECL-2.0",
"Apache-2.0"
] | 663
|
2018-06-18T21:08:46.000Z
|
2022-03-31T20:10:11.000Z
|
sdk/python/pulumi_azure/compute/snapshot.py
|
henriktao/pulumi-azure
|
f1cbcf100b42b916da36d8fe28be3a159abaf022
|
[
"ECL-2.0",
"Apache-2.0"
] | 41
|
2018-07-19T22:37:38.000Z
|
2022-03-14T10:56:26.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['SnapshotArgs', 'Snapshot']
@pulumi.input_type
class SnapshotArgs:
    # NOTE: tfgen-generated input type. @pulumi.input_type introspects this
    # class; the string keys passed to pulumi.set/pulumi.get are part of the
    # wire contract and must match the camelCase names in @pulumi.getter.
    def __init__(__self__, *,
                 create_option: pulumi.Input[str],
                 resource_group_name: pulumi.Input[str],
                 disk_size_gb: Optional[pulumi.Input[int]] = None,
                 encryption_settings: Optional[pulumi.Input['SnapshotEncryptionSettingsArgs']] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 source_resource_id: Optional[pulumi.Input[str]] = None,
                 source_uri: Optional[pulumi.Input[str]] = None,
                 storage_account_id: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
        """
        The set of arguments for constructing a Snapshot resource.
        :param pulumi.Input[str] create_option: Indicates how the snapshot is to be created. Possible values are `Copy` or `Import`. Changing this forces a new resource to be created.
        :param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the Snapshot. Changing this forces a new resource to be created.
        :param pulumi.Input[int] disk_size_gb: The size of the Snapshotted Disk in GB.
        :param pulumi.Input['SnapshotEncryptionSettingsArgs'] encryption_settings: An `encryption_settings` block as defined by `SnapshotEncryptionSettingsArgs`.
        :param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
        :param pulumi.Input[str] name: Specifies the name of the Snapshot resource. Changing this forces a new resource to be created.
        :param pulumi.Input[str] source_resource_id: Specifies a reference to an existing snapshot, when `create_option` is `Copy`. Changing this forces a new resource to be created.
        :param pulumi.Input[str] source_uri: Specifies the URI to a Managed or Unmanaged Disk. Changing this forces a new resource to be created.
        :param pulumi.Input[str] storage_account_id: Specifies the ID of an storage account. Used with `source_uri` to allow authorization during import of unmanaged blobs from a different subscription. Changing this forces a new resource to be created.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
        """
        # Required inputs are always recorded; optional inputs are recorded
        # only when supplied, so unset values stay absent (not None).
        pulumi.set(__self__, "create_option", create_option)
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        if disk_size_gb is not None:
            pulumi.set(__self__, "disk_size_gb", disk_size_gb)
        if encryption_settings is not None:
            pulumi.set(__self__, "encryption_settings", encryption_settings)
        if location is not None:
            pulumi.set(__self__, "location", location)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if source_resource_id is not None:
            pulumi.set(__self__, "source_resource_id", source_resource_id)
        if source_uri is not None:
            pulumi.set(__self__, "source_uri", source_uri)
        if storage_account_id is not None:
            pulumi.set(__self__, "storage_account_id", storage_account_id)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
    @property
    @pulumi.getter(name="createOption")
    def create_option(self) -> pulumi.Input[str]:
        """
        Indicates how the snapshot is to be created. Possible values are `Copy` or `Import`. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "create_option")
    @create_option.setter
    def create_option(self, value: pulumi.Input[str]):
        pulumi.set(self, "create_option", value)
    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        The name of the resource group in which to create the Snapshot. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "resource_group_name")
    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)
    @property
    @pulumi.getter(name="diskSizeGb")
    def disk_size_gb(self) -> Optional[pulumi.Input[int]]:
        """
        The size of the Snapshotted Disk in GB.
        """
        return pulumi.get(self, "disk_size_gb")
    @disk_size_gb.setter
    def disk_size_gb(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "disk_size_gb", value)
    @property
    @pulumi.getter(name="encryptionSettings")
    def encryption_settings(self) -> Optional[pulumi.Input['SnapshotEncryptionSettingsArgs']]:
        """
        An `encryption_settings` block as defined by `SnapshotEncryptionSettingsArgs`, if any.
        """
        return pulumi.get(self, "encryption_settings")
    @encryption_settings.setter
    def encryption_settings(self, value: Optional[pulumi.Input['SnapshotEncryptionSettingsArgs']]):
        pulumi.set(self, "encryption_settings", value)
    @property
    @pulumi.getter
    def location(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "location")
    @location.setter
    def location(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "location", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the name of the Snapshot resource. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter(name="sourceResourceId")
    def source_resource_id(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies a reference to an existing snapshot, when `create_option` is `Copy`. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "source_resource_id")
    @source_resource_id.setter
    def source_resource_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "source_resource_id", value)
    @property
    @pulumi.getter(name="sourceUri")
    def source_uri(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the URI to a Managed or Unmanaged Disk. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "source_uri")
    @source_uri.setter
    def source_uri(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "source_uri", value)
    @property
    @pulumi.getter(name="storageAccountId")
    def storage_account_id(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the ID of an storage account. Used with `source_uri` to allow authorization during import of unmanaged blobs from a different subscription. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "storage_account_id")
    @storage_account_id.setter
    def storage_account_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "storage_account_id", value)
    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        A mapping of tags to assign to the resource.
        """
        return pulumi.get(self, "tags")
    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
@pulumi.input_type
class _SnapshotState:
    # NOTE: tfgen-generated state type used by Snapshot.get(); unlike
    # SnapshotArgs, every property (including create_option and
    # resource_group_name) is optional because lookups may be partial.
    def __init__(__self__, *,
                 create_option: Optional[pulumi.Input[str]] = None,
                 disk_size_gb: Optional[pulumi.Input[int]] = None,
                 encryption_settings: Optional[pulumi.Input['SnapshotEncryptionSettingsArgs']] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 source_resource_id: Optional[pulumi.Input[str]] = None,
                 source_uri: Optional[pulumi.Input[str]] = None,
                 storage_account_id: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
        """
        Input properties used for looking up and filtering Snapshot resources.
        :param pulumi.Input[str] create_option: Indicates how the snapshot is to be created. Possible values are `Copy` or `Import`. Changing this forces a new resource to be created.
        :param pulumi.Input[int] disk_size_gb: The size of the Snapshotted Disk in GB.
        :param pulumi.Input['SnapshotEncryptionSettingsArgs'] encryption_settings: An `encryption_settings` block as defined by `SnapshotEncryptionSettingsArgs`.
        :param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
        :param pulumi.Input[str] name: Specifies the name of the Snapshot resource. Changing this forces a new resource to be created.
        :param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the Snapshot. Changing this forces a new resource to be created.
        :param pulumi.Input[str] source_resource_id: Specifies a reference to an existing snapshot, when `create_option` is `Copy`. Changing this forces a new resource to be created.
        :param pulumi.Input[str] source_uri: Specifies the URI to a Managed or Unmanaged Disk. Changing this forces a new resource to be created.
        :param pulumi.Input[str] storage_account_id: Specifies the ID of an storage account. Used with `source_uri` to allow authorization during import of unmanaged blobs from a different subscription. Changing this forces a new resource to be created.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
        """
        # Record only the values that were supplied; unset state stays absent.
        if create_option is not None:
            pulumi.set(__self__, "create_option", create_option)
        if disk_size_gb is not None:
            pulumi.set(__self__, "disk_size_gb", disk_size_gb)
        if encryption_settings is not None:
            pulumi.set(__self__, "encryption_settings", encryption_settings)
        if location is not None:
            pulumi.set(__self__, "location", location)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if resource_group_name is not None:
            pulumi.set(__self__, "resource_group_name", resource_group_name)
        if source_resource_id is not None:
            pulumi.set(__self__, "source_resource_id", source_resource_id)
        if source_uri is not None:
            pulumi.set(__self__, "source_uri", source_uri)
        if storage_account_id is not None:
            pulumi.set(__self__, "storage_account_id", storage_account_id)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
    @property
    @pulumi.getter(name="createOption")
    def create_option(self) -> Optional[pulumi.Input[str]]:
        """
        Indicates how the snapshot is to be created. Possible values are `Copy` or `Import`. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "create_option")
    @create_option.setter
    def create_option(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "create_option", value)
    @property
    @pulumi.getter(name="diskSizeGb")
    def disk_size_gb(self) -> Optional[pulumi.Input[int]]:
        """
        The size of the Snapshotted Disk in GB.
        """
        return pulumi.get(self, "disk_size_gb")
    @disk_size_gb.setter
    def disk_size_gb(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "disk_size_gb", value)
    @property
    @pulumi.getter(name="encryptionSettings")
    def encryption_settings(self) -> Optional[pulumi.Input['SnapshotEncryptionSettingsArgs']]:
        """
        An `encryption_settings` block as defined by `SnapshotEncryptionSettingsArgs`, if any.
        """
        return pulumi.get(self, "encryption_settings")
    @encryption_settings.setter
    def encryption_settings(self, value: Optional[pulumi.Input['SnapshotEncryptionSettingsArgs']]):
        pulumi.set(self, "encryption_settings", value)
    @property
    @pulumi.getter
    def location(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "location")
    @location.setter
    def location(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "location", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the name of the Snapshot resource. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the resource group in which to create the Snapshot. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "resource_group_name")
    @resource_group_name.setter
    def resource_group_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "resource_group_name", value)
    @property
    @pulumi.getter(name="sourceResourceId")
    def source_resource_id(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies a reference to an existing snapshot, when `create_option` is `Copy`. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "source_resource_id")
    @source_resource_id.setter
    def source_resource_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "source_resource_id", value)
    @property
    @pulumi.getter(name="sourceUri")
    def source_uri(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the URI to a Managed or Unmanaged Disk. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "source_uri")
    @source_uri.setter
    def source_uri(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "source_uri", value)
    @property
    @pulumi.getter(name="storageAccountId")
    def storage_account_id(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the ID of an storage account. Used with `source_uri` to allow authorization during import of unmanaged blobs from a different subscription. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "storage_account_id")
    @storage_account_id.setter
    def storage_account_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "storage_account_id", value)
    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        A mapping of tags to assign to the resource.
        """
        return pulumi.get(self, "tags")
    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
class Snapshot(pulumi.CustomResource):
    # NOTE: tfgen-generated resource class. The two @overload __init__
    # signatures are typing-only; the real __init__ below dispatches to
    # _internal_init either way.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 create_option: Optional[pulumi.Input[str]] = None,
                 disk_size_gb: Optional[pulumi.Input[int]] = None,
                 encryption_settings: Optional[pulumi.Input[pulumi.InputType['SnapshotEncryptionSettingsArgs']]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 source_resource_id: Optional[pulumi.Input[str]] = None,
                 source_uri: Optional[pulumi.Input[str]] = None,
                 storage_account_id: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 __props__=None):
        """
        Manages a Disk Snapshot.
        ## Example Usage
        ```python
        import pulumi
        import pulumi_azure as azure
        example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
        example_managed_disk = azure.compute.ManagedDisk("exampleManagedDisk",
            location=example_resource_group.location,
            resource_group_name=example_resource_group.name,
            storage_account_type="Standard_LRS",
            create_option="Empty",
            disk_size_gb=10)
        example_snapshot = azure.compute.Snapshot("exampleSnapshot",
            location=example_resource_group.location,
            resource_group_name=example_resource_group.name,
            create_option="Copy",
            source_uri=example_managed_disk.id)
        ```
        ## Import
        Snapshots can be imported using the `resource id`, e.g.
        ```sh
        $ pulumi import azure:compute/snapshot:Snapshot example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Compute/snapshots/snapshot1
        ```
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] create_option: Indicates how the snapshot is to be created. Possible values are `Copy` or `Import`. Changing this forces a new resource to be created.
        :param pulumi.Input[int] disk_size_gb: The size of the Snapshotted Disk in GB.
        :param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
        :param pulumi.Input[str] name: Specifies the name of the Snapshot resource. Changing this forces a new resource to be created.
        :param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the Snapshot. Changing this forces a new resource to be created.
        :param pulumi.Input[str] source_resource_id: Specifies a reference to an existing snapshot, when `create_option` is `Copy`. Changing this forces a new resource to be created.
        :param pulumi.Input[str] source_uri: Specifies the URI to a Managed or Unmanaged Disk. Changing this forces a new resource to be created.
        :param pulumi.Input[str] storage_account_id: Specifies the ID of an storage account. Used with `source_uri` to allow authorization during import of unmanaged blobs from a different subscription. Changing this forces a new resource to be created.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: SnapshotArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Manages a Disk Snapshot.
        ## Example Usage
        ```python
        import pulumi
        import pulumi_azure as azure
        example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
        example_managed_disk = azure.compute.ManagedDisk("exampleManagedDisk",
            location=example_resource_group.location,
            resource_group_name=example_resource_group.name,
            storage_account_type="Standard_LRS",
            create_option="Empty",
            disk_size_gb=10)
        example_snapshot = azure.compute.Snapshot("exampleSnapshot",
            location=example_resource_group.location,
            resource_group_name=example_resource_group.name,
            create_option="Copy",
            source_uri=example_managed_disk.id)
        ```
        ## Import
        Snapshots can be imported using the `resource id`, e.g.
        ```sh
        $ pulumi import azure:compute/snapshot:Snapshot example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Compute/snapshots/snapshot1
        ```
        :param str resource_name: The name of the resource.
        :param SnapshotArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the two overloads: a positional SnapshotArgs object
        # versus plain keyword arguments.
        resource_args, opts = _utilities.get_resource_args_opts(SnapshotArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       create_option: Optional[pulumi.Input[str]] = None,
                       disk_size_gb: Optional[pulumi.Input[int]] = None,
                       encryption_settings: Optional[pulumi.Input[pulumi.InputType['SnapshotEncryptionSettingsArgs']]] = None,
                       location: Optional[pulumi.Input[str]] = None,
                       name: Optional[pulumi.Input[str]] = None,
                       resource_group_name: Optional[pulumi.Input[str]] = None,
                       source_resource_id: Optional[pulumi.Input[str]] = None,
                       source_uri: Optional[pulumi.Input[str]] = None,
                       storage_account_id: Optional[pulumi.Input[str]] = None,
                       tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                       __props__=None):
        # Shared implementation behind both __init__ overloads: validates
        # options, enforces required properties, and registers the resource.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: __props__ may only be used together
            # with opts.id (the "get existing resource" path).
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = SnapshotArgs.__new__(SnapshotArgs)
            # create_option/resource_group_name are required unless the
            # resource is being rehydrated from an URN.
            if create_option is None and not opts.urn:
                raise TypeError("Missing required property 'create_option'")
            __props__.__dict__["create_option"] = create_option
            __props__.__dict__["disk_size_gb"] = disk_size_gb
            __props__.__dict__["encryption_settings"] = encryption_settings
            __props__.__dict__["location"] = location
            __props__.__dict__["name"] = name
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__.__dict__["resource_group_name"] = resource_group_name
            __props__.__dict__["source_resource_id"] = source_resource_id
            __props__.__dict__["source_uri"] = source_uri
            __props__.__dict__["storage_account_id"] = storage_account_id
            __props__.__dict__["tags"] = tags
        super(Snapshot, __self__).__init__(
            'azure:compute/snapshot:Snapshot',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            create_option: Optional[pulumi.Input[str]] = None,
            disk_size_gb: Optional[pulumi.Input[int]] = None,
            encryption_settings: Optional[pulumi.Input[pulumi.InputType['SnapshotEncryptionSettingsArgs']]] = None,
            location: Optional[pulumi.Input[str]] = None,
            name: Optional[pulumi.Input[str]] = None,
            resource_group_name: Optional[pulumi.Input[str]] = None,
            source_resource_id: Optional[pulumi.Input[str]] = None,
            source_uri: Optional[pulumi.Input[str]] = None,
            storage_account_id: Optional[pulumi.Input[str]] = None,
            tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'Snapshot':
        """
        Get an existing Snapshot resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] create_option: Indicates how the snapshot is to be created. Possible values are `Copy` or `Import`. Changing this forces a new resource to be created.
        :param pulumi.Input[int] disk_size_gb: The size of the Snapshotted Disk in GB.
        :param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
        :param pulumi.Input[str] name: Specifies the name of the Snapshot resource. Changing this forces a new resource to be created.
        :param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the Snapshot. Changing this forces a new resource to be created.
        :param pulumi.Input[str] source_resource_id: Specifies a reference to an existing snapshot, when `create_option` is `Copy`. Changing this forces a new resource to be created.
        :param pulumi.Input[str] source_uri: Specifies the URI to a Managed or Unmanaged Disk. Changing this forces a new resource to be created.
        :param pulumi.Input[str] storage_account_id: Specifies the ID of an storage account. Used with `source_uri` to allow authorization during import of unmanaged blobs from a different subscription. Changing this forces a new resource to be created.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
        """
        # Look up by provider id; any supplied property values pre-populate
        # the returned resource's state.
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = _SnapshotState.__new__(_SnapshotState)
        __props__.__dict__["create_option"] = create_option
        __props__.__dict__["disk_size_gb"] = disk_size_gb
        __props__.__dict__["encryption_settings"] = encryption_settings
        __props__.__dict__["location"] = location
        __props__.__dict__["name"] = name
        __props__.__dict__["resource_group_name"] = resource_group_name
        __props__.__dict__["source_resource_id"] = source_resource_id
        __props__.__dict__["source_uri"] = source_uri
        __props__.__dict__["storage_account_id"] = storage_account_id
        __props__.__dict__["tags"] = tags
        return Snapshot(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter(name="createOption")
    def create_option(self) -> pulumi.Output[str]:
        """
        Indicates how the snapshot is to be created. Possible values are `Copy` or `Import`. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "create_option")
    @property
    @pulumi.getter(name="diskSizeGb")
    def disk_size_gb(self) -> pulumi.Output[int]:
        """
        The size of the Snapshotted Disk in GB.
        """
        return pulumi.get(self, "disk_size_gb")
    @property
    @pulumi.getter(name="encryptionSettings")
    def encryption_settings(self) -> pulumi.Output[Optional['outputs.SnapshotEncryptionSettings']]:
        """
        The snapshot's `encryption_settings` block (see `outputs.SnapshotEncryptionSettings`), if any.
        """
        return pulumi.get(self, "encryption_settings")
    @property
    @pulumi.getter
    def location(self) -> pulumi.Output[str]:
        """
        Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "location")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        Specifies the name of the Snapshot resource. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Output[str]:
        """
        The name of the resource group in which to create the Snapshot. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "resource_group_name")
    @property
    @pulumi.getter(name="sourceResourceId")
    def source_resource_id(self) -> pulumi.Output[Optional[str]]:
        """
        Specifies a reference to an existing snapshot, when `create_option` is `Copy`. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "source_resource_id")
    @property
    @pulumi.getter(name="sourceUri")
    def source_uri(self) -> pulumi.Output[Optional[str]]:
        """
        Specifies the URI to a Managed or Unmanaged Disk. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "source_uri")
    @property
    @pulumi.getter(name="storageAccountId")
    def storage_account_id(self) -> pulumi.Output[Optional[str]]:
        """
        Specifies the ID of an storage account. Used with `source_uri` to allow authorization during import of unmanaged blobs from a different subscription. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "storage_account_id")
    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        A mapping of tags to assign to the resource.
        """
        return pulumi.get(self, "tags")
| 48.886731
| 253
| 0.670429
| 3,748
| 30,212
| 5.191836
| 0.055763
| 0.080837
| 0.076263
| 0.064443
| 0.914281
| 0.904003
| 0.893417
| 0.880158
| 0.877537
| 0.871165
| 0
| 0.003157
| 0.234576
| 30,212
| 617
| 254
| 48.965964
| 0.838314
| 0.371574
| 0
| 0.806268
| 1
| 0
| 0.115604
| 0.020303
| 0
| 0
| 0
| 0
| 0
| 1
| 0.162393
| false
| 0.002849
| 0.019943
| 0.008547
| 0.279202
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
b655edf9dd7b607a02515c766a6d86551984ed21
| 4,523
|
py
|
Python
|
app/middleware/tracers/utils/trace_wrappers.py
|
publichealthengland/coronavirus-dashboard-easy-read
|
786409d79341b4ded3c0204e7b487423681e9c28
|
[
"MIT"
] | 1
|
2022-02-21T14:23:08.000Z
|
2022-02-21T14:23:08.000Z
|
app/middleware/tracers/utils/trace_wrappers.py
|
publichealthengland/coronavirus-dashboard-easy-read
|
786409d79341b4ded3c0204e7b487423681e9c28
|
[
"MIT"
] | 25
|
2021-01-19T13:41:36.000Z
|
2022-03-04T09:07:50.000Z
|
app/middleware/tracers/utils/trace_wrappers.py
|
publichealthengland/coronavirus-dashboard-easy-read
|
786409d79341b4ded3c0204e7b487423681e9c28
|
[
"MIT"
] | 3
|
2021-04-14T11:30:22.000Z
|
2022-01-17T20:23:50.000Z
|
#!/usr/bin python3
# Imports
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Python:
from logging import getLogger
from functools import wraps
from inspect import signature
# 3rd party:
from opencensus.trace.execution_context import get_opencensus_tracer
from opencensus.trace.span import SpanKind
# Internal:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
__all__ = [
'trace_async_method_operation',
'trace_method_operation'
]
logger = getLogger("app")
def trace_async_method_operation(*cls_attrs, dep_type="name", name="name", **attrs):
    """
    Decorator factory: wrap an async method so each call is reported as an
    OpenCensus dependency span.

    :param cls_attrs: names of instance attributes to record as span attributes.
    :param dep_type: instance attribute whose value names the dependency type.
    :param name: instance attribute whose value becomes the span name.
    :param attrs: literal key/value pairs added to the span. The special key
                  ``operation`` is prefixed to the span name instead of being
                  recorded as an attribute.
    """
    def wrapper(func):
        sig = signature(func)
        @wraps(func)
        async def process(klass, *args, **kwargs):
            # BUG FIX: the original declared ``nonlocal cls_attrs, attrs`` and
            # then mutated them (``cls_attrs.remove("url")`` and
            # ``attrs.pop("operation")``), so the "url" -> ``<type>.data``
            # mapping and the "operation" span-name prefix only applied to the
            # FIRST call of the wrapped method. Work on per-call copies instead.
            attr_names = list(cls_attrs)
            extra_attrs = dict(attrs)
            bound_inputs = sig.bind(klass, *args, **kwargs)
            tracer = get_opencensus_tracer()
            if tracer is None:
                # No active tracer: call straight through, untraced.
                return await func(*bound_inputs.args, **bound_inputs.kwargs)
            span = tracer.start_span()
            span.span_kind = SpanKind.UNSPECIFIED
            span.name = getattr(klass, name, None)
            if "operation" in extra_attrs:
                span.name = f'{extra_attrs.pop("operation")} {span.name}'
            dependency_type = getattr(klass, dep_type)
            span.add_attribute('dependency.type', dependency_type)
            if "url" in attr_names and dependency_type.lower() == "azure blob":
                # Blob URLs are reported under ``<type>.data`` rather than as a
                # plain ``<type>.url`` attribute.
                attr_names.remove("url")
                span.add_attribute(f"{dependency_type}.data", getattr(klass, "url", None))
            if "query" in bound_inputs.arguments:
                span.add_attribute(f"{dependency_type}.query", bound_inputs.arguments['query'])
            span.add_attribute(f"{dependency_type}.method.name", func.__name__)
            for key in attr_names:
                span.add_attribute(f"{dependency_type}.{key}", getattr(klass, key, None))
            for key, value in extra_attrs.items():
                span.add_attribute(f"{dependency_type}.{key}", value)
            success = True
            try:
                return await func(klass, *args, **kwargs)
            except Exception as err:
                success = False
                logger.exception(err, exc_info=True)
                raise  # bare raise preserves the original traceback
            finally:
                # Always record the outcome and close the span.
                span.add_attribute(f'{dependency_type}.success', success)
                tracer.end_span()
        return process
    return wrapper
def trace_method_operation(*cls_attrs, dep_type="name", name="name", **attrs):
    """
    Decorator factory: wrap a synchronous method so each call is reported as an
    OpenCensus dependency span. Synchronous twin of
    ``trace_async_method_operation``.

    :param cls_attrs: names of instance attributes to record as span attributes.
    :param dep_type: instance attribute whose value names the dependency type.
    :param name: instance attribute whose value becomes the span name.
    :param attrs: literal key/value pairs added to the span. The special key
                  ``operation`` is prefixed to the span name instead of being
                  recorded as an attribute.
    """
    def wrapper(func):
        sig = signature(func)
        @wraps(func)
        def process(klass, *args, **kwargs):
            # BUG FIX: the original declared ``nonlocal cls_attrs, attrs`` and
            # then mutated them (``cls_attrs.remove("url")`` and
            # ``attrs.pop("operation")``), so the "url" -> ``<type>.data``
            # mapping and the "operation" span-name prefix only applied to the
            # FIRST call of the wrapped method. Work on per-call copies instead.
            attr_names = list(cls_attrs)
            extra_attrs = dict(attrs)
            bound_inputs = sig.bind(klass, *args, **kwargs)
            tracer = get_opencensus_tracer()
            if tracer is None:
                # No active tracer: call straight through, untraced.
                return func(*bound_inputs.args, **bound_inputs.kwargs)
            span = tracer.start_span()
            span.span_kind = SpanKind.UNSPECIFIED
            span.name = getattr(klass, name, None)
            if "operation" in extra_attrs:
                span.name = f'{extra_attrs.pop("operation")} {span.name}'
            dependency_type = getattr(klass, dep_type)
            span.add_attribute('dependency.type', dependency_type)
            if "url" in attr_names and dependency_type.lower() == "azure blob":
                # Blob URLs are reported under ``<type>.data`` rather than as a
                # plain ``<type>.url`` attribute.
                attr_names.remove("url")
                span.add_attribute(f"{dependency_type}.data", getattr(klass, "url", None))
            if "query" in bound_inputs.arguments:
                span.add_attribute(f"{dependency_type}.query", bound_inputs.arguments['query'])
            span.add_attribute(f"{dependency_type}.method.name", func.__name__)
            for key in attr_names:
                span.add_attribute(f"{dependency_type}.{key}", getattr(klass, key, None))
            for key, value in extra_attrs.items():
                span.add_attribute(f"{dependency_type}.{key}", value)
            success = True
            try:
                return func(klass, *args, **kwargs)
            except Exception as err:
                success = False
                logger.exception(err, exc_info=True)
                raise  # bare raise preserves the original traceback
            finally:
                # Always record the outcome and close the span.
                span.add_attribute(f'{dependency_type}.success', success)
                tracer.end_span()
        return process
    return wrapper
| 33.257353
| 95
| 0.562901
| 490
| 4,523
| 4.997959
| 0.181633
| 0.114332
| 0.091466
| 0.083299
| 0.870559
| 0.870559
| 0.870559
| 0.870559
| 0.870559
| 0.870559
| 0
| 0.000627
| 0.294274
| 4,523
| 135
| 96
| 33.503704
| 0.766604
| 0.051293
| 0
| 0.8
| 0
| 0
| 0.12535
| 0.09057
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0.055556
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fcaf26a4837f7a7073b300fb06847dd865b52c51
| 2,253
|
py
|
Python
|
src/weather_parser/migrations/0010_auto_20160107_0731.py
|
livingbio/weather-parser
|
8f612293a4ab436750df4bd59348faaaf90488cf
|
[
"MIT"
] | null | null | null |
src/weather_parser/migrations/0010_auto_20160107_0731.py
|
livingbio/weather-parser
|
8f612293a4ab436750df4bd59348faaaf90488cf
|
[
"MIT"
] | null | null | null |
src/weather_parser/migrations/0010_auto_20160107_0731.py
|
livingbio/weather-parser
|
8f612293a4ab436750df4bd59348faaaf90488cf
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-01-07 07:31
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('weather_parser', '0009_remove_city_airports'),
]
operations = [
migrations.AlterField(
model_name='airport',
name='airport_id',
field=models.CharField(blank=True, max_length=1024),
),
migrations.AlterField(
model_name='airport',
name='altitude',
field=models.FloatField(blank=True),
),
migrations.AlterField(
model_name='airport',
name='city_name',
field=models.CharField(blank=True, max_length=1024),
),
migrations.AlterField(
model_name='airport',
name='content',
field=models.TextField(blank=True),
),
migrations.AlterField(
model_name='airport',
name='country_name',
field=models.CharField(blank=True, max_length=1024),
),
migrations.AlterField(
model_name='airport',
name='dst',
field=models.CharField(blank=True, max_length=1024),
),
migrations.AlterField(
model_name='airport',
name='iata',
field=models.CharField(blank=True, max_length=1024),
),
migrations.AlterField(
model_name='airport',
name='icao',
field=models.CharField(blank=True, max_length=1024),
),
migrations.AlterField(
model_name='airport',
name='latitude',
field=models.FloatField(blank=True),
),
migrations.AlterField(
model_name='airport',
name='longitude',
field=models.FloatField(blank=True),
),
migrations.AlterField(
model_name='airport',
name='name',
field=models.CharField(blank=True, max_length=1024),
),
migrations.AlterField(
model_name='airport',
name='timezone',
field=models.FloatField(blank=True),
),
]
| 29.644737
| 64
| 0.548158
| 205
| 2,253
| 5.873171
| 0.263415
| 0.118771
| 0.249169
| 0.289037
| 0.759967
| 0.73505
| 0.701827
| 0.701827
| 0.66113
| 0.66113
| 0
| 0.031354
| 0.334665
| 2,253
| 75
| 65
| 30.04
| 0.771848
| 0.02885
| 0
| 0.691176
| 1
| 0
| 0.095652
| 0.011442
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.029412
| 0
| 0.073529
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fcafb9d3aea822046192e196061935150e9a5ca0
| 71
|
py
|
Python
|
bot/__init__.py
|
shauncameron/ChatBot
|
0b3168204788e3e44369c0a96f8e0321f74790ad
|
[
"Unlicense"
] | null | null | null |
bot/__init__.py
|
shauncameron/ChatBot
|
0b3168204788e3e44369c0a96f8e0321f74790ad
|
[
"Unlicense"
] | null | null | null |
bot/__init__.py
|
shauncameron/ChatBot
|
0b3168204788e3e44369c0a96f8e0321f74790ad
|
[
"Unlicense"
] | null | null | null |
from bot.intents import *
from bot.bot import *
from bot.entry import *
| 23.666667
| 25
| 0.760563
| 12
| 71
| 4.5
| 0.416667
| 0.388889
| 0.481481
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15493
| 71
| 3
| 26
| 23.666667
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
fcee897f9d7d90aafca388a5892c78c84898829a
| 805
|
py
|
Python
|
src/sailing_robot/tests/test_gps_utils.py
|
sjdsm/sailing-robot
|
15789066bb6dead147fc24d763ea384454588cb0
|
[
"MIT"
] | 87
|
2016-02-04T08:44:00.000Z
|
2022-03-19T19:53:48.000Z
|
src/sailing_robot/tests/test_gps_utils.py
|
LaurieChen/sailing-robot
|
840fb10d18026ea0f2ea546691c9bf958b8842d3
|
[
"MIT"
] | 269
|
2016-01-29T08:19:59.000Z
|
2020-02-13T12:33:26.000Z
|
src/sailing_robot/tests/test_gps_utils.py
|
LaurieChen/sailing-robot
|
840fb10d18026ea0f2ea546691c9bf958b8842d3
|
[
"MIT"
] | 45
|
2016-02-11T22:59:53.000Z
|
2020-12-10T02:58:50.000Z
|
from nose.tools import assert_equal
from sailing_robot.gps_utils import ubx_checksum
def test_ubx_checksum():
assert_equal(ubx_checksum(b'\x06\x01\x08\x00\xF0\x02\x00\x00\x00\x00\x00\x01'),
b'\x02\x32')
assert_equal(ubx_checksum(b'\x06\x01\x08\x00\xF0\x03\x00\x00\x00\x00\x00\x01'),
b'\x03\x39')
assert_equal(ubx_checksum(b'\x06\x01\x08\x00\xF0\x04\x00\x00\x00\x00\x00\x01'),
b'\x04\x40')
assert_equal(ubx_checksum(b'\x06\x01\x08\x00\xF0\x05\x00\x00\x00\x00\x00\x01'),
b'\x05\x47')
assert_equal(ubx_checksum(b'\x06\x01\x08\x00\xF0\x01\x00\x00\x00\x00\x00\x01'),
b'\x01\x2B')
assert_equal(ubx_checksum(b'\x06\x08\x06\x00\xC8\x00\x01\x00\x01\x00'),
b'\xDE\x6A')
| 47.352941
| 83
| 0.617391
| 136
| 805
| 3.522059
| 0.235294
| 0.250522
| 0.281837
| 0.250522
| 0.649269
| 0.649269
| 0.59499
| 0.39666
| 0.39666
| 0.39666
| 0
| 0.241379
| 0.207453
| 805
| 16
| 84
| 50.3125
| 0.509404
| 0
| 0
| 0
| 0
| 0.333333
| 0.407453
| 0.347826
| 0
| 0
| 0
| 0
| 0.466667
| 1
| 0.066667
| true
| 0
| 0.133333
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1e0595c5da4b3f606ac804b0b528eb1dabcc4d6e
| 23,877
|
py
|
Python
|
msgraph/cli/command_modules/usersfunctions/azext_usersfunctions/generated/custom.py
|
microsoftgraph/msgraph-cli-archived
|
489f70bf4ede1ce67b84bfb31e66da3e4db76062
|
[
"MIT"
] | null | null | null |
msgraph/cli/command_modules/usersfunctions/azext_usersfunctions/generated/custom.py
|
microsoftgraph/msgraph-cli-archived
|
489f70bf4ede1ce67b84bfb31e66da3e4db76062
|
[
"MIT"
] | 22
|
2022-03-29T22:54:37.000Z
|
2022-03-29T22:55:27.000Z
|
msgraph/cli/command_modules/usersfunctions/azext_usersfunctions/generated/custom.py
|
microsoftgraph/msgraph-cli-archived
|
489f70bf4ede1ce67b84bfb31e66da3e4db76062
|
[
"MIT"
] | null | null | null |
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# pylint: disable=line-too-long
# pylint: disable=too-many-lines
def usersfunctions_usersactivity_recent(client,
user_id):
return client.recent(user_id=user_id)
def usersfunctions_userscalendarviewcalendar_allowed_calendar_sharing_role(client,
user_id,
event_id,
user):
return client.allowed_calendar_sharing_roles(user_id=user_id,
event_id=event_id,
user=user)
def usersfunctions_userscalendarviewinstance_delta(client,
user_id,
event_id):
return client.delta(user_id=user_id,
event_id=event_id)
def usersfunctions_userscalendarview_delta(client,
user_id):
return client.delta(user_id=user_id)
def usersfunctions_userscalendareventscalendar_allowed_calendar_sharing_role(client,
user_id,
event_id,
user):
return client.allowed_calendar_sharing_roles(user_id=user_id,
event_id=event_id,
user=user)
def usersfunctions_userscalendareventsinstance_delta(client,
user_id,
event_id):
return client.delta(user_id=user_id,
event_id=event_id)
def usersfunctions_userscalendarevent_delta(client,
user_id):
return client.delta(user_id=user_id)
def usersfunctions_userscalendar_allowed_calendar_sharing_role(client,
user_id,
user):
return client.allowed_calendar_sharing_roles(user_id=user_id,
user=user)
def usersfunctions_userscalendargroupscalendarscalendarviewcalendar_allowed_calendar_sharing_role(client,
user_id,
calendar_group_id,
calendar_id,
event_id,
user):
return client.allowed_calendar_sharing_roles(user_id=user_id,
calendar_group_id=calendar_group_id,
calendar_id=calendar_id,
event_id=event_id,
user=user)
def usersfunctions_userscalendargroupscalendarscalendarviewinstance_delta(client,
user_id,
calendar_group_id,
calendar_id,
event_id):
return client.delta(user_id=user_id,
calendar_group_id=calendar_group_id,
calendar_id=calendar_id,
event_id=event_id)
def usersfunctions_userscalendargroupscalendarscalendarview_delta(client,
user_id,
calendar_group_id,
calendar_id):
return client.delta(user_id=user_id,
calendar_group_id=calendar_group_id,
calendar_id=calendar_id)
def usersfunctions_userscalendargroupscalendarseventscalendar_allowed_calendar_sharing_role(client,
user_id,
calendar_group_id,
calendar_id,
event_id,
user):
return client.allowed_calendar_sharing_roles(user_id=user_id,
calendar_group_id=calendar_group_id,
calendar_id=calendar_id,
event_id=event_id,
user=user)
def usersfunctions_userscalendargroupscalendarseventsinstance_delta(client,
user_id,
calendar_group_id,
calendar_id,
event_id):
return client.delta(user_id=user_id,
calendar_group_id=calendar_group_id,
calendar_id=calendar_id,
event_id=event_id)
def usersfunctions_userscalendargroupscalendarsevent_delta(client,
user_id,
calendar_group_id,
calendar_id):
return client.delta(user_id=user_id,
calendar_group_id=calendar_group_id,
calendar_id=calendar_id)
def usersfunctions_userscalendargroupscalendar_allowed_calendar_sharing_role(client,
user_id,
calendar_group_id,
calendar_id,
user):
return client.allowed_calendar_sharing_roles(user_id=user_id,
calendar_group_id=calendar_group_id,
calendar_id=calendar_id,
user=user)
def usersfunctions_userscalendarscalendarviewcalendar_allowed_calendar_sharing_role(client,
user_id,
calendar_id,
event_id,
user):
return client.allowed_calendar_sharing_roles(user_id=user_id,
calendar_id=calendar_id,
event_id=event_id,
user=user)
def usersfunctions_userscalendarscalendarviewinstance_delta(client,
user_id,
calendar_id,
event_id):
return client.delta(user_id=user_id,
calendar_id=calendar_id,
event_id=event_id)
def usersfunctions_userscalendarscalendarview_delta(client,
user_id,
calendar_id):
return client.delta(user_id=user_id,
calendar_id=calendar_id)
def usersfunctions_userscalendarseventscalendar_allowed_calendar_sharing_role(client,
user_id,
calendar_id,
event_id,
user):
return client.allowed_calendar_sharing_roles(user_id=user_id,
calendar_id=calendar_id,
event_id=event_id,
user=user)
def usersfunctions_userscalendarseventsinstance_delta(client,
user_id,
calendar_id,
event_id):
return client.delta(user_id=user_id,
calendar_id=calendar_id,
event_id=event_id)
def usersfunctions_userscalendarsevent_delta(client,
user_id,
calendar_id):
return client.delta(user_id=user_id,
calendar_id=calendar_id)
def usersfunctions_userscalendar_allowed_calendar_sharing_role(client,
user_id,
calendar_id,
user):
return client.allowed_calendar_sharing_roles(user_id=user_id,
calendar_id=calendar_id,
user=user)
def usersfunctions_userscalendarviewcalendarview_delta(client,
user_id,
event_id):
return client.delta(user_id=user_id,
event_id=event_id)
def usersfunctions_userscalendarviewcalendarevent_delta(client,
user_id,
event_id):
return client.delta(user_id=user_id,
event_id=event_id)
def usersfunctions_userscalendarviewcalendar_allowed_calendar_sharing_role(client,
user_id,
event_id,
user):
return client.allowed_calendar_sharing_roles(user_id=user_id,
event_id=event_id,
user=user)
def usersfunctions_userscalendarviewinstance_delta(client,
user_id,
event_id):
return client.delta(user_id=user_id,
event_id=event_id)
def usersfunctions_userscalendarview_delta(client,
user_id):
return client.delta(user_id=user_id)
def usersfunctions_userscontactfolderschildfolder_delta(client,
user_id,
contact_folder_id):
return client.delta(user_id=user_id,
contact_folder_id=contact_folder_id)
def usersfunctions_userscontactfolderscontact_delta(client,
user_id,
contact_folder_id):
return client.delta(user_id=user_id,
contact_folder_id=contact_folder_id)
def usersfunctions_userscontactfolder_delta(client,
user_id):
return client.delta(user_id=user_id)
def usersfunctions_userscontact_delta(client,
user_id):
return client.delta(user_id=user_id)
def usersfunctions_userseventscalendarview_delta(client,
user_id,
event_id):
return client.delta(user_id=user_id,
event_id=event_id)
def usersfunctions_userseventscalendarevent_delta(client,
user_id,
event_id):
return client.delta(user_id=user_id,
event_id=event_id)
def usersfunctions_userseventscalendar_allowed_calendar_sharing_role(client,
user_id,
event_id,
user):
return client.allowed_calendar_sharing_roles(user_id=user_id,
event_id=event_id,
user=user)
def usersfunctions_userseventsinstance_delta(client,
user_id,
event_id):
return client.delta(user_id=user_id,
event_id=event_id)
def usersfunctions_usersevent_delta(client,
user_id):
return client.delta(user_id=user_id)
def usersfunctions_usersmailfolderschildfolder_delta(client,
user_id,
mail_folder_id):
return client.delta(user_id=user_id,
mail_folder_id=mail_folder_id)
def usersfunctions_usersmailfoldersmessage_delta(client,
user_id,
mail_folder_id):
return client.delta(user_id=user_id,
mail_folder_id=mail_folder_id)
def usersfunctions_usersmailfolder_delta(client,
user_id):
return client.delta(user_id=user_id)
def usersfunctions_usersmanagedappregistration_show_user_id_with_flagged_app_registration(client,
user_id):
return client.get_user_ids_with_flagged_app_registration(user_id=user_id)
def usersfunctions_usersmessage_delta(client,
user_id):
return client.delta(user_id=user_id)
def usersfunctions_user_delta(client):
return client.delta()
def usersfunctions_user_reminder_view(client,
user_id,
start_date_time,
end_date_time):
return client.reminder_view(user_id=user_id,
start_date_time=start_date_time,
end_date_time=end_date_time)
def usersfunctions_user_show_managed_app_diagnostic_statuses(client,
user_id):
return client.get_managed_app_diagnostic_statuses(user_id=user_id)
def usersfunctions_user_show_managed_app_policy(client,
user_id):
return client.get_managed_app_policies(user_id=user_id)
def usersfunctions_usersonenotenotebookssectiongroupssectionspage_preview(client,
user_id,
notebook_id,
section_group_id,
onenote_section_id,
onenote_page_id):
return client.preview(user_id=user_id,
notebook_id=notebook_id,
section_group_id=section_group_id,
onenote_section_id=onenote_section_id,
onenote_page_id=onenote_page_id)
def usersfunctions_usersonenotenotebookssectionspage_preview(client,
user_id,
notebook_id,
onenote_section_id,
onenote_page_id):
return client.preview(user_id=user_id,
notebook_id=notebook_id,
onenote_section_id=onenote_section_id,
onenote_page_id=onenote_page_id)
def usersfunctions_usersonenotenotebook_show_recent_notebook(client,
user_id,
include_personal_notebooks):
return client.get_recent_notebooks(user_id=user_id,
include_personal_notebooks=False if include_personal_notebooks is None else include_personal_notebooks)
def usersfunctions_usersonenotepage_preview(client,
user_id,
onenote_page_id):
return client.preview(user_id=user_id,
onenote_page_id=onenote_page_id)
def usersfunctions_usersonenotepagesparentnotebooksectiongroupssectionspage_preview(client,
user_id,
onenote_page_id,
section_group_id,
onenote_section_id,
onenote_page_id1):
return client.preview(user_id=user_id,
onenote_page_id=onenote_page_id,
section_group_id=section_group_id,
onenote_section_id=onenote_section_id,
onenote_page_id1=onenote_page_id1)
def usersfunctions_usersonenotepagesparentnotebooksectionspage_preview(client,
user_id,
onenote_page_id,
onenote_section_id,
onenote_page_id1):
return client.preview(user_id=user_id,
onenote_page_id=onenote_page_id,
onenote_section_id=onenote_section_id,
onenote_page_id1=onenote_page_id1)
def usersfunctions_usersonenotepagesparentsectionpage_preview(client,
user_id,
onenote_page_id,
onenote_page_id1):
return client.preview(user_id=user_id,
onenote_page_id=onenote_page_id,
onenote_page_id1=onenote_page_id1)
def usersfunctions_usersonenotesectiongroupsparentnotebooksectionspage_preview(client,
user_id,
section_group_id,
onenote_section_id,
onenote_page_id):
return client.preview(user_id=user_id,
section_group_id=section_group_id,
onenote_section_id=onenote_section_id,
onenote_page_id=onenote_page_id)
def usersfunctions_usersonenotesectiongroupssectionspage_preview(client,
user_id,
section_group_id,
onenote_section_id,
onenote_page_id):
return client.preview(user_id=user_id,
section_group_id=section_group_id,
onenote_section_id=onenote_section_id,
onenote_page_id=onenote_page_id)
def usersfunctions_usersonenotesectionspage_preview(client,
user_id,
onenote_section_id,
onenote_page_id):
return client.preview(user_id=user_id,
onenote_section_id=onenote_section_id,
onenote_page_id=onenote_page_id)
def usersfunctions_usersoutlook_supported_language(client,
user_id):
return client.supported_languages(user_id=user_id)
def usersfunctions_usersoutlook_supported_time_zone_ee48(client,
user_id):
return client.supported_time_zones_ee48(user_id=user_id)
def usersfunctions_usersoutlook_supported_time_zones51_c6(client,
user_id,
time_zone_standard):
return client.supported_time_zones51_c6(user_id=user_id,
time_zone_standard=time_zone_standard)
| 50.694268
| 143
| 0.402898
| 1,571
| 23,877
| 5.65436
| 0.094844
| 0.116177
| 0.06079
| 0.077001
| 0.772712
| 0.747608
| 0.71316
| 0.702578
| 0.687493
| 0.661939
| 0
| 0.001807
| 0.559618
| 23,877
| 470
| 144
| 50.802128
| 0.842986
| 0.020941
| 0
| 0.801749
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.169096
| false
| 0
| 0
| 0.169096
| 0.338192
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 7
|
1e470937a2165ae6c56c74eb71ccb89fc2b0db23
| 40
|
py
|
Python
|
molecule/shared/tests/rustup_default.py
|
chasinglogic/ansible-rustup
|
ae947dd1138b27eeb884ce64a0d8e262e7c9d4fc
|
[
"MIT"
] | 10
|
2020-06-12T09:21:42.000Z
|
2022-03-30T06:17:00.000Z
|
molecule/shared/tests/rustup_default.py
|
chasinglogic/ansible-rustup
|
ae947dd1138b27eeb884ce64a0d8e262e7c9d4fc
|
[
"MIT"
] | 7
|
2020-02-26T22:01:21.000Z
|
2022-01-26T20:52:46.000Z
|
molecule/shared/tests/rustup_default.py
|
chasinglogic/ansible-rustup
|
ae947dd1138b27eeb884ce64a0d8e262e7c9d4fc
|
[
"MIT"
] | 7
|
2020-01-24T04:37:15.000Z
|
2022-03-28T13:30:56.000Z
|
def get_user_home():
return "/root"
| 13.333333
| 20
| 0.65
| 6
| 40
| 4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 40
| 2
| 21
| 20
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0.125
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0.5
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
94ef2201dabd59c892b51bdcc370c043e5f1e349
| 4,147
|
py
|
Python
|
src/blockchain_users/camille.py
|
MikitaSaladukha/my-blockchain
|
c09091762dc559d41b8aa29fbe8267aff834a57c
|
[
"Apache-2.0"
] | 4
|
2021-11-14T17:16:03.000Z
|
2022-03-17T21:01:42.000Z
|
src/blockchain_users/camille.py
|
MikitaSaladukha/my-blockchain
|
c09091762dc559d41b8aa29fbe8267aff834a57c
|
[
"Apache-2.0"
] | null | null | null |
src/blockchain_users/camille.py
|
MikitaSaladukha/my-blockchain
|
c09091762dc559d41b8aa29fbe8267aff834a57c
|
[
"Apache-2.0"
] | 5
|
2021-07-30T14:27:37.000Z
|
2021-12-15T12:08:46.000Z
|
private_key = b'0\x82\x04\xa4\x02\x01\x00\x02\x82\x01\x01\x00\xa1\x02M\xa7\x01A\x1e\xa0e\x11\x9f@\x07\x9b\x8e\x18\xb3\xf3Tj\x9a\xac\xb4\xa5\xfdy\xa1\x90\xcb\x05,]\xf8\xf4i\xd0\re\x9c\x98\x17\xa5\x9a$;\xc7\x81\xda\x15\xdf^\xd5\xcd\xe5(\x04Un(?\xa6 ]\x1cv\xc2\x90\xb32\xedAR6Dj\xd1/V\xa7\xf2\xb0nd\xfdU#r\xbcw_z\xc2\xd16|\xa7\x98\x16\xce\x01\t\x80\xa3:\x14\xb3\x95"Qn\x02>\x8aD\xd9\r[\xa9\xcf\xd3#\x1b\ni\xef\xe9\xe8-t\x89=\x04 \xfb\x8b\xb7\x96\x1b\xa0\xe5\xd0Mi}\x98\xb9f\x9e\\\r\xbc\xd8\x0b\x99B\xd6\xe7v\xe0\x89\xfb\xb5\xcd\xe7\xc1v\x84X\xfcw\x8bt!\xfd\xc0\xeb\xebn\x01\xc4\xb2_t\xde\xf4\xc4\x9b\x7f\xa7\xea\xce\xc5\x82\xfd\x03\x05Ap\xd5\xedl#\xbf\x9d9\xb8\xaf\xfa\x10N$\xa5"\xc1b\xa7\xe2\x01\x83Db\xb4\xe5\xbdU02{\xb6\x14\x90sX\xe7[\xb5#J\x12+F\x05Pq\x0e\x17\x8d\x03\x05X\xa0\xd8\xe2\xd5R\x83\x02\x03\x01\x00\x01\x02\x82\x01\x00\x05\xca\xb4\xebd\x86\xa8\xa8\xe1i\xdc^+\xc7\xe39-C\xfc\r\xe7\xa9}\xc8\x18\xd1\xa6\xc7\xb4\x1d\xe8\x06+]\xf3nz\x04\xe58\xade\x16!\xe6\x8a\x9b7\xb2\xa0\xb9\xe3\x1c\x08y\xc3\x8a%/\xcavY\xe4\xaf\x90\xf0\x17\xcfrn\ts\x01#\xbd\xe9\xefw\x81\x0c _\xa0?b\xb0\'@\xf3TZp\xa6\xa0\xfc\xa3\xf4\xbb!F\xf5F\x0c\xd9\x1a\x84\x89AVG\x8cJ\x1e\xa42`\xa2"\xfe(\\\xb3l\xd0\xf9\xc1\xe2\xc9\xad2\xe9\xbe\x91\x829\xdc\x8f\xe7n\xdfv\xbc\x81\nW\xd0\xf1"<9\xa22\xfc@?\xe5\xad:\xe9\xb1\xfdzOjx\r\xcbv\xc3C\n,\xf3\xb0u\x8e\x8c\x90\x90\xa4\xdbr\xe4\xc9f\xae\x1b\x99A]b\x98\x11\xc9Z\x9f\xb0^\x19 O\xd3\xda\x11NJ!\xd05\xa1\x92\x1c\xb8iFOn\xbd\xf4j\xbf\xc6\x81\xebV\x89Bu\xd4%z\xff\xf8$\xa3\xcdZ`n\x9e\xca\xddpj\xde\xba\x98\xfd\xa8\xc9\xe4;<?\xe6\xa5\x02\x81\x81\x00\xc6\xd0\xed\xa7 \x1eL\xb6\xfb\x05\xdb\xc3K\x96J8\xe3\xafKi\xbeB\x13! 
\x9b\x00\xa8\x94{s\xe4\x8f\xc1\xd1j\xb3TL\xeb\x9d\xf7\x8b\x8e\xf7\xe0\x8c\xff!\xd6:*\xbe\x8b\xe5\xeb\xf5\xac\x87\x95\xd3\xc1\xdb\xae\x14\x91\x00\xa1\xa7\x1ch\xb5\xf1\x80\xd6E\r\xa6\x1e\xd8H]\x8a\xb9Q\xe5 z\x80\x06\xd6\xa3Z\x18\xe3\xa6\xe1z\xa0\xff\x87TK\x19\xf2g \x9bQ@!>U\x0e\xdc\xad\xe82\x13\xd0\x97\x12-\xb5\x08\xa3\xa5\x07\x02\x81\x81\x00\xcfQ\x97$\x9e-\xb9\x9f\x7fs\x12\x9f\xedt\x17\xa1!\xec\x85c\r\xc4\x03\xfem\xfb%\xca\xf3i\xde=\xce\xf4D\xf1\x16$\x01\x9deA\x06?\xa2b\xff\xcd\x10\xec\x82\xdfb\xabL\xe5\x0b\xda4\xab\x03}a\xe9\x82\xcap\x0fV\x97\xce\xa1\xa1b\x95c5\xa2\x99\x90\x9aMA\x8dI;\x85\x96\xc3etwPw\xc5\xba\x89%$\xfe\x96O\xddaO\xd9\x1e\r\xcdH\xc2\xb2@\x0b\x8a\xcd\xd4\x05\xf8\xde\xd78\xb6\x04x\xd4#\xa5\x02\x81\x80CX\xd2\xa8"A!Kz\x8c\xe9|\xa6F*\xaeJ\xb2>\xa1{Iv\xa1j"\x17\x7f\x03\x8d.\x1c\xe6u\x892\xd3\xbcb\xb2\r\xb8\xa5\x15\xb0\xf1\xe7\xd1$\xed$\x97\x06$\xed\xa5\x98z\xf1\x12\xd7\xc0{a\xe4\xa5\x99\xc9(\x8a\x7f\r\xe2\xd8\xf9\xbc:{cGp{\xffY\xf7[\xde<\xa0\xd1\xb03uy\xa8\xe4\x06\xcd;lS\xb3B\x1do\xf7o\x1c-\xd1\xc3q\x11\xef\x0e\xe1\xfa\x1d\xbc\x88\x94$\x1cG\x8e\xbd\xa0Q\x02\x81\x81\x00\x86t\x0c\xc8\xd3\xc9%\xd4Z,\xc0\x0cvLO2\xd24y\xc1f\xe1\x14\x12\x033\xd9+\xc97\x84\xc9\xa3\x19jH\xcc\xaa\']\xf3\x97\xfb<s\xcd.\xc6\xc8\xce\n\x86c\x90b\xfb<\xf7\x94&\xc0\xc9\xa5!s\x10e"\x9do0\xb4D]\x123XJ\x8e\xbbhF\xe8W\x80\x02\x19>P\x94\xd0\xb6\xbc\xba \xc3<D\x99\xbc~\xb1g\n\xc0e8\x07\x8dv=\xc6\xaa\xa0\x91\xb1\xb1j\xfa\x1fS\x87U\x0c\x8blQ}\x02\x81\x81\x00\xa9\xdeM\xb8\xec\xba\x0c4,\xa8\xb0\xb3J\xc9\x19\xe9\x1f\x13z1\xb1\xdbI<\xa8%\x05\xf1\x80\xeb\x90\x8e\x8d\xc9\xb7\xbb\x19\xa64\xb5\xa1\xfb\xa4\xc9\xea\x1e6\xac\x82-7\xee0\xb6Mt\xf4\xd9\xcf\x7f\x91\xeb\xa4\xa3o{\x8fB\xc8\x05\xbe:qq\xcb\xd5cg\xa8<\x8e\xfc\x02\x7fFuE\xc4\xe6\xb9\x8fID\xb3\xadeG\x17\x01V\x00\x7fy\x8d\xee\x85\x8a\x87K~\x13\xb7\xbe\xd41\xcf\x8a\x18\x9eF@\x81\x08\x97Y\x9c\xf3\xb6'
public_key_hex = "30820122300d06092a864886f70d01010105000382010f003082010a0282010100a1024da701411ea065119f40079b8e18b3f3546a9aacb4a5fd79a190cb052c5df8f469d00d659c9817a59a243bc781da15df5ed5cde52804556e283fa6205d1c76c290b332ed415236446ad12f56a7f2b06e64fd552372bc775f7ac2d1367ca79816ce010980a33a14b39522516e023e8a44d90d5ba9cfd3231b0a69efe9e82d74893d0420fb8bb7961ba0e5d04d697d98b9669e5c0dbcd80b9942d6e776e089fbb5cde7c1768458fc778b7421fdc0ebeb6e01c4b25f74def4c49b7fa7eacec582fd03054170d5ed6c23bf9d39b8affa104e24a522c162a7e201834462b4e5bd5530327bb614907358e75bb5234a122b460550710e178d030558a0d8e2d552830203010001"
public_key_hash = "7681c82af05a85f68a5810d967ee3a4087711867"
| 1,036.75
| 3,477
| 0.775741
| 810
| 4,147
| 3.961728
| 0.448148
| 0.009349
| 0.011218
| 0.014958
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.309034
| 0.00434
| 4,147
| 3
| 3,478
| 1,382.333333
| 0.468152
| 0
| 0
| 0
| 0
| 1
| 0.595129
| 0.591271
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
94f7e3b28e28b7ad147d4f222f143231449e7e24
| 42
|
py
|
Python
|
part23/mylib/__init__.py
|
ThomasAlbin/SpaceScienceTutorial
|
9ca5c1340480a29a112dec91e075b7ee2eff38ed
|
[
"MIT"
] | 167
|
2020-04-21T21:04:14.000Z
|
2022-03-29T15:07:52.000Z
|
part23/mylib/__init__.py
|
wellyington/SpaceScienceTutorial
|
9ca5c1340480a29a112dec91e075b7ee2eff38ed
|
[
"MIT"
] | 11
|
2020-05-19T18:49:24.000Z
|
2021-06-08T01:51:29.000Z
|
part23/mylib/__init__.py
|
wellyington/SpaceScienceTutorial
|
9ca5c1340480a29a112dec91e075b7ee2eff38ed
|
[
"MIT"
] | 41
|
2020-05-03T06:13:17.000Z
|
2022-02-12T17:32:51.000Z
|
from . import tests
from . import general
| 14
| 21
| 0.761905
| 6
| 42
| 5.333333
| 0.666667
| 0.625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.190476
| 42
| 2
| 22
| 21
| 0.941176
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
a204f3c2dcd7a02a37691e1e8bf29659e575aed8
| 234
|
py
|
Python
|
server/loggerConfig.py
|
C3RV1/Prive
|
dc866902d3f581965e8c06a7c494ae757caff78d
|
[
"MIT"
] | 2
|
2019-07-08T12:35:10.000Z
|
2019-07-08T12:35:15.000Z
|
server/loggerConfig.py
|
C3RV1/Prive
|
dc866902d3f581965e8c06a7c494ae757caff78d
|
[
"MIT"
] | null | null | null |
server/loggerConfig.py
|
C3RV1/Prive
|
dc866902d3f581965e8c06a7c494ae757caff78d
|
[
"MIT"
] | null | null | null |
spaces = 40
def name_and_message(name, message):
# [NAME] + " "*(((40 - (len(name) + 2)) > 0) * (spaces - (len(name) + 2))) * message
return "[" + name + "]" + " "*(((spaces - (len(name)+2)) > 0)*(spaces-(len(name)+2))) + message
| 33.428571
| 96
| 0.512821
| 31
| 234
| 3.806452
| 0.322581
| 0.237288
| 0.271186
| 0.355932
| 0.508475
| 0.508475
| 0.508475
| 0.508475
| 0.508475
| 0
| 0
| 0.052632
| 0.188034
| 234
| 6
| 97
| 39
| 0.568421
| 0.350427
| 0
| 0
| 0
| 0
| 0.02
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.333333
| 0.666667
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
a206ad7ec009a4f471307f1abbfdf0806b9f3b1c
| 300
|
py
|
Python
|
znail/ui/api/disciplines/__init__.py
|
Risca/znail
|
01c94465a4bd5d48c1f0e7c4f339138b3a1d0a6f
|
[
"Apache-2.0"
] | 15
|
2019-10-23T07:02:35.000Z
|
2022-03-07T08:33:18.000Z
|
znail/ui/api/disciplines/__init__.py
|
Risca/znail
|
01c94465a4bd5d48c1f0e7c4f339138b3a1d0a6f
|
[
"Apache-2.0"
] | 8
|
2019-12-11T12:02:03.000Z
|
2021-10-15T20:38:18.000Z
|
znail/ui/api/disciplines/__init__.py
|
Risca/znail
|
01c94465a4bd5d48c1f0e7c4f339138b3a1d0a6f
|
[
"Apache-2.0"
] | 4
|
2019-11-19T21:32:52.000Z
|
2021-08-22T17:19:27.000Z
|
import znail.ui.api.disciplines.packet_corruption
import znail.ui.api.disciplines.packet_delay
import znail.ui.api.disciplines.packet_duplication
import znail.ui.api.disciplines.packet_loss
import znail.ui.api.disciplines.packet_rate_control
import znail.ui.api.disciplines.packet_reordering # noqa
| 42.857143
| 57
| 0.866667
| 44
| 300
| 5.75
| 0.318182
| 0.26087
| 0.3083
| 0.379447
| 0.782609
| 0.782609
| 0
| 0
| 0
| 0
| 0
| 0
| 0.05
| 300
| 6
| 58
| 50
| 0.887719
| 0.013333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
bf68a2e89ba03d96149cab5b5f44ea528cd94744
| 4,386
|
py
|
Python
|
free-speech-django/app/migrations/0009_auto_20160804_2214.py
|
BerkeleyAutomation/free-speech
|
5e046d1f856deb13ff8fd1a68e861d687fdc8705
|
[
"MIT"
] | null | null | null |
free-speech-django/app/migrations/0009_auto_20160804_2214.py
|
BerkeleyAutomation/free-speech
|
5e046d1f856deb13ff8fd1a68e861d687fdc8705
|
[
"MIT"
] | null | null | null |
free-speech-django/app/migrations/0009_auto_20160804_2214.py
|
BerkeleyAutomation/free-speech
|
5e046d1f856deb13ff8fd1a68e861d687fdc8705
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-08-04 22:14
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pcari', '0008_auto_20160804_2137'),
]
operations = [
migrations.AddField(
model_name='generalsetting',
name='main_comment_description',
field=models.CharField(default='Please write your comment below', max_length=500),
),
migrations.AddField(
model_name='generalsetting',
name='main_feedback_description',
field=models.CharField(default="At the end you'll have a chance to give us more feedback", max_length=500),
),
migrations.AddField(
model_name='generalsetting',
name='main_graph_description',
field=models.CharField(default='The plots below show the average trend of the ratings.', max_length=500),
),
migrations.AddField(
model_name='generalsetting',
name='main_landing_description',
field=models.CharField(default="Join others to amplify Philippine's collective intelligence", max_length=500),
),
migrations.AddField(
model_name='generalsetting',
name='main_peer_evaluation_description',
field=models.CharField(default='How important is this issue?', max_length=500),
),
migrations.AddField(
model_name='generalsetting',
name='main_question_description',
field=models.CharField(default="Please grade Philippine government's effectiveness on:", max_length=500),
),
migrations.AddField(
model_name='generalsetting',
name='secondary_begin_button',
field=models.CharField(default='Begin', max_length=10),
),
migrations.AddField(
model_name='generalsetting',
name='secondary_comment_description',
field=models.CharField(default='Please write your comment below', max_length=500),
),
migrations.AddField(
model_name='generalsetting',
name='secondary_feedback_description',
field=models.CharField(default="At the end you'll have a chance to give us more feedback", max_length=500),
),
migrations.AddField(
model_name='generalsetting',
name='secondary_graph_description',
field=models.CharField(default='The plots below show the average trend of the ratings.', max_length=500),
),
migrations.AddField(
model_name='generalsetting',
name='secondary_landing_description',
field=models.CharField(default="Join others to amplify Philippine's collective intelligence", max_length=500),
),
migrations.AddField(
model_name='generalsetting',
name='secondary_next_button',
field=models.CharField(default='Next', max_length=10),
),
migrations.AddField(
model_name='generalsetting',
name='secondary_peer_evaluation_description',
field=models.CharField(default='How important is this issue?', max_length=500),
),
migrations.AddField(
model_name='generalsetting',
name='secondary_post_button',
field=models.CharField(default='Post', max_length=10),
),
migrations.AddField(
model_name='generalsetting',
name='secondary_question_description',
field=models.CharField(default="Please grade Philippine government's effectiveness on:", max_length=500),
),
migrations.AddField(
model_name='generalsetting',
name='secondary_skip_button',
field=models.CharField(default='Skip', max_length=10),
),
migrations.AddField(
model_name='generalsetting',
name='secondary_submit_button',
field=models.CharField(default='Submit', max_length=10),
),
migrations.AlterField(
model_name='generalsetting',
name='main_language',
field=models.CharField(choices=[('English', 'English'), ('Tagalog', 'Tagalog')], default='English', max_length=15),
),
]
| 41.377358
| 127
| 0.623575
| 433
| 4,386
| 6.12933
| 0.235566
| 0.06104
| 0.155991
| 0.18312
| 0.866616
| 0.792766
| 0.792766
| 0.774303
| 0.774303
| 0.774303
| 0
| 0.025023
| 0.27109
| 4,386
| 105
| 128
| 41.771429
| 0.80513
| 0.015276
| 0
| 0.663265
| 1
| 0
| 0.314411
| 0.107739
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.040816
| 0
| 0.071429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
bf7ee142eb9c6ebac6b32546ce24c9c3e8da3baf
| 213
|
gyp
|
Python
|
binding.gyp
|
cstur/catnodeclient
|
f0529100fdaadcdfdfc033619e45733afda1014a
|
[
"Apache-2.0"
] | null | null | null |
binding.gyp
|
cstur/catnodeclient
|
f0529100fdaadcdfdfc033619e45733afda1014a
|
[
"Apache-2.0"
] | null | null | null |
binding.gyp
|
cstur/catnodeclient
|
f0529100fdaadcdfdfc033619e45733afda1014a
|
[
"Apache-2.0"
] | null | null | null |
{"targets": [{"target_name": "ccat","sources": [ "./src/addon/msg.c","./src/addon/ccat.cc","./src/addon/socket.c","./src/addon/m.c","./src/addon/manager.c","./src/addon/gettimeofday.c","./src/addon/snprintf.c"]}]}
| 213
| 213
| 0.629108
| 33
| 213
| 4.030303
| 0.454545
| 0.421053
| 0.338346
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018779
| 213
| 1
| 213
| 213
| 0.636364
| 0
| 0
| 0
| 0
| 0
| 0.78972
| 0.32243
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
bf8b8ba6ab6fa0cb97bba8219ce5009e1b598a6f
| 14,470
|
py
|
Python
|
SmartMedApp/backend/modules/PredictionModule.py
|
vovochkab/SmartMed
|
123540263db8ec2225e7bb0ea949ba96a8c5d4f3
|
[
"Apache-2.0"
] | 2
|
2020-10-05T18:03:46.000Z
|
2020-11-15T18:54:53.000Z
|
SmartMedApp/backend/modules/PredictionModule.py
|
vovochkab/SmartMed
|
123540263db8ec2225e7bb0ea949ba96a8c5d4f3
|
[
"Apache-2.0"
] | 2
|
2021-09-25T15:20:19.000Z
|
2021-09-26T15:56:52.000Z
|
SmartMedApp/backend/modules/PredictionModule.py
|
vovochkab/SmartMed
|
123540263db8ec2225e7bb0ea949ba96a8c5d4f3
|
[
"Apache-2.0"
] | 7
|
2020-09-07T17:28:54.000Z
|
2021-10-01T14:32:18.000Z
|
import pandas as pd
import numpy as np
import sklearn.model_selection as sm
import sklearn.preprocessing as sp
from .ModuleInterface import Module
from .dash import PredictionDashboard
from .ModelManipulator import ModelManipulator
from .dataprep import PandasPreprocessor
class PredictionModule(Module, PredictionDashboard):
    """Prepares data, fits a model, and builds dashboard settings.

    Combines the generic :class:`Module` pipeline with the
    :class:`PredictionDashboard` UI. Everything is driven by
    ``self.settings`` (keys used here: 'preprocessing', 'path', 'model',
    'variable', plus boolean per-metric flags).
    """

    def _prepare_data(self):
        """Preprocess the input table and return the resulting DataFrame.

        Builds a :class:`PandasPreprocessor` configured with label encoding
        and no scaling, runs it, and keeps it on ``self.pp`` for later use
        by ``_prepare_dashboard_settings``.
        """
        prep = {'fillna': self.settings['preprocessing'],
                'encoding': 'label_encoding',
                'scaling': False}
        dict_pp = {
            'preprocessing': prep,
            'path': self.settings['path'],
            'fillna': self.settings['preprocessing']
        }
        self.pp = PandasPreprocessor(dict_pp)
        self.pp.preprocess()
        return self.pp.df

    def _prepare_dashboard_settings(self):
        """Build train/test data, fit the model, and return dashboard settings.

        One branch per supported model ('linreg', 'logreg', 'roc',
        'polynomreg'). Each branch follows the same pattern: build X from
        every column except the target, one-hot encode leftover categorical
        columns, derive Y, split 70/30 and fit (the 'roc' branch skips the
        split/fit), then translate ``self.settings`` into the ``settings``
        dict the dashboard consumes.

        NOTE(review): if ``self.settings['model']`` matches none of the four
        branches, ``settings`` is never assigned and the final ``return``
        raises UnboundLocalError — confirm callers only pass these four.
        """
        if self.settings['model'] == 'linreg':
            # X = every column except the target variable.
            names = self.pp.df.columns.tolist()
            names.remove(self.settings['variable'])
            self.df_X = pd.DataFrame()
            for name in names:
                self.df_X = pd.concat([self.df_X, self.pp.df[name]], axis=1)
            self.df_X = self.pp.get_numeric_df(self.df_X)
            # Seems to work without this too, but keeping it just in case:
            # one-hot encode any categorical columns that slipped through.
            df_cat = self.pp.get_categorical_df(self.df_X)
            names_cat = df_cat.columns.tolist()
            if len(names_cat) > 0:
                df_dum = pd.get_dummies(df_cat, prefix=[names_cat])
                self.df_X = pd.concat([self.df_X, df_dum], axis=1)
            self.df_Y = self.pp.df[self.settings['variable']]
            # 70/30 split with a fixed seed for reproducibility.
            dfX_train, dfX_test, dfY_train, dfY_test = sm.train_test_split(
                self.df_X, self.df_Y, test_size=0.3, random_state=42)
            self.df_X_train = dfX_train
            self.df_X_test = dfX_test
            self.df_Y_train = dfY_train
            self.df_Y_test = dfY_test
            self.model = ModelManipulator(
                x=self.df_X_train, y=self.df_Y_train, model_type=self.settings['model']).create()
            self.model.fit()
            # Mean of the held-out target, used by the dashboard.
            self.mean = sum(dfY_test) / len(dfY_test)
            settings = dict()
            # prepare metrics as names list from str -> bool
            settings['path'] = []
            settings['preprocessing'] = []
            settings['model'] = []
            settings['metrics'] = []
            settings['y'] = []
            settings['x'] = self.pp.df.columns.tolist()
            for metric in self.settings.keys():
                if metric == 'model':
                    settings['model'] = self.settings['model']
                elif metric == 'path':
                    settings['path'] = self.settings['path']
                elif metric == 'preprocessing':
                    settings['preprocessing'] = self.settings['preprocessing']
                elif metric == 'variable':
                    settings['y'] = self.settings['variable']
                    settings['x'].remove(self.settings['variable'])
                elif self.settings[metric]:
                    # any other truthy flag is treated as an enabled metric
                    settings['metrics'].append(metric)
            prep = {'fillna': self.settings['preprocessing'],
                    'encoding': 'label_encoding',
                    'scaling': False}
            dict_pp = {
                'preprocessing': prep,
                'path': self.settings['path'],
                'fillna': self.settings['preprocessing']
            }
            settings['data'] = dict_pp
        elif self.settings['model'] == 'logreg':
            # Same X construction as the 'linreg' branch.
            names = self.pp.df.columns.tolist()
            names.remove(self.settings['variable'])
            self.df_X = pd.DataFrame()
            for name in names:
                self.df_X = pd.concat([self.df_X, self.pp.df[name]], axis=1)
            self.df_X = self.pp.get_numeric_df(self.df_X)
            df_cat = self.pp.get_categorical_df(self.df_X)
            names_cat = df_cat.columns.tolist()
            if len(names_cat) > 0:
                df_dum = pd.get_dummies(df_cat, prefix=[names_cat])
                self.df_X = pd.concat([self.df_X, df_dum], axis=1)
            # dtype names considered numeric for the Y column.
            numerics_list = {'int16', 'int32', 'int', 'float', 'bool',
                             'int64', 'float16', 'float32', 'float64'}
            #self.df_Y = self.pp.df[self.settings['variable']]
            df_Y = self.pp.df[self.settings['variable']]
            print('first', type(df_Y), df_Y.dtype, df_Y.nunique())
            print(df_Y)
            if df_Y.nunique() == 2:
                # Already binary — use the target column as-is.
                print('12')
                self.df_Y = df_Y
            else:
                if df_Y.dtype not in numerics_list:
                    # Non-numeric target: label-encode it first.
                    print('23')
                    labelencoder = sp.LabelEncoder()
                    df_Y = labelencoder.fit_transform(df_Y)
                # Binarize around the mean: below-mean -> 0, else -> 1.
                mean_Y = df_Y.mean()
                # NOTE(review): df_Y1 aliases df_Y (no copy), so this loop
                # also mutates df_Y — and possibly the source column when
                # df_Y is still a pandas Series. Confirm this is intended.
                df_Y1 = df_Y
                print('type', type(df_Y1))
                for i in range(len(df_Y)):
                    if df_Y[i] < mean_Y:
                        df_Y1[i] = 0
                    else:
                        df_Y1[i] = 1
                self.df_Y = pd.Series(df_Y1)
            print('second', type(self.df_Y), self.df_Y.dtype, self.df_Y.nunique())
            print(self.df_Y)
            dfX_train, dfX_test, dfY_train, dfY_test = sm.train_test_split(self.df_X, self.df_Y, test_size=0.3,
                                                                           random_state=42)
            self.df_X_train = dfX_train
            self.df_X_test = dfX_test
            self.df_Y_train = dfY_train
            self.df_Y_test = dfY_test
            self.model = ModelManipulator(
                x=self.df_X_train, y=self.df_Y_train, model_type=self.settings['model']).create()
            self.model.fit()
            self.mean = sum(dfY_test) / len(dfY_test)
            settings = dict()
            # prepare metrics as names list from str -> bool
            settings['path'] = []
            settings['preprocessing'] = []
            settings['model'] = []
            settings['metrics'] = []
            settings['y'] = []
            settings['x'] = self.pp.df.columns.tolist()
            for metric in self.settings.keys():
                if metric == 'model':
                    settings['model'] = self.settings['model']
                elif metric == 'path':
                    settings['path'] = self.settings['path']
                elif metric == 'preprocessing':
                    settings['preprocessing'] = self.settings['preprocessing']
                elif metric == 'variable':
                    settings['y'] = self.settings['variable']
                    settings['x'].remove(self.settings['variable'])
                elif self.settings[metric]:
                    settings['metrics'].append(metric)
            prep = {'fillna': self.settings['preprocessing'],
                    'encoding': 'label_encoding',
                    'scaling': False}
            dict_pp = {
                'preprocessing': prep,
                'path': self.settings['path'],
                'fillna': self.settings['preprocessing']
            }
            settings['data'] = dict_pp
        elif self.settings['model'] == 'roc':
            # Same X/Y construction as 'logreg' (prints commented out), but
            # NO train/test split and NO model fit — ROC settings only.
            names = self.pp.df.columns.tolist()
            names.remove(self.settings['variable'])
            self.df_X = pd.DataFrame()
            #self.df_Y = self.pp.df[self.settings['variable']]
            for name in names:
                self.df_X = pd.concat([self.df_X, self.pp.df[name]], axis=1)
            self.df_X = self.pp.get_numeric_df(self.df_X)
            df_cat = self.pp.get_categorical_df(self.df_X)
            names_cat = df_cat.columns.tolist()
            if len(names_cat) > 0:
                df_dum = pd.get_dummies(df_cat, prefix=[names_cat])
                self.df_X = pd.concat([self.df_X, df_dum], axis=1)
            numerics_list = {'int16', 'int32', 'int', 'float', 'bool',
                             'int64', 'float16', 'float32', 'float64'}
            df_Y = self.pp.df[self.settings['variable']]
            #print('first', type(df_Y), df_Y.dtype, df_Y.nunique())
            #print(df_Y)
            if df_Y.nunique() == 2:
                #print('12')
                self.df_Y = df_Y
            else:
                if df_Y.dtype not in numerics_list:
                    #print('23')
                    labelencoder = sp.LabelEncoder()
                    df_Y = labelencoder.fit_transform(df_Y)
                # Binarize around the mean, as in the 'logreg' branch.
                mean_Y = df_Y.mean()
                df_Y1 = df_Y
                #print('type', type(df_Y1))
                for i in range(len(df_Y)):
                    if df_Y[i] < mean_Y:
                        df_Y1[i] = 0
                    else:
                        df_Y1[i] = 1
                self.df_Y = pd.Series(df_Y1)
            #print('second', type(self.df_Y), self.df_Y.dtype, self.df_Y.nunique())
            #print(self.df_Y)
            settings = dict()
            # prepare metrics as names list from str -> bool
            settings['path'] = []
            settings['preprocessing'] = []
            settings['model'] = []
            settings['metrics'] = []
            settings['graphs'] = []
            settings['spec_and_sens'] = []
            settings['spec_and_sens_table'] = []
            settings['y'] = []
            settings['x'] = self.pp.df.columns.tolist()
            for metric in self.settings.keys():
                if metric == 'model':
                    settings['model'] = self.settings['model']
                elif metric == 'path':
                    settings['path'] = self.settings['path']
                elif metric == 'preprocessing':
                    settings['preprocessing'] = self.settings['preprocessing']
                elif metric == 'variable':
                    settings['y'] = self.settings['variable']
                    settings['x'].remove(self.settings['variable'])
                elif metric == 'auc' or metric == 'diff_graphics' or metric == 'paint':
                    # These three flags select graphs rather than metrics.
                    settings['graphs'].append(metric)
                #elif metric == 'spec_and_sens':
                #    settings['spec_and_sens'] = self.settings['spec_and_sens']
                #elif metric == 'spec_and_sens_table':
                #    settings['spec_and_sens_table'] = self.settings[
                #        'spec_and_sens_table']
                elif self.settings[metric]:
                    settings['metrics'].append(metric)
            prep = {'fillna': self.settings['preprocessing'],
                    'encoding': 'label_encoding',
                    'scaling': False}
            dict_pp = {
                'preprocessing': prep,
                'path': self.settings['path'],
                'fillna': self.settings['preprocessing']
            }
            settings['data'] = dict_pp
        elif self.settings['model'] == 'polynomreg':
            names = self.pp.df.columns.tolist()
            names.remove(self.settings['variable'])
            self.df_X = pd.DataFrame()
            for name in names:
                self.df_X = pd.concat([self.df_X, self.pp.df[name]], axis=1)
            self.df_X = self.pp.get_numeric_df(self.df_X)
            df_cat = self.pp.get_categorical_df(self.df_X)
            names_cat = df_cat.columns.tolist()
            if len(names_cat) > 0:
                df_dum = pd.get_dummies(df_cat, prefix=[names_cat])
                self.df_X = pd.concat([self.df_X, df_dum], axis=1)
            # Degree-2 feature expansion: append every pairwise product of
            # the original columns (including squares) as new columns named
            # "a^2" / "a * b".
            count = len(self.df_X.columns)
            for i in range(count):
                for j in range(i, count):
                    data_list_1 = np.array(self.df_X.iloc[:, [i]])
                    data_list_2 = np.array(self.df_X.iloc[:, [j]])
                    data_list = data_list_1 * data_list_2
                    if i == j:
                        data_name = str(self.df_X.columns[i] + '^2')
                    else:
                        data_name = str(self.df_X.columns[i] + ' * ' + str(self.df_X.columns[j]))
                    self.df_X.insert(len(self.df_X.columns), data_name, data_list, True)
            self.df_Y = self.pp.df[self.settings['variable']]
            numerics_list = {'int16', 'int32', 'int', 'float', 'bool',
                             'int64', 'float16', 'float32', 'float64'}
            if self.df_Y.dtype not in numerics_list:
                labelencoder = sp.LabelEncoder()
                self.df_Y = labelencoder.fit_transform(self.df_Y)
            dfX_train, dfX_test, dfY_train, dfY_test = sm.train_test_split(self.df_X, self.df_Y, test_size=0.3,
                                                                           random_state=42)
            self.df_X_train = dfX_train
            self.df_X_test = dfX_test
            self.df_Y_train = dfY_train
            self.df_Y_test = dfY_test
            # NOTE: model_type is the literal 'polyreg' here, not
            # self.settings['model'] ('polynomreg') as in other branches.
            self.model = ModelManipulator(
                x=self.df_X_train, y=self.df_Y_train, model_type='polyreg').create()
            self.model.fit()
            self.mean = sum(dfY_test) / len(dfY_test)
            settings = dict()
            # prepare metrics as names list from str -> bool
            settings['path'] = []
            settings['preprocessing'] = []
            settings['model'] = []
            settings['metrics'] = []
            settings['y'] = []
            # Unlike other branches, 'x' lists the EXPANDED feature columns.
            settings['x'] = self.df_X.columns.tolist()
            for metric in self.settings.keys():
                if metric == 'model':
                    settings['model'] = self.settings['model']
                elif metric == 'path':
                    settings['path'] = self.settings['path']
                elif metric == 'preprocessing':
                    settings['preprocessing'] = self.settings['preprocessing']
                elif metric == 'variable':
                    settings['y'] = self.settings['variable']
                    # settings['x'].remove(self.settings['variable'])
                elif self.settings[metric]:
                    settings['metrics'].append(metric)
            prep = {'fillna': self.settings['preprocessing'],
                    'encoding': 'label_encoding',
                    'scaling': False}
            dict_pp = {
                'preprocessing': prep,
                'path': self.settings['path'],
                'fillna': self.settings['preprocessing']
            }
            settings['data'] = dict_pp
        return settings

    def _prepare_dashboard(self):
        """No extra dashboard preparation is needed for this module."""
        pass
| 42.434018
| 111
| 0.499171
| 1,591
| 14,470
| 4.346952
| 0.091138
| 0.072874
| 0.053644
| 0.015616
| 0.887507
| 0.859456
| 0.854251
| 0.841961
| 0.834442
| 0.814199
| 0
| 0.0098
| 0.372357
| 14,470
| 340
| 112
| 42.558824
| 0.751707
| 0.055632
| 0
| 0.805755
| 0
| 0
| 0.097186
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.010791
| false
| 0.003597
| 0.028777
| 0
| 0.05036
| 0.02518
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
44bd3e125c481d3cf59b3f02a61a364296d5bbfe
| 478
|
py
|
Python
|
docker_manage/Environments/EnvironmentJava11.py
|
acdh-oeaw/docker-tools
|
4f0134f5220b7a78dba5adca6d6085b3379853d2
|
[
"MIT"
] | null | null | null |
docker_manage/Environments/EnvironmentJava11.py
|
acdh-oeaw/docker-tools
|
4f0134f5220b7a78dba5adca6d6085b3379853d2
|
[
"MIT"
] | null | null | null |
docker_manage/Environments/EnvironmentJava11.py
|
acdh-oeaw/docker-tools
|
4f0134f5220b7a78dba5adca6d6085b3379853d2
|
[
"MIT"
] | 1
|
2020-06-08T07:23:34.000Z
|
2020-06-08T07:23:34.000Z
|
import os
from . import *
class EnvironmentJava11(EnvironmentJava8, IEnvironment):
    """Java 11 docker environment.

    Identical to :class:`EnvironmentJava8` except that the Dockerfile
    directory defaults to ``'java11'`` when the config does not name one.
    """

    def __init__(self, conf, owner):
        # Fill in the java11 Dockerfile directory only when absent.
        conf.setdefault('DockerfileDir', 'java11')
        super(EnvironmentJava11, self).__init__(conf, owner)
class EnvironmentJava9(EnvironmentJava8, IEnvironment):
    """Java 9 docker environment.

    Bug fix: the original body was copy-pasted from EnvironmentJava11 —
    ``super(EnvironmentJava11, self).__init__(...)`` raises TypeError here
    because EnvironmentJava9 is not a subclass of EnvironmentJava11, and the
    default DockerfileDir was the Java 11 directory.
    """

    def __init__(self, conf, owner):
        if 'DockerfileDir' not in conf :
            # NOTE(review): 'java9' matches this class's Java version; the
            # original said 'java11' (copy-paste). Confirm a 'java9'
            # Dockerfile directory exists in the repo.
            conf['DockerfileDir'] = 'java9'
        super(EnvironmentJava9, self).__init__(conf, owner)
| 29.875
| 56
| 0.721757
| 50
| 478
| 6.58
| 0.38
| 0.109422
| 0.18845
| 0.212766
| 0.81459
| 0.81459
| 0.81459
| 0.81459
| 0.81459
| 0.81459
| 0
| 0.032581
| 0.165272
| 478
| 15
| 57
| 31.866667
| 0.79198
| 0
| 0
| 0.666667
| 0
| 0
| 0.133891
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.166667
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
7845b13f369d685672d58fe2253ce6b5cbf5f4d5
| 72
|
py
|
Python
|
CodeHS/Unit 8/8.4/createAnImage.py
|
nitrospam/APCSP2020
|
275f576036805d244c3244f3f3646951940c9575
|
[
"MIT"
] | null | null | null |
CodeHS/Unit 8/8.4/createAnImage.py
|
nitrospam/APCSP2020
|
275f576036805d244c3244f3f3646951940c9575
|
[
"MIT"
] | null | null | null |
CodeHS/Unit 8/8.4/createAnImage.py
|
nitrospam/APCSP2020
|
275f576036805d244c3244f3f3646951940c9575
|
[
"MIT"
] | null | null | null |
11110000
00001111
11110000
00001111
11110000
11111111
11000011
00111100
| 8
| 8
| 0.888889
| 8
| 72
| 8
| 0.625
| 0.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| 72
| 8
| 9
| 9
| 0
| 0
| 0
| 0.625
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
788153333530897025d9d8b931a9024bd588f53c
| 2,282
|
py
|
Python
|
snr/std_mods/io/.controller/mappings.py
|
sfshaw/SNR
|
593b7b78a91e23e0fcb03985b72f29a66101579c
|
[
"MIT"
] | 1
|
2021-03-09T21:54:56.000Z
|
2021-03-09T21:54:56.000Z
|
snr/std_mods/io/.controller/mappings.py
|
sfshaw-calpoly/SNR
|
593b7b78a91e23e0fcb03985b72f29a66101579c
|
[
"MIT"
] | null | null | null |
snr/std_mods/io/.controller/mappings.py
|
sfshaw-calpoly/SNR
|
593b7b78a91e23e0fcb03985b72f29a66101579c
|
[
"MIT"
] | 1
|
2021-12-04T19:51:18.000Z
|
2021-12-04T19:51:18.000Z
|
'''Mapping of pygame joystick output to values we can make sense of

Each entry maps a raw pygame field name to how it should be exposed:

    "pygame_name": ["name_we_use"],
    "pygame_name": ["name_we_use", cast_type],
    "pygame_name": ["name_we_use", cast_type, scale_factor],
    "pygame_name": ["name_we_use", cast_type, scale_factor, shift_amount],
    "pygame_name": ["name_we_use", cast_type, scale_factor, shift_amount,
                    dead_zone],

to drop a value use "pygame_name": [None],
'''
# Linux controller mapping. Sticks scale to +/-100 (y axes negated so that
# "up" is positive); triggers use scale 50, shift 50.
lin_control_mappings = {
    "number": [None],
    "name": ["controller_name"],
    "axis_0": ["stick_left_x", int, 100, 0, 0],
    "axis_1": ["stick_left_y", int, -100, 0, 0],
    "axis_2": ["trigger_left", int, 50, 50, 0],
    "axis_3": ["stick_right_x", int, 100, 0, 0],
    "axis_4": ["stick_right_y", int, -100, 0, 0],
    "axis_5": ["trigger_right", int, 50, 50, 0],
    "button_0": ["button_a", bool],
    "button_1": ["button_b", bool],
    "button_2": ["button_x", bool],
    "button_3": ["button_y", bool],
    "button_4": ["button_left_bumper", bool],
    "button_5": ["button_right_bumper", bool],
    "button_6": ["button_back", bool],
    "button_7": ["button_start", bool],
    "button_8": ["button_xbox", bool],
    "button_9": ["button_left_stick", bool],
    "button_10": ["button_right_stick", bool],
    "dpad": ["dpad", tuple],
    "num_buttons": [None],
    "num_dpad": [None],
    "num_axes": [None],
}
# Windows controller mapping. Differs from the Linux table only in axes 2-5:
# triggers use scale 100 / shift 0, and axis_3/axis_4 carry the right
# stick's y/x with signs arranged differently. NOTE(review): presumably this
# mirrors Windows pygame axis numbering — confirm against real hardware.
win_control_mappings = {
    "number": [None],
    "name": ["controller_name"],
    "axis_0": ["stick_left_x", int, 100, 0, 0],
    "axis_1": ["stick_left_y", int, -100, 0, 0],
    "axis_2": ["trigger_left", int, 100, 0, 0],
    "axis_3": ["stick_right_y", int, 100, 0, 0],
    "axis_4": ["stick_right_x", int, -100, 0, 0],
    "axis_5": ["trigger_right", int, 100, 0, 0],
    "button_0": ["button_a", bool],
    "button_1": ["button_b", bool],
    "button_2": ["button_x", bool],
    "button_3": ["button_y", bool],
    "button_4": ["button_left_bumper", bool],
    "button_5": ["button_right_bumper", bool],
    "button_6": ["button_back", bool],
    "button_7": ["button_start", bool],
    "button_8": ["button_xbox", bool],
    "button_9": ["button_left_stick", bool],
    "button_10": ["button_right_stick", bool],
    "dpad": ["dpad", tuple],
    "num_buttons": [None],
    "num_dpad": [None],
    "num_axes": [None],
}
| 36.806452
| 71
| 0.600351
| 328
| 2,282
| 3.804878
| 0.198171
| 0.160256
| 0.05609
| 0.064103
| 0.907051
| 0.879006
| 0.879006
| 0.857372
| 0.801282
| 0.725962
| 0
| 0.051419
| 0.181858
| 2,282
| 61
| 72
| 37.409836
| 0.617033
| 0.184487
| 0
| 0.76
| 0
| 0
| 0.429342
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
78c31e66695596823404282ad4d923ff73085907
| 2,035
|
py
|
Python
|
code-challanges/401_code_challenges/graphs/test_depth_first.py
|
schoentr/data-structures-and-algorithms
|
535ac617a2ab32293014946b043bdb40a647d43b
|
[
"MIT"
] | null | null | null |
code-challanges/401_code_challenges/graphs/test_depth_first.py
|
schoentr/data-structures-and-algorithms
|
535ac617a2ab32293014946b043bdb40a647d43b
|
[
"MIT"
] | 1
|
2019-03-11T02:13:58.000Z
|
2019-03-11T02:13:58.000Z
|
code-challanges/401_code_challenges/graphs/test_depth_first.py
|
schoentr/data-structures-and-algorithms
|
535ac617a2ab32293014946b043bdb40a647d43b
|
[
"MIT"
] | null | null | null |
from graphs.graph import Graph
def _make_sample_graph():
    """Build the shared 8-vertex fixture graph used by all three tests.

    Vertices 'a'..'h' are added in alphabetical order (insertion order can
    affect traversal order), then the edges a-b, c-g, b-c, b-d, e-d, d-h,
    f-h, d-f, d-a are added. Extracted to remove the triplicated setup the
    original tests carried.
    """
    g = Graph()
    vertices = {label: g.add(label) for label in 'abcdefgh'}
    g.add_edge(vertices['a'], vertices['b'])
    g.add_edge(vertices['c'], vertices['g'])
    g.add_edge(vertices['b'], vertices['c'])
    g.add_edge(vertices['b'], vertices['d'])
    g.add_edge(vertices['e'], vertices['d'])
    g.add_edge(vertices['d'], vertices['h'])
    g.add_edge(vertices['f'], vertices['h'])
    g.add_edge(vertices['d'], vertices['f'])
    g.add_edge(vertices['d'], vertices['a'])
    return g


def test_neighbors_one():
    """Depth-first traversal starting from vertex 'a'."""
    g = _make_sample_graph()
    actual = g.depth_first('a')
    assert actual == ['a', 'b', 'c', 'g', 'd', 'e', 'h', 'f']


def test_neighbors_two():
    """Depth-first traversal starting from vertex 'd'."""
    g = _make_sample_graph()
    actual = g.depth_first('d')
    assert actual == ['d', 'b', 'a', 'c', 'g', 'e', 'h', 'f']


def test_neighbors_three():
    """Depth-first traversal starting from vertex 'f'."""
    g = _make_sample_graph()
    actual = g.depth_first('f')
    assert actual == ['f', 'h', 'd', 'b', 'a', 'c', 'g', 'e']
| 27.133333
| 61
| 0.596069
| 377
| 2,035
| 2.915119
| 0.066313
| 0.185623
| 0.196542
| 0.343949
| 0.909918
| 0.909918
| 0.865332
| 0.865332
| 0.865332
| 0.865332
| 0
| 0
| 0.222113
| 2,035
| 75
| 62
| 27.133333
| 0.694251
| 0
| 0
| 0.84375
| 0
| 0
| 0.025049
| 0
| 0
| 0
| 0
| 0
| 0.046875
| 1
| 0.046875
| false
| 0
| 0.015625
| 0
| 0.0625
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.