hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6fa4f17ff71dd0ae5155b92fda82e4d7d5fff9e9
| 2,017
|
py
|
Python
|
src/config/experiments/test.py
|
DanielTrosten/mvc
|
b0a08fc6c75bdb1fae796f82a7cbfb001bf02047
|
[
"MIT"
] | 16
|
2021-04-13T13:21:12.000Z
|
2022-03-30T03:40:46.000Z
|
src/config/experiments/test.py
|
DanielTrosten/mvc
|
b0a08fc6c75bdb1fae796f82a7cbfb001bf02047
|
[
"MIT"
] | 2
|
2021-08-13T14:02:19.000Z
|
2022-01-19T12:52:29.000Z
|
src/config/experiments/test.py
|
DanielTrosten/mvc
|
b0a08fc6c75bdb1fae796f82a7cbfb001bf02047
|
[
"MIT"
] | 7
|
2021-04-13T14:27:49.000Z
|
2022-03-09T21:23:17.000Z
|
from config.defaults import Experiment, Dataset, SiMVC, MLP, DDC, Fusion, Loss, CoMVC
blobs_overlap = Experiment(
dataset_config=Dataset(name="blobs_overlap"),
model_config=SiMVC(
backbone_configs=(
MLP(layers=[32, 32, 32], input_size=(2,)),
MLP(layers=[32, 32, 32], input_size=(2,)),
),
fusion_config=Fusion(method="weighted_mean", n_views=2),
cm_config=DDC(n_clusters=3),
loss_config=Loss(
funcs="ddc_1|ddc_2|ddc_3",
),
),
n_runs=1,
n_epochs=10,
)
blobs_overlap_contrast = Experiment(
dataset_config=Dataset(name="blobs_overlap"),
model_config=CoMVC(
backbone_configs=(
MLP(layers=[32, 32, 32], input_size=(2,)),
MLP(layers=[32, 32, 32], input_size=(2,)),
),
fusion_config=Fusion(method="weighted_mean", n_views=2),
projector_config=None,
cm_config=DDC(n_clusters=3),
loss_config=Loss(
funcs="ddc_1|ddc_2|ddc_3|contrast",
)
),
n_runs=1,
)
blobs_overlap_5 = Experiment(
dataset_config=Dataset(name="blobs_overlap_5"),
model_config=SiMVC(
backbone_configs=(
MLP(layers=[32, 32, 32], input_size=(2,)),
MLP(layers=[32, 32, 32], input_size=(2,)),
),
fusion_config=Fusion(method="weighted_mean", n_views=2),
cm_config=DDC(n_clusters=5),
loss_config=Loss(
funcs="ddc_1|ddc_2|ddc_3",
),
),
n_runs=1,
)
blobs_overlap_5_contrast = Experiment(
dataset_config=Dataset(name="blobs_overlap_5"),
model_config=CoMVC(
backbone_configs=(
MLP(layers=[32, 32, 32], input_size=(2,)),
MLP(layers=[32, 32, 32], input_size=(2,)),
),
fusion_config=Fusion(method="weighted_mean", n_views=2),
projector_config=None,
cm_config=DDC(n_clusters=5),
loss_config=Loss(
funcs="ddc_1|ddc_2|ddc_3|contrast",
)
),
n_runs=1,
)
| 28.408451
| 85
| 0.588498
| 261
| 2,017
| 4.252874
| 0.153257
| 0.057658
| 0.079279
| 0.093694
| 0.90991
| 0.90991
| 0.886486
| 0.886486
| 0.872072
| 0.789189
| 0
| 0.058069
| 0.265741
| 2,017
| 70
| 86
| 28.814286
| 0.691425
| 0
| 0
| 0.8125
| 0
| 0
| 0.096182
| 0.025781
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.015625
| 0
| 0.015625
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6ff38e5a87c08ddb61d54b1bdd0e0f86d2350777
| 106
|
py
|
Python
|
Python/Tests/TestData/Grammar/DecoratorsClassDef.py
|
nanshuiyu/pytools
|
9f9271fe8cf564b4f94e9456d400f4306ea77c23
|
[
"Apache-2.0"
] | null | null | null |
Python/Tests/TestData/Grammar/DecoratorsClassDef.py
|
nanshuiyu/pytools
|
9f9271fe8cf564b4f94e9456d400f4306ea77c23
|
[
"Apache-2.0"
] | null | null | null |
Python/Tests/TestData/Grammar/DecoratorsClassDef.py
|
nanshuiyu/pytools
|
9f9271fe8cf564b4f94e9456d400f4306ea77c23
|
[
"Apache-2.0"
] | null | null | null |
@fob
class C: pass
@fob.oar
class C: pass
@fob(oar)
class C: pass
@fob
@oar
class C: pass
| 8.153846
| 14
| 0.575472
| 20
| 106
| 3.1
| 0.3
| 0.387097
| 0.645161
| 0.629032
| 0.935484
| 0.935484
| 0.935484
| 0.935484
| 0.935484
| 0.935484
| 0
| 0
| 0.301887
| 106
| 13
| 15
| 8.153846
| 0.824324
| 0
| 0
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.444444
| 0
| null | null | 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 12
|
b50964ac7171e26405b480e793db10a184c005a6
| 201
|
py
|
Python
|
blueprints/pages/login.py
|
LeaveMyYard/Hillelgram
|
4ba5131d84477ce5fb1479b4de2c2b2a1f09f8fd
|
[
"MIT"
] | null | null | null |
blueprints/pages/login.py
|
LeaveMyYard/Hillelgram
|
4ba5131d84477ce5fb1479b4de2c2b2a1f09f8fd
|
[
"MIT"
] | 2
|
2021-11-27T10:54:55.000Z
|
2021-11-27T12:57:02.000Z
|
blueprints/pages/login.py
|
LeaveMyYard/Hillelgram
|
4ba5131d84477ce5fb1479b4de2c2b2a1f09f8fd
|
[
"MIT"
] | null | null | null |
from flask import Blueprint, render_template
login_blueprint = Blueprint("login_blueprint", __name__)
@login_blueprint.route("/login")
def get_login_page():
return render_template("login.html")
| 22.333333
| 56
| 0.78607
| 25
| 201
| 5.88
| 0.56
| 0.285714
| 0.258503
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.104478
| 201
| 8
| 57
| 25.125
| 0.816667
| 0
| 0
| 0
| 0
| 0
| 0.154229
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.2
| 0.2
| 0.6
| 0.6
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
|
0
| 7
|
d22cae4485a0d759513e75bf5d9c96f3ead384ce
| 35,931
|
py
|
Python
|
codes/py_countreg.py
|
statcompute/py_countreg
|
3f62b8f16b95be5be46cacb93f544bbca6b1ec55
|
[
"MIT"
] | null | null | null |
codes/py_countreg.py
|
statcompute/py_countreg
|
3f62b8f16b95be5be46cacb93f544bbca6b1ec55
|
[
"MIT"
] | null | null | null |
codes/py_countreg.py
|
statcompute/py_countreg
|
3f62b8f16b95be5be46cacb93f544bbca6b1ec55
|
[
"MIT"
] | null | null | null |
# py_countreg/py_countreg.py
# exec(open('py_countreg/py_countreg.py').read())
# 0.0.4
import numpy, scipy
from statsmodels.base.model import GenericLikelihoodModel as gll
from statsmodels.api import Logit as logit
#################### 01. Standard Poisson Regression ####################
def _ll_stdpoisson(y, x, beta):
"""
The function calculates the log likelihood function of a standard poisson
regression.
Parameters:
y : the frequency outcome
x : variables of the poisson regression
beta : coefficients of the poisson regression
"""
mu = numpy.exp(numpy.dot(x, beta))
pr = numpy.exp(-mu) * numpy.float_power(mu, y) / scipy.special.factorial(y)
ll = numpy.log(pr)
return(ll)
################################################################################
def stdpoisson(Y, X):
"""
The function estimates a standard poisson regression.
Parameters:
Y : a pandas series for the frequency outcome with integer values.
X : a pandas dataframe with model variables that are all numeric values.
Example:
stdpoisson(Y, X).fit().summary()
"""
class stdpoisson(gll):
def __init__(self, endog, exog, **kwds):
super(stdpoisson, self).__init__(endog, exog, **kwds)
def nloglikeobs(self, params):
beta = params
ll = _ll_stdpoisson(self.endog, self.exog, beta)
return(-ll)
def fit(self, start_params = None, maxiter = 10000, maxfun = 5000, **kwds):
if start_params == None:
start_params = numpy.zeros(self.exog.shape[1])
return(super(stdpoisson, self).fit(start_params = start_params,
maxiter = maxiter, maxfun = maxfun, **kwds))
_Y = Y.copy()
_X = X.copy()
_X.insert(loc = 0, column = "_CONST", value = 1)
return(stdpoisson(_Y, _X))
#################### 02. Negative Binomial Regression ####################
def _ll_negbinom2(y, x, beta, alpha):
"""
The function calculates the log likelihood function of the negative binomial
(NB-2) regression.
Parameters:
y : the frequency outcome
x : variables of the negative binomial regression
beta : coefficients of the negative binomial regression
alpha : the dispersion parameter of the negative binomial regression
"""
mu = numpy.exp(numpy.dot(x, beta))
a1 = 1 / alpha
pr = scipy.special.gamma(y + a1) / (scipy.special.gamma(y + 1) * scipy.special.gamma(a1)) * \
numpy.float_power(a1 / (a1 + mu), a1) * numpy.float_power(mu / (a1 + mu), y)
ll = numpy.log(pr)
return(ll)
################################################################################
def negbinom2(Y, X):
"""
The function estimates a negative binomial (NB-2) regression.
Parameters:
Y : a pandas series for the frequency outcome with integer values.
X : a pandas dataframe with model variables that are all numeric values.
Example:
negbinom2(Y, X).fit().summary()
"""
class negbinom2(gll):
def __init__(self, endog, exog, **kwds):
super(negbinom2, self).__init__(endog, exog, **kwds)
def nloglikeobs(self, params):
alpha = params[-1]
beta = params[:-1]
ll = _ll_negbinom2(self.endog, self.exog, beta, alpha)
return(-ll)
def fit(self, start_params = None, maxiter = 10000, maxfun = 5000, method = "ncg", **kwds):
self.exog_names.append('_ALPHA')
if start_params == None:
start_params = numpy.append(p0, a0)
return(super(negbinom2, self).fit(start_params = start_params, method = method,
maxiter = maxiter, maxfun = maxfun, **kwds))
_Y = Y.copy()
_X = X.copy()
_X.insert(loc = 0, column = "_CONST", value = 1)
p0 = stdpoisson(Y, X).fit(disp = 0).params
a0 = 1
return(negbinom2(_Y, _X))
#################### 03. Generalized Poisson Regression ####################
def _ll_genpoisson(y, x, beta, s):
"""
The function calculates the log likelihood function of the generalized poisson
regression.
Parameters:
y : the frequency outcome
x : variables of the generalized poisson regression
beta : coefficients of the negative binomial regression
s : the scale parameter for the generalized poisson distribution
"""
mu = numpy.exp(numpy.dot(x, beta))
xi = numpy.exp(s)
_a = mu * (1 - xi)
pr = _a / scipy.special.factorial(y) * numpy.exp(-_a - xi * y) * \
numpy.float_power(_a + xi * y, y - 1)
ll = numpy.log(pr)
return(ll)
################################################################################
def genpoisson(Y, X):
"""
The function estimates a generalized poisson regression. In addition to regression
coefficients, there is a scale parameter S such that Xi = Exp(S). In a generalized
poisson distribution, the VAR(Y) = E(Y) / [(1 - Xi) ^ 2] such that [(1 - Xi) ^ 2] > 1
indicates the under-dispersion and [(1 - Xi) ^ 2] < 1 indicates the over-dispersion.
Parameters:
Y : a pandas series for the frequency outcome with integer values.
X : a pandas dataframe with model variables that are all numeric
Example:
genpoisson(Y, X).fit().summary()
"""
class genpoisson(gll):
def __init__(self, endog, exog, **kwds):
super(genpoisson, self).__init__(endog, exog, **kwds)
def nloglikeobs(self, params):
_s = params[-1]
beta = params[:-1]
ll = _ll_genpoisson(self.endog, self.exog, beta, _s)
return(-ll)
def fit(self, start_params = None, maxiter = 10000, maxfun = 5000, method = "ncg", **kwds):
self.exog_names.append('_S')
if start_params == None:
start_params = numpy.append(p0, s0)
return(super(genpoisson, self).fit(start_params = start_params, method = method,
maxiter = maxiter, maxfun = maxfun, **kwds))
_Y = Y.copy()
_X = X.copy()
_X.insert(loc = 0, column = "_CONST", value = 1)
p0 = stdpoisson(Y, X).fit(disp = 0).params
s0 = numpy.log(max(1e-4, 1 - numpy.float_power(numpy.mean(Y) / numpy.var(Y), 0.5)))
return(genpoisson(_Y, _X))
#################### 04. Hurdle Poisson Regression ####################
def _ll_hdlpoisson(y, x1, x2, beta1, beta2):
"""
The function calculates the log likelihood function of the hurdle poisson
regression.
Parameters:
y : the frequency outcome
x1 : variables for the probability model in the hurdle poisson regression
x2 : variables for the count model in the hurdle poisson regression
beta1 : coefficients for the probability model in the hurdle poisson regression
beta2 : coefficients for the count model in the hurdle poisson regression
"""
xb1 = numpy.dot(x1, beta1)
xb2 = numpy.dot(x2, beta2)
p0 = numpy.exp(xb1) / (1 + numpy.exp(xb1))
mu = numpy.exp(xb2)
i0 = numpy.where(y == 0, 1, 0)
pr = p0 * i0 + \
(1 - p0) * numpy.exp(-mu) * numpy.float_power(mu, y) / \
((1 - numpy.exp(-mu)) * scipy.special.factorial(y)) * (1 - i0)
ll = numpy.log(pr)
return(ll)
################################################################################
def hdlpoisson(Y, X1, X2):
"""
The function estimates a hurdle poisson regression, which is the composite
between point mess at zero and a zero-trucated poisson distribution.
In the model output, estimated coefficients starting with "P0:" are used
to predict the probability of zero outcomes and estimated coefficients
starting with "MU:" are used to predict frequency outcomes for a zero-trucated
poisson.
Parameters:
Y : a pandas series for the frequency outcome with integer values, including zeros.
X1 : a pandas dataframe with the probability model variables that are all numeric values.
X2 : a pandas dataframe with the count model variables that are all numeric values.
Example:
hdlpoisson(Y, X1, X2).fit().summary()
"""
class hdlpoisson(gll):
def __init__(self, endog, exog, **kwds):
super(hdlpoisson, self).__init__(endog, exog, **kwds)
def nloglikeobs(self, params):
d1 = _X1.shape[1]
beta1 = params[:d1]
beta2 = params[d1:]
ll = _ll_hdlpoisson(self.endog, self.exog[:, :d1], self.exog[:, d1:], beta1, beta2)
return(-ll)
def fit(self, start_params = None, maxiter = 10000, maxfun = 5000, method = "ncg", **kwds):
if start_params == None:
start_params = numpy.concatenate([p10, p20])
return(super(hdlpoisson, self).fit(start_params = start_params, method = method,
maxiter = maxiter, maxfun = maxfun, **kwds))
_Y = Y.copy()
_X1 = X1.copy()
_X2 = X2.copy()
_X1.insert(loc = 0, column = "_CONST", value = 1)
_X1.columns = ["P0:" + _ for _ in _X1.columns]
_X2.insert(loc = 0, column = "_CONST", value = 1)
_X2.columns = ["MU:" + _ for _ in _X2.columns]
_X = _X1.join(_X2)
p10 = logit(numpy.where(_Y == 0, 1, 0), _X1).fit(disp = 0).params
p20 = ztrpoisson(Y[Y > 0], X2[Y > 0]).fit(disp = 0).params
return(hdlpoisson(_Y, _X))
#################### 05. Zero-Inflated Poisson Regression ####################
def _ll_zifpoisson(y, x1, x2, beta1, beta2):
"""
The function calculates the log likelihood function of the zero-inflated
poisson regression.
Parameters:
y : the frequency outcome
x1 : variables for the probability model in the zero-inflated poisson regression
x2 : variables for the count model in the zero-inflated poisson regression
beta1 : coefficients for the probability model in the zero-inflated poisson regression
beta2 : coefficients for the count model in the zero-inflated poisson regression
"""
xb1 = numpy.dot(x1, beta1)
xb2 = numpy.dot(x2, beta2)
p0 = numpy.exp(xb1) / (1 + numpy.exp(xb1))
mu = numpy.exp(xb2)
i0 = numpy.where(y == 0, 1, 0)
pr = (p0 + (1 - p0) * numpy.exp(-mu)) * i0 + \
(1 - p0) * numpy.exp(-mu) * numpy.float_power(mu, y) / scipy.special.factorial(y) * (1 - i0)
ll = numpy.log(pr)
return(ll)
################################################################################
def zifpoisson(Y, X1, X2):
"""
The function estimates a zero-inflated poisson regression, which is the
composite between point mess at zero and a standard poisson distribution.
In the model outcome, estimated coefficients starting with "P0:" are used
to predict the probability of zero outcomes and estimated coefficients
starting with "MU:" are used to predict frequency outcomes for a standard
poisson.
Parameters:
Y : a pandas series for the frequency outcome with integer values, including zeros.
X1 : a pandas dataframe with the probability model variables that are all numeric values.
X2 : a pandas dataframe with the count model variables that are all numeric values.
Example:
zifpoisson(Y, X1, X2).fit().summary()
"""
class zifpoisson(gll):
def __init__(self, endog, exog, **kwds):
super(zifpoisson, self).__init__(endog, exog, **kwds)
def nloglikeobs(self, params):
d1 = _X1.shape[1]
beta1 = params[:d1]
beta2 = params[d1:]
ll = _ll_zifpoisson(self.endog, self.exog[:, :d1], self.exog[:, d1:], beta1, beta2)
return(-ll)
def fit(self, start_params = None, maxiter = 10000, maxfun = 5000, method = "ncg", **kwds):
if start_params == None:
start_params = numpy.concatenate([p10, p20])
return(super(zifpoisson, self).fit(start_params = start_params, method = method,
maxiter = maxiter, maxfun = maxfun, **kwds))
_Y = Y.copy()
_X1 = X1.copy()
_X2 = X2.copy()
_X1.insert(loc = 0, column = "_CONST", value = 1)
_X1.columns = ["P0:" + _ for _ in _X1.columns]
_X2.insert(loc = 0, column = "_CONST", value = 1)
_X2.columns = ["MU:" + _ for _ in _X2.columns]
_X = _X1.join(_X2)
p10 = logit(numpy.where(_Y == 0, 1, 0), _X1).fit(disp = 0).params
p20 = ztrpoisson(Y[Y > 0], X2[Y > 0]).fit(disp = 0).params
return(zifpoisson(_Y, _X))
#################### 06. Conway-Maxwell Poisson Regression ####################
def _ll_compoisson(y, x, beta, s):
"""
The function calculates the log likelihood function of the Conway-Maxwell
poisson regression.
Parameters:
y : the frequency outcome.
x : variables in the conway-maxwell poisson regression
beta : coefficients in the conway maxwell poisson regression
s : the scale parameter in the Conway-Maxwell distribution and is equal to log(nv)
"""
mu = numpy.exp(numpy.dot(x, beta))
nv = numpy.exp(s)
_z = 0
for _n in range(100):
_z = _z + numpy.float_power(mu, _n) / numpy.float_power(scipy.special.factorial(_n), nv)
pr = numpy.float_power(mu, y) / numpy.float_power(scipy.special.factorial(y), nv) * numpy.float_power(_z, -1)
ll = numpy.log(pr)
return(ll)
################################################################################
def compoisson(Y, X):
"""
The function estimates a Conway-Maxwell poisson regression.
Given MU = exp(x * beta), E(Y) ~= MU + nv / 2 - 0.5. In addition to estimated
coefficients beta, there is a scaled parameter S such that nv = Exp(S).
In the COMpoisson, since VAR(Y) ~= E(Y) / nv, nv > 1 suggests the under-dispersion
and nv < 1 suggests the over-dispersion.
Parameters:
Y : a pandas series for the frequency outcome with integer values.
X : a pandas dataframe with the probability model variables that are all numeric values.
Example:
compoisson(Y, X).fit().summary()
"""
class compoisson(gll):
def __init__(self, endog, exog, **kwds):
super(compoisson, self).__init__(endog, exog, **kwds)
def nloglikeobs(self, params):
_s = params[-1]
beta = params[:-1]
ll = _ll_compoisson(self.endog, self.exog, beta, _s)
return(-ll)
def fit(self, start_params = None, maxiter = 10000, maxfun = 5000, method = "ncg", **kwds):
self.exog_names.append('_S')
if start_params == None:
start_params = numpy.append(p0, s0)
return(super(compoisson, self).fit(start_params = start_params, method = method,
maxiter = maxiter, maxfun = maxfun, **kwds))
_Y = Y.copy()
_X = X.copy()
_X.insert(loc = 0, column = "_CONST", value = 1)
p0 = stdpoisson(Y, X).fit(disp = 0).params
s0 = numpy.log(numpy.mean(Y) / numpy.var(Y))
return(compoisson(_Y, _X))
#################### 07. Hurdle Negative Binomial Regression ####################
def _ll_hdlnegbin2(y, x1, x2, beta1, beta2, alpha):
"""
The function calculates the log likelihood function of the hurdle negative
binomial regression.
Parameters:
y : the frequency outcome
x1 : variables for the probability model in the hurdle negative binomial regression
x2 : variables for the count model in the hurdle negative binomial regression
beta1 : coefficients for the probability model in the hurdle negative binomial regression
beta2 : coefficients for the count model in the hurdle negative binomial regression
alpha : the dispersion parameter in the negative binomial distribution
"""
xb1 = numpy.dot(x1, beta1)
xb2 = numpy.dot(x2, beta2)
p0 = numpy.exp(xb1) / (1 + numpy.exp(xb1))
mu = numpy.exp(xb2)
i0 = numpy.where(y == 0, 1, 0)
a1 = 1 / alpha
pr = p0 * i0 + \
(1 - p0) / (1 - numpy.float_power(a1 / (a1 + mu), a1)) * \
scipy.special.gamma(y + a1) / (scipy.special.gamma(y + 1) * scipy.special.gamma(a1)) * \
numpy.float_power(a1 / (a1 + mu), a1) * numpy.float_power(mu / (a1 + mu), y) * (1 - i0)
ll = numpy.log(pr)
return(ll)
################################################################################
def hdlnegbin2(Y, X1, X2):
"""
The function estimates a hurdle negative binomial regression, which is the
composite between point mess at zero and a zero-truncated negative binomial
distribution.
In the model outcome, estimated coefficients starting with "P0:" are used
to predict the probability of zero outcomes and estimated coefficients
starting with "MU:" are used to predict frequency outcomes for a zero-trucated
negative binomial.
Parameters:
Y : a pandas series for the frequency outcome with integer values, including zeros.
X1 : a pandas dataframe with the probability model variables that are all numeric values.
X2 : a pandas dataframe with the count model variables that are all numeric values.
Example:
hdlnegbin2(Y, X1, X2).fit().summary()
"""
class hdlnegbin2(gll):
def __init__(self, endog, exog, **kwds):
super(hdlnegbin2, self).__init__(endog, exog, **kwds)
def nloglikeobs(self, params):
d1 = _X1.shape[1]
beta1 = params[:d1]
beta2 = params[d1:-1]
alpha = params[-1]
ll = _ll_hdlnegbin2(self.endog, self.exog[:, :d1], self.exog[:, d1:], beta1, beta2, alpha)
return(-ll)
def fit(self, start_params = None, maxiter = 10000, maxfun = 5000, method = "ncg", **kwds):
self.exog_names.append('_ALPHA')
if start_params == None:
start_params = numpy.concatenate([p10, p20])
return(super(hdlnegbin2, self).fit(start_params = start_params, method = method,
maxiter = maxiter, maxfun = maxfun, **kwds))
_Y = Y.copy()
_X1 = X1.copy()
_X2 = X2.copy()
_X1.insert(loc = 0, column = "_CONST", value = 1)
_X1.columns = ["P0:" + _ for _ in _X1.columns]
_X2.insert(loc = 0, column = "_CONST", value = 1)
_X2.columns = ["MU:" + _ for _ in _X2.columns]
_X = _X1.join(_X2)
p10 = logit(numpy.where(_Y == 0, 1, 0), _X1).fit(disp = 0).params
p20 = ztrnegbin2(Y[Y > 0], X2[Y > 0]).fit(disp = 0).params
return(hdlnegbin2(_Y, _X))
#################### 08. Zero-Inflated Negative Binomial Regression ####################
def _ll_zifnegbin2(y, x1, x2, beta1, beta2, alpha):
"""
The function calculates the log likelihood function of the zero-inflated
negative binomial regression.
Parameters:
y : the frequency outcome
x1 : variables for the probability model in the zero-inflated negative binomial regression
x2 : variables for the count model in the zero-inflated negative binomial regression
beta1 : coefficients for the probability model in the zero-inflated negative binomial regression
beta2 : coefficients for the count model in the zero-inflated negative binomial regression
alpha : the dispersion parameter in the negative binomial distribution
"""
xb1 = numpy.dot(x1, beta1)
xb2 = numpy.dot(x2, beta2)
p0 = numpy.exp(xb1) / (1 + numpy.exp(xb1))
mu = numpy.exp(xb2)
i0 = numpy.where(y == 0, 1, 0)
a1 = 1 / alpha
pr = (p0 + (1 - p0) * numpy.float_power(a1 / (a1 + mu), a1)) * i0 + \
(1 - p0) * scipy.special.gamma(y + a1) / (scipy.special.gamma(y + 1) * scipy.special.gamma(a1)) * \
numpy.float_power(a1 / (a1 + mu), a1) * numpy.float_power(mu / (a1 + mu), y) * (1 - i0)
ll = numpy.log(pr)
return(ll)
################################################################################
def zifnegbin2(Y, X1, X2):
"""
The function estimates a zero-inflated negative binomial regression, which is
the composite between point mess at zero and a negative binomial distribution.
In the model outcome, estimated coefficients starting with "P0:" are used to
predict the probability of zero outcomes and estimated coefficients starting
with "MU:" are used to predict frequency outcomes for a standard negative
binomial.
Parameters:
Y : a pandas series for the frequency outcome with integer values, including zeros.
X1 : a pandas dataframe with the probability model variables that are all numeric values.
X2 : a pandas dataframe with the count model variables that are all numeric values.
Example:
zifnegbin2(Y, X1, X2).fit().summary()
"""
class zifnegbin2(gll):
def __init__(self, endog, exog, **kwds):
super(zifnegbin2, self).__init__(endog, exog, **kwds)
def nloglikeobs(self, params):
d1 = _X1.shape[1]
beta1 = params[:d1]
beta2 = params[d1:-1]
alpha = params[-1]
ll = _ll_zifnegbin2(self.endog, self.exog[:, :d1], self.exog[:, d1:], beta1, beta2, alpha)
return(-ll)
def fit(self, start_params = None, maxiter = 10000, maxfun = 5000, method = "ncg", **kwds):
self.exog_names.append('_ALPHA')
if start_params == None:
start_params = numpy.concatenate([p10, p20])
return(super(zifnegbin2, self).fit(start_params = start_params, method = method,
maxiter = maxiter, maxfun = maxfun, **kwds))
_Y = Y.copy()
_X1 = X1.copy()
_X2 = X2.copy()
_X1.insert(loc = 0, column = "_CONST", value = 1)
_X1.columns = ["P0:" + _ for _ in _X1.columns]
_X2.insert(loc = 0, column = "_CONST", value = 1)
_X2.columns = ["MU:" + _ for _ in _X2.columns]
_X = _X1.join(_X2)
p10 = logit(numpy.where(_Y == 0, 1, 0), _X1).fit(disp = 0).params
p20 = ztrnegbin2(Y[Y > 0], X2[Y > 0]).fit(disp = 0).params
return(zifnegbin2(_Y, _X))
#################### 09. Zero-truncated Poisson Regression ####################
def _ll_ztrpoisson(y, x, beta):
"""
The function calculates the log likelihood function of the zero-truncated
Poisson regression.
Parameters:
y : the frequency outcome without zero
x : variables of the negative binomial regression
beta : coefficients of the negative binomial regression
"""
mu = numpy.exp(numpy.dot(x, beta))
p0 = numpy.exp(-mu)
pr = numpy.exp(-mu) * numpy.float_power(mu, y) / scipy.special.factorial(y) / (1 - p0)
ll = numpy.log(pr)
return(ll)
################################################################################
def ztrpoisson(Y, X):
"""
The function estimates a zero-truncated Poisson regression.
Parameters:
Y : a pandas series for the frequency outcome wit non-zero integer values.
X : a pandas dataframe with model variables that are all numeric values.
"""
class ztrpoisson(gll):
def __init__(self, endog, exog, **kwds):
super(ztrpoisson, self).__init__(endog, exog, **kwds)
def nloglikeobs(self, params):
beta = params
ll = _ll_ztrpoisson(self.endog, self.exog, beta)
return(-ll)
def fit(self, start_params = None, maxiter = 10000, maxfun = 5000, method = "ncg", **kwds):
if start_params == None:
start_params = numpy.zeros(self.exog.shape[1])
return(super(ztrpoisson, self).fit(start_params = start_params, method = method,
maxiter = maxiter, maxfun = maxfun, **kwds))
_Y = Y.copy()
_X = X.copy()
_X.insert(loc = 0, column = "_CONST", value = 1)
return(ztrpoisson(_Y, _X))
#################### 10. Zero-truncated Negative Binomial Regression ####################
def _ll_ztrnegbin2(y, x, beta, alpha):
"""
The function calculates the log likelihood function of the zero-truncated
negative binomial (NB-2) regression.
Parameters:
y : the frequency outcome with non-zero integer values.
x : variables of the negative binomial regression
beta : coefficients of the negative binomial regression
alpha : the dispersion parameter of the zero-truncated negative binomial regression
"""
mu = numpy.exp(numpy.dot(x, beta))
a1 = 1 / alpha
p0 = numpy.float_power(a1 / (a1 + mu), a1)
pr = scipy.special.gamma(y + a1) / (scipy.special.gamma(y + 1) * scipy.special.gamma(a1)) * \
numpy.float_power(a1 / (a1 + mu), a1) * numpy.float_power(mu / (a1 + mu), y) / (1 - p0)
ll = numpy.log(pr)
return(ll)
################################################################################
def ztrnegbin2(Y, X):
    """
    The function estimates a zero-truncated negative binomial (NB-2) regression.
    Parameters:
      Y : a pandas series for the frequency outcome with non-zero integer values.
      X : a pandas dataframe with model variables that are all numeric values.
    """
    class ztrnegbin2(gll):
        def __init__(self, endog, exog, **kwds):
            super(ztrnegbin2, self).__init__(endog, exog, **kwds)
        def nloglikeobs(self, params):
            # Parameter layout: [beta..., alpha]; alpha is the dispersion.
            alpha = params[-1]
            beta = params[:-1]
            ll = _ll_ztrnegbin2(self.endog, self.exog, beta, alpha)
            return(-ll)
        def fit(self, start_params = None, maxiter = 10000, maxfun = 5000, method = "ncg", **kwds):
            # Guard so repeated fit() calls don't append '_ALPHA' twice.
            if '_ALPHA' not in self.exog_names:
                self.exog_names.append('_ALPHA')
            # "is None": "== None" raises when a numpy array is passed.
            if start_params is None:
                # Warm-start betas from the zero-truncated Poisson fit and
                # seed the dispersion with a0 (previously a hard-coded 1,
                # leaving a0 defined but unused).
                start_params = numpy.append(p0, a0)
            return(super(ztrnegbin2, self).fit(start_params = start_params, method = method,
                                               maxiter = maxiter, maxfun = maxfun, **kwds))
    _Y = Y.copy()
    _X = X.copy()
    _X.insert(loc = 0, column = "_CONST", value = 1)
    p0 = ztrpoisson(Y, X).fit(disp = 0).params
    a0 = 1
    return(ztrnegbin2(_Y, _X))
#################### 11. Zero-truncated Generalized Poisson Regression ####################
def _ll_ztgpoisson(y, x, beta, s):
"""
The function calculates the log likelihood function of the zero-truncated
generalized poisson regression.
Parameters:
y : the frequency outcome with non-zero integer values.
x : variables of the negative binomial regression
beta : coefficients of the negative binomial regression
s : the scaled parameter of the zero-truncated generalized poisson regression
"""
mu = numpy.exp(numpy.dot(x, beta))
xi = numpy.exp(s)
_a = mu * (1 - xi)
p0 = numpy.exp(-_a)
pr = _a / scipy.special.factorial(y) * numpy.exp(-_a - xi * y) * \
numpy.float_power(_a + xi * y, y - 1) / (1 - p0)
ll = numpy.log(pr)
return(ll)
################################################################################
def ztgpoisson(Y, X):
    """
    The function estimates a zero-truncated Generalized Poisson regression. The
    scaled parameter S = Log(Xi). In the Generalized Poisson distribution,
      VAR(Y) = E(Y) / [(1 - Xi) ^ 2]
    such that [(1 - Xi) ^ 2] > 1 means the under-dispersion and [(1 - Xi) ^ 2] < 1
    means the over-dispersion.
    Parameters:
      Y : a pandas series for the frequency outcome with non-zero integer values.
      X : a pandas dataframe with model variables that are all numeric values.
    """
    class ztgpoisson(gll):
        def __init__(self, endog, exog, **kwds):
            super(ztgpoisson, self).__init__(endog, exog, **kwds)
        def nloglikeobs(self, params):
            # Parameter layout: [beta..., s].
            _s = params[-1]
            beta = params[:-1]
            ll = _ll_ztgpoisson(self.endog, self.exog, beta, _s)
            return(-ll)
        def fit(self, start_params = None, maxiter = 10000, maxfun = 5000, method = "ncg", **kwds):
            # Guard so repeated fit() calls don't append '_S' twice.
            if '_S' not in self.exog_names:
                self.exog_names.append('_S')
            # "is None": "== None" raises when a numpy array is passed.
            if start_params is None:
                start_params = numpy.append(p0, s0)
            return(super(ztgpoisson, self).fit(start_params = start_params, method = method,
                                               maxiter = maxiter, maxfun = maxfun, **kwds))
    _Y = Y.copy()
    _X = X.copy()
    _X.insert(loc = 0, column = "_CONST", value = 1)
    p0 = ztrpoisson(Y, X).fit(disp = 0).params
    # Method-of-moments seed for s; the 1e-4 floor keeps log() finite when
    # the sample is equi- or under-dispersed.
    s0 = numpy.log(max(1e-4, 1 - numpy.float_power(numpy.mean(Y) / numpy.var(Y), 0.5)))
    return(ztgpoisson(_Y, _X))
#################### 12. Zero-truncated Conway-Maxwell Poisson Regression ####################
def _ll_ztcpoisson(y, x, beta, s):
"""
The function calculates the log likelihood function of the zero-truncated
conway-maxwell poisson regression.
Parameters:
y : the frequency outcome with non-zero integer values.
x : variables of the negative binomial regression
beta : coefficients of the negative binomial regression
s : the scaled parameter of the zero-truncated conway-maxwell poisson regression
"""
mu = numpy.exp(numpy.dot(x, beta))
nv = numpy.exp(s)
_z = 0
for _n in range(100):
_z = _z + numpy.float_power(mu, _n) / numpy.float_power(scipy.special.factorial(_n), nv)
pr = numpy.float_power(mu, y) / numpy.float_power(scipy.special.factorial(y), nv) * numpy.float_power(_z, -1) / \
(1 - numpy.float_power(_z, -1))
ll = numpy.log(pr)
return(ll)
################################################################################
def ztcpoisson(Y, X):
    """
    The function estimates a zero-truncated Conway-Maxwell Poisson regression.
    The scaled parameter S = Log(nv). In the Conway-Maxwell Poisson distribution,
      VAR(Y) ~= E(Y) / nv
    such that nv > 1 means the under-dispersion and nv < 1 means the over-dispersion.
    Parameters:
      Y : a pandas series for the frequency outcome with non-zero integer values.
      X : a pandas dataframe with model variables that are all numeric values.
    """
    class ztcpoisson(gll):
        def __init__(self, endog, exog, **kwds):
            super(ztcpoisson, self).__init__(endog, exog, **kwds)
        def nloglikeobs(self, params):
            # Parameter layout: [beta..., s].
            _s = params[-1]
            beta = params[:-1]
            ll = _ll_ztcpoisson(self.endog, self.exog, beta, _s)
            return(-ll)
        def fit(self, start_params = None, maxiter = 10000, maxfun = 5000, method = "ncg", **kwds):
            # Guard so repeated fit() calls don't append '_S' twice.
            if '_S' not in self.exog_names:
                self.exog_names.append('_S')
            # "is None": "== None" raises when a numpy array is passed.
            if start_params is None:
                start_params = numpy.append(p0, s0)
            return(super(ztcpoisson, self).fit(start_params = start_params, method = method,
                                               maxiter = maxiter, maxfun = maxfun, **kwds))
    _Y = Y.copy()
    _X = X.copy()
    _X.insert(loc = 0, column = "_CONST", value = 1)
    p0 = ztrpoisson(Y, X).fit(disp = 0).params
    # Seed s with the log dispersion ratio VAR(Y) ~= E(Y) / nv  =>  nv ~= mean/var.
    s0 = numpy.log(numpy.mean(Y) / numpy.var(Y))
    return(ztcpoisson(_Y, _X))
#################### 13. Hurdle Generalized Poisson Regression ####################
def _ll_hdgpoisson(y, x1, x2, beta1, beta2, s):
"""
The function calculates the log likelihood function of the hurdle generalized
poisson regression.
Parameters:
y : the frequency outcome
x1 : variables for the probability model in the hurdle generalized poisson regression
x2 : variables for the count model in the hurdle generalized poisson regression
beta1 : coefficients for the probability model in the hurdle generalized poisson regression
beta2 : coefficients for the count model in the hurdle generalized poisson regression
s : the scale parameter for the generalized poisson distribution
"""
xb1 = numpy.dot(x1, beta1)
xb2 = numpy.dot(x2, beta2)
p0 = numpy.exp(xb1) / (1 + numpy.exp(xb1))
mu = numpy.exp(xb2)
xi = numpy.exp(s)
_a = mu * (1 - xi)
i0 = numpy.where(y == 0, 1, 0)
pr = p0 * i0 + \
(1 - p0) * _a / scipy.special.factorial(y) * numpy.exp(-_a - xi * y) * \
numpy.float_power(_a + xi * y, y - 1) / (1 - numpy.exp(-_a)) * (1 - i0)
ll = numpy.log(pr)
return(ll)
################################################################################
def hdgpoisson(Y, X1, X2):
    """
    The function estimates a hurdle generalized poisson regression, which is the
    composite between a point mass at zero and a zero-truncated generalized
    poisson distribution.
    In the model outcome, estimated coefficients starting with "P0:" are used
    to predict the probability of zero outcomes and estimated coefficients
    starting with "MU:" are used to predict frequency outcomes for a
    zero-truncated generalized poisson.
    Parameters:
      Y  : a pandas series for the frequency outcome with integer values, including zeros.
      X1 : a pandas dataframe with the probability model variables that are all numeric values.
      X2 : a pandas dataframe with the count model variables that are all numeric values.
    Example:
      hdgpoisson(Y, X1, X2).fit().summary()
    """
    class hdgpoisson(gll):
        def __init__(self, endog, exog, **kwds):
            super(hdgpoisson, self).__init__(endog, exog, **kwds)
        def nloglikeobs(self, params):
            # Parameter layout: [beta1 (first d1), beta2, s].
            _s = params[-1]
            d1 = _X1.shape[1]
            beta1 = params[:d1]
            beta2 = params[d1:-1]
            ll = _ll_hdgpoisson(self.endog, self.exog[:, :d1], self.exog[:, d1:], beta1, beta2, _s)
            return(-ll)
        def fit(self, start_params = None, maxiter = 10000, maxfun = 5000, method = "ncg", **kwds):
            # Guard so repeated fit() calls don't append '_S' twice.
            if '_S' not in self.exog_names:
                self.exog_names.append('_S')
            # "is None": "== None" raises when a numpy array is passed.
            if start_params is None:
                start_params = numpy.concatenate([p10, p20])
            return(super(hdgpoisson, self).fit(start_params = start_params, method = method,
                                               maxiter = maxiter, maxfun = maxfun, **kwds))
    _Y = Y.copy()
    _X1 = X1.copy()
    _X2 = X2.copy()
    _X1.insert(loc = 0, column = "_CONST", value = 1)
    _X1.columns = ["P0:" + _ for _ in _X1.columns]
    _X2.insert(loc = 0, column = "_CONST", value = 1)
    _X2.columns = ["MU:" + _ for _ in _X2.columns]
    _X = _X1.join(_X2)
    # Warm starts: a logistic model for P(Y == 0) and a zero-truncated
    # generalized poisson on the positive counts (p20 already includes _S).
    p10 = logit(numpy.where(_Y == 0, 1, 0), _X1).fit(disp = 0).params
    p20 = ztgpoisson(Y[Y > 0], X2[Y > 0]).fit(disp = 0).params
    return(hdgpoisson(_Y, _X))
#################### 14. Zero-Inflated Generalized Poisson Regression ####################
def _ll_zigpoisson(y, x1, x2, beta1, beta2, s):
"""
The function calculates the log likelihood function of the zero-inflated generalized
poisson regression.
Parameters:
y : the frequency outcome
x1 : variables for the probability model in the zero-inflated generalized poisson regression
x2 : variables for the count model in the zero-inflated generalized poisson regression
beta1 : coefficients for the probability model in the zero-inflated generalized poisson regression
beta2 : coefficients for the count model in the zero-inflated generalized poisson regression
s : the scale parameter for the generalized poisson distribution
"""
xb1 = numpy.dot(x1, beta1)
xb2 = numpy.dot(x2, beta2)
p0 = numpy.exp(xb1) / (1 + numpy.exp(xb1))
mu = numpy.exp(xb2)
xi = numpy.exp(s)
_a = mu * (1 - xi)
i0 = numpy.where(y == 0, 1, 0)
pr = (p0 + (1 - p0) * numpy.exp(-_a)) * i0 + \
(1 - p0) * _a / scipy.special.factorial(y) * numpy.exp(-_a - xi * y) * \
numpy.float_power(_a + xi * y, y - 1) * (1 - i0)
ll = numpy.log(pr)
return(ll)
################################################################################
def zigpoisson(Y, X1, X2):
    """
    The function estimates a zero-inflated generalized poisson regression, which is
    the composite between a point mass at zero and a generalized poisson
    distribution.
    In the model outcome, estimated coefficients starting with "P0:" are used
    to predict the probability of zero outcomes and estimated coefficients
    starting with "MU:" are used to predict frequency outcomes for the
    generalized poisson component.
    Parameters:
      Y  : a pandas series for the frequency outcome with integer values, including zeros.
      X1 : a pandas dataframe with the probability model variables that are all numeric values.
      X2 : a pandas dataframe with the count model variables that are all numeric values.
    Example:
      zigpoisson(Y, X1, X2).fit().summary()
    """
    class zigpoisson(gll):
        def __init__(self, endog, exog, **kwds):
            super(zigpoisson, self).__init__(endog, exog, **kwds)
        def nloglikeobs(self, params):
            # Parameter layout: [beta1 (first d1), beta2, s].
            _s = params[-1]
            d1 = _X1.shape[1]
            beta1 = params[:d1]
            beta2 = params[d1:-1]
            ll = _ll_zigpoisson(self.endog, self.exog[:, :d1], self.exog[:, d1:], beta1, beta2, _s)
            return(-ll)
        def fit(self, start_params = None, maxiter = 10000, maxfun = 5000, method = "ncg", **kwds):
            # Guard so repeated fit() calls don't append '_S' twice.
            if '_S' not in self.exog_names:
                self.exog_names.append('_S')
            # "is None": "== None" raises when a numpy array is passed.
            if start_params is None:
                start_params = numpy.concatenate([p10, p20])
            return(super(zigpoisson, self).fit(start_params = start_params, method = method,
                                               maxiter = maxiter, maxfun = maxfun, **kwds))
    _Y = Y.copy()
    _X1 = X1.copy()
    _X2 = X2.copy()
    _X1.insert(loc = 0, column = "_CONST", value = 1)
    _X1.columns = ["P0:" + _ for _ in _X1.columns]
    _X2.insert(loc = 0, column = "_CONST", value = 1)
    _X2.columns = ["MU:" + _ for _ in _X2.columns]
    _X = _X1.join(_X2)
    # Warm starts: a logistic model for P(Y == 0) and a zero-truncated
    # generalized poisson on the positive counts (p20 already includes _S).
    p10 = logit(numpy.where(_Y == 0, 1, 0), _X1).fit(disp = 0).params
    p20 = ztgpoisson(Y[Y > 0], X2[Y > 0]).fit(disp = 0).params
    return(zigpoisson(_Y, _X))
| 38.143312
| 115
| 0.621998
| 4,809
| 35,931
| 4.53795
| 0.04242
| 0.035284
| 0.021995
| 0.018329
| 0.913165
| 0.889612
| 0.861201
| 0.853916
| 0.81327
| 0.795949
| 0
| 0.030797
| 0.217389
| 35,931
| 941
| 116
| 38.183847
| 0.74527
| 0.386101
| 0
| 0.724512
| 0
| 0
| 0.011755
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.151844
| false
| 0
| 0.006508
| 0
| 0.18872
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
965beed92f9f8b39b9b18ba204c09ea7f66dd728
| 13,174
|
py
|
Python
|
pyActionRecog/action_caffe.py
|
Ewenwan/DTPP
|
0a10dd8c61596d5326fbbe70dcac0eae59088c27
|
[
"BSD-2-Clause"
] | 1
|
2019-05-07T01:00:18.000Z
|
2019-05-07T01:00:18.000Z
|
pyActionRecog/action_caffe.py
|
Ewenwan/DTPP
|
0a10dd8c61596d5326fbbe70dcac0eae59088c27
|
[
"BSD-2-Clause"
] | null | null | null |
pyActionRecog/action_caffe.py
|
Ewenwan/DTPP
|
0a10dd8c61596d5326fbbe70dcac0eae59088c27
|
[
"BSD-2-Clause"
] | 1
|
2019-09-18T05:27:50.000Z
|
2019-09-18T05:27:50.000Z
|
import sys
proto_root = "/home2/lin_li/anaconda2/pkgs/libprotobuf-3.2.0-0/lib/"
sys.path.insert(0, proto_root)
import caffe
from caffe.io import oversample
import numpy as np
from utils.io import flow_stack_oversample, fast_list2arr, rgb_stack_oversample, oversample_for_rgb_stack, flow_stack_oversample_new, oversample_for_flow_stack_test
import cv2
import matplotlib.pyplot as plt
class CaffeNet(object):
    """Wrapper around a caffe.Net (GPU mode) that scores RGB frames and
    optical-flow stacks, with optional 10-crop / multiscale oversampling.

    NOTE(review): this is Python 2 code (print statements below).
    """
    def __init__(self, net_proto, net_weights, device_id, input_size=None):
        """Load the net in TEST phase on the given GPU.

        net_proto   : path to the deploy prototxt.
        net_weights : path to the .caffemodel weights.
        device_id   : GPU id passed to caffe.set_device.
        input_size  : optional spatial size overriding the prototxt dims.
        """
        caffe.set_mode_gpu()
        caffe.set_device(device_id)
        self._net = caffe.Net(net_proto, net_weights, caffe.TEST)
        input_shape = self._net.blobs['data'].data.shape
        if input_size is not None:
            input_shape = input_shape[:2] + input_size
        transformer = caffe.io.Transformer({'data': input_shape})
        #if self._net.blobs['data'].data.shape[1] == 3:
        #printf
        # transformer.set_transpose('data', (2, 0, 1)) # move image channels to outermost dimension
        # transformer.set_mean('data', np.array([104, 117, 123])) # subtract the dataset-mean value in each channel
        #else:
        # pass # non RGB data need not use transformer
        # NOTE(review): the transformer is stored but never used; preprocessing
        # is done by the inline preprocess_1 helpers in the predict methods.
        self._transformer = transformer
        self._sample_shape = self._net.blobs['data'].data.shape
    def predict_single_frame(self, frame, score_name, over_sample=True, multiscale=None, frame_size=None):
        """Score one group of RGB frames; return a copy of the score blob.

        over_sample applies caffe's 10-crop oversampling (plus optional
        extra scales via `multiscale`); frame_size optionally resizes first.
        """
        if frame_size is not None:
            # frame1 is built but unused beyond the commented debug print.
            frame1 = fast_list2arr([x for x in frame])
            frame = [cv2.resize(x, frame_size) for x in frame]
            #print frame1.shape
        if over_sample:
            if multiscale is None:
                os_frame = oversample(frame, (self._sample_shape[2], self._sample_shape[3]))
            else:
                os_frame = []
                for scale in multiscale:
                    resized_frame = [cv2.resize(x, (0,0), fx=1.0/scale, fy=1.0/scale) for x in frame]
                    os_frame.extend(oversample(resized_frame, (self._sample_shape[2], self._sample_shape[3])))
        else:
            os_frame = fast_list2arr(frame)
        #print os_frame.shape
        #data = fast_list2arr([self._transformer.preprocess('data', x) for x in os_frame])
        def preprocess_1(r):
            # HWC -> CHW, then subtract the per-channel mean (104, 117, 123).
            # Presumably ImageNet BGR means, as in caffe convention — confirm.
            r = r.transpose(2,0,1)
            r[0,:,:] = r[0,:,:] - 104
            r[1,:,:] = r[1,:,:] - 117
            r[2,:,:] = r[2,:,:] - 123
            return r
        data = fast_list2arr([preprocess_1(x) for x in os_frame])
        #print data.shape
        # Reshape the input blob to the actual batch and run one forward pass.
        self._net.blobs['data'].reshape(*data.shape)
        self._net.reshape()
        out = self._net.forward(blobs=[score_name,], data=data)
        return out[score_name].copy()
    def predict_single_rgb_stack(self, frame, score_name, over_sample=True, multiscale=None, frame_size=None, stack_len=25):
        """Score a stack of `stack_len` RGB frames in one forward pass."""
        if frame_size is not None:
            frame = [cv2.resize(x, frame_size) for x in frame]
        if over_sample:
            if multiscale is None:
                os_frame = oversample_for_rgb_stack(frame, (self._sample_shape[2], self._sample_shape[3]),stack_len)
            else:
                os_frame = []
                for scale in multiscale:
                    resized_frame = [cv2.resize(x, (0,0), fx=1.0/scale, fy=1.0/scale) for x in frame]
                    os_frame.extend(oversample(resized_frame, (self._sample_shape[2], self._sample_shape[3])))
        else:
            os_frame = fast_list2arr(frame)
        def preprocess_1(r):
            # HWC -> CHW, then subtract the per-channel mean (104, 117, 123).
            r = r.transpose(2,0,1)
            r[0,:,:] = r[0,:,:] - 104
            r[1,:,:] = r[1,:,:] - 117
            r[2,:,:] = r[2,:,:] - 123
            return r
        data = fast_list2arr([preprocess_1(x) for x in os_frame])
        self._net.blobs['data'].reshape(*data.shape)
        self._net.reshape()
        out = self._net.forward(blobs=[score_name,], data=data)
        return out[score_name].copy()
    def predict_single_rgb_stack_memory(self, frame, score_name, over_sample=True, multiscale=None, frame_size=None, stack_len=25):
        """Same as predict_single_rgb_stack but forwards the 10 oversampled
        crops one at a time to reduce GPU memory; returns a (10, -1) array."""
        if frame_size is not None:
            frame = [cv2.resize(x, frame_size) for x in frame]
        if over_sample:
            if multiscale is None:
                os_frame = oversample_for_rgb_stack(frame, (self._sample_shape[2], self._sample_shape[3]),stack_len)
            else:
                os_frame = []
                for scale in multiscale:
                    resized_frame = [cv2.resize(x, (0,0), fx=1.0/scale, fy=1.0/scale) for x in frame]
                    os_frame.extend(oversample(resized_frame, (self._sample_shape[2], self._sample_shape[3])))
        else:
            os_frame = fast_list2arr(frame)
        def preprocess_1(r):
            # HWC -> CHW, then subtract the per-channel mean (104, 117, 123).
            r = r.transpose(2,0,1)
            r[0,:,:] = r[0,:,:] - 104
            r[1,:,:] = r[1,:,:] - 117
            r[2,:,:] = r[2,:,:] - 123
            return r
        data = fast_list2arr([preprocess_1(x) for x in os_frame])
        # self._net.blobs['data'].reshape(*data.shape)
        # self._net.reshape()
        #
        # out = self._net.forward(blobs=[score_name,], data=data)
        # return out[score_name].copy()
        # Group channels per crop: assumes 10 crops of 3*stack_len channels at
        # 224x224 — confirm against the deploy prototxt.
        data_new = data.reshape(-1,3*stack_len,224,224)
        scores_new = []
        for i in range(10):
            data_ele = data_new[i]
            self._net.blobs['data'].reshape(*data_ele.shape)
            self._net.reshape()
            out = self._net.forward(blobs=[score_name,], data=data_ele)
            scores_new.append(out[score_name].copy())
        scores_new = np.array(scores_new).reshape(10,-1)
        return scores_new
    def predict_single_flow_stack_test(self, frame, score_name, over_sample=True, multiscale=None, frame_size=None, stack_len=25):
        """Score an optical-flow stack (flow-specific oversampling helper)."""
        if over_sample:
            if multiscale is None:
                os_frame = oversample_for_flow_stack_test(frame, (self._sample_shape[2], self._sample_shape[3]),stack_len)
            else:
                os_frame = []
                for scale in multiscale:
                    resized_frame = [cv2.resize(x, (0,0), fx=1.0/scale, fy=1.0/scale) for x in frame]
                    os_frame.extend(oversample(resized_frame, (self._sample_shape[2], self._sample_shape[3])))
        else:
            os_frame = fast_list2arr(frame)
        # NHWC -> NCHW, then center flow values around zero (mean 128).
        os_frame = np.array(os_frame).transpose(0,3,1,2)
        data = os_frame - np.float32(128.0)
        self._net.blobs['data'].reshape(*data.shape)
        self._net.reshape()
        out = self._net.forward(blobs=[score_name,], data=data)
        return out[score_name].copy()
    def predict_single_flow_stack_test_memory(self, frame, score_name, over_sample=True, multiscale=None, frame_size=None, stack_len=25):
        """Memory-saving variant of predict_single_flow_stack_test: forwards
        the 10 oversampled crops one at a time; returns a (10, -1) array."""
        if over_sample:
            if multiscale is None:
                os_frame = oversample_for_flow_stack_test(frame, (self._sample_shape[2], self._sample_shape[3]),stack_len)
            else:
                os_frame = []
                for scale in multiscale:
                    resized_frame = [cv2.resize(x, (0,0), fx=1.0/scale, fy=1.0/scale) for x in frame]
                    os_frame.extend(oversample(resized_frame, (self._sample_shape[2], self._sample_shape[3])))
        else:
            os_frame = fast_list2arr(frame)
        # NHWC -> NCHW, then center flow values around zero (mean 128).
        os_frame = np.array(os_frame).transpose(0,3,1,2)
        data = os_frame - np.float32(128.0)
        # self._net.blobs['data'].reshape(*data.shape)
        # self._net.reshape()
        # out = self._net.forward(blobs=[score_name,], data=data)
        # return out[score_name].copy()
        # Assumes 10 crops of 10*stack_len channels at 224x224 — confirm
        # against the deploy prototxt.
        data_new = data.reshape(-1,10*stack_len,224,224)
        scores_new = []
        for i in range(10):
            data_ele = data_new[i]
            self._net.blobs['data'].reshape(*data_ele.shape)
            self._net.reshape()
            out = self._net.forward(blobs=[score_name,], data=data_ele)
            scores_new.append(out[score_name].copy())
        scores_new = np.array(scores_new).reshape(10, -1)
        return scores_new
    def predict_single_flow_stack(self, frame, score_name, over_sample=True, frame_size=None):
        """Score one optical-flow stack with the generic flow oversampler."""
        if frame_size is not None:
            frame = fast_list2arr([cv2.resize(x, frame_size) for x in frame])
        else:
            frame = fast_list2arr(frame)
        if over_sample:
            os_frame = flow_stack_oversample(frame, (self._sample_shape[2], self._sample_shape[3]))
        else:
            os_frame = fast_list2arr([frame])
        # Center flow values around zero (mean 128).
        data = os_frame - np.float32(128.0)
        self._net.blobs['data'].reshape(*data.shape)
        self._net.reshape()
        out = self._net.forward(blobs=[score_name,], data=data)
        return out[score_name].copy()
    def predict_single_flow_stack_feature_map(self, frame, score_name, over_sample=False, frame_size=None, blobname = 'conv1/7x7_s2', dim = 30):
        """Forward one flow stack and return the first `dim` channels of the
        intermediate blob `blobname` (for feature-map inspection/visualization).

        NOTE(review): writes directly into the pre-sized data blob rather than
        reshaping, so `data` must already match the prototxt batch shape.
        """
        if frame_size is not None:
            frame = fast_list2arr([cv2.resize(x, frame_size) for x in frame])
        else:
            frame = fast_list2arr(frame)
        print "frame", frame.shape
        if over_sample:
            os_frame = flow_stack_oversample(frame, (self._sample_shape[2], self._sample_shape[3]))
        else:
            os_frame = fast_list2arr([frame])
        print "os_frame", os_frame.shape
        # (10, 256, 340)
        # (10, 10, 224, 224)
        data = os_frame - np.float32(128.0)
        print data.shape
        #self._net.blobs['data'].reshape(*data.shape)
        print self._net.blobs['data'].data[0].shape
        self._net.blobs['data'].data[...] = data
        #self._net.reshape()
        out = self._net.forward()#data=data
        feat = self._net.blobs[blobname].data[0,:dim]
        return feat.copy()
    def predict_single_flow_rgb_stack(self, flow_frame, rgb_frame, score_name, over_sample=True, frame_size=None, multiscale=None, score_name_1=None):
        """Score a fused flow+RGB input: both modalities are oversampled,
        preprocessed and concatenated along the channel axis. Returns the
        score blob; if score_name_1 is given, returns both blobs as a tuple.
        """
        flow_1 = fast_list2arr([cv2.resize(x, frame_size) for x in flow_frame])
        flow_2 = flow_stack_oversample(flow_1, (self._sample_shape[2], self._sample_shape[3]))
        flow_data = flow_2 - np.float32(128.0)
        rgb_1 = [cv2.resize(x, frame_size) for x in rgb_frame]
        rgb_2 = oversample(rgb_1, (self._sample_shape[2], self._sample_shape[3]))
        #rgb_data1 = fast_list2arr(os_frame_rgb)
        # print rgb_data1.shape
        def preprocess_1(r):
            # HWC -> CHW, then subtract the per-channel mean (104, 117, 123).
            r = r.transpose(2,0,1)
            r[0,:,:] = r[0,:,:] - 104
            r[1,:,:] = r[1,:,:] - 117
            r[2,:,:] = r[2,:,:] - 123
            return r
        rgb_data = fast_list2arr([preprocess_1(x) for x in rgb_2])
        #print flow_data.shape
        #print rgb_data.shape
        #flow_data = np.reshape(flow_data, (10,-1,224,224))
        rgb_data = np.reshape(rgb_data, (10,-1,224,224))
        #print flow_data.shape
        #print rgb_data.shape
        #data = np.array([], dtype = rgb_data[0].dtype)
        # Channel-wise fusion of the two modalities per crop.
        data = np.concatenate((flow_data, rgb_data), axis=1)
        #print data.shape
        self._net.blobs['data'].reshape(*data.shape)
        self._net.reshape()
        out = self._net.forward(blobs=[score_name,], data=data)
        if score_name_1 is not None:
            # Second forward pass to fetch the extra score blob.
            out_1 = self._net.forward(blobs=[score_name_1,], data=data)
            return out[score_name].copy(), out_1[score_name_1].copy()
        return out[score_name].copy()
    def predict_single_flow_rgb_stack_3(self, flow_frame, rgb_frame, score_name, over_sample=True, frame_size=None,
                                        multiscale=None, score_name_1=None, score_name_2=None):
        """Like predict_single_flow_rgb_stack, but can return up to three
        score blobs (score_name, score_name_1, score_name_2) as a tuple."""
        flow_1 = fast_list2arr([cv2.resize(x, frame_size) for x in flow_frame])
        flow_2 = flow_stack_oversample(flow_1, (self._sample_shape[2], self._sample_shape[3]))
        flow_data = flow_2 - np.float32(128.0)
        rgb_1 = [cv2.resize(x, frame_size) for x in rgb_frame]
        rgb_2 = oversample(rgb_1, (self._sample_shape[2], self._sample_shape[3]))
        # rgb_data1 = fast_list2arr(os_frame_rgb)
        # print rgb_data1.shape
        def preprocess_1(r):
            # HWC -> CHW, then subtract the per-channel mean (104, 117, 123).
            r = r.transpose(2, 0, 1)
            r[0, :, :] = r[0, :, :] - 104
            r[1, :, :] = r[1, :, :] - 117
            r[2, :, :] = r[2, :, :] - 123
            return r
        rgb_data = fast_list2arr([preprocess_1(x) for x in rgb_2])
        # print flow_data.shape
        # print rgb_data.shape
        # flow_data = np.reshape(flow_data, (10,-1,224,224))
        rgb_data = np.reshape(rgb_data, (10, -1, 224, 224))
        # print flow_data.shape
        # print rgb_data.shape
        # data = np.array([], dtype = rgb_data[0].dtype)
        # Channel-wise fusion of the two modalities per crop.
        data = np.concatenate((flow_data, rgb_data), axis=1)
        # print data.shape
        self._net.blobs['data'].reshape(*data.shape)
        self._net.reshape()
        out = self._net.forward(blobs=[score_name, ], data=data)
        if score_name_1 is not None and score_name_2 is not None:
            out_1 = self._net.forward(blobs=[score_name_1, ], data=data)
            out_2 = self._net.forward(blobs=[score_name_2, ], data=data)
            # print "here"
            return out[score_name].copy(), out_1[score_name_1].copy(), out_2[score_name_2].copy()
        return out[score_name].copy()
| 38.520468
| 164
| 0.595491
| 1,859
| 13,174
| 3.962883
| 0.075847
| 0.041808
| 0.067192
| 0.03475
| 0.851771
| 0.844849
| 0.824623
| 0.803584
| 0.803041
| 0.789059
| 0
| 0.042593
| 0.272886
| 13,174
| 342
| 165
| 38.520468
| 0.726485
| 0.1046
| 0
| 0.78341
| 0
| 0.004608
| 0.011063
| 0.00451
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.032258
| null | null | 0.018433
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
966f5ad7fe638cc4eb0694c610665c4b553994c5
| 44,090
|
py
|
Python
|
tests/unit/pypyr/pipeline_test.py
|
pypyr/pypyr-cli
|
dc0f694ac0c0e3c2844c1a20788c9af586a8a16e
|
[
"Apache-2.0"
] | 31
|
2017-03-24T11:27:34.000Z
|
2020-05-27T20:06:28.000Z
|
tests/unit/pypyr/pipeline_test.py
|
pypyr/pypyr-cli
|
dc0f694ac0c0e3c2844c1a20788c9af586a8a16e
|
[
"Apache-2.0"
] | 89
|
2017-04-12T09:50:32.000Z
|
2020-08-13T13:18:36.000Z
|
tests/unit/pypyr/pipeline_test.py
|
pypyr/pypyr-cli
|
dc0f694ac0c0e3c2844c1a20788c9af586a8a16e
|
[
"Apache-2.0"
] | 6
|
2017-06-04T14:19:59.000Z
|
2020-02-10T13:16:40.000Z
|
"""pipeline.py unit tests.
A lot of the tests for the Pipeline.new_pipe_and_args factory constructor
exist in ./pipelinerunner_test.py, which tests at a higher level that the
inputs from a run request map as expected into the Pipeline instance.
"""
import logging
from unittest.mock import call, patch, Mock
import pytest
from pypyr.cache.loadercache import loader_cache
from pypyr.cache.parsercache import contextparser_cache
from pypyr.context import Context
from pypyr.errors import (ContextError,
KeyNotInContextError,
PyModuleNotFoundError,
Stop,
StopPipeline,
StopStepGroup)
from pypyr.pipeline import Pipeline
from pypyr.pipedef import PipelineDefinition, PipelineInfo
from tests.common.utils import DeepCopyMagicMock
from tests.common.utils import patch_logger
def get_pipe_def(dict_in, info=None):
    """Wrap the given pipeline body & info into a PipelineDefinition."""
    return PipelineDefinition(info=info, pipeline=dict_in)
# region context parser
# region parser mocks
def mock_parser_arb(args):
    """Arbitrary mock function to execute instead of get_parsed_context."""
    parsed = {'key1': 'created in mock parser', 'key2': args}
    return Context(parsed)
def mock_parser_none(args):
    """Mock get_parsed_context by always returning None."""
    del args  # input deliberately ignored
    return None
# endregion parser mocks
@patch('pypyr.cache.loadercache.Loader.get_pipeline')
def test_get_parsed_context_no_parser(mock_get_pipeline):
    """On get_parsed_context return empty Context when no parser specified."""
    # Empty pipeline body: no 'context_parser' key at all.
    mock_get_pipeline.return_value = get_pipe_def({})
    context = Context()
    pipeline = Pipeline('arb')
    pipeline.run(context)
    # No parser ran, so nothing merged into the context.
    assert context == {}
    mock_get_pipeline.assert_called_once_with(name='arb', parent=None)
@patch('pypyr.cache.loadercache.Loader.get_pipeline')
def test_get_parsed_context_parser_not_found(mock_get_pipeline):
    """On get_parsed_context raise if parser module specified but not found."""
    # Parser module name that cannot possibly resolve on sys.path.
    mock_get_pipeline.return_value = get_pipe_def({
        'context_parser': 'unlikelyblahmodulenameherexxssz'})
    context = Context()
    pipeline = Pipeline('arb')
    with pytest.raises(PyModuleNotFoundError):
        pipeline.run(context)
    # The run aborted before any parsing, so context stays empty.
    assert context == {}
    mock_get_pipeline.assert_called_once_with(name='arb', parent=None)
@patch('pypyr.moduleloader.get_module')
@patch('pypyr.cache.loadercache.Loader.get_pipeline')
def test_get_parsed_context_parser_returns_none(mock_get_pipeline,
                                                mock_moduleloader):
    """On get_parsed_context return empty Context when parser returns None."""
    # patch decorators apply bottom-up: get_pipeline is the first parameter.
    mock_moduleloader.return_value.get_parsed_context = mock_parser_none
    mock_get_pipeline.return_value = get_pipe_def(
        {'context_parser': 'specifiedparserhere'})
    pipeline = Pipeline('arb', context_args=['in arg here'])
    context = Context()
    pipeline.run(context)
    mock_moduleloader.assert_called_once_with('specifiedparserhere')
    mock_get_pipeline.assert_called_once_with(name='arb', parent=None)
    # Parser returned None, so nothing merged into the context.
    assert context == {}
@patch('pypyr.moduleloader.get_module')
@patch('pypyr.cache.loadercache.Loader.get_pipeline')
def test_get_parsed_context_parser_pass(mock_get_pipeline, mock_moduleloader):
    """On get_parsed_context pass arg param and returns context."""
    # Clear the parser cache so the mocked module actually gets loaded.
    contextparser_cache.clear()
    mock_moduleloader.return_value.get_parsed_context = mock_parser_arb
    mock_get_pipeline.return_value = get_pipe_def(
        {'context_parser': 'specifiedparserhere'})
    pipeline = Pipeline('arb', context_args='in arg here')
    context = Context()
    pipeline.run(context)
    mock_moduleloader.assert_called_once_with('specifiedparserhere')
    mock_get_pipeline.assert_called_once_with(name='arb', parent=None)
    # mock_parser_arb echoes context_args back under key2.
    assert isinstance(context, Context)
    assert len(context) == 2
    assert context['key1'] == 'created in mock parser'
    assert context['key2'] == 'in arg here'
@patch('pypyr.moduleloader.get_module', return_value=3)
@patch('pypyr.cache.loadercache.Loader.get_pipeline')
def test_get_parser_context_signature_wrong(mock_get_pipeline,
                                            mock_moduleloader):
    """Raise when parser found but no get_parsed_context attr."""
    contextparser_cache.clear()
    # get_module returns the int 3, so attribute lookup on the "module" fails.
    mock_get_pipeline.return_value = get_pipe_def(
        {'context_parser': 'specifiedparserhere'})
    pipeline = Pipeline('arb', context_args='in arg here')
    context = Context()
    with pytest.raises(AttributeError) as err_info:
        pipeline.run(context)
    mock_moduleloader.assert_called_once_with('specifiedparserhere')
    mock_get_pipeline.assert_called_once_with(name='arb', parent=None)
    assert str(err_info.value) == ("'int' object has no attribute "
                                   "'get_parsed_context'")
@patch('pypyr.moduleloader.get_module')
@patch('pypyr.cache.loadercache.Loader.get_pipeline')
def test_prepare_context_empty_parse(mock_get_pipeline,
                                     mock_moduleloader):
    """Empty parsed_context works."""
    contextparser_cache.clear()
    # Parser that returns an empty dict: nothing to merge.
    parser = Mock()
    parser.return_value = {}
    mock_moduleloader.return_value.get_parsed_context = parser
    mock_get_pipeline.return_value = get_pipe_def(
        {'context_parser': 'specifiedparserhere'})
    pipeline = Pipeline('arb', context_args='arb context input')
    context = Context({'c1': 'cv1', 'c2': 'cv2'})
    pipeline.run(context)
    mock_moduleloader.assert_called_once_with('specifiedparserhere')
    parser.assert_called_once_with('arb context input')
    mock_get_pipeline.assert_called_once_with(name='arb', parent=None)
    # Original context survives untouched.
    assert context == {'c1': 'cv1', 'c2': 'cv2'}
@patch('pypyr.moduleloader.get_module')
@patch('pypyr.cache.loadercache.Loader.get_pipeline')
def test_prepare_context_with_parse_merge(mock_get_pipeline,
                                          mock_moduleloader):
    """On parsed_context override context."""
    contextparser_cache.clear()
    # Parser output overlaps the existing context on key 'c1'.
    parser = Mock()
    parser.return_value = {'a': 'av1', 'c1': 'new value from parsed'}
    mock_moduleloader.return_value.get_parsed_context = parser
    mock_get_pipeline.return_value = get_pipe_def(
        {'context_parser': 'specifiedparserhere'})
    pipeline = Pipeline('arb', context_args='arb context input')
    context = Context({'c1': 'cv1', 'c2': 'cv2'})
    pipeline.run(context)
    mock_moduleloader.assert_called_once_with('specifiedparserhere')
    parser.assert_called_once_with('arb context input')
    mock_get_pipeline.assert_called_once_with(name='arb', parent=None)
    # Parsed values win on collision; non-colliding keys are preserved.
    assert context == {'a': 'av1', 'c1': 'new value from parsed', 'c2': 'cv2'}
# endregion context parser
# region loader
def test_arbitrary_loader_module_not_found():
    """Raise when loader not found."""
    loader_cache.clear()
    pipe = Pipeline(
        'arb pipe', context_args='arb context input', loader='not_found_loader')
    with pytest.raises(PyModuleNotFoundError):
        pipe.run(Context())
def test_loader_no_get_pipeline_definition():
    """Arbitrary loader module without `get_pipeline_definition` function."""
    loader_cache.clear()
    import sys
    # Use this very test module as the loader: it has no
    # get_pipeline_definition attribute, so lookup must fail.
    current_module = sys.modules[__name__]
    pipeline = Pipeline('arb pipe',
                        context_args='arb context input',
                        loader=__name__)
    with patch_logger(
            'pypyr.cache.loadercache',
            logging.ERROR) as mock_logger_error:
        with pytest.raises(AttributeError) as err:
            pipeline.run(Context())
    assert str(err.value) == f"module '{__name__}' " \
        "has no attribute 'get_pipeline_definition'"
    # The cache logs an explanatory error before raising.
    mock_logger_error.assert_called_once_with(
        f"The pipeline loader {current_module} doesn't have a "
        "get_pipeline_definition(pipeline_name, parent) function."
    )
@patch('pypyr.loaders.file.get_pipeline_definition')
def test_empty_loader_set_up_to_default(mock_get_pipeline_definition):
    """Default loader should be pypyr.loaders.file."""
    loader_cache.clear()
    mock_get_pipeline_definition.return_value = get_pipe_def({'steps': None})
    # No loader argument: pypyr.loaders.file must be used implicitly.
    pipeline = Pipeline('arb pipe', context_args='arb context input')
    pipeline.run(Context())
    mock_get_pipeline_definition.assert_called_once_with(
        pipeline_name='arb pipe',
        parent=None
    )
@patch('pypyr.loaders.file.get_pipeline_definition')
def test_empty_loader_set_up_to_default_with_parent(
        mock_get_pipeline_definition):
    """Default loader should be pypyr.loaders.file with parent."""
    loader_cache.clear()
    mock_get_pipeline_definition.return_value = get_pipe_def({'steps': None})
    pipeline = Pipeline('arb pipe', context_args='arb context input')
    # load_and_run_pipeline (vs run) accepts an explicit parent dir.
    pipeline.load_and_run_pipeline(Context(), parent='/arb/dir')
    mock_get_pipeline_definition.assert_called_once_with(
        pipeline_name='arb pipe',
        parent='/arb/dir'
    )
def test_arb_loader():
    """Test loader set up."""
    loader_cache.clear()
    # py_dir='tests' makes tests/arbpack/arbloader.py importable.
    pipeline = Pipeline('arb pipe',
                        context_args='arb context input',
                        loader='arbpack.arbloader',
                        py_dir='tests')
    pipeline.load_and_run_pipeline(Context(), parent='/arb/dir')
    loader = loader_cache.get_pype_loader('arbpack.arbloader')
    assert loader.name == 'arbpack.arbloader'
    # The arb test loader echoes name + parent back as the pipeline payload.
    assert loader.get_pipeline('arb pipe', '/arb/dir').pipeline == {
        'pipeline_name': 'arb pipe', 'parent': '/arb/dir'}
    loader_cache.clear()
def test_arb_loader_no_parent():
    """Test loader set up with no parent."""
    loader_cache.clear()
    # py_dir='tests' makes tests/arbpack/arbloader.py importable.
    pipeline = Pipeline('arb pipe',
                        context_args='arb context input',
                        loader='arbpack.arbloader',
                        py_dir='tests')
    pipeline.load_and_run_pipeline(Context())
    loader = loader_cache.get_pype_loader('arbpack.arbloader')
    assert loader.name == 'arbpack.arbloader'
    # No parent given: the loader sees parent=None in the echoed payload.
    assert loader.get_pipeline('arb pipe', None).pipeline == {
        'pipeline_name': 'arb pipe', 'parent': None}
    loader_cache.clear()
# endregion loader
# region run_pipeline
@patch('pypyr.pipeline.StepsRunner', autospec=True)
@patch('pypyr.cache.parsercache.contextparser_cache.get_context_parser')
@patch('pypyr.cache.loadercache.Loader.get_pipeline')
@patch('pypyr.moduleloader.add_sys_path')
def test_load_and_run_pipeline_pass_minimal(
        mock_add_sys_path,
        mock_get_pipe,
        mock_parser,
        mock_steps_runner):
    """Create implicit context if doesn't exist & no context parser."""
    # NB: @patch decorators apply bottom-up, so args are in reverse order
    # of the decorator stack.
    pipe_def = get_pipe_def({'arb': 'pipe'})
    mock_get_pipe.return_value = pipe_def
    parser = Mock()
    parser.return_value = {'a': 'b'}
    mock_parser.return_value = parser
    pipeline = Pipeline('arb pipe', context_args='arb context input')
    context_instance = Context()
    # Patch the Context class so we can verify the implicit instantiation
    # that happens when None context passed.
    with patch('pypyr.pipeline.Context') as mock_context:
        mock_context.return_value = context_instance
        pipeline.load_and_run_pipeline(None)
    mock_add_sys_path.assert_not_called()
    mock_get_pipe.assert_called_once_with(name='arb pipe',
                                          parent=None)
    # Pipeline has no context_parser key, so the parser never runs even
    # though context_args were supplied.
    mock_parser.assert_not_called()
    parser.assert_not_called()
    mock_context.assert_called_once()
    # assure that stack empty when done
    assert not context_instance._stack
    mock_steps_runner.assert_called_once_with(
        pipeline_body={'arb': 'pipe'},
        context=context_instance)
    # Default groups run; the failure handler is never invoked.
    sr = mock_steps_runner.return_value
    sr.run_step_groups.assert_called_once_with(groups=['steps'],
                                               success_group='on_success',
                                               failure_group='on_failure')
    sr.run_failure_step_group.assert_not_called()
@patch('pypyr.pipeline.StepsRunner', autospec=True)
@patch('pypyr.cache.parsercache.contextparser_cache.get_context_parser')
@patch('pypyr.cache.loadercache.Loader.get_pipeline')
@patch('pypyr.moduleloader.add_sys_path')
def test_load_and_run_pipeline_pass_skip_parse_context(
        mock_add_sys_path,
        mock_get_pipe,
        mock_parser,
        mock_steps_runner):
    """Explicit False parse_input doesn't run parser."""
    pipe_def = get_pipe_def({'arb': 'pipe'})
    mock_get_pipe.return_value = pipe_def
    parser = Mock()
    parser.return_value = {'a': 'b'}
    mock_parser.return_value = parser
    context = Context({'c': 'd'})
    # parse_input=False should suppress context parsing entirely.
    pipeline = Pipeline('arb pipe', parse_input=False)
    pipeline.load_and_run_pipeline(context)
    mock_add_sys_path.assert_not_called()
    mock_get_pipe.assert_called_once_with(name='arb pipe',
                                          parent=None)
    mock_parser.assert_not_called()
    parser.assert_not_called()
    mock_steps_runner.assert_called_once_with(
        pipeline_body={'arb': 'pipe'},
        context=context)
    # Steps run as normal; the failure handler is not invoked.
    sr = mock_steps_runner.return_value
    sr.run_step_groups.assert_called_once_with(groups=['steps'],
                                               success_group='on_success',
                                               failure_group='on_failure')
    sr.run_failure_step_group.assert_not_called()
@patch('pypyr.pipeline.StepsRunner', autospec=True)
@patch('pypyr.cache.parsercache.contextparser_cache.get_context_parser')
@patch('pypyr.cache.loadercache.Loader.get_pipeline')
@patch('pypyr.moduleloader.add_sys_path')
def test_load_and_run_pipeline_parse_context_error(
        mock_add_sys_path,
        mock_get_pipe,
        mock_parser,
        mock_steps_runner):
    """run_pipeline on_failure with Context as is if parse fails."""
    pipe_def = get_pipe_def({'context_parser': 'arb parser'})
    mock_get_pipe.return_value = pipe_def
    parser = Mock()
    # Parser blows up before it can merge anything into context.
    parser.side_effect = ContextError
    mock_parser.return_value = parser
    context = Context({'c': 'd'})
    pipeline = Pipeline('arb pipe',
                        context_args='arb context input',
                        parse_input=True)
    with pytest.raises(ContextError):
        pipeline.load_and_run_pipeline(context)
    # Context unchanged because the parse never produced output.
    assert context == {'c': 'd'}
    mock_add_sys_path.assert_not_called()
    mock_get_pipe.assert_called_once_with(name='arb pipe',
                                          parent=None)
    mock_parser.assert_called_once_with('arb parser')
    parser.assert_called_once_with('arb context input')
    mock_steps_runner.assert_called_once_with(
        pipeline_body=pipe_def.pipeline,
        context=context)
    # No called steps, just on_failure since err on parse context already
    sr = mock_steps_runner.return_value
    sr.run_step_groups.assert_not_called()
    sr.run_failure_step_group.assert_called_once_with('on_failure')
@patch('pypyr.pipeline.StepsRunner', autospec=True)
@patch('pypyr.cache.parsercache.contextparser_cache.get_context_parser')
@patch('pypyr.cache.loadercache.Loader.get_pipeline')
@patch('pypyr.moduleloader.add_sys_path')
def test_load_and_run_pipeline_steps_error_raises(
        mock_add_sys_path,
        mock_get_pipe,
        mock_parser,
        mock_steps_runner):
    """Run on_failure and raise error if steps group fails."""
    # First time it runs is steps - give a KeyNotInContextError.
    pipe_def = get_pipe_def({'context_parser': 'arb parser'})
    mock_get_pipe.return_value = pipe_def
    parser = Mock()
    parser.return_value = {'a': 'b'}
    mock_parser.return_value = parser
    context = Context({'c': 'd'})
    pipeline = Pipeline('arb pipe',
                        context_args='arb context input',
                        parse_input=True)
    # Fail the steps group itself (not the parser) this time.
    mock_steps_runner.return_value.run_step_groups.side_effect = (
        KeyNotInContextError)
    with pytest.raises(KeyNotInContextError):
        pipeline.run(context)
    # Parser output merged into context before the steps group failed.
    assert context == {'a': 'b', 'c': 'd'}
    mock_add_sys_path.assert_not_called()
    mock_get_pipe.assert_called_once_with(name='arb pipe',
                                          parent=None)
    mock_parser.assert_called_once_with('arb parser')
    parser.assert_called_once_with('arb context input')
    mock_steps_runner.return_value.run_step_groups.assert_called_once_with(
        groups=['steps'],
        success_group='on_success',
        failure_group='on_failure'
    )
    mock_steps_runner.assert_called_once_with(
        pipeline_body=pipe_def.pipeline,
        context=context)
@patch('pypyr.pipeline.StepsRunner', autospec=True)
@patch('pypyr.cache.parsercache.contextparser_cache.get_context_parser')
@patch('pypyr.cache.loadercache.Loader.get_pipeline')
@patch('pypyr.moduleloader.add_sys_path')
def test_load_and_run_pipeline_with_existing_context_pass(
        mock_add_sys_path,
        mock_get_pipe,
        mock_parser,
        mock_steps_runner):
    """Pipeline runs with existing context."""
    pipe_def = get_pipe_def({'context_parser': 'arb parser'})
    mock_get_pipe.return_value = pipe_def
    parser = Mock()
    parser.return_value = {'1': 'context 1', '2': 'context2'}
    mock_parser.return_value = parser
    # Key '2' exists in both: parser output should win on merge.
    context = Context({'2': 'original', '3': 'new'})
    pipeline = Pipeline('arb pipe',
                        context_args='arb context input')
    pipeline.load_and_run_pipeline(context)
    assert not context.is_in_pipeline_scope
    mock_add_sys_path.assert_not_called()
    mock_get_pipe.assert_called_once_with(name='arb pipe',
                                          parent=None)
    mock_parser.assert_called_once_with('arb parser')
    parser.assert_called_once_with('arb context input')
    mock_steps_runner.return_value.run_step_groups.assert_called_once_with(
        groups=['steps'],
        success_group='on_success',
        failure_group='on_failure'
    )
    # Merged context: parsed values over existing, untouched keys kept.
    mock_steps_runner.assert_called_once_with(
        pipeline_body={'context_parser': 'arb parser'},
        context={'1': 'context 1',
                 '2': 'context2',
                 '3': 'new'})
@patch('pypyr.pipeline.StepsRunner', autospec=True)
@patch('pypyr.cache.parsercache.contextparser_cache.get_context_parser')
@patch('pypyr.cache.loadercache.Loader.get_pipeline')
@patch('pypyr.moduleloader.add_sys_path')
def test_load_and_run_pipeline_with_dir_specified(
        mock_add_sys_path,
        mock_get_pipe,
        mock_parser,
        mock_steps_runner):
    """Py dir passed to add_sys_path."""
    pipe_yaml = {'context_parser': 'arb parser'}
    pipe_def = get_pipe_def(pipe_yaml)
    mock_get_pipe.return_value = pipe_def
    parser = Mock()
    parser.return_value = {'1': 'context 1', '2': 'context2'}
    mock_parser.return_value = parser
    context = Context({'2': 'original', '3': 'new'})
    pipeline = Pipeline('arb pipe',
                        context_args='arb context input',
                        py_dir='/arb/dir')
    pipeline.load_and_run_pipeline(context)
    assert not context.is_in_pipeline_scope
    # This is the only difference vs. the no-dir test: py_dir propagates.
    mock_add_sys_path.assert_called_once_with('/arb/dir')
    mock_get_pipe.assert_called_once_with(name='arb pipe',
                                          parent=None)
    mock_parser.assert_called_once_with('arb parser')
    parser.assert_called_once_with('arb context input')
    mock_steps_runner.return_value.run_step_groups.assert_called_once_with(
        groups=['steps'],
        success_group='on_success',
        failure_group='on_failure'
    )
    mock_steps_runner.assert_called_once_with(pipeline_body=pipe_yaml,
                                              context={'1': 'context 1',
                                                       '2': 'context2',
                                                       '3': 'new'})
@patch('pypyr.pipeline.StepsRunner', autospec=True)
@patch('pypyr.cache.parsercache.contextparser_cache.get_context_parser')
@patch('pypyr.cache.loadercache.Loader.get_pipeline')
@patch('pypyr.moduleloader.add_sys_path')
def test_load_and_run_pipeline_with_group_specified(
        mock_add_sys_path,
        mock_get_pipe,
        mock_parser,
        mock_steps_runner):
    """Run pipeline with specified groups."""
    pipe_yaml = {'arb': 'pipe'}
    pipe_def = get_pipe_def(pipe_yaml)
    mock_get_pipe.return_value = pipe_def
    parser = Mock()
    parser.return_value = {'1': 'context 1', '2': 'context2'}
    mock_parser.return_value = parser
    context = Context({'2': 'original', '3': 'new'})
    pipeline = Pipeline('arb pipe',
                        context_args='arb context input',
                        groups=['arb1', 'arb2'])
    pipeline.load_and_run_pipeline(context)
    assert not context.is_in_pipeline_scope
    mock_add_sys_path.assert_not_called()
    mock_get_pipe.assert_called_once_with(name='arb pipe',
                                          parent=None)
    # Pipeline body has no context_parser, so parsing never happens and
    # the input context passes through unmerged.
    mock_parser.assert_not_called()
    parser.assert_not_called()
    # Explicit groups replace defaults; success/failure default to None.
    mock_steps_runner.return_value.run_step_groups.assert_called_once_with(
        groups=['arb1', 'arb2'],
        success_group=None,
        failure_group=None
    )
    mock_steps_runner.assert_called_once_with(pipeline_body=pipe_yaml,
                                              context={'2': 'original',
                                                       '3': 'new'})
@patch('pypyr.pipeline.StepsRunner', autospec=True)
@patch('pypyr.cache.parsercache.contextparser_cache.get_context_parser')
@patch('pypyr.cache.loadercache.Loader.get_pipeline')
@patch('pypyr.moduleloader.add_sys_path')
def test_load_and_run_pipeline_with_parent_specified(
        mock_add_sys_path,
        mock_get_pipe,
        mock_parser,
        mock_steps_runner):
    """Run pipeline with specified parent."""
    pipe_yaml = {'arb': 'pipe'}
    pipe_def = get_pipe_def(pipe_yaml)
    mock_get_pipe.return_value = pipe_def
    parser = Mock()
    parser.return_value = {'1': 'context 1', '2': 'context2'}
    mock_parser.return_value = parser
    context = Context({'2': 'original', '3': 'new'})
    pipeline = Pipeline('arb pipe')
    # parent passed positionally to load_and_run_pipeline.
    pipeline.load_and_run_pipeline(context, '/parent')
    assert not context.is_in_pipeline_scope
    mock_add_sys_path.assert_not_called()
    # parent flows through to the loader's get_pipeline call.
    mock_get_pipe.assert_called_once_with(name='arb pipe',
                                          parent='/parent')
    mock_parser.assert_not_called()
    parser.assert_not_called()
    mock_steps_runner.return_value.run_step_groups.assert_called_once_with(
        groups=['steps'],
        success_group='on_success',
        failure_group='on_failure'
    )
    mock_steps_runner.assert_called_once_with(pipeline_body=pipe_yaml,
                                              context={'2': 'original',
                                                       '3': 'new'})
@patch('pypyr.pipeline.StepsRunner', autospec=True)
@patch('pypyr.cache.parsercache.contextparser_cache.get_context_parser')
@patch('pypyr.cache.loadercache.Loader.get_pipeline')
@patch('pypyr.moduleloader.add_sys_path')
def test_load_and_run_pipeline_with_success_group_specified(
        mock_add_sys_path,
        mock_get_pipe,
        mock_parser,
        mock_steps_runner):
    """Run pipeline with specified success group."""
    pipe_yaml = {'context_parser': 'arb parser'}
    pipe_def = get_pipe_def(pipe_yaml)
    mock_get_pipe.return_value = pipe_def
    parser = Mock()
    parser.return_value = {'1': 'context 1', '2': 'context2'}
    mock_parser.return_value = parser
    context = Context({'2': 'original', '3': 'new'})
    pipeline = Pipeline('arb pipe',
                        context_args='arb context input',
                        success_group='arb1')
    pipeline.load_and_run_pipeline(context)
    assert not context.is_in_pipeline_scope
    mock_add_sys_path.assert_not_called()
    mock_get_pipe.assert_called_once_with(name='arb pipe',
                                          parent=None)
    mock_parser.assert_called_once_with('arb parser')
    parser.assert_called_once_with('arb context input')
    # Custom success group nulls the default failure group.
    mock_steps_runner.return_value.run_step_groups.assert_called_once_with(
        groups=['steps'],
        success_group='arb1',
        failure_group=None
    )
    mock_steps_runner.assert_called_once_with(pipeline_body=pipe_yaml,
                                              context={'1': 'context 1',
                                                       '2': 'context2',
                                                       '3': 'new'})
@patch('pypyr.pipeline.StepsRunner', autospec=True)
@patch('pypyr.cache.parsercache.contextparser_cache.get_context_parser')
@patch('pypyr.cache.loadercache.Loader.get_pipeline')
@patch('pypyr.moduleloader.add_sys_path')
def test_load_and_run_pipeline_with_failure_group_specified(
        mock_add_sys_path,
        mock_get_pipe,
        mock_parser,
        mock_steps_runner):
    """Run pipeline with specified failure group."""
    pipe_yaml = {'context_parser': 'arb parser'}
    pipe_def = get_pipe_def(pipe_yaml)
    mock_get_pipe.return_value = pipe_def
    parser = Mock()
    parser.return_value = {'1': 'context 1', '2': 'context2'}
    mock_parser.return_value = parser
    context = Context({'2': 'original', '3': 'new'})
    pipeline = Pipeline('arb pipe',
                        context_args='arb context input',
                        failure_group='arb1')
    pipeline.load_and_run_pipeline(context)
    assert not context.is_in_pipeline_scope
    mock_add_sys_path.assert_not_called()
    mock_get_pipe.assert_called_once_with(name='arb pipe',
                                          parent=None)
    mock_parser.assert_called_once_with('arb parser')
    parser.assert_called_once_with('arb context input')
    # Custom failure group nulls the default success group.
    mock_steps_runner.return_value.run_step_groups.assert_called_once_with(
        groups=['steps'],
        success_group=None,
        failure_group='arb1'
    )
    mock_steps_runner.assert_called_once_with(pipeline_body=pipe_yaml,
                                              context={'1': 'context 1',
                                                       '2': 'context2',
                                                       '3': 'new'})
@patch('pypyr.pipeline.StepsRunner', autospec=True)
@patch('pypyr.cache.parsercache.contextparser_cache.get_context_parser')
@patch('pypyr.cache.loadercache.Loader.get_pipeline')
@patch('pypyr.moduleloader.add_sys_path')
def test_load_and_run_pipeline_with_group_and_failure_group_specified(
        mock_add_sys_path,
        mock_get_pipe,
        mock_parser,
        mock_steps_runner):
    """Pass run_pipeline with specified group and failure group."""
    pipe_yaml = {'context_parser': 'arb parser'}
    pipe_def = get_pipe_def(pipe_yaml)
    mock_get_pipe.return_value = pipe_def
    parser = Mock()
    parser.return_value = {'1': 'context 1', '2': 'context2'}
    mock_parser.return_value = parser
    context = Context({'2': 'original', '3': 'new'})
    pipeline = Pipeline('arb pipe',
                        context_args='arb context input',
                        groups=['arb1'],
                        failure_group='arb2')
    pipeline.load_and_run_pipeline(context)
    assert not context.is_in_pipeline_scope
    mock_add_sys_path.assert_not_called()
    mock_parser.assert_called_once_with('arb parser')
    parser.assert_called_once_with('arb context input')
    mock_get_pipe.assert_called_once_with(name='arb pipe',
                                          parent=None)
    # Explicit groups + failure group; success group stays None.
    mock_steps_runner.return_value.run_step_groups.assert_called_once_with(
        groups=['arb1'],
        success_group=None,
        failure_group='arb2'
    )
    mock_steps_runner.assert_called_once_with(pipeline_body=pipe_yaml,
                                              context={'1': 'context 1',
                                                       '2': 'context2',
                                                       '3': 'new'})
@patch('pypyr.pipeline.StepsRunner', autospec=True)
@patch('pypyr.cache.parsercache.contextparser_cache.get_context_parser')
@patch('pypyr.cache.loadercache.Loader.get_pipeline')
@patch('pypyr.moduleloader.add_sys_path')
def test_run_pipeline_parse_context_error_failure(
        mock_add_sys_path,
        mock_get_pipe,
        mock_parser,
        mock_steps_runner):
    """Run on_failure on context parse exception."""
    pipe_yaml = {'context_parser': 'arb parser'}
    pipe_def = get_pipe_def(pipe_yaml)
    mock_get_pipe.return_value = pipe_def
    parser = Mock()
    # Parser fails with an arbitrary error before any steps run.
    parser.side_effect = ValueError('arb')
    mock_parser.return_value = parser
    context = Context({'2': 'original', '3': 'new'})
    pipeline = Pipeline('arb pipe',
                        context_args='arb context input',
                        groups=['gr'],
                        success_group='sg',
                        failure_group='fg')
    with pytest.raises(ValueError) as err:
        pipeline.run(context)
    assert str(err.value) == 'arb'
    assert not context.is_in_pipeline_scope
    mock_add_sys_path.assert_not_called()
    mock_parser.assert_called_once_with('arb parser')
    parser.assert_called_once_with('arb context input')
    # Context passed through unmerged because the parse failed.
    mock_steps_runner.assert_called_once_with(pipeline_body=pipe_yaml,
                                              context={'2': 'original',
                                                       '3': 'new'})
    # No called steps, just on_failure since err on parse context already
    sr = mock_steps_runner.return_value
    sr.run_step_groups.assert_not_called()
    sr.run_failure_step_group.assert_called_once_with('fg')
@patch('pypyr.pipeline.StepsRunner', autospec=True)
@patch('pypyr.cache.parsercache.contextparser_cache.get_context_parser')
@patch('pypyr.cache.loadercache.Loader.get_pipeline')
@patch('pypyr.moduleloader.add_sys_path')
def test_run_pipeline_parse_context_error_failure_stop(
        mock_add_sys_path,
        mock_get_pipe,
        mock_parser,
        mock_steps_runner):
    """Run on_failure on context parser exception with Stop."""
    pipe_yaml = {'context_parser': 'arb parser'}
    pipe_def = get_pipe_def(pipe_yaml)
    mock_get_pipe.return_value = pipe_def
    parser = Mock()
    parser.side_effect = ValueError('arb')
    mock_parser.return_value = parser
    sr = mock_steps_runner.return_value
    # Stop raised from the failure handler should override the original
    # parse error and propagate instead.
    sr.run_failure_step_group.side_effect = Stop()
    context = Context()
    pipeline = Pipeline('arb pipe',
                        context_args='arb context input')
    with pytest.raises(Stop):
        pipeline.load_and_run_pipeline(context)
    assert not context.is_in_pipeline_scope
    mock_add_sys_path.assert_not_called()
    mock_parser.assert_called_once_with('arb parser')
    parser.assert_called_once_with('arb context input')
    mock_steps_runner.assert_called_once_with(pipeline_body=pipe_yaml,
                                              context={})
    # No called steps, just on_failure since err on parse context already
    sr.run_step_groups.assert_not_called()
    sr.run_failure_step_group.assert_called_once_with('on_failure')
@patch('pypyr.pipeline.StepsRunner', autospec=True)
@patch('pypyr.cache.parsercache.contextparser_cache.get_context_parser')
@patch('pypyr.cache.loadercache.Loader.get_pipeline')
@patch('pypyr.moduleloader.add_sys_path')
def test_run_pipeline_parse_context_error_failure_stopstepgroup(
        mock_add_sys_path,
        mock_get_pipe,
        mock_parser,
        mock_steps_runner):
    """Context failure handler swallows StopStepGroup."""
    pipe_yaml = {'context_parser': 'arb parser'}
    pipe_def = get_pipe_def(pipe_yaml)
    mock_get_pipe.return_value = pipe_def
    parser = Mock()
    parser.side_effect = ValueError('arb')
    mock_parser.return_value = parser
    sr = mock_steps_runner.return_value
    # Unlike Stop, StopStepGroup from the failure handler is swallowed:
    # the original parse error surfaces to the caller.
    sr.run_failure_step_group.side_effect = StopStepGroup()
    context = Context()
    pipeline = Pipeline('arb pipe',
                        context_args='arb context input')
    with pytest.raises(ValueError) as err:
        pipeline.load_and_run_pipeline(context)
    assert str(err.value) == 'arb'
    assert not context.is_in_pipeline_scope
    mock_add_sys_path.assert_not_called()
    mock_parser.assert_called_once_with('arb parser')
    parser.assert_called_once_with('arb context input')
    mock_steps_runner.assert_called_once_with(pipeline_body=pipe_yaml,
                                              context={})
    # No called steps, just on_failure since err on parse context already
    sr.run_step_groups.assert_not_called()
    sr.run_failure_step_group.assert_called_once_with('on_failure')
# endregion run_pipeline
# region Stop & StopPipeline
# region stop helpers
def get_test_pipeline_definition(pipeline):
    """Wrap input pipeline (dict) into a PipelineDefinition.

    Args:
        pipeline (dict-like): pipeline payload.

    Returns:
        PipelineDefinition with pipeline payload and arb PipelineInfo.
    """
    arb_info = PipelineInfo(pipeline_name='arbpipe',
                            loader='arbloader',
                            parent='arbdir')
    return PipelineDefinition(pipeline=pipeline, info=arb_info)
def get_step_pipeline():
    """Return the standard step test payload as a PipelineDefinition."""
    payload = get_step_pipeline_payload()
    return get_test_pipeline_definition(payload)
def get_step_pipeline_payload():
    """Bare dict pipeline payload.

    Six step-groups sg1..sg6, each with two steps, except sg5 which has
    only one.
    """
    groups = {}
    for n in range(1, 7):
        steps = [f'sg{n}.step1']
        # sg5 is the single-step group.
        if n != 5:
            steps.append(f'sg{n}.step2')
        groups[f'sg{n}'] = steps
    return groups
def nothing_step(context):
    """Mock step: deliberate no-op."""
    return None
def stop_pipe_step(context):
    """Mock step that halts the current pipeline via StopPipeline."""
    raise StopPipeline()
def stop_all_step(context):
    """Mock step that halts all processing via Stop."""
    raise Stop()
# endregion stop helpers
@patch('pypyr.cache.loadercache.Loader.get_pipeline')
@patch('pypyr.cache.stepcache.step_cache.get_step')
def test_stop_pipeline(mock_step_cache, mock_get_pipe):
    """When StopPipeline stop pipeline execution."""
    # Sequence: sg2 - sg2.1, 2.2
    # sg3 - sg3.1 (StopPipeline)
    # side_effect list serves steps in invocation order.
    mock_step_cache.side_effect = [
        nothing_step,  # 2.1
        nothing_step,  # 2.2
        stop_pipe_step,  # 3.1
    ]
    mock_get_pipe.return_value = get_test_pipeline_definition(
        get_step_pipeline_payload())
    context = Context()
    pipeline = Pipeline('arb pipe',
                        groups=['sg2', 'sg3', 'sg4', 'sg1'],
                        success_group='sg5',
                        failure_group=None)
    pipeline.run(context)
    assert not context.is_in_pipeline_scope
    # Execution stops at sg3.step1: sg3.step2, sg4, sg1 & sg5 never fetched.
    assert mock_step_cache.mock_calls == [call('sg2.step1'),
                                          call('sg2.step2'),
                                          call('sg3.step1')
                                          ]
@patch('pypyr.cache.loadercache.Loader.get_pipeline')
@patch('pypyr.cache.stepcache.step_cache.get_step')
def test_stop_pipeline_for(mock_step_cache, mock_get_pipe):
    """When StopPipeline stop pipeline execution in for loop."""
    # Sequence: sg2 - sg2.1, 2.2
    # sg3 - sg3.1 x2 (StopPipeline)
    nothing_mock = DeepCopyMagicMock()
    mock312 = DeepCopyMagicMock()
    def step31(context):
        # Record call, then stop on the 2nd foreach iteration.
        mock312(context)
        if context['i'] == 'two':
            raise StopPipeline()
    mock_step_cache.side_effect = [
        nothing_mock,  # 2.1
        nothing_mock,  # 2.2
        step31,  # 3.1
    ]
    mock_get_pipe.return_value = get_for_step_pipeline()
    context = Context()
    pipeline = Pipeline('arb pipe',
                        groups=['sg2', 'sg3', 'sg4', 'sg1'],
                        success_group='sg5',
                        failure_group=None)
    pipeline.run(context)
    assert not context.is_in_pipeline_scope
    assert nothing_mock.mock_calls == [call({}),
                                       call({})
                                       ]
    # foreach ran for 'one' and 'two' only; 'three' never reached.
    assert mock312.mock_calls == [call({'i': 'one'}),
                                  call({'i': 'two'})]
    assert mock_step_cache.mock_calls == [call('sg2.step1'),
                                          call('sg2.step2'),
                                          call('sg3.step1')
                                          ]
def get_retry_step_pipeline():
    """Bare dict pipeline payload with a retry decorator on sg3.step1."""
    payload = {}
    for n in (1, 2):
        payload[f'sg{n}'] = [f'sg{n}.step1', f'sg{n}.step2']
    # sg3.step1 retries up to 3 times.
    payload['sg3'] = [{'name': 'sg3.step1',
                       'retry': {'max': 3}},
                      'sg3.step2']
    payload['sg4'] = ['sg4.step1', 'sg4.step2']
    payload['sg5'] = ['sg5.step1']
    payload['sg6'] = ['sg6.step1', 'sg6.step2']
    return payload
@patch('pypyr.cache.loadercache.Loader.get_pipeline')
@patch('pypyr.cache.stepcache.step_cache.get_step')
def test_stop_pipeline_retry(mock_step_cache, mock_get_pipe):
    """When StopPipeline stop pipeline execution in retry loop."""
    # Sequence: sg2 - sg2.1, 2.2
    # sg3 - sg3.1 x2 (StopPipeline)
    nothing_mock = DeepCopyMagicMock()
    mock312 = DeepCopyMagicMock()
    def step31(context):
        # Fail on retry 1 (triggering a retry), StopPipeline on retry 2.
        mock312(context)
        if context['retryCounter'] == 2:
            raise StopPipeline()
        else:
            raise ValueError(context['retryCounter'])
    mock_step_cache.side_effect = [
        nothing_mock,  # 2.1
        nothing_mock,  # 2.2
        step31,  # 3.1
    ]
    pipe_yaml = get_retry_step_pipeline()
    mock_get_pipe.return_value = get_test_pipeline_definition(pipe_yaml)
    context = Context()
    pipeline = Pipeline('arb pipe',
                        groups=['sg2', 'sg3', 'sg4', 'sg1'],
                        success_group='sg5',
                        failure_group=None)
    pipeline.run(context)
    assert not context.is_in_pipeline_scope
    assert nothing_mock.mock_calls == [call({}),
                                       call({})
                                       ]
    # StopPipeline exits mid-retry: max of 3 never reached.
    assert mock312.mock_calls == [call({'retryCounter': 1}),
                                  call({'retryCounter': 2})]
    assert mock_step_cache.mock_calls == [call('sg2.step1'),
                                          call('sg2.step2'),
                                          call('sg3.step1')
                                          ]
@patch('pypyr.cache.loadercache.Loader.get_pipeline')
@patch('pypyr.cache.stepcache.step_cache.get_step')
def test_stop_all(mock_step_cache, mock_get_pipe):
    """Stop stops pipeline execution."""
    # Sequence: sg2 - sg2.1, 2.2
    # sg3 - sg3.1 (Stop)
    mock_step_cache.side_effect = [
        nothing_step,  # 2.1
        nothing_step,  # 2.2
        stop_all_step,  # 3.1
    ]
    mock_get_pipe.return_value = get_step_pipeline()
    context = Context()
    pipeline = Pipeline('arb pipe',
                        groups=['sg2', 'sg3', 'sg4', 'sg1'],
                        success_group='sg5',
                        failure_group=None)
    pipeline.run(context)
    assert not context.is_in_pipeline_scope
    # Stop halts everything at sg3.step1; later groups never fetched.
    assert mock_step_cache.mock_calls == [call('sg2.step1'),
                                          call('sg2.step2'),
                                          call('sg3.step1')
                                          ]
def get_while_step_pipeline():
    """PipelineDefinition with a while decorator (max 3) on sg3.step1."""
    payload = {}
    for n in (1, 2):
        payload[f'sg{n}'] = [f'sg{n}.step1', f'sg{n}.step2']
    # sg3.step1 loops up to 3 times under while.
    payload['sg3'] = [{'name': 'sg3.step1',
                       'while': {
                           'max': 3},
                       },
                      'sg3.step2']
    payload['sg4'] = ['sg4.step1', 'sg4.step2']
    payload['sg5'] = ['sg5.step1']
    payload['sg6'] = ['sg6.step1', 'sg6.step2']
    return get_test_pipeline_definition(payload)
@patch('pypyr.cache.loadercache.Loader.get_pipeline')
@patch('pypyr.cache.stepcache.step_cache.get_step')
def test_stop_all_while(mock_step_cache, mock_get_pipe):
    """Stop stops pipeline execution inside a while."""
    # Sequence: sg2 - sg2.1, 2.2
    # sg3 - sg3.1 while max 3, Stop on iteration 2
    nothing_mock = DeepCopyMagicMock()
    mock312 = DeepCopyMagicMock()
    def step31(context):
        # Record call, then Stop on the 2nd while iteration.
        mock312(context)
        if context['whileCounter'] == 2:
            raise Stop()
    mock_step_cache.side_effect = [
        nothing_mock,  # 2.1
        nothing_mock,  # 2.2
        step31  # 3.1.2
    ]
    mock_get_pipe.return_value = get_while_step_pipeline()
    context = Context()
    pipeline = Pipeline('arb pipe',
                        groups=['sg2', 'sg3', 'sg4', 'sg1'],
                        success_group='sg5',
                        failure_group=None)
    pipeline.run(context)
    assert not context.is_in_pipeline_scope
    assert mock_step_cache.mock_calls == [call('sg2.step1'),
                                          call('sg2.step2'),
                                          call('sg3.step1')
                                          ]
    assert nothing_mock.mock_calls == [call({}),
                                       call({})
                                       ]
    # while ran twice of max 3; Stop cut the 3rd iteration.
    assert mock312.mock_calls == [call({'whileCounter': 1}),
                                  call({'whileCounter': 2})]
def get_for_step_pipeline():
    """Return the foreach test payload wrapped in a PipelineDefinition."""
    payload = get_for_step_pipeline_payload()
    return get_test_pipeline_definition(payload)
def get_for_step_pipeline_payload():
    """Bare dict pipeline payload with a foreach decorator on sg3.step1."""
    payload = {}
    for n in (1, 2):
        payload[f'sg{n}'] = [f'sg{n}.step1', f'sg{n}.step2']
    # sg3.step1 iterates over three items.
    payload['sg3'] = [{'name': 'sg3.step1',
                       'foreach': ['one', 'two', 'three']},
                      'sg3.step2']
    payload['sg4'] = ['sg4.step1', 'sg4.step2']
    payload['sg5'] = ['sg5.step1']
    payload['sg6'] = ['sg6.step1', 'sg6.step2']
    return payload
@patch('pypyr.cache.loadercache.Loader.get_pipeline')
@patch('pypyr.cache.stepcache.step_cache.get_step')
def test_stop_all_for(mock_step_cache, mock_get_pipe):
    """Stop stops pipeline execution inside a for loop."""
    # Sequence: sg2 - sg2.1, 2.2
    # sg3 - sg3.1 foreach of 3, Stop on item 2
    nothing_mock = DeepCopyMagicMock()
    mock312 = DeepCopyMagicMock()
    def step31(context):
        # Record call, then Stop on the 2nd foreach item.
        mock312(context)
        if context['i'] == 'two':
            raise Stop()
    mock_step_cache.side_effect = [
        nothing_mock,  # 2.1
        nothing_mock,  # 2.2
        step31  # 3.1.2
    ]
    mock_get_pipe.return_value = get_for_step_pipeline()
    context = Context()
    pipeline = Pipeline('arb pipe',
                        groups=['sg2', 'sg3', 'sg4', 'sg1'],
                        success_group='sg5',
                        failure_group=None)
    pipeline.run(context)
    assert not context.is_in_pipeline_scope
    assert mock_step_cache.mock_calls == [call('sg2.step1'),
                                          call('sg2.step2'),
                                          call('sg3.step1')
                                          ]
    assert nothing_mock.mock_calls == [call({}),
                                       call({})
                                       ]
    # foreach ran for 'one' and 'two'; Stop prevented 'three'.
    assert mock312.mock_calls == [call({'i': 'one'}),
                                  call({'i': 'two'})]
# endregion Stop & StopPipeline
| 32.878449
| 79
| 0.624768
| 5,107
| 44,090
| 5.062072
| 0.049148
| 0.032106
| 0.048275
| 0.05957
| 0.850805
| 0.826164
| 0.815063
| 0.800054
| 0.78961
| 0.769998
| 0
| 0.013638
| 0.266568
| 44,090
| 1,340
| 80
| 32.902985
| 0.785818
| 0.081719
| 0
| 0.781283
| 0
| 0
| 0.179517
| 0.08564
| 0
| 0
| 0
| 0
| 0.162986
| 1
| 0.052576
| false
| 0.005258
| 0.012618
| 0
| 0.07571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
968b63da30e5be571bc60f8da7f32d0e61de23ba
| 105,331
|
py
|
Python
|
CellProfiler/tests/modules/test_trackobjects.py
|
aidotse/Team-rahma.ai
|
66857731e1ca2472e0783e37ba472b55a7ac9cd4
|
[
"MIT"
] | null | null | null |
CellProfiler/tests/modules/test_trackobjects.py
|
aidotse/Team-rahma.ai
|
66857731e1ca2472e0783e37ba472b55a7ac9cd4
|
[
"MIT"
] | null | null | null |
CellProfiler/tests/modules/test_trackobjects.py
|
aidotse/Team-rahma.ai
|
66857731e1ca2472e0783e37ba472b55a7ac9cd4
|
[
"MIT"
] | null | null | null |
import centrosome.filter
import numpy
import six.moves
from cellprofiler_core.constants.measurement import (
GROUP_NUMBER,
GROUP_INDEX,
R_FIRST_IMAGE_NUMBER,
R_SECOND_IMAGE_NUMBER,
R_FIRST_OBJECT_NUMBER,
R_SECOND_OBJECT_NUMBER,
C_COUNT,
MCA_AVAILABLE_POST_GROUP,
M_LOCATION_CENTER_X,
M_LOCATION_CENTER_Y,
)
from cellprofiler_core.image import ImageSetList
import cellprofiler_core.measurement
from cellprofiler_core.object import ObjectSet, Objects
import cellprofiler.modules.trackobjects
import tests.modules
from cellprofiler_core.pipeline import Pipeline, LoadException, RunException
from cellprofiler_core.workspace import Workspace
OBJECT_NAME = "objects"
def test_load_v3():
    """Load a v3 TrackObjects pipeline and verify settings round-trip."""
    file = tests.modules.get_test_resources_directory("trackobjects/v3.pipeline")
    with open(file, "r") as fd:
        data = fd.read()
    pipeline = Pipeline()
    def callback(caller, event):
        # Fail the test if loading raises any LoadException event.
        assert not isinstance(event, LoadException)
    pipeline.add_listener(callback)
    pipeline.load(six.moves.StringIO(data))
    module = pipeline.modules()[0]
    assert isinstance(module, cellprofiler.modules.trackobjects.TrackObjects)
    # Spot-check every persisted setting against the fixture's values.
    assert module.tracking_method == "LAP"
    assert module.object_name.value == "Nuclei"
    assert module.pixel_radius.value == 80
    assert module.display_type.value == "Color and Number"
    assert not module.wants_image
    assert module.measurement == "AreaShape_Area"
    assert module.image_name == "TrackedCells"
    assert module.wants_second_phase
    assert module.split_cost == 41
    assert module.merge_cost == 42
    assert module.max_gap_score == 53
    assert module.max_split_score == 54
    assert module.max_merge_score == 55
    assert module.max_frame_distance == 6
def test_load_v4():
    """Load a v4 pipeline with 3 TrackObjects modules; verify each."""
    file = tests.modules.get_test_resources_directory("trackobjects/v4.pipeline")
    with open(file, "r") as fd:
        data = fd.read()
    pipeline = Pipeline()
    def callback(caller, event):
        # Fail the test if loading raises any LoadException event.
        assert not isinstance(event, LoadException)
    pipeline.add_listener(callback)
    pipeline.load(six.moves.StringIO(data))
    assert len(pipeline.modules()) == 3
    # zip each module against its expected per-module settings; the
    # remaining settings below are identical across all three.
    for module, tracking_method, model, save_img, phase2, meas, dop in zip(
        pipeline.modules(),
        ("Measurements", "Overlap", "Distance"),
        (
            cellprofiler.modules.trackobjects.M_BOTH,
            cellprofiler.modules.trackobjects.M_RANDOM,
            cellprofiler.modules.trackobjects.M_VELOCITY,
        ),
        (True, False, True),
        (True, False, True),
        ("Slothfulness", "Prescience", "Trepidation"),
        (
            cellprofiler.modules.trackobjects.DT_COLOR_AND_NUMBER,
            cellprofiler.modules.trackobjects.DT_COLOR_ONLY,
            cellprofiler.modules.trackobjects.DT_COLOR_AND_NUMBER,
        ),
    ):
        assert isinstance(module, cellprofiler.modules.trackobjects.TrackObjects)
        assert module.tracking_method == tracking_method
        assert module.model == model
        assert module.wants_image.value == save_img
        assert module.wants_second_phase.value == phase2
        assert module.measurement == meas
        assert module.pixel_radius == 50
        assert module.display_type == dop
        assert module.image_name == "TrackByLAP"
        assert module.radius_std == 3
        assert module.radius_limit.min == 3.0
        assert module.radius_limit.max == 10.0
        assert module.gap_cost == 40
        assert module.split_cost == 1
        assert module.merge_cost == 1
        assert module.max_gap_score == 51
        assert module.max_split_score == 52
        assert module.max_merge_score == 53
        assert module.max_frame_distance == 4
def test_load_v5():
    """Load a v5 TrackObjects pipeline and verify settings round-trip."""
    file = tests.modules.get_test_resources_directory("trackobjects/v5.pipeline")
    with open(file, "r") as fd:
        data = fd.read()
    pipeline = Pipeline()
    def callback(caller, event):
        # Fail the test if loading raises any LoadException event.
        assert not isinstance(event, LoadException)
    pipeline.add_listener(callback)
    pipeline.load(six.moves.StringIO(data))
    assert len(pipeline.modules()) == 1
    m = pipeline.modules()[0]
    assert isinstance(m, cellprofiler.modules.trackobjects.TrackObjects)
    assert m.tracking_method == "LAP"
    assert m.object_name == "Turtles"
    assert m.measurement == "Steadiness"
    assert m.pixel_radius == 44
    assert m.display_type == cellprofiler.modules.trackobjects.DT_COLOR_AND_NUMBER
    assert not m.wants_image
    assert m.image_name == "TrackedTurtles"
    assert m.model == cellprofiler.modules.trackobjects.M_BOTH
    assert m.radius_std == 3
    assert m.radius_limit.min == 3
    assert m.radius_limit.max == 11
    assert m.wants_second_phase
    assert m.gap_cost == 39
    assert m.split_cost == 41
    assert m.merge_cost == 42
    assert m.max_frame_distance == 8
    # Lifetime filtering settings introduced in v5.
    assert m.wants_minimum_lifetime
    assert m.min_lifetime == 2
    assert not m.wants_maximum_lifetime
    assert m.max_lifetime == 1000
def test_load_v6():
    """Load a revision-6 pipeline and check settings, including mitosis params."""
    resource = tests.modules.get_test_resources_directory("trackobjects/v6.pipeline")
    with open(resource, "r") as handle:
        contents = handle.read()

    pipeline = Pipeline()

    def listener(caller, event):
        # A LoadException event means the pipeline failed to load.
        assert not isinstance(event, LoadException)

    pipeline.add_listener(listener)
    pipeline.load(six.moves.StringIO(contents))
    modules = pipeline.modules()
    assert len(modules) == 1
    tracker = modules[0]
    trk = cellprofiler.modules.trackobjects
    assert isinstance(tracker, trk.TrackObjects)
    # Scalar settings checked table-style; the attribute name doubles as the
    # failure message.
    for attribute, value in (
        ("tracking_method", "LAP"),
        ("object_name", "Turtles"),
        ("measurement", "Steadiness"),
        ("pixel_radius", 44),
        ("display_type", trk.DT_COLOR_AND_NUMBER),
        ("image_name", "TrackedTurtles"),
        ("model", trk.M_BOTH),
        ("radius_std", 3),
        ("gap_cost", 39),
        ("split_cost", 41),
        ("merge_cost", 42),
        ("max_frame_distance", 8),
        ("min_lifetime", 2),
        ("max_lifetime", 1000),
        ("mitosis_cost", 79),
        ("mitosis_max_distance", 41),
    ):
        assert getattr(tracker, attribute) == value, attribute
    assert tracker.radius_limit.min == 3
    assert tracker.radius_limit.max == 11
    assert not tracker.wants_image
    assert tracker.wants_second_phase
    assert tracker.wants_minimum_lifetime
    assert not tracker.wants_maximum_lifetime
def runTrackObjects(labels_list, fn=None, measurement=None):
    """Run TrackObjects over a sequence of cycles and return the measurements

    labels_list - a sequence of labels matrices, one per cycle
    fn - a callback function called with the module and workspace. It has
         the signature, fn(module, workspace, n) where n is 0 when
         called prior to prepare_run (workspace is None then), 1 prior to
         the first iteration and 2 prior to the second iteration.
    measurement - optional sequence of per-cycle object measurement values;
         when given, each cycle's values are stored under the feature name
         "measurement" before the module runs
    returns the measurements
    """
    module = cellprofiler.modules.trackobjects.TrackObjects()
    module.set_module_num(1)
    module.object_name.value = OBJECT_NAME
    module.pixel_radius.value = 50
    module.measurement.value = "measurement"
    measurements = cellprofiler_core.measurement.Measurements()
    # All cycles belong to group 1, with consecutive group indexes 1..N.
    measurements.add_all_measurements(
        "Image", GROUP_NUMBER, [1] * len(labels_list),
    )
    measurements.add_all_measurements(
        "Image", GROUP_INDEX, list(range(1, len(labels_list) + 1)),
    )
    pipeline = Pipeline()
    pipeline.add_module(module)
    image_set_list = ImageSetList()
    if fn:
        # n == 0: give the caller a chance to configure the module before
        # prepare_run (no workspace exists yet).
        fn(module, None, 0)
    module.prepare_run(
        Workspace(pipeline, module, None, None, measurements, image_set_list)
    )
    first = True
    for labels, index in zip(labels_list, list(range(len(labels_list)))):
        object_set = ObjectSet()
        objects = Objects()
        objects.segmented = labels
        object_set.add_objects(objects, OBJECT_NAME)
        image_set = image_set_list.get_image_set(index)
        if first:
            # Measurements start positioned at image set 1; only advance on
            # subsequent cycles.
            first = False
        else:
            measurements.next_image_set()
        if measurement is not None:
            measurements.add_measurement(
                OBJECT_NAME, "measurement", numpy.array(measurement[index])
            )
        workspace = Workspace(
            pipeline, module, image_set, object_set, measurements, image_set_list
        )
        if fn:
            # n == index + 1: called just before each cycle runs.
            fn(module, workspace, index + 1)
        module.run(workspace)
    return measurements
def test_track_nothing():
    """Run TrackObjects on an empty labels matrix"""
    columns = []

    def grab_columns(module, workspace, index, columns=columns):
        # NOTE(review): workspace is None exactly when index == 0 (see
        # runTrackObjects), so this condition appears never to fire and
        # columns stays empty — verify intent.
        if workspace is not None and index == 0:
            columns += module.get_measurement_columns(workspace.pipeline)

    measurements = runTrackObjects(
        (numpy.zeros((10, 10), int), numpy.zeros((10, 10), int)), grab_columns
    )
    prefix = cellprofiler.modules.trackobjects.F_PREFIX
    obj_features = [
        f
        for f in measurements.get_feature_names(OBJECT_NAME)
        if f.startswith(prefix)
    ]
    assert all(
        col[1] in obj_features for col in columns if col[0] == OBJECT_NAME
    )
    # Every per-object tracking feature must exist and hold no values.
    for feature in cellprofiler.modules.trackobjects.F_ALL:
        name = "_".join((prefix, feature, "50"))
        assert name in obj_features
        assert len(measurements.get_current_measurement(OBJECT_NAME, name)) == 0
    img_features = [
        f for f in measurements.get_feature_names("Image") if f.startswith(prefix)
    ]
    assert all(col[1] in img_features for col in columns if col[0] == "Image")
    # Every per-image tracking feature must exist and be zero.
    for feature in cellprofiler.modules.trackobjects.F_IMAGE_ALL:
        name = "_".join((prefix, feature, OBJECT_NAME, "50"))
        assert name in img_features
        assert measurements.get_current_image_measurement(name) == 0
def test_00_track_one_then_nothing():
    """Run track objects on an object that disappears

    Regression test of IMG-1090
    """
    trk = cellprofiler.modules.trackobjects
    first_frame = numpy.zeros((10, 10), int)
    first_frame[3:6, 2:7] = 1
    # Second frame is empty: the object vanishes.
    measurements = runTrackObjects((first_frame, numpy.zeros((10, 10), int)))
    feature = "_".join(
        (trk.F_PREFIX, trk.F_LOST_OBJECT_COUNT, OBJECT_NAME, "50")
    )
    assert measurements.get_current_image_measurement(feature) == 1
def test_track_one_distance():
    """Track an object that doesn't move using distance"""
    trk = cellprofiler.modules.trackobjects
    frame = numpy.zeros((10, 10), int)
    frame[3:6, 2:7] = 1

    def configure(module, workspace, idx):
        if idx == 0:
            module.pixel_radius.value = 1
            module.tracking_method.value = "Distance"

    measurements = runTrackObjects((frame, frame), configure)

    def obj_meas(feature):
        name = "_".join((trk.F_PREFIX, feature, "1"))
        values = measurements.get_current_measurement(OBJECT_NAME, name)
        assert len(values) == 1
        return values[0]

    # A stationary object has zero trajectory/distance measurements.
    for feature in (
        trk.F_TRAJECTORY_X,
        trk.F_TRAJECTORY_Y,
        trk.F_DISTANCE_TRAVELED,
        trk.F_INTEGRATED_DISTANCE,
    ):
        assert round(abs(obj_meas(feature) - 0), 7) == 0
    assert obj_meas(trk.F_LABEL) == 1
    assert obj_meas(trk.F_PARENT_OBJECT_NUMBER) == 1
    assert obj_meas(trk.F_PARENT_IMAGE_NUMBER) == 1
    assert obj_meas(trk.F_LIFETIME) == 2

    def img_meas(feature):
        name = "_".join((trk.F_PREFIX, feature, OBJECT_NAME, "1"))
        return measurements.get_current_image_measurement(name)

    assert img_meas(trk.F_NEW_OBJECT_COUNT) == 0
    assert img_meas(trk.F_LOST_OBJECT_COUNT) == 0
    assert img_meas(trk.F_SPLIT_COUNT) == 0
    assert img_meas(trk.F_MERGE_COUNT) == 0
    check_relationships(measurements, [1], [1], [2], [1])
def test_track_one_moving():
    """Track an object that moves

    The object shifts by the (i_off, j_off) offsets across four frames;
    verify the per-frame trajectory, distance, lifetime and linearity
    measurements and the parent/child relationships.

    Cleanup: the original kept `distance`, `last_i`, `last_j` locals that
    were computed in the frame-building loop but never read; removed.
    """
    trk = cellprofiler.modules.trackobjects
    labels_list = []
    # One frame per positional offset of the 3x3 object.
    for i_off, j_off in ((0, 0), (2, 0), (2, 1), (0, 1)):
        labels = numpy.zeros((10, 10), int)
        labels[4 + i_off : 7 + i_off, 4 + j_off : 7 + j_off] = 1
        labels_list.append(labels)

    def configure(module, workspace, idx):
        if idx == 0:
            module.pixel_radius.value = 3
            module.tracking_method.value = "Distance"

    measurements = runTrackObjects(labels_list, configure)

    def check(feature, expected):
        # Each image set should contain exactly one object whose value
        # matches the expected per-frame value.
        name = "_".join((trk.F_PREFIX, feature, "3"))
        value_set = measurements.get_all_measurements(OBJECT_NAME, name)
        assert len(expected) == len(value_set)
        for values, x in zip(value_set, expected):
            assert len(values) == 1
            assert round(abs(values[0] - x), 7) == 0

    check(trk.F_TRAJECTORY_X, [0, 0, 1, 0])
    check(trk.F_TRAJECTORY_Y, [0, 2, 0, -2])
    check(trk.F_DISTANCE_TRAVELED, [0, 2, 1, 2])
    check(trk.F_INTEGRATED_DISTANCE, [0, 2, 3, 5])
    check(trk.F_LABEL, [1, 1, 1, 1])
    check(trk.F_LIFETIME, [1, 2, 3, 4])
    check(trk.F_LINEARITY, [1, 1, numpy.sqrt(5) / 3, 1.0 / 5.0])

    def image_value(feature):
        name = "_".join((trk.F_PREFIX, feature, OBJECT_NAME, "3"))
        return measurements.get_current_image_measurement(name)

    assert image_value(trk.F_NEW_OBJECT_COUNT) == 0
    assert image_value(trk.F_LOST_OBJECT_COUNT) == 0
    assert image_value(trk.F_SPLIT_COUNT) == 0
    assert image_value(trk.F_MERGE_COUNT) == 0
    # Each frame's single object is the parent of the next frame's object.
    image_numbers = numpy.arange(1, len(labels_list) + 1)
    object_numbers = numpy.ones(len(image_numbers))
    check_relationships(
        measurements,
        image_numbers[:-1],
        object_numbers[:-1],
        image_numbers[1:],
        object_numbers[1:],
    )
def test_track_split():
    """Track an object that splits"""
    trk = cellprofiler.modules.trackobjects
    parent_frame = numpy.zeros((11, 9), int)
    parent_frame[1:10, 1:8] = 1
    # The single object becomes two in the following frames.
    split_frame = numpy.zeros((10, 10), int)
    split_frame[1:6, 1:8] = 1
    split_frame[6:10, 1:8] = 2

    def configure(module, workspace, idx):
        if idx == 0:
            module.pixel_radius.value = 5
            module.tracking_method.value = "Distance"

    measurements = runTrackObjects((parent_frame, split_frame, split_frame), configure)

    def obj_meas(feature, idx):
        name = "_".join((trk.F_PREFIX, feature, "5"))
        values = measurements.get_measurement(OBJECT_NAME, name, idx + 1)
        assert len(values) == 2
        return values

    labels = obj_meas(trk.F_LABEL, 2)
    assert len(labels) == 2
    # Both halves keep the original track label.
    assert numpy.all(labels == 1)
    parents = obj_meas(trk.F_PARENT_OBJECT_NUMBER, 1)
    assert numpy.all(parents == 1)
    assert numpy.all(obj_meas(trk.F_PARENT_IMAGE_NUMBER, 1) == 1)
    parents = obj_meas(trk.F_PARENT_OBJECT_NUMBER, 2)
    assert numpy.all(parents == numpy.array([1, 2]))
    assert numpy.all(obj_meas(trk.F_PARENT_IMAGE_NUMBER, 2) == 2)

    def img_meas(feature):
        name = "_".join((trk.F_PREFIX, feature, OBJECT_NAME, "5"))
        return measurements.get_all_measurements("Image", name)[1]

    assert img_meas(trk.F_NEW_OBJECT_COUNT) == 0
    assert img_meas(trk.F_LOST_OBJECT_COUNT) == 0
    assert img_meas(trk.F_SPLIT_COUNT) == 1
    assert img_meas(trk.F_MERGE_COUNT) == 0
    check_relationships(
        measurements, [1, 1, 2, 2], [1, 1, 1, 2], [2, 2, 3, 3], [1, 2, 1, 2]
    )
def test_track_negative():
    """Track unrelated objects"""
    trk = cellprofiler.modules.trackobjects
    frame1 = numpy.zeros((10, 10), int)
    frame1[1:5, 1:5] = 1
    frame2 = numpy.zeros((10, 10), int)
    frame2[6:9, 6:9] = 1

    def configure(module, workspace, idx):
        if idx == 0:
            module.pixel_radius.value = 1
            module.tracking_method.value = "Distance"

    measurements = runTrackObjects((frame1, frame2), configure)

    def obj_meas(feature):
        values = measurements.get_current_measurement(
            OBJECT_NAME, "_".join((trk.F_PREFIX, feature, "1"))
        )
        assert len(values) == 1
        return values[0]

    # Too far apart to link: the second object starts a new track.
    assert obj_meas(trk.F_LABEL) == 2
    assert obj_meas(trk.F_PARENT_OBJECT_NUMBER) == 0

    def img_meas(feature):
        return measurements.get_current_image_measurement(
            "_".join((trk.F_PREFIX, feature, OBJECT_NAME, "1"))
        )

    assert img_meas(trk.F_NEW_OBJECT_COUNT) == 1
    assert img_meas(trk.F_LOST_OBJECT_COUNT) == 1
    assert img_meas(trk.F_SPLIT_COUNT) == 0
    assert img_meas(trk.F_MERGE_COUNT) == 0
def test_track_ambiguous():
    """Track disambiguation from among two possible parents"""
    trk = cellprofiler.modules.trackobjects
    frame1 = numpy.zeros((20, 20), int)
    frame1[1:4, 1:4] = 1
    frame1[16:19, 16:19] = 2
    # The second-frame object sits between the two candidates.
    frame2 = numpy.zeros((20, 20), int)
    frame2[10:15, 10:15] = 1

    def configure(module, workspace, idx):
        if idx == 0:
            module.pixel_radius.value = 20
            module.tracking_method.value = "Distance"

    measurements = runTrackObjects((frame1, frame2), configure)

    def obj_meas(feature):
        values = measurements.get_current_measurement(
            OBJECT_NAME, "_".join((trk.F_PREFIX, feature, "20"))
        )
        assert len(values) == 1
        return values[0]

    assert obj_meas(trk.F_LABEL) == 2
    assert obj_meas(trk.F_PARENT_OBJECT_NUMBER) == 2
def test_overlap_positive():
    """Track overlapping objects"""
    trk = cellprofiler.modules.trackobjects
    frame1 = numpy.zeros((10, 10), int)
    frame1[3:6, 4:7] = 1
    frame2 = numpy.zeros((10, 10), int)
    frame2[4:7, 5:9] = 1

    def configure(module, workspace, idx):
        if idx == 0:
            module.pixel_radius.value = 2
            module.tracking_method.value = "Overlap"

    measurements = runTrackObjects((frame1, frame2), configure)

    def obj_meas(feature):
        values = measurements.get_current_measurement(
            OBJECT_NAME, "_".join((trk.F_PREFIX, feature, "2"))
        )
        assert len(values) == 1
        return values[0]

    # The frames overlap, so the object keeps its track label and parent.
    assert obj_meas(trk.F_LABEL) == 1
    assert obj_meas(trk.F_PARENT_OBJECT_NUMBER) == 1
def test_overlap_negative():
    """Track objects that don't overlap"""
    trk = cellprofiler.modules.trackobjects
    frame1 = numpy.zeros((20, 20), int)
    frame1[3:6, 4:7] = 1
    frame2 = numpy.zeros((20, 20), int)
    frame2[14:17, 15:19] = 1

    def configure(module, workspace, idx):
        if idx == 0:
            module.pixel_radius.value = 2
            module.tracking_method.value = "Overlap"

    measurements = runTrackObjects((frame1, frame2), configure)

    def obj_meas(feature):
        values = measurements.get_current_measurement(
            OBJECT_NAME, "_".join((trk.F_PREFIX, feature, "2"))
        )
        assert len(values) == 1
        return values[0]

    # No overlap: the second object gets a fresh label and no parent.
    assert obj_meas(trk.F_LABEL) == 2
    assert obj_meas(trk.F_PARENT_OBJECT_NUMBER) == 0
def test_overlap_ambiguous():
    """Track an object that overlaps two parents"""
    trk = cellprofiler.modules.trackobjects
    frame1 = numpy.zeros((20, 20), int)
    frame1[1:5, 1:5] = 1
    frame1[15:19, 15:19] = 2
    # The second-frame object covers parts of both parents.
    frame2 = numpy.zeros((20, 20), int)
    frame2[4:18, 4:18] = 1

    def configure(module, workspace, idx):
        if idx == 0:
            module.pixel_radius.value = 2
            module.tracking_method.value = "Overlap"

    measurements = runTrackObjects((frame1, frame2), configure)

    def obj_meas(feature):
        values = measurements.get_current_measurement(
            OBJECT_NAME, "_".join((trk.F_PREFIX, feature, "2"))
        )
        assert len(values) == 1
        return values[0]

    assert obj_meas(trk.F_LABEL) == 2
    assert obj_meas(trk.F_PARENT_OBJECT_NUMBER) == 2
def test_measurement_positive():
    """Test tracking an object by measurement"""
    trk = cellprofiler.modules.trackobjects
    frame1 = numpy.zeros((10, 10), int)
    frame1[3:6, 4:7] = 1
    frame2 = numpy.zeros((10, 10), int)
    frame2[4:7, 5:9] = 1

    def configure(module, workspace, idx):
        if idx == 0:
            module.pixel_radius.value = 2
            module.tracking_method.value = "Measurements"

    # Both cycles carry the same measurement value, so the objects match.
    measurements = runTrackObjects((frame1, frame2), configure, [[1], [1]])

    def obj_meas(feature):
        values = measurements.get_current_measurement(
            OBJECT_NAME, "_".join((trk.F_PREFIX, feature, "2"))
        )
        assert len(values) == 1
        return values[0]

    assert obj_meas(trk.F_LABEL) == 1
    assert obj_meas(trk.F_PARENT_OBJECT_NUMBER) == 1
def test_measurement_negative():
    """Test tracking with too great a jump between successive images"""
    trk = cellprofiler.modules.trackobjects
    frame1 = numpy.zeros((20, 20), int)
    frame1[3:6, 4:7] = 1
    frame2 = numpy.zeros((20, 20), int)
    frame2[14:17, 15:19] = 1

    def configure(module, workspace, idx):
        if idx == 0:
            module.pixel_radius.value = 2
            module.tracking_method.value = "Measurements"

    measurements = runTrackObjects((frame1, frame2), configure, [[1], [1]])

    def obj_meas(feature):
        values = measurements.get_current_measurement(
            OBJECT_NAME, "_".join((trk.F_PREFIX, feature, "2"))
        )
        assert len(values) == 1
        return values[0]

    # The jump exceeds pixel_radius, so no link is made.
    assert obj_meas(trk.F_LABEL) == 2
    assert obj_meas(trk.F_PARENT_OBJECT_NUMBER) == 0
def test_ambiguous():
    """Test measurement with ambiguous parent choice"""
    trk = cellprofiler.modules.trackobjects
    frame1 = numpy.zeros((20, 20), int)
    frame1[1:5, 1:5] = 1
    frame1[15:19, 15:19] = 2
    frame2 = numpy.zeros((20, 20), int)
    frame2[6:14, 6:14] = 1

    def configure(module, workspace, idx):
        if idx == 0:
            module.pixel_radius.value = 4
            module.tracking_method.value = "Measurements"

    # Parent measurements are 1 and 10; the child's 9 is closest to 10.
    measurements = runTrackObjects((frame1, frame2), configure, [[1, 10], [9]])

    def obj_meas(feature):
        values = measurements.get_current_measurement(
            OBJECT_NAME, "_".join((trk.F_PREFIX, feature, "4"))
        )
        assert len(values) == 1
        return values[0]

    assert obj_meas(trk.F_LABEL) == 2
    assert obj_meas(trk.F_PARENT_OBJECT_NUMBER) == 2
def test_cross_numbered_objects():
    """Test labeling when object 1 in one image becomes object 2 in next"""
    # Four quadrants of a 10x20 field, indexed 0-3 by position.
    i, j = numpy.mgrid[0:10, 0:20]
    labels = (i > 5) + (j > 10) * 2
    # All 24 orderings of the object numbers 1-4, one permutation per cycle.
    pp = numpy.array(list(centrosome.filter.permutations([1, 2, 3, 4])))

    def fn(module, workspace, idx):
        if idx == 0:
            module.tracking_method.value = "LAP"

    # Each cycle relabels the same four quadrants using permutation p.
    measurements = runTrackObjects([numpy.array(p)[labels] for p in pp], fn)

    def m(feature, i):
        # Per-object feature values for image set i+1; no scale suffix
        # because LAP features are not scale-qualified.
        name = "_".join((cellprofiler.modules.trackobjects.F_PREFIX, feature))
        values = measurements[OBJECT_NAME, name, i + 1]
        assert len(values) == 4
        return values

    for i, p in enumerate(pp):
        l = m(cellprofiler.modules.trackobjects.F_LABEL, i)
        # The track label must be invariant: re-sorting the cycle's object
        # numbers by label recovers 1..4.
        numpy.testing.assert_array_equal(numpy.arange(1, 5), p[l - 1])
        if i > 0:
            # The parent object numbers are the previous permutation's
            # numbers, aligned by sorting the current permutation.
            p_prev = pp[i - 1]
            order = numpy.lexsort([p])
            expected_po = p_prev[order]
            po = m(cellprofiler.modules.trackobjects.F_PARENT_OBJECT_NUMBER, i)
            numpy.testing.assert_array_equal(po, expected_po)
            pi = m(cellprofiler.modules.trackobjects.F_PARENT_IMAGE_NUMBER, i)
            numpy.testing.assert_array_equal(pi, i)
    # Every object in each cycle is related to its counterpart in the next.
    image_numbers, _ = numpy.mgrid[1 : (len(pp) + 1), 0:4]
    check_relationships(
        measurements,
        image_numbers[:-1, :].flatten(),
        pp[:-1, :].flatten(),
        image_numbers[1:, :].flatten(),
        pp[1:, :].flatten(),
    )
def test_measurement_columns():
    """Test get_measurement_columns function"""
    trk = cellprofiler.modules.trackobjects
    module = trk.TrackObjects()
    module.object_name.value = OBJECT_NAME
    module.tracking_method.value = "Distance"
    module.pixel_radius.value = 10
    columns = module.get_measurement_columns(None)
    assert len(columns) == len(trk.F_ALL) + len(trk.F_IMAGE_ALL)
    column_names = [column[1] for column in columns]
    for object_name, features in (
        (OBJECT_NAME, trk.F_ALL),
        ("Image", trk.F_IMAGE_ALL,),
    ):
        for feature in features:
            # Object features are scale-suffixed; image features also carry
            # the object name.
            if object_name == OBJECT_NAME:
                name = "_".join((trk.F_PREFIX, feature, "10"))
            else:
                name = "_".join((trk.F_PREFIX, feature, OBJECT_NAME, "10"))
            index = column_names.index(name)
            assert index != -1
            assert columns[index][0] == object_name
def test_measurement_columns_lap():
    """Test get_measurement_columns function for LAP

    Checks the column count and that every expected per-object and
    per-image feature is present, with second-phase features flagged as
    available-post-group when the second phase is enabled.

    Bug fix: the hand-written kalman feature list contained the
    static-model covariance (X, Y) entry twice and never checked (Y, Y),
    even though the count assertion allows four static covariance columns.
    The list is now generated programmatically so every state/noise/cov
    combination is covered exactly once.
    """
    trk = cellprofiler.modules.trackobjects
    module = trk.TrackObjects()
    module.object_name.value = OBJECT_NAME
    module.tracking_method.value = "LAP"
    module.model.value = trk.M_BOTH
    # These two features are post-group even without the second phase.
    second_phase = [trk.F_LINKING_DISTANCE, trk.F_MOVEMENT_MODEL]
    # Kalman state elements per model: the static model tracks position
    # only; the velocity model tracks position and velocity.
    model_elements = (
        (trk.F_STATIC_MODEL, (trk.F_X, trk.F_Y)),
        (trk.F_VELOCITY_MODEL, (trk.F_X, trk.F_Y, trk.F_VX, trk.F_VY)),
    )
    kalman_features = []
    # State and noise features: one per element.
    for element in (trk.F_STATE, trk.F_NOISE):
        for model, axes in model_elements:
            kalman_features += [
                trk.kalman_feature(model, element, axis) for axis in axes
            ]
    # Covariance features: the full element x element matrix per model.
    for model, axes in model_elements:
        kalman_features += [
            trk.kalman_feature(model, trk.F_COV, a1, a2)
            for a1 in axes
            for a2 in axes
        ]
    for wants in (True, False):
        module.wants_second_phase.value = wants
        columns = module.get_measurement_columns(None)
        other_features = [
            trk.F_AREA,
            trk.F_LINKING_DISTANCE,
            trk.F_LINK_TYPE,
            trk.F_MOVEMENT_MODEL,
            trk.F_STANDARD_DEVIATION,
        ]
        if wants:
            other_features += [
                trk.F_GAP_LENGTH,
                trk.F_GAP_SCORE,
                trk.F_MERGE_SCORE,
                trk.F_SPLIT_SCORE,
                trk.F_MITOSIS_SCORE,
            ]
        # Kalman columns: 2 state + 2 noise + 4 cov for the static model,
        # 4 state + 4 noise + 16 cov for the velocity model.
        assert (
            len(columns)
            == len(trk.F_ALL)
            + len(trk.F_IMAGE_ALL)
            + len(other_features)
            + 2
            + 2
            + 4
            + 4
            + 4
            + 16
        )
        column_names = [column[1] for column in columns]
        for object_name, features in (
            (OBJECT_NAME, trk.F_ALL + kalman_features + other_features),
            ("Image", trk.F_IMAGE_ALL,),
        ):
            for feature in features:
                if object_name == OBJECT_NAME:
                    name = "_".join((trk.F_PREFIX, feature))
                else:
                    name = "_".join((trk.F_PREFIX, feature, OBJECT_NAME))
                index = column_names.index(name)
                assert index != -1
                column = columns[index]
                assert column[0] == object_name
                if wants or feature in second_phase:
                    # Post-group columns carry a 4th element whose
                    # MCA_AVAILABLE_POST_GROUP attribute is set.
                    assert len(column) == 4
                    assert MCA_AVAILABLE_POST_GROUP in column[3]
                    assert column[3][MCA_AVAILABLE_POST_GROUP]
                else:
                    assert (
                        (len(column) == 3)
                        or (MCA_AVAILABLE_POST_GROUP not in column[3])
                        or (not column[3][MCA_AVAILABLE_POST_GROUP])
                    )
def test_measurements():
    """Test the different measurement pieces"""
    trk = cellprofiler.modules.trackobjects
    module = trk.TrackObjects()
    module.object_name.value = OBJECT_NAME
    module.image_name.value = "image"
    module.pixel_radius.value = 10
    # Unknown object names yield no categories or features.
    assert len(module.get_categories(None, "Foo")) == 0
    categories = module.get_categories(None, OBJECT_NAME)
    assert len(categories) == 1
    assert categories[0] == trk.F_PREFIX
    assert len(module.get_measurements(None, OBJECT_NAME, "Foo")) == 0
    features = module.get_measurements(None, OBJECT_NAME, trk.F_PREFIX)
    assert len(features) == len(trk.F_ALL)
    assert all(feature in trk.F_ALL for feature in features)
    # Unknown features have no scales; real ones report the pixel radius.
    scales = module.get_measurement_scales(
        None, OBJECT_NAME, trk.F_PREFIX, "Foo", "image"
    )
    assert len(scales) == 0
    for feature in trk.F_ALL:
        scales = module.get_measurement_scales(
            None, OBJECT_NAME, trk.F_PREFIX, feature, "image"
        )
        assert len(scales) == 1
        assert int(scales[0]) == 10
def make_lap2_workspace(objs, nimages, group_numbers=None, group_indexes=None):
    """Make a workspace to test the second half of LAP

    objs - a N x 7 array of "objects" composed of the
           following pieces per object
           objs[0] - image set # for object
           objs[1] - label for object
           objs[2] - parent image #
           objs[3] - parent object #
           objs[4] - x coordinate for object
           objs[5] - y coordinate for object
           objs[6] - area for object
    nimages - # of image sets
    group_numbers - group numbers for each image set, defaults to all 1
    group_indexes - group indexes for each image set, defaults to range

    Returns (workspace, module) with all first-phase measurements
    pre-populated so that post_group (the second LAP phase) can run.
    """
    module = cellprofiler.modules.trackobjects.TrackObjects()
    module.set_module_num(1)
    module.object_name.value = OBJECT_NAME
    module.tracking_method.value = "LAP"
    module.wants_second_phase.value = True
    module.wants_lifetime_filtering.value = False
    module.wants_minimum_lifetime.value = False
    module.min_lifetime.value = 1
    module.wants_maximum_lifetime.value = False
    module.max_lifetime.value = 100
    module.pixel_radius.value = 50
    pipeline = Pipeline()

    def callback(caller, event):
        # Running must not raise; a RunException event signals failure.
        assert not isinstance(event, RunException)

    pipeline.add_listener(callback)
    pipeline.add_module(module)
    m = cellprofiler_core.measurement.Measurements()
    # Count the objects per image set from column 0 (the image set #).
    if objs.shape[0] > 0:
        nobjects = numpy.bincount(objs[:, 0].astype(int))
    else:
        nobjects = numpy.zeros(nimages, int)
    for i in range(nimages):
        m.next_image_set(i + 1)
        # Copy each per-object column of objs into its measurement.
        for index, feature, dtype in (
            (
                1,
                module.measurement_name(cellprofiler.modules.trackobjects.F_LABEL),
                int,
            ),
            (
                2,
                module.measurement_name(
                    cellprofiler.modules.trackobjects.F_PARENT_IMAGE_NUMBER
                ),
                int,
            ),
            (
                3,
                module.measurement_name(
                    cellprofiler.modules.trackobjects.F_PARENT_OBJECT_NUMBER
                ),
                int,
            ),
            (4, M_LOCATION_CENTER_X, float),
            (5, M_LOCATION_CENTER_Y, float),
            (
                6,
                module.measurement_name(cellprofiler.modules.trackobjects.F_AREA),
                float,
            ),
        ):
            values = objs[objs[:, 0] == i, index].astype(dtype)
            m.add_measurement(OBJECT_NAME, feature, values, i + 1)
        m.add_measurement("Image", "ImageNumber", i + 1)
        m.add_measurement(
            "Image",
            GROUP_NUMBER,
            1 if group_numbers is None else group_numbers[i],
            image_set_number=i + 1,
        )
        m.add_measurement(
            "Image",
            GROUP_INDEX,
            i if group_indexes is None else group_indexes[i],
            image_set_number=i + 1,
        )
        #
        # Add blanks of the right sizes for measurements that are recalculated
        #
        m.add_measurement(
            "Image",
            "_".join((C_COUNT, OBJECT_NAME)),
            nobjects[i],
            image_set_number=i + 1,
        )
        for feature in (
            cellprofiler.modules.trackobjects.F_DISTANCE_TRAVELED,
            cellprofiler.modules.trackobjects.F_DISPLACEMENT,
            cellprofiler.modules.trackobjects.F_INTEGRATED_DISTANCE,
            cellprofiler.modules.trackobjects.F_TRAJECTORY_X,
            cellprofiler.modules.trackobjects.F_TRAJECTORY_Y,
            cellprofiler.modules.trackobjects.F_LINEARITY,
            cellprofiler.modules.trackobjects.F_LIFETIME,
            cellprofiler.modules.trackobjects.F_FINAL_AGE,
            cellprofiler.modules.trackobjects.F_LINKING_DISTANCE,
            cellprofiler.modules.trackobjects.F_LINK_TYPE,
            cellprofiler.modules.trackobjects.F_MOVEMENT_MODEL,
            cellprofiler.modules.trackobjects.F_STANDARD_DEVIATION,
        ):
            # Integer-valued features get int zeros, the rest float;
            # F_FINAL_AGE is seeded with NaN (meaning "not yet computed").
            dtype = (
                int
                if feature
                in (
                    cellprofiler.modules.trackobjects.F_PARENT_OBJECT_NUMBER,
                    cellprofiler.modules.trackobjects.F_PARENT_IMAGE_NUMBER,
                    cellprofiler.modules.trackobjects.F_LIFETIME,
                    cellprofiler.modules.trackobjects.F_LINK_TYPE,
                    cellprofiler.modules.trackobjects.F_MOVEMENT_MODEL,
                )
                else float
            )
            m.add_measurement(
                OBJECT_NAME,
                module.measurement_name(feature),
                numpy.NaN * numpy.ones(nobjects[i], dtype)
                if feature == cellprofiler.modules.trackobjects.F_FINAL_AGE
                else numpy.zeros(nobjects[i], dtype),
                image_set_number=i + 1,
            )
        for feature in (
            cellprofiler.modules.trackobjects.F_SPLIT_COUNT,
            cellprofiler.modules.trackobjects.F_MERGE_COUNT,
        ):
            m.add_measurement(
                "Image",
                module.image_measurement_name(feature),
                0,
                image_set_number=i + 1,
            )
    #
    # Figure out how many new and lost objects per image set
    #
    label_sets = [set() for i in range(nimages)]
    for row in objs:
        label_sets[row[0]].add(row[1])
    if group_numbers is None:
        group_numbers = numpy.ones(nimages, int)
    if group_indexes is None:
        group_indexes = numpy.arange(nimages) + 1
    #
    # New objects are ones without matching labels in the previous set
    #
    for i in range(0, nimages):
        if group_indexes[i] == 1:
            # First image set of a group: everything is new, nothing lost.
            new_objects = len(label_sets[i])
            lost_objects = 0
        else:
            new_objects = sum(
                [1 for label in label_sets[i] if label not in label_sets[i - 1]]
            )
            lost_objects = sum(
                [1 for label in label_sets[i - 1] if label not in label_sets[i]]
            )
        m.add_measurement(
            "Image",
            module.image_measurement_name(
                cellprofiler.modules.trackobjects.F_NEW_OBJECT_COUNT
            ),
            new_objects,
            image_set_number=i + 1,
        )
        m.add_measurement(
            "Image",
            module.image_measurement_name(
                cellprofiler.modules.trackobjects.F_LOST_OBJECT_COUNT
            ),
            lost_objects,
            image_set_number=i + 1,
        )
    m.image_set_number = nimages
    image_set_list = ImageSetList()
    # Prime one image set per cycle; the workspace uses the last one.
    for i in range(nimages):
        image_set = image_set_list.get_image_set(i)
    workspace = Workspace(pipeline, module, image_set, ObjectSet(), m, image_set_list,)
    return workspace, module
def check_measurements(workspace, d):
    """Compare the workspace's measurements against expected values.

    workspace - workspace that was run
    d - dictionary of feature name and list of expected measurement values;
        a list of scalars is checked as an image measurement, a list of
        arrays as a per-object measurement.
    """
    measurements = workspace.measurements
    assert isinstance(measurements, cellprofiler_core.measurement.Measurements)
    module = workspace.module
    assert isinstance(module, cellprofiler.modules.trackobjects.TrackObjects)
    for feature, expected in d.items():
        # Scalar expectations are image measurements; arrays are per-object.
        is_image_feature = numpy.isscalar(expected[0])
        if is_image_feature:
            mname = module.image_measurement_name(feature)
            values = measurements.get_all_measurements("Image", mname)
        else:
            mname = module.measurement_name(feature)
            values = measurements.get_all_measurements(OBJECT_NAME, mname)
        assert len(expected) == len(values), (
            "Expected # image sets (%d) != actual (%d) for %s"
            % (len(expected), len(values), feature)
        )
        if is_image_feature:
            assert all(v == e for v, e in zip(values, expected)), (
                "Values don't match for " + feature
            )
        else:
            for i, (e, v) in enumerate(zip(expected, values)):
                assert len(e) == len(v), (
                    "Expected # of objects (%d) != actual (%d) for %s:%d"
                    % (len(e), len(v), feature, i)
                )
                numpy.testing.assert_almost_equal(v, e)
def check_relationships(
    m,
    expected_parent_image_numbers,
    expected_parent_object_numbers,
    expected_child_image_numbers,
    expected_child_object_numbers,
):
    """Check the relationship measurements against expected"""
    expected = [
        numpy.atleast_1d(expected_parent_image_numbers),
        numpy.atleast_1d(expected_parent_object_numbers),
        numpy.atleast_1d(expected_child_image_numbers),
        numpy.atleast_1d(expected_child_object_numbers),
    ]
    assert isinstance(m, cellprofiler_core.measurement.Measurements)
    r = m.get_relationships(
        1, cellprofiler.modules.trackobjects.R_PARENT, OBJECT_NAME, OBJECT_NAME
    )
    actual = [
        r[R_FIRST_IMAGE_NUMBER],
        r[R_FIRST_OBJECT_NUMBER],
        r[R_SECOND_IMAGE_NUMBER],
        r[R_SECOND_OBJECT_NUMBER],
    ]
    assert len(actual[0]) == len(expected[0])
    # Sort both sides into the same canonical order before comparing,
    # since the relationship table carries no ordering guarantee.
    for i1, o1, i2, o2 in (expected, actual):
        order = numpy.lexsort((i1, o1, i2, o2))
        for column in (i1, o1, i2, o2):
            column[:] = column[order]
    for expected_column, actual_column in zip(expected, actual):
        numpy.testing.assert_array_equal(expected_column, actual_column)
def test_lap_none():
    """Run the second part of LAP on one image of nothing"""
    TO = cellprofiler.modules.trackobjects
    with MonkeyPatchedDelete():
        workspace, module = make_lap2_workspace(numpy.zeros((0, 7)), 1)
        assert isinstance(module, TO.TrackObjects)
        module.run_as_data_tool(workspace)
        # Every measurement should exist but be empty / zero.
        check_measurements(
            workspace,
            {
                TO.F_LABEL: [numpy.zeros(0, int)],
                TO.F_DISTANCE_TRAVELED: [numpy.zeros(0)],
                TO.F_DISPLACEMENT: [numpy.zeros(0)],
                TO.F_INTEGRATED_DISTANCE: [numpy.zeros(0)],
                TO.F_TRAJECTORY_X: [numpy.zeros(0)],
                TO.F_TRAJECTORY_Y: [numpy.zeros(0)],
                TO.F_NEW_OBJECT_COUNT: [0],
                TO.F_LOST_OBJECT_COUNT: [0],
                TO.F_MERGE_COUNT: [0],
                TO.F_SPLIT_COUNT: [0],
            },
        )
def test_lap_one():
    """Run the second part of LAP on one image of one object"""
    TO = cellprofiler.modules.trackobjects
    with MonkeyPatchedDelete():
        workspace, module = make_lap2_workspace(
            numpy.array([[0, 1, 0, 0, 100, 100, 25]]), 1
        )
        assert isinstance(module, TO.TrackObjects)
        module.run_as_data_tool(workspace)
        # One new object, no motion, no parents.
        check_measurements(
            workspace,
            {
                TO.F_LABEL: [numpy.array([1])],
                TO.F_PARENT_IMAGE_NUMBER: [numpy.array([0])],
                TO.F_PARENT_OBJECT_NUMBER: [numpy.array([0])],
                TO.F_DISPLACEMENT: [numpy.zeros(1)],
                TO.F_INTEGRATED_DISTANCE: [numpy.zeros(1)],
                TO.F_TRAJECTORY_X: [numpy.zeros(1)],
                TO.F_TRAJECTORY_Y: [numpy.zeros(1)],
                TO.F_NEW_OBJECT_COUNT: [1],
                TO.F_LOST_OBJECT_COUNT: [0],
                TO.F_MERGE_COUNT: [0],
                TO.F_SPLIT_COUNT: [0],
            },
        )
def test_bridge_gap():
    """Bridge a gap of zero frames between two objects"""
    TO = cellprofiler.modules.trackobjects
    with MonkeyPatchedDelete():
        workspace, module = make_lap2_workspace(
            numpy.array([[0, 1, 0, 0, 1, 2, 25], [2, 2, 0, 0, 101, 102, 25]]), 3
        )
        assert isinstance(module, TO.TrackObjects)
        # Bridging the gap costs about 141; setting the alternative
        # cost to 142 lets the bridge win.
        module.gap_cost.value = 142
        module.max_gap_score.value = 142
        module.run_as_data_tool(workspace)
        distance = numpy.array([numpy.sqrt(2 * 100 * 100)])
        check_measurements(
            workspace,
            {
                TO.F_LABEL: [
                    numpy.array([1]),
                    numpy.zeros(0),
                    numpy.array([1]),
                ],
                TO.F_PARENT_IMAGE_NUMBER: [
                    numpy.array([0]),
                    numpy.zeros(0, int),
                    numpy.array([1]),
                ],
                TO.F_PARENT_OBJECT_NUMBER: [
                    numpy.array([0]),
                    numpy.zeros(0, int),
                    numpy.array([1]),
                ],
                TO.F_DISTANCE_TRAVELED: [
                    numpy.zeros(1),
                    numpy.zeros(0),
                    distance,
                ],
                TO.F_INTEGRATED_DISTANCE: [
                    numpy.zeros(1),
                    numpy.zeros(0),
                    distance,
                ],
                TO.F_TRAJECTORY_X: [
                    numpy.zeros(1),
                    numpy.zeros(0),
                    numpy.array([100]),
                ],
                TO.F_TRAJECTORY_Y: [
                    numpy.zeros(1),
                    numpy.zeros(0),
                    numpy.array([100]),
                ],
                TO.F_LINEARITY: [
                    numpy.array([numpy.nan]),
                    numpy.zeros(0),
                    numpy.array([1]),
                ],
                TO.F_LIFETIME: [
                    numpy.ones(1),
                    numpy.zeros(0),
                    numpy.array([2]),
                ],
                TO.F_FINAL_AGE: [
                    numpy.array([numpy.nan]),
                    numpy.zeros(0),
                    numpy.array([2]),
                ],
                TO.F_NEW_OBJECT_COUNT: [1, 0, 0],
                TO.F_LOST_OBJECT_COUNT: [0, 0, 0],
                TO.F_MERGE_COUNT: [0, 0, 0],
                TO.F_SPLIT_COUNT: [0, 0, 0],
            },
        )
        # The object in frame 3 should be linked back to frame 1's object.
        check_relationships(workspace.measurements, [1], [1], [3], [1])
def test_maintain_gap():
    """Maintain object identity across a large gap"""
    TO = cellprofiler.modules.trackobjects
    with MonkeyPatchedDelete():
        workspace, module = make_lap2_workspace(
            numpy.array([[0, 1, 0, 0, 1, 2, 25], [2, 2, 0, 0, 101, 102, 25]]), 3
        )
        assert isinstance(module, TO.TrackObjects)
        # Creating the gap costs 140 while bridging it costs 141, so
        # the track breaks and the second object gets a new label.
        module.gap_cost.value = 140
        module.max_gap_score.value = 142
        module.run_as_data_tool(workspace)
        check_measurements(
            workspace,
            {
                TO.F_LABEL: [
                    numpy.array([1]),
                    numpy.zeros(0),
                    numpy.array([2]),
                ],
                TO.F_PARENT_IMAGE_NUMBER: [
                    numpy.array([0]),
                    numpy.zeros(0),
                    numpy.array([0]),
                ],
                TO.F_PARENT_OBJECT_NUMBER: [
                    numpy.array([0]),
                    numpy.zeros(0),
                    numpy.array([0]),
                ],
                TO.F_NEW_OBJECT_COUNT: [1, 0, 1],
                TO.F_LOST_OBJECT_COUNT: [0, 1, 0],
                TO.F_MERGE_COUNT: [0, 0, 0],
                TO.F_SPLIT_COUNT: [0, 0, 0],
            },
        )
def test_filter_gap():
    """Filter a gap due to an unreasonable score"""
    TO = cellprofiler.modules.trackobjects
    with MonkeyPatchedDelete():
        workspace, module = make_lap2_workspace(
            numpy.array([[0, 1, 0, 0, 1, 2, 25], [2, 2, 0, 0, 101, 102, 25]]), 3
        )
        assert isinstance(module, TO.TrackObjects)
        # Bridging (141) is cheaper than the alternative (142), but the
        # max score of 140 filters the bridge out anyway.
        module.gap_cost.value = 142
        module.max_gap_score.value = 140
        module.run_as_data_tool(workspace)
        check_measurements(
            workspace,
            {
                TO.F_LABEL: [
                    numpy.array([1]),
                    numpy.zeros(0),
                    numpy.array([2]),
                ],
                TO.F_PARENT_IMAGE_NUMBER: [
                    numpy.array([0]),
                    numpy.zeros(0),
                    numpy.array([0]),
                ],
                TO.F_PARENT_OBJECT_NUMBER: [
                    numpy.array([0]),
                    numpy.zeros(0),
                    numpy.array([0]),
                ],
            },
        )
def test_split():
    """Track an object splitting"""
    TO = cellprofiler.modules.trackobjects
    workspace, module = make_lap2_workspace(
        numpy.array(
            [
                [0, 1, 0, 0, 100, 100, 50],
                [1, 1, 1, 1, 110, 110, 25],
                [1, 2, 0, 0, 90, 90, 25],
                [2, 1, 2, 1, 113, 114, 25],
                [2, 2, 2, 2, 86, 87, 25],
            ]
        ),
        3,
    )
    assert isinstance(module, TO.TrackObjects)
    # The split score is 20*sqrt(2) above the null, so a split
    # alternative cost of 15 is too much and 14 too little; values
    # doubled to match.
    module.split_cost.value = 30
    module.max_split_score.value = 30
    module.run_as_data_tool(workspace)
    d200 = numpy.sqrt(200)
    tot = numpy.sqrt(13 ** 2 + 14 ** 2)
    lin = tot / (d200 + 5)
    check_measurements(
        workspace,
        {
            TO.F_LABEL: [
                numpy.array([1]),
                numpy.array([1, 1]),
                numpy.array([1, 1]),
            ],
            TO.F_PARENT_IMAGE_NUMBER: [
                numpy.array([0]),
                numpy.array([1, 1]),
                numpy.array([2, 2]),
            ],
            TO.F_PARENT_OBJECT_NUMBER: [
                numpy.array([0]),
                numpy.array([1, 1]),
                numpy.array([1, 2]),
            ],
            TO.F_DISTANCE_TRAVELED: [
                numpy.zeros(1),
                numpy.ones(2) * d200,
                numpy.array([5, 5]),
            ],
            TO.F_DISPLACEMENT: [
                numpy.zeros(1),
                numpy.ones(2) * d200,
                numpy.array([tot, tot]),
            ],
            TO.F_INTEGRATED_DISTANCE: [
                numpy.zeros(1),
                numpy.ones(2) * d200,
                numpy.ones(2) * d200 + 5,
            ],
            TO.F_TRAJECTORY_X: [
                numpy.zeros(1),
                numpy.array([10, -10]),
                numpy.array([3, -4]),
            ],
            TO.F_TRAJECTORY_Y: [
                numpy.zeros(1),
                numpy.array([10, -10]),
                numpy.array([4, -3]),
            ],
            TO.F_LINEARITY: [
                numpy.array([numpy.nan]),
                numpy.array([1, 1]),
                numpy.array([lin, lin]),
            ],
            TO.F_LIFETIME: [
                numpy.ones(1),
                numpy.array([2, 2]),
                numpy.array([3, 3]),
            ],
            TO.F_FINAL_AGE: [
                numpy.array([numpy.nan]),
                numpy.array([numpy.nan, numpy.nan]),
                numpy.array([3, 3]),
            ],
            TO.F_NEW_OBJECT_COUNT: [1, 0, 0],
            TO.F_LOST_OBJECT_COUNT: [0, 0, 0],
            TO.F_MERGE_COUNT: [0, 0, 0],
            TO.F_SPLIT_COUNT: [0, 1, 0],
        },
    )
def test_dont_split():
    """Don't split a track when the alternative cost undercuts the split"""
    TO = cellprofiler.modules.trackobjects
    workspace, module = make_lap2_workspace(
        numpy.array(
            [
                [0, 1, 0, 0, 100, 100, 50],
                [1, 1, 1, 1, 110, 110, 25],
                [1, 2, 0, 0, 90, 90, 25],
                [2, 1, 2, 1, 110, 110, 25],
                [2, 2, 2, 2, 90, 90, 25],
            ]
        ),
        3,
    )
    assert isinstance(module, TO.TrackObjects)
    # A split cost of 28 is cheaper than the split score, so object 2
    # becomes its own track rather than a child of object 1.
    module.split_cost.value = 28
    module.max_split_score.value = 30
    module.run_as_data_tool(workspace)
    check_measurements(
        workspace,
        {
            TO.F_LABEL: [
                numpy.array([1]),
                numpy.array([1, 2]),
                numpy.array([1, 2]),
            ],
            TO.F_PARENT_IMAGE_NUMBER: [
                numpy.array([0]),
                numpy.array([1, 0]),
                numpy.array([2, 2]),
            ],
            TO.F_PARENT_OBJECT_NUMBER: [
                numpy.array([0]),
                numpy.array([1, 0]),
                numpy.array([1, 2]),
            ],
            TO.F_LIFETIME: [
                numpy.ones(1),
                numpy.array([2, 1]),
                numpy.array([3, 2]),
            ],
            TO.F_FINAL_AGE: [
                numpy.array([numpy.nan]),
                numpy.array([numpy.nan, numpy.nan]),
                numpy.array([3, 2]),
            ],
            TO.F_NEW_OBJECT_COUNT: [1, 1, 0],
            TO.F_LOST_OBJECT_COUNT: [0, 0, 0],
            TO.F_MERGE_COUNT: [0, 0, 0],
            TO.F_SPLIT_COUNT: [0, 0, 0],
        },
    )
def test_split_filter():
    """Prevent a split by setting the filter too low"""
    TO = cellprofiler.modules.trackobjects
    workspace, module = make_lap2_workspace(
        numpy.array(
            [
                [0, 1, 0, 0, 100, 100, 50],
                [1, 1, 1, 1, 110, 110, 25],
                [1, 2, 0, 0, 90, 90, 25],
                [2, 1, 2, 1, 110, 110, 25],
                [2, 2, 2, 2, 90, 90, 25],
            ]
        ),
        3,
    )
    assert isinstance(module, TO.TrackObjects)
    # The max split score (28) is below the actual split score, so
    # the split is filtered out even though the cost would allow it.
    module.split_cost.value = 30
    module.max_split_score.value = 28
    module.run_as_data_tool(workspace)
    check_measurements(
        workspace,
        {
            TO.F_LABEL: [
                numpy.array([1]),
                numpy.array([1, 2]),
                numpy.array([1, 2]),
            ],
            TO.F_PARENT_IMAGE_NUMBER: [
                numpy.array([0]),
                numpy.array([1, 0]),
                numpy.array([2, 2]),
            ],
            TO.F_PARENT_OBJECT_NUMBER: [
                numpy.array([0]),
                numpy.array([1, 0]),
                numpy.array([1, 2]),
            ],
            TO.F_LIFETIME: [
                numpy.array([1]),
                numpy.array([2, 1]),
                numpy.array([3, 2]),
            ],
            TO.F_FINAL_AGE: [
                numpy.array([numpy.nan]),
                numpy.array([numpy.nan, numpy.nan]),
                numpy.array([3, 2]),
            ],
            TO.F_NEW_OBJECT_COUNT: [1, 1, 0],
            TO.F_LOST_OBJECT_COUNT: [0, 0, 0],
            TO.F_MERGE_COUNT: [0, 0, 0],
            TO.F_SPLIT_COUNT: [0, 0, 0],
        },
    )
def test_merge():
    """Merge two objects into one"""
    TO = cellprofiler.modules.trackobjects
    workspace, module = make_lap2_workspace(
        numpy.array(
            [
                [0, 1, 0, 0, 110, 110, 25],
                [0, 2, 0, 0, 90, 90, 25],
                [1, 1, 1, 1, 110, 110, 25],
                [1, 2, 1, 2, 90, 90, 25],
                [2, 1, 2, 1, 100, 100, 50],
            ]
        ),
        3,
    )
    assert isinstance(module, TO.TrackObjects)
    # An alternative merge cost of 30 exceeds the merge score, so the
    # two tracks coalesce into one in the last frame.
    module.merge_cost.value = 30
    module.max_merge_score.value = 30
    module.run_as_data_tool(workspace)
    check_measurements(
        workspace,
        {
            TO.F_LABEL: [
                numpy.array([1, 1]),
                numpy.array([1, 1]),
                numpy.array([1]),
            ],
            TO.F_PARENT_IMAGE_NUMBER: [
                numpy.array([0, 0]),
                numpy.array([1, 1]),
                numpy.array([2]),
            ],
            TO.F_PARENT_OBJECT_NUMBER: [
                numpy.array([0, 0]),
                numpy.array([1, 2]),
                numpy.array([1]),
            ],
            TO.F_LIFETIME: [
                numpy.array([1, 1]),
                numpy.array([2, 2]),
                numpy.array([3]),
            ],
            TO.F_FINAL_AGE: [
                numpy.array([numpy.nan, numpy.nan]),
                numpy.array([numpy.nan, numpy.nan]),
                numpy.array([3]),
            ],
            TO.F_NEW_OBJECT_COUNT: [2, 0, 0],
            TO.F_LOST_OBJECT_COUNT: [0, 0, 0],
            TO.F_MERGE_COUNT: [0, 0, 1],
            TO.F_SPLIT_COUNT: [0, 0, 0],
        },
    )
def test_dont_merge():
    """Don't merge because of low alternative merge cost"""
    TO = cellprofiler.modules.trackobjects
    workspace, module = make_lap2_workspace(
        numpy.array(
            [
                [0, 1, 0, 0, 110, 110, 25],
                [0, 2, 0, 0, 90, 90, 25],
                [1, 1, 1, 1, 110, 110, 25],
                [1, 2, 1, 2, 90, 90, 25],
                [2, 1, 2, 1, 100, 100, 50],
            ]
        ),
        3,
    )
    assert isinstance(module, TO.TrackObjects)
    # The merge costs 2 * 10 * sqrt(2) (between 28 and 29); an
    # alternative of 28 undercuts it, so the tracks stay separate.
    module.merge_cost.value = 28
    module.max_merge_score.value = 30
    module.run_as_data_tool(workspace)
    labels = workspace.measurements.get_all_measurements(
        OBJECT_NAME, module.measurement_name(TO.F_LABEL)
    )
    assert len(labels) == 3
    assert tuple(labels[0]) == (1, 2)
    assert tuple(labels[1]) == (1, 2)
    assert tuple(labels[2]) == (1,)
def test_filter_merge():
    """Filter a merge whose score exceeds the maximum merge score"""
    TO = cellprofiler.modules.trackobjects
    workspace, module = make_lap2_workspace(
        numpy.array(
            [
                [0, 1, 0, 0, 110, 110, 25],
                [0, 2, 0, 0, 90, 90, 25],
                [1, 1, 1, 1, 110, 110, 25],
                [1, 2, 1, 2, 90, 90, 25],
                [2, 1, 2, 1, 100, 100, 50],
            ]
        ),
        3,
    )
    assert isinstance(module, TO.TrackObjects)
    # The merge costs 2 * 10 * sqrt(2) (between 28 and 29); the max
    # merge score of 28 filters it out, so the tracks stay separate.
    module.merge_cost.value = 30
    module.max_merge_score.value = 28
    module.run_as_data_tool(workspace)
    labels = workspace.measurements.get_all_measurements(
        OBJECT_NAME, module.measurement_name(TO.F_LABEL)
    )
    assert len(labels) == 3
    assert tuple(labels[0]) == (1, 2)
    assert tuple(labels[1]) == (1, 2)
    assert tuple(labels[2]) == (1,)
def test_img_1111():
    """Regression test of img-1111"""
    # Columns appear to follow make_lap2_workspace's row format
    # (frame index, object number, predecessor frame, predecessor
    # object number, x, y, size) — inferred from the other callers;
    # TODO confirm against make_lap2_workspace.
    data = numpy.array(
        [
            [9, 1, 0, 0, 225, 20, 50],
            [9, 2, 0, 0, 116, 223, 31],
            [25, 3, 0, 0, 43, 291, 26],
            [28, 4, 0, 0, 410, 436, 24],
            [29, 5, 0, 0, 293, 166, 23],
            [29, 4, 29, 1, 409, 436, 24],
            [30, 5, 30, 1, 293, 167, 30],
            [32, 6, 0, 0, 293, 164, 69],
            [33, 6, 33, 1, 292, 166, 37],
            [35, 7, 0, 0, 290, 165, 63],
            [36, 7, 36, 1, 290, 166, 38],
            [39, 8, 0, 0, 287, 163, 28],
            [40, 8, 40, 1, 287, 163, 21],
            [44, 9, 0, 0, 54, 288, 20],
            [77, 10, 0, 0, 514, 211, 49],
            [78, 10, 78, 1, 514, 210, 42],
            [79, 10, 79, 1, 514, 209, 73],
            [80, 10, 80, 1, 514, 208, 49],
            [81, 10, 81, 1, 515, 209, 38],
            [98, 11, 0, 0, 650, 54, 24],
            [102, 12, 0, 0, 586, 213, 46],
            [104, 13, 0, 0, 586, 213, 27],
            [106, 14, 0, 0, 587, 212, 54],
            [107, 14, 107, 1, 587, 212, 40],
            [113, 15, 0, 0, 17, 145, 51],
            [116, 16, 0, 0, 45, 153, 21],
            [117, 17, 0, 0, 53, 148, 44],
            [117, 18, 0, 0, 90, 278, 87],
            [119, 19, 0, 0, 295, 184, 75],
            [120, 19, 120, 1, 295, 184, 79],
            [121, 19, 121, 1, 295, 182, 75],
            [123, 20, 0, 0, 636, 7, 20],
            [124, 20, 124, 1, 635, 7, 45],
            [124, 21, 0, 0, 133, 171, 22],
            [124, 22, 0, 0, 417, 365, 65],
            [126, 23, 0, 0, 125, 182, 77],
            [126, 24, 0, 0, 358, 306, 48],
            [126, 25, 0, 0, 413, 366, 60],
            [127, 26, 0, 0, 141, 173, 71],
            [127, 25, 127, 3, 413, 366, 35],
            [128, 27, 0, 0, 131, 192, 76],
            [129, 28, 0, 0, 156, 182, 74],
            [130, 29, 0, 0, 147, 194, 56],
            [131, 30, 0, 0, 152, 185, 56],
            [132, 30, 132, 1, 154, 188, 78],
            [133, 31, 0, 0, 142, 186, 64],
            [133, 32, 0, 0, 91, 283, 23],
            [134, 33, 0, 0, 150, 195, 80],
        ]
    )
    # NOTE: only the first 8 rows are actually used — the rest of the
    # table above is dead data kept for reference.
    data = data[:8, :]
    workspace, module = make_lap2_workspace(data, numpy.max(data[:, 0]) + 1)
    # The regression check is simply that the data tool runs without error.
    module.run_as_data_tool(workspace)
def test_multi_group():
    """Run several tests in different groups"""
    TO = cellprofiler.modules.trackobjects
    workspace, module = make_lap2_workspace(
        numpy.array(
            [
                [0, 1, 0, 0, 1, 2, 25],
                [2, 2, 0, 0, 101, 102, 25],
                [3, 1, 0, 0, 100, 100, 50],
                [4, 1, 4, 1, 110, 110, 25],
                [4, 2, 0, 0, 90, 90, 25],
                [5, 1, 5, 1, 113, 114, 25],
                [5, 2, 5, 2, 86, 87, 25],
                [6, 1, 0, 0, 110, 110, 25],
                [6, 2, 0, 0, 90, 90, 25],
                [7, 1, 7, 1, 110, 110, 25],
                [7, 2, 7, 2, 90, 90, 25],
                [8, 1, 8, 1, 104, 102, 50],
            ]
        ),
        9,
        group_numbers=[1, 1, 1, 2, 2, 2, 3, 3, 3],
        group_indexes=[1, 2, 3, 1, 2, 3, 1, 2, 3],
    )
    assert isinstance(module, TO.TrackObjects)
    # Group 1 exercises gap bridging (cost ~141, alternative 142 so
    # the bridge wins), group 2 a split, group 3 a merge.
    module.gap_cost.value = 142
    module.max_gap_score.value = 142
    module.split_cost.value = 30
    module.max_split_score.value = 30
    module.merge_cost.value = 30
    module.max_merge_score.value = 30
    module.run_as_data_tool(workspace)
    distance = numpy.array([numpy.sqrt(2 * 100 * 100)])
    d200 = numpy.sqrt(200)
    tot = numpy.sqrt(13 ** 2 + 14 ** 2)
    lin = tot / (d200 + 5)
    check_measurements(
        workspace,
        {
            TO.F_LABEL: [
                numpy.array([1]),
                numpy.zeros(0),
                numpy.array([1]),
                numpy.array([1]),
                numpy.array([1, 1]),
                numpy.array([1, 1]),
                numpy.array([1, 1]),
                numpy.array([1, 1]),
                numpy.array([1]),
            ],
            TO.F_PARENT_IMAGE_NUMBER: [
                numpy.array([0]),
                numpy.zeros(0),
                numpy.array([1]),
                numpy.array([0]),
                numpy.array([4, 4]),
                numpy.array([5, 5]),
                numpy.array([0, 0]),
                numpy.array([7, 7]),
                numpy.array([8]),
            ],
            TO.F_PARENT_OBJECT_NUMBER: [
                numpy.array([0]),
                numpy.zeros(0),
                numpy.array([1]),
                numpy.array([0]),
                numpy.array([1, 1]),
                numpy.array([1, 2]),
                numpy.array([0, 0]),
                numpy.array([1, 2]),
                numpy.array([1]),
            ],
            TO.F_DISPLACEMENT: [
                numpy.zeros(1),
                numpy.zeros(0),
                distance,
                numpy.zeros(1),
                numpy.ones(2) * d200,
                numpy.array([tot, tot]),
                numpy.zeros(2),
                numpy.zeros(2),
                numpy.array([10]),
            ],
            TO.F_INTEGRATED_DISTANCE: [
                numpy.zeros(1),
                numpy.zeros(0),
                distance,
                numpy.zeros(1),
                numpy.ones(2) * d200,
                numpy.ones(2) * d200 + 5,
                numpy.zeros(2),
                numpy.zeros(2),
                numpy.array([10]),
            ],
            TO.F_DISTANCE_TRAVELED: [
                numpy.zeros(1),
                numpy.zeros(0),
                distance,
                numpy.zeros(1),
                numpy.ones(2) * d200,
                numpy.array([5, 5]),
                numpy.zeros(2),
                numpy.zeros(2),
                numpy.array([10]),
            ],
            TO.F_TRAJECTORY_X: [
                numpy.zeros(1),
                numpy.zeros(0),
                numpy.array([100]),
                numpy.zeros(1),
                numpy.array([10, -10]),
                numpy.array([3, -4]),
                numpy.zeros(2),
                numpy.zeros(2),
                numpy.array([-6]),
            ],
            TO.F_TRAJECTORY_Y: [
                numpy.zeros(1),
                numpy.zeros(0),
                numpy.array([100]),
                numpy.zeros(1),
                numpy.array([10, -10]),
                numpy.array([4, -3]),
                numpy.zeros(2),
                numpy.zeros(2),
                numpy.array([-8]),
            ],
            TO.F_LINEARITY: [
                numpy.array([numpy.nan]),
                numpy.zeros(0),
                numpy.array([1]),
                numpy.array([numpy.nan]),
                numpy.array([1, 1]),
                numpy.array([lin, lin]),
                numpy.array([numpy.nan, numpy.nan]),
                numpy.array([numpy.nan, numpy.nan]),
                numpy.ones(1),
            ],
            TO.F_LIFETIME: [
                numpy.ones(1),
                numpy.zeros(0),
                numpy.array([2]),
                numpy.ones(1),
                numpy.array([2, 2]),
                numpy.array([3, 3]),
                numpy.ones(2),
                numpy.array([2, 2]),
                numpy.array([3]),
            ],
            TO.F_FINAL_AGE: [
                numpy.array([numpy.nan]),
                numpy.zeros(0),
                numpy.array([2]),
                numpy.array([numpy.nan]),
                numpy.array([numpy.nan, numpy.nan]),
                numpy.array([3, 3]),
                numpy.array([numpy.nan, numpy.nan]),
                numpy.array([numpy.nan, numpy.nan]),
                numpy.array([3]),
            ],
            TO.F_NEW_OBJECT_COUNT: [1, 0, 0, 1, 0, 0, 2, 0, 0],
            TO.F_LOST_OBJECT_COUNT: [0, 0, 0, 0, 0, 0, 0, 0, 0],
            TO.F_MERGE_COUNT: [0, 0, 0, 0, 0, 0, 0, 0, 1],
            TO.F_SPLIT_COUNT: [0, 0, 0, 0, 1, 0, 0, 0, 0],
        },
    )
def test_filter_by_final_age():
    """Filter an object by the final age"""
    TO = cellprofiler.modules.trackobjects
    workspace, module = make_lap2_workspace(
        numpy.array(
            [
                [0, 1, 0, 0, 100, 100, 50],
                [1, 1, 1, 1, 110, 110, 50],
                [1, 2, 0, 0, 90, 90, 25],
                [2, 1, 2, 1, 100, 100, 50],
            ]
        ),
        3,
    )
    assert isinstance(module, TO.TrackObjects)
    # The split score is between 14 and 15; an alternative split cost
    # of 28 inhibits the split.
    module.split_cost.value = 28
    module.max_split_score.value = 30
    # The merge costs 2 * 10 * sqrt(2), between 28 and 29, so an
    # alternative of 28 inhibits the merge too.
    module.merge_cost.value = 28
    module.max_merge_score.value = 30
    # Filter out any track that lives for only a single frame.
    module.wants_lifetime_filtering.value = True
    module.wants_minimum_lifetime.value = True
    module.min_lifetime.value = 1
    module.wants_maximum_lifetime.value = False
    module.max_lifetime.value = 100
    module.run_as_data_tool(workspace)
    check_measurements(
        workspace,
        {
            TO.F_LABEL: [
                numpy.array([1]),
                numpy.array([1, numpy.nan]),
                numpy.array([1]),
            ],
            TO.F_PARENT_IMAGE_NUMBER: [
                numpy.array([0]),
                numpy.array([1, 0]),
                numpy.array([2]),
            ],
            TO.F_PARENT_OBJECT_NUMBER: [
                numpy.array([0]),
                numpy.array([1, 0]),
                numpy.array([1]),
            ],
            TO.F_LIFETIME: [
                numpy.array([1]),
                numpy.array([2, 1]),
                numpy.array([3]),
            ],
            TO.F_FINAL_AGE: [
                numpy.array([numpy.nan]),
                numpy.array([numpy.nan, 1]),
                numpy.array([3]),
            ],
            TO.F_NEW_OBJECT_COUNT: [1, 1, 0],
            TO.F_LOST_OBJECT_COUNT: [0, 0, 1],
            TO.F_MERGE_COUNT: [0, 0, 0],
            TO.F_SPLIT_COUNT: [0, 0, 0],
        },
    )
def test_mitosis():
    """Track a mitosis"""
    TO = cellprofiler.modules.trackobjects
    workspace, module = make_lap2_workspace(
        numpy.array(
            [
                [0, 1, 0, 0, 103, 104, 50],
                [1, 2, 0, 0, 110, 110, 25],
                [1, 3, 0, 0, 90, 90, 25],
                [2, 2, 2, 1, 113, 114, 25],
                [2, 3, 2, 2, 86, 87, 25],
            ]
        ),
        3,
    )
    assert isinstance(module, TO.TrackObjects)
    # The parent is off by sqrt(3*3 + 4*4) = 5, so a mitosis
    # alternative of 4 loses and 6 wins.
    module.merge_cost.value = 1
    module.gap_cost.value = 1
    module.mitosis_cost.value = 6
    module.mitosis_max_distance.value = 20
    module.run_as_data_tool(workspace)
    check_measurements(
        workspace,
        {
            TO.F_LABEL: [
                numpy.array([1]),
                numpy.array([1, 1]),
                numpy.array([1, 1]),
            ],
            TO.F_PARENT_IMAGE_NUMBER: [
                numpy.array([0]),
                numpy.array([1, 1]),
                numpy.array([2, 2]),
            ],
            TO.F_PARENT_OBJECT_NUMBER: [
                numpy.array([0]),
                numpy.array([1, 1]),
                numpy.array([1, 2]),
            ],
            TO.F_LIFETIME: [
                numpy.ones(1),
                numpy.array([2, 2]),
                numpy.array([3, 3]),
            ],
            TO.F_FINAL_AGE: [
                numpy.array([numpy.nan]),
                numpy.array([numpy.nan, numpy.nan]),
                numpy.array([3, 3]),
            ],
            TO.F_LINK_TYPE: [
                numpy.array([TO.LT_NONE]),
                numpy.array([TO.LT_MITOSIS, TO.LT_MITOSIS]),
                numpy.array([TO.LT_NONE, TO.LT_NONE]),
            ],
            TO.F_MITOSIS_SCORE: [
                numpy.array([numpy.nan]),
                numpy.array([5, 5]),
                numpy.array([numpy.nan, numpy.nan]),
            ],
            TO.F_NEW_OBJECT_COUNT: [1, 0, 0],
            TO.F_LOST_OBJECT_COUNT: [0, 0, 0],
            TO.F_MERGE_COUNT: [0, 0, 0],
            TO.F_SPLIT_COUNT: [0, 1, 0],
        },
    )
def test_no_mitosis():
    """Don't link daughters to the parent when the mitosis cost is too low"""
    TO = cellprofiler.modules.trackobjects
    workspace, module = make_lap2_workspace(
        numpy.array(
            [
                [0, 1, 0, 0, 103, 104, 50],
                [1, 2, 0, 0, 110, 110, 25],
                [1, 3, 0, 0, 90, 90, 25],
                [2, 2, 2, 1, 113, 114, 25],
                [2, 3, 2, 2, 86, 87, 25],
            ]
        ),
        3,
    )
    assert isinstance(module, TO.TrackObjects)
    # The parent is off by sqrt(3*3 + 4*4) = 5; a mitosis alternative
    # of 4 loses, so the daughters start fresh tracks.
    module.merge_cost.value = 1
    module.mitosis_cost.value = 4
    module.mitosis_max_distance.value = 20
    module.gap_cost.value = 1
    module.run_as_data_tool(workspace)
    check_measurements(
        workspace,
        {
            TO.F_LABEL: [
                numpy.array([1]),
                numpy.array([2, 3]),
                numpy.array([2, 3]),
            ],
            TO.F_PARENT_IMAGE_NUMBER: [
                numpy.array([0]),
                numpy.array([0, 0]),
                numpy.array([2, 2]),
            ],
            TO.F_PARENT_OBJECT_NUMBER: [
                numpy.array([0]),
                numpy.array([0, 0]),
                numpy.array([1, 2]),
            ],
            TO.F_LIFETIME: [
                numpy.ones(1),
                numpy.array([1, 1]),
                numpy.array([2, 2]),
            ],
            TO.F_FINAL_AGE: [
                numpy.array([1]),
                numpy.array([numpy.nan, numpy.nan]),
                numpy.array([2, 2]),
            ],
            TO.F_NEW_OBJECT_COUNT: [1, 2, 0],
            TO.F_LOST_OBJECT_COUNT: [0, 1, 0],
            TO.F_MERGE_COUNT: [0, 0, 0],
            TO.F_SPLIT_COUNT: [0, 0, 0],
        },
    )
def test_mitosis_distance_filter():
    """Reject a mitosis whose daughters are beyond the max distance"""
    TO = cellprofiler.modules.trackobjects
    workspace, module = make_lap2_workspace(
        numpy.array(
            [
                [0, 1, 0, 0, 103, 104, 50],
                [1, 2, 0, 0, 110, 110, 25],
                [1, 3, 0, 0, 90, 90, 25],
                [2, 2, 2, 1, 113, 114, 25],
                [2, 3, 2, 2, 86, 87, 25],
            ]
        ),
        3,
    )
    assert isinstance(module, TO.TrackObjects)
    # A mitosis cost of 6 would normally win (parent offset is 5),
    # but the max distance of 15 filters the mitosis out.
    module.merge_cost.value = 1
    module.mitosis_cost.value = 6
    module.mitosis_max_distance.value = 15
    module.gap_cost.value = 1
    module.run_as_data_tool(workspace)
    check_measurements(
        workspace,
        {
            TO.F_LABEL: [
                numpy.array([1]),
                numpy.array([2, 3]),
                numpy.array([2, 3]),
            ],
            TO.F_PARENT_IMAGE_NUMBER: [
                numpy.array([0]),
                numpy.array([0, 0]),
                numpy.array([2, 2]),
            ],
            TO.F_PARENT_OBJECT_NUMBER: [
                numpy.array([0]),
                numpy.array([0, 0]),
                numpy.array([1, 2]),
            ],
            TO.F_LIFETIME: [
                numpy.ones(1),
                numpy.array([1, 1]),
                numpy.array([2, 2]),
            ],
            TO.F_FINAL_AGE: [
                numpy.array([1]),
                numpy.array([numpy.nan, numpy.nan]),
                numpy.array([2, 2]),
            ],
            TO.F_NEW_OBJECT_COUNT: [1, 2, 0],
            TO.F_LOST_OBJECT_COUNT: [0, 1, 0],
            TO.F_MERGE_COUNT: [0, 0, 0],
            TO.F_SPLIT_COUNT: [0, 0, 0],
        },
    )
def test_alternate_child_mitoses():
    """LAP should pick the best of two possible child alternates"""
    TO = cellprofiler.modules.trackobjects
    workspace, module = make_lap2_workspace(
        numpy.array(
            [
                [0, 1, 0, 0, 103, 104, 50],
                [1, 2, 0, 0, 110, 110, 25],
                [1, 3, 0, 0, 91, 91, 25],
                [1, 4, 0, 0, 90, 90, 25],
                [2, 2, 2, 1, 113, 114, 25],
                [2, 3, 2, 2, 86, 87, 25],
            ]
        ),
        3,
    )
    assert isinstance(module, TO.TrackObjects)
    module.merge_cost.value = 1
    module.gap_cost.value = 1
    module.mitosis_cost.value = 6
    module.mitosis_max_distance.value = 20
    module.run_as_data_tool(workspace)
    # Objects 2 and 3 become daughters of object 1; the extra
    # candidate (object 4) starts its own track.
    check_measurements(
        workspace,
        {
            TO.F_LABEL: [
                numpy.array([1]),
                numpy.array([1, 1, 2]),
                numpy.array([1, 1]),
            ],
            TO.F_PARENT_IMAGE_NUMBER: [
                numpy.array([0]),
                numpy.array([1, 1, 0]),
                numpy.array([2, 2]),
            ],
            TO.F_PARENT_OBJECT_NUMBER: [
                numpy.array([0]),
                numpy.array([1, 1, 0]),
                numpy.array([1, 2]),
            ],
        },
    )
def test_alternate_parent_mitoses():
    """LAP should pick the best of two possible parent alternates"""
    TO = cellprofiler.modules.trackobjects
    workspace, module = make_lap2_workspace(
        numpy.array(
            [
                [0, 1, 0, 0, 100, 100, 50],
                [0, 2, 0, 0, 103, 104, 50],
                [1, 3, 0, 0, 110, 110, 25],
                [1, 4, 0, 0, 90, 90, 25],
                [2, 3, 2, 1, 113, 114, 25],
                [2, 4, 2, 2, 86, 87, 25],
            ]
        ),
        3,
    )
    assert isinstance(module, TO.TrackObjects)
    module.merge_cost.value = 1
    module.gap_cost.value = 1
    module.mitosis_cost.value = 6
    module.mitosis_max_distance.value = 20
    module.run_as_data_tool(workspace)
    # The daughters are assigned to the better-placed parent (object 1).
    check_measurements(
        workspace,
        {
            TO.F_LABEL: [
                numpy.array([1, 2]),
                numpy.array([1, 1]),
                numpy.array([1, 1]),
            ],
            TO.F_PARENT_IMAGE_NUMBER: [
                numpy.array([0, 0]),
                numpy.array([1, 1]),
                numpy.array([2, 2]),
            ],
            TO.F_PARENT_OBJECT_NUMBER: [
                numpy.array([0, 0]),
                numpy.array([1, 1]),
                numpy.array([1, 2]),
            ],
        },
    )
class MonkeyPatchedDelete(object):
    """Monkey patch numpy.delete inside of a scope.

    For regression test of issue #1571 - negative
    indices in calls to numpy.delete

    Usage:
        with MonkeyPatchedDelete():
            ... do test ...
    """

    def __init__(self, test=None):
        # Bug fix: the original assigned to a local (`__test = test`),
        # silently discarding the test case instead of storing it.
        self.__test = test

    def __enter__(self):
        # Save the real numpy.delete and install the wrapper.
        self.old_delete = numpy.delete
        numpy.delete = self.monkey_patched_delete

    def __exit__(self, type, value, traceback):
        # Restore the real numpy.delete even if the body raised.
        numpy.delete = self.old_delete

    def monkey_patched_delete(self, array, indices, axis):
        # When a test case was supplied, verify no negative indices are
        # passed (issue #1571); otherwise just delegate.
        if self.__test is not None:
            self.__test.assertTrue(numpy.all(indices >= 0))
        return self.old_delete(array, indices, axis)
def test_save_image():
    """The tracking image produced by run() should match the input shape."""
    module = cellprofiler.modules.trackobjects.TrackObjects()
    module.set_module_num(1)
    module.object_name.value = OBJECT_NAME
    module.pixel_radius.value = 50
    module.wants_image.value = True
    module.image_name.value = "outimage"
    measurements = cellprofiler_core.measurement.Measurements()
    measurements.add_image_measurement(GROUP_NUMBER, 1)
    measurements.add_image_measurement(GROUP_INDEX, 1)
    pipeline = Pipeline()
    pipeline.add_module(module)
    image_set_list = ImageSetList()
    module.prepare_run(
        Workspace(pipeline, module, None, None, measurements, image_set_list)
    )
    # (removed an unused `first = True` local from the original)
    object_set = ObjectSet()
    objects = Objects()
    objects.segmented = numpy.zeros((640, 480), int)
    object_set.add_objects(objects, OBJECT_NAME)
    image_set = image_set_list.get_image_set(0)
    workspace = Workspace(
        pipeline, module, image_set, object_set, measurements, image_set_list
    )
    module.run(workspace)
    image = workspace.image_set.get_image(module.image_name.value)
    shape = image.pixel_data.shape
    assert shape[0] == 640
    assert shape[1] == 480
def test_get_no_gap_pair_scores():
    """get_gap_pair_scores yields no candidate pairs for degenerate inputs."""
    degenerate_cases = [
        (numpy.zeros((0, 3)), numpy.zeros((0, 3)), 1),
        (numpy.ones((1, 3)), numpy.ones((1, 3)), 1),
        (numpy.ones((2, 3)), numpy.ones((2, 3)), 1),
    ]
    for first_frames, last_frames, max_gap in degenerate_cases:
        module = cellprofiler.modules.trackobjects.TrackObjects()
        pairs, scores = module.get_gap_pair_scores(first_frames, last_frames, max_gap)
        assert tuple(pairs.shape) == (0, 2)
        assert len(scores) == 0
def test_get_gap_pair_scores():
    """Exercise TrackObjects.get_gap_pair_scores on hand-picked track ends.

    From the indexing below, columns 0-1 of L (track last frames) and F
    (track first frames) are x/y coordinates, column 2 looks like a frame
    index and column 6 an area-like value used for the rho penalty --
    TODO confirm against the implementation.
    """
    L = numpy.array(
        [
            [0.0, 0.0, 1, 0, 0, 0, 1],
            [1.0, 1.0, 5, 0, 0, 0, 1],
            [3.0, 3.0, 8, 0, 0, 0, 1],
            [2.0, 2.0, 9, 0, 0, 0, 1],
            [0.0, 0.0, 9, 0, 0, 0, 1],
            [0.0, 0.0, 9, 0, 0, 0, 1],
        ]
    )
    F = numpy.array(
        [
            [0.0, 0.0, 0, 0, 0, 0, 1],
            [1.0, 0.0, 4, 0, 0, 0, 1],
            [3.0, 0.0, 6, 0, 0, 0, 1],
            [4.0, 0.0, 7, 0, 0, 0, 1],
            [1.0, 0.0, 2, 0, 0, 0, 2],
            [1.0, 0.0, 2, 0, 0, 0, 0.5],
        ]
    )
    # Pairs (i, j): track i's end may bridge to track j's start within max_gap=4.
    expected = numpy.array([[0, 1], [0, 4], [0, 5], [1, 2], [1, 3]])
    # Euclidean distance between the paired endpoints.
    expected_d = numpy.sqrt(
        numpy.sum((L[expected[:, 0], :2] - F[expected[:, 1], :2]) ** 2, 1)
    )
    # Size-change penalty factor applied to each pair's distance.
    expected_rho = numpy.array([1, 2, 2, 1, 1])
    t = cellprofiler.modules.trackobjects.TrackObjects()
    a, d = t.get_gap_pair_scores(F, L, 4)
    # Sort rows so the comparison does not depend on return order.
    order = numpy.lexsort((a[:, 1], a[:, 0]))
    a, d = a[order], d[order]
    numpy.testing.assert_array_equal(a, expected)
    numpy.testing.assert_array_almost_equal(d, expected_d * expected_rho)
def test_neighbour_track_nothing():
    """Run TrackObjects (Follow Neighbors) on an empty labels matrix."""
    recorded_columns = []

    def setup(module, workspace, index, columns=recorded_columns):
        if workspace is not None and index == 0:
            columns += module.get_measurement_columns(workspace.pipeline)
            module.tracking_method.value = "Follow Neighbors"

    measurements = runTrackObjects(
        (numpy.zeros((10, 10), int), numpy.zeros((10, 10), int)), setup
    )
    prefix = cellprofiler.modules.trackobjects.F_PREFIX
    # Every declared object-level column must actually be measured...
    features = [
        f
        for f in measurements.get_feature_names(OBJECT_NAME)
        if f.startswith(prefix)
    ]
    assert all(
        col[1] in features for col in recorded_columns if col[0] == OBJECT_NAME
    )
    # ...and each per-object feature must exist but hold no values.
    for feature in cellprofiler.modules.trackobjects.F_ALL:
        name = "_".join((prefix, feature, "50"))
        assert name in features
        assert len(measurements.get_current_measurement(OBJECT_NAME, name)) == 0
    # Same check for the image-level features, which must all be zero.
    features = [
        f
        for f in measurements.get_feature_names("Image")
        if f.startswith(prefix)
    ]
    assert all(col[1] in features for col in recorded_columns if col[0] == "Image")
    for feature in cellprofiler.modules.trackobjects.F_IMAGE_ALL:
        name = "_".join((prefix, feature, OBJECT_NAME, "50"))
        assert name in features
        assert measurements.get_current_image_measurement(name) == 0
def test_00_neighbour_track_one_then_nothing():
    """Run track objects on an object that disappears.

    Regression test of IMG-1090.
    """
    labels = numpy.zeros((10, 10), int)
    labels[3:6, 2:7] = 1

    def setup(module, workspace, index):
        if workspace is not None and index == 0:
            module.tracking_method.value = "Follow Neighbors"

    # Second frame is empty, so the single object is lost.
    measurements = runTrackObjects((labels, numpy.zeros((10, 10), int)), setup)
    lost_feature = "_".join(
        (
            cellprofiler.modules.trackobjects.F_PREFIX,
            cellprofiler.modules.trackobjects.F_LOST_OBJECT_COUNT,
            OBJECT_NAME,
            "50",
        )
    )
    assert measurements.get_current_image_measurement(lost_feature) == 1
def test_neighbour_track_one_by_distance():
    """Track an object that doesn't move."""
    labels = numpy.zeros((10, 10), int)
    labels[3:6, 2:7] = 1

    def setup(module, workspace, frame_index):
        if frame_index == 0:
            module.pixel_radius.value = 1
            module.tracking_method.value = "Follow Neighbors"

    measurements = runTrackObjects((labels, labels), setup)
    trackobjects = cellprofiler.modules.trackobjects

    def object_value(feature):
        name = "_".join((trackobjects.F_PREFIX, feature, "1"))
        values = measurements.get_current_measurement(OBJECT_NAME, name)
        assert len(values) == 1
        return values[0]

    # A stationary object accumulates no motion at all.
    for motion_feature in (
        trackobjects.F_TRAJECTORY_X,
        trackobjects.F_TRAJECTORY_Y,
        trackobjects.F_DISTANCE_TRAVELED,
        trackobjects.F_INTEGRATED_DISTANCE,
    ):
        assert round(abs(object_value(motion_feature) - 0), 7) == 0
    assert object_value(trackobjects.F_LABEL) == 1
    assert object_value(trackobjects.F_PARENT_OBJECT_NUMBER) == 1
    assert object_value(trackobjects.F_PARENT_IMAGE_NUMBER) == 1
    assert object_value(trackobjects.F_LIFETIME) == 2

    def image_value(feature):
        name = "_".join((trackobjects.F_PREFIX, feature, OBJECT_NAME, "1"))
        return measurements.get_current_image_measurement(name)

    # No objects appear, vanish, split or merge in this two-frame movie.
    for count_feature in (
        trackobjects.F_NEW_OBJECT_COUNT,
        trackobjects.F_LOST_OBJECT_COUNT,
        trackobjects.F_SPLIT_COUNT,
        trackobjects.F_MERGE_COUNT,
    ):
        assert image_value(count_feature) == 0
    check_relationships(measurements, [1], [1], [2], [1])
def test_neighbour_track_one_moving():
    """Track an object that moves"""
    labels_list = []
    step = 0
    prev_i, prev_j = (0, 0)
    for off_i, off_j in ((0, 0), (2, 0), (2, 1), (0, 1)):
        step = off_i - prev_i + off_j - prev_j
        prev_i, prev_j = (off_i, off_j)
        frame = numpy.zeros((10, 10), int)
        frame[4 + off_i : 7 + off_i, 4 + off_j : 7 + off_j] = 1
        labels_list.append(frame)

    def setup(module, workspace, frame_index):
        if frame_index == 0:
            module.pixel_radius.value = 3
            module.tracking_method.value = "Follow Neighbors"

    measurements = runTrackObjects(labels_list, setup)
    trackobjects = cellprofiler.modules.trackobjects

    def check(feature, expected):
        # One value per frame, one object per frame.
        name = "_".join((trackobjects.F_PREFIX, feature, "3"))
        value_set = measurements.get_all_measurements(OBJECT_NAME, name)
        assert len(expected) == len(value_set)
        for values, want in zip(value_set, expected):
            assert len(values) == 1
            assert round(abs(values[0] - want), 7) == 0

    check(trackobjects.F_TRAJECTORY_X, [0, 0, 1, 0])
    check(trackobjects.F_TRAJECTORY_Y, [0, 2, 0, -2])
    check(trackobjects.F_DISTANCE_TRAVELED, [0, 2, 1, 2])
    check(trackobjects.F_INTEGRATED_DISTANCE, [0, 2, 3, 5])
    check(trackobjects.F_LABEL, [1, 1, 1, 1])
    check(trackobjects.F_LIFETIME, [1, 2, 3, 4])
    check(
        trackobjects.F_LINEARITY,
        [1, 1, numpy.sqrt(5) / 3, 1.0 / 5.0],
    )

    def image_value(feature):
        name = "_".join((trackobjects.F_PREFIX, feature, OBJECT_NAME, "3"))
        return measurements.get_current_image_measurement(name)

    for count_feature in (
        trackobjects.F_NEW_OBJECT_COUNT,
        trackobjects.F_LOST_OBJECT_COUNT,
        trackobjects.F_SPLIT_COUNT,
        trackobjects.F_MERGE_COUNT,
    ):
        assert image_value(count_feature) == 0
    # Each frame's single object should be the parent of the next frame's.
    image_numbers = numpy.arange(1, len(labels_list) + 1)
    object_numbers = numpy.ones(len(image_numbers))
    check_relationships(
        measurements,
        image_numbers[:-1],
        object_numbers[:-1],
        image_numbers[1:],
        object_numbers[1:],
    )
def test_neighbour_track_negative():
    """Track unrelated objects"""
    frame1 = numpy.zeros((10, 10), int)
    frame1[1:5, 1:5] = 1
    frame2 = numpy.zeros((10, 10), int)
    frame2[6:9, 6:9] = 1

    def setup(module, workspace, frame_index):
        if frame_index == 0:
            module.pixel_radius.value = 1
            module.tracking_method.value = "Follow Neighbors"

    measurements = runTrackObjects((frame1, frame2), setup)
    trackobjects = cellprofiler.modules.trackobjects

    def object_value(feature):
        name = "_".join((trackobjects.F_PREFIX, feature, "1"))
        values = measurements.get_current_measurement(OBJECT_NAME, name)
        assert len(values) == 1
        return values[0]

    # The objects are too far apart to match within the radius, so the
    # second frame's object starts a new track with no parent.
    assert object_value(trackobjects.F_LABEL) == 2
    assert object_value(trackobjects.F_PARENT_OBJECT_NUMBER) == 0

    def image_value(feature):
        name = "_".join((trackobjects.F_PREFIX, feature, OBJECT_NAME, "1"))
        return measurements.get_current_image_measurement(name)

    assert image_value(trackobjects.F_NEW_OBJECT_COUNT) == 1
    assert image_value(trackobjects.F_LOST_OBJECT_COUNT) == 1
    assert image_value(trackobjects.F_SPLIT_COUNT) == 0
    assert image_value(trackobjects.F_MERGE_COUNT) == 0
def test_neighbour_track_ambiguous():
    """Track disambiguation from among two possible parents"""
    frame1 = numpy.zeros((20, 20), int)
    frame1[1:4, 1:4] = 1
    frame1[16:19, 16:19] = 2
    frame2 = numpy.zeros((20, 20), int)
    frame2[10:15, 10:15] = 1

    def setup(module, workspace, frame_index):
        if frame_index == 0:
            module.pixel_radius.value = 20
            module.tracking_method.value = "Follow Neighbors"

    measurements = runTrackObjects((frame1, frame2), setup)
    trackobjects = cellprofiler.modules.trackobjects

    def object_value(feature):
        name = "_".join((trackobjects.F_PREFIX, feature, "20"))
        values = measurements.get_current_measurement(OBJECT_NAME, name)
        assert len(values) == 1
        return values[0]

    # Both first-frame objects are within the radius; the tracker should
    # continue the track of object 2, the better-matched parent.
    assert object_value(trackobjects.F_LABEL) == 2
    assert object_value(trackobjects.F_PARENT_OBJECT_NUMBER) == 2
def test_neighbour_track_group_with_drop():
    """Track groups with one lost.

    Four single-pixel objects translate together from the top-left to the
    bottom-right corner; object 3 is absent from the second frame.  The
    relationship check asserts the three survivors still pair with their
    first-frame counterparts.
    """
    labels1 = numpy.zeros((20, 20), int)
    labels1[2, 2] = 1
    labels1[4, 2] = 2
    labels1[2, 4] = 3
    labels1[4, 4] = 4
    labels2 = numpy.zeros((20, 20), int)
    labels2[16, 16] = 1
    labels2[18, 16] = 2
    # labels2[16,18] = 3 is no longer present
    labels2[18, 18] = 4

    def fn(module, workspace, idx):
        if idx == 0:
            module.drop_cost.value = 100  # make it always try to match
            module.pixel_radius.value = 200
            module.average_cell_diameter.value = 5
            module.tracking_method.value = "Follow Neighbors"

    measurements = runTrackObjects((labels1, labels2), fn)
    # NOTE(review): the original defined an unused per-object measurement
    # helper here; it was dead code and has been removed.
    check_relationships(measurements, [1, 1, 1], [1, 2, 4], [2, 2, 2], [1, 2, 4])
| 37.023199
| 88
| 0.572358
| 11,942
| 105,331
| 4.880171
| 0.049824
| 0.172137
| 0.280856
| 0.24544
| 0.847424
| 0.816984
| 0.792069
| 0.770672
| 0.737624
| 0.725973
| 0
| 0.052343
| 0.31439
| 105,331
| 2,844
| 89
| 37.036217
| 0.754667
| 0.043919
| 0
| 0.707824
| 0
| 0
| 0.009166
| 0.000957
| 0
| 0
| 0
| 0
| 0.107987
| 1
| 0.043602
| false
| 0
| 0.004482
| 0.000407
| 0.05868
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
73a846f8233adcfa1595907818c78244573b86d9
| 36,694
|
py
|
Python
|
huobitrade/core.py
|
hadrianl/huobi
|
7cfceba39189552489c1d9c88169f93109ee76ba
|
[
"MIT"
] | 177
|
2018-06-06T11:33:58.000Z
|
2022-01-22T03:58:52.000Z
|
huobitrade/core.py
|
jfhk/huobi
|
7cfceba39189552489c1d9c88169f93109ee76ba
|
[
"MIT"
] | 8
|
2018-05-31T07:32:52.000Z
|
2021-04-30T00:44:53.000Z
|
huobitrade/core.py
|
jfhk/huobi
|
7cfceba39189552489c1d9c88169f93109ee76ba
|
[
"MIT"
] | 61
|
2018-05-31T07:32:08.000Z
|
2021-10-10T09:15:30.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/9/20 0020 9:23
# @Author : Hadrianl
# @File : core.py
# @Contact : 137150224@qq.com
import websocket as ws
import gzip as gz
import json
from . import utils as u
from .utils import logger, zmq_ctx
from threading import Thread
import datetime as dt
from dateutil import parser
from functools import wraps
import zmq
import pickle
import time
from abc import abstractmethod
import uuid
from .handler import BaseHandler
from concurrent.futures import ThreadPoolExecutor
# Import-time smoke-test line confirming the logger is configured.
logger.debug(f'<TESTING>LOG_TESTING')
class BaseWebsocket(object):
    """Common plumbing for Huobi websocket clients.

    Provides message (de)serialization, reconnection on close, and the
    registration machinery for response callbacks, handler objects and
    per-topic handle functions.  Subclasses must set ``addr``/``name`` and
    the registries in ``__init__`` and implement :meth:`pub_msg`.
    """

    ws_count = 0  # class-level count of sockets ever constructed

    def __new__(cls, *args, **kwargs):
        cls.ws_count += 1
        if cls is _AuthWS:
            # The authenticated endpoint is unusable without API keys.
            from .utils import ACCESS_KEY, SECRET_KEY
            if not (ACCESS_KEY and SECRET_KEY):
                raise Exception('ACCESS_KEY或SECRET_KEY未设置!')
        return object.__new__(cls)

    def send_message(self, msg):
        """JSON-encode *msg* and push it onto the websocket."""
        msg_json = json.dumps(msg).encode()
        self.ws.send(msg_json)

    def on_message(self, _msg):
        """Default receive hook: gunzip, parse and log the pushed payload."""
        json_data = gz.decompress(_msg).decode()
        msg = json.loads(json_data)
        logger.debug(f'{msg}')

    @abstractmethod
    def pub_msg(self, msg):
        """Core dispatch: call handle_funcs directly and publish to handler queues."""
        raise NotImplementedError

    def on_error(self, error):
        logger.error(f'<错误>on_error:{error}')

    def on_close(self):
        logger.info(f'<连接>已断开与{self.addr}的连接')
        if not self._active:
            # stop() was called; do not reconnect.
            return
        # NOTE(review): both branches reconnect; _reconn only counts down and
        # never actually stops retries -- presumably intentional, confirm.
        if self._reconn > 0:
            logger.info(f'<连接>尝试与{self.addr}进行重连')
            self.__start()
            self._reconn -= 1
            time.sleep(self._interval)
        else:
            logger.info(f'<连接>尝试与{self.addr}进行重连')
            self.__start()
            time.sleep(self._interval)

    def on_open(self):
        self._active = True
        logger.info(f'<连接>建立与{self.addr}的连接')

    # ------------------- response-callback registration ---------------------
    def register_onRsp(self, req):
        """Decorator: register a callback for replies to *req* (a topic)."""
        def wrapper(_callback):
            callbackList = self._req_callbacks.setdefault(req, [])
            callbackList.append(_callback)
            return _callback
        return wrapper

    def unregister_onRsp(self, req):
        """Drop and return all callbacks registered for *req*."""
        return self._req_callbacks.pop(req)

    # ------------------------------------------------------------------
    # ------------------------- handler registration --------------------
    def register_handler(self, handler):
        """Attach a BaseHandler and start it for this socket's channel."""
        if handler not in self._handlers:
            self._handlers.append(handler)
            handler.start(self.name)

    def unregister_handler(self, handler):
        """Detach a previously registered handler and stop it."""
        if handler in self._handlers:
            self._handlers.remove(handler)
            handler.stop(self.name)

    def __add__(self, handler):
        if isinstance(handler, BaseHandler):
            self.register_handler(handler)
        else:
            # BUG FIX: the message was a plain string, so {handler} was never
            # interpolated; made it an f-string.
            raise Exception(f'{handler} is not a Handler')
        return self

    def __sub__(self, handler):
        if isinstance(handler, BaseHandler):
            self.unregister_handler(handler)
        else:
            raise Exception(f'{handler} is not a Handler')
        return self

    # -----------------------------------------------------------------
    # --------------------- handle_func registration -------------------
    def register_handle_func(self, topic):
        """Decorator: register a plain function for messages on *topic*."""
        def _wrapper(_handle_func):
            if topic not in self._handle_funcs:
                self._handle_funcs[topic] = []
            self._handle_funcs[topic].append(_handle_func)
            return _handle_func
        return _wrapper

    def unregister_handle_func(self, _handle_func_name, topic):
        """Unregister a handle_func by reference or by function name."""
        handler_list = self._handle_funcs.get(topic, [])
        for i, h in enumerate(handler_list):
            if h is _handle_func_name or h.__name__ == _handle_func_name:
                handler_list.pop(i)
        if self._handle_funcs.get(topic) == []:
            # Drop the topic entry once its last function is gone.
            self._handle_funcs.pop(topic)

    # -----------------------------------------------------------------
    # --------------------- registry accessors -------------------------
    @property
    def handlers(self):
        return self._handlers

    @property
    def handle_funcs(self):
        return self._handle_funcs

    @property
    def OnRsp_callbacks(self):
        return self._req_callbacks

    # -----------------------------------------------------------------
    # ------------------------- start / stop ---------------------------
    def run(self):
        """Open the websocket unless its worker thread is already alive."""
        if not hasattr(self, 'ws_thread') or not self.ws_thread.is_alive():
            self.__start()

    def __start(self):
        self.ws = ws.WebSocketApp(
            self.addr,
            on_open=self.on_open,
            on_message=self.on_message,
            on_error=self.on_error,
            on_close=self.on_close,
            # on_data=self.on_data
        )
        self.ws_thread = Thread(target=self.ws.run_forever, name=self.name)
        # Thread.setDaemon() is deprecated; assign the attribute instead.
        self.ws_thread.daemon = True
        self.ws_thread.start()

    def stop(self):
        """Close the socket and mark it inactive so on_close won't reconnect."""
        if hasattr(self, 'ws_thread') and self.ws_thread.is_alive():
            self._active = False
            self.ws.close()
            # self.ws_thread.join()
    # ------------------------------------------------------------------------
class _AuthWS(BaseWebsocket):
    """Authenticated Huobi websocket (``/ws/v1``): accounts and orders feeds."""

    def __init__(self, host='api.huobi.br.com',
                 reconn=10, interval=3):
        self._protocol = 'wss://'
        self._host = host
        self._path = '/ws/v1'
        self.addr = self._protocol + self._host + self._path
        self._threadPool = ThreadPoolExecutor(max_workers=3)
        # self.name = f'HuoBiAuthWS{self.ws_count}'
        self.name = f'HuoBiAuthWS_{uuid.uuid1()}'
        self.sub_dict = {}  # subscription registry
        self._handlers = []  # handler functions/objects that process messages
        self._req_callbacks = {}
        self._handle_funcs = {}
        self._auth_callbacks = []
        self.ctx = zmq_ctx
        self.pub_socket = self.ctx.socket(zmq.PUB)
        self.pub_socket.bind(f'inproc://{self.name}')
        self._active = False
        self._reconn = reconn
        self._interval = interval

    def on_open(self):
        # Authenticate as soon as the socket is up.
        self._active = True
        logger.info(f'<连接>建立与{self.addr}的连接')
        self.auth()
        logger.info(f'<鉴权>向{self.addr}发起鉴权请求')

    def on_message(self, _msg):
        """Message pump for the authenticated channel: handles ping/pong,
        sub/unsub acknowledgements, request replies and auth results."""
        json_data = gz.decompress(_msg).decode()
        msg = json.loads(json_data)
        logger.debug(f'{msg}')
        op = msg['op']
        if op == 'ping':
            pong = {'op': 'pong', 'ts': msg['ts']}
            self.send_message(pong)
        if msg.setdefault('err-code', 0) == 0:
            if op == 'notify':
                self.pub_msg(msg)
            elif op == 'sub':
                logger.info(
                    f'<订阅>Topic:{msg["topic"]}订阅成功 Time:{dt.datetime.fromtimestamp(msg["ts"] / 1000)} #{msg["cid"]}#')
            elif op == 'unsub':
                logger.info(
                    f'<订阅>Topic:{msg["topic"]}取消订阅成功 Time:{dt.datetime.fromtimestamp(msg["ts"] / 1000)} #{msg["cid"]}#')
            elif op == 'req':
                logger.info(f'<请求>Topic:{msg["topic"]}请求数据成功 #{msg["cid"]}#')
                OnRsp = self._req_callbacks.get(msg['topic'], [])

                def callbackThread(_m):
                    # Run every registered callback; isolate exceptions so one
                    # failing callback cannot break the others.
                    for cb in OnRsp:
                        try:
                            cb(_m)
                        except Exception as e:
                            logger.error(f'<请求回调>{msg["topic"]}的回调函数{cb.__name__}异常-{e}')

                task = self._threadPool.submit(callbackThread, msg)
                # _t = Thread(target=callbackThread, args=(msg,))
                # _t.setDaemon(True)
                # _t.start()
            elif op == 'auth':
                logger.info(
                    f'<鉴权>鉴权成功 Time:{dt.datetime.fromtimestamp(msg["ts"] / 1000)} #{msg["cid"]}#')
                for cb in self._auth_callbacks:
                    cb()
        else:
            logger.error(
                f'<错误>{msg.get("cid")}-OP:{op} ErrTime:{dt.datetime.fromtimestamp(msg["ts"] / 1000)} ErrCode:{msg["err-code"]} ErrMsg:{msg["err-msg"]}'
            )

    def pub_msg(self, msg):
        """Core dispatch: publish the message on the zmq PUB socket for
        handlers and invoke the topic's handle_funcs directly."""
        topic = msg.get('topic')
        self.pub_socket.send_multipart(
            [pickle.dumps(topic), pickle.dumps(msg)])
        for h in self._handle_funcs.get(topic, []):
            h(msg)

    def auth(self, cid:str =''):
        """Sign (HmacSHA256, signature v2) and send the auth request."""
        from .utils import ACCESS_KEY, SECRET_KEY, createSign
        timestamp = dt.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S')
        params = {
            "AccessKeyId": ACCESS_KEY,
            "SignatureMethod": "HmacSHA256",
            "SignatureVersion": "2",
            "Timestamp": timestamp,}
        signature = createSign(params, 'GET', self._host, self._path, SECRET_KEY)
        params['Signature'] = signature
        params['op'] = 'auth'
        params['cid'] = cid
        self.send_message(params)
        return 'auth', cid

    def sub_accounts(self, cid:str=''):
        """Subscribe to account-change pushes."""
        msg = {'op': 'sub', 'cid': cid, 'topic': 'accounts'}
        self.send_message(msg)
        logger.info(f'<订阅>accouts-发送订阅请求 #{cid}#')
        return msg['topic'], cid

    def unsub_accounts(self, cid:str=''):
        """Cancel the accounts subscription."""
        msg = {'op': 'unsub', 'cid': cid, 'topic': 'accounts'}
        self.send_message(msg)
        logger.info(f'<订阅>accouts-发送订阅取消请求 #{cid}#')
        return msg['topic'], cid

    def sub_orders(self, symbol='*', cid:str=''):
        """Subscribe to order updates.

        :param symbol: '*' subscribes to order changes for all symbols
        :param cid:
        :return:
        """
        msg = {'op': 'sub', 'cid': cid, 'topic': f'orders.{symbol}'}
        self.send_message(msg)
        logger.info(f'<订阅>orders-发送订阅请求*{symbol}* #{cid}#')
        return msg['topic'], cid

    def unsub_orders(self, symbol='*', cid:str=''):
        """Cancel an orders subscription.

        :param symbol: '*' covers order changes for all symbols
        :param cid:
        :return:
        """
        msg = {'op': 'unsub', 'cid': cid, 'topic': f'orders.{symbol}'}
        self.send_message(msg)
        logger.info(f'<订阅>orders-发送取消订阅请求*{symbol}* #{cid}#')
        return msg['topic'], cid
    # ------------------------------------------------------------------------
    # ---------------------- account request functions -----------------------
    def req_accounts(self, cid:str=''):
        """One-shot request for the account list."""
        msg = {'op': 'req', 'cid': cid, 'topic': 'accounts.list'}
        self.send_message(msg)
        logger.info(f'<请求>accounts-发送请求 #{cid}#')
        return msg['topic'], cid

    def req_orders(self, acc_id, symbol, states:list,
                   types:list=None,
                   start_date=None, end_date=None,
                   _from=None, direct=None,
                   size=None, cid:str=''):
        """One-shot order-list request; optional filters are only attached
        when supplied."""
        states = ','.join(states)
        msg = {'op': 'req', 'account-id': acc_id, 'symbol': symbol, 'states': states, 'cid': cid,
               'topic': 'orders.list'}
        if types:
            types = ','.join(types)
            msg['types'] = types
        if start_date:
            start_date = parser.parse(start_date).strftime('%Y-%m-%d')
            msg['start-date'] = start_date
        if end_date:
            end_date = parser.parse(end_date).strftime('%Y-%m-%d')
            msg['end-date'] = end_date
        if _from:
            msg['_from'] = _from
        if direct:
            msg['direct'] = direct
        if size:
            msg['size'] = size
        self.send_message(msg)
        logger.info(f'<请求>orders-发送请求 #{cid}#')
        return msg['topic'], cid

    def req_orders_detail(self, order_id, cid:str=''):
        """One-shot request for a single order's details."""
        msg = {'op': 'req', 'order-id': order_id, 'cid': cid, 'topic': 'orders.detail'}
        self.send_message(msg)
        logger.info(f'<请求>accounts-发送请求 #{cid}#')
        return msg['topic'], cid

    def after_auth(self,_func):
        """Decorator: register initialization work to run once authenticated."""
        @wraps(_func)
        def _callback():
            try:
                _func()
            except Exception as e:
                logger.exception(f'afer_open回调处理错误{e}')
        self._auth_callbacks.append(_callback)
        return _callback
class _HBWS(BaseWebsocket):
    """Public Huobi market-data websocket (``/ws``): klines, depth, ticks."""

    def __init__(self, host='api.huobi.br.com',
                 reconn=10, interval=3):
        self._protocol = 'wss://'
        self._host = host
        self._path = '/ws'
        self.addr = self._protocol + self._host + self._path
        self._threadPool = ThreadPoolExecutor(max_workers=3)
        # self.name = f'HuoBiWS{self.ws_count}'
        self.name = f'HuoBiWS_{uuid.uuid1()}'
        self.sub_dict = {}  # subscription registry
        self._handlers = []  # handler functions/objects that process messages
        self._req_callbacks = {}
        self._handle_funcs = {}
        self._open_callbacks = []
        self.ctx = zmq_ctx
        self.pub_socket = self.ctx.socket(zmq.PUB)
        self.pub_socket.bind(f'inproc://{self.name}')
        self._active = False
        self._reconn = reconn
        self._interval = interval

    def on_open(self):
        # Re-issue every remembered subscription (supports reconnection).
        self._active = True
        logger.info(f'<连接>建立与{self.addr}的连接')
        for topic, subbed in self.sub_dict.items():
            msg = {'sub': subbed['topic'], 'id': subbed['id']}
            self.send_message(msg)
        else:
            # for/else: no break above, so this always runs after the loop.
            logger.info(f'<订阅>初始化订阅完成')
        for fun in self._open_callbacks:
            fun()

    def on_message(self, _msg):
        """Message pump for the market channel: ping/pong, sub/unsub
        acknowledgements, request replies and data pushes."""
        json_data = gz.decompress(_msg).decode()
        msg = json.loads(json_data)
        logger.debug(f'{msg}')
        if 'ping' in msg:
            pong = {'pong': msg['ping']}
            self.send_message(pong)
        elif 'status' in msg:
            if msg['status'] == 'ok':
                if 'subbed' in msg:
                    # Remember the subscription so on_open can replay it.
                    self.sub_dict.update({
                        msg['subbed']: {
                            'topic': msg['subbed'],
                            'id': msg['id']
                        }
                    })
                    logger.info(
                        f'<订阅>Topic:{msg["subbed"]}订阅成功 Time:{dt.datetime.fromtimestamp(msg["ts"] / 1000)} #{msg["id"]}#'
                    )
                elif 'unsubbed' in msg:
                    self.sub_dict.pop(msg['unsubbed'])
                    logger.info(
                        f'<订阅>Topic:{msg["unsubbed"]}取消订阅成功 Time:{dt.datetime.fromtimestamp(msg["ts"] / 1000)} #{msg["id"]}#'
                    )
                elif 'rep' in msg:
                    logger.info(f'<请求>Topic:{msg["rep"]}请求数据成功 #{msg["id"]}#')
                    OnRsp = self._req_callbacks.get(msg['rep'], [])

                    def callbackThread(_m):
                        # Run every registered callback; isolate exceptions so
                        # one failing callback cannot break the others.
                        for cb in OnRsp:
                            try:
                                cb(_m)
                            except Exception as e:
                                logger.error(f'<请求回调>{msg["rep"]}的回调函数{cb.__name__}异常-{e}')

                    task = self._threadPool.submit(callbackThread, msg)
                elif 'data' in msg:
                    self.pub_msg(msg)
                # _t = Thread(target=callbackThread, args=(msg, ))
                # _t.setDaemon(True)
                # _t.start()
            elif msg['status'] == 'error':
                logger.error(
                    f'<错误>{msg.get("id")}-ErrTime:{dt.datetime.fromtimestamp(msg["ts"] / 1000)} ErrCode:{msg["err-code"]} ErrMsg:{msg["err-msg"]}'
                )
        else:
            # No status field: treat as a data push.
            self.pub_msg(msg)

    def pub_msg(self, msg):
        """Core dispatch: publish the message on the zmq PUB socket for
        handlers and invoke the channel's handle_funcs directly."""
        if 'ch' in msg:
            topic = msg.get('ch')
            self.pub_socket.send_multipart(
                [pickle.dumps(topic), pickle.dumps(msg)])
            for h in self._handle_funcs.get(topic, []):
                h(msg)

    @staticmethod
    def _check_info(**kwargs):
        """Validate period/depth values against the known tables; log and
        return False on any invalid entry."""
        log = []
        if 'period' in kwargs and kwargs['period'] not in u.PERIOD:
            log.append(f'<验证>不存在Period:{kwargs["period"]}')
        if 'depth' in kwargs and kwargs['depth'] not in u.DEPTH:
            log.append(f'<验证>不存在Depth:{kwargs["depth"]}')
        if log:
            for l in log:
                logger.warning(l)
            return False
        else:
            return True

    # ---------------------- market-data subscriptions ------------------------
    def sub_overview(self, _id=''):
        msg = {'sub': 'market.overview', 'id': _id}
        self.send_message(msg)
        logger.info(f'<订阅>overview-发送订阅请求 #{_id}#')
        return msg['sub'], _id

    def unsub_overview(self, _id=''):
        msg = {'unsub': 'market.overview', 'id': _id}
        self.send_message(msg)
        logger.info(f'<订阅>overview-发送取消订阅请求 #{_id}#')
        return msg['unsub'], _id

    def sub_kline(self, symbol, period, _id=''):
        # Only sends when the period validates; otherwise returns None.
        if self._check_info(symbol=symbol, period=period):
            msg = {'sub': f'market.{symbol}.kline.{period}', 'id': _id}
            self.send_message(msg)
            logger.info(f'<订阅>kline-发送订阅请求*{symbol}*@{period} #{_id}#')
            return msg['sub'], _id

    def unsub_kline(self, symbol, period, _id=''):
        if self._check_info(symbol=symbol, period=period):
            msg = {'unsub': f'market.{symbol}.kline.{period}', 'id': _id}
            self.send_message(msg)
            logger.info(f'<订阅>kline-发送取消订阅请求*{symbol}*@{period} #{_id}#')
            return msg['unsub'], _id

    def sub_depth(self, symbol, depth=0, _id=''):
        if self._check_info(symbol=symbol, depth=depth):
            msg = {'sub': f'market.{symbol}.depth.{u.DEPTH[depth]}', 'id': _id}
            self.send_message(msg)
            logger.info(f'<订阅>depth-发送订阅请求*{symbol}*@{u.DEPTH[depth]} #{_id}#')
            return msg['sub'], _id

    def unsub_depth(self, symbol, depth=0, _id=''):
        if self._check_info(symbol=symbol, depth=depth):
            msg = {
                'unsub': f'market.{symbol}.depth.{u.DEPTH[depth]}',
                'id': _id
            }
            self.send_message(msg)
            logger.info(
                f'<订阅>depth-发送取消订阅请求*{symbol}*@{u.DEPTH[depth]} #{_id}#')
            return msg['unsub'], _id

    def sub_tick(self, symbol, _id=''):
        if self._check_info(symbol=symbol):
            msg = {'sub': f'market.{symbol}.trade.detail', 'id': _id}
            self.send_message(msg)
            logger.info(f'<订阅>tick-发送订阅请求*{symbol}* #{_id}#')
            return msg['sub'], _id

    def unsub_tick(self, symbol, _id=''):
        if self._check_info(symbol=symbol):
            msg = {'unsub': f'market.{symbol}.trade.detail', 'id': _id}
            self.send_message(msg)
            logger.info(f'<订阅>tick-发送取消订阅请求*{symbol}* #{_id}#')
            return msg['unsub'], _id

    def sub_all_lastest_24h_ohlc(self, _id=''):
        msg = {'sub': f'market.tickers', 'id': _id}
        self.send_message(msg)
        logger.info(f'<订阅>all_ticks-发送订阅请求 #{_id}#')
        return msg['sub'], _id

    def unsub_all_lastest_24h_ohlc(self, _id=''):
        msg = {'unsub': f'market.tickers', 'id': _id}
        self.send_message(msg)
        logger.info(f'<订阅>all_ticks-发送取消订阅请求 #{_id}#')
        return msg['unsub'], _id
    # -------------------------------------------------------------------------
    # ---------------------- market-data request functions --------------------
    def req_kline(self, symbol, period, _id='', **kwargs):
        """One-shot kline request; optional _from/_to bounds accept either a
        parseable date string or a raw timestamp."""
        if self._check_info(symbol=symbol, period=period):
            msg = {'req': f'market.{symbol}.kline.{period}', 'id': _id}
            if '_from' in kwargs:
                _from = parser.parse(kwargs['_from']).timestamp() if isinstance(
                    kwargs['_from'], str) else kwargs['_from']
                msg.update({'from': int(_from)})
            if '_to' in kwargs:
                _to = parser.parse(kwargs['_to']).timestamp() if isinstance(
                    kwargs['_to'], str) else kwargs['_to']
                msg.update({'to': int(_to)})
            self.send_message(msg)
            logger.info(f'<请求>kline-发送请求*{symbol}*@{period} #{_id}#')
            return msg['req'], _id

    def req_depth(self, symbol, depth=0, _id=''):
        if self._check_info(depth=depth):
            msg = {'req': f'market.{symbol}.depth.{u.DEPTH[depth]}', 'id': _id}
            self.send_message(msg)
            logger.info(f'<请求>depth-发送请求*{symbol}*@{u.DEPTH[depth]} #{_id}#')
            return msg['req'], _id

    def req_tick(self, symbol, _id=''):
        msg = {'req': f'market.{symbol}.trade.detail', 'id': _id}
        self.send_message(msg)
        logger.info(f'<请求>tick-发送请求*{symbol}* #{_id}#')
        return msg['req'], _id

    def req_symbol(self, symbol, _id=''):
        msg = {'req': f'market.{symbol}.detail', 'id': _id}
        self.send_message(msg)
        logger.info(f'<请求>symbol-发送请求*{symbol}* #{_id}#')
        return msg['req'], _id
    # -------------------------------------------------------------------------
    def after_open(self,_func):
        """Decorator: register initialization work to run once the socket opens."""
        @wraps(_func)
        def _callback():
            try:
                _func()
            except Exception as e:
                logger.exception(f'afer_open回调处理错误{e}')
        self._open_callbacks.append(_callback)
        return _callback
class _HBDerivativesWS(BaseWebsocket):
def __init__(self, host='www.hbdm.com',
reconn=10, interval=3):
self._protocol = 'wss://'
self._host = host
self._path = '/ws'
self.addr = self._protocol + self._host + self._path
self._threadPool = ThreadPoolExecutor(max_workers=3)
# self.name = f'HuoBiWS{self.ws_count}'
self.name = f'HuoBiDerivativesWS_{uuid.uuid1()}'
self.sub_dict = {} # 订阅列表
self._handlers = [] # 对message做处理的处理函数或处理类
self._req_callbacks = {}
self._handle_funcs = {}
self._open_callbacks = []
self.ctx = zmq_ctx
self.pub_socket = self.ctx.socket(zmq.PUB)
self.pub_socket.bind(f'inproc://{self.name}')
self._active = False
self._reconn = reconn
self._interval = interval
def on_open(self):
self._active = True
logger.info(f'<连接>建立与{self.addr}的连接')
for topic, subbed in self.sub_dict.items():
msg = {'sub': subbed['topic'], 'id': subbed['id']}
self.send_message(msg)
else:
logger.info(f'<订阅>初始化订阅完成')
for fun in self._open_callbacks:
fun()
def on_message(self, _msg): # 接收ws的消息推送并处理,包括了pingpong,处理订阅列表,以及处理数据推送
json_data = gz.decompress(_msg).decode()
msg = json.loads(json_data)
logger.debug(f'{msg}')
if 'ping' in msg:
pong = {'pong': msg['ping']}
self.send_message(pong)
elif 'status' in msg:
if msg['status'] == 'ok':
if 'subbed' in msg:
self.sub_dict.update({
msg['subbed']: {
'topic': msg['subbed'],
'id': msg['id']
}
})
logger.info(
f'<订阅>Topic:{msg["subbed"]}订阅成功 Time:{dt.datetime.fromtimestamp(msg["ts"] / 1000)} #{msg["id"]}#'
)
elif 'unsubbed' in msg:
self.sub_dict.pop(msg['unsubbed'])
logger.info(
f'<订阅>Topic:{msg["unsubbed"]}取消订阅成功 Time:{dt.datetime.fromtimestamp(msg["ts"] / 1000)} #{msg["id"]}#'
)
elif 'rep' in msg:
logger.info(f'<请求>Topic:{msg["rep"]}请求数据成功 #{msg["id"]}#')
OnRsp = self._req_callbacks.get(msg['rep'], [])
def callbackThread(_m):
for cb in OnRsp:
try:
cb(_m)
except Exception as e:
logger.error(f'<请求回调>{msg["rep"]}的回调函数{cb.__name__}异常-{e}')
task = self._threadPool.submit(callbackThread, msg)
elif 'data' in msg:
self.pub_msg(msg)
# _t = Thread(target=callbackThread, args=(msg, ))
# _t.setDaemon(True)
# _t.start()
elif msg['status'] == 'error':
logger.error(
f'<错误>{msg.get("id")}-ErrTime:{dt.datetime.fromtimestamp(msg["ts"] / 1000)} ErrCode:{msg["err-code"]} ErrMsg:{msg["err-msg"]}'
)
else:
self.pub_msg(msg)
def pub_msg(self, msg):
"""核心的处理函数,如果是handle_func直接处理,如果是handler,推送到handler的队列"""
if 'ch' in msg:
topic = msg.get('ch')
self.pub_socket.send_multipart(
[pickle.dumps(topic), pickle.dumps(msg)])
for h in self._handle_funcs.get(topic, []):
h(msg)
@staticmethod
def _check_info(**kwargs):
log = []
if 'period' in kwargs and kwargs['period'] not in u.PERIOD:
log.append(f'<验证>不存在Period:{kwargs["period"]}')
if 'depth' in kwargs and kwargs['depth'] not in u.DerivativesDEPTH:
log.append(f'<验证>不存在Depth:{kwargs["depth"]}')
if log:
for l in log:
logger.warning(l)
return False
else:
return True
def sub_kline(self, symbol, period, _id=''):
if self._check_info(symbol=symbol, period=period):
msg = {'sub': f'market.{symbol}.kline.{period}', 'id': _id}
self.send_message(msg)
logger.info(f'<订阅>kline-发送订阅请求*{symbol}*@{period} #{_id}#')
return msg['sub'], _id
def unsub_kline(self, symbol, period, _id=''):
if self._check_info(symbol=symbol, period=period):
msg = {'unsub': f'market.{symbol}.kline.{period}', 'id': _id}
self.send_message(msg)
logger.info(f'<订阅>kline-发送取消订阅请求*{symbol}*@{period} #{_id}#')
return msg['unsub'], _id
def sub_depth(self, symbol, depth=0, _id=''):
if self._check_info(symbol=symbol, depth=depth):
msg = {'sub': f'market.{symbol}.depth.{u.DEPTH[depth]}', 'id': _id}
self.send_message(msg)
logger.info(f'<订阅>depth-发送订阅请求*{symbol}*@{u.DEPTH[depth]} #{_id}#')
return msg['sub'], _id
def unsub_depth(self, symbol, depth=0, _id=''):
if self._check_info(symbol=symbol, depth=depth):
msg = {
'unsub': f'market.{symbol}.depth.{u.DEPTH[depth]}',
'id': _id
}
self.send_message(msg)
logger.info(
f'<订阅>depth-发送取消订阅请求*{symbol}*@{u.DEPTH[depth]} #{_id}#')
return msg['unsub'], _id
def sub_last_24h_kline(self, symbol, _id=''):
msg = {'sub': f'market.{symbol}.detail', 'id': _id}
self.send_message(msg)
logger.info(f'<订阅>Last_24h_kline-发送订阅请求*{symbol}* #{_id}#')
return msg['sub'], _id
def unsub_last_24h_kline(self, symbol, _id=''):
msg = {
'unsub': f'market.{symbol}.detail',
'id': _id
}
self.send_message(msg)
logger.info(
f'<订阅>Last_24h_kline-发送取消订阅请求*{symbol}* #{_id}#')
return msg['unsub'], _id
def sub_tick(self, symbol, _id=''):
if self._check_info(symbol=symbol):
msg = {'sub': f'market.{symbol}.trade.detail', 'id': _id}
self.send_message(msg)
logger.info(f'<订阅>tick-发送订阅请求*{symbol}* #{_id}#')
return msg['sub'], _id
def unsub_tick(self, symbol, _id=''):
if self._check_info(symbol=symbol):
msg = {'unsub': f'market.{symbol}.trade.detail', 'id': _id}
self.send_message(msg)
logger.info(f'<订阅>tick-发送取消订阅请求*{symbol}* #{_id}#')
return msg['unsub'], _id
# -------------------------------------------------------------------------
# -------------------------行情请求函数----------------------------------------
def req_kline(self, symbol, period, _id='', **kwargs):
if self._check_info(symbol=symbol, period=period):
msg = {'req': f'market.{symbol}.kline.{period}', 'id': _id}
if '_from' in kwargs:
_from = parser.parse(kwargs['_from']).timestamp() if isinstance(
kwargs['_from'], str) else kwargs['_from']
msg.update({'from': int(_from)})
if '_to' in kwargs:
_to = parser.parse(kwargs['_to']).timestamp() if isinstance(
kwargs['_to'], str) else kwargs['_to']
msg.update({'to': int(_to)})
self.send_message(msg)
logger.info(f'<请求>kline-发送请求*{symbol}*@{period} #{_id}#')
return msg['req'], _id
def req_tick(self, symbol, _id=''):
msg = {'req': f'market.{symbol}.trade.detail', 'id': _id}
self.send_message(msg)
logger.info(f'<请求>tick-发送请求*{symbol}* #{_id}#')
return msg['req'], _id
# -------------------------------------------------------------------------
def after_open(self,_func): # ws开启之后需要完成的初始化处理
@wraps(_func)
def _callback():
try:
_func()
except Exception as e:
logger.exception(f'afer_open回调处理错误{e}')
self._open_callbacks.append(_callback)
return _callback
class _DerivativesAuthWS(BaseWebsocket):
    """Authenticated websocket client for Huobi's derivatives notification API.

    Signs in with the account's API key, manages private-topic subscriptions
    (order updates), and republishes every ``notify`` frame on an in-process
    ZMQ PUB socket so multiple consumers can listen independently.
    """

    def __init__(self, host='api.hbdm.com', reconn=10, interval=3):
        """
        :param host: API host name.
        :param reconn: number of reconnect attempts.
        :param interval: seconds between reconnect attempts.
        """
        self._protocol = 'wss://'
        self._host = host
        self._path = '/notification'
        self.addr = self._protocol + self._host + self._path
        self._threadPool = ThreadPoolExecutor(max_workers=3)
        self.name = f'HuoBiDerivativesAuthWS_{uuid.uuid1()}'
        self.sub_dict = {}          # subscription registry
        self._handlers = []         # handler funcs/objects processing messages
        self._req_callbacks = {}    # topic -> callbacks for 'req' responses
        self._handle_funcs = {}     # topic -> callbacks for 'notify' frames
        self._auth_callbacks = []   # run after successful authentication
        self.ctx = zmq_ctx
        self.pub_socket = self.ctx.socket(zmq.PUB)
        self.pub_socket.bind(f'inproc://{self.name}')
        self._active = False
        self._reconn = reconn
        self._interval = interval

    def on_open(self):
        """Mark the connection active and immediately request authentication."""
        self._active = True
        logger.info(f'<连接>建立与{self.addr}的连接')
        self.auth()
        logger.info(f'<鉴权>向{self.addr}发起鉴权请求')

    def on_message(self, _msg):
        """Decode a gzip-compressed frame and dispatch it by its ``op`` field."""
        json_data = gz.decompress(_msg).decode()
        msg = json.loads(json_data)
        logger.debug(f'{msg}')
        op = msg['op']
        if op == 'ping':
            # Heartbeat: echo the server timestamp back.
            pong = {'op': 'pong', 'ts': msg['ts']}
            self.send_message(pong)
        # setdefault: frames carrying no err-code are treated as success.
        if msg.setdefault('err-code', 0) == 0:
            if op == 'notify':
                self.pub_msg(msg)
            elif op == 'sub':
                logger.info(
                    f'<订阅>Topic:{msg["topic"]}订阅成功 Time:{dt.datetime.fromtimestamp(msg["ts"] / 1000)} #{msg["cid"]}#')
            elif op == 'unsub':
                logger.info(
                    f'<订阅>Topic:{msg["topic"]}取消订阅成功 Time:{dt.datetime.fromtimestamp(msg["ts"] / 1000)} #{msg["cid"]}#')
            elif op == 'req':
                logger.info(f'<请求>Topic:{msg["topic"]}请求数据成功 #{msg["cid"]}#')
                OnRsp = self._req_callbacks.get(msg['topic'], [])

                def callbackThread(_m):
                    # Run every registered callback; isolate failures so one
                    # broken callback cannot starve the others.
                    for cb in OnRsp:
                        try:
                            cb(_m)
                        except Exception as e:
                            logger.error(f'<请求回调>{msg["topic"]}的回调函数{cb.__name__}异常-{e}')

                self._threadPool.submit(callbackThread, msg)
            elif op == 'auth':
                logger.info(
                    f'<鉴权>鉴权成功 Time:{dt.datetime.fromtimestamp(msg["ts"] / 1000)}')
                for cb in self._auth_callbacks:
                    cb()
        else:
            logger.error(
                f'<错误>{msg.get("cid")}-OP:{op} ErrTime:{dt.datetime.fromtimestamp(msg["ts"] / 1000)} ErrCode:{msg["err-code"]} ErrMsg:{msg["err-msg"]}'
            )

    def pub_msg(self, msg):
        """Core dispatch: publish *msg* on the ZMQ PUB socket and invoke any
        handler functions registered directly for its topic."""
        topic = msg.get('topic')
        self.pub_socket.send_multipart(
            [pickle.dumps(topic), pickle.dumps(msg)])
        for h in self._handle_funcs.get(topic, []):
            h(msg)

    def auth(self, cid: str = ''):
        """Send the signed authentication request.

        :param cid: client id echoed back by the server.
        :return: ('auth', cid)
        """
        from .utils import ACCESS_KEY, SECRET_KEY, createSign
        # The signature covers a naive UTC timestamp string; now(timezone.utc)
        # produces the same text as the deprecated utcnow().
        timestamp = dt.datetime.now(dt.timezone.utc).strftime('%Y-%m-%dT%H:%M:%S')
        params = {
            "AccessKeyId": ACCESS_KEY,
            "SignatureMethod": "HmacSHA256",
            "SignatureVersion": "2",
            "Timestamp": timestamp,
        }
        signature = createSign(params, 'GET', self._host, self._path, SECRET_KEY)
        params['Signature'] = signature
        params['op'] = 'auth'
        params['cid'] = cid
        params['type'] = 'api'
        self.send_message(params)
        return 'auth', cid

    def sub_orders(self, symbol='*', cid: str = ''):
        """Subscribe to order updates.

        :param symbol: '*' subscribes to order changes of every symbol.
        :param cid: client id.
        :return: (topic, cid)
        """
        msg = {'op': 'sub', 'cid': cid, 'topic': f'orders.{symbol}'}
        self.send_message(msg)
        logger.info(f'<订阅>orders-发送订阅请求*{symbol}* #{cid}#')
        return msg['topic'], cid

    def unsub_orders(self, symbol='*', cid: str = ''):
        """Cancel an order-update subscription.

        :param symbol: '*' unsubscribes order changes of every symbol.
        :param cid: client id.
        :return: (topic, cid)
        """
        msg = {'op': 'unsub', 'cid': cid, 'topic': f'orders.{symbol}'}
        self.send_message(msg)
        logger.info(f'<订阅>orders-发送取消订阅请求*{symbol}* #{cid}#')
        return msg['topic'], cid

    def after_auth(self, _func):
        """Register *_func* to run once authentication succeeds.

        The callable is wrapped so exceptions are logged rather than
        propagated into the websocket thread; usable as a decorator.
        """
        @wraps(_func)
        def _callback():
            try:
                _func()
            except Exception as e:
                # Fixed log label: this is the after_auth hook (was 'afer_open').
                logger.exception(f'after_auth回调处理错误{e}')
        self._auth_callbacks.append(_callback)
        return _callback
| 36.584247
| 151
| 0.50327
| 4,057
| 36,694
| 4.373675
| 0.079615
| 0.036069
| 0.039675
| 0.040577
| 0.865419
| 0.856008
| 0.849358
| 0.83431
| 0.829914
| 0.82642
| 0
| 0.005301
| 0.311168
| 36,694
| 1,003
| 152
| 36.584247
| 0.696708
| 0.12746
| 0
| 0.75102
| 0
| 0.017687
| 0.182777
| 0.104783
| 0
| 0
| 0
| 0
| 0
| 1
| 0.119728
| false
| 0
| 0.02585
| 0.005442
| 0.228571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fb4951e3f6f85b70885f23093a8a47325e0b4bfe
| 118
|
py
|
Python
|
Py60/main.py
|
xhexe/Py8R
|
44238c5403e7f76988760a040bf5c292824c22e7
|
[
"WTFPL"
] | null | null | null |
Py60/main.py
|
xhexe/Py8R
|
44238c5403e7f76988760a040bf5c292824c22e7
|
[
"WTFPL"
] | null | null | null |
Py60/main.py
|
xhexe/Py8R
|
44238c5403e7f76988760a040bf5c292824c22e7
|
[
"WTFPL"
] | null | null | null |
# Duplicate the sample text file alongside the original.
from shutil import copyfile

SRC = "/home/xhexe/Py/Py8R/files/text.txt"
DST = "/home/xhexe/Py/Py8R/files/textcopy.txt"
copyfile(SRC, DST)
| 29.5
| 88
| 0.762712
| 19
| 118
| 4.736842
| 0.631579
| 0.2
| 0.244444
| 0.333333
| 0.444444
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018018
| 0.059322
| 118
| 3
| 89
| 39.333333
| 0.792793
| 0
| 0
| 0
| 0
| 0
| 0.610169
| 0.610169
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 8
|
fb86253d87070b9e6a99e9cf409dddbefdd496bc
| 2,553
|
py
|
Python
|
1/day1-1.py
|
das-keyboard/adventofcode-2017
|
4aa9cc67f2a2be5db1caa808dce00b579bd4d788
|
[
"Unlicense"
] | null | null | null |
1/day1-1.py
|
das-keyboard/adventofcode-2017
|
4aa9cc67f2a2be5db1caa808dce00b579bd4d788
|
[
"Unlicense"
] | null | null | null |
1/day1-1.py
|
das-keyboard/adventofcode-2017
|
4aa9cc67f2a2be5db1caa808dce00b579bd4d788
|
[
"Unlicense"
] | null | null | null |
def process(data: str) -> int:
    """Solve the AoC 2017 day 1 captcha (part 1).

    Sum every digit that equals the digit immediately after it, treating the
    sequence as circular (the last digit is compared with the first).

    :param data: a string of decimal digits; may be empty.
    :return: the captcha sum (0 for an empty string).
    """
    # Avoid shadowing the builtin ``sum``; modular indexing replaces the
    # original special-cased last-element comparison.
    total = 0
    n = len(data)
    for i, ch in enumerate(data):
        if ch == data[(i + 1) % n]:
            total += int(ch)
    return total
# Check the worked examples from the puzzle statement.
for example in ("1122", "1111", "91212129", "1234"):
    print(process(example))
print("Let's get real!")
print(process("93842744946836323593516414113745734662731646873375367697794874337491791855684612962333536119926727537781269352767698854247195532916161361722988831566262541512788525823979496978744621785362953418221373775633228155275922677912131154186353631748761321962343748876263249313712418412428737834938359192384218791164214815438262222781522385767621325777632146425695452986689352169114934622296297869782735481471713843215259529591963777284936328726182911832568884177794951248378281872982447861758727132992717662466966312574844533471251762333732322453821586561421796875763889511759534192868586732211385539122295765231231148716374879787758557774839218965683332823331371757397462342627442562541492338435172546139814763551474879758596859365271617376449291193451272731497623251587845959469314477381732463117636779978884254522945628237511365152718747251435826237173243945873983712985233683865954267141487177353452376572497126858959214334689491822351466981743939282883139853557697994855117494235529359923916244245752783336254761488883557169676284548628344633578342917884796775765616811715161284957379231555334384131566391551288313498946463175465368863193285735126223257896721151716181955485349411849392339141664323493219928792873499328191359195189555614566159891372218754835615994933429815956789618365624354362856737642139417589544895826562711214295554553685452894169816249612619639533649183774837763221429759379715522716422249339263266655577875869276678982559471169882781319743813885142748338525526956797134248365363484492731494158725221115227494481889931598141834118539945791478673858676194677776549431698142879289666525521294398227418565122659556648724549511592556175131361427174717746982245665436175957427532441423644385897293569394833874663634772242834778438896792212293449744416244484898537641114257981412581552466368449147112229315487226472989537442426825515621664639426947156314978959816431741942948268685615785868513262626197312726653977113814597452812181965151559178776946631867325996889128781
49242688741584822831861748845817871681621697944472377688658368145698614861456518138376989688166921187224726942589996534179549171859786241718727295379"))
| 127.65
| 2,152
| 0.91696
| 60
| 2,553
| 39.016667
| 0.433333
| 0.010679
| 0.00598
| 0.009398
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.891412
| 0.051312
| 2,553
| 19
| 2,153
| 134.368421
| 0.075145
| 0
| 0
| 0.125
| 0
| 0
| 0.849922
| 0.836207
| 0
| 1
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0
| 0
| 0.125
| 0.375
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
fb91df10a4703ba9cfc19a232acbd4eb3b699fed
| 3,408
|
py
|
Python
|
biosys/apps/main/tests/test_download_templates.py
|
parksandwildlife/biosys
|
0682cf1b4055e7cae59fb53045fa441af6d48f5e
|
[
"Apache-2.0"
] | 2
|
2018-04-09T04:02:30.000Z
|
2019-08-20T03:12:55.000Z
|
biosys/apps/main/tests/test_download_templates.py
|
parksandwildlife/biosys
|
0682cf1b4055e7cae59fb53045fa441af6d48f5e
|
[
"Apache-2.0"
] | 29
|
2016-01-20T08:14:15.000Z
|
2017-07-13T07:17:32.000Z
|
biosys/apps/main/tests/test_download_templates.py
|
parksandwildlife/biosys
|
0682cf1b4055e7cae59fb53045fa441af6d48f5e
|
[
"Apache-2.0"
] | 5
|
2016-01-14T23:02:36.000Z
|
2016-09-21T05:35:03.000Z
|
import re
from os import path
from openpyxl import load_workbook
from django.test import TestCase
from django.test.client import Client
from django.shortcuts import reverse
from django.utils import six
from rest_framework import status
class TestDownloadSiteTemplates(TestCase):
    """Tests for the site-template download endpoints.

    Both endpoints must be reachable without logging in and must return a
    single-sheet xlsx workbook whose only row is the expected header row.
    The two original tests were verbatim duplicates apart from three values;
    the shared flow lives in ``_assert_template_download``.
    """

    def _assert_template_download(self, url_name, expected_filename,
                                  expected_headers):
        """Download the template named *url_name* and verify status, content
        type, attachment filename, and the header row of the 'Sites' sheet."""
        client = Client()
        url = reverse(url_name)
        resp = client.get(url)
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        self.assertEqual(resp.get('content-type'),
                         'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
        content_disposition = resp.get('content-disposition')
        # should be something like:
        # 'attachment; filename=Sites_template_lat_long.xlsx
        match = re.match('attachment; filename=(.+)', content_disposition)
        self.assertIsNotNone(match)
        filename, ext = path.splitext(match.group(1))
        self.assertEqual(ext, '.xlsx')
        self.assertEqual(filename, expected_filename)
        # read content
        wb = load_workbook(six.BytesIO(resp.content), read_only=True)
        # one datasheet named 'Sites'
        expected_sheet_name = 'Sites'
        sheet_names = wb.sheetnames
        self.assertEqual(1, len(sheet_names))
        self.assertEqual(sheet_names[0], expected_sheet_name)
        ws = wb[expected_sheet_name]
        rows = list(ws.rows)
        # only the header row
        self.assertEqual(len(rows), 1)
        got_headers = [c.value for c in rows[0]]
        self.assertEqual(got_headers, expected_headers)

    def test_lat_long_no_logging(self):
        """
        Test lat-long template download.
        Important: Logging should not be necessary
        """
        self._assert_template_download(
            'download:site-template-lat-long',
            'Sites_template_lat_long',
            ['Name', 'Code', 'Description', 'Latitude', 'Longitude', 'Datum'])

    def test_easting_northing_no_logging(self):
        """
        Test easting-northing template download.
        Important: Logging should not be necessary
        """
        self._assert_template_download(
            'download:site-template-easting-northing',
            'Sites_template_easting_northing',
            ['Name', 'Code', 'Description', 'Easting', 'Northing', 'Datum',
             'Zone'])
| 41.060241
| 98
| 0.658744
| 391
| 3,408
| 5.58312
| 0.242967
| 0.10994
| 0.046725
| 0.032982
| 0.811269
| 0.808062
| 0.771415
| 0.771415
| 0.771415
| 0.771415
| 0
| 0.006135
| 0.234742
| 3,408
| 82
| 99
| 41.560976
| 0.830905
| 0.123826
| 0
| 0.701754
| 0
| 0
| 0.161346
| 0.087195
| 0
| 0
| 0
| 0
| 0.315789
| 1
| 0.035088
| false
| 0
| 0.140351
| 0
| 0.192982
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fb9b3556074257de7a56a580cd7fe819a540d804
| 9,433
|
py
|
Python
|
fermi/multiband_bubble.py
|
maryprimary/frg
|
e789439f599eb884a6220ae5b471cf610b0c2b2a
|
[
"MIT"
] | null | null | null |
fermi/multiband_bubble.py
|
maryprimary/frg
|
e789439f599eb884a6220ae5b471cf610b0c2b2a
|
[
"MIT"
] | 12
|
2021-02-04T06:46:36.000Z
|
2021-07-01T00:43:38.000Z
|
fermi/multiband_bubble.py
|
maryprimary/frg
|
e789439f599eb884a6220ae5b471cf610b0c2b2a
|
[
"MIT"
] | null | null | null |
"""定义在10.112中的bubble integrals
"""
import warnings
import numpy
from basics import Point
from basics.point import middle_point
#pylint: disable=pointless-string-statement
#warnings.simplefilter('once', RuntimeWarning)
def pi_ab_plus_ec(posia, negaa, lamb, qval, dispb, ksft, area):
    '''Bubble integral Pi^+ (eq. 10.112) with the energy cutoff as flow parameter.

    posia / negaa: edges on which the first band dispersion equals +LAMBDA /
    -LAMBDA; lamb is LAMBDA. dispb is the second band's dispersion, qval the
    momentum shift (wrapped in a Point). ksft is the momentum-addition
    function and is expected to fold its result back into the first
    Brillouin zone — that is what takes care of momentum conservation
    between k and k - q, and of Umklapp processes.
    '''
    '''
    10.112中的 PI^+(n, q) = +LAMBDA (2pi)^-2 beta^-1 Int_{k in k_n} G'(k)G(k - Q)
    其中有一个beta是频率积分带来的,2pi^2是动量积分带来的
    G(k)=CITA(LAMBDA < abs(disp(k))) / i*omega - disp(k)
    G'(k)=-DELTA(abs(disp(k))-LAMBDA) / i*omege - disp(k)
    在零温的情况下10.112中的频率部分可以积分出来,此后的k都是不包含频率的
    = +LAMBDA (2pi)^-2 Int_{k in k_n} CITA() -DELTA()
    { beta^-1 sum_{omega} [(i*omega-disp(k))(i*omega-disp(k - q))]^-1 }
    花括号中的内容求和完之后等于 - CITA(-disp(k)disp(k-q)) / (abs(disp(k)) + abs(disp(k-p)))
    积分会变成
    = +LAMBDA (2pi)^-2 Int_{k in k_n} DELTA(abs(disp(k))-LAMBDA) CITA(LAMBDA<abs(disp(k-q)))
    CITA(-disp(k)disp(k-q)) / (abs(disp(k)) + abs(disp(k-p)))
    因为采用的能量cutoff中有一个 DELTA(abs(disp(k))-LAMBDA),disp(k)等于正的或者负的LAMBDA
    而CITA(-disp(k)disp(k-q))限制了disp(k)和disp(k-q)符号相反
    所以上式变成
    (第一项disp(k)=LAMBDA>0,于是disp(k-q)<0,而且abs(disp(k))=-disp(k)>LAMBDA)
    (第二项类似,分子中的abs(disp(k))都可以直接换成LAMBDA,abs(disp(k-q))也都知道符号)
    = +LAMBDA (2pi)^-2 Int_{k in kn} {
    DELTA(disp(k)-LAMBDA)CITA(-disp(k-q)-LAMBDA) / (LAMBDA - disp(k - q))
    DELTA(disp(k)+LAMBDA)CITA(disp(k-q)-LAMBDA) / (LAMBDA + disp(k - q)) }
    还可以从积分里面把DELTA给积分掉,这样对于二维平面的积分也会变成对
    disp(k) = LAMBDA 或者 -LAMBDA的线的积分
    = +LAMBDA (2pi)^-2 *
    [Int_{disp(k) = +LAMBDA} CITA(-disp(k-q)-LAMBDA) / (LAMBDA - disp(k - q))]
    +[Int_{disp(k) = -LAMBDA} CITA(disp(k-q)-LAMBDA) / (LAMBDA + disp(k - q)) ]
    '''
    nega_q = Point(-qval.coord[0], -qval.coord[1], 1)
    # line integral along the disp(k) = +LAMBDA contour
    intposi = 0.
    for edg in posia:
        # midpoint of the edge is the representative k of this line element
        kval = middle_point(edg.ends[0], edg.ends[1])
        kprim = ksft(kval, nega_q)
        # step function CITA: require -disp(k-q) >= LAMBDA
        disp_kprim = dispb(kprim.coord[0], kprim.coord[1])
        if -disp_kprim < lamb:
            continue
        # line integral: weight each contribution by the edge element length
        intposi += edg.length / (lamb - disp_kprim)
    # line integral along the disp(k) = -LAMBDA contour
    intnega = 0.
    for edg in negaa:
        kval = middle_point(edg.ends[0], edg.ends[1])
        kprim = ksft(kval, nega_q)
        # step function CITA: require disp(k-q) >= LAMBDA
        disp_kprim = dispb(kprim.coord[0], kprim.coord[1])
        if disp_kprim < lamb:
            continue
        intnega += edg.length / (lamb + disp_kprim)
    # multiply by the LAMBDA prefactor and normalize by the patch area
    result = lamb * (intposi + intnega) / area#numpy.square(numpy.pi*2)
    return result
def pi_ab_minus_ec(posia, negaa, lamb, qval, dispb, ksft, area):
    '''Bubble integral Pi^- (eq. 10.112) with the energy cutoff as flow parameter.

    posia / negaa: edges on which the first band dispersion (dispa) equals
    +LAMBDA / -LAMBDA; lamb is LAMBDA. These edges are restricted to the
    n-th patch of band a, so they implicitly carry the patch index n.
    dispb is the second band's dispersion, qval the momentum shift (wrapped
    in a Point). ksft is the momentum-addition function and must fold its
    result back into the first Brillouin zone — that handles momentum
    conservation between k and -k + q, and Umklapp processes.
    '''
    '''
    10.112中的 PI^-(n, q) = -LAMBDA (2pi)^-2 beta^-1 Int_{k in k_n} G'(k)G(- k + Q)
    = -LAMBDA (2pi)^-2 Int_{k in k_n} CITA() -DELTA()
    { beta^-1 sum_{omega} [(i*omega-disp(k))(-i*omega-disp(-k + q))]^-1 }
    在零温下这个频率积分等于,注意-k那里把频率也给反过来了
    +CITA(+disp(k)disp(-k+q)) / (abs(disp(k)) + abs(disp(-k+q)))
    原式就等于
    = LAMBDA (2pi)^-2 Int_{k in k_n} {
    DELTA(abs(disp(k))-LAMBDA) CITA(abs(disp(-k+q)-LAMBDA))
    CITA(disp(k)disp(-k+q)) / (abs(disp(k)) + abs(disp(-k+q))) }
    第二个CITA限制了disp(k)和disp(-k+q)同号,积分积掉DELTA,分类讨论正负
    = LAMBDA (2pi)^-2 {
    Int_{disp(k) = +LAMBDA} CITA(disp(-k+q) - LAMBDA) / (LAMBDA + disp(-k+q)) +
    Int_{disp(k) = -LAMBDA} CITA(-disp(-k+q) -LAMBDA) / (LAMBDA - disp(-k+q))
    }
    '''
    # line integral along the disp(k) = +LAMBDA contour
    intposi = 0.
    for edg in posia:
        kval = middle_point(edg.ends[0], edg.ends[1])
        nega_k = Point(-kval.coord[0], -kval.coord[1], 1)
        kprim = ksft(nega_k, qval)
        # step function CITA: require disp(-k+q) >= LAMBDA
        disp_kprim = dispb(kprim.coord[0], kprim.coord[1])
        if disp_kprim < lamb:
            continue
        # line integral: weight by the edge element length
        intposi += edg.length / (lamb + disp_kprim)
    # line integral along the disp(k) = -LAMBDA contour
    intnega = 0.
    for edg in negaa:
        kval = middle_point(edg.ends[0], edg.ends[1])
        nega_k = Point(-kval.coord[0], -kval.coord[1], 1)
        kprim = ksft(nega_k, qval)
        # step function CITA: require -disp(-k+q) >= LAMBDA
        disp_kprim = dispb(kprim.coord[0], kprim.coord[1])
        if -disp_kprim < lamb:
            continue
        intnega += edg.length / (lamb - disp_kprim)
    # multiply by the LAMBDA prefactor and normalize by the patch area
    result = lamb * (intposi + intnega) / area#numpy.square(numpy.pi*2)
    return result
def pi_ab_plus_tf(ltris, tarea, lamb, dispa, dispb, qval, ksft, area):
    '''Temperature-flow bubble Pi^+.

    Here lamb is the temperature T. All triangles in ltris must lie in the
    same patch; tarea is the area of each small triangle. dispa is the band
    associated with k, dispb the one associated with k - q.
    '''
    nega_q = Point(-qval.coord[0], -qval.coord[1], 1)
    result = 0.
    for tri in ltris:
        # representative k of this small triangle
        kval = tri.center
        # k - q
        kprim = ksft(kval, nega_q)
        # epsilon_k
        eps_k = dispa(kval.coord[0], kval.coord[1])
        # epsilon_{k-q}
        eps_kp = dispb(kprim.coord[0], kprim.coord[1])
        if numpy.abs(eps_k - eps_kp) < 1.e-10:
            # Near-degenerate energies: use the analytic limit
            # lim (eps_k -> eps_kp) Pi^{+} =
            # 1/T (e^{eps/T} (-eps/T*e^{eps/T} + eps/T + e^{eps/T} + 1)) / (e^{eps/T} + 1)^3
            bval = eps_kp / lamb
            # For a large exponent the denominator dominates and the value
            # underflows toward 0; bail out to avoid overflow in exp.
            if bval > 25:
                warnings.warn("数值不稳定", RuntimeWarning)
                return 0.
            expb = numpy.exp(bval)
            num = expb * (-bval * expb + bval + expb + 1)
            den = numpy.power((1+expb), 3)
            d_val = num / den / lamb
        else:
            if (eps_k / lamb) > 25:
                warnings.warn("数值不稳定", RuntimeWarning)
                num_left = 0.
            else:
                # exp^{epsilon_k / T}
                exp_k_t = numpy.exp(eps_k / lamb)
                num_left = eps_k / lamb * exp_k_t / numpy.square(1 + exp_k_t)
            if (eps_kp / lamb) > 25:
                warnings.warn("数值不稳定", RuntimeWarning)
                num_righ = 0.
            else:
                # e^{epsilon_{k-q} / T}
                exp_kp_t = numpy.exp(eps_kp / lamb)
                num_righ = eps_kp / lamb * exp_kp_t\
                    / numpy.square(1 + exp_kp_t)
            d_val = (num_left - num_righ) / (eps_k - eps_kp)
        result += d_val * tarea
    result = result / area
    return result
def pi_ab_minus_tf(ltris, tarea, lamb, dispa, dispb, qval, ksft, area):
    '''Temperature-flow bubble Pi^-.

    Here lamb is the temperature T. All triangles in ltris must lie in the
    same patch; tarea is the area of each small triangle. dispa is the band
    associated with k, dispb the one associated with -k + q.
    '''
    result = 0.
    for tri in ltris:
        # representative k of this small triangle
        kval = tri.center
        nega_k = Point(-kval.coord[0], -kval.coord[1], 1)
        # -k + q
        kprim = ksft(nega_k, qval)
        # epsilon_k
        eps_k = dispa(kval.coord[0], kval.coord[1])
        # -epsilon_{-k+q}
        neps_kp = -dispb(kprim.coord[0], kprim.coord[1])
        # epsilon_{-k+q} already carries a minus sign here, so the difference
        # below keeps the same form as in Pi^{+}.
        if numpy.abs(eps_k - neps_kp) < 1.e-10:
            # Near-degenerate case: Pi^{-} has exactly the Pi^{+} limit form,
            # with the second energy negated.
            # lim (eps_k -> -eps_kp) Pi^{-} =
            # 1/T (e^{eps/T} (-eps/T*e^{eps/T} + eps/T + e^{eps/T} + 1)) / (e^{eps/T} + 1)^3
            bval = eps_k / lamb
            if bval > 25:
                warnings.warn("数值不稳定", RuntimeWarning)
                return 0.
            expb = numpy.exp(bval)
            num = expb * (-bval * expb + bval + expb + 1)
            den = numpy.power((1+expb), 3)
            d_val = num / den / lamb
        else:
            if (eps_k / lamb) > 25:
                warnings.warn("数值不稳定", RuntimeWarning)
                num_left = 0.
            else:
                # e^{epsilon_k / T}
                exp_k_t = numpy.exp(eps_k / lamb)
                num_left = eps_k / lamb * exp_k_t / numpy.square(1 + exp_k_t)
            if (neps_kp / lamb) > 25:
                warnings.warn("数值不稳定", RuntimeWarning)
                num_righ = 0.
            else:
                # e^{-epsilon_{-k+q} / T}
                exp_nkp_t = numpy.exp(neps_kp / lamb)
                num_righ = neps_kp / lamb * exp_nkp_t\
                    / numpy.square(1 + exp_nkp_t)
            d_val = (num_left - num_righ) / (eps_k - neps_kp)
        result += d_val * tarea
    result = result / area
    return result
def val_test(eps_k, neps_kp, lamb):
    '''Compare the degenerate-limit formula with the generic formula for the
    temperature-flow bubble.

    Pi^- has the same form as Pi^+ with the second energy negated; in the
    limit eps_k -> -eps_kp the generic expression reduces to
    1/T (e^{eps/T} (-eps/T*e^{eps/T} + eps/T + e^{eps/T} + 1)) / (e^{eps/T} + 1)^3

    :param eps_k: epsilon_k.
    :param neps_kp: -epsilon_{-k+q}.
    :param lamb: temperature T (the flow parameter).
    :return: (limit-formula value, generic-formula value); the generic
        formula divides by (eps_k - neps_kp) and is undefined when the two
        energies coincide exactly.
    '''
    bval = eps_k / lamb
    expb = numpy.exp(bval)
    num = expb * (-bval * expb + bval + expb + 1)
    den = numpy.power((1+expb), 3)
    d_val1 = num / den / lamb
    #
    # e^{epsilon_k / T}
    exp_k_t = numpy.exp(eps_k / lamb)
    # e^{-epsilon_{-k+q} / T}
    exp_nkp_t = numpy.exp(neps_kp / lamb)
    # (the original recomputed exp_k_t a second time here; the redundant
    # duplicate statement was removed — no behavior change)
    num_left = eps_k / lamb * exp_k_t / numpy.square(1 + exp_k_t)
    num_righ = neps_kp / lamb * exp_nkp_t\
        / numpy.square(1 + exp_nkp_t)
    d_val2 = (num_left - num_righ) / (eps_k - neps_kp)
    return d_val1, d_val2
| 37.284585
| 92
| 0.554331
| 1,339
| 9,433
| 3.772218
| 0.135176
| 0.054445
| 0.028509
| 0.010691
| 0.819244
| 0.788755
| 0.779648
| 0.779648
| 0.762225
| 0.753316
| 0
| 0.023838
| 0.284003
| 9,433
| 252
| 93
| 37.43254
| 0.724015
| 0.178416
| 0
| 0.782946
| 0
| 0
| 0.0055
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03876
| false
| 0
| 0.031008
| 0
| 0.124031
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fbd7d19e4361b4677c90c5cfedc3747f39479464
| 11,114
|
py
|
Python
|
tests/integration/test_linked_objects_it.py
|
corylevine/okta-sdk-python
|
c86b8fdc4525e84199143c27213c0aebc6b2af8f
|
[
"Apache-2.0"
] | 145
|
2017-06-13T21:54:04.000Z
|
2022-02-25T05:44:34.000Z
|
tests/integration/test_linked_objects_it.py
|
corylevine/okta-sdk-python
|
c86b8fdc4525e84199143c27213c0aebc6b2af8f
|
[
"Apache-2.0"
] | 146
|
2017-06-02T17:46:12.000Z
|
2022-03-29T15:52:15.000Z
|
tests/integration/test_linked_objects_it.py
|
corylevine/okta-sdk-python
|
c86b8fdc4525e84199143c27213c0aebc6b2af8f
|
[
"Apache-2.0"
] | 98
|
2017-06-27T03:44:51.000Z
|
2022-03-23T04:58:18.000Z
|
import pytest
from tests.mocks import MockOktaClient
import okta.models as models
from http import HTTPStatus
from okta.errors.okta_api_error import OktaAPIError
class TestLinkedObjectsResource:
"""
Integration Tests for the Linked Objects Resource
"""
SDK_PREFIX = "python_sdk"
@pytest.mark.vcr()
@pytest.mark.asyncio
async def test_add_get_linked_object(self, fs):
# Instantiate Mock Client
client = MockOktaClient(fs)
# Add Linked Object definition
linked_object_model = models.LinkedObject({
"primary": models.LinkedObjectDetails({
"name": f"{TestLinkedObjectsResource.SDK_PREFIX}_primary_test",
"title": "Primary",
"description": "Primary Link Property",
"type": models.LinkedObjectDetailsType.USER
}),
"associated": models.LinkedObjectDetails({
"name": f"{TestLinkedObjectsResource.SDK_PREFIX}_assoc_test",
"title": "Associated",
"description": "Associated Link Property",
"type": models.LinkedObjectDetailsType.USER
})
})
try:
created_linked_object_definition, _, err = await client.\
add_linked_object_definition(linked_object_model)
assert err is None
assert isinstance(created_linked_object_definition,
models.LinkedObject)
assert created_linked_object_definition.primary
assert created_linked_object_definition.associated
# Retrieve by Primary Name
retrieved_linked_object_definition, _, err = await \
client.get_linked_object_definition(
linked_object_model.primary.name)
assert err is None
assert isinstance(retrieved_linked_object_definition,
models.LinkedObject)
assert retrieved_linked_object_definition.primary.name ==\
created_linked_object_definition.primary.name
assert retrieved_linked_object_definition.associated.name ==\
created_linked_object_definition.associated.name
assert retrieved_linked_object_definition.primary.title ==\
created_linked_object_definition.primary.title
assert retrieved_linked_object_definition.associated.title ==\
created_linked_object_definition.associated.title
assert retrieved_linked_object_definition.primary.type ==\
created_linked_object_definition.primary.type
assert retrieved_linked_object_definition.associated.type ==\
created_linked_object_definition.associated.type
# Retrieve by Associated Name
retrieved_linked_object_definition, _, err = await \
client.get_linked_object_definition(
linked_object_model.associated.name)
assert err is None
assert isinstance(retrieved_linked_object_definition,
models.LinkedObject)
assert retrieved_linked_object_definition.primary.name ==\
created_linked_object_definition.primary.name
assert retrieved_linked_object_definition.associated.name ==\
created_linked_object_definition.associated.name
assert retrieved_linked_object_definition.primary.title ==\
created_linked_object_definition.primary.title
assert retrieved_linked_object_definition.associated.title ==\
created_linked_object_definition.associated.title
assert retrieved_linked_object_definition.primary.type ==\
created_linked_object_definition.primary.type
assert retrieved_linked_object_definition.associated.type ==\
created_linked_object_definition.associated.type
finally:
# Delete Linked Object definition
_, err = await \
client.delete_linked_object_definition(
linked_object_model.primary.name)
assert err is None
@pytest.mark.vcr()
@pytest.mark.asyncio
async def test_get_all_linked_objects(self, fs):
# Instantiate Mock Client
client = MockOktaClient(fs)
# Add Linked Object definition
linked_object_model_1 = models.LinkedObject({
"primary": models.LinkedObjectDetails({
"name": f"{TestLinkedObjectsResource.SDK_PREFIX}_primary_t1",
"title": "Primary",
"description": "Primary Link Property",
"type": models.LinkedObjectDetailsType.USER
}),
"associated": models.LinkedObjectDetails({
"name": f"{TestLinkedObjectsResource.SDK_PREFIX}_assoc_t1",
"title": "Associated",
"description": "Associated Link Property",
"type": models.LinkedObjectDetailsType.USER
})
})
linked_object_model_2 = models.LinkedObject({
"primary": models.LinkedObjectDetails({
"name": f"{TestLinkedObjectsResource.SDK_PREFIX}_primary_t2",
"title": "Primary",
"description": "Primary Link Property",
"type": models.LinkedObjectDetailsType.USER
}),
"associated": models.LinkedObjectDetails({
"name": f"{TestLinkedObjectsResource.SDK_PREFIX}_assoc_t2",
"title": "Associated",
"description": "Associated Link Property",
"type": models.LinkedObjectDetailsType.USER
})
})
try:
created_linked_object_definition, _, err = await client.\
add_linked_object_definition(linked_object_model_1)
assert err is None
assert isinstance(created_linked_object_definition,
models.LinkedObject)
assert created_linked_object_definition.primary
assert created_linked_object_definition.associated
created_linked_object_definition_2, _, err = await client.\
add_linked_object_definition(linked_object_model_2)
assert err is None
assert isinstance(created_linked_object_definition_2,
models.LinkedObject)
assert created_linked_object_definition_2.primary
assert created_linked_object_definition_2.associated
# List
all_linked_obj_defs, _, err = await\
client.list_linked_object_definitions()
assert err is None
assert len(all_linked_obj_defs) > 0
assert next((lo for lo in all_linked_obj_defs
if linked_object_model_1.primary.name == lo.primary.name))
assert next((lo for lo in all_linked_obj_defs
if linked_object_model_2.primary.name == lo.primary.name))
finally:
errors = []
# Delete Linked Object definition
try:
_, err = await \
client.delete_linked_object_definition(
linked_object_model_1.primary.name)
assert err is None
except Exception as exc:
errors.append(exc)
try:
_, err = await \
client.delete_linked_object_definition(
linked_object_model_2.primary.name)
assert err is None
except Exception as exc:
errors.append(exc)
assert len(errors) == 0
@pytest.mark.vcr()
@pytest.mark.asyncio
async def test_delete_linked_object(self, fs):
# Instantiate Mock Client
client = MockOktaClient(fs)
# Add Linked Object definition
linked_object_model = models.LinkedObject({
"primary": models.LinkedObjectDetails({
"name": f"{TestLinkedObjectsResource.SDK_PREFIX}_primary_test",
"title": "Primary",
"description": "Primary Link Property",
"type": models.LinkedObjectDetailsType.USER
}),
"associated": models.LinkedObjectDetails({
"name": f"{TestLinkedObjectsResource.SDK_PREFIX}_assoc_test",
"title": "Associated",
"description": "Associated Link Property",
"type": models.LinkedObjectDetailsType.USER
})
})
try:
created_linked_object_definition, _, err = await client.\
add_linked_object_definition(linked_object_model)
assert err is None
assert isinstance(created_linked_object_definition,
models.LinkedObject)
assert created_linked_object_definition.primary
assert created_linked_object_definition.associated
# Retrieve by Primary Name
retrieved_linked_object_definition, _, err = await \
client.get_linked_object_definition(
linked_object_model.primary.name)
assert err is None
assert isinstance(retrieved_linked_object_definition,
models.LinkedObject)
assert retrieved_linked_object_definition.primary.name ==\
created_linked_object_definition.primary.name
assert retrieved_linked_object_definition.associated.name ==\
created_linked_object_definition.associated.name
assert retrieved_linked_object_definition.primary.title ==\
created_linked_object_definition.primary.title
assert retrieved_linked_object_definition.associated.title ==\
created_linked_object_definition.associated.title
assert retrieved_linked_object_definition.primary.type ==\
created_linked_object_definition.primary.type
assert retrieved_linked_object_definition.associated.type ==\
created_linked_object_definition.associated.type
# Delete Linked Object definition
_, err = await \
client.delete_linked_object_definition(
linked_object_model.primary.name)
# Retrieve by Primary Name
retrieved_linked_object_definition, resp, err = await \
client.get_linked_object_definition(
linked_object_model.primary.name)
assert err is not None
assert isinstance(err, OktaAPIError)
assert resp.get_status() == HTTPStatus.NOT_FOUND
assert retrieved_linked_object_definition is None
finally:
# Delete Linked Object definition
try:
_, err = await \
client.delete_linked_object_definition(
linked_object_model.primary.name)
except Exception:
pass
| 44.995951
| 83
| 0.61634
| 1,009
| 11,114
| 6.453915
| 0.09217
| 0.187961
| 0.27027
| 0.151413
| 0.917076
| 0.897267
| 0.886824
| 0.878225
| 0.869932
| 0.850584
| 0
| 0.002383
| 0.320317
| 11,114
| 246
| 84
| 45.178862
| 0.859677
| 0.04004
| 0
| 0.811594
| 0
| 0
| 0.085534
| 0.036846
| 0
| 0
| 0
| 0
| 0.251208
| 1
| 0
| false
| 0.004831
| 0.024155
| 0
| 0.033816
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
83b431dc38202ad69d6c8a435c77cec889045689
| 10,725
|
py
|
Python
|
nova/api/openstack/compute/schemas/servers.py
|
bopopescu/nova-token
|
ec98f69dea7b3e2b9013b27fd55a2c1a1ac6bfb2
|
[
"Apache-2.0"
] | null | null | null |
nova/api/openstack/compute/schemas/servers.py
|
bopopescu/nova-token
|
ec98f69dea7b3e2b9013b27fd55a2c1a1ac6bfb2
|
[
"Apache-2.0"
] | null | null | null |
nova/api/openstack/compute/schemas/servers.py
|
bopopescu/nova-token
|
ec98f69dea7b3e2b9013b27fd55a2c1a1ac6bfb2
|
[
"Apache-2.0"
] | 2
|
2017-07-20T17:31:34.000Z
|
2020-07-24T02:42:19.000Z
|
begin_unit
comment|'# Copyright 2014 NEC Corporation. All rights reserved.'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Licensed under the Apache License, Version 2.0 (the "License"); you may'
nl|'\n'
comment|'# not use this file except in compliance with the License. You may obtain'
nl|'\n'
comment|'# a copy of the License at'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# http://www.apache.org/licenses/LICENSE-2.0'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Unless required by applicable law or agreed to in writing, software'
nl|'\n'
comment|'# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT'
nl|'\n'
comment|'# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the'
nl|'\n'
comment|'# License for the specific language governing permissions and limitations'
nl|'\n'
comment|'# under the License.'
nl|'\n'
nl|'\n'
name|'import'
name|'copy'
newline|'\n'
nl|'\n'
name|'from'
name|'nova'
op|'.'
name|'api'
op|'.'
name|'validation'
name|'import'
name|'parameter_types'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|variable|base_create
name|'base_create'
op|'='
op|'{'
nl|'\n'
string|"'type'"
op|':'
string|"'object'"
op|','
nl|'\n'
string|"'properties'"
op|':'
op|'{'
nl|'\n'
string|"'server'"
op|':'
op|'{'
nl|'\n'
string|"'type'"
op|':'
string|"'object'"
op|','
nl|'\n'
string|"'properties'"
op|':'
op|'{'
nl|'\n'
string|"'name'"
op|':'
name|'parameter_types'
op|'.'
name|'name'
op|','
nl|'\n'
string|"'imageRef'"
op|':'
name|'parameter_types'
op|'.'
name|'image_ref'
op|','
nl|'\n'
string|"'flavorRef'"
op|':'
name|'parameter_types'
op|'.'
name|'flavor_ref'
op|','
nl|'\n'
string|"'adminPass'"
op|':'
name|'parameter_types'
op|'.'
name|'admin_password'
op|','
nl|'\n'
string|"'metadata'"
op|':'
name|'parameter_types'
op|'.'
name|'metadata'
op|','
nl|'\n'
string|"'networks'"
op|':'
op|'{'
nl|'\n'
string|"'type'"
op|':'
string|"'array'"
op|','
nl|'\n'
string|"'items'"
op|':'
op|'{'
nl|'\n'
string|"'type'"
op|':'
string|"'object'"
op|','
nl|'\n'
string|"'properties'"
op|':'
op|'{'
nl|'\n'
string|"'fixed_ip'"
op|':'
name|'parameter_types'
op|'.'
name|'ip_address'
op|','
nl|'\n'
string|"'port'"
op|':'
op|'{'
nl|'\n'
string|"'oneOf'"
op|':'
op|'['
op|'{'
string|"'type'"
op|':'
string|"'string'"
op|','
string|"'format'"
op|':'
string|"'uuid'"
op|'}'
op|','
nl|'\n'
op|'{'
string|"'type'"
op|':'
string|"'null'"
op|'}'
op|']'
nl|'\n'
op|'}'
op|','
nl|'\n'
string|"'uuid'"
op|':'
op|'{'
string|"'type'"
op|':'
string|"'string'"
op|'}'
op|','
nl|'\n'
op|'}'
op|','
nl|'\n'
string|"'additionalProperties'"
op|':'
name|'False'
op|','
nl|'\n'
op|'}'
nl|'\n'
op|'}'
nl|'\n'
op|'}'
op|','
nl|'\n'
string|"'required'"
op|':'
op|'['
string|"'name'"
op|','
string|"'flavorRef'"
op|']'
op|','
nl|'\n'
string|"'additionalProperties'"
op|':'
name|'False'
op|','
nl|'\n'
op|'}'
op|','
nl|'\n'
op|'}'
op|','
nl|'\n'
string|"'required'"
op|':'
op|'['
string|"'server'"
op|']'
op|','
nl|'\n'
string|"'additionalProperties'"
op|':'
name|'False'
op|','
nl|'\n'
op|'}'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|variable|base_create_v20
name|'base_create_v20'
op|'='
name|'copy'
op|'.'
name|'deepcopy'
op|'('
name|'base_create'
op|')'
newline|'\n'
name|'base_create_v20'
op|'['
string|"'properties'"
op|']'
op|'['
string|"'server'"
op|']'
op|'['
nl|'\n'
string|"'properties'"
op|']'
op|'['
string|"'name'"
op|']'
op|'='
name|'parameter_types'
op|'.'
name|'name_with_leading_trailing_spaces'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|variable|base_create_v219
name|'base_create_v219'
op|'='
name|'copy'
op|'.'
name|'deepcopy'
op|'('
name|'base_create'
op|')'
newline|'\n'
name|'base_create_v219'
op|'['
string|"'properties'"
op|']'
op|'['
string|"'server'"
op|']'
op|'['
nl|'\n'
string|"'properties'"
op|']'
op|'['
string|"'description'"
op|']'
op|'='
name|'parameter_types'
op|'.'
name|'description'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|variable|base_update
name|'base_update'
op|'='
op|'{'
nl|'\n'
string|"'type'"
op|':'
string|"'object'"
op|','
nl|'\n'
string|"'properties'"
op|':'
op|'{'
nl|'\n'
string|"'server'"
op|':'
op|'{'
nl|'\n'
string|"'type'"
op|':'
string|"'object'"
op|','
nl|'\n'
string|"'properties'"
op|':'
op|'{'
nl|'\n'
string|"'name'"
op|':'
name|'parameter_types'
op|'.'
name|'name'
op|','
nl|'\n'
op|'}'
op|','
nl|'\n'
string|"'additionalProperties'"
op|':'
name|'False'
op|','
nl|'\n'
op|'}'
op|','
nl|'\n'
op|'}'
op|','
nl|'\n'
string|"'required'"
op|':'
op|'['
string|"'server'"
op|']'
op|','
nl|'\n'
string|"'additionalProperties'"
op|':'
name|'False'
op|','
nl|'\n'
op|'}'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|variable|base_update_v20
name|'base_update_v20'
op|'='
name|'copy'
op|'.'
name|'deepcopy'
op|'('
name|'base_update'
op|')'
newline|'\n'
name|'base_update_v20'
op|'['
string|"'properties'"
op|']'
op|'['
string|"'server'"
op|']'
op|'['
nl|'\n'
string|"'properties'"
op|']'
op|'['
string|"'name'"
op|']'
op|'='
name|'parameter_types'
op|'.'
name|'name_with_leading_trailing_spaces'
newline|'\n'
nl|'\n'
DECL|variable|base_update_v219
name|'base_update_v219'
op|'='
name|'copy'
op|'.'
name|'deepcopy'
op|'('
name|'base_update'
op|')'
newline|'\n'
name|'base_update_v219'
op|'['
string|"'properties'"
op|']'
op|'['
string|"'server'"
op|']'
op|'['
nl|'\n'
string|"'properties'"
op|']'
op|'['
string|"'description'"
op|']'
op|'='
name|'parameter_types'
op|'.'
name|'description'
newline|'\n'
nl|'\n'
DECL|variable|base_rebuild
name|'base_rebuild'
op|'='
op|'{'
nl|'\n'
string|"'type'"
op|':'
string|"'object'"
op|','
nl|'\n'
string|"'properties'"
op|':'
op|'{'
nl|'\n'
string|"'rebuild'"
op|':'
op|'{'
nl|'\n'
string|"'type'"
op|':'
string|"'object'"
op|','
nl|'\n'
string|"'properties'"
op|':'
op|'{'
nl|'\n'
string|"'name'"
op|':'
name|'parameter_types'
op|'.'
name|'name'
op|','
nl|'\n'
string|"'imageRef'"
op|':'
name|'parameter_types'
op|'.'
name|'image_ref'
op|','
nl|'\n'
string|"'adminPass'"
op|':'
name|'parameter_types'
op|'.'
name|'admin_password'
op|','
nl|'\n'
string|"'metadata'"
op|':'
name|'parameter_types'
op|'.'
name|'metadata'
op|','
nl|'\n'
string|"'preserve_ephemeral'"
op|':'
name|'parameter_types'
op|'.'
name|'boolean'
op|','
nl|'\n'
op|'}'
op|','
nl|'\n'
string|"'required'"
op|':'
op|'['
string|"'imageRef'"
op|']'
op|','
nl|'\n'
string|"'additionalProperties'"
op|':'
name|'False'
op|','
nl|'\n'
op|'}'
op|','
nl|'\n'
op|'}'
op|','
nl|'\n'
string|"'required'"
op|':'
op|'['
string|"'rebuild'"
op|']'
op|','
nl|'\n'
string|"'additionalProperties'"
op|':'
name|'False'
op|','
nl|'\n'
op|'}'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|variable|base_rebuild_v20
name|'base_rebuild_v20'
op|'='
name|'copy'
op|'.'
name|'deepcopy'
op|'('
name|'base_rebuild'
op|')'
newline|'\n'
name|'base_rebuild_v20'
op|'['
string|"'properties'"
op|']'
op|'['
string|"'rebuild'"
op|']'
op|'['
nl|'\n'
string|"'properties'"
op|']'
op|'['
string|"'name'"
op|']'
op|'='
name|'parameter_types'
op|'.'
name|'name_with_leading_trailing_spaces'
newline|'\n'
nl|'\n'
DECL|variable|base_rebuild_v219
name|'base_rebuild_v219'
op|'='
name|'copy'
op|'.'
name|'deepcopy'
op|'('
name|'base_rebuild'
op|')'
newline|'\n'
name|'base_rebuild_v219'
op|'['
string|"'properties'"
op|']'
op|'['
string|"'rebuild'"
op|']'
op|'['
nl|'\n'
string|"'properties'"
op|']'
op|'['
string|"'description'"
op|']'
op|'='
name|'parameter_types'
op|'.'
name|'description'
newline|'\n'
nl|'\n'
DECL|variable|base_resize
name|'base_resize'
op|'='
op|'{'
nl|'\n'
string|"'type'"
op|':'
string|"'object'"
op|','
nl|'\n'
string|"'properties'"
op|':'
op|'{'
nl|'\n'
string|"'resize'"
op|':'
op|'{'
nl|'\n'
string|"'type'"
op|':'
string|"'object'"
op|','
nl|'\n'
string|"'properties'"
op|':'
op|'{'
nl|'\n'
string|"'flavorRef'"
op|':'
name|'parameter_types'
op|'.'
name|'flavor_ref'
op|','
nl|'\n'
op|'}'
op|','
nl|'\n'
string|"'required'"
op|':'
op|'['
string|"'flavorRef'"
op|']'
op|','
nl|'\n'
string|"'additionalProperties'"
op|':'
name|'False'
op|','
nl|'\n'
op|'}'
op|','
nl|'\n'
op|'}'
op|','
nl|'\n'
string|"'required'"
op|':'
op|'['
string|"'resize'"
op|']'
op|','
nl|'\n'
string|"'additionalProperties'"
op|':'
name|'False'
op|','
nl|'\n'
op|'}'
newline|'\n'
nl|'\n'
DECL|variable|create_image
name|'create_image'
op|'='
op|'{'
nl|'\n'
string|"'type'"
op|':'
string|"'object'"
op|','
nl|'\n'
string|"'properties'"
op|':'
op|'{'
nl|'\n'
string|"'createImage'"
op|':'
op|'{'
nl|'\n'
string|"'type'"
op|':'
string|"'object'"
op|','
nl|'\n'
string|"'properties'"
op|':'
op|'{'
nl|'\n'
string|"'name'"
op|':'
name|'parameter_types'
op|'.'
name|'name'
op|','
nl|'\n'
string|"'metadata'"
op|':'
name|'parameter_types'
op|'.'
name|'metadata'
nl|'\n'
op|'}'
op|','
nl|'\n'
string|"'required'"
op|':'
op|'['
string|"'name'"
op|']'
op|','
nl|'\n'
string|"'additionalProperties'"
op|':'
name|'False'
nl|'\n'
op|'}'
nl|'\n'
op|'}'
op|','
nl|'\n'
string|"'required'"
op|':'
op|'['
string|"'createImage'"
op|']'
op|','
nl|'\n'
string|"'additionalProperties'"
op|':'
name|'False'
nl|'\n'
op|'}'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|variable|create_image_v20
name|'create_image_v20'
op|'='
name|'copy'
op|'.'
name|'deepcopy'
op|'('
name|'create_image'
op|')'
newline|'\n'
name|'create_image_v20'
op|'['
string|"'properties'"
op|']'
op|'['
string|"'createImage'"
op|']'
op|'['
nl|'\n'
string|"'properties'"
op|']'
op|'['
string|"'name'"
op|']'
op|'='
name|'parameter_types'
op|'.'
name|'name_with_leading_trailing_spaces'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|variable|reboot
name|'reboot'
op|'='
op|'{'
nl|'\n'
string|"'type'"
op|':'
string|"'object'"
op|','
nl|'\n'
string|"'properties'"
op|':'
op|'{'
nl|'\n'
string|"'reboot'"
op|':'
op|'{'
nl|'\n'
string|"'type'"
op|':'
string|"'object'"
op|','
nl|'\n'
string|"'properties'"
op|':'
op|'{'
nl|'\n'
string|"'type'"
op|':'
op|'{'
nl|'\n'
string|"'enum'"
op|':'
op|'['
string|"'HARD'"
op|','
string|"'Hard'"
op|','
string|"'hard'"
op|','
string|"'SOFT'"
op|','
string|"'Soft'"
op|','
string|"'soft'"
op|']'
nl|'\n'
op|'}'
nl|'\n'
op|'}'
op|','
nl|'\n'
string|"'required'"
op|':'
op|'['
string|"'type'"
op|']'
op|','
nl|'\n'
string|"'additionalProperties'"
op|':'
name|'False'
nl|'\n'
op|'}'
nl|'\n'
op|'}'
op|','
nl|'\n'
string|"'required'"
op|':'
op|'['
string|"'reboot'"
op|']'
op|','
nl|'\n'
string|"'additionalProperties'"
op|':'
name|'False'
nl|'\n'
op|'}'
newline|'\n'
nl|'\n'
DECL|variable|trigger_crash_dump
name|'trigger_crash_dump'
op|'='
op|'{'
nl|'\n'
string|"'type'"
op|':'
string|"'object'"
op|','
nl|'\n'
string|"'properties'"
op|':'
op|'{'
nl|'\n'
string|"'trigger_crash_dump'"
op|':'
op|'{'
nl|'\n'
string|"'type'"
op|':'
string|"'null'"
nl|'\n'
op|'}'
nl|'\n'
op|'}'
op|','
nl|'\n'
string|"'required'"
op|':'
op|'['
string|"'trigger_crash_dump'"
op|']'
op|','
nl|'\n'
string|"'additionalProperties'"
op|':'
name|'False'
nl|'\n'
op|'}'
newline|'\n'
endmarker|''
end_unit
| 12.370242
| 88
| 0.581818
| 1,553
| 10,725
| 3.942692
| 0.086929
| 0.079373
| 0.096358
| 0.165278
| 0.848277
| 0.821493
| 0.803201
| 0.781153
| 0.758125
| 0.73624
| 0
| 0.006057
| 0.091748
| 10,725
| 866
| 89
| 12.384527
| 0.622523
| 0
| 0
| 0.941109
| 0
| 0
| 0.389371
| 0.041026
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.004619
| 0.002309
| 0
| 0.002309
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
83ce8a7e61c9f89cf1e2d6d0e4fc8971302ed6e6
| 25,859
|
py
|
Python
|
bot/Run.py
|
Dechrissen/Twitch-Speedrunning-Bot
|
7dd693d1d0c6b25211427f4c761fbc7148506261
|
[
"MIT"
] | 1
|
2019-09-15T05:22:00.000Z
|
2019-09-15T05:22:00.000Z
|
bot/Run.py
|
Dechrissen/Twitch-Speedrunning-Bot
|
7dd693d1d0c6b25211427f4c761fbc7148506261
|
[
"MIT"
] | null | null | null |
bot/Run.py
|
Dechrissen/Twitch-Speedrunning-Bot
|
7dd693d1d0c6b25211427f4c761fbc7148506261
|
[
"MIT"
] | null | null | null |
import string
import time
import math
import urllib.request
from urllib.request import urlopen
from json import loads
from Socket import openSocket, sendMessage
from Initialize import joinRoom
from Read import getUser, getMessage
from Settings import CHANNEL, COOLDOWN, IDENT, CHANNELPASS, SRC_USERNAME, GAMES, CATEGORIES
#Returns the world record for the category that's written in the stream title
def worldRecord(input):
    """Respond in chat with the world-record run for the category in the
    stream title (or a category given as a command argument).

    Triggered when the first word of the chat message equals *input*
    (e.g. '!wr').  Relies on module-level globals set by the read loop:
    `message` (the chat line) and `s` (the IRC socket).
    """
    if input == message.lower().split()[0].strip():
        #Check to see if an argument is specified first
        argument = False
        try:
            message.lower().split()[1]
        except IndexError as err:
            pass
        else:
            argument = True
        #Get the stream title from the Twitch API
        try:
            response = urlopen('https://api.twitch.tv/kraken/channels/{}?oauth_token={}'.format(CHANNEL, CHANNELPASS.strip('oauth:')))
        except urllib.error.HTTPError as err:
            sendMessage(s, "Error: Invalid CHANNEL/CHANNELPASS in settings file")
            cooldown()
            return
        readable = response.read().decode('utf-8')
        lst = loads(readable)
        title = lst['status'].lower()
        # Match a configured game against the stream title.  GAMES rows are
        # indexed as [0]=display title, [1]=speedrun.com game id, [3]=platform.
        # NOTE(review): `platform` stays unbound when no game matches, but the
        # `game == None` guard below returns before `platform` is used.
        game = None
        for i in range(len(GAMES)):
            if GAMES[i][0].lower() in title:
                game = GAMES[i][1]
                platform = GAMES[i][3]
                break
        category = None
        category_title = None
        #Check again to see if an argument was specified
        if argument == False:
            # No argument: detect the category from the stream title.
            for i in range(len(CATEGORIES)):
                if CATEGORIES[i][0].lower() in title:
                    category = CATEGORIES[i][1]
                    category_title = CATEGORIES[i][0]
                    break
        elif argument == True:
            # Argument given: everything after the command word names the category.
            specified_category = message.lower().split(input, 1)[-1].strip()
            for i in range(len(CATEGORIES)):
                if specified_category == CATEGORIES[i][0].lower():
                    category_title = CATEGORIES[i][0]
                    category = CATEGORIES[i][1]
                    break
            if category == None:
                sendMessage(s, "Error: Invalid category specified")
                cooldown()
                return
        if game == None:
            sendMessage(s, "No game and/or category detected in stream title.")
            cooldown()
            return
        if category != None:
            # top=1: the first leaderboard entry is the world record.
            response = urlopen('https://www.speedrun.com/api/v1/leaderboards/{}/category/{}?top=1&embed=players&platform={}'.format(game, category, platform))
            readable = response.read().decode('utf-8')
            lst = loads(readable)
            runner = lst['data']['players']['data'][0]['names']['international']
            time_in_sec = int(lst['data']['runs'][0]['run']['times']['realtime_t'])
            # Convert total seconds into h/m/s for display.
            hours = divmod(time_in_sec, 3600)
            minutes = divmod(hours[1], 60)
            seconds = minutes[1]
            wr = ''
            if hours[0] > 0:
                wr = str(hours[0]) + "h " + str(minutes[0]) + "m " + str(seconds) + "s "
            elif minutes[0] > 0:
                wr = str(minutes[0]) + "m " + str(seconds) + "s "
            else:
                wr = str(seconds) + "s "
            sendMessage(s, "The " + category_title + " world record is " + wr + "by " + runner + ".")
            cooldown()
            return
        elif category == None:
            sendMessage(s, "No game and/or category detected in stream title.")
            cooldown()
            return
def second(input):
    """Respond with the 2nd-place leaderboard time for the detected
    category.  Near-duplicate of worldRecord(), differing only in the
    `top=2` query, the runs/players index (1), and the reply text.
    """
    if input == message.lower().split()[0].strip():
        #Check to see if an argument is specified first
        argument = False
        try:
            message.lower().split()[1]
        except IndexError as err:
            pass
        else:
            argument = True
        #Get the stream title from the Twitch API
        try:
            response = urlopen('https://api.twitch.tv/kraken/channels/{}?oauth_token={}'.format(CHANNEL, CHANNELPASS.strip('oauth:')))
        except urllib.error.HTTPError as err:
            sendMessage(s, "Error: Invalid CHANNEL/CHANNELPASS in settings file")
            cooldown()
            return
        readable = response.read().decode('utf-8')
        lst = loads(readable)
        title = lst['status'].lower()
        game = None
        for i in range(len(GAMES)):
            if GAMES[i][0].lower() in title:
                game = GAMES[i][1]
                platform = GAMES[i][3]
                break
        category = None
        category_title = None
        #Check again to see if an argument was specified
        if argument == False:
            for i in range(len(CATEGORIES)):
                if CATEGORIES[i][0].lower() in title:
                    category = CATEGORIES[i][1]
                    category_title = CATEGORIES[i][0]
                    break
        elif argument == True:
            specified_category = message.lower().split(input, 1)[-1].strip()
            for i in range(len(CATEGORIES)):
                if specified_category == CATEGORIES[i][0].lower():
                    category_title = CATEGORIES[i][0]
                    category = CATEGORIES[i][1]
                    break
            if category == None:
                sendMessage(s, "Error: Invalid category specified")
                cooldown()
                return
        if game == None:
            sendMessage(s, "No game and/or category detected in stream title.")
            cooldown()
            return
        if category != None:
            # top=2 so the second entry (index 1) is the 2nd-place run.
            response = urlopen('https://www.speedrun.com/api/v1/leaderboards/{}/category/{}?top=2&embed=players&platform={}'.format(game, category, platform))
            readable = response.read().decode('utf-8')
            lst = loads(readable)
            runner = lst['data']['players']['data'][1]['names']['international']
            time_in_sec = int(lst['data']['runs'][1]['run']['times']['realtime_t'])
            # Convert total seconds into h/m/s for display.
            hours = divmod(time_in_sec, 3600)
            minutes = divmod(hours[1], 60)
            seconds = minutes[1]
            place2nd = ''
            if hours[0] > 0:
                place2nd = str(hours[0]) + "h " + str(minutes[0]) + "m " + str(seconds) + "s "
            elif minutes[0] > 0:
                place2nd = str(minutes[0]) + "m " + str(seconds) + "s "
            else:
                place2nd = str(seconds) + "s "
            sendMessage(s, "The 2nd place time for " + category_title + " is " + place2nd + "by " + runner + ".")
            cooldown()
            return
        elif category == None:
            sendMessage(s, "No game and/or category detected in stream title.")
            cooldown()
            return
def third(input):
    """Respond with the 3rd-place leaderboard time for the detected
    category.  Near-duplicate of worldRecord(), differing only in the
    `top=3` query, the runs/players index (2), and the reply text.
    """
    if input == message.lower().split()[0].strip():
        #Check to see if an argument is specified first
        argument = False
        try:
            message.lower().split()[1]
        except IndexError as err:
            pass
        else:
            argument = True
        #Get the stream title from the Twitch API
        try:
            response = urlopen('https://api.twitch.tv/kraken/channels/{}?oauth_token={}'.format(CHANNEL, CHANNELPASS.strip('oauth:')))
        except urllib.error.HTTPError as err:
            sendMessage(s, "Error: Invalid CHANNEL/CHANNELPASS in settings file")
            cooldown()
            return
        readable = response.read().decode('utf-8')
        lst = loads(readable)
        title = lst['status'].lower()
        game = None
        for i in range(len(GAMES)):
            if GAMES[i][0].lower() in title:
                game = GAMES[i][1]
                platform = GAMES[i][3]
                break
        category = None
        category_title = None
        #Check again to see if an argument was specified
        if argument == False:
            for i in range(len(CATEGORIES)):
                if CATEGORIES[i][0].lower() in title:
                    category = CATEGORIES[i][1]
                    category_title = CATEGORIES[i][0]
                    break
        elif argument == True:
            specified_category = message.lower().split(input, 1)[-1].strip()
            for i in range(len(CATEGORIES)):
                if specified_category == CATEGORIES[i][0].lower():
                    category_title = CATEGORIES[i][0]
                    category = CATEGORIES[i][1]
                    break
            if category == None:
                sendMessage(s, "Error: Invalid category specified")
                cooldown()
                return
        if game == None:
            sendMessage(s, "No game and/or category detected in stream title.")
            cooldown()
            return
        if category != None:
            # top=3 so the third entry (index 2) is the 3rd-place run.
            response = urlopen('https://www.speedrun.com/api/v1/leaderboards/{}/category/{}?top=3&embed=players&platform={}'.format(game, category, platform))
            readable = response.read().decode('utf-8')
            lst = loads(readable)
            runner = lst['data']['players']['data'][2]['names']['international']
            time_in_sec = int(lst['data']['runs'][2]['run']['times']['realtime_t'])
            # Convert total seconds into h/m/s for display.
            hours = divmod(time_in_sec, 3600)
            minutes = divmod(hours[1], 60)
            seconds = minutes[1]
            place3rd = ''
            if hours[0] > 0:
                place3rd = str(hours[0]) + "h " + str(minutes[0]) + "m " + str(seconds) + "s "
            elif minutes[0] > 0:
                place3rd = str(minutes[0]) + "m " + str(seconds) + "s "
            else:
                place3rd = str(seconds) + "s "
            sendMessage(s, "The 3rd place time for " + category_title + " is " + place3rd + "by " + runner + ".")
            cooldown()
            return
        elif category == None:
            sendMessage(s, "No game and/or category detected in stream title.")
            cooldown()
            return
def fourth(input):
    """Respond with the 4th-place leaderboard time for the detected
    category.  Near-duplicate of worldRecord(), differing only in the
    `top=4` query, the runs/players index (3), and the reply text.
    """
    if input == message.lower().split()[0].strip():
        #Check to see if an argument is specified first
        argument = False
        try:
            message.lower().split()[1]
        except IndexError as err:
            pass
        else:
            argument = True
        #Get the stream title from the Twitch API
        try:
            response = urlopen('https://api.twitch.tv/kraken/channels/{}?oauth_token={}'.format(CHANNEL, CHANNELPASS.strip('oauth:')))
        except urllib.error.HTTPError as err:
            sendMessage(s, "Error: Invalid CHANNEL/CHANNELPASS in settings file")
            cooldown()
            return
        readable = response.read().decode('utf-8')
        lst = loads(readable)
        title = lst['status'].lower()
        game = None
        for i in range(len(GAMES)):
            if GAMES[i][0].lower() in title:
                game = GAMES[i][1]
                platform = GAMES[i][3]
                break
        category = None
        category_title = None
        #Check again to see if an argument was specified
        if argument == False:
            for i in range(len(CATEGORIES)):
                if CATEGORIES[i][0].lower() in title:
                    category = CATEGORIES[i][1]
                    category_title = CATEGORIES[i][0]
                    break
        elif argument == True:
            specified_category = message.lower().split(input, 1)[-1].strip()
            for i in range(len(CATEGORIES)):
                if specified_category == CATEGORIES[i][0].lower():
                    category_title = CATEGORIES[i][0]
                    category = CATEGORIES[i][1]
                    break
            if category == None:
                sendMessage(s, "Error: Invalid category specified")
                cooldown()
                return
        if game == None:
            sendMessage(s, "No game and/or category detected in stream title.")
            cooldown()
            return
        if category != None:
            # top=4 so the fourth entry (index 3) is the 4th-place run.
            response = urlopen('https://www.speedrun.com/api/v1/leaderboards/{}/category/{}?top=4&embed=players&platform={}'.format(game, category, platform))
            readable = response.read().decode('utf-8')
            lst = loads(readable)
            runner = lst['data']['players']['data'][3]['names']['international']
            time_in_sec = int(lst['data']['runs'][3]['run']['times']['realtime_t'])
            # Convert total seconds into h/m/s for display.
            hours = divmod(time_in_sec, 3600)
            minutes = divmod(hours[1], 60)
            seconds = minutes[1]
            place4th = ''
            if hours[0] > 0:
                place4th = str(hours[0]) + "h " + str(minutes[0]) + "m " + str(seconds) + "s "
            elif minutes[0] > 0:
                place4th = str(minutes[0]) + "m " + str(seconds) + "s "
            else:
                place4th = str(seconds) + "s "
            sendMessage(s, "The 4th place time for " + category_title + " is " + place4th + "by " + runner + ".")
            cooldown()
            return
        elif category == None:
            sendMessage(s, "No game and/or category detected in stream title.")
            cooldown()
            return
#Returns the channel owner's personal best time for the category that's written in the stream title
def personalBest(input):
    """Report a user's personal-best time and placing for the detected
    category.

    Usage (inferred from the parsing below): '!pb [username] [category]'.
    Falls back to SRC_USERNAME when no username is given, and to the
    category found in the stream title when none is specified.
    """
    if input == message.lower().split()[0]:
        # A third word means an explicit category argument was supplied.
        category_specified = False
        try:
            message.split()[2]
        except IndexError as err:
            pass
        else:
            category_specified = True
        #Get the stream title from the Twitch API
        try:
            response = urlopen('https://api.twitch.tv/kraken/channels/{}?oauth_token={}'.format(CHANNEL, CHANNELPASS.strip('oauth:')))
        except urllib.error.HTTPError as err:
            sendMessage(s, "Error: Invalid CHANNEL/CHANNELPASS in settings file")
            cooldown()
            return
        readable = response.read().decode('utf-8')
        lst = loads(readable)
        title = lst['status'].lower()
        # GAMES rows here: [0]=display title, [1]=abbreviation, [2]=platform name.
        game = None
        for i in range(len(GAMES)):
            if GAMES[i][0].lower() in title:
                game = GAMES[i][1].lower()
                platform_title = GAMES[i][2]
                break
        category_title = None
        if category_specified == True:
            # NOTE(review): str.strip('!pb ') strips the CHARACTERS
            # '!', 'p', 'b', ' ' from both ends, not the prefix "!pb " —
            # usernames beginning/ending with those letters may be mangled.
            category_title = message.lower().strip('!pb ')
            first_word = category_title.lower().split()[0]
            category_title = category_title.split(first_word, 1)[-1].strip()
            check = False
            for i in range(len(CATEGORIES)):
                if CATEGORIES[i][0].lower() == category_title:
                    check = True
                    category_title = CATEGORIES[i][0]
                    break
            if check == False:
                sendMessage(s, "Error: Invalid category specified")
                cooldown()
                return
        elif category_specified == False:
            for i in range(len(CATEGORIES)):
                if CATEGORIES[i][0].lower() in title:
                    category_title = CATEGORIES[i][0]
                    break
        if game == None:
            sendMessage(s, "No game and/or category detected in stream title.")
            cooldown()
            return
        # Second word (if any) is the speedrun.com username to look up.
        username = None
        try:
            message.split()[1]
        except IndexError as err:
            username = SRC_USERNAME
        else:
            username = message.split()[1]
        if category_title != None:
            try:
                response = urlopen('https://www.speedrun.com/api/v1/users/{}/personal-bests?embed=category,game,platform'.format(username))
            except urllib.error.HTTPError as err:
                sendMessage(s, "Error: Speedrun.com user not found")
                cooldown()
                return
            readable = response.read().decode('utf-8')
            lst = loads(readable)
            place = None
            time_in_sec = None
            # Find the PB matching the detected category, game and platform.
            for cat in lst['data']:
                if cat['category']['data']['name'].lower() == category_title.lower() and cat['game']['data']['abbreviation'].lower() == game and cat['platform']['data']['name'] == platform_title:
                    time_in_sec = int(cat['run']['times']['realtime_t'])
                    place = cat['place']
                    break
            if place == None:
                sendMessage(s, username.title() + " currently does not have a PB for " + category_title + " on the leaderboard.")
                cooldown()
                return
            # Ordinal suffix trick: slices 'st'/'nd'/'rd'/'th' out of "tsnrhtdd".
            ordinal = lambda n: "%d%s" % (n,"tsnrhtdd"[(math.floor(n/10)%10!=1)*(n%10<4)*n%10::4])
            # Convert total seconds into h/m/s for display.
            hours = divmod(time_in_sec, 3600)
            minutes = divmod(hours[1], 60)
            seconds = minutes[1]
            pb = ''
            if hours[0] > 0:
                pb = str(hours[0]) + "h " + str(minutes[0]) + "m " + str(seconds) + "s"
            elif minutes[0] > 0:
                pb = str(minutes[0]) + "m " + str(seconds) + "s"
            else:
                pb = str(seconds) + "s"
            sendMessage(s, username.title() + "\'s " + category_title + " PB is " + pb + " (" + ordinal(place) + " place).")
            cooldown()
        elif category_title == None:
            sendMessage(s, "No game and/or category detected in stream title.")
            cooldown()
            return
#Tells user the leaderboard standing of the channel owner, or a specified user
def place(input):
    """Report the leaderboard placing of SRC_USERNAME (default) or a
    username given as the first command argument, for the category
    detected in the stream title.
    """
    if input == message.lower().split()[0]:
        # Optional second word is the speedrun.com username to look up.
        username = None
        try:
            message.split()[1]
        except IndexError as err:
            username = SRC_USERNAME
        else:
            username = message.split()[1]
        #Get the stream title from the Twitch API
        try:
            response = urlopen('https://api.twitch.tv/kraken/channels/{}?oauth_token={}'.format(CHANNEL, CHANNELPASS.strip('oauth:')))
        except urllib.error.HTTPError as err:
            sendMessage(s, "Error: Invalid CHANNEL/CHANNELPASS in settings file")
            cooldown()
            return
        readable = response.read().decode('utf-8')
        lst = loads(readable)
        title = lst['status'].lower()
        # GAMES rows here: [0]=display title, [1]=abbreviation, [2]=platform name.
        game = None
        for i in range(len(GAMES)):
            if GAMES[i][0].lower() in title:
                game = GAMES[i][1].lower()
                platform_title = GAMES[i][2]
                break
        if game == None:
            sendMessage(s, "No game and/or category detected in stream title.")
            cooldown()
            return
        category_title = None
        for i in range(len(CATEGORIES)):
            if CATEGORIES[i][0].lower() in title:
                category_title = CATEGORIES[i][0]
                break
        if category_title != None:
            try:
                response = urlopen('https://www.speedrun.com/api/v1/users/{}/personal-bests?embed=category,game,platform'.format(username))
            except urllib.error.HTTPError as err:
                sendMessage(s, "Error: Speedrun.com user not found")
                cooldown()
                return
            readable = response.read().decode('utf-8')
            lst = loads(readable)
            place = None
            time_in_sec = None
            # Find the PB matching the detected category, game and platform.
            for cat in lst['data']:
                if cat['category']['data']['name'].lower() == category_title.lower() and cat['game']['data']['abbreviation'].lower() == game and cat['platform']['data']['name'] == platform_title:
                    time_in_sec = int(cat['run']['times']['realtime_t'])
                    place = cat['place']
                    break
            if place == None:
                sendMessage(s, username.title() + " currently does not have a PB for " + category_title + " on the leaderboard.")
                cooldown()
                return
            # Ordinal suffix trick: slices 'st'/'nd'/'rd'/'th' out of "tsnrhtdd".
            ordinal = lambda n: "%d%s" % (n,"tsnrhtdd"[(math.floor(n/10)%10!=1)*(n%10<4)*n%10::4])
            sendMessage(s, username.title() + " is in " + ordinal(place) + " place for " + category_title + ".")
        elif category_title == None:
            sendMessage(s, "No game and/or category detected in stream title.")
            cooldown()
            return
def leaderboard(input):
    """Post a speedrun.com leaderboard link for the game/category found
    in the stream title.  Triggered when the whole chat message equals
    *input* (e.g. '!leaderboard').
    """
    if input == message.lower().strip():
        # Fetch the stream title from the Twitch API.
        try:
            response = urlopen('https://api.twitch.tv/kraken/channels/{}?oauth_token={}'.format(CHANNEL, CHANNELPASS.strip('oauth:')))
        except urllib.error.HTTPError as err:
            sendMessage(s, "Error: Invalid CHANNEL/CHANNELPASS in settings file")
            cooldown()
            return
        readable = response.read().decode('utf-8')
        lst = loads(readable)
        title = lst['status'].lower()
        game = None
        game_title = None
        for i in range(len(GAMES)):
            if GAMES[i][0].lower() in title:
                game = GAMES[i][1]
                game_title = GAMES[i][0]
                break
        category = None
        category_title = None
        for i in range(len(CATEGORIES)):
            if CATEGORIES[i][0].lower() in title:
                category = CATEGORIES[i][1]
                category_title = CATEGORIES[i][0]
                break
        if game == None:
            sendMessage(s, "No game and/or category detected in stream title.")
            cooldown()
            return
        if category != None:
            sendMessage(s, game_title + " " + category_title + " Leaderboard: https://www.speedrun.com/{}#{}".format(game, category))
            cooldown()
            return
        elif category == None:
            sendMessage(s, "No game and/or category detected in stream title.")
            cooldown()
            return
#Returns a kadgar.net link with the channel owner and the other racers if a race is happening
def raceCommand(input):
    """Post a kadgar.net multi-stream link for an ongoing race.

    Triggered when the whole chat message equals *input* (e.g. '!race').
    Reads the stream title from the Twitch API and expects the racers to
    be listed after the words 'race with', separated by spaces (commas
    are stripped).
    """
    if input == message.lower().strip():
        #Get the stream title from the Twitch API
        try:
            response = urlopen('https://api.twitch.tv/kraken/channels/{}?oauth_token={}'.format(CHANNEL, CHANNELPASS.strip('oauth:')))
        except urllib.error.HTTPError as err:
            sendMessage(s, "Error: Invalid CHANNEL/CHANNELPASS in settings file")
            cooldown()
            return
        readable = response.read().decode('utf-8')
        lst = loads(readable)
        title = lst['status'].lower()
        if 'race with' not in title:
            sendMessage(s, CHANNEL.title() + " is not currently racing or no racers detected in stream title.")
            cooldown()
            return
        title_list = title.split()
        # Every word after 'with' is a racer name; strip trailing commas.
        r = title_list.index('with') + 1
        contenders = [word.strip(',') for word in title_list[r:]]
        if not contenders:
            # 'race with' sits at the very end of the title with no names
            # after it; the original loop raised IndexError here.  Treat it
            # as "no racers detected" instead.
            sendMessage(s, CHANNEL.title() + " is not currently racing or no racers detected in stream title.")
            cooldown()
            return
        # Bug fix: join CHANNEL and the racers with '/' separators; the old
        # code appended CHANNEL directly onto the first racer's name
        # (".../live/CHANNELracer1/racer2").
        sendMessage(s, "Race link: http://kadgar.net/live/" + "/".join([CHANNEL] + contenders))
        cooldown()
#Displays commands
def getCommands(input):
if input == message.strip().lower():
sendMessage(s, 'Commands: !wr • !2nd • !3rd • !4th • !pb • !place • !leaderboard • !race')
cooldown()
#Global cooldown
def cooldown():
if user == CHANNEL:
pass
elif user:
abort_after = COOLDOWN
start = time.time()
while True:
delta = time.time() - start
if delta >= abort_after:
break
#Checks if a message is from Twitch or a user
def Console(line):
if "PRIVMSG" in line:
return False
else:
return True
#Quits the bot program
def quitCommand(input):
if input == message.strip().lower() and user == CHANNEL:
sendMessage(s, "[Disconnected]")
quit()
elif input == message.strip():
sendMessage(s, "@" + user.title() + " Only the channel owner may use the !kill command.")
cooldown()
# --- Main bot loop ---------------------------------------------------------
# Connect to Twitch IRC chat, then read and dispatch messages forever.
s = openSocket()
joinRoom(s)
readbuffer = ""
while True:
    readbuffer = s.recv(1024)
    readbuffer = readbuffer.decode()
    # Split the received chunk into lines; the final element may be a
    # partial line that was cut off mid-recv.
    temp = readbuffer.split("\n")
    readbuffer = readbuffer.encode()
    # NOTE(review): the partial trailing line is popped into readbuffer but
    # immediately overwritten by the next recv(), so partial IRC lines are
    # dropped rather than carried over — confirm this is acceptable.
    readbuffer = temp.pop()
    for line in temp:
        print(line)
        # Answer Twitch keep-alive pings so the server keeps the
        # connection open; skip the rest of this batch afterwards.
        if "PING" in line and Console(line):
            msgg = "PONG tmi.twitch.tv\r\n".encode()
            s.send(msgg)
            print(msgg)
            break
        user = getUser(line)
        message = getMessage(line)
        print(user + " said: " + message)
        # Fetch the current chatter lists from the (legacy) TMI endpoint.
        # NOTE(review): moderators/vips/viewers are assigned but never read
        # below — confirm whether later code relies on these globals.
        response = urlopen('https://tmi.twitch.tv/group/user/{}/chatters'.format(CHANNEL))
        readable = response.read().decode('utf-8')
        chatlist = loads(readable)
        chatters = chatlist['chatters']
        moderators = chatters['moderators']
        vips = chatters['vips']
        viewers = chatters['viewers']
        # Dispatch every command handler; each handler checks for itself
        # whether the current message matches its trigger word.
        getCommands('!commands')
        worldRecord('!wr')
        second('!2nd')
        third('!3rd')
        fourth('!4th')
        personalBest('!pb')
        place('!place')
        leaderboard('!leaderboard')
        raceCommand('!race')
        quitCommand('!kill')
    continue
| 37.207194
| 196
| 0.511659
| 2,756
| 25,859
| 4.763788
| 0.087083
| 0.039302
| 0.021936
| 0.015919
| 0.800366
| 0.7977
| 0.765329
| 0.75794
| 0.743469
| 0.730977
| 0
| 0.014761
| 0.365985
| 25,859
| 694
| 197
| 37.260807
| 0.785605
| 0.042229
| 0
| 0.758319
| 0
| 0.012259
| 0.153347
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.021016
| false
| 0.042032
| 0.017513
| 0
| 0.10683
| 0.005254
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f7fd8c191bd9b665e91705fe3371b26bde803c75
| 23,120
|
py
|
Python
|
ddganAE/architectures/cae/D2/cae.py
|
Zeff020/Adversarial_ROM
|
8c9e7ff86250e9370e5fdd2018f9ad04ded5f122
|
[
"MIT"
] | 1
|
2021-12-27T06:14:32.000Z
|
2021-12-27T06:14:32.000Z
|
ddganAE/architectures/cae/D2/cae.py
|
Zeff020/Adversarial_ROM
|
8c9e7ff86250e9370e5fdd2018f9ad04ded5f122
|
[
"MIT"
] | null | null | null |
ddganAE/architectures/cae/D2/cae.py
|
Zeff020/Adversarial_ROM
|
8c9e7ff86250e9370e5fdd2018f9ad04ded5f122
|
[
"MIT"
] | 3
|
2021-08-05T11:17:37.000Z
|
2021-09-02T02:37:44.000Z
|
"""
Collection of encoders and decoders that can readily be imported
and used by the 2D adversarial and convolutional autoencoder and predictive
models.
Note that these models are currently adjusted to a 55 by 42 input shape.
"""
from keras.layers import Dense, Flatten, Reshape, Conv2D, UpSampling2D, \
Cropping2D, MaxPool2D
from keras.models import Sequential
__author__ = "Zef Wolffs"
__credits__ = []
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Zef Wolffs"
__email__ = "zefwolffs@gmail.com"
__status__ = "Development"
def build_custom_conv_encoder(input_shape, latent_dim, initializer,
                              info=False):
    """
    Build a 2D convolutional encoder: three conv stages (the last two
    strided) followed by a linear projection to the latent space.
    Args:
        input_shape (tuple): Shape tuple of input grids
        latent_dim (int): Number of latent variables
        initializer (tf.keras.initializers.Initializer): Weights initializer
        info (bool, optional): Whether to print info. Defaults to False.
    Returns:
        tf.keras.Model: encoder
    """
    encoder = Sequential([
        Conv2D(32, (5, 5), padding="same", activation="relu",
               input_shape=input_shape, kernel_initializer=initializer),
        Conv2D(64, (5, 5), strides=(2, 2), activation="relu",
               padding="same", kernel_initializer=initializer),
        Conv2D(128, (5, 5), strides=(2, 2), activation="relu",
               padding="same", kernel_initializer=initializer),
        Flatten(),
        Dense(latent_dim, activation="linear"),
    ])
    if info:
        print(encoder.summary())
    return encoder
def build_custom_conv_decoder(latent_dim, initializer, info=False):
    """
    Build the 2D convolutional decoder matching build_custom_conv_encoder:
    a dense expansion to a 56x11x128 volume, two upsampling conv stages and
    a final cropped sigmoid output.
    Args:
        latent_dim (int): Number of latent variables
        initializer (tf.keras.initializers.Initializer): Weights initializer
        info (bool, optional): Whether to print info. Defaults to False.
    Returns:
        tf.keras.Model: decoder
    """
    decoder = Sequential([
        Dense(78848, input_dim=latent_dim, kernel_initializer=initializer),
        Reshape((56, 11, 128)),
        Conv2D(64, (5, 5), activation="relu", padding="same",
               kernel_initializer=initializer),
        UpSampling2D(),
        Conv2D(32, (5, 5), activation="relu", padding="same",
               kernel_initializer=initializer),
        UpSampling2D(),
        Conv2D(2, (5, 5), activation="sigmoid", padding="same",
               kernel_initializer=initializer),
        Cropping2D(cropping=((2, 1), (1, 1))),
    ])
    if info:
        print(decoder.summary())
    return decoder
def build_omata_encoder_decoder(input_shape, latent_dim, initializer,
                                info=False, act="elu", dense_act=None):
    """
    This encoder-decoder pair currently works for 55 by 42 grids
    Args:
        input_shape (tuple): Shape tuple of input grids
        latent_dim (int): Number of latent variables
        initializer (tf.keras.initializers.Initializer): Weights initializer
        info (bool, optional): Whether to print info. Defaults to False.
        act (str, optional): Activation function to use. Defaults to "elu".
        dense_act (str, optional): Dense layer activation function to use.
            Defaults to None.
    Returns:
        tuple: encoder, decoder pair
    """
    encoder = Sequential()
    encoder.add(Conv2D(16, (3, 3), padding="same", activation=act,
                       input_shape=input_shape,
                       kernel_initializer=initializer))
    encoder.add(MaxPool2D(padding="same"))
    encoder.add(Conv2D(8, (3, 3), activation=act,
                       padding="same", kernel_initializer=initializer))
    encoder.add(MaxPool2D(padding="same"))
    encoder.add(Conv2D(8, (3, 3), activation=act,
                       padding="same", kernel_initializer=initializer))
    encoder.add(MaxPool2D(padding="same"))
    encoder.add(Flatten())
    encoder.add(Dense(latent_dim, activation="linear"))
    if info:
        print(encoder.summary())
    # encoder.layers[6] is the Flatten layer, so its input shape is the
    # pooled volume (7x6x8 for a 55x42 input).
    # Bug fix: the original used input_shape[1] for BOTH spatial axes and a
    # hard-coded Dense(392) (= 7*7*8), so after three UpSampling2D steps and
    # the ((1, 0), (1, 1)) crop the decoder emitted 55x54 grids instead of
    # the 55x42 the encoder consumes. Fixed to mirror
    # build_wider_omata_encoder_decoder: correct 7x6 volume and a
    # ((1, 0), (3, 3)) crop (56x48 -> 55x42).
    rows = encoder.layers[6].input_shape[1]
    cols = encoder.layers[6].input_shape[2]
    decoder = Sequential()
    decoder.add(Dense(rows * cols * 8, input_dim=latent_dim,
                      kernel_initializer=initializer,
                      activation=dense_act))
    decoder.add(Reshape((rows, cols, 8)))
    decoder.add(Conv2D(8, (3, 3), activation=act, padding="same",
                       kernel_initializer=initializer))
    decoder.add(UpSampling2D())
    decoder.add(Conv2D(8, (3, 3), activation=act, padding="same",
                       kernel_initializer=initializer))
    decoder.add(UpSampling2D())
    decoder.add(Conv2D(16, (3, 3), activation=act, padding="same",
                       kernel_initializer=initializer))
    decoder.add(UpSampling2D())
    decoder.add(Conv2D(2, (3, 3), activation="linear", padding="same",
                       kernel_initializer=initializer))
    decoder.add(Cropping2D(cropping=((1, 0), (3, 3))))
    if info:
        print(decoder.summary())
    return encoder, decoder
def build_wider_omata_encoder_decoder(input_shape, latent_dim, initializer,
                                      info=False, act="elu", dense_act=None):
    """
    Build a wider (16/32/64-filter) convolutional encoder-decoder pair for
    55 by 42 grids.
    Args:
        input_shape (tuple): Shape tuple of input grids
        latent_dim (int): Number of latent variables
        initializer (tf.keras.initializers.Initializer): Weights initializer
        info (bool, optional): Whether to print info. Defaults to False.
        act (str, optional): Activation function to use. Defaults to "elu".
        dense_act (str, optional): Dense layer activation function to use.
            Defaults to None.
    Returns:
        tuple: encoder, decoder pair
    """
    encoder = Sequential([
        Conv2D(16, (5, 5), padding="same", activation=act,
               input_shape=input_shape, kernel_initializer=initializer),
        MaxPool2D(padding="same"),
        Conv2D(32, (3, 3), activation=act, padding="same",
               kernel_initializer=initializer),
        MaxPool2D(padding="same"),
        Conv2D(64, (3, 3), activation=act, padding="same",
               kernel_initializer=initializer),
        MaxPool2D(padding="same"),
        Flatten(),
        Dense(latent_dim, activation="linear"),
    ])
    if info:
        print(encoder.summary())
    # layers[6] is the Flatten layer; its input is the pooled 7x6x64 volume.
    rows, cols = encoder.layers[6].input_shape[1:3]
    decoder = Sequential([
        Dense(2688, input_dim=latent_dim, kernel_initializer=initializer,
              activation=dense_act),
        Reshape((rows, cols, 64)),
        Conv2D(64, (3, 3), activation=act, padding="same",
               kernel_initializer=initializer),
        UpSampling2D(),
        Conv2D(32, (3, 3), activation=act, padding="same",
               kernel_initializer=initializer),
        UpSampling2D(),
        Conv2D(16, (5, 5), activation=act, padding="same",
               kernel_initializer=initializer),
        UpSampling2D(),
        Conv2D(2, (3, 3), activation="linear", padding="same",
               kernel_initializer=initializer),
        # 56x48 upsampled output cropped back to the 55x42 input grid.
        Cropping2D(cropping=((1, 0), (3, 3))),
    ])
    if info:
        print(decoder.summary())
    return encoder, decoder
def build_wide_omata_encoder_decoder(input_shape, latent_dim, initializer,
                                     info=False, act="elu", dense_act=None):
    """
    Build a wide (32/64/128-filter) convolutional encoder-decoder pair for
    55 by 42 grids.
    Args:
        input_shape (tuple): Shape tuple of input grids
        latent_dim (int): Number of latent variables
        initializer (tf.keras.initializers.Initializer): Weights initializer
        info (bool, optional): Whether to print info. Defaults to False.
        act (str, optional): Activation function to use. Defaults to "elu".
        dense_act (str, optional): Dense layer activation function to use.
            Defaults to None.
    Returns:
        tuple: encoder, decoder pair
    """
    encoder = Sequential([
        Conv2D(32, (5, 5), padding="same", activation=act,
               input_shape=input_shape, kernel_initializer=initializer),
        MaxPool2D(padding="same"),
        Conv2D(64, (3, 3), activation=act, padding="same",
               kernel_initializer=initializer),
        MaxPool2D(padding="same"),
        Conv2D(128, (3, 3), activation=act, padding="same",
               kernel_initializer=initializer),
        MaxPool2D(padding="same"),
        Flatten(),
        Dense(latent_dim, activation="linear"),
    ])
    if info:
        print(encoder.summary())
    # layers[6] is the Flatten layer; its input is the pooled 7x6x128 volume.
    rows, cols = encoder.layers[6].input_shape[1:3]
    decoder = Sequential([
        Dense(5376, input_dim=latent_dim, kernel_initializer=initializer,
              activation=dense_act),
        Reshape((rows, cols, 128)),
        Conv2D(128, (3, 3), activation=act, padding="same",
               kernel_initializer=initializer),
        UpSampling2D(),
        Conv2D(64, (3, 3), activation=act, padding="same",
               kernel_initializer=initializer),
        UpSampling2D(),
        Conv2D(32, (3, 3), activation=act, padding="same",
               kernel_initializer=initializer),
        UpSampling2D(),
        Conv2D(2, (3, 3), activation="linear", padding="same",
               kernel_initializer=initializer),
        # 56x48 upsampled output cropped back to the 55x42 input grid.
        Cropping2D(cropping=((1, 0), (3, 3))),
    ])
    if info:
        print(decoder.summary())
    return encoder, decoder
def build_deeper_omata_encoder_decoder(input_shape, latent_dim, initializer,
                                       info=False, act="elu", dense_act=None):
    """
    Build a deeper (four conv/pool stage) encoder-decoder pair for 55 by 42
    grids; the decoder finishes with two "valid"-padded convolutions.
    Args:
        input_shape (tuple): Shape tuple of input grids
        latent_dim (int): Number of latent variables
        initializer (tf.keras.initializers.Initializer): Weights initializer
        info (bool, optional): Whether to print info. Defaults to False.
        act (str, optional): Activation function to use. Defaults to "elu".
        dense_act (str, optional): Dense layer activation function to use.
            Defaults to None.
    Returns:
        tuple: encoder, decoder pair
    """
    encoder = Sequential([
        Conv2D(32, (5, 5), padding="same", activation=act,
               input_shape=input_shape, kernel_initializer=initializer),
        MaxPool2D(padding="same"),
        Conv2D(64, (3, 3), activation=act, padding="same",
               kernel_initializer=initializer),
        MaxPool2D(padding="same"),
        Conv2D(64, (3, 3), activation=act, padding="same",
               kernel_initializer=initializer),
        MaxPool2D(padding="same"),
        Conv2D(128, (3, 3), activation=act, padding="same",
               kernel_initializer=initializer),
        MaxPool2D(padding="same"),
        Flatten(),
        Dense(latent_dim, activation="linear"),
    ])
    if info:
        print(encoder.summary())
    # layers[8] is the Flatten layer; its input is the pooled 4x3x128 volume.
    rows, cols = encoder.layers[8].input_shape[1:3]
    decoder = Sequential([
        Dense(1536, input_dim=latent_dim, kernel_initializer=initializer,
              activation=dense_act),
        Reshape((rows, cols, 128)),
        Conv2D(128, (3, 3), activation=act, padding="same",
               kernel_initializer=initializer),
        UpSampling2D(),
        Conv2D(64, (3, 3), activation=act, padding="same",
               kernel_initializer=initializer),
        UpSampling2D(),
        Conv2D(64, (3, 3), activation=act, padding="same",
               kernel_initializer=initializer),
        UpSampling2D(),
        # The last two convs use "valid" padding to shrink toward the
        # target grid before the final crop.
        Conv2D(32, (3, 3), activation=act, padding="valid",
               kernel_initializer=initializer),
        UpSampling2D(),
        Conv2D(2, (3, 3), activation="linear", padding="valid",
               kernel_initializer=initializer),
        Cropping2D(cropping=((2, 1), (0, 0))),
    ])
    if info:
        print(decoder.summary())
    return encoder, decoder
def build_denser_omata_encoder_decoder(input_shape, latent_dim, initializer,
                                       info=False, act="elu", dense_act=None):
    """
    Build an encoder-decoder pair for 55 by 42 grids with an extra dense
    bottleneck layer on each side.
    Args:
        input_shape (tuple): Shape tuple of input grids
        latent_dim (int): Number of latent variables
        initializer (tf.keras.initializers.Initializer): Weights initializer
        info (bool, optional): Whether to print info. Defaults to False.
        act (str, optional): Activation function to use. Defaults to "elu".
        dense_act (str, optional): Dense layer activation function to use.
            Defaults to None.
    Returns:
        tuple: encoder, decoder pair
    """
    half_flat = int(5376 / 2)
    encoder = Sequential([
        Conv2D(32, (5, 5), padding="same", activation=act,
               input_shape=input_shape, kernel_initializer=initializer),
        MaxPool2D(padding="same"),
        Conv2D(64, (3, 3), activation=act, padding="same",
               kernel_initializer=initializer),
        MaxPool2D(padding="same"),
        Conv2D(128, (3, 3), activation=act, padding="same",
               kernel_initializer=initializer),
        MaxPool2D(padding="same"),
        Flatten(),
        Dense(half_flat, kernel_initializer=initializer,
              activation=dense_act),
        Dense(latent_dim, activation="linear"),
    ])
    if info:
        print(encoder.summary())
    # layers[6] is the Flatten layer; its input is the pooled 7x6x128 volume.
    rows, cols = encoder.layers[6].input_shape[1:3]
    decoder = Sequential([
        Dense(half_flat, kernel_initializer=initializer,
              activation=dense_act, input_shape=(latent_dim,)),
        Dense(5376, kernel_initializer=initializer,
              activation=dense_act, input_shape=(half_flat,)),
        Reshape((rows, cols, 128)),
        Conv2D(128, (3, 3), activation=act, padding="same",
               kernel_initializer=initializer),
        UpSampling2D(),
        Conv2D(64, (3, 3), activation=act, padding="same",
               kernel_initializer=initializer),
        UpSampling2D(),
        Conv2D(32, (3, 3), activation=act, padding="same",
               kernel_initializer=initializer),
        UpSampling2D(),
        Conv2D(2, (3, 3), activation="linear", padding="same",
               kernel_initializer=initializer),
        Cropping2D(cropping=((1, 0), (3, 3))),
    ])
    # NOTE(review): build() is called with the grid input_shape, not
    # (latent_dim,); the decoder is already built from its first Dense layer,
    # so this looks redundant or mistaken — confirm intent. Kept as-is.
    decoder.build(input_shape)
    if info:
        print(decoder.summary())
    return encoder, decoder
def build_densest_omata_encoder_decoder(input_shape, latent_dim, initializer,
                                        info=False, act="elu",
                                        dense_act=None):
    """
    Build an encoder-decoder pair for 55 by 42 grids with only two conv/pool
    stages and several large dense layers around the bottleneck.
    Args:
        input_shape (tuple): Shape tuple of input grids
        latent_dim (int): Number of latent variables
        initializer (tf.keras.initializers.Initializer): Weights initializer
        info (bool, optional): Whether to print info. Defaults to False.
        act (str, optional): Activation function to use. Defaults to "elu".
        dense_act (str, optional): Dense layer activation function to use.
            Defaults to None.
    Returns:
        tuple: encoder, decoder pair
    """
    flat = 9856
    half_flat = int(flat / 2)
    encoder = Sequential([
        Conv2D(32, (5, 5), padding="same", activation=act,
               input_shape=input_shape, kernel_initializer=initializer),
        MaxPool2D(padding="same"),
        Conv2D(64, (3, 3), activation=act, padding="same",
               kernel_initializer=initializer),
        MaxPool2D(padding="same"),
        Flatten(),
        Dense(flat, kernel_initializer=initializer, activation=dense_act),
        Dense(half_flat, kernel_initializer=initializer,
              activation=dense_act),
        Dense(latent_dim, activation="linear"),
    ])
    if info:
        print(encoder.summary())
    # layers[4] is the Flatten layer; its input is the pooled 14x11x64 volume.
    rows, cols = encoder.layers[4].input_shape[1:3]
    decoder = Sequential([
        Dense(half_flat, kernel_initializer=initializer,
              activation=dense_act, input_shape=(latent_dim,)),
        Dense(flat, kernel_initializer=initializer,
              activation=dense_act, input_shape=(half_flat,)),
        Dense(flat, kernel_initializer=initializer,
              activation=dense_act, input_shape=(flat,)),
        Reshape((rows, cols, 64)),
        Conv2D(64, (3, 3), activation=act, padding="same",
               kernel_initializer=initializer),
        UpSampling2D(),
        Conv2D(32, (3, 3), activation=act, padding="same",
               kernel_initializer=initializer),
        UpSampling2D(),
        Conv2D(2, (3, 3), activation="linear", padding="same",
               kernel_initializer=initializer),
        # 56x44 upsampled output cropped back to the 55x42 input grid.
        Cropping2D(cropping=((1, 0), (1, 1))),
    ])
    # NOTE(review): build() is called with the grid input_shape, not
    # (latent_dim,); the decoder is already built from its first Dense layer,
    # so this looks redundant or mistaken — confirm intent. Kept as-is.
    decoder.build(input_shape)
    if info:
        print(decoder.summary())
    return encoder, decoder
def build_agostini_encoder_decoder(input_shape, latent_dim, initializer,
                                   info=False):
    """
    Build an encoder-decoder pair for 221 by 42 grids (ReLU convolutions,
    sigmoid output stages).
    Args:
        input_shape (tuple): Shape tuple of input grids
        latent_dim (int): Number of latent variables
        initializer (tf.keras.initializers.Initializer): Weights initializer
        info (bool, optional): Whether to print info. Defaults to False.
    Returns:
        tuple: encoder, decoder pair
    """
    encoder = Sequential([
        Conv2D(16, (5, 5), padding="same", activation="relu",
               input_shape=input_shape, kernel_initializer=initializer),
        MaxPool2D(padding="same"),
        Conv2D(32, (3, 3), activation="relu", padding="same",
               kernel_initializer=initializer),
        MaxPool2D(padding="same"),
        Conv2D(64, (3, 3), activation="relu", padding="same",
               kernel_initializer=initializer),
        MaxPool2D(padding="same"),
        Flatten(),
        Dense(latent_dim, activation="linear"),
    ])
    if info:
        print(encoder.summary())
    # layers[5] is the final MaxPool2D; its input is the 56x11x64 feature
    # map, and the Dense(9856) below expands to a 56x11x16 volume.
    rows, cols = encoder.layers[5].input_shape[1:3]
    decoder = Sequential([
        Dense(9856, input_dim=latent_dim, kernel_initializer=initializer),
        Reshape((rows, cols, 16)),
        Conv2D(64, (3, 3), activation="relu", padding="same",
               kernel_initializer=initializer),
        UpSampling2D(),
        Conv2D(32, (3, 3), activation="relu", padding="same",
               kernel_initializer=initializer),
        UpSampling2D(),
        Conv2D(16, (5, 5), activation="sigmoid", padding="same",
               kernel_initializer=initializer),
        Conv2D(2, (3, 3), activation="sigmoid", padding="same",
               kernel_initializer=initializer),
        # 224x44 upsampled output cropped back to the 221x42 input grid.
        Cropping2D(cropping=((1, 2), (1, 1))),
    ])
    if info:
        print(decoder.summary())
    return encoder, decoder
def build_mnist_wide_omata_encoder_decoder(input_shape, latent_dim,
                                           initializer, info=False):
    """
    Build an encoder-decoder pair for 28 by 28 grids, usable on the MNIST
    dataset as a test.
    Args:
        input_shape (tuple): Shape tuple of input grids
        latent_dim (int): Number of latent variables
        initializer (tf.keras.initializers.Initializer): Weights initializer
        info (bool, optional): Whether to print info. Defaults to False.
    Returns:
        tuple: encoder, decoder pair
    """
    encoder = Sequential([
        Conv2D(128, (3, 3), padding="same", activation="relu",
               input_shape=input_shape, kernel_initializer=initializer),
        MaxPool2D(padding="same"),
        Conv2D(64, (3, 3), activation="relu", padding="same",
               kernel_initializer=initializer),
        MaxPool2D(padding="same"),
        Conv2D(32, (3, 3), activation="relu", padding="same",
               kernel_initializer=initializer),
        MaxPool2D(padding="same"),
        Flatten(),
        Dense(latent_dim, activation="linear"),
    ])
    if info:
        print(encoder.summary())
    # layers[5] is the final MaxPool2D; its input is the 7x7 feature map,
    # and the Dense(784) below expands to a 7x7x16 volume.
    rows, cols = encoder.layers[5].input_shape[1:3]
    decoder = Sequential([
        Dense(784, input_dim=latent_dim, kernel_initializer=initializer),
        Reshape((rows, cols, 16)),
        Conv2D(32, (3, 3), activation="relu", padding="same",
               kernel_initializer=initializer),
        UpSampling2D(),
        Conv2D(64, (3, 3), activation="relu", padding="same",
               kernel_initializer=initializer),
        UpSampling2D(),
        Conv2D(1, (3, 3), activation="sigmoid", padding="same",
               kernel_initializer=initializer),
        # No-op crop, kept for structural symmetry with the other builders.
        Cropping2D(cropping=((0, 0), (0, 0))),
    ])
    if info:
        print(decoder.summary())
    return encoder, decoder
| 40.41958
| 79
| 0.611289
| 2,528
| 23,120
| 5.484177
| 0.057753
| 0.063474
| 0.153491
| 0.100981
| 0.954703
| 0.945398
| 0.9441
| 0.941864
| 0.939484
| 0.91972
| 0
| 0.034238
| 0.26981
| 23,120
| 571
| 80
| 40.490368
| 0.786992
| 0.210035
| 0
| 0.813699
| 0
| 0
| 0.033574
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027397
| false
| 0
| 0.005479
| 0
| 0.060274
| 0.049315
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
79319a292b194538fe6fed9f5c5e48746b45f4f8
| 179
|
py
|
Python
|
foronoi/nodes/__init__.py
|
yagna2652/foronoi
|
e5ce3ae825857a326267ee18e90f525e83ae2003
|
[
"MIT"
] | 24
|
2018-09-02T00:38:16.000Z
|
2021-03-09T02:46:21.000Z
|
foronoi/nodes/__init__.py
|
yagna2652/foronoi
|
e5ce3ae825857a326267ee18e90f525e83ae2003
|
[
"MIT"
] | 8
|
2019-12-23T04:15:30.000Z
|
2021-03-25T01:08:05.000Z
|
foronoi/nodes/__init__.py
|
yagna2652/foronoi
|
e5ce3ae825857a326267ee18e90f525e83ae2003
|
[
"MIT"
] | 6
|
2020-01-01T10:26:55.000Z
|
2021-04-02T09:14:23.000Z
|
from foronoi.nodes.arc import Arc
from foronoi.nodes.breakpoint import Breakpoint
from foronoi.nodes.internal_node import InternalNode
from foronoi.nodes.leaf_node import LeafNode
| 44.75
| 52
| 0.871508
| 26
| 179
| 5.923077
| 0.423077
| 0.285714
| 0.415584
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083799
| 179
| 4
| 53
| 44.75
| 0.939024
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
7932c84412312175b892b0c8c71a4e053d978b2d
| 162
|
py
|
Python
|
python/maya/startup/__init__.py
|
CountZer0/PipelineConstructionSet
|
0aa73a8a63c72989b2d1c677efd78dad4388d335
|
[
"BSD-3-Clause"
] | 21
|
2015-04-27T05:01:36.000Z
|
2021-11-22T13:45:14.000Z
|
python/maya/startup/__init__.py
|
0xb1dd1e/PipelineConstructionSet
|
621349da1b6d1437e95d0c9e48ee9f36d59f19fd
|
[
"BSD-3-Clause"
] | null | null | null |
python/maya/startup/__init__.py
|
0xb1dd1e/PipelineConstructionSet
|
621349da1b6d1437e95d0c9e48ee9f36d59f19fd
|
[
"BSD-3-Clause"
] | 7
|
2015-04-11T11:37:19.000Z
|
2020-05-22T09:49:04.000Z
|
"""
Author: jason
Created: Jul 7, 2012
Module: maya.startup.__init__
Purpose: to import mayaMenuBoot
"""
print "maya.startup.__init__ imported"
| 18
| 39
| 0.679012
| 19
| 162
| 5.368421
| 0.842105
| 0.215686
| 0.294118
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.03937
| 0.216049
| 162
| 9
| 39
| 18
| 0.76378
| 0
| 0
| 0
| 0
| 0
| 0.588235
| 0.411765
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 1
| null | null | 1
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 1
|
0
| 8
|
f717e5abe192eeacd489fb3abdcfc529c914593b
| 8,031
|
py
|
Python
|
src/tests/test_markdown2man.py
|
dante-signal31/markdown2man
|
ce57b905b01a6fb8fe6d3d0989af3a15f42c78cf
|
[
"BSD-3-Clause"
] | null | null | null |
src/tests/test_markdown2man.py
|
dante-signal31/markdown2man
|
ce57b905b01a6fb8fe6d3d0989af3a15f42c78cf
|
[
"BSD-3-Clause"
] | null | null | null |
src/tests/test_markdown2man.py
|
dante-signal31/markdown2man
|
ce57b905b01a6fb8fe6d3d0989af3a15f42c78cf
|
[
"BSD-3-Clause"
] | null | null | null |
""" Test for markdown2man launcher."""
import gzip
import os
import sys
import tempfile
import test_common.fs.ops as test_ops
from test_common.fs.temp import temp_dir
# TODO: Refactor project layout to leave tests folder out of src.
sys.path.append("src")
import src.markdown2man as markdown2man
def test_launcher_all_options_given(temp_dir):
    """Launcher produces the expected gzipped manpage with all short options."""
    # Setup test.
    md_file = os.path.join(temp_dir, "README.md")
    test_ops.copy_file("src/tests/resources/README.md", md_file)
    args = [f"{md_file}", "cifra", "-s", "1", "-t",
            "cifra usage documentation"]
    out_file = os.path.join(temp_dir, "cifra.1.gz")
    with open("src/tests/resources/cifra.1") as manpage:
        expected = manpage.read()
    # Perform test.
    assert not os.path.exists(out_file)
    markdown2man.main(args)
    assert os.path.exists(out_file)
    with gzip.open(out_file) as produced:
        recovered = produced.read().decode()
    assert recovered == expected
def test_launcher_all_long_options_given(temp_dir):
    """Launcher accepts the long-form option names too."""
    # Setup test.
    md_file = os.path.join(temp_dir, "README.md")
    test_ops.copy_file("src/tests/resources/README.md", md_file)
    args = [f"{md_file}", "cifra", "--manpage_section", "1",
            "--manpage_title", "cifra usage documentation"]
    out_file = os.path.join(temp_dir, "cifra.1.gz")
    with open("src/tests/resources/cifra.1") as manpage:
        expected = manpage.read()
    # Perform test.
    assert not os.path.exists(out_file)
    markdown2man.main(args)
    assert os.path.exists(out_file)
    with gzip.open(out_file) as produced:
        recovered = produced.read().decode()
    assert recovered == expected
def test_launcher_section_changed(temp_dir):
    """Changing -s updates both the output filename and the .TH header."""
    # Setup test.
    md_file = os.path.join(temp_dir, "README.md")
    test_ops.copy_file("src/tests/resources/README.md", md_file)
    args = [f"{md_file}", "cifra", "-s", "2", "-t",
            "cifra usage documentation"]
    out_file = os.path.join(temp_dir, "cifra.2.gz")
    with open("src/tests/resources/cifra.1") as manpage:
        # Reference manpage is section 1; rewrite its header for section 2.
        expected = manpage.read().replace(".TH \"cifra\" \"1\"",
                                          ".TH \"cifra\" \"2\"")
    # Perform test.
    assert not os.path.exists(out_file)
    markdown2man.main(args)
    assert os.path.exists(out_file)
    with gzip.open(out_file) as produced:
        recovered = produced.read().decode()
    assert recovered == expected
def test_launcher_section_omitted(temp_dir):
    """Omitting -s defaults the manpage section to 1."""
    # Setup test.
    md_file = os.path.join(temp_dir, "README.md")
    test_ops.copy_file("src/tests/resources/README.md", md_file)
    args = [f"{md_file}", "cifra", "-t",
            "cifra usage documentation"]
    out_file = os.path.join(temp_dir, "cifra.1.gz")
    with open("src/tests/resources/cifra.1") as manpage:
        expected = manpage.read()
    # Perform test.
    assert not os.path.exists(out_file)
    markdown2man.main(args)
    assert os.path.exists(out_file)
    with gzip.open(out_file) as produced:
        recovered = produced.read().decode()
    assert recovered == expected
def test_launcher_title_omitted(temp_dir):
    """Omitting -t leaves the .TH title fields empty in the output."""
    # Setup test.
    md_file = os.path.join(temp_dir, "README.md")
    test_ops.copy_file("src/tests/resources/README.md", md_file)
    args = [f"{md_file}", "cifra"]
    out_file = os.path.join(temp_dir, "cifra.1.gz")
    expected_line = ".TH \"cifra\" \"1\" \"\" \"\" \"cifra\"\n"
    # Bug fix: the original also read src/tests/resources/cifra.1 into an
    # unused expected_content variable — dead code removed; this test only
    # checks for the expected .TH line.
    # Perform test.
    assert not os.path.exists(out_file)
    markdown2man.main(args)
    assert os.path.exists(out_file)
    with gzip.open(out_file) as produced:
        recovered_lines = [line.decode() for line in produced.readlines()]
    assert expected_line in recovered_lines
def test_launcher_uncompressed(temp_dir):
    """With -u the launcher writes a plain (non-gzipped) manpage."""
    # Setup test.
    md_file = os.path.join(temp_dir, "README.md")
    test_ops.copy_file("src/tests/resources/README.md", md_file)
    args = [f"{md_file}", "cifra", "-s", "1", "-t",
            "cifra usage documentation", "-u"]
    out_file = os.path.join(temp_dir, "cifra.1")
    with open("src/tests/resources/cifra.1") as manpage:
        expected = manpage.read()
    # Perform test.
    assert not os.path.exists(out_file)
    markdown2man.main(args)
    assert os.path.exists(out_file)
    with open(out_file) as produced:
        recovered = produced.read()
    assert recovered == expected
def test_launcher_different_output_folder(temp_dir):
    """With -f the manpage lands in the requested existing folder."""
    with tempfile.TemporaryDirectory() as out_folder:
        # Setup test.
        md_file = os.path.join(temp_dir, "README.md")
        test_ops.copy_file("src/tests/resources/README.md", md_file)
        args = [f"{md_file}", "cifra", "-s", "1", "-t",
                "cifra usage documentation", "-f", f"{out_folder}"]
        out_file = os.path.join(out_folder, "cifra.1.gz")
        with open("src/tests/resources/cifra.1") as manpage:
            expected = manpage.read()
        # Perform test.
        assert not os.path.exists(out_file)
        markdown2man.main(args)
        assert os.path.exists(out_file)
        with gzip.open(out_file) as produced:
            recovered = produced.read().decode()
        assert recovered == expected
def test_launcher_different_non_existing_output_folder(temp_dir):
    """With -f pointing at a missing subfolder, the launcher creates it."""
    with tempfile.TemporaryDirectory() as out_folder:
        # Setup test.
        md_file = os.path.join(temp_dir, "README.md")
        out_subfolder = os.path.join(out_folder, "man/")
        test_ops.copy_file("src/tests/resources/README.md", md_file)
        args = [f"{md_file}", "cifra", "-s", "1", "-t",
                "cifra usage documentation", "-f", f"{out_subfolder}"]
        out_file = os.path.join(out_subfolder, "cifra.1.gz")
        with open("src/tests/resources/cifra.1") as manpage:
            expected = manpage.read()
        # Perform test.
        assert not os.path.exists(out_file)
        markdown2man.main(args)
        assert os.path.exists(out_file)
        with gzip.open(out_file) as produced:
            recovered = produced.read().decode()
        assert recovered == expected
| 43.410811
| 102
| 0.696551
| 1,037
| 8,031
| 5.12054
| 0.082932
| 0.090395
| 0.108475
| 0.044821
| 0.906026
| 0.906026
| 0.9
| 0.9
| 0.885687
| 0.854802
| 0
| 0.00551
| 0.186527
| 8,031
| 185
| 103
| 43.410811
| 0.807286
| 0.037853
| 0
| 0.75
| 0
| 0
| 0.149124
| 0.086048
| 0
| 0
| 0
| 0.005405
| 0.171429
| 1
| 0.057143
| false
| 0
| 0.05
| 0
| 0.107143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f752845674c26214bf3d5d00aaba4581e39e6040
| 27,612
|
py
|
Python
|
source/deepsecurity/api/policy_log_inspection_rule_details_api.py
|
felipecosta09/cloudone-workload-controltower-lifecycle
|
7927c84d164058b034fc872701b5ee117641f4d1
|
[
"Apache-2.0"
] | 1
|
2021-10-30T16:40:09.000Z
|
2021-10-30T16:40:09.000Z
|
source/deepsecurity/api/policy_log_inspection_rule_details_api.py
|
felipecosta09/cloudone-workload-controltower-lifecycle
|
7927c84d164058b034fc872701b5ee117641f4d1
|
[
"Apache-2.0"
] | 1
|
2021-07-28T20:19:03.000Z
|
2021-07-28T20:19:03.000Z
|
source/deepsecurity/api/policy_log_inspection_rule_details_api.py
|
felipecosta09/cloudone-workload-controltower-lifecycle
|
7927c84d164058b034fc872701b5ee117641f4d1
|
[
"Apache-2.0"
] | 1
|
2021-10-30T16:40:02.000Z
|
2021-10-30T16:40:02.000Z
|
# coding: utf-8
"""
Trend Micro Deep Security API
Copyright 2018 - 2020 Trend Micro Incorporated.<br/>Get protected, stay secured, and keep informed with Trend Micro Deep Security's new RESTful API. Access system data and manage security configurations to automate your security workflows and integrate Deep Security into your CI/CD pipeline. # noqa: E501
OpenAPI spec version: 12.5.841
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from deepsecurity.api_client import ApiClient
class PolicyLogInspectionRuleDetailsApi(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    Ref: https://github.com/swagger-api/swagger-codegen

    The four endpoint pairs below shared almost all of their generated
    bodies; the common parameter validation and request assembly has been
    factored into private helpers so each endpoint is a thin, declarative
    wrapper.  Public signatures, raised exception types, and error-message
    text are unchanged from the generated form.
    """

    def __init__(self, api_client=None):
        # Fall back to a default ApiClient when the caller supplies none.
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client

    # ------------------------------------------------------------------
    # Internal helpers shared by every endpoint wrapper below.
    # ------------------------------------------------------------------

    @staticmethod
    def _validate_kwargs(method_name, kwargs):
        """Raise TypeError for any keyword argument the endpoint does not accept.

        Every endpoint in this class accepts the same optional keywords:
        ``overrides`` plus the standard transport-control flags.
        """
        allowed = ('overrides', 'async_req', '_return_http_data_only',
                   '_preload_content', '_request_timeout')
        for key in kwargs:
            if key not in allowed:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method %s" % (key, method_name)
                )

    @staticmethod
    def _validate_required(method_name, required):
        """Raise ValueError when any (name, value) pair in *required* is None."""
        for name, value in required:
            if value is None:
                raise ValueError(
                    "Missing the required parameter `%s` when calling `%s`"
                    % (name, method_name))  # noqa: E501

    @staticmethod
    def _validate_numeric(method_name, numeric):
        """Raise ValueError when a (name, value) pair fails the `/\\d+/` pattern.

        Mirrors the generated check exactly: ``re.search`` only requires
        that the string form of the value *contains* a digit somewhere.
        """
        for name, value in numeric:
            if not re.search('\\d+', str(value)):  # noqa: E501
                raise ValueError(
                    "Invalid value for parameter `%s` when calling `%s`,"
                    " must conform to the pattern `/\\d+/`"
                    % (name, method_name))  # noqa: E501

    def _request(self, resource_path, http_method, path_params,
                 response_type, api_version, kwargs, body=None):
        """Assemble and dispatch one call through the shared ApiClient.

        :param str resource_path: Path template, e.g. ``/policies/{policyID}/...``.
        :param str http_method: ``GET``, ``POST`` or ``DELETE``.
        :param dict path_params: Values substituted into *resource_path*.
        :param str response_type: Swagger model name to deserialize into.
        :param str api_version: Sent as the ``api-version`` request header.
        :param dict kwargs: Remaining keywords (``overrides`` plus transport flags).
        :param body: Optional request body (already-validated model object).
        :return: Whatever ``api_client.call_api`` returns (data or request thread).
        """
        query_params = []
        if 'overrides' in kwargs:
            query_params.append(('overrides', kwargs['overrides']))  # noqa: E501
        header_params = {
            'api-version': api_version,
            # HTTP header `Accept`
            'Accept': self.api_client.select_header_accept(
                ['application/json']),  # noqa: E501
            # HTTP header `Content-Type`
            'Content-Type': self.api_client.select_header_content_type(
                ['application/json']),  # noqa: E501
        }
        return self.api_client.call_api(
            resource_path, http_method,
            path_params,
            query_params,
            header_params,
            body=body,
            post_params=[],
            files={},
            response_type=response_type,  # noqa: E501
            auth_settings=['DefaultAuthentication'],  # noqa: E501
            async_req=kwargs.get('async_req'),
            _return_http_data_only=kwargs.get('_return_http_data_only'),
            _preload_content=kwargs.get('_preload_content', True),
            _request_timeout=kwargs.get('_request_timeout'),
            collection_formats={})

    # ------------------------------------------------------------------
    # Public endpoints.
    # ------------------------------------------------------------------

    def describe_log_inspection_rule_on_policy(self, policy_id, log_inspection_rule_id, api_version, **kwargs):  # noqa: E501
        """Describe a log inspection rule  # noqa: E501

        Describe a log inspection rule including policy-level overrides.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.describe_log_inspection_rule_on_policy(policy_id, log_inspection_rule_id, api_version, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param int policy_id: The ID number of the policy. (required)
        :param int log_inspection_rule_id: The ID number of the log inspection rule. (required)
        :param str api_version: The version of the api being called. (required)
        :param bool overrides: Show only overrides defined for the current policy.
        :return: LogInspectionRule
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.describe_log_inspection_rule_on_policy_with_http_info(policy_id, log_inspection_rule_id, api_version, **kwargs)  # noqa: E501
        (data) = self.describe_log_inspection_rule_on_policy_with_http_info(policy_id, log_inspection_rule_id, api_version, **kwargs)  # noqa: E501
        return data

    def describe_log_inspection_rule_on_policy_with_http_info(self, policy_id, log_inspection_rule_id, api_version, **kwargs):  # noqa: E501
        """Describe a log inspection rule  # noqa: E501

        Describe a log inspection rule including policy-level overrides.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.describe_log_inspection_rule_on_policy_with_http_info(policy_id, log_inspection_rule_id, api_version, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param int policy_id: The ID number of the policy. (required)
        :param int log_inspection_rule_id: The ID number of the log inspection rule. (required)
        :param str api_version: The version of the api being called. (required)
        :param bool overrides: Show only overrides defined for the current policy.
        :return: LogInspectionRule
                 If the method is called asynchronously,
                 returns the request thread.
        """
        method = 'describe_log_inspection_rule_on_policy'
        self._validate_kwargs(method, kwargs)
        self._validate_required(method, [
            ('policy_id', policy_id),
            ('log_inspection_rule_id', log_inspection_rule_id),
            ('api_version', api_version)])
        self._validate_numeric(method, [
            ('policy_id', policy_id),
            ('log_inspection_rule_id', log_inspection_rule_id)])
        return self._request(
            '/policies/{policyID}/loginspection/rules/{logInspectionRuleID}', 'GET',
            {'policyID': policy_id, 'logInspectionRuleID': log_inspection_rule_id},  # noqa: E501
            response_type='LogInspectionRule',  # noqa: E501
            api_version=api_version,
            kwargs=kwargs)

    def list_log_inspection_rules_on_policy(self, policy_id, api_version, **kwargs):  # noqa: E501
        """List log inspection rules  # noqa: E501

        Lists all log inspection rules assigned to a policy.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.list_log_inspection_rules_on_policy(policy_id, api_version, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param int policy_id: The ID number of the policy. (required)
        :param str api_version: The version of the api being called. (required)
        :param bool overrides: Show only rules assigned to the current policy.
        :return: LogInspectionRules
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.list_log_inspection_rules_on_policy_with_http_info(policy_id, api_version, **kwargs)  # noqa: E501
        (data) = self.list_log_inspection_rules_on_policy_with_http_info(policy_id, api_version, **kwargs)  # noqa: E501
        return data

    def list_log_inspection_rules_on_policy_with_http_info(self, policy_id, api_version, **kwargs):  # noqa: E501
        """List log inspection rules  # noqa: E501

        Lists all log inspection rules assigned to a policy.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.list_log_inspection_rules_on_policy_with_http_info(policy_id, api_version, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param int policy_id: The ID number of the policy. (required)
        :param str api_version: The version of the api being called. (required)
        :param bool overrides: Show only rules assigned to the current policy.
        :return: LogInspectionRules
                 If the method is called asynchronously,
                 returns the request thread.
        """
        method = 'list_log_inspection_rules_on_policy'
        self._validate_kwargs(method, kwargs)
        self._validate_required(method, [
            ('policy_id', policy_id),
            ('api_version', api_version)])
        self._validate_numeric(method, [
            ('policy_id', policy_id)])
        return self._request(
            '/policies/{policyID}/loginspection/rules', 'GET',
            {'policyID': policy_id},  # noqa: E501
            response_type='LogInspectionRules',  # noqa: E501
            api_version=api_version,
            kwargs=kwargs)

    def modify_log_inspection_rule_on_policy(self, policy_id, log_inspection_rule_id, log_inspection_rule, api_version, **kwargs):  # noqa: E501
        """Modify a log inspection rule  # noqa: E501

        Modify a log inspection rule assigned to a policy. Any unset elements will be left unchanged.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.modify_log_inspection_rule_on_policy(policy_id, log_inspection_rule_id, log_inspection_rule, api_version, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param int policy_id: The ID number of the policy. (required)
        :param int log_inspection_rule_id: The ID number of the log inspection rule to modify. (required)
        :param LogInspectionRule log_inspection_rule: The settings of the log inspection rule to modify. (required)
        :param str api_version: The version of the api being called. (required)
        :param bool overrides: Show only overrides defined for the current policy.
        :return: LogInspectionRule
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.modify_log_inspection_rule_on_policy_with_http_info(policy_id, log_inspection_rule_id, log_inspection_rule, api_version, **kwargs)  # noqa: E501
        (data) = self.modify_log_inspection_rule_on_policy_with_http_info(policy_id, log_inspection_rule_id, log_inspection_rule, api_version, **kwargs)  # noqa: E501
        return data

    def modify_log_inspection_rule_on_policy_with_http_info(self, policy_id, log_inspection_rule_id, log_inspection_rule, api_version, **kwargs):  # noqa: E501
        """Modify a log inspection rule  # noqa: E501

        Modify a log inspection rule assigned to a policy. Any unset elements will be left unchanged.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.modify_log_inspection_rule_on_policy_with_http_info(policy_id, log_inspection_rule_id, log_inspection_rule, api_version, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param int policy_id: The ID number of the policy. (required)
        :param int log_inspection_rule_id: The ID number of the log inspection rule to modify. (required)
        :param LogInspectionRule log_inspection_rule: The settings of the log inspection rule to modify. (required)
        :param str api_version: The version of the api being called. (required)
        :param bool overrides: Show only overrides defined for the current policy.
        :return: LogInspectionRule
                 If the method is called asynchronously,
                 returns the request thread.
        """
        method = 'modify_log_inspection_rule_on_policy'
        self._validate_kwargs(method, kwargs)
        self._validate_required(method, [
            ('policy_id', policy_id),
            ('log_inspection_rule_id', log_inspection_rule_id),
            ('log_inspection_rule', log_inspection_rule),
            ('api_version', api_version)])
        self._validate_numeric(method, [
            ('policy_id', policy_id),
            ('log_inspection_rule_id', log_inspection_rule_id)])
        return self._request(
            '/policies/{policyID}/loginspection/rules/{logInspectionRuleID}', 'POST',
            {'policyID': policy_id, 'logInspectionRuleID': log_inspection_rule_id},  # noqa: E501
            response_type='LogInspectionRule',  # noqa: E501
            api_version=api_version,
            kwargs=kwargs,
            body=log_inspection_rule)

    def reset_log_inspection_rule_on_policy(self, policy_id, log_inspection_rule_id, api_version, **kwargs):  # noqa: E501
        """Reset log inspection rule overrides  # noqa: E501

        Remove all overrides for a log inspection rule from a policy.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.reset_log_inspection_rule_on_policy(policy_id, log_inspection_rule_id, api_version, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param int policy_id: The ID number of the policy. (required)
        :param int log_inspection_rule_id: The ID number of the log inspection rule to reset. (required)
        :param str api_version: The version of the api being called. (required)
        :param bool overrides: Show only overrides defined for the current policy.
        :return: LogInspectionRule
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.reset_log_inspection_rule_on_policy_with_http_info(policy_id, log_inspection_rule_id, api_version, **kwargs)  # noqa: E501
        (data) = self.reset_log_inspection_rule_on_policy_with_http_info(policy_id, log_inspection_rule_id, api_version, **kwargs)  # noqa: E501
        return data

    def reset_log_inspection_rule_on_policy_with_http_info(self, policy_id, log_inspection_rule_id, api_version, **kwargs):  # noqa: E501
        """Reset log inspection rule overrides  # noqa: E501

        Remove all overrides for a log inspection rule from a policy.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.reset_log_inspection_rule_on_policy_with_http_info(policy_id, log_inspection_rule_id, api_version, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param int policy_id: The ID number of the policy. (required)
        :param int log_inspection_rule_id: The ID number of the log inspection rule to reset. (required)
        :param str api_version: The version of the api being called. (required)
        :param bool overrides: Show only overrides defined for the current policy.
        :return: LogInspectionRule
                 If the method is called asynchronously,
                 returns the request thread.
        """
        method = 'reset_log_inspection_rule_on_policy'
        self._validate_kwargs(method, kwargs)
        self._validate_required(method, [
            ('policy_id', policy_id),
            ('log_inspection_rule_id', log_inspection_rule_id),
            ('api_version', api_version)])
        self._validate_numeric(method, [
            ('policy_id', policy_id),
            ('log_inspection_rule_id', log_inspection_rule_id)])
        return self._request(
            '/policies/{policyID}/loginspection/rules/{logInspectionRuleID}', 'DELETE',
            {'policyID': policy_id, 'logInspectionRuleID': log_inspection_rule_id},  # noqa: E501
            response_type='LogInspectionRule',  # noqa: E501
            api_version=api_version,
            kwargs=kwargs)
| 52.594286
| 311
| 0.649428
| 3,387
| 27,612
| 5.015058
| 0.063183
| 0.107147
| 0.126104
| 0.060403
| 0.955552
| 0.955552
| 0.951725
| 0.944189
| 0.940834
| 0.936006
| 0
| 0.015514
| 0.266985
| 27,612
| 524
| 312
| 52.694656
| 0.823715
| 0.334492
| 0
| 0.780488
| 0
| 0
| 0.303074
| 0.123903
| 0
| 0
| 0
| 0
| 0
| 1
| 0.031359
| false
| 0
| 0.013937
| 0
| 0.090592
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f7546364aa146d729cb8fd1cae565084e05c8ba3
| 26,533
|
py
|
Python
|
pirates/leveleditor/worldData/A_GyedoVegasIsland.py
|
Willy5s/Pirates-Online-Rewritten
|
7434cf98d9b7c837d57c181e5dabd02ddf98acb7
|
[
"BSD-3-Clause"
] | 81
|
2018-04-08T18:14:24.000Z
|
2022-01-11T07:22:15.000Z
|
pirates/leveleditor/worldData/A_GyedoVegasIsland.py
|
Willy5s/Pirates-Online-Rewritten
|
7434cf98d9b7c837d57c181e5dabd02ddf98acb7
|
[
"BSD-3-Clause"
] | 4
|
2018-09-13T20:41:22.000Z
|
2022-01-08T06:57:00.000Z
|
pirates/leveleditor/worldData/A_GyedoVegasIsland.py
|
Willy5s/Pirates-Online-Rewritten
|
7434cf98d9b7c837d57c181e5dabd02ddf98acb7
|
[
"BSD-3-Clause"
] | 26
|
2018-05-26T12:49:27.000Z
|
2021-09-11T09:11:59.000Z
|
from pandac.PandaModules import Point3, VBase3, Vec4, Vec3
objectStruct = {'Objects': {'1149705583.09Shochet': {'Type': 'Island','Name': 'VegasIsland','File': '','Environment': 'Interior','Minimap': False,'Objects': {'1149705605.5Shochet': {'Type': 'Locator Node','Name': 'portal_exterior_1','Hpr': VBase3(-18.331, 0.0, 0.0),'Pos': Point3(-219.917, -319.235, 0.595),'Scale': VBase3(1.0, 1.0, 1.0)},'1149705605.5Shochet0': {'Type': 'Locator Node','Name': 'portal_exterior_2','Hpr': VBase3(68.97, 0.0, 0.0),'Pos': Point3(-285.103, -58.817, 44.049),'Scale': VBase3(1.0, 1.0, 1.0)},'1149705607.02Shochet': {'Type': 'Locator Node','Name': 'portal_exterior_1','Hpr': VBase3(-18.331, 0.0, 0.0),'Pos': Point3(-219.917, -319.235, 0.595),'Scale': VBase3(1.0, 1.0, 1.0)},'1149705607.63Shochet': {'Type': 'Locator Node','Name': 'portal_exterior_2','Hpr': VBase3(68.97, 0.0, 0.0),'Pos': Point3(-285.103, -58.817, 44.049),'Scale': VBase3(1.0, 1.0, 1.0)},'1149705619.08Shochet': {'Type': 'Cell Portal Area','Name': 'cell_pier','Hpr': Point3(0.0, 0.0, 0.0),'Objects': {'1149705619.08Shochet0': {'Type': 'Parlor Game','Category': 'Blackjack','BetMultiplier': '1','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(421.053, -131.608, 5.287),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/Cardtable_HalfCircle'}},'1149705632.05Shochet': {'Type': 'Parlor Game','Category': 'Holdem','BetMultiplier': '1','Hpr': VBase3(45.0, 0.0, 0.0),'Pos': Point3(443.184, -123.256, 5.295),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/Cardtable_Pill'}},'1169451658.54Shochet': {'Type': 'Searchable Container','Aggro Radius': 5.0,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(508.36, -134.581, 5.213),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/desk_gov'},'searchTime': '6.0','type': 'Desk'},'1169451790.01Shochet': {'Type': 'Searchable Container','Aggro Radius': 5.0,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(480.699, -161.909, 5.213),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/wellA'},'searchTime': '6.0','type': 
'WellA'},'1171348677.72Shochet': {'Type': 'Interactive Prop','Hpr': VBase3(96.302, 0.0, 0.0),'Pos': Point3(305.078, -115.012, 4.769),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/dummy_zero'},'interactAble': 'player','interactType': 'hit'},'1186785500.34Shochet': {'Type': 'Player Spawn Node','Hpr': Point3(0.0, 0.0, 0.0),'Index': 1,'Pos': Point3(485.258, -86.884, 5.213),'Scale': VBase3(1.0, 1.0, 1.0),'Spawnables': 'All','Visual': {'Color': (0.5, 0.5, 0.5, 1),'Model': 'models/misc/smiley'}}},'Pos': Point3(0.0, 0.0, 0.0),'Scale': VBase3(1.0, 1.0, 1.0)},'1149706548.67Shochet': {'Type': 'Locator Node','Name': 'portal_exterior_1','Hpr': VBase3(-18.331, 0.0, 0.0),'Pos': Point3(-219.917, -319.235, 0.595),'Scale': VBase3(1.0, 1.0, 1.0)},'1149706548.67Shochet0': {'Type': 'Locator Node','Name': 'portal_exterior_2','Hpr': VBase3(68.97, 0.0, 0.0),'Pos': Point3(-285.103, -58.817, 44.049),'Scale': VBase3(1.0, 1.0, 1.0)},'1149706577.28Shochet': {'Type': 'Cell Portal Area','Name': 'cell_spanish_town','Hpr': Point3(0.0, 0.0, 0.0),'Objects': {'1149706632.97Shochet': {'Type': 'Spawn Node','Aggro Radius': '12.0000','AnimSet': 'default','AuraFX': 'None','Hpr': Point3(0.0, 0.0, 0.0),'Min Population': '2','Patrol Radius': '12.0000','Pause Chance': '100','Pause Duration': '30','Pos': Point3(500.886, 153.538, 45.292),'PoseAnim': '','PoseFrame': '','PropFXLeft': 'None','PropFXRight': 'None','PropLeft': 'None','PropRight': 'None','Scale': VBase3(1.0, 1.0, 1.0),'Spawnables': 'Scorpion','Start State': 'Ambush','StartFrame': '0','Team': '1','TrailFX': 'None','TrailLeft': 'None','TrailRight': 'None','Visual': {'Model': 'models/misc/smiley'}},'1157596044.44jasyeung': {'Type': 'Townsperson','Category': 'MedicineMan','AnimSet': 'default','AuraFX': 'None','CustomModel': 'None','GhostColor': 'None','GhostFX': 0,'Greeting Animation': '','Hpr': VBase3(-173.418, 0.0, 0.0),'Instanced World': 'None','Level': '37','Notice Animation 1': '','Notice Animation 2': '','Patrol Radius': 
12,'Pos': Point3(454.118, 103.165, 41.541),'PoseAnim': '','PoseFrame': '','PropFXLeft': 'None','PropFXRight': 'None','PropLeft': 'None','PropRight': 'None','Requires Quest Interest': False,'Respawns': True,'Scale': VBase3(1.0, 1.0, 1.0),'Start State': 'Idle','StartFrame': '0','Team': 'Villager','TrailFX': 'None','TrailLeft': 'None','TrailRight': 'None','spawnTimeBegin': 0.0,'spawnTimeEnd': 0.0},'1169192829.53Shochet': {'Type': 'Townsperson','Category': 'Commoner','AnimSet': 'default','AuraFX': 'None','CustomModel': 'None','GhostColor': 'None','GhostFX': 0,'Greeting Animation': '','Hpr': VBase3(128.625, 0.0, 0.0),'Instanced World': 'None','Level': '37','Notice Animation 1': '','Notice Animation 2': '','Patrol Radius': 12,'Pos': Point3(572.566, 129.349, 42.792),'PoseAnim': '','PoseFrame': '','PropFXLeft': 'None','PropFXRight': 'None','PropLeft': 'None','PropRight': 'None','Requires Quest Interest': False,'Respawns': True,'Scale': VBase3(1.0, 1.0, 1.0),'Start State': 'Idle','StartFrame': '0','Team': 'Villager','TrailFX': 'None','TrailLeft': 'None','TrailRight': 'None','spawnTimeBegin': 0.0,'spawnTimeEnd': 0.0},'1169616338.61Shochet': {'Type': 'Townsperson','Category': 'Shipwright','AnimSet': 'default','AuraFX': 'None','CustomModel': 'None','GhostColor': 'None','GhostFX': 0,'Greeting Animation': '','Hpr': VBase3(95.14, 0.0, 0.0),'Instanced World': 'None','Level': '37','Notice Animation 1': '','Notice Animation 2': '','Patrol Radius': 12,'Pos': Point3(594.082, 77.364, 42.876),'PoseAnim': '','PoseFrame': '','PropFXLeft': 'None','PropFXRight': 'None','PropLeft': 'None','PropRight': 'None','Requires Quest Interest': False,'Respawns': True,'Scale': VBase3(1.0, 1.0, 1.0),'Start State': 'Idle','StartFrame': '0','Team': 'Player','TrailFX': 'None','TrailLeft': 'None','TrailRight': 'None','spawnTimeBegin': 0.0,'spawnTimeEnd': 0.0},'1169616428.63Shochet': {'Type': 'Object Spawn Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(470.936, 243.777, 49.657),'Priority': '1','Scale': 
VBase3(1.0, 1.0, 1.0),'SpawnDelay': '10','Spawnables': 'Buried Treasure','Visual': {'Color': (0.8, 0.2, 0.65, 1),'Model': 'models/misc/smiley'},'startingDepth': '5'},'1171691663.17Shochet': {'Type': 'Townsperson','Category': 'Gypsy','AnimSet': 'default','AuraFX': 'None','CustomModel': 'None','GhostColor': 'None','GhostFX': 0,'Greeting Animation': '','Hpr': VBase3(-161.55, 0.0, 0.0),'Instanced World': 'None','Level': '37','Notice Animation 1': '','Notice Animation 2': '','Patrol Radius': 12,'Pos': Point3(446.386, 327.483, 54.071),'PoseAnim': '','PoseFrame': '','PropFXLeft': 'None','PropFXRight': 'None','PropLeft': 'None','PropRight': 'None','Requires Quest Interest': False,'Respawns': True,'Scale': VBase3(1.0, 1.0, 1.0),'Start State': 'Idle','StartFrame': '0','Team': 'Villager','TrailFX': 'None','TrailLeft': 'None','TrailRight': 'None','spawnTimeBegin': 0.0,'spawnTimeEnd': 0.0}},'Pos': Point3(0.0, 0.0, 0.0),'Scale': VBase3(1.0, 1.0, 1.0)},'1154059325.91Shochet': {'Type': 'Locator Node','Name': 'portal_exterior_1','Hpr': VBase3(-18.331, 0.0, 0.0),'Pos': Point3(-219.917, -319.235, 0.595),'Scale': VBase3(1.0, 1.0, 1.0)},'1154059325.91Shochet0': {'Type': 'Locator Node','Name': 'portal_exterior_2','Hpr': VBase3(68.97, 0.0, 0.0),'Pos': Point3(-285.103, -58.817, 44.049),'Scale': VBase3(1.0, 1.0, 1.0)},'1154059341.09Shochet': {'Type': 'Animal','Hpr': Point3(0.0, 0.0, 0.0),'Patrol Radius': 12,'Pos': Point3(223.47, -25.232, 5.178),'PoseAnim': '','PoseFrame': '','Respawns': True,'Scale': VBase3(1.0, 1.0, 1.0),'Species': 'Pig','Start State': 'Walk','StartFrame': '0'},'1154059344.67Shochet': {'Type': 'Animal','Hpr': Point3(0.0, 0.0, 0.0),'Patrol Radius': 12,'Pos': Point3(230.53, -15.957, 5.073),'PoseAnim': '','PoseFrame': '','Respawns': True,'Scale': VBase3(1.0, 1.0, 1.0),'Species': 'Chicken','Start State': 'Walk','StartFrame': '0'},'1154059351.97Shochet': {'Type': 'Animal','Hpr': Point3(0.0, 0.0, 0.0),'Patrol Radius': 12,'Pos': Point3(239.433, -6.269, 4.768),'PoseAnim': 
'','PoseFrame': '','Respawns': True,'Scale': VBase3(1.0, 1.0, 1.0),'Species': 'Rooster','Start State': 'Walk','StartFrame': '0'},'1154059362.19Shochet': {'Type': 'Creature','Boss': False,'Boss Name': 'Anonymous','Hpr': Point3(0.0, 0.0, 0.0),'Level': '37','Patrol Radius': 12,'Pos': Point3(-93.119, -296.023, 1.391),'PoseAnim': '','PoseFrame': '','Respawns': True,'Scale': VBase3(1.0, 1.0, 1.0),'Species': 'Crab','Start State': 'Idle','StartFrame': '0'},'1154059366.69Shochet': {'Type': 'Creature','Boss': False,'Boss Name': 'Anonymous','Hpr': Point3(0.0, 0.0, 0.0),'Level': '37','Patrol Radius': 12,'Pos': Point3(267.997, 4.319, 7.507),'PoseAnim': '','PoseFrame': '','Respawns': True,'Scale': VBase3(1.0, 1.0, 1.0),'Species': 'FlyTrap','Start State': 'Idle','StartFrame': '0'},'1157596022.35jasyeung': {'Type': 'Locator Node','Name': 'portal_exterior_1','Hpr': VBase3(-18.331, 0.0, 0.0),'Pos': Point3(-219.917, -319.235, 0.595),'Scale': VBase3(1.0, 1.0, 1.0)},'1157596022.35jasyeung0': {'Type': 'Locator Node','Name': 'portal_exterior_2','Hpr': VBase3(68.97, 0.0, 0.0),'Pos': Point3(-285.103, -58.817, 44.049),'Scale': VBase3(1.0, 1.0, 1.0)},'1165018948.05sdnaik': {'Type': 'Locator Node','Name': 'portal_exterior_1','Hpr': VBase3(-18.331, 0.0, 0.0),'Pos': Point3(-219.917, -319.235, 0.595),'Scale': VBase3(1.0, 1.0, 1.0)},'1165018950.47sdnaik': {'Type': 'Locator Node','Name': 'portal_exterior_2','Hpr': VBase3(68.97, 0.0, 0.0),'Pos': Point3(-285.103, -58.817, 44.049),'Scale': VBase3(1.0, 1.0, 1.0)},'1169192695.17Shochet': {'Type': 'Port Collision Sphere','Name': 'VegasPort','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(147.216, -168.582, 0.0),'Scale': VBase3(470.212, 470.212, 470.212),'VisSize': '','Visual': {'Color': (0.5, 0.5, 1.0, 0.2),'Model': 'models/misc/smiley'}},'1169192874.58Shochet': {'Type': 'Cell Portal Area','Name': 'cell_green_area','Hpr': Point3(0.0, 0.0, 0.0),'Objects': {'1165019061.13sdnaik': {'Type': 'Spawn Node','AnimSet': 'default','AuraFX': 'None','Hpr': Point3(0.0, 
0.0, 0.0),'Min Population': '2','Patrol Radius': '12.0000','Pause Chance': 100,'Pause Duration': 30,'Pos': Point3(119.094, -74.897, 4.167),'PoseAnim': '','PoseFrame': '','PropFXLeft': 'None','PropFXRight': 'None','PropLeft': 'None','PropRight': 'None','Scale': VBase3(1.0, 1.0, 1.0),'Spawnables': 'Bat','Start State': 'Ambush','StartFrame': '0','Team': '1','TrailFX': 'None','TrailLeft': 'None','TrailRight': 'None','VisSize': '','Visual': {'Color': (0, 0, 0.65, 1),'Model': 'models/misc/smiley'}}},'Pos': Point3(0.0, 0.0, 0.0),'Scale': VBase3(1.0, 1.0, 1.0)},'1169192882.95Shochet': {'Type': 'Cell Portal Area','Name': 'cell_shanty_town','Hpr': Point3(0.0, 0.0, 0.0),'Objects': {'1165019080.61sdnaik': {'Type': 'Spawn Node','AnimSet': 'default','AuraFX': 'None','Hpr': Point3(0.0, 0.0, 0.0),'Min Population': '2','Patrol Radius': 12,'Pause Chance': 100,'Pause Duration': 30,'Pos': Point3(180.048, 221.152, 66.347),'PoseAnim': '','PoseFrame': '','PropFXLeft': 'None','PropFXRight': 'None','PropLeft': 'None','PropRight': 'None','Scale': VBase3(1.0, 1.0, 1.0),'Spawnables': 'Wasp','Start State': 'Patrol','StartFrame': '0','Team': '1','TrailFX': 'None','TrailLeft': 'None','TrailRight': 'None','Visual': {'Color': (0, 0, 0.65, 1),'Model': 'models/misc/smiley'}}},'Pos': Point3(0.0, 0.0, 0.0),'Scale': VBase3(1.0, 1.0, 1.0)},'1169192926.38Shochet': {'Type': 'Spawn Node','AnimSet': 'default','AuraFX': 'None','Hpr': Point3(0.0, 0.0, 0.0),'Min Population': '1','Patrol Radius': '12.0000','Pause Chance': 100,'Pause Duration': 30,'Pos': Point3(640.128, -165.933, 6.871),'PoseAnim': '','PoseFrame': '','PropFXLeft': 'None','PropFXRight': 'None','PropLeft': 'None','PropRight': 'None','Scale': VBase3(1.0, 1.0, 1.0),'Spawnables': 'Alligator','Start State': 'Patrol','StartFrame': '0','Team': '1','TrailFX': 'None','TrailLeft': 'None','TrailRight': 'None','VisSize': '','Visual': {'Color': (0, 0, 0.65, 1),'Model': 'models/misc/smiley'}},'1169193027.28Shochet': {'Type': 'Cell Portal Area','Name': 
'cell_graveyard','Hpr': Point3(0.0, 0.0, 0.0),'Objects': {'1169193004.98Shochet': {'Type': 'Spawn Node','AnimSet': 'default','AuraFX': 'None','Hpr': Point3(0.0, 0.0, 0.0),'Min Population': '1','Patrol Radius': '12.0000','Pause Chance': 100,'Pause Duration': 30,'Pos': Point3(-255.945, 98.577, 68.242),'PoseAnim': '','PoseFrame': '','PropFXLeft': 'None','PropFXRight': 'None','PropLeft': 'None','PropRight': 'None','Scale': VBase3(1.0, 1.0, 1.0),'Spawnables': 'Stump','Start State': 'Patrol','StartFrame': '0','Team': '1','TrailFX': 'None','TrailLeft': 'None','TrailRight': 'None','Visual': {'Color': (0, 0, 0.65, 1),'Model': 'models/misc/smiley'}}},'Pos': Point3(0.0, 0.0, 0.0),'Scale': VBase3(1.0, 1.0, 1.0)},'1169449533.4Shochet': {'Type': 'Searchable Container','Aggro Radius': 5.0,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(494.568, -149.091, 5.213),'Scale': VBase3(3.885, 3.885, 3.885),'Visual': {'Color': (1.0, 0.0, 0.0, 1.0),'Model': 'models/props/barrel'},'searchTime': '6.0','type': 'Barrel'},'1169616489.03Shochet': {'Type': 'Creature','Boss': False,'Boss Name': 'Anonymous','Hpr': Point3(0.0, 0.0, 0.0),'Level': '37','Patrol Radius': 12,'Pos': Point3(327.376, 1.76, 10.049),'PoseAnim': '','PoseFrame': '','Respawns': True,'Scale': VBase3(1.0, 1.0, 1.0),'Species': 'Stump','Start State': 'Idle','StartFrame': '0'},'1179593088.0Shochet1': {'Type': 'Creature','Boss': False,'Boss Name': 'Anonymous','Hpr': Point3(0.0, 0.0, 0.0),'Level': '37','Patrol Radius': 12,'Pos': Point3(271.381, -60.852, 2.396),'PoseAnim': '','PoseFrame': '','Respawns': True,'Scale': VBase3(1.0, 1.0, 1.0),'Species': 'Scorpion','Start State': 'Idle','StartFrame': '0'},'1186784633.39Shochet': {'Type': 'Locator Node','Name': 'portal_exterior_1','Hpr': VBase3(-18.331, 0.0, 0.0),'Pos': Point3(-219.917, -319.235, 0.595),'Scale': VBase3(1.0, 1.0, 1.0)},'1186784634.77Shochet': {'Type': 'Locator Node','Name': 'portal_exterior_2','Hpr': VBase3(68.97, 0.0, 0.0),'Pos': Point3(-285.103, -58.817, 44.049),'Scale': 
VBase3(1.0, 1.0, 1.0)},'1186784662.83Shochet': {'Type': 'Dinghy','Aggro Radius': '12.0000','Hpr': VBase3(90.585, 0.0, 0.0),'Location': 'Water','Pos': Point3(373.406, -138.477, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/shipparts/dingy-geometry_High'}},'1186785480.13Shochet': {'Type': 'Locator Node','Name': 'portal_exterior_1','Hpr': VBase3(-18.331, 0.0, 0.0),'Pos': Point3(-219.917, -319.235, 0.595),'Scale': VBase3(1.0, 1.0, 1.0)},'1186785480.14Shochet': {'Type': 'Locator Node','Name': 'portal_exterior_2','Hpr': VBase3(68.97, 0.0, 0.0),'Pos': Point3(-285.103, -58.817, 44.049),'Scale': VBase3(1.0, 1.0, 1.0)},'1187140148.8gjeon': {'Type': 'Cannon','Hpr': Point3(0.0, 0.0, 0.0),'MaxPower': '1.0','MinPower': '0.2','Pos': Point3(376.873, -67.173, 4.441),'Scale': VBase3(2.833, 2.833, 2.833),'Visual': {'Model': 'models/shipparts/cannon_hi'}},'1187407776.73gjeon': {'Type': 'Building Exterior','File': 'A_GyedoInterior','ExtUid': '1187407776.73gjeon0','Hpr': Point3(0.0, 0.0, 0.0),'Objects': {'1234924975.76caoconno': {'Type': 'Door Locator Node','Name': 'door_locator','Hpr': VBase3(-180.0, 0.0, 0.0),'Pos': Point3(-8.035, -1.826, 0.068),'Scale': VBase3(1.0, 1.0, 1.0)},'1234924977.02caoconno': {'Type': 'Door Locator Node','Name': 'door_locator_2','Hpr': VBase3(-180.0, 0.0, 0.0),'Pos': Point3(8.421, -1.87, 0.156),'Scale': VBase3(1.0, 1.0, 1.0)}},'Pos': Point3(378.723, -30.806, 8.78),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Door': 'models/buildings/shanty_guildhall_door','Interior': 'models/buildings/interior_shanty_guildhall','Model': 'models/buildings/spanish_npc_house_a_exterior','SignFrame': '','SignImage': 'models/buildings/sign1_eng_a_icon_blacksmith'}},'1187634515.47gjeon': {'Type': 'Barrel','DisableCollision': False,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(592.624, -103.901, 2.332),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/barrel_worn'}},'1187634559.22gjeon': {'Type': 'Barrel','DisableCollision': False,'Hpr': 
Point3(0.0, 0.0, 0.0),'Pos': Point3(604.356, -93.711, 2.764),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/barrel_worn'}},'1191607138.69gjeon': {'Type': 'Animated Avatar','Category': 'cast','Animation Track': 'shovel','AuraFX': 'None','Effect Type': 'None','Hpr': Point3(74.754, 0.0, 0.0),'Pos': Point3(640.247, -90.735, 2.301),'PoseAnim': '','PoseFrame': '','PropFXLeft': 'None','PropFXRight': 'None','PropLeft': 'None','PropRight': 'None','Scale': VBase3(1.0, 1.0, 1.0),'StartFrame': '0','SubCategory': 'models/char/jr_2000','TrailFX': 'None','TrailLeft': 'None','TrailRight': 'None'},'1191607940.19gjeon': {'Type': 'Animated Avatar - Townfolk','Animation Track': 'flute','Holiday': '','Hpr': Point3(85.387, 0.0, 0.0),'Pos': Point3(630.709, -86.24, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Beard': 6,'Belt': 2,'BeltColor': 3,'Coat': 2,'CoatColor': 14,'Gender': 'f','Hair': 4,'HairColor': 1,'Hat': 2,'Mustache': 4,'Pants': 1,'PantsColor': 2,'Shape': 2,'Shirt': 1,'ShirtColor': 1,'Shoe': 2,'Skin': 11,'Sock': 0}},'1234924978.07caoconno': {'Type': 'Door Locator Node','Name': 'door_locator','Hpr': VBase3(-180.0, 0.0, 0.0),'Pos': Point3(0.127, -4.354, 4.971),'Scale': VBase3(1.0, 1.0, 1.0)},'1234924978.08caoconno': {'Type': 'Door Locator Node','Name': 'door_locator_2','Hpr': VBase3(-180.0, 0.0, 0.0),'Pos': Point3(8.421, -1.87, 0.156),'Scale': VBase3(1.0, 1.0, 1.0)},'1235676532.77gjeon': {'Type': 'Building Exterior','File': '','ExtUid': '1235676532.77gjeon0','Hpr': Point3(0.0, 0.0, 0.0),'Objects': {'1235676532.83gjeon': {'Type': 'Door Locator Node','Name': 'door_locator','GridPos': Point3(656.644, -209.671, 11.583),'Hpr': VBase3(-180.0, 0.0, 0.0),'Pos': Point3(0.127, -4.354, 4.971),'Scale': VBase3(1.0, 1.0, 1.0)}},'Pos': Point3(656.517, -205.317, 6.612),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Door': 'models/buildings/shanty_guildhall_door','Model': 'models/buildings/shanty_npc_house_a_exterior','SignFrame': '','SignImage': 
'models/buildings/sign1_eng_a_icon_barber'}},'1240961664.0gjeon1': {'Type': 'Spawn Node','AnimSet': 'default','AuraFX': 'None','Hpr': Point3(0.0, 0.0, 0.0),'Min Population': '1','Patrol Radius': '12.0000','Pause Chance': 100,'Pause Duration': 30,'Pos': Point3(639.664, -118.133, 3.765),'PoseAnim': '','PoseFrame': '','PropFXLeft': 'None','PropFXRight': 'None','PropLeft': 'None','PropRight': 'None','Scale': VBase3(1.0, 1.0, 1.0),'Spawnables': 'Noob Skeleton','Start State': 'Ambush','StartFrame': '0','Team': 'default','TrailFX': 'None','TrailLeft': 'None','TrailRight': 'None','VisSize': '','Visual': {'Color': (0, 0, 0.65, 1),'Model': 'models/misc/smiley'}},'1275692669.07gjeon': {'Type': 'Townsperson','Category': 'Commoner','AnimSet': 'default','AuraFX': 'None','CustomModel': 'None','GhostColor': 'None','GhostFX': 0,'Greeting Animation': '','HelpID': 'NONE','Holiday': '','Hpr': Point3(0.0, 0.0, 0.0),'Instanced World': 'None','Level': '37','Notice Animation 1': '','Notice Animation 2': '','Patrol Radius': '12.0000','Pos': Point3(662.545, -63.045, 0.0),'PoseAnim': '','PoseFrame': '','Private Status': 'All','PropFXLeft': 'None','PropFXRight': 'None','PropLeft': 'None','PropRight': 'None','Requires Quest Interest': False,'Respawns': True,'Scale': VBase3(1.0, 1.0, 1.0),'ShopID': 'PORT_ROYAL_DEFAULTS','Start State': 'Walk','StartFrame': '0','Team': 'Player','TrailFX': 'None','TrailLeft': 'None','TrailRight': 'None','VisSize': '','spawnTimeBegin': 9.25,'spawnTimeEnd': 18.5}},'Undockable': False,'Visibility': 'Grid','Visual': {'Model': 'models/islands/bilgewater_zero'}}},'Node Links': [],'Layers': {},'ObjectIds': {'1149705583.09Shochet': '["Objects"]["1149705583.09Shochet"]','1149705605.5Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1149705605.5Shochet"]','1149705605.5Shochet0': '["Objects"]["1149705583.09Shochet"]["Objects"]["1149705605.5Shochet0"]','1149705607.02Shochet': 
'["Objects"]["1149705583.09Shochet"]["Objects"]["1149705607.02Shochet"]','1149705607.63Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1149705607.63Shochet"]','1149705619.08Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1149705619.08Shochet"]','1149705619.08Shochet0': '["Objects"]["1149705583.09Shochet"]["Objects"]["1149705619.08Shochet"]["Objects"]["1149705619.08Shochet0"]','1149705632.05Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1149705619.08Shochet"]["Objects"]["1149705632.05Shochet"]','1149706548.67Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1149706548.67Shochet"]','1149706548.67Shochet0': '["Objects"]["1149705583.09Shochet"]["Objects"]["1149706548.67Shochet0"]','1149706577.28Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1149706577.28Shochet"]','1149706632.97Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1149706577.28Shochet"]["Objects"]["1149706632.97Shochet"]','1154059325.91Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1154059325.91Shochet"]','1154059325.91Shochet0': '["Objects"]["1149705583.09Shochet"]["Objects"]["1154059325.91Shochet0"]','1154059341.09Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1154059341.09Shochet"]','1154059344.67Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1154059344.67Shochet"]','1154059351.97Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1154059351.97Shochet"]','1154059362.19Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1154059362.19Shochet"]','1154059366.69Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1154059366.69Shochet"]','1157596022.35jasyeung': '["Objects"]["1149705583.09Shochet"]["Objects"]["1157596022.35jasyeung"]','1157596022.35jasyeung0': '["Objects"]["1149705583.09Shochet"]["Objects"]["1157596022.35jasyeung0"]','1157596044.44jasyeung': '["Objects"]["1149705583.09Shochet"]["Objects"]["1149706577.28Shochet"]["Objects"]["1157596044.44jasyeung"]','1165018948.05sdnaik': 
'["Objects"]["1149705583.09Shochet"]["Objects"]["1165018948.05sdnaik"]','1165018950.47sdnaik': '["Objects"]["1149705583.09Shochet"]["Objects"]["1165018950.47sdnaik"]','1165019061.13sdnaik': '["Objects"]["1149705583.09Shochet"]["Objects"]["1169192874.58Shochet"]["Objects"]["1165019061.13sdnaik"]','1165019080.61sdnaik': '["Objects"]["1149705583.09Shochet"]["Objects"]["1169192882.95Shochet"]["Objects"]["1165019080.61sdnaik"]','1169192695.17Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1169192695.17Shochet"]','1169192829.53Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1149706577.28Shochet"]["Objects"]["1169192829.53Shochet"]','1169192874.58Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1169192874.58Shochet"]','1169192882.95Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1169192882.95Shochet"]','1169192926.38Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1169192926.38Shochet"]','1169193004.98Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1169193027.28Shochet"]["Objects"]["1169193004.98Shochet"]','1169193027.28Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1169193027.28Shochet"]','1169449533.4Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1169449533.4Shochet"]','1169451658.54Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1149705619.08Shochet"]["Objects"]["1169451658.54Shochet"]','1169451790.01Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1149705619.08Shochet"]["Objects"]["1169451790.01Shochet"]','1169616338.61Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1149706577.28Shochet"]["Objects"]["1169616338.61Shochet"]','1169616428.63Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1149706577.28Shochet"]["Objects"]["1169616428.63Shochet"]','1169616489.03Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1169616489.03Shochet"]','1171348677.72Shochet': 
'["Objects"]["1149705583.09Shochet"]["Objects"]["1149705619.08Shochet"]["Objects"]["1171348677.72Shochet"]','1171691663.17Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1149706577.28Shochet"]["Objects"]["1171691663.17Shochet"]','1179593088.0Shochet1': '["Objects"]["1149705583.09Shochet"]["Objects"]["1179593088.0Shochet1"]','1186784633.39Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1186784633.39Shochet"]','1186784634.77Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1186784634.77Shochet"]','1186784662.83Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1186784662.83Shochet"]','1186785480.13Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1186785480.13Shochet"]','1186785480.14Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1186785480.14Shochet"]','1186785500.34Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1149705619.08Shochet"]["Objects"]["1186785500.34Shochet"]','1187140148.8gjeon': '["Objects"]["1149705583.09Shochet"]["Objects"]["1187140148.8gjeon"]','1187407776.73gjeon': '["Objects"]["1149705583.09Shochet"]["Objects"]["1187407776.73gjeon"]','1187407776.73gjeon0': '["Objects"]["1149705583.09Shochet"]["Objects"]["1187407776.73gjeon"]','1187634515.47gjeon': '["Objects"]["1149705583.09Shochet"]["Objects"]["1187634515.47gjeon"]','1187634559.22gjeon': '["Objects"]["1149705583.09Shochet"]["Objects"]["1187634559.22gjeon"]','1191607138.69gjeon': '["Objects"]["1149705583.09Shochet"]["Objects"]["1191607138.69gjeon"]','1191607940.19gjeon': '["Objects"]["1149705583.09Shochet"]["Objects"]["1191607940.19gjeon"]','1234924975.76caoconno': '["Objects"]["1149705583.09Shochet"]["Objects"]["1187407776.73gjeon"]["Objects"]["1234924975.76caoconno"]','1234924977.02caoconno': '["Objects"]["1149705583.09Shochet"]["Objects"]["1187407776.73gjeon"]["Objects"]["1234924977.02caoconno"]','1234924978.07caoconno': '["Objects"]["1149705583.09Shochet"]["Objects"]["1234924978.07caoconno"]','1234924978.08caoconno': 
'["Objects"]["1149705583.09Shochet"]["Objects"]["1234924978.08caoconno"]','1235676532.77gjeon': '["Objects"]["1149705583.09Shochet"]["Objects"]["1235676532.77gjeon"]','1235676532.77gjeon0': '["Objects"]["1149705583.09Shochet"]["Objects"]["1235676532.77gjeon"]','1235676532.83gjeon': '["Objects"]["1149705583.09Shochet"]["Objects"]["1235676532.77gjeon"]["Objects"]["1235676532.83gjeon"]','1240961664.0gjeon1': '["Objects"]["1149705583.09Shochet"]["Objects"]["1240961664.0gjeon1"]','1275692669.07gjeon': '["Objects"]["1149705583.09Shochet"]["Objects"]["1275692669.07gjeon"]'}}
extraInfo = {'camPos': Point3(652.314, -154.956, 60.7173),'camHpr': VBase3(-5.3369, -32.9357, 0),'focalLength': 1.39999997616,'skyState': 2,'fog': 0}
| 8,844.333333
| 26,324
| 0.659368
| 3,595
| 26,533
| 4.843672
| 0.157441
| 0.034802
| 0.037558
| 0.033538
| 0.639407
| 0.582438
| 0.537529
| 0.474818
| 0.46919
| 0.458853
| 0
| 0.221695
| 0.05495
| 26,533
| 3
| 26,325
| 8,844.333333
| 0.472742
| 0
| 0
| 0
| 0
| 0
| 0.600588
| 0.226351
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
f77f5fdc4ee4b2f799ff62dcb0683554c965717f
| 15,011
|
py
|
Python
|
safe_transaction_service/contracts/decoder_abis/sight.py
|
byteflyfunny/safe-transaction-service
|
2a1a855d9881181a57692057aeb91c9fd8ae3de5
|
[
"MIT"
] | 67
|
2019-08-16T16:26:42.000Z
|
2022-03-21T20:32:43.000Z
|
safe_transaction_service/contracts/decoder_abis/sight.py
|
byteflyfunny/safe-transaction-service
|
2a1a855d9881181a57692057aeb91c9fd8ae3de5
|
[
"MIT"
] | 550
|
2019-07-11T12:09:06.000Z
|
2022-03-31T16:32:00.000Z
|
safe_transaction_service/contracts/decoder_abis/sight.py
|
byteflyfunny/safe-transaction-service
|
2a1a855d9881181a57692057aeb91c9fd8ae3de5
|
[
"MIT"
] | 83
|
2019-12-06T11:22:32.000Z
|
2022-03-30T10:09:22.000Z
|
# flake8: noqa E501
import json
conditional_token_abi = json.loads(
'[{"constant":true,"inputs":[{"name":"owner","type":"address"},{"name":"id","type":"uint256"}],"name":"balanceOf","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"name":"interfaceId","type":"bytes4"}],"name":"supportsInterface","outputs":[{"name":"","type":"bool"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"name":"","type":"bytes32"},{"name":"","type":"uint256"}],"name":"payoutNumerators","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"name":"from","type":"address"},{"name":"to","type":"address"},{"name":"ids","type":"uint256[]"},{"name":"values","type":"uint256[]"},{"name":"data","type":"bytes"}],"name":"safeBatchTransferFrom","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[{"name":"owners","type":"address[]"},{"name":"ids","type":"uint256[]"}],"name":"balanceOfBatch","outputs":[{"name":"","type":"uint256[]"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"name":"operator","type":"address"},{"name":"approved","type":"bool"}],"name":"setApprovalForAll","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[{"name":"","type":"bytes32"}],"name":"payoutDenominator","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"name":"owner","type":"address"},{"name":"operator","type":"address"}],"name":"isApprovedForAll","outputs":[{"name":"","type":"bool"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"name":"from","type":"address"},{"name":"to","type":"address"},{"name":"id","type":"uint256"},{"name":"value","type":"uint256"},{"name":"data","type":"bytes"}],"name":"safeTransferFrom","outputs":[],"payabl
e":false,"stateMutability":"nonpayable","type":"function"},{"anonymous":false,"inputs":[{"indexed":true,"name":"conditionId","type":"bytes32"},{"indexed":true,"name":"oracle","type":"address"},{"indexed":true,"name":"questionId","type":"bytes32"},{"indexed":false,"name":"outcomeSlotCount","type":"uint256"}],"name":"ConditionPreparation","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"name":"conditionId","type":"bytes32"},{"indexed":true,"name":"oracle","type":"address"},{"indexed":true,"name":"questionId","type":"bytes32"},{"indexed":false,"name":"outcomeSlotCount","type":"uint256"},{"indexed":false,"name":"payoutNumerators","type":"uint256[]"}],"name":"ConditionResolution","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"name":"stakeholder","type":"address"},{"indexed":false,"name":"collateralToken","type":"address"},{"indexed":true,"name":"parentCollectionId","type":"bytes32"},{"indexed":true,"name":"conditionId","type":"bytes32"},{"indexed":false,"name":"partition","type":"uint256[]"},{"indexed":false,"name":"amount","type":"uint256"}],"name":"PositionSplit","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"name":"stakeholder","type":"address"},{"indexed":false,"name":"collateralToken","type":"address"},{"indexed":true,"name":"parentCollectionId","type":"bytes32"},{"indexed":true,"name":"conditionId","type":"bytes32"},{"indexed":false,"name":"partition","type":"uint256[]"},{"indexed":false,"name":"amount","type":"uint256"}],"name":"PositionsMerge","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"name":"redeemer","type":"address"},{"indexed":true,"name":"collateralToken","type":"address"},{"indexed":true,"name":"parentCollectionId","type":"bytes32"},{"indexed":false,"name":"conditionId","type":"bytes32"},{"indexed":false,"name":"indexSets","type":"uint256[]"},{"indexed":false,"name":"payout","type":"uint256"}],"name":"PayoutRedemption","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"name":"operator"
,"type":"address"},{"indexed":true,"name":"from","type":"address"},{"indexed":true,"name":"to","type":"address"},{"indexed":false,"name":"id","type":"uint256"},{"indexed":false,"name":"value","type":"uint256"}],"name":"TransferSingle","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"name":"operator","type":"address"},{"indexed":true,"name":"from","type":"address"},{"indexed":true,"name":"to","type":"address"},{"indexed":false,"name":"ids","type":"uint256[]"},{"indexed":false,"name":"values","type":"uint256[]"}],"name":"TransferBatch","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"name":"owner","type":"address"},{"indexed":true,"name":"operator","type":"address"},{"indexed":false,"name":"approved","type":"bool"}],"name":"ApprovalForAll","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"name":"value","type":"string"},{"indexed":true,"name":"id","type":"uint256"}],"name":"URI","type":"event"},{"constant":false,"inputs":[{"name":"oracle","type":"address"},{"name":"questionId","type":"bytes32"},{"name":"outcomeSlotCount","type":"uint256"}],"name":"prepareCondition","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"questionId","type":"bytes32"},{"name":"payouts","type":"uint256[]"}],"name":"reportPayouts","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"collateralToken","type":"address"},{"name":"parentCollectionId","type":"bytes32"},{"name":"conditionId","type":"bytes32"},{"name":"partition","type":"uint256[]"},{"name":"amount","type":"uint256"}],"name":"splitPosition","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"collateralToken","type":"address"},{"name":"parentCollectionId","type":"bytes32"},{"name":"conditionId","type":"bytes32"},{"name":"partition","type":"uint256[]"},{"name":"amount","type":"uint256"}],"name":"mergePositions"
,"outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"collateralToken","type":"address"},{"name":"parentCollectionId","type":"bytes32"},{"name":"conditionId","type":"bytes32"},{"name":"indexSets","type":"uint256[]"}],"name":"redeemPositions","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[{"name":"conditionId","type":"bytes32"}],"name":"getOutcomeSlotCount","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"name":"oracle","type":"address"},{"name":"questionId","type":"bytes32"},{"name":"outcomeSlotCount","type":"uint256"}],"name":"getConditionId","outputs":[{"name":"","type":"bytes32"}],"payable":false,"stateMutability":"pure","type":"function"},{"constant":true,"inputs":[{"name":"parentCollectionId","type":"bytes32"},{"name":"conditionId","type":"bytes32"},{"name":"indexSet","type":"uint256"}],"name":"getCollectionId","outputs":[{"name":"","type":"bytes32"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"name":"collateralToken","type":"address"},{"name":"collectionId","type":"bytes32"}],"name":"getPositionId","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"pure","type":"function"}]'
)
market_maker_abi = json.loads(
'[{"constant":true,"inputs":[{"name":"interfaceId","type":"bytes4"}],"name":"supportsInterface","outputs":[{"name":"","type":"bool"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[],"name":"resume","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[],"name":"pmSystem","outputs":[{"name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"name":"outcomeTokenAmounts","type":"int256[]"},{"name":"collateralLimit","type":"int256"}],"name":"trade","outputs":[{"name":"netCost","type":"int256"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[],"name":"close","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[],"name":"withdrawFees","outputs":[{"name":"fees","type":"uint256"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[],"name":"renounceOwnership","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[],"name":"pause","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"fundingChange","type":"int256"}],"name":"changeFunding","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[],"name":"owner","outputs":[{"name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"isOwner","outputs":[{"name":"","type":"bool"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"whitelist","outputs":[{"name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"name":"outcomeTokenCost","type":"uint256"}],"name":"calcMarketFee","outputs":[{"name":"","type":"u
int256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"collateralToken","outputs":[{"name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"name":"_operator","type":"address"},{"name":"","type":"address"},{"name":"","type":"uint256[]"},{"name":"","type":"uint256[]"},{"name":"","type":"bytes"}],"name":"onERC1155BatchReceived","outputs":[{"name":"","type":"bytes4"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[],"name":"stage","outputs":[{"name":"","type":"uint8"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"funding","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"name":"","type":"uint256"}],"name":"conditionIds","outputs":[{"name":"","type":"bytes32"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"atomicOutcomeSlotCount","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"fee","outputs":[{"name":"","type":"uint64"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"name":"_fee","type":"uint64"}],"name":"changeFee","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"operator","type":"address"},{"name":"","type":"address"},{"name":"","type":"uint256"},{"name":"","type":"uint256"},{"name":"","type":"bytes"}],"name":"onERC1155Received","outputs":[{"name":"","type":"bytes4"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"newOwner","type":"address"}],"name":"transferOwnership","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[],"name
":"FEE_RANGE","outputs":[{"name":"","type":"uint64"}],"payable":false,"stateMutability":"view","type":"function"},{"anonymous":false,"inputs":[{"indexed":false,"name":"initialFunding","type":"uint256"}],"name":"AMMCreated","type":"event"},{"anonymous":false,"inputs":[],"name":"AMMPaused","type":"event"},{"anonymous":false,"inputs":[],"name":"AMMResumed","type":"event"},{"anonymous":false,"inputs":[],"name":"AMMClosed","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"name":"fundingChange","type":"int256"}],"name":"AMMFundingChanged","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"name":"newFee","type":"uint64"}],"name":"AMMFeeChanged","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"name":"fees","type":"uint256"}],"name":"AMMFeeWithdrawal","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"name":"transactor","type":"address"},{"indexed":false,"name":"outcomeTokenAmounts","type":"int256[]"},{"indexed":false,"name":"outcomeTokenNetCost","type":"int256"},{"indexed":false,"name":"marketFees","type":"uint256"}],"name":"AMMOutcomeTokenTrade","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"name":"previousOwner","type":"address"},{"indexed":true,"name":"newOwner","type":"address"}],"name":"OwnershipTransferred","type":"event"},{"constant":true,"inputs":[{"name":"outcomeTokenAmounts","type":"int256[]"}],"name":"calcNetCost","outputs":[{"name":"netCost","type":"int256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"name":"outcomeTokenIndex","type":"uint8"}],"name":"calcMarginalPrice","outputs":[{"name":"price","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"}]'
)
market_maker_factory_abi = json.loads(
'[{"constant":true,"inputs":[],"name":"implementationMaster","outputs":[{"name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"inputs":[],"payable":false,"stateMutability":"nonpayable","type":"constructor"},{"anonymous":false,"inputs":[{"indexed":true,"name":"creator","type":"address"},{"indexed":false,"name":"lmsrMarketMaker","type":"address"},{"indexed":false,"name":"pmSystem","type":"address"},{"indexed":false,"name":"collateralToken","type":"address"},{"indexed":false,"name":"conditionIds","type":"bytes32[]"},{"indexed":false,"name":"fee","type":"uint64"},{"indexed":false,"name":"funding","type":"uint256"}],"name":"LMSRMarketMakerCreation","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"name":"previousOwner","type":"address"},{"indexed":true,"name":"newOwner","type":"address"}],"name":"OwnershipTransferred","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"name":"initialFunding","type":"uint256"}],"name":"AMMCreated","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"name":"target","type":"address"},{"indexed":false,"name":"clone","type":"address"}],"name":"CloneCreated","type":"event"},{"constant":false,"inputs":[{"name":"consData","type":"bytes"}],"name":"cloneConstructor","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"pmSystem","type":"address"},{"name":"collateralToken","type":"address"},{"name":"conditionIds","type":"bytes32[]"},{"name":"fee","type":"uint64"},{"name":"whitelist","type":"address"},{"name":"funding","type":"uint256"}],"name":"createLMSRMarketMaker","outputs":[{"name":"lmsrMarketMaker","type":"address"}],"payable":false,"stateMutability":"nonpayable","type":"function"}]'
)
| 1,154.692308
| 7,374
| 0.659183
| 1,525
| 15,011
| 6.481967
| 0.098361
| 0.062317
| 0.131108
| 0.057865
| 0.839049
| 0.757714
| 0.722408
| 0.676378
| 0.624987
| 0.609914
| 0
| 0.018428
| 0.002265
| 15,011
| 12
| 7,375
| 1,250.916667
| 0.641584
| 0.001133
| 0
| 0
| 0
| 0.3
| 0.990195
| 0.990195
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.1
| 0
| 0.1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
f79c82336527701b43ddb503f8ccda04b069c4e0
| 8,030
|
py
|
Python
|
benchmarks/import_cost/functions_100_with_1_contract.py
|
kklein/icontract
|
718ef1733cc2cce6d3c8f59a5a37de96f8be6664
|
[
"MIT"
] | 244
|
2018-08-15T22:58:58.000Z
|
2022-03-12T16:10:39.000Z
|
benchmarks/import_cost/functions_100_with_1_contract.py
|
kklein/icontract
|
718ef1733cc2cce6d3c8f59a5a37de96f8be6664
|
[
"MIT"
] | 157
|
2018-08-29T21:36:47.000Z
|
2022-02-14T19:30:24.000Z
|
benchmarks/import_cost/functions_100_with_1_contract.py
|
kklein/icontract
|
718ef1733cc2cce6d3c8f59a5a37de96f8be6664
|
[
"MIT"
] | 23
|
2019-04-24T11:09:10.000Z
|
2022-02-14T15:56:26.000Z
|
#!/usr/bin/env python3
import icontract
@icontract.require(lambda x: x > 0)
def some_func0(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func1(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func2(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func3(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func4(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func5(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func6(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func7(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func8(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func9(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func10(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func11(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func12(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func13(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func14(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func15(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func16(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func17(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func18(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func19(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func20(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func21(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func22(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func23(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func24(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func25(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func26(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func27(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func28(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func29(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func30(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func31(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func32(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func33(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func34(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func35(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func36(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func37(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func38(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func39(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func40(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func41(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func42(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func43(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func44(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func45(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func46(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func47(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func48(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func49(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func50(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func51(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func52(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func53(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func54(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func55(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func56(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func57(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func58(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func59(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func60(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func61(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func62(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func63(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func64(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func65(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func66(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func67(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func68(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func69(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func70(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func71(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func72(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func73(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func74(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func75(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func76(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func77(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func78(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func79(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func80(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func81(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func82(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func83(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func84(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func85(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func86(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func87(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func88(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func89(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func90(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func91(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func92(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func93(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func94(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func95(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func96(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func97(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func98(x: int) -> None:
pass
@icontract.require(lambda x: x > 0)
def some_func99(x: int) -> None:
pass
| 15.964215
| 35
| 0.637733
| 1,306
| 8,030
| 3.844564
| 0.088055
| 0.318662
| 0.43816
| 0.458076
| 0.87393
| 0.87393
| 0.87393
| 0.87393
| 0.867556
| 0.867556
| 0
| 0.046001
| 0.212204
| 8,030
| 502
| 36
| 15.996016
| 0.747708
| 0.002615
| 0
| 0.664452
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.332226
| false
| 0.332226
| 0.003322
| 0
| 0.335548
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 11
|
f7a1d6fbbfdca9f78dcd50cacb077aa663d3fa10
| 5,238
|
py
|
Python
|
hallo/test/modules/math/test_simplify_fraction.py
|
joshcoales/Hallo
|
17145d8f76552ecd4cbc5caef8924bd2cf0cbf24
|
[
"MIT"
] | 1
|
2018-05-19T22:27:20.000Z
|
2018-05-19T22:27:20.000Z
|
hallo/test/modules/math/test_simplify_fraction.py
|
joshcoales/Hallo
|
17145d8f76552ecd4cbc5caef8924bd2cf0cbf24
|
[
"MIT"
] | 75
|
2015-09-26T18:07:18.000Z
|
2022-01-04T07:15:11.000Z
|
hallo/test/modules/math/test_simplify_fraction.py
|
SpangleLabs/Hallo
|
17145d8f76552ecd4cbc5caef8924bd2cf0cbf24
|
[
"MIT"
] | 1
|
2021-04-10T12:02:47.000Z
|
2021-04-10T12:02:47.000Z
|
from hallo.events import EventMessage
def test_fraction_simple(hallo_getter):
test_hallo = hallo_getter({"math"})
test_hallo.function_dispatcher.dispatch(
EventMessage(test_hallo.test_server, None, test_hallo.test_user, "fraction 6/4")
)
data = test_hallo.test_server.get_send_data(1, test_hallo.test_user, EventMessage)
assert "3/2." in data[0].text[-4:], "Simplify fraction fails for small fractions."
def test_fraction_complex(hallo_getter):
test_hallo = hallo_getter({"math"})
test_hallo.function_dispatcher.dispatch(
EventMessage(test_hallo.test_server, None, test_hallo.test_user, "fraction 360679/22")
)
data = test_hallo.test_server.get_send_data(1, test_hallo.test_user, EventMessage)
assert (
"32789/2." in data[0].text[-8:]
), "Simplify fraction fails for large fractions."
def test_fraction_multi_slash(hallo_getter):
test_hallo = hallo_getter({"math"})
test_hallo.function_dispatcher.dispatch(
EventMessage(test_hallo.test_server, None, test_hallo.test_user, "fraction 360679/22/2")
)
data = test_hallo.test_server.get_send_data(1, test_hallo.test_user, EventMessage)
assert (
"error" in data[0].text.lower()
), "Simplify fraction should return error when given more than 1 slash."
def test_fraction_integer(hallo_getter):
test_hallo = hallo_getter({"math"})
test_hallo.function_dispatcher.dispatch(
EventMessage(test_hallo.test_server, None, test_hallo.test_user, "fraction 22/2")
)
data = test_hallo.test_server.get_send_data(1, test_hallo.test_user, EventMessage)
assert (
" 11." == data[0].text[-4:]
), "Simplify fraction should return integer when result is integer."
def test_fraction_one_arg(hallo_getter):
test_hallo = hallo_getter({"math"})
test_hallo.function_dispatcher.dispatch(
EventMessage(test_hallo.test_server, None, test_hallo.test_user, "fraction 104779")
)
data = test_hallo.test_server.get_send_data(1, test_hallo.test_user, EventMessage)
assert (
"error" in data[0].text.lower()
), "Simplify fraction should return error when not given a fraction."
def test_fraction_unsimplify(hallo_getter):
test_hallo = hallo_getter({"math"})
test_hallo.function_dispatcher.dispatch(
EventMessage(test_hallo.test_server, None, test_hallo.test_user, "fraction 17/3")
)
data = test_hallo.test_server.get_send_data(1, test_hallo.test_user, EventMessage)
assert "17/3." == data[0].text[-5:]
def test_factors_float(hallo_getter):
test_hallo = hallo_getter({"math"})
test_hallo.function_dispatcher.dispatch(
EventMessage(test_hallo.test_server, None, test_hallo.test_user, "fraction 17.5/2")
)
data = test_hallo.test_server.get_send_data(1, test_hallo.test_user, EventMessage)
assert (
"error" in data[0].text.lower()
), "Simplify fraction should return error when given a float."
test_hallo.function_dispatcher.dispatch(
EventMessage(test_hallo.test_server, None, test_hallo.test_user, "fraction 17/2.2")
)
data = test_hallo.test_server.get_send_data(1, test_hallo.test_user, EventMessage)
assert (
"error" in data[0].text.lower()
), "Simplify fraction should return error when given a float."
test_hallo.function_dispatcher.dispatch(
EventMessage(test_hallo.test_server, None, test_hallo.test_user, "fraction 6.6/2.2")
)
data = test_hallo.test_server.get_send_data(1, test_hallo.test_user, EventMessage)
assert (
"error" in data[0].text.lower()
), "Simplify fraction should return error when given a float."
def test_factors_negative(hallo_getter):
test_hallo = hallo_getter({"math"})
test_hallo.function_dispatcher.dispatch(
EventMessage(test_hallo.test_server, None, test_hallo.test_user, "fraction 24/-10")
)
data = test_hallo.test_server.get_send_data(1, test_hallo.test_user, EventMessage)
assert (
" -12/5." in data[0].text[-7:]
), "Simplify fraction not working for negative denominators."
test_hallo.function_dispatcher.dispatch(
EventMessage(test_hallo.test_server, None, test_hallo.test_user, "fraction -24/10")
)
data = test_hallo.test_server.get_send_data(1, test_hallo.test_user, EventMessage)
assert (
" -12/5." in data[0].text[-7:]
), "Simplify fraction not working for negative numerators."
test_hallo.function_dispatcher.dispatch(
EventMessage(test_hallo.test_server, None, test_hallo.test_user, "fraction 24/10")
)
data = test_hallo.test_server.get_send_data(1, test_hallo.test_user, EventMessage)
assert (
" 12/5." in data[0].text[-6:]
), "Simplify fraction not working for negative numerators & denominators."
def test_factors_word(hallo_getter):
test_hallo = hallo_getter({"math"})
test_hallo.function_dispatcher.dispatch(
EventMessage(test_hallo.test_server, None, test_hallo.test_user, "factors hello/7")
)
data = test_hallo.test_server.get_send_data(1, test_hallo.test_user, EventMessage)
assert (
"error" in data[0].text.lower()
), "Simplify fraction should return error when invalid number used."
| 41.904
| 96
| 0.720122
| 721
| 5,238
| 4.950069
| 0.106796
| 0.186607
| 0.189409
| 0.138414
| 0.875595
| 0.871393
| 0.856823
| 0.840852
| 0.840852
| 0.840852
| 0
| 0.025712
| 0.168385
| 5,238
| 124
| 97
| 42.241935
| 0.793618
| 0
| 0
| 0.537736
| 0
| 0
| 0.190531
| 0
| 0
| 0
| 0
| 0
| 0.122642
| 1
| 0.084906
| false
| 0
| 0.009434
| 0
| 0.09434
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e398fc2a4b73410264c949acde42f0ab49d17f84
| 1,450
|
py
|
Python
|
tests/test_cell.py
|
Hourann/game-of-life
|
3b979ef992ad28810231c105888d0e0e0e582bb8
|
[
"Apache-2.0"
] | null | null | null |
tests/test_cell.py
|
Hourann/game-of-life
|
3b979ef992ad28810231c105888d0e0e0e582bb8
|
[
"Apache-2.0"
] | null | null | null |
tests/test_cell.py
|
Hourann/game-of-life
|
3b979ef992ad28810231c105888d0e0e0e582bb8
|
[
"Apache-2.0"
] | null | null | null |
from unittest import TestCase
from src.cell import Cell
class TestCell(TestCase):
    """Unit tests for the Conway Game-of-Life rules implemented by Cell.next().

    Each test follows given/when/then: build a Cell in a known state, apply
    one generation step with a neighbour count, and assert the resulting state.
    """
    def test_should_turn_dead_when_a_live_cell_have_less_than_2_alive_neighbour(self):
        """A live cell with fewer than 2 live neighbours dies (underpopulation).

        Renamed from '..._have_2_alive_neighbour': the body feeds 1 neighbour,
        and a live cell with exactly 2 neighbours survives (see the
        keep-alive test below), so the old name contradicted the scenario.
        """
        # given
        cell = Cell(True)
        number_of_alive_neighbour = 1
        # when
        cell.next(number_of_alive_neighbour)
        # then
        self.assertFalse(cell.state)
    def test_should_turn_dead_when_a_cell_have_more_than_3_alive_neighbour(self):
        """A live cell with more than 3 live neighbours dies (overpopulation)."""
        # given
        cell = Cell(True)
        number_of_alive_neighbour = 4
        # when
        cell.next(number_of_alive_neighbour)
        # then
        self.assertFalse(cell.state)
    def test_should_turn_alive_when_a_cell_have_3_alive_neighbour(self):
        """A dead cell with exactly 3 live neighbours becomes alive (reproduction)."""
        # given
        cell = Cell(False)
        number_of_alive_neighbour = 3
        # when
        cell.next(number_of_alive_neighbour)
        # then
        self.assertTrue(cell.state)
    def test_should_keep_dead_when_a_dead_cell_have_2_alive_neighbour(self):
        """A dead cell with 2 live neighbours stays dead."""
        # given
        cell = Cell(False)
        number_of_alive_neighbour = 2
        # when
        cell.next(number_of_alive_neighbour)
        # then
        self.assertFalse(cell.state)
    def test_should_keep_alive_when_a_alive_cell_have_2_alive_neighbour(self):
        """A live cell with 2 live neighbours stays alive (survival)."""
        # given
        cell = Cell(True)
        number_of_alive_neighbour = 2
        # when
        cell.next(number_of_alive_neighbour)
        # then
        self.assertTrue(cell.state)
| 22.65625
| 81
| 0.655862
| 187
| 1,450
| 4.631016
| 0.197861
| 0.242494
| 0.150115
| 0.254042
| 0.841801
| 0.841801
| 0.817552
| 0.774827
| 0.774827
| 0.764434
| 0
| 0.009615
| 0.282759
| 1,450
| 63
| 82
| 23.015873
| 0.823077
| 0.054483
| 0
| 0.607143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.178571
| 1
| 0.178571
| false
| 0
| 0.071429
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
e3bd6ec2674605c47c31fbfbe8907ffce13d874d
| 3,479
|
py
|
Python
|
Sender/plugins/__init__.py
|
Keys-007/AnonymousSender
|
d32368fc713e6dac45beba6089766aebaa708a8d
|
[
"MIT"
] | null | null | null |
Sender/plugins/__init__.py
|
Keys-007/AnonymousSender
|
d32368fc713e6dac45beba6089766aebaa708a8d
|
[
"MIT"
] | null | null | null |
Sender/plugins/__init__.py
|
Keys-007/AnonymousSender
|
d32368fc713e6dac45beba6089766aebaa708a8d
|
[
"MIT"
] | null | null | null |
'''
MIT License

Copyright (c) 2021 @InukaAisth

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
from os.path import dirname, basename, isfile, join
import glob
# Discover every sibling *.py module in this package directory so the client
# can load plugins with a star-import; __init__.py itself is excluded.
modules = glob.glob(join(dirname(__file__), "*.py"))
__all__ = []
for _module_path in modules:
    if isfile(_module_path) and not _module_path.endswith("__init__.py"):
        # Module name = file name without the trailing ".py".
        __all__.append(basename(_module_path)[:-3])
| 42.950617
| 84
| 0.566542
| 469
| 3,479
| 4.4371
| 0.298507
| 0.043248
| 0.059106
| 0.07112
| 0.823162
| 0.823162
| 0.818837
| 0.818837
| 0.818837
| 0.818837
| 0
| 0.002266
| 0.365622
| 3,479
| 80
| 85
| 43.4875
| 0.885365
| 0.933027
| 0
| 0
| 0
| 0
| 0.066964
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 8
|
e3c384a6f6a62c272174f99dd5c829d195cb4262
| 227
|
py
|
Python
|
pybarycuda/api.py
|
postmalloc/barycuda
|
8b27cc1fd3839c6b6a089e3de816f3cf3e32367a
|
[
"MIT"
] | 2
|
2020-10-21T03:20:05.000Z
|
2021-05-31T04:31:05.000Z
|
pybarycuda/api.py
|
postmalloc/barycuda
|
8b27cc1fd3839c6b6a089e3de816f3cf3e32367a
|
[
"MIT"
] | null | null | null |
pybarycuda/api.py
|
postmalloc/barycuda
|
8b27cc1fd3839c6b6a089e3de816f3cf3e32367a
|
[
"MIT"
] | null | null | null |
import pybarycuda.core as bary
def point_in_simplex(pts, n, dim, verts):
    """Return a list of bools from the native point-in-simplex test.

    Thin wrapper over ``pybarycuda.core.point_in_simplex``; each element of
    the raw native result is coerced to ``bool``. (Presumably one flag per
    input point — confirm against the core implementation.)
    """
    raw = bary.point_in_simplex(pts, n, dim, verts)
    return [bool(flag) for flag in raw]
def bary_simplex(pts, n, dim, verts):
    """Delegate to the native ``pybarycuda.core.bary_simplex`` and return its result unchanged."""
    result = bary.bary_simplex(pts, n, dim, verts)
    return result
| 32.428571
| 67
| 0.735683
| 40
| 227
| 4.025
| 0.425
| 0.248447
| 0.273292
| 0.347826
| 0.68323
| 0.68323
| 0.322981
| 0
| 0
| 0
| 0
| 0
| 0.132159
| 227
| 7
| 68
| 32.428571
| 0.817259
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0.2
| 0.4
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
58251b75f08dd2a6b3278b73cfbdb1e8358c73fc
| 12,535
|
py
|
Python
|
ixnetwork_restpy/testplatform/sessions/ixnetwork/availablehardware/chassis/card/card.py
|
rfrye-github/ixnetwork_restpy
|
23eeb24b21568a23d3f31bbd72814ff55eb1af44
|
[
"MIT"
] | null | null | null |
ixnetwork_restpy/testplatform/sessions/ixnetwork/availablehardware/chassis/card/card.py
|
rfrye-github/ixnetwork_restpy
|
23eeb24b21568a23d3f31bbd72814ff55eb1af44
|
[
"MIT"
] | null | null | null |
ixnetwork_restpy/testplatform/sessions/ixnetwork/availablehardware/chassis/card/card.py
|
rfrye-github/ixnetwork_restpy
|
23eeb24b21568a23d3f31bbd72814ff55eb1af44
|
[
"MIT"
] | null | null | null |
# MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class Card(Base):
    """View version and type information for a chassis card.

    Auto-generated REST binding: encapsulates the list of ``card`` resources
    managed by the system. Retrieve resources from the server with
    ``Card.find()``; attribute access delegates to the ``Base`` helpers via
    the ``_SDM_ATT_MAP`` name translation table.
    """
    __slots__ = ()
    # Server-side SDM resource name for this class.
    _SDM_NAME = 'card'
    # Maps Python-side attribute names to the server's camelCase names.
    _SDM_ATT_MAP = {
        'AggregationMode': 'aggregationMode',
        'AggregationSupported': 'aggregationSupported',
        'AvailableModes': 'availableModes',
        'CardId': 'cardId',
        'Description': 'description',
    }
    def __init__(self, parent):
        super(Card, self).__init__(parent)
    @property
    def Aggregation(self):
        """Child resource accessor.

        Returns
        -------
        - obj(...aggregation.aggregation.Aggregation): An instance of the Aggregation class

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        # Imported lazily to avoid a circular import between generated modules.
        from ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.chassis.card.aggregation.aggregation import Aggregation
        return Aggregation(self)
    @property
    def Port(self):
        """Child resource accessor.

        Returns
        -------
        - obj(...card.port.port.Port): An instance of the Port class

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        # Imported lazily to avoid a circular import between generated modules.
        from ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.chassis.card.port.port import Port
        return Port(self)
    @property
    def AggregationMode(self):
        """Gets or sets the aggregation mode.

        Returns
        -------
        - str: one of the SDM aggregation-mode enum values (e.g. notSupported,
          mixed, normal, tenGigAggregation, fortyGigAggregation, ...,
          uhdOneHundredThirtyTwoByTenGigFanOut). See the IxNetwork SDM
          reference for the full list.
        """
        return self._get_attribute(self._SDM_ATT_MAP['AggregationMode'])
    @AggregationMode.setter
    def AggregationMode(self, value):
        self._set_attribute(self._SDM_ATT_MAP['AggregationMode'], value)
    @property
    def AggregationSupported(self):
        """
        Returns
        -------
        - bool: (read only) If true, indicates that the card is operating in resource group mode and not in normal mode
        """
        return self._get_attribute(self._SDM_ATT_MAP['AggregationSupported'])
    @property
    def AvailableModes(self):
        """Gets the supported port resource group modes on the card.

        Returns
        -------
        - list(str): each entry is one of the SDM aggregation-mode enum values
          (same value set as ``AggregationMode``).
        """
        return self._get_attribute(self._SDM_ATT_MAP['AvailableModes'])
    @property
    def CardId(self):
        """
        Returns
        -------
        - number: Identifier for the card on the chassis.
        """
        return self._get_attribute(self._SDM_ATT_MAP['CardId'])
    @property
    def Description(self):
        """
        Returns
        -------
        - str: Description of the card.
        """
        return self._get_attribute(self._SDM_ATT_MAP['Description'])
    def update(self, AggregationMode=None):
        """Updates card resource on the server.

        Args
        ----
        - AggregationMode (str): the aggregation mode; one of the SDM
          aggregation-mode enum values (see ``AggregationMode``).

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
    def find(self, AggregationMode=None, AggregationSupported=None, AvailableModes=None, CardId=None, Description=None):
        """Finds and retrieves card resources from the server.

        All named parameters are evaluated on the server using regex and can
        be used to selectively retrieve card resources. For an exact match
        ensure the parameter value starts with ^ and ends with $. With no
        parameters, all card resources are retrieved.

        Args
        ----
        - AggregationMode (str): SDM aggregation-mode enum value (see ``AggregationMode``).
        - AggregationSupported (bool): (read only) true when the card operates in resource group mode.
        - AvailableModes (list(str)): supported port resource group modes on the card.
        - CardId (number): Identifier for the card on the chassis.
        - Description (str): Description of the card.

        Returns
        -------
        - self: This instance with matching card resources retrieved from the server available through an iterator or index

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
    def read(self, href):
        """Retrieves a single instance of card data from the server.

        Args
        ----
        - href (str): An href to the instance to be retrieved

        Returns
        -------
        - self: This instance with the card resources from the server available through an iterator or index

        Raises
        ------
        - NotFoundError: The requested resource does not exist on the server
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._read(href)
    def RefreshInfo(self):
        """Executes the refreshInfo operation on the server.

        Refresh the hardware information.

        Raises
        ------
        - NotFoundError: The requested resource does not exist on the server
        - ServerError: The server has encountered an uncategorized error condition
        """
        payload = { "Arg1": self }
        return self._execute('refreshInfo', payload=payload, response_object=None)
| 66.322751
| 1,168
| 0.751496
| 1,012
| 12,535
| 9.241107
| 0.261858
| 0.017323
| 0.008661
| 0.011121
| 0.733533
| 0.709046
| 0.703486
| 0.703486
| 0.691617
| 0.676647
| 0
| 0.000883
| 0.186518
| 12,535
| 188
| 1,169
| 66.675532
| 0.91625
| 0.751974
| 0
| 0.142857
| 0
| 0
| 0.105072
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.265306
| false
| 0
| 0.081633
| 0
| 0.653061
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
5853ad5db62e6387567f5260866468228cd5174d
| 134
|
py
|
Python
|
dash/lambda.py
|
tobias-pook/stocktinker
|
df328673c30fb606024529a01328c3cfb8558d0c
|
[
"MIT"
] | null | null | null |
dash/lambda.py
|
tobias-pook/stocktinker
|
df328673c30fb606024529a01328c3cfb8558d0c
|
[
"MIT"
] | 9
|
2017-12-08T18:33:24.000Z
|
2018-02-05T21:02:30.000Z
|
dash/lambda.py
|
tobias-pook/stocktinker
|
df328673c30fb606024529a01328c3cfb8558d0c
|
[
"MIT"
] | 4
|
2018-08-07T02:47:09.000Z
|
2020-11-16T20:35:43.000Z
|
import awsgi
from dash.index import app
def lambda_handler(event, context):
    """AWS Lambda entry point: bridge the API Gateway event to the Dash app's WSGI server via awsgi."""
    response = awsgi.response(app.server, event, context)
    return response
| 26.8
| 53
| 0.761194
| 19
| 134
| 5.315789
| 0.736842
| 0.237624
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.156716
| 134
| 5
| 53
| 26.8
| 0.893805
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.5
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
5881f592a6ee3a1c4ea6811e904541e5e1c34f64
| 72
|
py
|
Python
|
py_pdf_term/analysis/__init__.py
|
kumachan-mis/pdf-slides-term
|
cf3319e4de723bd9424d23141803342d3c649103
|
[
"MIT"
] | 1
|
2021-01-08T16:05:30.000Z
|
2021-01-08T16:05:30.000Z
|
py_pdf_term/analysis/__init__.py
|
kumachan-mis/py-slides-term
|
1e9337b97ae8968950489e728fc7aeeeb7eb1f4b
|
[
"MIT"
] | 21
|
2021-01-03T13:50:59.000Z
|
2021-06-17T00:27:49.000Z
|
py_pdf_term/analysis/__init__.py
|
kumachan-mis/pdf-slides-term
|
cf3319e4de723bd9424d23141803342d3c649103
|
[
"MIT"
] | null | null | null |
from ._analysis import * # NoQA
from ._analysis import __all__ # NoQA
| 24
| 38
| 0.736111
| 9
| 72
| 5.222222
| 0.555556
| 0.510638
| 0.765957
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.194444
| 72
| 2
| 39
| 36
| 0.810345
| 0.125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
588e684c46ee6ab5e5acd4c87742b6b701b0b817
| 530
|
py
|
Python
|
eval_covid20cases_timm-regnetx_002_Emboss.py
|
BrunoKrinski/segtool
|
cb604b5f38104c43a76450136e37c3d1c4b6d275
|
[
"MIT"
] | null | null | null |
eval_covid20cases_timm-regnetx_002_Emboss.py
|
BrunoKrinski/segtool
|
cb604b5f38104c43a76450136e37c3d1c4b6d275
|
[
"MIT"
] | null | null | null |
eval_covid20cases_timm-regnetx_002_Emboss.py
|
BrunoKrinski/segtool
|
cb604b5f38104c43a76450136e37c3d1c4b6d275
|
[
"MIT"
] | null | null | null |
import os
# Run the five cross-validation evaluation configs (folds 0-4) for the
# covid20cases / unetplusplus / timm-regnetx_002 / Emboss experiment.
COMMAND_TEMPLATE = (
    "python main.py --configs "
    "configs/eval_covid20cases_unetplusplus_timm-regnetx_002_{}_Emboss.yml"
)
ls = [COMMAND_TEMPLATE.format(fold) for fold in range(5)]
for command in ls:
    os.system(command)
| 48.181818
| 100
| 0.843396
| 80
| 530
| 5.2125
| 0.3
| 0.119904
| 0.143885
| 0.227818
| 0.901679
| 0.901679
| 0.901679
| 0.901679
| 0.901679
| 0.901679
| 0
| 0.06012
| 0.058491
| 530
| 11
| 101
| 48.181818
| 0.775551
| 0
| 0
| 0
| 0
| 0
| 0.875706
| 0.640301
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.111111
| 0
| 0.111111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
5892180353da79459e8414e9e20d411c1ab7e2c9
| 31,140
|
py
|
Python
|
hanibal/ans_reporte/crear_informe_caja_excel.py
|
Christian-Castro/castro_odoo8
|
8247fdb20aa39e043b6fa0c4d0af509462ab3e00
|
[
"Unlicense"
] | null | null | null |
hanibal/ans_reporte/crear_informe_caja_excel.py
|
Christian-Castro/castro_odoo8
|
8247fdb20aa39e043b6fa0c4d0af509462ab3e00
|
[
"Unlicense"
] | null | null | null |
hanibal/ans_reporte/crear_informe_caja_excel.py
|
Christian-Castro/castro_odoo8
|
8247fdb20aa39e043b6fa0c4d0af509462ab3e00
|
[
"Unlicense"
] | null | null | null |
# -*- coding: utf-8 -*-
import openpyxl
from openpyxl import Workbook
import openpyxl.worksheet
import unicodedata
from copy import deepcopy
from openpyxl.chart import (
Reference,
Series,
BarChart
)
from openpyxl.chart.marker import DataPoint
from openpyxl.drawing.fill import PatternFillProperties, ColorChoice
from openpyxl import Workbook
from openpyxl.styles import PatternFill, Border, Side, Alignment, Protection, Font
from openpyxl.styles.borders import Border, Side
from openpyxl.drawing.image import Image
from datetime import datetime, date, timedelta
import time
import locale
global root
def crear_wb_informe():
    """Create and return a fresh openpyxl Workbook for the cash report."""
    libro = openpyxl.Workbook()
    return libro
def unicodeText(text):
    """Return *text* as a unicode string, decoding UTF-8 byte strings.

    Bug fix: the original body called ``unicodedata.unicode`` — a function
    that does not exist in the ``unicodedata`` module — so the try branch
    raised AttributeError (not the caught TypeError) and, even on a
    hypothetical success, the function fell through returning None.

    Args:
        text: a byte string, a unicode string, or any other value.

    Returns:
        The UTF-8 decoded string when *text* is a decodable byte string;
        otherwise *text* unchanged.
    """
    try:
        return text.decode('utf-8')
    except (AttributeError, UnicodeDecodeError):
        # Already unicode (no .decode in py3), not text, or not valid UTF-8.
        return text
def crea_hoja_info(wb, title, flag):
    """Configure the active sheet of *wb* for printing and set its title.

    Args:
        wb: workbook whose active worksheet is configured (openpyxl Workbook).
        title: title assigned to the worksheet.
        flag: layout preset selector (0 or 1). Both presets apply the same
            margins and disable fit-to-width; the duplicated branches of the
            original have been merged.

    Returns:
        The configured active worksheet.
    """
    sheet = wb.active
    if flag in (0, 1):
        sheet.page_margins.left = 0.1
        sheet.page_margins.right = 0.1
        sheet.page_margins.top = 0.5
        sheet.page_margins.bottom = 0.5
        # Bug fix: the flag == 0 branch previously assigned the misspelled
        # attribute 'fitToWidht', which openpyxl silently ignored.
        sheet.page_setup.fitToWidth = False
    sheet.title = title
    return sheet
def crea_hoja_info_pdf(wb, title, flag):
    """Configure the active sheet of *wb* for PDF output and set its title.

    NOTE(review): this is byte-for-byte identical in intent to
    ``crea_hoja_info``; consider delegating to it in a follow-up.

    Args:
        wb: workbook whose active worksheet is configured (openpyxl Workbook).
        title: title assigned to the worksheet.
        flag: layout preset selector (0 or 1). Both presets apply the same
            margins and disable fit-to-width; the duplicated branches of the
            original have been merged.

    Returns:
        The configured active worksheet.
    """
    sheet = wb.active
    if flag in (0, 1):
        sheet.page_margins.left = 0.1
        sheet.page_margins.right = 0.1
        sheet.page_margins.top = 0.5
        sheet.page_margins.bottom = 0.5
        # Bug fix: the flag == 0 branch previously assigned the misspelled
        # attribute 'fitToWidht', which openpyxl silently ignored.
        sheet.page_setup.fitToWidth = False
    sheet.title = title
    return sheet
def border_tabla(sheet, col, colfin, fil, filfin, styleleft, styletop, styleright, stylebottom):
    """Apply one Border (built from the four side styles) to every cell in the
    inclusive rectangle rows fil..filfin x columns col..colfin.

    The single Border object is shared by all cells, as in the original
    (openpyxl style objects are treated as immutable values).
    """
    estilo = Border(
        left=Side(style=styleleft),
        top=Side(style=styletop),
        right=Side(style=styleright),
        bottom=Side(style=stylebottom),
    )
    # Inclusive ranges replace the original's colfin+1 / filfin+2 ... -1
    # offset arithmetic; the set of cells touched is identical.
    for fila_idx in range(fil, filfin + 1):
        for col_idx in range(col, colfin + 1):
            sheet.cell(row=fila_idx, column=col_idx).border = estilo
def columnas_filas(sheet, flag, celda, value):
    """Set a column width (flag == 0) or a row height (flag == 1).

    Args:
        sheet: worksheet whose dimensions are modified.
        flag: 0 to set the width of column *celda* (a letter); 1 to set the
            height of row *celda* (a number, converted with int()).
        celda: column letter or row number, depending on *flag*.
        value: the width or height to assign.
    """
    if flag == 0:
        sheet.column_dimensions[celda].width = value
    elif flag == 1:
        sheet.row_dimensions[int(celda)].height = value
def poner_border(sheet, fil, col, styleleft, styletop, styleright, stylebottom):
    """Apply a Border built from the four given side styles to the single cell
    at row *fil*, column *col*."""
    lados = {
        'left': Side(style=styleleft),
        'top': Side(style=styletop),
        'right': Side(style=styleright),
        'bottom': Side(style=stylebottom),
    }
    sheet.cell(row=fil, column=col).border = Border(**lados)
def Informe(sheet, dic,lista_alumnos,cant_alumno,filtro):
columnas_filas(sheet, 0, 'A', 10.00)
columnas_filas(sheet, 0, 'B', 5.00)
columnas_filas(sheet, 0, 'C', 10.00)
columnas_filas(sheet, 0, 'D', 7.00)
columnas_filas(sheet, 0, 'E', 12.00)
columnas_filas(sheet, 0, 'F', 10.00)
columnas_filas(sheet, 0, 'G', 10.00)
columnas_filas(sheet, 0, 'H', 7.00)
columnas_filas(sheet, 0, 'I', 10.00)
alignment_title = Alignment(horizontal='center', vertical='center')
fuente = Font(bold=False, size=6, name='arial')
fuente3 = Font(bold=True, size=8, name='arial')
fuente2 = Font(bold=True, size=6, name='arial')
fila = 3
fila1 = 2
acum=1
cont=0
col=2
col1=4
fil=4
coli=2
colf=2
sheet.merge_cells('A2:I2')
sheet['A2'].alignment = alignment_title.copy(wrapText=True,horizontal='center', vertical='top')
sheet['A2'].font = fuente3
sheet['A2']= 'REPORTE DE CAJA'
sheet['H1'].alignment = alignment_title.copy(wrapText=True,horizontal='right', vertical='top')
sheet['H1'].font = fuente2
sheet['H1']= 'Usuario'
usuario_id=str(dic['usuario_id'].encode('utf-8'))
sheet['I1'].alignment = alignment_title.copy(wrapText=True,horizontal='left', vertical='top')
sheet['I1'].font = fuente
sheet['I1']= str(dic['usuario_id'].encode('utf-8'))
sheet['A1'].alignment = alignment_title.copy(wrapText=True,horizontal='left', vertical='top')
sheet['A1'].font = fuente2
sheet['A1']= 'Cia'
sheet.merge_cells('B1:C1')
sheet['B1'].alignment = alignment_title.copy(wrapText=True,horizontal='left', vertical='top')
sheet['B1'].font = fuente
sheet['B1']= str(dic['company_id'].encode('utf-8'))
sheet['A3'].alignment = alignment_title.copy(wrapText=True,horizontal='left', vertical='top')
sheet['A3'].font = fuente2
sheet['A3']= 'Fecha Emision:'
#fecha_actual = datetime.strftime(datetime.now(), '%d-%m-%Y %H:%M:%S')
fecha_actual = dic['fecha_corte']
sheet.merge_cells('B3:C3')
sheet['B3'].alignment = alignment_title.copy(wrapText=True,horizontal='left', vertical='top')
sheet['B3'].font = fuente
sheet['B3']= fecha_actual
poner_border(sheet,1,1,'medium','medium','none','none')
poner_border(sheet,1,2,'none','medium','none','none')
poner_border(sheet,1,3,'none','medium','none','none')
poner_border(sheet,1,4,'none','medium','none','none')
poner_border(sheet,1,5,'none','medium','none','none')
poner_border(sheet,1,5,'none','medium','none','none')
poner_border(sheet,1,6,'none','medium','none','none')
poner_border(sheet,1,7,'none','medium','none','none')
poner_border(sheet,1,8,'none','medium','none','none')
poner_border(sheet,1,9,'none','medium','medium','none')
poner_border(sheet,2,1,'medium','none','none','none')
poner_border(sheet,2,9,'none','none','medium','none')
poner_border(sheet,3,1,'medium','none','none','medium')
poner_border(sheet,3,2,'none','none','none','medium')
poner_border(sheet,3,3,'none','none','none','medium')
poner_border(sheet,3,4,'none','none','none','medium')
poner_border(sheet,3,5,'none','none','none','medium')
poner_border(sheet,3,6,'none','none','none','medium')
poner_border(sheet,3,7,'none','none','none','medium')
poner_border(sheet,3,8,'none','none','none','medium')
poner_border(sheet,3,9,'none','none','medium','medium')
fecha_ini=dic['fecha_desde']
fecha_fin=dic['fecha_hasta']
fecha=str(" Desde: "+dic['fecha_desde']+" Hasta: "+dic['fecha_hasta'])
sheet.merge_cells('D4:F4')
sheet['D4'].alignment = alignment_title.copy(wrapText=True,horizontal='center', vertical='top')
sheet['D4'].font = fuente2
sheet['D4']= str(" Desde: "+dic['fecha_desde']+" Hasta: "+dic['fecha_hasta'])
sheet['A5'].alignment = alignment_title.copy(wrapText=True,horizontal='left', vertical='top')
sheet['A5'].font = fuente2
sheet['A5']= 'Origen'
sheet['B5'].alignment = alignment_title.copy(wrapText=True,horizontal='left', vertical='top')
sheet['B5'].font = fuente2
sheet['B5']= 'Fecha'
sheet['C5'].alignment = alignment_title.copy(wrapText=True,horizontal='left', vertical='top')
sheet['C5'].font = fuente2
sheet['C5']= 'FACTURA'
sheet['D5'].alignment = alignment_title.copy(wrapText=True,horizontal='left', vertical='top')
sheet['D5'].font = fuente2
sheet['D5']= 'MONTO'
sheet['E5'].alignment = alignment_title.copy(wrapText=True,horizontal='left', vertical='top')
sheet['E5'].font = fuente2
sheet['E5']= 'Alumno'
sheet['F5'].alignment = alignment_title.copy(wrapText=True,horizontal='left', vertical='top')
sheet['F5'].font = fuente2
sheet['F5']= 'Banco'
sheet['G5'].alignment = alignment_title.copy(wrapText=True,horizontal='left', vertical='top')
sheet['G5'].font = fuente2
sheet['G5']= 'DOCUMENTO'
sheet['H5'].alignment = alignment_title.copy(wrapText=True,horizontal='left', vertical='top')
sheet['H5'].font = fuente2
sheet['H5']= 'Fecha Cheque'
sheet['I5'].alignment = alignment_title.copy(wrapText=True,horizontal='left', vertical='top')
sheet['I5'].font = fuente2
sheet['I5']= 'Comentario'
fila=6
total_general=0.0
saldo_general=0.0
dic={}
lista_datos=[]
for recorrer in lista_alumnos:
sheet['A'+str(fila)].alignment = alignment_title.copy(wrapText=True,horizontal='left', vertical='top')
sheet['A'+str(fila)].font = fuente2
dic={}
if recorrer['tipo']=='efe':
sheet['A'+str(fila)]= 'Efectivo'
dic['tipo']='Efectivo'
elif recorrer['tipo']=='ch':
sheet['A'+str(fila)]= 'Cheque'
dic['tipo']='Cheque'
elif recorrer['tipo']=='tc':
sheet['A'+str(fila)]= 'Tarjeta de Credito'
dic['tipo']='Tarjeta de Credito'
elif recorrer['tipo']=='dep':
sheet['A'+str(fila)]= 'Deposito Bancario'
dic['tipo']='Deposito Bancario'
elif recorrer['tipo']=='trans':
sheet['A'+str(fila)]= 'Transferencia Bancaria'
dic['tipo']='Transferencia Bancaria'
elif recorrer['tipo']=='nc':
sheet['A'+str(fila)]= 'Nota de Credito'
dic['tipo']='Nota de Credito'
elif recorrer['tipo']=='rti':
sheet['A'+str(fila)]= 'Retencion iva'
dic['tipo']='Retencion iva'
elif recorrer['tipo']=='rtf':
sheet['A'+str(fila)]= 'Retencion fuente'
dic['tipo']='Retencion fuente'
elif recorrer['tipo']=='liq':
sheet['A'+str(fila)]= 'Liquidacion'
dic['tipo']='Liquidacion'
fila=fila+1
saldo=0.0
total=0.0
dic['cantidad']=len(recorrer['detalle'])
for det in recorrer['detalle']:
sheet['A'+str(fila)].alignment = alignment_title.copy(wrapText=True,horizontal='left', vertical='top')
sheet['A'+str(fila)].font = fuente
sheet['A'+str(fila)]= det['numero']
sheet['B'+str(fila)].alignment = alignment_title.copy(wrapText=True,horizontal='left', vertical='top')
sheet['B'+str(fila)].font = fuente
sheet['B'+str(fila)]= det['fecha_pago']
sheet['C'+str(fila)].alignment = alignment_title.copy(wrapText=True,horizontal='justify', vertical='top')
sheet['C'+str(fila)].font = fuente
sheet['C'+str(fila)]= det['factura']
sheet['D'+str(fila)].alignment = alignment_title.copy(wrapText=True,horizontal='right', vertical='top')
sheet['D'+str(fila)].font = fuente
sheet['D'+str(fila)]= "{:,}".format(float(det['monto'])).replace(',','~').replace('.',',').replace('~','.')
sheet['E'+str(fila)].alignment = alignment_title.copy(wrapText=True,horizontal='justify', vertical='top')
sheet['E'+str(fila)].font = fuente
if det['cliente']==False:
sheet['E'+str(fila)]= ''
else:
sheet['E'+str(fila)]= det['cliente']
sheet['F'+str(fila)].alignment = alignment_title.copy(wrapText=True,horizontal='justify', vertical='top')
sheet['F'+str(fila)].font = fuente
if det['banco']==False:
sheet['F'+str(fila)]= ''
elif det['banco']==0:
sheet['F'+str(fila)]= ''
else:
sheet['F'+str(fila)]= det['banco']
sheet['G'+str(fila)].alignment = alignment_title.copy(wrapText=True,horizontal='justify', vertical='top')
sheet['G'+str(fila)].font = fuente
if det['documento']==False:
sheet['G'+str(fila)]= ''
else:
sheet['G'+str(fila)]= det['documento']
sheet['H'+str(fila)].alignment = alignment_title.copy(wrapText=True,horizontal='justify', vertical='top')
sheet['H'+str(fila)].font = fuente
if det['fecha_ch']==False:
sheet['H'+str(fila)]= ''
else:
sheet['H'+str(fila)]= det['fecha_ch']
sheet['I'+str(fila)].alignment = alignment_title.copy(wrapText=True,horizontal='justify', vertical='top')
sheet['I'+str(fila)].font = fuente
if det['comentario']==False:
sheet['I'+str(fila)]= ''
elif det['comentario']==0:
sheet['I'+str(fila)]= ''
else:
sheet['I'+str(fila)]= det['comentario']
total=total+float(det['monto'])
fila=fila+1
sheet['C'+str(fila)].alignment = alignment_title.copy(wrapText=True,horizontal='left', vertical='top')
sheet['C'+str(fila)].font = fuente2
sheet['C'+str(fila)]= 'TOTAL'
sheet['D'+str(fila)].alignment = alignment_title.copy(wrapText=True,horizontal='right', vertical='top')
sheet['D'+str(fila)].font = fuente2
sheet['D'+str(fila)]= "{:,}".format(float(total)).replace(',','~').replace('.',',').replace('~','.')
dic['total']=total
total_general = total_general + total
fila= fila + 1
lista_datos.append(dic)
sheet['C'+str(fila+1)].alignment = alignment_title.copy(wrapText=True,horizontal='left', vertical='top')
sheet['C'+str(fila+1)].font = fuente2
sheet['C'+str(fila+1)]= 'TOTAL GENERAL'
sheet['D'+str(fila+1)].alignment = alignment_title.copy(wrapText=True,horizontal='right', vertical='top')
sheet['D'+str(fila+1)].font = fuente2
sheet['D'+str(fila+1)]= "{:,}".format(float(total_general)).replace(',','~').replace('.',',').replace('~','.')
sheet.merge_cells('B'+str(fila+2)+':C'+str(fila+2))
sheet['B'+str(fila+2)].alignment = alignment_title.copy(wrapText=True,horizontal='left', vertical='top')
sheet['B'+str(fila+2)].font = fuente2
sheet['B'+str(fila+2)]= 'RESUMEN DE VALORES'
fila=fila+3
for d in lista_datos:
sheet['A'+str(fila)].alignment = alignment_title.copy(wrapText=True,horizontal='left', vertical='top')
sheet['A'+str(fila)].font = fuente2
sheet['A'+str(fila)]= d['tipo']
sheet['D'+str(fila)].alignment = alignment_title.copy(wrapText=True,horizontal='right', vertical='top')
sheet['D'+str(fila)].font = fuente2
sheet['D'+str(fila)]= "{:,}".format(float(d['total'])).replace(',','~').replace('.',',').replace('~','.')
sheet['H'+str(fila)].alignment = alignment_title.copy(wrapText=True,horizontal='right', vertical='top')
sheet['H'+str(fila)].font = fuente2
sheet['H'+str(fila)]= d['cantidad']
sheet['I'+str(fila)].alignment = alignment_title.copy(wrapText=True,horizontal='left', vertical='top')
sheet['I'+str(fila)].font = fuente2
sheet['I'+str(fila)]= 'VECES'
fila=fila+1
sheet['D'+str(fila+1)].alignment = alignment_title.copy(wrapText=True,horizontal='right', vertical='top')
sheet['D'+str(fila+1)].font = fuente2
sheet['D'+str(fila+1)]= "{:,}".format(float(total_general)).replace(',','~').replace('.',',').replace('~','.')
columnas_filas(sheet, 1, str(fila+5), 10.00)
sheet.merge_cells('D'+str(fila+5)+':F'+str(fila+5))
sheet['D'+str(fila+5)].alignment = alignment_title.copy(wrapText=True,horizontal='center', vertical='center')
sheet['D'+str(fila+5)].font = fuente2
sheet['D'+str(fila+5)]= usuario_id
poner_border(sheet,fila+5,4,'none','thin','none','none')
poner_border(sheet,fila+5,5,'none','thin','none','none')
poner_border(sheet,fila+5,6,'none','thin','none','none')
columnas_filas(sheet, 1, str(fila+6), 8.00)
sheet.merge_cells('D'+str(fila+6)+':F'+str(fila+6))
sheet['D'+str(fila+6)].alignment = alignment_title.copy(wrapText=True,horizontal='center', vertical='center')
sheet['D'+str(fila+6)].font = fuente2
sheet['D'+str(fila+6)]= fecha
columnas_filas(sheet, 1, str(fila+7), 8.00)
sheet.merge_cells('D'+str(fila+7)+':F'+str(fila+7))
sheet['D'+str(fila+7)].alignment = alignment_title.copy(wrapText=True,horizontal='center', vertical='center')
sheet['D'+str(fila+7)].font = fuente2
sheet['D'+str(fila+7)]= filtro
def Informe_pdf(sheet, dic,lista_alumnos,cant_alumno,filtro):
    """Render the 'REPORTE DE CAJA' worksheet used for the PDF export.

    Writes a cash report onto *sheet* (an openpyxl worksheet): a header area,
    one section per payment type in *lista_alumnos* with its detail rows and a
    per-section TOTAL, a grand total, a 'RESUMEN DE VALORES' summary, and a
    signature/footer area.

    Params (as evidenced by the reads below):
        sheet: openpyxl worksheet to write into.
        dic: header data; keys read: 'usuario_id', 'company_id', 'fecha_corte',
            'fecha_desde', 'fecha_hasta'.
        lista_alumnos: list of dicts with 'tipo' (payment-type code) and
            'detalle' (list of detail dicts with 'numero', 'fecha_pago',
            'factura', 'monto', 'cliente', 'banco', 'documento', 'fecha_ch',
            'comentario').
        cant_alumno: unused by this function.
        filtro: free text placed in the footer.

    NOTE(review): str(x.encode('utf-8')) yields "b'...'" under Python 3;
    this pattern only renders cleanly under Python 2 — confirm runtime.
    """
    # Fixed column widths for the print layout (helper defined elsewhere).
    columnas_filas(sheet, 0, 'A', 10.00)
    columnas_filas(sheet, 0, 'B', 5.00)
    columnas_filas(sheet, 0, 'C', 10.00)
    columnas_filas(sheet, 0, 'D', 7.00)
    columnas_filas(sheet, 0, 'E', 1.00)
    columnas_filas(sheet, 0, 'F', 12.00)
    columnas_filas(sheet, 0, 'G', 10.00)
    columnas_filas(sheet, 0, 'H', 10.00)
    columnas_filas(sheet, 0, 'I', 7.00)
    columnas_filas(sheet, 0, 'J', 10.00)
    # Shared base alignment; per-cell copies override wrap/horizontal/vertical.
    alignment_title = Alignment(horizontal='center', vertical='center')
    fuente = Font(bold=False, size=6, name='arial')    # body text
    fuente3 = Font(bold=True, size=8, name='arial')    # report title
    fuente2 = Font(bold=True, size=6, name='arial')    # labels/headers/totals
    fila = 3
    # NOTE(review): the locals below are never read anywhere in this function.
    fila1 = 2
    acum=1
    cont=0
    col=2
    col1=4
    fil=4
    coli=2
    colf=2
    # Row 2: centered report title across A..I.
    sheet.merge_cells('A2:I2')
    sheet['A2'].alignment = alignment_title.copy(wrapText=True,horizontal='center', vertical='top')
    sheet['A2'].font = fuente3
    sheet['A2']= 'REPORTE DE CAJA'
    # Row 1: user (right side) and company (left side).
    sheet['H1'].alignment = alignment_title.copy(wrapText=True,horizontal='right', vertical='top')
    sheet['H1'].font = fuente2
    sheet['H1']= 'Usuario'
    usuario_id=str(dic['usuario_id'].encode('utf-8'))
    sheet['I1'].alignment = alignment_title.copy(wrapText=True,horizontal='left', vertical='top')
    sheet['I1'].font = fuente
    sheet['I1']= str(dic['usuario_id'].encode('utf-8'))
    sheet['A1'].alignment = alignment_title.copy(wrapText=True,horizontal='left', vertical='top')
    sheet['A1'].font = fuente2
    sheet['A1']= 'Cia'
    sheet.merge_cells('B1:C1')
    sheet['B1'].alignment = alignment_title.copy(wrapText=True,horizontal='left', vertical='top')
    sheet['B1'].font = fuente
    sheet['B1']= str(dic['company_id'].encode('utf-8'))
    # Row 3: emission date taken from the cut-off date, not "now".
    sheet['A3'].alignment = alignment_title.copy(wrapText=True,horizontal='left', vertical='top')
    sheet['A3'].font = fuente2
    sheet['A3']= 'Fecha Emision:'
    fecha_actual = dic['fecha_corte']
    sheet.merge_cells('B3:C3')
    sheet['B3'].alignment = alignment_title.copy(wrapText=True,horizontal='left', vertical='top')
    sheet['B3'].font = fuente
    sheet['B3']= fecha_actual
    # Medium border box around the header (rows 1-3, columns 1-10).
    poner_border(sheet,1,1,'medium','medium','none','none')
    poner_border(sheet,1,2,'none','medium','none','none')
    poner_border(sheet,1,3,'none','medium','none','none')
    poner_border(sheet,1,4,'none','medium','none','none')
    poner_border(sheet,1,5,'none','medium','none','none')
    # NOTE(review): the call below duplicates the previous one (row 1, col 5).
    poner_border(sheet,1,5,'none','medium','none','none')
    poner_border(sheet,1,6,'none','medium','none','none')
    poner_border(sheet,1,7,'none','medium','none','none')
    poner_border(sheet,1,8,'none','medium','none','none')
    poner_border(sheet,1,9,'none','medium','none','none')
    poner_border(sheet,1,10,'none','medium','medium','none')
    poner_border(sheet,2,1,'medium','none','none','none')
    poner_border(sheet,2,10,'none','none','medium','none')
    poner_border(sheet,3,1,'medium','none','none','medium')
    poner_border(sheet,3,2,'none','none','none','medium')
    poner_border(sheet,3,3,'none','none','none','medium')
    poner_border(sheet,3,4,'none','none','none','medium')
    poner_border(sheet,3,5,'none','none','none','medium')
    poner_border(sheet,3,6,'none','none','none','medium')
    poner_border(sheet,3,7,'none','none','none','medium')
    poner_border(sheet,3,8,'none','none','none','medium')
    poner_border(sheet,3,9,'none','none','none','medium')
    poner_border(sheet,3,10,'none','none','medium','medium')
    fecha_ini=dic['fecha_desde']
    fecha_fin=dic['fecha_hasta']
    # 'fecha' is reused later in the footer area.
    fecha=str(" Desde: "+dic['fecha_desde']+" Hasta: "+dic['fecha_hasta'])
    sheet.merge_cells('D4:F4')
    sheet['D4'].alignment = alignment_title.copy(wrapText=True,horizontal='center', vertical='top')
    sheet['D4'].font = fuente2
    sheet['D4']= str(" Desde: "+dic['fecha_desde']+" Hasta: "+dic['fecha_hasta'])
    # Row 5: column headers for the detail table (column E is a spacer).
    sheet['A5'].alignment = alignment_title.copy(wrapText=True,horizontal='left', vertical='top')
    sheet['A5'].font = fuente2
    sheet['A5']= 'Origen'
    sheet['B5'].alignment = alignment_title.copy(wrapText=True,horizontal='left', vertical='top')
    sheet['B5'].font = fuente2
    sheet['B5']= 'Fecha'
    sheet['C5'].alignment = alignment_title.copy(wrapText=True,horizontal='left', vertical='top')
    sheet['C5'].font = fuente2
    sheet['C5']= 'FACTURA'
    sheet['D5'].alignment = alignment_title.copy(wrapText=True,horizontal='left', vertical='top')
    sheet['D5'].font = fuente2
    sheet['D5']= 'MONTO'
    sheet['F5'].alignment = alignment_title.copy(wrapText=True,horizontal='left', vertical='top')
    sheet['F5'].font = fuente2
    sheet['F5']= 'Alumno'
    sheet['G5'].alignment = alignment_title.copy(wrapText=True,horizontal='left', vertical='top')
    sheet['G5'].font = fuente2
    sheet['G5']= 'Banco'
    sheet['H5'].alignment = alignment_title.copy(wrapText=True,horizontal='left', vertical='top')
    sheet['H5'].font = fuente2
    sheet['H5']= 'DOCUMENTO'
    sheet['I5'].alignment = alignment_title.copy(wrapText=True,horizontal='left', vertical='top')
    sheet['I5'].font = fuente2
    sheet['I5']= 'Fecha Cheque'
    sheet['J5'].alignment = alignment_title.copy(wrapText=True,horizontal='left', vertical='top')
    sheet['J5'].font = fuente2
    sheet['J5']= 'Comentario'
    fila=6
    total_general=0.0
    saldo_general=0.0     # never accumulated below
    dic={}                # header dict is no longer needed; reused per section
    lista_datos=[]        # collects {'tipo', 'cantidad', 'total'} per section
    # One section per payment type.
    for recorrer in lista_alumnos:
        sheet['A'+str(fila)].alignment = alignment_title.copy(wrapText=True,horizontal='left', vertical='top')
        sheet['A'+str(fila)].font = fuente2
        dic={}
        # Map the payment-type code to its display label; unknown codes
        # leave the cell blank and dic without 'tipo'.
        if recorrer['tipo']=='efe':
            sheet['A'+str(fila)]= 'Efectivo'
            dic['tipo']='Efectivo'
        elif recorrer['tipo']=='ch':
            sheet['A'+str(fila)]= 'Cheque'
            dic['tipo']='Cheque'
        elif recorrer['tipo']=='tc':
            sheet['A'+str(fila)]= 'Tarjeta de Credito'
            dic['tipo']='Tarjeta de Credito'
        elif recorrer['tipo']=='dep':
            sheet['A'+str(fila)]= 'Deposito Bancario'
            dic['tipo']='Deposito Bancario'
        elif recorrer['tipo']=='trans':
            sheet['A'+str(fila)]= 'Transferencia Bancaria'
            dic['tipo']='Transferencia Bancaria'
        elif recorrer['tipo']=='nc':
            sheet['A'+str(fila)]= 'Nota de Credito'
            dic['tipo']='Nota de Credito'
        elif recorrer['tipo']=='rti':
            sheet['A'+str(fila)]= 'Retencion iva'
            dic['tipo']='Retencion iva'
        elif recorrer['tipo']=='rtf':
            sheet['A'+str(fila)]= 'Retencion fuente'
            dic['tipo']='Retencion fuente'
        elif recorrer['tipo']=='liq':
            sheet['A'+str(fila)]= 'Liquidacion'
            dic['tipo']='Liquidacion'
        fila=fila+1
        saldo=0.0     # never used below
        total=0.0
        dic['cantidad']=len(recorrer['detalle'])
        # Detail rows for this payment type.
        for det in recorrer['detalle']:
            columnas_filas(sheet, 1, str(fila), 15.00)
            sheet['A'+str(fila)].alignment = alignment_title.copy(wrapText=True,horizontal='left', vertical='top')
            sheet['A'+str(fila)].font = fuente
            sheet['A'+str(fila)]= det['numero']
            sheet['B'+str(fila)].alignment = alignment_title.copy(wrapText=True,horizontal='left', vertical='top')
            sheet['B'+str(fila)].font = fuente
            sheet['B'+str(fila)]= det['fecha_pago']
            sheet['C'+str(fila)].alignment = alignment_title.copy(wrapText=True,horizontal='justify', vertical='top')
            sheet['C'+str(fila)].font = fuente
            sheet['C'+str(fila)]= det['factura']
            sheet['D'+str(fila)].alignment = alignment_title.copy(wrapText=True,horizontal='right', vertical='top')
            sheet['D'+str(fila)].font = fuente
            monto="{:.2f}".format(float(det['monto']))
            sheet['D'+str(fila)].number_format = '"$"#,##0.00'
            # Swap thousands/decimal separators ("1,234.50" -> "1.234,50").
            sheet['D'+str(fila)]= "{:,}".format(float(monto)).replace(',','~').replace('.',',').replace('~','.')
            # Optional fields: falsy sentinels (False / 0) render as blank.
            sheet['F'+str(fila)].alignment = alignment_title.copy(wrapText=True,horizontal='justify', vertical='top')
            sheet['F'+str(fila)].font = fuente
            if det['cliente']==False:
                sheet['F'+str(fila)]= ''
            elif det['cliente']==0:
                sheet['F'+str(fila)]= ''
            else:
                sheet['F'+str(fila)]= det['cliente']
            sheet['G'+str(fila)].alignment = alignment_title.copy(wrapText=True,horizontal='justify', vertical='top')
            sheet['G'+str(fila)].font = fuente
            if det['banco']==False:
                sheet['G'+str(fila)]= ''
            elif det['banco']==0:
                sheet['G'+str(fila)]= ''
            else:
                sheet['G'+str(fila)]= det['banco']
            sheet['H'+str(fila)].alignment = alignment_title.copy(wrapText=True,horizontal='justify', vertical='top')
            sheet['H'+str(fila)].font = fuente
            if det['documento']==False:
                sheet['H'+str(fila)]= ''
            else:
                sheet['H'+str(fila)]= det['documento']
            sheet['I'+str(fila)].alignment = alignment_title.copy(wrapText=True,horizontal='justify', vertical='top')
            sheet['I'+str(fila)].font = fuente
            if det['fecha_ch']==False:
                sheet['I'+str(fila)]= ''
            else:
                sheet['I'+str(fila)]= det['fecha_ch']
            sheet['J'+str(fila)].alignment = alignment_title.copy(wrapText=True,horizontal='justify', vertical='top')
            sheet['J'+str(fila)].font = fuente
            if det['comentario']==False:
                sheet['J'+str(fila)]= ''
            elif det['comentario']==0:
                sheet['J'+str(fila)]= ''
            else:
                sheet['J'+str(fila)]= det['comentario']
            total=total+float(det['monto'])
            fila=fila+1
        # Section TOTAL row.
        sheet['C'+str(fila)].alignment = alignment_title.copy(wrapText=True,horizontal='left', vertical='top')
        sheet['C'+str(fila)].font = fuente2
        sheet['C'+str(fila)]= 'TOTAL'
        sheet['D'+str(fila)].alignment = alignment_title.copy(wrapText=True,horizontal='right', vertical='top')
        sheet['D'+str(fila)].font = fuente2
        total_1="{:.2f}".format(float(total))
        sheet['D'+str(fila)].number_format = '"$"#,##0.00'
        sheet['D'+str(fila)]= "{:,}".format(float(total_1)).replace(',','~').replace('.',',').replace('~','.')
        dic['total']=total
        total_general = total_general + total
        fila= fila + 1
        lista_datos.append(dic)
    # Grand total across all sections.
    sheet['C'+str(fila+1)].alignment = alignment_title.copy(wrapText=True,horizontal='left', vertical='top')
    sheet['C'+str(fila+1)].font = fuente2
    sheet['C'+str(fila+1)]= 'TOTAL GENERAL'
    sheet['D'+str(fila+1)].alignment = alignment_title.copy(wrapText=True,horizontal='right', vertical='top')
    sheet['D'+str(fila+1)].font = fuente2
    total_2="{:.2f}".format(float(total_general))
    sheet['D'+str(fila+1)].number_format = '"$"#,##0.00'
    sheet['D'+str(fila+1)]= "{:,}".format(float(total_2)).replace(',','~').replace('.',',').replace('~','.')
    # Summary block: one row per payment type with amount and count.
    sheet.merge_cells('B'+str(fila+2)+':C'+str(fila+2))
    sheet['B'+str(fila+2)].alignment = alignment_title.copy(wrapText=True,horizontal='left', vertical='top')
    sheet['B'+str(fila+2)].font = fuente2
    sheet['B'+str(fila+2)]= 'RESUMEN DE VALORES'
    fila=fila+3
    for d in lista_datos:
        columnas_filas(sheet, 1, str(fila), 10.00)
        sheet['A'+str(fila)].alignment = alignment_title.copy(wrapText=True,horizontal='left', vertical='top')
        sheet['A'+str(fila)].font = fuente2
        sheet['A'+str(fila)]= d['tipo']
        sheet['D'+str(fila)].alignment = alignment_title.copy(wrapText=True,horizontal='right', vertical='top')
        sheet['D'+str(fila)].font = fuente2
        total_3="{:.2f}".format(float(d['total']))
        sheet['D'+str(fila)].number_format = '"$"#,##0.00'
        sheet['D'+str(fila)]= "{:,}".format(float(total_3)).replace(',','~').replace('.',',').replace('~','.')
        sheet['H'+str(fila)].alignment = alignment_title.copy(wrapText=True,horizontal='right', vertical='top')
        sheet['H'+str(fila)].font = fuente2
        sheet['H'+str(fila)]= d['cantidad']
        sheet['I'+str(fila)].alignment = alignment_title.copy(wrapText=True,horizontal='left', vertical='top')
        sheet['I'+str(fila)].font = fuente2
        sheet['I'+str(fila)]= 'VECES'
        fila=fila+1
    # Repeat the grand total under the summary.
    sheet['D'+str(fila+1)].alignment = alignment_title.copy(wrapText=True,horizontal='right', vertical='top')
    sheet['D'+str(fila+1)].font = fuente2
    total_4="{:.2f}".format(float(total_general))
    sheet['D'+str(fila+1)].number_format = '"$"#,##0.00'
    sheet['D'+str(fila+1)]= "{:,}".format(float(total_4)).replace(',','~').replace('.',',').replace('~','.')
    # Footer/signature area: user, date range, and the filter text,
    # each merged across D..F with a thin underline for the signature.
    columnas_filas(sheet, 1, str(fila+5), 10.00)
    sheet.merge_cells('D'+str(fila+5)+':F'+str(fila+5))
    sheet['D'+str(fila+5)].alignment = alignment_title.copy(wrapText=True,horizontal='center', vertical='center')
    sheet['D'+str(fila+5)].font = fuente2
    sheet['D'+str(fila+5)]= usuario_id
    poner_border(sheet,fila+5,4,'none','thin','none','none')
    poner_border(sheet,fila+5,5,'none','thin','none','none')
    poner_border(sheet,fila+5,6,'none','thin','none','none')
    columnas_filas(sheet, 1, str(fila+6), 8.00)
    sheet.merge_cells('D'+str(fila+6)+':F'+str(fila+6))
    sheet['D'+str(fila+6)].alignment = alignment_title.copy(wrapText=True,horizontal='center', vertical='center')
    sheet['D'+str(fila+6)].font = fuente2
    sheet['D'+str(fila+6)]= fecha
    columnas_filas(sheet, 1, str(fila+7), 20.00)
    sheet.merge_cells('D'+str(fila+7)+':F'+str(fila+7))
    sheet['D'+str(fila+7)].alignment = alignment_title.copy(wrapText=True,horizontal='center', vertical='top')
    sheet['D'+str(fila+7)].font = fuente2
    sheet['D'+str(fila+7)]= filtro
| 42.540984
| 140
| 0.610469
| 4,074
| 31,140
| 4.587383
| 0.062592
| 0.074536
| 0.098454
| 0.115576
| 0.928996
| 0.922789
| 0.911338
| 0.905185
| 0.902349
| 0.893199
| 0
| 0.024742
| 0.187508
| 31,140
| 732
| 141
| 42.540984
| 0.713924
| 0.036416
| 0
| 0.818644
| 0
| 0
| 0.130445
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.015254
| false
| 0
| 0.025424
| 0
| 0.047458
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
54362bafc1f4971f35c228c9e3e57a3ae2201271
| 124
|
py
|
Python
|
web_google_maps/models/__init__.py
|
Yousif-Mobark/odoo11_cutom
|
35a09266a1d4d74569316886019c11ce41e9216b
|
[
"Apache-2.0"
] | null | null | null |
web_google_maps/models/__init__.py
|
Yousif-Mobark/odoo11_cutom
|
35a09266a1d4d74569316886019c11ce41e9216b
|
[
"Apache-2.0"
] | null | null | null |
web_google_maps/models/__init__.py
|
Yousif-Mobark/odoo11_cutom
|
35a09266a1d4d74569316886019c11ce41e9216b
|
[
"Apache-2.0"
] | 1
|
2020-04-18T02:42:54.000Z
|
2020-04-18T02:42:54.000Z
|
# -*- coding: utf-8 -*-
# License AGPL-3
from . import ir_act_window_view
from . import ir_ui_view
from . import res_config
| 20.666667
| 32
| 0.725806
| 21
| 124
| 4
| 0.714286
| 0.357143
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019231
| 0.16129
| 124
| 5
| 33
| 24.8
| 0.788462
| 0.290323
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
5453fd0caf52bc952d94eb655c02d79ae5dcbb15
| 1,872
|
py
|
Python
|
opentera-webrtc-native-client/OpenteraWebrtcNativeClient/python/test/configurations/signaling_server_configuration_test.py
|
introlab/opentera-webrtc
|
cf92ccd0b239646f6caf68e3638b8f28598ea609
|
[
"Apache-2.0"
] | 12
|
2021-05-30T18:32:36.000Z
|
2022-03-25T12:31:57.000Z
|
opentera-webrtc-native-client/OpenteraWebrtcNativeClient/python/test/configurations/signaling_server_configuration_test.py
|
introlab/opentera-webrtc
|
cf92ccd0b239646f6caf68e3638b8f28598ea609
|
[
"Apache-2.0"
] | 22
|
2021-03-17T12:18:42.000Z
|
2022-03-19T19:12:51.000Z
|
opentera-webrtc-native-client/OpenteraWebrtcNativeClient/python/test/configurations/signaling_server_configuration_test.py
|
introlab/opentera-webrtc-teleop
|
ecb671635832d6d66e0f2f0a7e90b0877ce7c338
|
[
"Apache-2.0"
] | 1
|
2022-02-07T21:30:33.000Z
|
2022-02-07T21:30:33.000Z
|
import unittest
import opentera_webrtc_native_client as webrtc
class SignalingServerConfigurationTestCase(unittest.TestCase):
    """Exercise every overload of SignalingServerConfiguration.create."""

    def _assert_attributes(self, testee, client_data, password):
        # url/client_name/room are the same in all four overloads; only the
        # optional client_data and password vary between them.
        self.assertEqual(testee.url, 'url')
        self.assertEqual(testee.client_name, 'name')
        self.assertEqual(testee.client_data, client_data)
        self.assertEqual(testee.room, 'room')
        self.assertEqual(testee.password, password)

    def test_create__url_client_name_room__should_set_the_attributes(self):
        testee = webrtc.SignalingServerConfiguration.create('url', 'name', 'room')
        self._assert_attributes(testee, client_data=None, password='')

    def test_create__url_client_name_client_data_room__should_set_the_attributes(self):
        testee = webrtc.SignalingServerConfiguration.create('url', 'name', {'data': 10}, 'room')
        self._assert_attributes(testee, client_data={'data': 10}, password='')

    def test_create__url_client_name_room_password__should_set_the_attributes(self):
        testee = webrtc.SignalingServerConfiguration.create('url', 'name', room='room', password='password')
        self._assert_attributes(testee, client_data=None, password='password')

    def test_create__url_client_name_client_data_room_password__should_set_the_attributes(self):
        testee = webrtc.SignalingServerConfiguration.create('url', 'name', {'data': 10}, 'room', 'password')
        self._assert_attributes(testee, client_data={'data': 10}, password='password')
| 44.571429
| 108
| 0.715812
| 211
| 1,872
| 6.061611
| 0.132701
| 0.234558
| 0.328382
| 0.168882
| 0.903831
| 0.903831
| 0.903831
| 0.883503
| 0.883503
| 0.883503
| 0
| 0.005096
| 0.161325
| 1,872
| 41
| 109
| 45.658537
| 0.809554
| 0
| 0
| 0.645161
| 0
| 0
| 0.07265
| 0
| 0
| 0
| 0
| 0
| 0.645161
| 1
| 0.129032
| false
| 0.258065
| 0.064516
| 0
| 0.225806
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 11
|
548f4928c3e8b8f6c47d9368d169e3c4d02a7f7b
| 254
|
py
|
Python
|
utility/__init__.py
|
LatvianPython/wind-experience
|
b634c020dff0a01152bb95b38e5f6f0e368d47f5
|
[
"MIT"
] | 2
|
2018-12-20T20:31:21.000Z
|
2018-12-29T14:51:42.000Z
|
utility/__init__.py
|
LatvianPython/wind-experience
|
b634c020dff0a01152bb95b38e5f6f0e368d47f5
|
[
"MIT"
] | null | null | null |
utility/__init__.py
|
LatvianPython/wind-experience
|
b634c020dff0a01152bb95b38e5f6f0e368d47f5
|
[
"MIT"
] | null | null | null |
from utility.jupyter_utility import output_score
from utility.jupyter_utility import output_feature_importance
from utility.jupyter_utility import read_model_data
from utility.data_download import download_all
from utility.data_download import next_date
| 42.333333
| 61
| 0.901575
| 37
| 254
| 5.864865
| 0.405405
| 0.253456
| 0.248848
| 0.345622
| 0.751152
| 0.341014
| 0
| 0
| 0
| 0
| 0
| 0
| 0.07874
| 254
| 5
| 62
| 50.8
| 0.92735
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
54c68b2fa2d41fc59fdfed232f3f11d7a780d3b0
| 1,428
|
py
|
Python
|
tests/core/shh-module/test_shh_filter.py
|
jsmeng324/web3.py
|
6f240dcf4f37f55f0ac09c90985674233f344c37
|
[
"MIT"
] | null | null | null |
tests/core/shh-module/test_shh_filter.py
|
jsmeng324/web3.py
|
6f240dcf4f37f55f0ac09c90985674233f344c37
|
[
"MIT"
] | null | null | null |
tests/core/shh-module/test_shh_filter.py
|
jsmeng324/web3.py
|
6f240dcf4f37f55f0ac09c90985674233f344c37
|
[
"MIT"
] | null | null | null |
from web3.utils.compat import sleep
def test_shh_sync_filter(web3, skip_if_testrpc):
    """Post two whisper messages, then poll the filter and check both arrive."""
    skip_if_testrpc(web3)
    topic = web3.toHex(text="test")
    shh_filter = web3.shh.filter({"topics": [topic]})
    payloads = []
    # Post each payload in turn, pausing so the node can deliver it.
    for text in ("payload1", "payload2"):
        payloads.append(str.encode(text))
        web3.shh.post({
            "topics": [topic],
            "payload": web3.toHex(text=payloads[-1]),
        })
        sleep(1)
    received_messages = shh_filter.get_new_entries()
    assert len(received_messages) > 1
    for message in received_messages:
        assert message["payload"] in payloads
def test_shh_async_filter(web3, skip_if_testrpc):
    """Post two whisper messages with a watch callback and check both arrive."""
    skip_if_testrpc(web3)
    received_messages = []
    topic = web3.toHex(text="test")
    shh_filter = web3.shh.filter({"topics": [topic]})
    # Messages are appended asynchronously as the node delivers them.
    shh_filter.watch(received_messages.append)
    payloads = []
    for text in ("payload1", "payload2"):
        payloads.append(str.encode(text))
        web3.shh.post({
            "topics": [topic],
            "payload": web3.toHex(text=payloads[-1]),
        })
        sleep(1)
    assert len(received_messages) > 1
    for message in received_messages:
        assert message["payload"] in payloads
| 25.963636
| 53
| 0.631653
| 175
| 1,428
| 5
| 0.222857
| 0.128
| 0.089143
| 0.105143
| 0.829714
| 0.829714
| 0.829714
| 0.829714
| 0.829714
| 0.738286
| 0
| 0.027703
| 0.216387
| 1,428
| 54
| 54
| 26.444444
| 0.754245
| 0
| 0
| 0.863636
| 0
| 0
| 0.082633
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 1
| 0.045455
| false
| 0
| 0.022727
| 0
| 0.068182
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
49b5697d2dd18bfd436e0a4fe67791fd6c626d35
| 17,874
|
py
|
Python
|
tests/unit/states/test_nsxt_ip_blocks.py
|
kdsalvy/salt-ext-modules-vmware-1
|
9fdc941692e4c526f575f33b2ce23c1470582934
|
[
"Apache-2.0"
] | 10
|
2021-11-02T20:24:44.000Z
|
2022-03-11T05:54:27.000Z
|
tests/unit/states/test_nsxt_ip_blocks.py
|
cmcmarrow/salt-ext-modules-vmware
|
c546a9f9ae121b7399dabae82f714117d0ab558d
|
[
"Apache-2.0"
] | 83
|
2021-10-01T15:13:02.000Z
|
2022-03-31T16:22:40.000Z
|
tests/unit/states/test_nsxt_ip_blocks.py
|
cmcmarrow/salt-ext-modules-vmware
|
c546a9f9ae121b7399dabae82f714117d0ab558d
|
[
"Apache-2.0"
] | 15
|
2021-09-30T23:17:27.000Z
|
2022-03-23T06:54:22.000Z
|
"""
Unit Tests for nsxt_ip_blocks state
"""
from unittest.mock import MagicMock
from unittest.mock import patch
import pytest
from saltext.vmware.states import nsxt_ip_blocks
@pytest.fixture
def configure_loader_modules():
    """Register the nsxt_ip_blocks state module with an empty loader-dunder map."""
    loader_config = {nsxt_ip_blocks: {}}
    return loader_config
def _get_mocked_data():
mocked_ok_response = {
"resource_type": "IpBlock",
"id": "9b636d18-49a2-4e63-a1ec-10c0e50d554d",
"cidr": "1.1.1.1/16",
"display_name": "Create-from_salt",
"description": "Check",
"_create_user": "admin",
"_create_time": 1615905790948,
"_last_modified_user": "admin",
"_last_modified_time": 1615905790948,
"_system_owned": False,
"_protection": "NOT_PROTECTED",
"_revision": 0,
}
mocked_error_response = {
"error": "The credentials were incorrect or the account specified has been locked."
}
mocked_hostname = "nsx-t.vmware.com"
return mocked_hostname, mocked_ok_response, mocked_error_response
def test_present_state_when_error_from_get_by_display_name():
    """present() must report failure when the lookup module returns an error payload."""
    mocked_hostname, mocked_ok_response, mocked_error_response = _get_mocked_data()
    mock_get_using_display_name = MagicMock(return_value=mocked_error_response)
    with patch.dict(
        nsxt_ip_blocks.__salt__, {"nsxt_ip_blocks.get_by_display_name": mock_get_using_display_name}
    ):
        result = nsxt_ip_blocks.present(
            name="test_present_using_basic_auth",
            hostname=mocked_hostname,
            username="username",
            password="password",
            cidr="1.1.1.1/24",
            display_name=mocked_ok_response["display_name"],
        )
    # No changes, the module's error text surfaced as the comment, result False.
    assert result is not None
    assert result["changes"] == {}
    assert (
        result["comment"]
        == "The credentials were incorrect or the account specified has been locked."
    )
    assert not result["result"]
def test_present_state_when_error_from_create():
    """present() must report failure when the create call returns an error payload."""
    mocked_hostname, mocked_ok_response, mocked_error_response = _get_mocked_data()
    # Lookup finds nothing, so present() takes the create path.
    mock_get_using_display_name = MagicMock(return_value={"results": []})
    mock_create = MagicMock(return_value=mocked_error_response)
    with patch.dict(
        nsxt_ip_blocks.__salt__,
        {
            "nsxt_ip_blocks.get_by_display_name": mock_get_using_display_name,
            "nsxt_ip_blocks.create": mock_create,
        },
    ):
        result = nsxt_ip_blocks.present(
            name="test_present_using_basic_auth",
            hostname=mocked_hostname,
            username="username",
            password="password",
            cidr="1.1.1.1/24",
            display_name=mocked_ok_response["display_name"],
        )
    assert result is not None
    assert result["changes"] == {}
    assert (
        result["comment"]
        == "The credentials were incorrect or the account specified has been locked."
    )
    assert not result["result"]
def test_present_state_when_error_from_update():
    """present() must report failure when the update call returns an error payload."""
    mocked_hostname, mocked_ok_response, mocked_error_response = _get_mocked_data()
    # Lookup finds an existing block; the differing description forces an update.
    mock_get_using_display_name = MagicMock(return_value={"results": [mocked_ok_response]})
    mock_create = MagicMock(return_value=mocked_error_response)
    with patch.dict(
        nsxt_ip_blocks.__salt__,
        {
            "nsxt_ip_blocks.get_by_display_name": mock_get_using_display_name,
            "nsxt_ip_blocks.update": mock_create,
        },
    ):
        result = nsxt_ip_blocks.present(
            name="test_present_using_basic_auth",
            hostname=mocked_hostname,
            username="username",
            password="password",
            cidr="1.1.1.1/24",
            description="Sample description",
            display_name=mocked_ok_response["display_name"],
        )
    assert result is not None
    assert result["changes"] == {}
    assert (
        result["comment"]
        == "The credentials were incorrect or the account specified has been locked."
    )
    assert not result["result"]
def test_present_state_during_update_to_add_a_new_field():
    """present() must update when the user adds a field the existing block lacks."""
    mocked_hostname, mocked_ok_response, mocked_error_response = _get_mocked_data()
    # Existing block has no description; the updated response carries one.
    mocked_updated_response = mocked_ok_response.copy()
    mocked_ok_response.pop("description")
    mock_get_using_display_name = MagicMock(return_value={"results": [mocked_ok_response]})
    mocked_updated_response["description"] = "Sample description"
    mock_create = MagicMock(return_value=mocked_updated_response)
    with patch.dict(
        nsxt_ip_blocks.__salt__,
        {
            "nsxt_ip_blocks.get_by_display_name": mock_get_using_display_name,
            "nsxt_ip_blocks.update": mock_create,
        },
    ):
        result = nsxt_ip_blocks.present(
            name="test_present_using_basic_auth",
            hostname=mocked_hostname,
            username="username",
            password="password",
            cidr="1.1.1.1/16",
            description="Sample description",
            display_name=mocked_ok_response["display_name"],
        )
    assert result is not None
    assert result["changes"]["old"] == mocked_ok_response
    assert result["changes"]["new"] == mocked_updated_response
    assert result["comment"] == "Updated IP Block Create-from_salt"
    assert result["result"]
def test_present_to_create_when_module_returns_success_response():
    """present() must create the block and report old=None when none exists."""
    mocked_hostname, mocked_ok_response, mocked_error_response = _get_mocked_data()
    mock_get_using_display_name_response = MagicMock(return_value={"results": []})
    mock_create_response = MagicMock(return_value=mocked_ok_response)
    display_name = mocked_ok_response["display_name"]
    with patch.dict(
        nsxt_ip_blocks.__salt__,
        {
            "nsxt_ip_blocks.get_by_display_name": mock_get_using_display_name_response,
            "nsxt_ip_blocks.create": mock_create_response,
        },
    ):
        result = nsxt_ip_blocks.present(
            name="test_present_using_basic_auth",
            hostname=mocked_hostname,
            username="username",
            password="password",
            cidr="1.1.1.1/16",
            display_name=display_name,
        )
    assert result is not None
    assert result["changes"] == {"new": mocked_ok_response, "old": None}
    assert result["comment"] == "Created IP Block {}".format(display_name)
    assert result["result"]
def test_present_to_update_when_module_returns_success_response():
    """present() must update the block and report old/new when fields differ."""
    mocked_hostname, mocked_ok_response, mocked_error_response = _get_mocked_data()
    mocked_updated_ip_block = mocked_ok_response.copy()
    mocked_updated_ip_block["description"] = "Updated Using Salt"
    mock_get_using_display_name_response = MagicMock(return_value={"results": [mocked_ok_response]})
    mock_update_response = MagicMock(return_value=mocked_updated_ip_block)
    display_name = mocked_ok_response["display_name"]
    with patch.dict(
        nsxt_ip_blocks.__salt__,
        {
            "nsxt_ip_blocks.get_by_display_name": mock_get_using_display_name_response,
            "nsxt_ip_blocks.update": mock_update_response,
        },
    ):
        result = nsxt_ip_blocks.present(
            name="test_present_using_basic_auth",
            hostname=mocked_hostname,
            username="username",
            password="password",
            cidr="1.1.1.1/24",
            display_name=display_name,
            description="Updated Using Salt",
        )
    assert result is not None
    assert result["changes"] == {"new": mocked_updated_ip_block, "old": mocked_ok_response}
    assert result["comment"] == "Updated IP Block {}".format(display_name)
    assert result["result"]
def test_present_to_update_when_user_input_and_existing_ip_block_has_identical_fields():
    """present() is a no-op when the existing block already matches the input."""
    hostname, ok_response, _ = _get_mocked_data()
    display_name = ok_response["display_name"]
    get_mock = MagicMock(return_value={"results": [ok_response]})
    with patch.dict(
        nsxt_ip_blocks.__salt__,
        {"nsxt_ip_blocks.get_by_display_name": get_mock},
    ):
        result = nsxt_ip_blocks.present(
            name="test_present_using_basic_auth",
            hostname=hostname,
            username="username",
            password="password",
            cidr="1.1.1.1/16",
            display_name=display_name,
            description="Check",
        )
    assert result is not None
    assert not result["changes"]
    assert result["comment"] == "IP Address Block exists already, no action to perform"
    assert result["result"]
def test_present_state_for_create_when_opts_test_is_true():
    """In test mode, present() only reports that it would create the block."""
    hostname, ok_response, _ = _get_mocked_data()
    display_name = ok_response["display_name"]
    get_mock = MagicMock(return_value={"results": []})
    with patch.dict(
        nsxt_ip_blocks.__salt__,
        {"nsxt_ip_blocks.get_by_display_name": get_mock},
    ), patch.dict(nsxt_ip_blocks.__opts__, {"test": True}):
        result = nsxt_ip_blocks.present(
            name="test_absent_using_basic_auth",
            hostname=hostname,
            username="username",
            password="password",
            cidr="1.1.1.1/24",
            display_name=display_name,
        )
    assert result is not None
    assert not result["changes"]
    assert result["comment"] == "State present will create IP Block with name {}".format(
        display_name
    )
    assert result["result"] is None
def test_present_state_for_update_when_opts_test_is_true():
    """In test mode, present() only reports that it would update the block."""
    hostname, ok_response, _ = _get_mocked_data()
    display_name = ok_response["display_name"]
    get_mock = MagicMock(return_value={"results": [ok_response]})
    with patch.dict(
        nsxt_ip_blocks.__salt__,
        {"nsxt_ip_blocks.get_by_display_name": get_mock},
    ), patch.dict(nsxt_ip_blocks.__opts__, {"test": True}):
        result = nsxt_ip_blocks.present(
            name="test_absent_using_basic_auth",
            hostname=hostname,
            username="username",
            password="password",
            cidr="1.1.1.1/24",
            display_name=display_name,
        )
    assert result is not None
    assert not result["changes"]
    assert result["comment"] == "State present will update IP Block with name {}".format(
        display_name
    )
    assert result["result"] is None
def test_present_state_when_get_by_display_name_returns_multiple_blocks_with_same_display_name():
    """present() fails when the display name matches more than one block."""
    hostname, ok_response, _ = _get_mocked_data()
    display_name = ok_response["display_name"]
    get_mock = MagicMock(return_value={"results": [ok_response, ok_response]})
    with patch.dict(
        nsxt_ip_blocks.__salt__,
        {"nsxt_ip_blocks.get_by_display_name": get_mock},
    ), patch.dict(nsxt_ip_blocks.__opts__, {"test": True}):
        result = nsxt_ip_blocks.present(
            name="test_absent_using_basic_auth",
            hostname=hostname,
            username="username",
            password="password",
            cidr="1.1.1.1/24",
            display_name=display_name,
        )
    assert result is not None
    assert not result["changes"]
    assert result["comment"] == "Multiple IP Blocks found for the provided display name {}".format(
        display_name
    )
    assert not result["result"]
def test_absent_state_to_delete_when_module_returns_success_response():
    """absent() deletes the existing IP block and reports the change."""
    hostname, ok_response, _ = _get_mocked_data()
    display_name = ok_response["display_name"]
    salt_mocks = {
        "nsxt_ip_blocks.get_by_display_name": MagicMock(
            return_value={"results": [ok_response]}
        ),
        # ok=True only sets an attribute on the mock; kept for parity.
        "nsxt_ip_blocks.delete": MagicMock(
            ok=True, return_value="IP Block deleted successfully"
        ),
    }
    with patch.dict(nsxt_ip_blocks.__salt__, salt_mocks):
        result = nsxt_ip_blocks.absent(
            name="test_absent_using_basic_auth",
            hostname=hostname,
            username="username",
            password="password",
            display_name=display_name,
        )
    assert result is not None
    assert result["changes"] == {"new": None, "old": ok_response}
    assert result["comment"] == "Deleted IP Block {}".format(display_name)
    assert result["result"]
def test_absent_state_when_object_to_delete_does_not_exists():
    """absent() succeeds with no changes when the block is already gone."""
    hostname, ok_response, _ = _get_mocked_data()
    display_name = ok_response["display_name"]
    get_mock = MagicMock(return_value={"results": []})
    with patch.dict(
        nsxt_ip_blocks.__salt__,
        {"nsxt_ip_blocks.get_by_display_name": get_mock},
    ):
        result = nsxt_ip_blocks.absent(
            name="test_publish_using_basic_auth",
            hostname=hostname,
            username="username",
            password="password",
            display_name=display_name,
        )
    assert result is not None
    assert result["changes"] == {}
    assert result["comment"] == "No IP Address Block found with name {}".format(display_name)
    assert result["result"]
def test_absent_state_to_delete_when_opts_test_mode_is_true():
    """In test mode, absent() only reports that it would delete the block."""
    hostname, ok_response, _ = _get_mocked_data()
    display_name = ok_response["display_name"]
    get_mock = MagicMock(return_value={"results": [ok_response]})
    with patch.dict(
        nsxt_ip_blocks.__salt__,
        {"nsxt_ip_blocks.get_by_display_name": get_mock},
    ), patch.dict(nsxt_ip_blocks.__opts__, {"test": True}):
        result = nsxt_ip_blocks.absent(
            name="test_absent_using_basic_auth",
            hostname=hostname,
            username="username",
            password="password",
            display_name=display_name,
        )
    assert result is not None
    assert not result["changes"]
    assert result["comment"] == "State absent will delete IP Block with name {}".format(
        display_name
    )
    assert result["result"] is None
def test_absent_state_when_object_to_delete_doesn_not_exists_and_opts_test_mode_is_true():
    """In test mode, absent() reports a no-op when the block does not exist."""
    hostname, ok_response, _ = _get_mocked_data()
    display_name = ok_response["display_name"]
    get_mock = MagicMock(return_value={"results": []})
    with patch.dict(
        nsxt_ip_blocks.__salt__,
        {"nsxt_ip_blocks.get_by_display_name": get_mock},
    ), patch.dict(nsxt_ip_blocks.__opts__, {"test": True}):
        result = nsxt_ip_blocks.absent(
            name="test_absent_using_basic_auth",
            hostname=hostname,
            username="username",
            password="password",
            display_name=display_name,
        )
    assert result is not None
    assert not result["changes"]
    expected = "State absent will do nothing as no IP Block found with name {}".format(
        display_name
    )
    assert result["comment"] == expected
    assert result["result"] is None
def test_absent_state_when_get_by_display_name_returns_multiple_blocks_with_same_display_name():
    """absent() fails when the display name matches more than one block."""
    hostname, ok_response, _ = _get_mocked_data()
    display_name = ok_response["display_name"]
    get_mock = MagicMock(return_value={"results": [ok_response, ok_response]})
    with patch.dict(
        nsxt_ip_blocks.__salt__,
        {"nsxt_ip_blocks.get_by_display_name": get_mock},
    ), patch.dict(nsxt_ip_blocks.__opts__, {"test": True}):
        result = nsxt_ip_blocks.absent(
            name="test_absent_using_basic_auth",
            hostname=hostname,
            username="username",
            password="password",
            display_name=display_name,
        )
    assert result is not None
    assert not result["changes"]
    assert result["comment"] == "Multiple IP Blocks found for the provided display name {}".format(
        display_name
    )
    assert not result["result"]
def test_absent_when_nsxt_ip_blocks_delete_returns_error():
    """absent() surfaces the module's error comment when delete fails.

    Fix: the mock backing ``nsxt_ip_blocks.delete`` was misleadingly named
    ``mock_create``; it is now named for the function it replaces.
    """
    mocked_hostname, mocked_ok_response, mocked_error_response = _get_mocked_data()
    mock_get_using_display_name = MagicMock(return_value={"results": [mocked_ok_response]})
    mock_delete = MagicMock(return_value=mocked_error_response)
    with patch.dict(
        nsxt_ip_blocks.__salt__,
        {
            "nsxt_ip_blocks.get_by_display_name": mock_get_using_display_name,
            "nsxt_ip_blocks.delete": mock_delete,
        },
    ):
        result = nsxt_ip_blocks.absent(
            name="test_present_using_basic_auth",
            hostname=mocked_hostname,
            username="username",
            password="password",
            display_name=mocked_ok_response["display_name"],
        )
    assert result is not None
    # No changes are recorded because the delete call failed.
    assert result["changes"] == {}
    assert (
        result["comment"]
        == "The credentials were incorrect or the account specified has been locked."
    )
    assert not result["result"]
| 35.891566
| 100
| 0.673212
| 2,145
| 17,874
| 5.134732
| 0.067133
| 0.119847
| 0.070819
| 0.055202
| 0.902125
| 0.8856
| 0.86962
| 0.850191
| 0.838388
| 0.836118
| 0
| 0.008813
| 0.231845
| 17,874
| 497
| 101
| 35.963783
| 0.793372
| 0.001958
| 0
| 0.702703
| 0
| 0
| 0.193246
| 0.066416
| 0
| 0
| 0
| 0
| 0.159705
| 1
| 0.044226
| false
| 0.039312
| 0.009828
| 0.002457
| 0.058968
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
49dfe9d3115d73c0ef904575f169d4b7a2e2350f
| 9,872
|
py
|
Python
|
MultiCSVFetchYahooDiv.py
|
adamrvfisher/TechnicalAnalysisLibrary
|
38a22b2b2b5052623f81edb11b3c5460fc254e45
|
[
"Apache-2.0"
] | 3
|
2019-04-26T11:13:14.000Z
|
2020-01-10T05:58:16.000Z
|
MultiCSVFetchYahooDiv.py
|
adamrvfisher/TechnicalAnalysisLibrary
|
38a22b2b2b5052623f81edb11b3c5460fc254e45
|
[
"Apache-2.0"
] | null | null | null |
MultiCSVFetchYahooDiv.py
|
adamrvfisher/TechnicalAnalysisLibrary
|
38a22b2b2b5052623f81edb11b3c5460fc254e45
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
@author: Adam Reinhold Von Fisher - https://www.linkedin.com/in/adamrvfisher/
"""
# This is an HTML scraper and formatting tool for dividend time series
# database construction.
#
# Reworked: the download/parse/save sequence was copy-pasted five times
# across nested exception handlers; it now lives in one helper, and the
# retry logic is a single sleep-and-retry layer with the same messages
# and sleep durations as before.
# Import modules
from pandas import read_csv
import requests
import pandas as pd
import os
import time
from io import StringIO
from CrumbCatcher import CrumbCatcher
# pandas.parser was removed in pandas 0.20; fall back to the modern
# location so the script runs on both old and current pandas.
try:
    from pandas.parser import CParserError
except ImportError:
    from pandas.errors import ParserError as CParserError
#Read in data
#df = read_csv('refdfser.csv', sep = ',')
df = pd.read_pickle('C:\\Users\\AmatVictoriaCuramIII\\Desktop\\Python\\Universe2018')
#symbol = df.Symbol.values
#Iterable
ranger = range(0, len(df))


def _fetch_and_save_dividends(ticker):
    """Download Yahoo dividend history for *ticker* and write it to CSV.

    Returns True when a CSV was written, False when the response was an
    error payload or the ticker has no dividend history.  May raise
    CParserError, ConnectionError or requests.exceptions.SSLError; the
    caller handles those with sleep-and-retry logic.
    """
    # Yahoo requires a session "crumb" token on download URLs.
    artificialcrumb = CrumbCatcher(ticker)
    downloadurl = ("https://query1.finance.yahoo.com/v7/finance/download/" + ticker
                   + "?period1=-631123200&period2=1598374000&interval=1d&events=div&crumb="
                   + artificialcrumb)
    # Get response and parse the CSV body.
    response = requests.post(downloadurl)
    strdf = pd.read_csv(StringIO(response.text), sep=',')
    # A failed request comes back as a JSON error document, not CSV.
    if strdf.columns[0] == '{"chart":{"result":null':
        print('The URL failed for ' + ticker)
        return False
    # Format date index.
    strdf = strdf.set_index('Date')
    strdf.index = pd.to_datetime(strdf.index, format="%Y/%m/%d")
    if len(strdf) == 0:
        print("No dividend history for " + ticker)
        return False
    # Save to CSV.
    strdf.to_csv("F:\\Users\\AmatVictoriaCuram\\TemporaryCSV\\" + ticker + "div.csv")
    # Iteration tracking.
    print(ticker)
    return True


# For number of tickers (capped at the first 5, as before).
for i in ranger[:5]:
    # NOTE(review): df is a pickled object indexed by integer and sliced
    # with [:-4]; presumably filenames with a 4-char extension — confirm.
    ticker = str(df[i][:-4])
    try:
        _fetch_and_save_dividends(ticker)
    except CParserError:
        print('Parser failed for ' + ticker)
        continue
    except (ConnectionError, requests.exceptions.SSLError) as first_error:
        # Sleep, then retry the same ticker once before giving up on it.
        if isinstance(first_error, ConnectionError):
            print('ConnectionError on ' + str(ticker) + '.')
            print('Sleeping for 5 min.')
            time.sleep(301)
        else:
            print('SSLError on ' + str(ticker) + '.')
            print('Sleeping for 61 seconds.')
            time.sleep(61)
        print('Parsing for ' + ticker + '.')
        try:
            _fetch_and_save_dividends(ticker)
        except CParserError:
            print('Parser failed for ' + ticker + '.')
            continue
        except requests.exceptions.SSLError:
            print('Double SSLError for ' + ticker + '.')
            continue
        except ConnectionError:
            print('Double ConnectionError for ' + ticker + '.')
            continue
| 44.071429
| 108
| 0.526438
| 919
| 9,872
| 5.630033
| 0.165397
| 0.062234
| 0.045999
| 0.040008
| 0.860456
| 0.850213
| 0.844994
| 0.844994
| 0.811751
| 0.803247
| 0
| 0.024006
| 0.362844
| 9,872
| 223
| 109
| 44.269058
| 0.798569
| 0.140296
| 0
| 0.823129
| 0
| 0
| 0.238782
| 0.090109
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.054422
| 0
| 0.054422
| 0.217687
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
49eed69cc274e045b62b0bca0086550ec563f26b
| 4,343
|
py
|
Python
|
utils/image_compression.py
|
CeZh/Camera_Perception_Quality
|
22a6e6140c21557be215fd94eff75a2ede1d7136
|
[
"MIT"
] | 2
|
2022-03-09T15:46:29.000Z
|
2022-03-11T19:47:01.000Z
|
utils/image_compression.py
|
CeZh/Camera_Perception_Quality
|
22a6e6140c21557be215fd94eff75a2ede1d7136
|
[
"MIT"
] | null | null | null |
utils/image_compression.py
|
CeZh/Camera_Perception_Quality
|
22a6e6140c21557be215fd94eff75a2ede1d7136
|
[
"MIT"
] | 1
|
2022-03-11T19:47:46.000Z
|
2022-03-11T19:47:46.000Z
|
from torchvision import transforms
import torch
from utils.superpixel_slic import superpixel
import numpy as np
from torch_scatter import scatter_mean, scatter_std
def transform_train(img, img_dim, **kwargs):
    """Training-time transform for an input image.

    Without kwargs: random horizontal flip, resize to (img_dim, img_dim),
    tensor conversion and ImageNet normalization; returns the image tensor.

    With kwargs (expects ``kwargs['super_pixel']``): additionally runs SLIC
    superpixel segmentation and returns a ``data`` object carrying the
    normalized images, per-superpixel mean/std color features (``data.x``)
    and positions augmented with superpixel pixel counts (``data.pos``).

    Fix: the unused ``super_index`` binding from ``torch.unique`` is dropped.
    """
    # With superpixel
    if kwargs:
        img_transform_init = transforms.Compose([
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor()
        ])
        img_tensor = img_transform_init(img)
        super_pixel = kwargs['super_pixel']
        data = superpixel(img_tensor, super_pixel)
        # ImageNet statistics; the 6-channel variant normalizes the image
        # stacked with its superpixel counterpart (presumably produced by
        # superpixel() — confirm channel layout against that helper).
        normalize_transform = transforms.Normalize(mean = (0.485, 0.456, 0.406),
                                                   std = (0.229, 0.224, 0.225))
        final_transform = transforms.Compose([transforms.Resize((img_dim, img_dim)),
                              transforms.Normalize(mean = (0.485, 0.456, 0.406, 0.485, 0.456, 0.406),
                                                   std = (0.229, 0.224, 0.225, 0.229, 0.224, 0.225))])
        img = normalize_transform(data.img)
        data.img_super = final_transform(data.img_super)
        # Per-superpixel mean and std of the normalized pixel colors,
        # scattered over the flattened segmentation labels.
        data.x = scatter_mean(img.view(img.shape[1]*img.shape[2], img.shape[0]),
                              data.seg.view(img.shape[1]*img.shape[2]), dim=0)
        data.x_std = scatter_std(img.view(img.shape[1]*img.shape[2], img.shape[0]),
                                 data.seg.view(img.shape[1]*img.shape[2]), dim=0)
        data.x = torch.cat([data.x, data.x_std], dim=1)
        # Only the per-label counts are needed; the labels were unused.
        _, super_counts = torch.unique(data.seg, return_counts=True)
        data.pos = torch.cat([data.pos.int(), super_counts.unsqueeze(1)], dim=1)
        # Pad pos/x up to the requested number of segments when SLIC
        # produced fewer superpixels than requested.
        # NOTE(review): torch.zeros defaults to float32 while data.pos holds
        # integer values here — confirm torch.cat accepts this dtype mix.
        if data.pos.shape[0] < super_pixel['segments']:
            pos_pad = torch.zeros(super_pixel['segments']-data.pos.shape[0], 3)
            x_pad = torch.zeros(super_pixel['segments']-data.x.shape[0], 6)
            data.pos = torch.cat((data.pos, pos_pad), dim=0)
            data.x = torch.cat((data.x, x_pad), dim=0)
        return data
    # No superpixel
    img_transform = transforms.Compose(
        [transforms.RandomHorizontalFlip(), transforms.Resize((img_dim, img_dim)), transforms.ToTensor(),
         transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))])
    trans_img = img_transform(img)
    return trans_img
def transform_val(img, img_dim, **kwargs):
    """Validation-time transform: same as transform_train but without the
    random horizontal flip, so results are deterministic.

    Without kwargs: resize, tensor conversion and ImageNet normalization.
    With kwargs (expects ``kwargs['super_pixel']``): superpixel features as
    in transform_train.

    Fix: the unused ``super_index`` binding from ``torch.unique`` is dropped.
    """
    if kwargs:
        img_transform_init = transforms.Compose([
            transforms.ToTensor()
        ])
        img_tensor = img_transform_init(img)
        super_pixel = kwargs['super_pixel']
        data = superpixel(img_tensor, super_pixel)
        # ImageNet statistics; 6-channel variant for the stacked image.
        normalize_transform = transforms.Normalize(mean=(0.485, 0.456, 0.406),
                                                   std=(0.229, 0.224, 0.225))
        final_transform = transforms.Compose([transforms.Resize((img_dim, img_dim)),
                              transforms.Normalize(mean=(0.485, 0.456, 0.406, 0.485, 0.456, 0.406),
                                                   std=(0.229, 0.224, 0.225, 0.229, 0.224, 0.225))])
        img = normalize_transform(data.img)
        data.img_super = final_transform(data.img_super)
        # Per-superpixel mean/std color features.
        data.x = scatter_mean(img.view(img.shape[1] * img.shape[2], img.shape[0]),
                              data.seg.view(img.shape[1] * img.shape[2]), dim=0)
        data.x_std = scatter_std(img.view(img.shape[1] * img.shape[2], img.shape[0]),
                                 data.seg.view(img.shape[1] * img.shape[2]), dim=0)
        data.x = torch.cat([data.x, data.x_std], dim=1)
        # Only the per-label counts are needed; the labels were unused.
        _, super_counts = torch.unique(data.seg, return_counts=True)
        data.pos = torch.cat([data.pos.int(), super_counts.unsqueeze(1)], dim=1)
        # Pad up to the requested segment count (see transform-time note on
        # the float/int dtype mix in torch.cat).
        if data.pos.shape[0] < super_pixel['segments']:
            pos_pad = torch.zeros(super_pixel['segments'] - data.pos.shape[0], 3)
            x_pad = torch.zeros(super_pixel['segments'] - data.x.shape[0], 6)
            data.pos = torch.cat((data.pos, pos_pad), dim=0)
            data.x = torch.cat((data.x, x_pad), dim=0)
        return data
    img_transform = transforms.Compose(
        [transforms.Resize((img_dim, img_dim)), transforms.ToTensor(),
         transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))])
    trans_img = img_transform(img)
    return trans_img
| 52.963415
| 118
| 0.578632
| 592
| 4,343
| 4.103041
| 0.106419
| 0.065871
| 0.016468
| 0.026348
| 0.906546
| 0.872787
| 0.872787
| 0.872787
| 0.834911
| 0.834911
| 0
| 0.075279
| 0.278149
| 4,343
| 82
| 119
| 52.963415
| 0.699522
| 0.006677
| 0
| 0.861111
| 0
| 0
| 0.016234
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027778
| false
| 0
| 0.069444
| 0
| 0.152778
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
49fb89f12f62f836ea70442948dad8fdca878d48
| 200
|
py
|
Python
|
scopus/utils/__init__.py
|
crew102/scopus
|
d8791c162cef4c2f830d983b435333d9d8eaf472
|
[
"MIT"
] | null | null | null |
scopus/utils/__init__.py
|
crew102/scopus
|
d8791c162cef4c2f830d983b435333d9d8eaf472
|
[
"MIT"
] | null | null | null |
scopus/utils/__init__.py
|
crew102/scopus
|
d8791c162cef4c2f830d983b435333d9d8eaf472
|
[
"MIT"
] | null | null | null |
from scopus.utils.create_config import *
from scopus.utils.get_content import *
from scopus.utils.get_encoded_text import *
from scopus.utils.parse_content import *
from scopus.utils.startup import *
| 33.333333
| 43
| 0.825
| 30
| 200
| 5.333333
| 0.4
| 0.3125
| 0.46875
| 0.525
| 0.51875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 200
| 5
| 44
| 40
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
b7163108c80b92e090b901f494eead8db2fe93d5
| 7,284
|
py
|
Python
|
src/tests/test_ChangelogUpdater.py
|
wirecard/extension-release-info-updater
|
286fc2be9f72735653c79683c004459f74195037
|
[
"MIT"
] | null | null | null |
src/tests/test_ChangelogUpdater.py
|
wirecard/extension-release-info-updater
|
286fc2be9f72735653c79683c004459f74195037
|
[
"MIT"
] | null | null | null |
src/tests/test_ChangelogUpdater.py
|
wirecard/extension-release-info-updater
|
286fc2be9f72735653c79683c004459f74195037
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from src.ChangelogUpdater import ChangelogFileUpdater
class TestChangelogUpdater(TestCase):
    """Tests for ChangelogFileUpdater's markdown-table rewriting helpers.

    Each test feeds an old changelog table row/header and asserts the
    rewritten row reflects the new compatible/tested version lists given
    to the updater in setUp. The check-mark cells are the Unicode
    "white heavy check mark" (U+2705); ":x:" cells mark untested versions.
    """

    def setUp(self) -> None:
        # Updater without platform versions: shop 'woocommerce', releasing
        # v3.2.2 over v3.2.1, PHP 7.1/7.2 compatible, 7.2 tested,
        # shop versions 3.3.4-3.8.0 compatible, 3.8.0 tested.
        self.changelog_updater = ChangelogFileUpdater('woocommerce', 'v3.2.2', 'v3.2.1', ['7.1', '7.2'],
                                                      ['7.2'], ['3.3.4', '3.8.0'], ['3.8.0'])
        # Same, plus platform (Wordpress) versions 5.0.3-5.3 compatible, 5.3 tested.
        self.changelog_updater_with_platform = ChangelogFileUpdater('woocommerce', 'v3.2.2', 'v3.2.1', ['7.1', '7.2'],
                                                                    ['7.2'], ['3.3.4', '3.8.0'], ['3.8.0'], ['5.0.3', '5.3'], ['5.3'])

    def test_get_compatible_php_versions_table_header_string(self):
        """Old PHP columns are dropped from the header; only 7.1/7.2 remain."""
        new_table_row = self.changelog_updater.get_compatible_php_versions_table_header_string(
            "| Overview                              "
            "| Woocommerce and Wordpress version                                "
            "| PHP 5.6 | PHP 7.0 | PHP 7.1 | PHP 7.2 |")
        self.assertEqual(new_table_row,
                         "| Overview                              "
                         "| Woocommerce and Wordpress version                                "
                         "| PHP 7.1 | PHP 7.2 |")

    def test_get_tested_php_versions_table_string(self):
        """Tested-PHP row shrinks to one :x: cell plus the tested check mark."""
        new_table_row = self. \
            changelog_updater. \
            get_tested_php_versions_table_string("| Woocommerce version 3.8.0, Wordpress version 5.3        "
                                                 "| :x:     "
                                                 "| :x:     "
                                                 "| :x:     "
                                                 "| " + u"\u2705" + "      |")
        self.assertEqual(new_table_row,
                         "| Woocommerce version 3.8.0, Wordpress version 5.3        "
                         "| :x:     "
                         "| " + u"\u2705" + "      |")

    def test_get_compatible_php_versions_table_string(self):
        """Compatible-PHP row keeps only the two supported PHP columns."""
        new_table_row = self. \
            changelog_updater. \
            get_compatible_php_versions_table_string("| Woocommerce version 3.3.4 - 3.8.0, Wordpress version 5.0.3 - "
                                                     "5.3 "
                                                     "| " + u"\u2705" + "      "
                                                     "| " + u"\u2705" + "      "
                                                     "| " + u"\u2705" + "      "
                                                     "| " + u"\u2705" + "      |")
        self.assertEqual("| Woocommerce version 3.3.4 - 3.8.0, Wordpress version 5.0.3 - 5.3 "
                         "| " + u"\u2705" + "      "
                         "| " + u"\u2705" + "      |", new_table_row)

    def test_get_tested_shopsystem_and_platform_versions_table_string_no_platform(self):
        """Shop version in the tested row is replaced (no platform given)."""
        new_table_row = self. \
            changelog_updater. \
            get_tested_shopsystem_and_platform_versions_table_string("| Woocommerce version 1.1.1        "
                                                                     "| :x:     "
                                                                     "| :x:     "
                                                                     "| :x:     "
                                                                     "| " + u"\u2705" + "      |")
        self.assertEqual("| Woocommerce version 3.8.0 "
                         "| :x:     "
                         "| :x:     "
                         "| :x:     "
                         "| " + u"\u2705" + "      |", new_table_row)

    def test_get_compatibility_shopsystem_and_platform_versions_table_string_no_platform(self):
        """Shop version range in the compatibility row is replaced (no platform)."""
        new_table_row = self. \
            changelog_updater. \
            get_compatibility_shopsystem_and_platform_versions_table_string("| Woocommerce version 1.1.1 - 2.2.2 "
                                                                            "| " + u"\u2705" + "      "
                                                                            "| " + u"\u2705" + "      "
                                                                            "| " + u"\u2705" + "      "
                                                                            "| " + u"\u2705" + "      |")
        self.assertEqual("| Woocommerce version 3.3.4 - 3.8.0 "
                         "| " + u"\u2705" + "      "
                         "| " + u"\u2705" + "      "
                         "| " + u"\u2705" + "      "
                         "| " + u"\u2705" + "      |", new_table_row)

    def test_get_tested_shopsystem_and_platform_versions_table_string_with_platform(self):
        """Shop and platform versions in the tested row are both replaced."""
        new_table_row = self. \
            changelog_updater_with_platform. \
            get_tested_shopsystem_and_platform_versions_table_string("| Woocommerce version 1.1.1,"
                                                                     " Wordpress version 2.3        "
                                                                     "| :x:     "
                                                                     "| :x:     "
                                                                     "| :x:     "
                                                                     "| " + u"\u2705" + "      |")
        self.assertEqual("| Woocommerce version 3.8.0, Wordpress version 5.3 "
                         "| :x:     "
                         "| :x:     "
                         "| :x:     "
                         "| " + u"\u2705" + "      |", new_table_row)

    def test_get_compatiblity_shopsystem_and_platform_versions_table_string_with_platform(self):
        """Shop and platform ranges in the compatibility row are both replaced."""
        new_table_row = self. \
            changelog_updater_with_platform. \
            get_compatibility_shopsystem_and_platform_versions_table_string("| Woocommerce version 1.1.1 - 2.2.2, "
                                                                            "Wordpress version 3.3.3 - 4.4.4 "
                                                                            "| " + u"\u2705" + "      "
                                                                            "| " + u"\u2705" + "      "
                                                                            "| " + u"\u2705" + "      "
                                                                            "| " + u"\u2705" + "      |")
        self.assertEqual("| Woocommerce version 3.3.4 - 3.8.0,"
                         " Wordpress version 5.0.3 - 5.3 "
                         "| " + u"\u2705" + "      "
                         "| " + u"\u2705" + "      "
                         "| " + u"\u2705" + "      "
                         "| " + u"\u2705" + "      |", new_table_row)

    def test_get_row_separator_table_row(self):
        """The markdown separator row loses the dropped columns too."""
        new_table_row = self.changelog_updater.get_row_separator_table_row(
            "|-----------------------"
            "|------------------------------------------------------------------"
            "|:-------:"
            "|:-------:"
            "|:-------:"
            "|:-------:|")
        self.assertEqual(new_table_row,
                         "|-----------------------"
                         "|------------------------------------------------------------------"
                         "|:-------:"
                         "|:-------:"
                         "|:-------:|")
| 60.7
| 134
| 0.337589
| 536
| 7,284
| 4.289179
| 0.097015
| 0.073075
| 0.076555
| 0.083515
| 0.905611
| 0.863854
| 0.797738
| 0.753371
| 0.724228
| 0.721618
| 0
| 0.073648
| 0.52279
| 7,284
| 119
| 135
| 61.210084
| 0.587745
| 0
| 0
| 0.669725
| 0
| 0.027523
| 0.235448
| 0.024986
| 0
| 0
| 0
| 0
| 0.073395
| 1
| 0.082569
| false
| 0
| 0.018349
| 0
| 0.110092
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b74172faee86cee11772cbf669f9ac52a4d7d12d
| 97
|
py
|
Python
|
pltools/train/__init__.py
|
PhoenixDL/PytorchLightningTools
|
86185062d4792e6d5eae002a5594bb7b900106a1
|
[
"MIT"
] | 3
|
2020-05-18T06:34:52.000Z
|
2020-07-17T07:11:57.000Z
|
pltools/train/__init__.py
|
PhoenixDL/PytorchLightningTools
|
86185062d4792e6d5eae002a5594bb7b900106a1
|
[
"MIT"
] | 6
|
2021-06-25T18:21:06.000Z
|
2021-06-25T18:21:32.000Z
|
pltools/train/__init__.py
|
PhoenixDL/PytorchLightningTools
|
86185062d4792e6d5eae002a5594bb7b900106a1
|
[
"MIT"
] | 1
|
2020-05-18T06:34:56.000Z
|
2020-05-18T06:34:56.000Z
|
from pltools.train.module import Module
from pltools.train.lr_find import lr_find, plot_lr_curve
| 32.333333
| 56
| 0.85567
| 17
| 97
| 4.647059
| 0.529412
| 0.278481
| 0.405063
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.092784
| 97
| 2
| 57
| 48.5
| 0.897727
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
3ff471f6caa3e8921fb15437227211c7b3c5897e
| 12,643
|
py
|
Python
|
users/arxiv/users/auth/sessions/tests/test_unit.py
|
cul-it/arxiv-accounts
|
9d237ffc7de4ac7f3c94ad615252681792f53fb5
|
[
"MIT"
] | 11
|
2018-12-29T17:55:16.000Z
|
2021-11-05T12:26:29.000Z
|
users/arxiv/users/auth/sessions/tests/test_unit.py
|
Https-github-com-sulaeman51/arxiv-auth
|
754fa083b0c8b43932f7393f5a4ab67d9f9f0444
|
[
"MIT"
] | 24
|
2019-01-25T18:19:21.000Z
|
2022-02-04T01:04:29.000Z
|
users/arxiv/users/auth/sessions/tests/test_unit.py
|
Https-github-com-sulaeman51/arxiv-auth
|
754fa083b0c8b43932f7393f5a4ab67d9f9f0444
|
[
"MIT"
] | 13
|
2019-01-10T22:01:43.000Z
|
2021-12-30T11:39:48.000Z
|
"""Tests for :mod:`arxiv.users.auth.sessions.store`."""
from unittest import TestCase, mock
import time
import jwt
import json
from datetime import datetime, timedelta
from pytz import timezone, UTC
from redis.exceptions import ConnectionError
from .... import domain
from .. import store
EASTERN = timezone('US/Eastern')
class TestDistributedSessionService(TestCase):
    """The store session service puts sessions in a key-value store."""
    @mock.patch(f'{store.__name__}.get_application_config')
    @mock.patch(f'{store.__name__}.rediscluster')
    def test_create(self, mock_redis, mock_get_config):
        """Accept a :class:`.User` and returns a :class:`.Session`."""
        # Supply the JWT secret the store reads from application config.
        mock_get_config.return_value = {'JWT_SECRET': 'foosecret'}
        # Point the mocked rediscluster module's exception at the real
        # ConnectionError class so the store's except-clauses still work.
        mock_redis.exceptions.ConnectionError = ConnectionError
        mock_redis_connection = mock.MagicMock()
        mock_redis.StrictRedisCluster.return_value = mock_redis_connection
        ip = '127.0.0.1'
        remote_host = 'foo-host.foo.com'
        user = domain.User(
            user_id='1',
            username='theuser',
            email='the@user.com'
        )
        auths = domain.Authorizations(
            classic=2,
            scopes=['foo:write'],
            endorsements=[]
        )
        r = store.SessionStore('localhost', 7000, 0, 'foosecret')
        session = r.create(auths, ip, remote_host, user=user)
        cookie = r.generate_cookie(session)
        self.assertIsInstance(session, domain.Session)
        self.assertTrue(bool(session.session_id))
        self.assertIsNotNone(cookie)
        # Exactly one write to the (mocked) Redis cluster is expected.
        self.assertEqual(mock_redis_connection.set.call_count, 1)
    @mock.patch(f'{store.__name__}.get_application_config')
    @mock.patch(f'{store.__name__}.rediscluster')
    def test_delete(self, mock_redis, mock_get_config):
        """Delete a session from the datastore."""
        mock_get_config.return_value = {'JWT_SECRET': 'foosecret'}
        mock_redis.exceptions.ConnectionError = ConnectionError
        mock_redis_connection = mock.MagicMock()
        mock_redis.StrictRedisCluster.return_value = mock_redis_connection
        r = store.SessionStore('localhost', 7000, 0, 'foosecret')
        r.delete_by_id('fookey')
        # Deleting by id issues exactly one Redis delete.
        self.assertEqual(mock_redis_connection.delete.call_count, 1)
    @mock.patch(f'{store.__name__}.get_application_config')
    @mock.patch(f'{store.__name__}.rediscluster')
    def test_connection_failed(self, mock_redis, mock_get_config):
        """:class:`.SessionCreationFailed` is raised when creation fails."""
        mock_get_config.return_value = {'JWT_SECRET': 'foosecret'}
        mock_redis.exceptions.ConnectionError = ConnectionError
        mock_redis_connection = mock.MagicMock()
        # Simulate the Redis write failing with a connection error.
        mock_redis_connection.set.side_effect = ConnectionError
        mock_redis.StrictRedisCluster.return_value = mock_redis_connection
        ip = '127.0.0.1'
        remote_host = 'foo-host.foo.com'
        user = domain.User(
            user_id='1',
            username='theuser',
            email='the@user.com'
        )
        auths = domain.Authorizations(
            classic=2,
            scopes=['foo:write'],
            endorsements=[]
        )
        r = store.SessionStore('localhost', 7000, 0, 'foosecret')
        # The store should translate the Redis failure into its own error.
        with self.assertRaises(store.SessionCreationFailed):
            r.create(auths, ip, remote_host, user=user)
class TestGetSession(TestCase):
    """Tests for :func:`store.SessionStore.current_session().load`.

    Each test patches the application config (so the store sees a known
    JWT secret) and the redis cluster client (so no network is needed).
    """
    @mock.patch(f'{store.__name__}.get_application_config')
    @mock.patch(f'{store.__name__}.rediscluster.StrictRedisCluster')
    def test_not_a_token(self, mock_get_redis, mock_get_config):
        """Something other than a JWT is passed."""
        mock_get_config.return_value = {
            'JWT_SECRET': 'barsecret',
            'REDIS_HOST': 'redis',
            'REDIS_PORT': '1234',
            'REDIS_DATABASE': 4
        }
        mock_redis = mock.MagicMock()
        mock_get_redis.return_value = mock_redis
        # A plain string cannot even be decoded as a JWT.
        with self.assertRaises(store.InvalidToken):
            store.SessionStore.current_session().load('notatoken')
    @mock.patch(f'{store.__name__}.get_application_config')
    @mock.patch(f'{store.__name__}.rediscluster.StrictRedisCluster')
    def test_malformed_token(self, mock_get_redis, mock_get_config):
        """A JWT with missing claims is passed."""
        secret = 'barsecret'
        mock_get_config.return_value = {
            'JWT_SECRET': secret,
            'REDIS_HOST': 'redis',
            'REDIS_PORT': '1234',
            'REDIS_DATABASE': 4
        }
        mock_redis = mock.MagicMock()
        mock_get_redis.return_value = mock_redis
        required_claims = ['session_id', 'nonce']
        # Drop each required claim in turn; every variant must be rejected.
        for exc in required_claims:
            claims = {claim: '' for claim in required_claims if claim != exc}
            malformed_token = jwt.encode(claims, secret).decode('ascii')
            with self.assertRaises(store.InvalidToken):
                store.SessionStore.current_session().load(malformed_token)
    @mock.patch(f'{store.__name__}.get_application_config')
    @mock.patch(f'{store.__name__}.rediscluster.StrictRedisCluster')
    def test_token_with_bad_encryption(self, mock_get_redis, mock_get_config):
        """A JWT produced with a different secret is passed."""
        secret = 'barsecret'
        mock_get_config.return_value = {
            'JWT_SECRET': secret,
            'REDIS_HOST': 'redis',
            'REDIS_PORT': '1234',
            'REDIS_DATABASE': 4
        }
        mock_redis = mock.MagicMock()
        mock_get_redis.return_value = mock_redis
        start_time = datetime.now(tz=UTC)
        end_time = start_time + timedelta(seconds=7200)
        claims = {
            'user_id': '1234',
            'session_id': 'ajx9043jjx00s',
            'nonce': '0039299290099',
            'expires': end_time.isoformat()
        }
        # Signed with the wrong secret: signature verification must fail.
        bad_token = jwt.encode(claims, 'nottherightsecret').decode('ascii')
        with self.assertRaises(store.InvalidToken):
            store.SessionStore.current_session().load(bad_token)
    @mock.patch(f'{store.__name__}.get_application_config')
    @mock.patch(f'{store.__name__}.rediscluster.StrictRedisCluster')
    def test_expired_token(self, mock_get_redis, mock_get_config):
        """An expired JWT is passed."""
        secret = 'barsecret'
        mock_get_config.return_value = {
            'JWT_SECRET': secret,
            'REDIS_HOST': 'redis',
            'REDIS_PORT': '1234',
            'REDIS_DATABASE': 4
        }
        mock_redis = mock.MagicMock()
        start_time = datetime.now(tz=UTC)
        # Stored session record whose 'expires' is "now", i.e. already lapsed.
        mock_redis.get.return_value = json.dumps({
            'user_id': '1234',
            'session_id': 'ajx9043jjx00s',
            'nonce': '0039299290099',
            'expires': start_time.isoformat(),
        })
        mock_get_redis.return_value = mock_redis
        claims = {
            'user_id': '1234',
            'session_id': 'ajx9043jjx00s',
            'nonce': '0039299290099',
            'expires': start_time.isoformat(),
        }
        expired_token = jwt.encode(claims, secret).decode('ascii')
        with self.assertRaises(store.InvalidToken):
            store.SessionStore.current_session().load(expired_token)
    @mock.patch(f'{store.__name__}.get_application_config')
    @mock.patch(f'{store.__name__}.rediscluster.StrictRedisCluster')
    def test_forged_token(self, mock_get_redis, mock_get_config):
        """A JWT with the wrong nonce is passed."""
        start_time = datetime.now(tz=UTC)
        end_time = start_time + timedelta(seconds=7200)
        secret = 'barsecret'
        mock_get_config.return_value = {
            'JWT_SECRET': secret,
            'REDIS_HOST': 'redis',
            'REDIS_PORT': '1234',
            'REDIS_DATABASE': 4
        }
        mock_redis = mock.MagicMock()
        # Stored session record carries nonce ...098.
        mock_redis.get.return_value = jwt.encode({
            'session_id': 'ajx9043jjx00s',
            'nonce': '0039299290098',
            'start_time': start_time.isoformat(),
            'end_time': end_time.isoformat(),
            'user': {
                'user_id': '1235',
                'username': 'foouser',
                'email': 'foo@foo.com'
            }
        }, secret)
        mock_get_redis.return_value = mock_redis
        claims = {
            'user_id': '1234',
            'session_id': 'ajx9043jjx00s',
            'nonce': '0039299290099',  # <- Doesn't match!
            'expires': end_time.isoformat(),
        }
        expired_token = jwt.encode(claims, secret).decode('ascii')
        with self.assertRaises(store.InvalidToken):
            store.SessionStore.current_session().load(expired_token)
    @mock.patch(f'{store.__name__}.get_application_config')
    @mock.patch(f'{store.__name__}.rediscluster.StrictRedisCluster')
    def test_other_forged_token(self, mock_get_redis, mock_get_config):
        """A JWT with the wrong user_id is passed."""
        start_time = datetime.now(tz=UTC)
        end_time = start_time + timedelta(seconds=7200)
        secret = 'barsecret'
        mock_get_config.return_value = {
            'JWT_SECRET': secret,
            'REDIS_HOST': 'redis',
            'REDIS_PORT': '1234',
            'REDIS_DATABASE': 4
        }
        mock_redis = mock.MagicMock()
        # Stored session record belongs to user 1235.
        mock_redis.get.return_value = jwt.encode({
            'session_id': 'ajx9043jjx00s',
            'nonce': '0039299290099',
            'start_time': start_time.isoformat(),
            'user': {
                'user_id': '1235',
                'username': 'foouser',
                'email': 'foo@foo.com'
            }
        }, secret)
        mock_get_redis.return_value = mock_redis
        claims = {
            'user_id': '1234',  # <- Doesn't match!
            'session_id': 'ajx9043jjx00s',
            'nonce': '0039299290099',
            'expires': end_time.isoformat(),
        }
        expired_token = jwt.encode(claims, secret).decode('ascii')
        with self.assertRaises(store.InvalidToken):
            store.SessionStore.current_session().load(expired_token)
    @mock.patch(f'{store.__name__}.get_application_config')
    @mock.patch(f'{store.__name__}.rediscluster.StrictRedisCluster')
    def test_empty_session(self, mock_get_redis, mock_get_config):
        """Session has been removed, or may never have existed."""
        start_time = datetime.now(tz=UTC)
        end_time = start_time + timedelta(seconds=7200)
        secret = 'barsecret'
        mock_get_config.return_value = {
            'JWT_SECRET': secret,
            'REDIS_HOST': 'redis',
            'REDIS_PORT': '1234',
            'REDIS_DATABASE': 4
        }
        mock_redis = mock.MagicMock()
        mock_redis.get.return_value = ''  # <- Empty record!
        mock_get_redis.return_value = mock_redis
        claims = {
            'user_id': '1234',
            'session_id': 'ajx9043jjx00s',
            'nonce': '0039299290099',
            'expires': end_time.isoformat(),
        }
        expired_token = jwt.encode(claims, secret).decode('ascii')
        # Missing record => UnknownSession rather than InvalidToken.
        with self.assertRaises(store.UnknownSession):
            store.SessionStore.current_session().load(expired_token)
    @mock.patch(f'{store.__name__}.get_application_config')
    @mock.patch(f'{store.__name__}.rediscluster.StrictRedisCluster')
    def test_valid_token(self, mock_get_redis, mock_get_config):
        """A valid token is passed."""
        start_time = datetime.now(tz=UTC)
        end_time = start_time + timedelta(seconds=7200)
        secret = 'barsecret'
        mock_get_config.return_value = {
            'JWT_SECRET': secret,
            'REDIS_HOST': 'redis',
            'REDIS_PORT': '1234',
            'REDIS_DATABASE': 4
        }
        mock_redis = mock.MagicMock()
        # Stored record and token agree on session_id, nonce and user.
        mock_redis.get.return_value = jwt.encode({
            'session_id': 'ajx9043jjx00s',
            'start_time': datetime.now(tz=UTC).isoformat(),
            'nonce': '0039299290098',
            'user': {
                'user_id': '1234',
                'username': 'foouser',
                'email': 'foo@foo.com'
            }
        }, secret)
        mock_get_redis.return_value = mock_redis
        claims = {
            'user_id': '1234',
            'session_id': 'ajx9043jjx00s',
            'nonce': '0039299290098',
            'expires': end_time.isoformat(),
        }
        valid_token = jwt.encode(claims, secret).decode('ascii')
        session = store.SessionStore.current_session().load(valid_token)
        self.assertIsInstance(session, domain.Session, "Returns a session")
| 39.633229
| 78
| 0.610456
| 1,355
| 12,643
| 5.39262
| 0.126199
| 0.048036
| 0.030108
| 0.045162
| 0.829479
| 0.785548
| 0.768031
| 0.748871
| 0.735459
| 0.730669
| 0
| 0.034806
| 0.265997
| 12,643
| 318
| 79
| 39.757862
| 0.752586
| 0.056474
| 0
| 0.732852
| 0
| 0
| 0.205466
| 0.075911
| 0
| 0
| 0
| 0
| 0.050542
| 1
| 0.039711
| false
| 0
| 0.032491
| 0
| 0.079422
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b75ef27e7bb2ea63dbd0acf3db845f8736cb6c8e
| 65
|
py
|
Python
|
bloc_client/internal/gen_uuid.py
|
fBloc/bloc-client-python
|
68b61610db0d0a30ba21807a18b4c81db8327500
|
[
"MIT"
] | null | null | null |
bloc_client/internal/gen_uuid.py
|
fBloc/bloc-client-python
|
68b61610db0d0a30ba21807a18b4c81db8327500
|
[
"MIT"
] | null | null | null |
bloc_client/internal/gen_uuid.py
|
fBloc/bloc-client-python
|
68b61610db0d0a30ba21807a18b4c81db8327500
|
[
"MIT"
] | null | null | null |
import uuid


def new_uuid() -> str:
    """Return a fresh universally-unique identifier as a string.

    Uses :func:`uuid.uuid4` (random) rather than the original
    :func:`uuid.uuid1`: uuid1 embeds the host's MAC address and a
    timestamp, which leaks host information and makes identifiers
    partially predictable. uuid4 keeps the same string format and
    uniqueness contract for callers.
    """
    return str(uuid.uuid4())
| 13
| 28
| 0.646154
| 10
| 65
| 4.1
| 0.7
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019231
| 0.2
| 65
| 4
| 29
| 16.25
| 0.769231
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
b78433f3b076b48786be15618cccd2135a098dad
| 510
|
py
|
Python
|
qt5ui_to_pyui/pyuic_powershell.py
|
DvaMishkiLapa/SmallScripts
|
4be08c95a1341df5cd9014cd9359e206977dd407
|
[
"Apache-2.0"
] | null | null | null |
qt5ui_to_pyui/pyuic_powershell.py
|
DvaMishkiLapa/SmallScripts
|
4be08c95a1341df5cd9014cd9359e206977dd407
|
[
"Apache-2.0"
] | null | null | null |
qt5ui_to_pyui/pyuic_powershell.py
|
DvaMishkiLapa/SmallScripts
|
4be08c95a1341df5cd9014cd9359e206977dd407
|
[
"Apache-2.0"
] | null | null | null |
import os

# Names of the Qt Designer .ui files (under .\ui\) to convert into
# executable PyQt5 Python modules via pyuic.
_UI_NAMES = (
    'main_window',
    'login_stack',
    'add_inproject_dialog',
    'add_new_user_dialog',
    'add_new_project_dialog',
)

for _name in _UI_NAMES:
    # Same command string as before, one invocation per .ui file.
    os.system(
        "python -m PyQt5.uic.pyuic -x .\\ui\\{0}.ui -o .\\ui\\{0}.py".format(_name)
    )
| 85
| 110
| 0.713725
| 97
| 510
| 3.546392
| 0.226804
| 0.087209
| 0.203488
| 0.218023
| 0.773256
| 0.651163
| 0.552326
| 0.552326
| 0.552326
| 0.462209
| 0
| 0.010549
| 0.070588
| 510
| 6
| 110
| 85
| 0.71519
| 0
| 0
| 0
| 0
| 0.833333
| 0.843444
| 0.520548
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.166667
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b7963e03bf9ded666c376f2a1c532c59515b86ce
| 4,468
|
py
|
Python
|
dingtalk/python/alibabacloud_dingtalk/flashmeeting_1_0/client.py
|
aliyun/dingtalk-sdk
|
ab4f856b8cfe94f6b69f10a0730a2e5a7d4901c5
|
[
"Apache-2.0"
] | 15
|
2020-08-27T04:10:26.000Z
|
2022-03-07T06:25:42.000Z
|
dingtalk/python/alibabacloud_dingtalk/flashmeeting_1_0/client.py
|
aliyun/dingtalk-sdk
|
ab4f856b8cfe94f6b69f10a0730a2e5a7d4901c5
|
[
"Apache-2.0"
] | 1
|
2020-09-27T01:30:46.000Z
|
2021-12-29T09:15:34.000Z
|
dingtalk/python/alibabacloud_dingtalk/flashmeeting_1_0/client.py
|
aliyun/dingtalk-sdk
|
ab4f856b8cfe94f6b69f10a0730a2e5a7d4901c5
|
[
"Apache-2.0"
] | 5
|
2020-08-27T04:07:44.000Z
|
2021-12-03T02:55:20.000Z
|
# -*- coding: utf-8 -*-
# This file is auto-generated, don't edit it. Thanks.
from Tea.core import TeaCore
from alibabacloud_tea_openapi.client import Client as OpenApiClient
from alibabacloud_tea_openapi import models as open_api_models
from alibabacloud_tea_util.client import Client as UtilClient
from alibabacloud_dingtalk.flashmeeting_1_0 import models as dingtalkflashmeeting__1__0_models
from alibabacloud_tea_util import models as util_models
from alibabacloud_openapi_util.client import Client as OpenApiUtilClient
class Client(OpenApiClient):
    """Auto-generated DingTalk ``flashmeeting_1.0`` OpenAPI client.

    NOTE: this file is machine-generated ("don't edit it" per the file
    header); regenerate it instead of changing the logic by hand.
    """
    def __init__(
        self,
        config: open_api_models.Config,
    ):
        # Delegate setup to the OpenAPI base client, then pin the endpoint.
        super().__init__(config)
        self._endpoint_rule = ''
        # Fall back to the public DingTalk API host when no endpoint is set.
        if UtilClient.empty(self._endpoint):
            self._endpoint = 'api.dingtalk.com'
    def create_flash_meeting(
        self,
        request: dingtalkflashmeeting__1__0_models.CreateFlashMeetingRequest,
    ) -> dingtalkflashmeeting__1__0_models.CreateFlashMeetingResponse:
        """Create a flash meeting with default headers and runtime options."""
        runtime = util_models.RuntimeOptions()
        headers = dingtalkflashmeeting__1__0_models.CreateFlashMeetingHeaders()
        return self.create_flash_meeting_with_options(request, headers, runtime)
    async def create_flash_meeting_async(
        self,
        request: dingtalkflashmeeting__1__0_models.CreateFlashMeetingRequest,
    ) -> dingtalkflashmeeting__1__0_models.CreateFlashMeetingResponse:
        """Async variant of :meth:`create_flash_meeting`."""
        runtime = util_models.RuntimeOptions()
        headers = dingtalkflashmeeting__1__0_models.CreateFlashMeetingHeaders()
        return await self.create_flash_meeting_with_options_async(request, headers, runtime)
    def create_flash_meeting_with_options(
        self,
        request: dingtalkflashmeeting__1__0_models.CreateFlashMeetingRequest,
        headers: dingtalkflashmeeting__1__0_models.CreateFlashMeetingHeaders,
        runtime: util_models.RuntimeOptions,
    ) -> dingtalkflashmeeting__1__0_models.CreateFlashMeetingResponse:
        """POST ``/v1.0/flashmeeting/meetings`` with explicit headers/runtime."""
        UtilClient.validate_model(request)
        # Only fields the caller actually set make it into the request body.
        body = {}
        if not UtilClient.is_unset(request.event_id):
            body['eventId'] = request.event_id
        if not UtilClient.is_unset(request.title):
            body['title'] = request.title
        if not UtilClient.is_unset(request.creator):
            body['creator'] = request.creator
        # Merge caller-supplied common headers with the access token header.
        real_headers = {}
        if not UtilClient.is_unset(headers.common_headers):
            real_headers = headers.common_headers
        if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
            real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
        req = open_api_models.OpenApiRequest(
            headers=real_headers,
            body=OpenApiUtilClient.parse_to_map(body)
        )
        return TeaCore.from_map(
            dingtalkflashmeeting__1__0_models.CreateFlashMeetingResponse(),
            self.do_roarequest('CreateFlashMeeting', 'flashmeeting_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/flashmeeting/meetings', 'json', req, runtime)
        )
    async def create_flash_meeting_with_options_async(
        self,
        request: dingtalkflashmeeting__1__0_models.CreateFlashMeetingRequest,
        headers: dingtalkflashmeeting__1__0_models.CreateFlashMeetingHeaders,
        runtime: util_models.RuntimeOptions,
    ) -> dingtalkflashmeeting__1__0_models.CreateFlashMeetingResponse:
        """Async variant of :meth:`create_flash_meeting_with_options`."""
        UtilClient.validate_model(request)
        # Only fields the caller actually set make it into the request body.
        body = {}
        if not UtilClient.is_unset(request.event_id):
            body['eventId'] = request.event_id
        if not UtilClient.is_unset(request.title):
            body['title'] = request.title
        if not UtilClient.is_unset(request.creator):
            body['creator'] = request.creator
        # Merge caller-supplied common headers with the access token header.
        real_headers = {}
        if not UtilClient.is_unset(headers.common_headers):
            real_headers = headers.common_headers
        if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
            real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
        req = open_api_models.OpenApiRequest(
            headers=real_headers,
            body=OpenApiUtilClient.parse_to_map(body)
        )
        return TeaCore.from_map(
            dingtalkflashmeeting__1__0_models.CreateFlashMeetingResponse(),
            await self.do_roarequest_async('CreateFlashMeeting', 'flashmeeting_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/flashmeeting/meetings', 'json', req, runtime)
        )
| 46.061856
| 160
| 0.715309
| 480
| 4,468
| 6.260417
| 0.189583
| 0.01198
| 0.109817
| 0.139767
| 0.808319
| 0.774376
| 0.716805
| 0.716805
| 0.713478
| 0.713478
| 0
| 0.011536
| 0.204566
| 4,468
| 96
| 161
| 46.541667
| 0.83399
| 0.017905
| 0
| 0.678571
| 1
| 0
| 0.058985
| 0.024691
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035714
| false
| 0
| 0.083333
| 0
| 0.178571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4d56699cf4392b0a9040899c985562b77a640eb1
| 4,780
|
py
|
Python
|
saleor/dashboard/credit/hours_chart.py
|
glosoftgroup/glosoftgroup-django-pos
|
b489c402939b9ebabd164c449e7da38fe849d550
|
[
"BSD-3-Clause"
] | 2
|
2017-07-11T12:40:59.000Z
|
2017-10-18T18:02:46.000Z
|
saleor/dashboard/credit/hours_chart.py
|
glosoftgroup/glosoftgroup-django-pos
|
b489c402939b9ebabd164c449e7da38fe849d550
|
[
"BSD-3-Clause"
] | 12
|
2017-06-19T07:20:41.000Z
|
2022-03-15T19:03:33.000Z
|
saleor/dashboard/credit/hours_chart.py
|
glosoftgroup/glosoftgroup-django-pos
|
b489c402939b9ebabd164c449e7da38fe849d550
|
[
"BSD-3-Clause"
] | null | null | null |
from django.db.models import Sum
from django.core.exceptions import ObjectDoesNotExist
from ...sale.models import Sales, SoldItem
from structlog import get_logger
logger = get_logger(__name__)
def get_hours_results(date, l, h):
    """Total ``total_net`` of sales created on ``date`` within hours ``l``..``h``.

    Returns 0 when there are no matching sales.
    """
    logger.info('get_hours_results')
    try:
        sales_at_date = Sales.objects.filter(created__contains=date)
        sales_at_h = sales_at_date.filter(created__hour__range=[l, h])
        # aggregate() yields {'total_net__sum': None} on an empty queryset,
        # so coerce None to 0 instead of the original duplicated branches.
        amount = Sales.objects.filter(pk__in=sales_at_h).aggregate(
            Sum('total_net'))['total_net__sum']
        return amount if amount is not None else 0
    except ObjectDoesNotExist:
        # Kept from the original as a defensive guard; filter()/aggregate()
        # do not raise ObjectDoesNotExist themselves.
        return 0
def get_hours_results_range(date_from, date_to, l, h):
    """Total ``total_net`` of sales in [``date_from``, ``date_to``] within hours ``l``..``h``.

    Returns 0 when there are no matching sales.
    """
    logger.info('get_hours_results_range')
    try:
        sales_at_date = Sales.objects.filter(created__range=[date_from, date_to])
        sales_at_h = sales_at_date.filter(created__hour__range=[l, h])
        # aggregate() yields {'total_net__sum': None} on an empty queryset,
        # so coerce None to 0 instead of the original duplicated branches.
        amount = Sales.objects.filter(pk__in=sales_at_h).aggregate(
            Sum('total_net'))['total_net__sum']
        return amount if amount is not None else 0
    except ObjectDoesNotExist:
        # Kept from the original as a defensive guard; filter()/aggregate()
        # do not raise ObjectDoesNotExist themselves.
        return 0
def get_date_results_range(date_from, date_to):
    """Total ``total_net`` of sales created in [``date_from``, ``date_to``].

    Returns 0 when there are no matching sales.
    """
    logger.info('get_date_results_range')
    try:
        sales_at_date = Sales.objects.filter(created__range=[date_from, date_to])
        # aggregate() yields {'total_net__sum': None} on an empty queryset,
        # so coerce None to 0 instead of the original duplicated branches.
        amount = Sales.objects.filter(pk__in=sales_at_date).aggregate(
            Sum('total_net'))['total_net__sum']
        return amount if amount is not None else 0
    except ObjectDoesNotExist:
        # Kept from the original as a defensive guard; filter()/aggregate()
        # do not raise ObjectDoesNotExist themselves.
        return 0
def get_date_results(date):
    """Total ``total_net`` of sales created on ``date``.

    Returns 0 when there are no matching sales.
    """
    logger.info('get_date_results')
    try:
        sales_at_date = Sales.objects.filter(created__contains=date)
        # aggregate() yields {'total_net__sum': None} on an empty queryset,
        # so coerce None to 0 instead of the original duplicated branches.
        amount = Sales.objects.filter(pk__in=sales_at_date).aggregate(
            Sum('total_net'))['total_net__sum']
        return amount if amount is not None else 0
    except ObjectDoesNotExist:
        # Kept from the original as a defensive guard; filter()/aggregate()
        # do not raise ObjectDoesNotExist themselves.
        return 0
def get_category_results(category, year, month):
    """Total ``total_cost`` of sold items in ``category`` for ``year``/``month``.

    Returns 0 when there are no matching items.
    """
    logger.info('get_category_results')
    try:
        total = SoldItem.objects.filter(
            product_category__contains=category,
            sales__created__year=year,
            sales__created__month=month,
        ).aggregate(Sum('total_cost'))['total_cost__sum']
        if total is None:
            return 0
        return total
    except ObjectDoesNotExist:
        return 0
def get_item_results(item, year, month):
    """Total ``total_cost`` of sold items named ``item`` for ``year``/``month``.

    Returns 0 when there are no matching items.
    """
    logger.info('get_item_results')
    try:
        total = SoldItem.objects.filter(
            product_name__contains=item,
            sales__created__year=year,
            sales__created__month=month,
        ).aggregate(Sum('total_cost'))['total_cost__sum']
        if total is None:
            return 0
        return total
    except ObjectDoesNotExist:
        return 0
def get_user_results(user, year, month):
    """Total ``total_net`` of sales made by ``user`` for ``year``/``month``.

    Returns 0 when there are no matching sales.
    """
    logger.info('get_user_results')
    try:
        total = Sales.objects.filter(
            user__name__contains=user,
            created__year=year,
            created__month=month,
        ).aggregate(Sum('total_net'))['total_net__sum']
        if total is None:
            return 0
        return total
    except ObjectDoesNotExist:
        return 0
def get_terminal_results(terminal, year, month):
    """Total ``total_net`` of sales on ``terminal`` for ``year``/``month``.

    Returns 0 when there are no matching sales.
    """
    logger.info('get_terminal_results')
    try:
        total = Sales.objects.filter(
            terminal__terminal_name__contains=terminal,
            created__year=year,
            created__month=month,
        ).aggregate(Sum('total_net'))['total_net__sum']
        if total is None:
            return 0
        return total
    except ObjectDoesNotExist:
        return 0
| 32.517007
| 118
| 0.588703
| 528
| 4,780
| 5.00947
| 0.102273
| 0.127032
| 0.098299
| 0.143667
| 0.855577
| 0.809452
| 0.76673
| 0.718336
| 0.718336
| 0.718336
| 0
| 0.006325
| 0.338494
| 4,780
| 146
| 119
| 32.739726
| 0.830171
| 0
| 0
| 0.787402
| 0
| 0
| 0.072939
| 0.009711
| 0
| 0
| 0
| 0
| 0
| 1
| 0.062992
| false
| 0
| 0.031496
| 0
| 0.314961
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4d5ef040f41b4f06d7b5dfbb45743a29ba2fbd8e
| 36
|
py
|
Python
|
FlaskRESTFULAPITest_JE/JEAPIs/__init__.py
|
JE-Chen/je_old_repo
|
a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5
|
[
"MIT"
] | null | null | null |
FlaskRESTFULAPITest_JE/JEAPIs/__init__.py
|
JE-Chen/je_old_repo
|
a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5
|
[
"MIT"
] | null | null | null |
FlaskRESTFULAPITest_JE/JEAPIs/__init__.py
|
JE-Chen/je_old_repo
|
a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5
|
[
"MIT"
] | null | null | null |
from JEAPIs.APIBlueprints import *
| 18
| 35
| 0.805556
| 4
| 36
| 7.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.138889
| 36
| 1
| 36
| 36
| 0.935484
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
|
0
| 7
|
4d8231726af6b05e036c94f87bcaf8d6a46829f3
| 86,379
|
py
|
Python
|
homework/austen.py
|
mm5110/PIC16A
|
e2dab91439c2627f6a47f4bf6d16de8ba5977fe8
|
[
"MIT"
] | 10
|
2020-11-07T04:07:34.000Z
|
2021-12-31T10:19:12.000Z
|
homework/austen.py
|
mm5110/PIC16A
|
e2dab91439c2627f6a47f4bf6d16de8ba5977fe8
|
[
"MIT"
] | 16
|
2021-02-03T22:35:01.000Z
|
2021-05-24T21:28:56.000Z
|
homework/austen.py
|
mm5110/PIC16A
|
e2dab91439c2627f6a47f4bf6d16de8ba5977fe8
|
[
"MIT"
] | 19
|
2020-11-11T05:44:53.000Z
|
2022-02-01T14:10:15.000Z
|
s = 'It is a truth universally acknowledged, that a single man in possession of a good fortune, must be in want of a wife. However little known the feelings or views of such a man may be on his first entering a neighbourhood, this truth is so well fixed in the minds of the surrounding families, that he is considered the rightful property of some one or other of their daughters. “My dear Mr. Bennet,” said his lady to him one day, “have you heard that Netherfield Park is let at last?” Mr. Bennet replied that he had not. “But it is,” returned she; “for Mrs. Long has just been here, and she told me all about it.” Mr. Bennet made no answer. “Do you not want to know who has taken it?” cried his wife impatiently. “You want to tell me, and I have no objection to hearing it.” This was invitation enough. “Why, my dear, you must know, Mrs. Long says that Netherfield is taken by a young man of large fortune from the north of England; that he came down on Monday in a chaise and four to see the place, and was so much delighted with it, that he agreed with Mr. Morris immediately; that he is to take possession before Michaelmas, and some of his servants are to be in the house by the end of next week.” “What is his name?” “Bingley.” “Is he married or single?” “Oh! Single, my dear, to be sure! A single man of large fortune; four or five thousand a year. What a fine thing for our girls!” “How so? How can it affect them?” “My dear Mr. Bennet,” replied his wife, “how can you be so tiresome! You must know that I am thinking of his marrying one of them.” “Is that his design in settling here?” “Design! Nonsense, how can you talk so! But it is very likely that he may fall in love with one of them, and therefore you must visit him as soon as he comes.” “I see no occasion for that. You and the girls may go, or you may send them by themselves, which perhaps will be still better, for as you are as handsome as any of them, Mr. 
Bingley may like you the best of the party.” “My dear, you flatter me. I certainly have had my share of beauty, but I do not pretend to be anything extraordinary now. When a woman has five grown-up daughters, she ought to give over thinking of her own beauty.” “In such cases, a woman has not often much beauty to think of.” “But, my dear, you must indeed go and see Mr. Bingley when he comes into the neighbourhood.” “It is more than I engage for, I assure you.” “But consider your daughters. Only think what an establishment it would be for one of them. Sir William and Lady Lucas are determined to go, merely on that account, for in general, you know, they visit no newcomers. Indeed you must go, for it will be impossible for us to visit him if you do not.” “You are over-scrupulous, surely. I dare say Mr. Bingley will be very glad to see you; and I will send a few lines by you to assure him of my hearty consent to his marrying whichever he chooses of the girls; though I must throw in a good word for my little Lizzy.” “I desire you will do no such thing. Lizzy is not a bit better than the others; and I am sure she is not half so handsome as Jane, nor half so good-humoured as Lydia. But you are always giving her the preference.” “They have none of them much to recommend them,” replied he; “they are all silly and ignorant like other girls; but Lizzy has something more of quickness than her sisters.” “Mr. Bennet, how can you abuse your own children in such a way? You take delight in vexing me. You have no compassion for my poor nerves.” “You mistake me, my dear. I have a high respect for your nerves. They are my old friends. 
I have heard you mention them with consideration these last twenty years at least.” “Ah, you do not know what I suffer.” “But I hope you will get over it, and live to see many young men of four thousand a year come into the neighbourhood.” “It will be no use to us, if twenty such should come, since you will not visit them.” “Depend upon it, my dear, that when there are twenty, I will visit them all.” Mr. Bennet was so odd a mixture of quick parts, sarcastic humour, reserve, and caprice, that the experience of three-and-twenty years had been insufficient to make his wife understand his character. Her mind was less difficult to develop. She was a woman of mean understanding, little information, and uncertain temper. When she was discontented, she fancied herself nervous. The business of her life was to get her daughters married; its solace was visiting and news. Chapter 2Mr. Bennet was among the earliest of those who waited on Mr. Bingley. He had always intended to visit him, though to the last always assuring his wife that he should not go; and till the evening after the visit was paid she had no knowledge of it. It was then disclosed in the following manner. Observing his second daughter employed in trimming a hat, he suddenly addressed her with: “I hope Mr. Bingley will like it, Lizzy.” “We are not in a way to know what Mr. Bingley likes,” said her mother resentfully, “since we are not to visit.” “But you forget, mamma,” said Elizabeth, “that we shall meet him at the assemblies, and that Mrs. Long promised to introduce him.” “I do not believe Mrs. Long will do any such thing. She has two nieces of her own. She is a selfish, hypocritical woman, and I have no opinion of her.” “No more have I,” said Mr. Bennet; “and I am glad to find that you do not depend on her serving you.” Mrs. Bennet deigned not to make any reply, but, unable to contain herself, began scolding one of her daughters. “Don’t keep coughing so, Kitty, for Heaven’s sake! 
Have a little compassion on my nerves. You tear them to pieces.” “Kitty has no discretion in her coughs,” said her father; “she times them ill.” “I do not cough for my own amusement,” replied Kitty fretfully. “When is your next ball to be, Lizzy?” “To-morrow fortnight.” “Aye, so it is,” cried her mother, “and Mrs. Long does not come back till the day before; so it will be impossible for her to introduce him, for she will not know him herself.” “Then, my dear, you may have the advantage of your friend, and introduce Mr. Bingley to her.” “Impossible, Mr. Bennet, impossible, when I am not acquainted with him myself; how can you be so teasing?” “I honour your circumspection. A fortnight’s acquaintance is certainly very little. One cannot know what a man really is by the end of a fortnight. But if we do not venture somebody else will; and after all, Mrs. Long and her nieces must stand their chance; and, therefore, as she will think it an act of kindness, if you decline the office, I will take it on myself.” The girls stared at their father. Mrs. Bennet said only, “Nonsense, nonsense!” “What can be the meaning of that emphatic exclamation?” cried he. “Do you consider the forms of introduction, and the stress that is laid on them, as nonsense? I cannot quite agree with you there. What say you, Mary? For you are a young lady of deep reflection, I know, and read great books and make extracts.” Mary wished to say something sensible, but knew not how. “While Mary is adjusting her ideas,” he continued, “let us return to Mr. Bingley.” “I am sick of Mr. Bingley,” cried his wife. “I am sorry to hear that; but why did not you tell me that before? If I had known as much this morning I certainly would not have called on him. It is very unlucky; but as I have actually paid the visit, we cannot escape the acquaintance now.” The astonishment of the ladies was just what he wished; that of Mrs. 
Bennet perhaps surpassing the rest; though, when the first tumult of joy was over, she began to declare that it was what she had expected all the while. “How good it was in you, my dear Mr. Bennet! But I knew I should persuade you at last. I was sure you loved your girls too well to neglect such an acquaintance. Well, how pleased I am! and it is such a good joke, too, that you should have gone this morning and never said a word about it till now.” “Now, Kitty, you may cough as much as you choose,” said Mr. Bennet; and, as he spoke, he left the room, fatigued with the raptures of his wife. “What an excellent father you have, girls!” said she, when the door was shut. “I do not know how you will ever make him amends for his kindness; or me, either, for that matter. At our time of life it is not so pleasant, I can tell you, to be making new acquaintances every day; but for your sakes, we would do anything. Lydia, my love, though you are the youngest, I dare say Mr. Bingley will dance with you at the next ball.” “Oh!” said Lydia stoutly, “I am not afraid; for though I am the youngest, I’m the tallest.” The rest of the evening was spent in conjecturing how soon he would return Mr. Bennet’s visit, and determining when they should ask him to dinner. Chapter 3Not all that Mrs. Bennet, however, with the assistance of her five daughters, could ask on the subject, was sufficient to draw from her husband any satisfactory description of Mr. Bingley. They attacked him in various ways—with barefaced questions, ingenious suppositions, and distant surmises; but he eluded the skill of them all, and they were at last obliged to accept the second-hand intelligence of their neighbour, Lady Lucas. Her report was highly favourable. Sir William had been delighted with him. He was quite young, wonderfully handsome, extremely agreeable, and, to crown the whole, he meant to be at the next assembly with a large party. Nothing could be more delightful! 
To be fond of dancing was a certain step towards falling in love; and very lively hopes of Mr. Bingley’s heart were entertained. “If I can but see one of my daughters happily settled at Netherfield,” said Mrs. Bennet to her husband, “and all the others equally well married, I shall have nothing to wish for.” In a few days Mr. Bingley returned Mr. Bennet’s visit, and sat about ten minutes with him in his library. He had entertained hopes of being admitted to a sight of the young ladies, of whose beauty he had heard much; but he saw only the father. The ladies were somewhat more fortunate, for they had the advantage of ascertaining from an upper window that he wore a blue coat, and rode a black horse. An invitation to dinner was soon afterwards dispatched; and already had Mrs. Bennet planned the courses that were to do credit to her housekeeping, when an answer arrived which deferred it all. Mr. Bingley was obliged to be in town the following day, and, consequently, unable to accept the honour of their invitation, etc. Mrs. Bennet was quite disconcerted. She could not imagine what business he could have in town so soon after his arrival in Hertfordshire; and she began to fear that he might be always flying about from one place to another, and never settled at Netherfield as he ought to be. Lady Lucas quieted her fears a little by starting the idea of his being gone to London only to get a large party for the ball; and a report soon followed that Mr. Bingley was to bring twelve ladies and seven gentlemen with him to the assembly. The girls grieved over such a number of ladies, but were comforted the day before the ball by hearing, that instead of twelve he brought only six with him from London—his five sisters and a cousin. And when the party entered the assembly room it consisted of only five altogether—Mr. Bingley, his two sisters, the husband of the eldest, and another young man. Mr. 
Bingley was good-looking and gentlemanlike; he had a pleasant countenance, and easy, unaffected manners. His sisters were fine women, with an air of decided fashion. His brother-in-law, Mr. Hurst, merely looked the gentleman; but his friend Mr. Darcy soon drew the attention of the room by his fine, tall person, handsome features, noble mien, and the report which was in general circulation within five minutes after his entrance, of his having ten thousand a year. The gentlemen pronounced him to be a fine figure of a man, the ladies declared he was much handsomer than Mr. Bingley, and he was looked at with great admiration for about half the evening, till his manners gave a disgust which turned the tide of his popularity; for he was discovered to be proud; to be above his company, and above being pleased; and not all his large estate in Derbyshire could then save him from having a most forbidding, disagreeable countenance, and being unworthy to be compared with his friend. Mr. Bingley had soon made himself acquainted with all the principal people in the room; he was lively and unreserved, danced every dance, was angry that the ball closed so early, and talked of giving one himself at Netherfield. Such amiable qualities must speak for themselves. What a contrast between him and his friend! Mr. Darcy danced only once with Mrs. Hurst and once with Miss Bingley, declined being introduced to any other lady, and spent the rest of the evening in walking about the room, speaking occasionally to one of his own party. His character was decided. He was the proudest, most disagreeable man in the world, and everybody hoped that he would never come there again. Amongst the most violent against him was Mrs. Bennet, whose dislike of his general behaviour was sharpened into particular resentment by his having slighted one of her daughters. Elizabeth Bennet had been obliged, by the scarcity of gentlemen, to sit down for two dances; and during part of that time, Mr. 
Darcy had been standing near enough for her to hear a conversation between him and Mr. Bingley, who came from the dance for a few minutes, to press his friend to join it. “Come, Darcy,” said he, “I must have you dance. I hate to see you standing about by yourself in this stupid manner. You had much better dance.” “I certainly shall not. You know how I detest it, unless I am particularly acquainted with my partner. At such an assembly as this it would be insupportable. Your sisters are engaged, and there is not another woman in the room whom it would not be a punishment to me to stand up with.” “I would not be so fastidious as you are,” cried Mr. Bingley, “for a kingdom! Upon my honour, I never met with so many pleasant girls in my life as I have this evening; and there are several of them you see uncommonly pretty.” “You are dancing with the only handsome girl in the room,” said Mr. Darcy, looking at the eldest Miss Bennet. “Oh! She is the most beautiful creature I ever beheld! But there is one of her sisters sitting down just behind you, who is very pretty, and I dare say very agreeable. Do let me ask my partner to introduce you.” “Which do you mean?” and turning round he looked for a moment at Elizabeth, till catching her eye, he withdrew his own and coldly said: “She is tolerable, but not handsome enough to tempt me; I am in no humour at present to give consequence to young ladies who are slighted by other men. You had better return to your partner and enjoy her smiles, for you are wasting your time with me.” Mr. Bingley followed his advice. Mr. Darcy walked off; and Elizabeth remained with no very cordial feelings toward him. She told the story, however, with great spirit among her friends; for she had a lively, playful disposition, which delighted in anything ridiculous. The evening altogether passed off pleasantly to the whole family. Mrs. Bennet had seen her eldest daughter much admired by the Netherfield party. Mr. 
Bingley had danced with her twice, and she had been distinguished by his sisters. Jane was as much gratified by this as her mother could be, though in a quieter way. Elizabeth felt Jane’s pleasure. Mary had heard herself mentioned to Miss Bingley as the most accomplished girl in the neighbourhood; and Catherine and Lydia had been fortunate enough never to be without partners, which was all that they had yet learnt to care for at a ball. They returned, therefore, in good spirits to Longbourn, the village where they lived, and of which they were the principal inhabitants. They found Mr. Bennet still up. With a book he was regardless of time; and on the present occasion he had a good deal of curiosity as to the event of an evening which had raised such splendid expectations. He had rather hoped that his wife’s views on the stranger would be disappointed; but he soon found out that he had a different story to hear. “Oh, my dear Mr. Bennet,” as she entered the room, “we have had a most delightful evening, a most excellent ball. I wish you had been there. Jane was so admired, nothing could be like it. Everybody said how well she looked; and Mr. Bingley thought her quite beautiful, and danced with her twice! Only think of that, my dear; he actually danced with her twice! and she was the only creature in the room that he asked a second time. First of all, he asked Miss Lucas. I was so vexed to see him stand up with her! But, however, he did not admire her at all; indeed, nobody can, you know; and he seemed quite struck with Jane as she was going down the dance. So he inquired who she was, and got introduced, and asked her for the two next. Then the two third he danced with Miss King, and the two fourth with Maria Lucas, and the two fifth with Jane again, and the two sixth with Lizzy, and the Boulanger—” “If he had had any compassion for me,” cried her husband impatiently, “he would not have danced half so much! For God’s sake, say no more of his partners. 
Oh that he had sprained his ankle in the first dance!” “Oh! my dear, I am quite delighted with him. He is so excessively handsome! And his sisters are charming women. I never in my life saw anything more elegant than their dresses. I dare say the lace upon Mrs. Hurst’s gown—” Here she was interrupted again. Mr. Bennet protested against any description of finery. She was therefore obliged to seek another branch of the subject, and related, with much bitterness of spirit and some exaggeration, the shocking rudeness of Mr. Darcy. “But I can assure you,” she added, “that Lizzy does not lose much by not suiting his fancy; for he is a most disagreeable, horrid man, not at all worth pleasing. So high and so conceited that there was no enduring him! He walked here, and he walked there, fancying himself so very great! Not handsome enough to dance with! I wish you had been there, my dear, to have given him one of your set-downs. I quite detest the man.” Chapter 4When Jane and Elizabeth were alone, the former, who had been cautious in her praise of Mr. Bingley before, expressed to her sister just how very much she admired him. “He is just what a young man ought to be,” said she, “sensible, good-humoured, lively; and I never saw such happy manners!—so much ease, with such perfect good breeding!” “He is also handsome,” replied Elizabeth, “which a young man ought likewise to be, if he possibly can. His character is thereby complete.” “I was very much flattered by his asking me to dance a second time. I did not expect such a compliment.” “Did not you? I did for you. But that is one great difference between us. Compliments always take you by surprise, and me never. What could be more natural than his asking you again? He could not help seeing that you were about five times as pretty as every other woman in the room. No thanks to his gallantry for that. Well, he certainly is very agreeable, and I give you leave to like him. You have liked many a stupider person.” “Dear Lizzy!” “Oh! 
you are a great deal too apt, you know, to like people in general. You never see a fault in anybody. All the world are good and agreeable in your eyes. I never heard you speak ill of a human being in your life.” “I would not wish to be hasty in censuring anyone; but I always speak what I think.” “I know you do; and it is that which makes the wonder. With your good sense, to be so honestly blind to the follies and nonsense of others! Affectation of candour is common enough—one meets with it everywhere. But to be candid without ostentation or design—to take the good of everybody’s character and make it still better, and say nothing of the bad—belongs to you alone. And so you like this man’s sisters, too, do you? Their manners are not equal to his.” “Certainly not—at first. But they are very pleasing women when you converse with them. Miss Bingley is to live with her brother, and keep his house; and I am much mistaken if we shall not find a very charming neighbour in her.” Elizabeth listened in silence, but was not convinced; their behaviour at the assembly had not been calculated to please in general; and with more quickness of observation and less pliancy of temper than her sister, and with a judgement too unassailed by any attention to herself, she was very little disposed to approve them. They were in fact very fine ladies; not deficient in good humour when they were pleased, nor in the power of making themselves agreeable when they chose it, but proud and conceited. They were rather handsome, had been educated in one of the first private seminaries in town, had a fortune of twenty thousand pounds, were in the habit of spending more than they ought, and of associating with people of rank, and were therefore in every respect entitled to think well of themselves, and meanly of others. 
They were of a respectable family in the north of England; a circumstance more deeply impressed on their memories than that their brother’s fortune and their own had been acquired by trade. Mr. Bingley inherited property to the amount of nearly a hundred thousand pounds from his father, who had intended to purchase an estate, but did not live to do it. Mr. Bingley intended it likewise, and sometimes made choice of his county; but as he was now provided with a good house and the liberty of a manor, it was doubtful to many of those who best knew the easiness of his temper, whether he might not spend the remainder of his days at Netherfield, and leave the next generation to purchase. His sisters were anxious for his having an estate of his own; but, though he was now only established as a tenant, Miss Bingley was by no means unwilling to preside at his table—nor was Mrs. Hurst, who had married a man of more fashion than fortune, less disposed to consider his house as her home when it suited her. Mr. Bingley had not been of age two years, when he was tempted by an accidental recommendation to look at Netherfield House. He did look at it, and into it for half-an-hour—was pleased with the situation and the principal rooms, satisfied with what the owner said in its praise, and took it immediately. Between him and Darcy there was a very steady friendship, in spite of great opposition of character. Bingley was endeared to Darcy by the easiness, openness, and ductility of his temper, though no disposition could offer a greater contrast to his own, and though with his own he never appeared dissatisfied. On the strength of Darcy’s regard, Bingley had the firmest reliance, and of his judgement the highest opinion. In understanding, Darcy was the superior. Bingley was by no means deficient, but Darcy was clever. He was at the same time haughty, reserved, and fastidious, and his manners, though well-bred, were not inviting. In that respect his friend had greatly the advantage. 
Bingley was sure of being liked wherever he appeared, Darcy was continually giving offense. The manner in which they spoke of the Meryton assembly was sufficiently characteristic. Bingley had never met with more pleasant people or prettier girls in his life; everybody had been most kind and attentive to him; there had been no formality, no stiffness; he had soon felt acquainted with all the room; and, as to Miss Bennet, he could not conceive an angel more beautiful. Darcy, on the contrary, had seen a collection of people in whom there was little beauty and no fashion, for none of whom he had felt the smallest interest, and from none received either attention or pleasure. Miss Bennet he acknowledged to be pretty, but she smiled too much. Mrs. Hurst and her sister allowed it to be so—but still they admired her and liked her, and pronounced her to be a sweet girl, and one whom they would not object to know more of. Miss Bennet was therefore established as a sweet girl, and their brother felt authorized by such commendation to think of her as he chose. Chapter 5Within a short walk of Longbourn lived a family with whom the Bennets were particularly intimate. Sir William Lucas had been formerly in trade in Meryton, where he had made a tolerable fortune, and risen to the honour of knighthood by an address to the king during his mayoralty. The distinction had perhaps been felt too strongly. It had given him a disgust to his business, and to his residence in a small market town; and, in quitting them both, he had removed with his family to a house about a mile from Meryton, denominated from that period Lucas Lodge, where he could think with pleasure of his own importance, and, unshackled by business, occupy himself solely in being civil to all the world. For, though elated by his rank, it did not render him supercilious; on the contrary, he was all attention to everybody. By nature inoffensive, friendly, and obliging, his presentation at St. James’s had made him courteous. 
Lady Lucas was a very good kind of woman, not too clever to be a valuable neighbour to Mrs. Bennet. They had several children. The eldest of them, a sensible, intelligent young woman, about twenty-seven, was Elizabeth’s intimate friend. That the Miss Lucases and the Miss Bennets should meet to talk over a ball was absolutely necessary; and the morning after the assembly brought the former to Longbourn to hear and to communicate. “You began the evening well, Charlotte,” said Mrs. Bennet with civil self-command to Miss Lucas. “You were Mr. Bingley’s first choice.” “Yes; but he seemed to like his second better.” “Oh! you mean Jane, I suppose, because he danced with her twice. To be sure that did seem as if he admired her—indeed I rather believe he did—I heard something about it—but I hardly know what—something about Mr. Robinson.” “Perhaps you mean what I overheard between him and Mr. Robinson; did not I mention it to you? Mr. Robinson’s asking him how he liked our Meryton assemblies, and whether he did not think there were a great many pretty women in the room, and which he thought the prettiest? and his answering immediately to the last question: ‘Oh! the eldest Miss Bennet, beyond a doubt; there cannot be two opinions on that point.’” “Upon my word! Well, that is very decided indeed—that does seem as if—but, however, it may all come to nothing, you know.” “My overhearings were more to the purpose than yours, Eliza,” said Charlotte. “Mr. Darcy is not so well worth listening to as his friend, is he?—poor Eliza!—to be only just tolerable.” “I beg you would not put it into Lizzy’s head to be vexed by his ill-treatment, for he is such a disagreeable man, that it would be quite a misfortune to be liked by him. Mrs. Long told me last night that he sat close to her for half-an-hour without once opening his lips.” “Are you quite sure, ma’am?—is not there a little mistake?” said Jane. “I certainly saw Mr. 
Darcy speaking to her.” “Aye—because she asked him at last how he liked Netherfield, and he could not help answering her; but she said he seemed quite angry at being spoke to.” “Miss Bingley told me,” said Jane, “that he never speaks much, unless among his intimate acquaintances. With them he is remarkably agreeable.” “I do not believe a word of it, my dear. If he had been so very agreeable, he would have talked to Mrs. Long. But I can guess how it was; everybody says that he is eat up with pride, and I dare say he had heard somehow that Mrs. Long does not keep a carriage, and had come to the ball in a hack chaise.” “I do not mind his not talking to Mrs. Long,” said Miss Lucas, “but I wish he had danced with Eliza.” “Another time, Lizzy,” said her mother, “I would not dance with him, if I were you.” “I believe, ma’am, I may safely promise you never to dance with him.” “His pride,” said Miss Lucas, “does not offend me so much as pride often does, because there is an excuse for it. One cannot wonder that so very fine a young man, with family, fortune, everything in his favour, should think highly of himself. If I may so express it, he has a right to be proud.” “That is very true,” replied Elizabeth, “and I could easily forgive his pride, if he had not mortified mine.” “Pride,” observed Mary, who piqued herself upon the solidity of her reflections, “is a very common failing, I believe. By all that I have ever read, I am convinced that it is very common indeed; that human nature is particularly prone to it, and that there are very few of us who do not cherish a feeling of self-complacency on the score of some quality or other, real or imaginary. Vanity and pride are different things, though the words are often used synonymously. A person may be proud without being vain. Pride relates more to our opinion of ourselves, vanity to what we would have others think of us.” “If I were as rich as Mr. 
Darcy,” cried a young Lucas, who came with his sisters, “I should not care how proud I was. I would keep a pack of foxhounds, and drink a bottle of wine a day.” “Then you would drink a great deal more than you ought,” said Mrs. Bennet; “and if I were to see you at it, I should take away your bottle directly.” The boy protested that she should not; she continued to declare that she would, and the argument ended only with the visit. Chapter 6The ladies of Longbourn soon waited on those of Netherfield. The visit was soon returned in due form. Miss Bennet’s pleasing manners grew on the goodwill of Mrs. Hurst and Miss Bingley; and though the mother was found to be intolerable, and the younger sisters not worth speaking to, a wish of being better acquainted with them was expressed towards the two eldest. By Jane, this attention was received with the greatest pleasure, but Elizabeth still saw superciliousness in their treatment of everybody, hardly excepting even her sister, and could not like them; though their kindness to Jane, such as it was, had a value as arising in all probability from the influence of their brother’s admiration. It was generally evident whenever they met, that he did admire her and to her it was equally evident that Jane was yielding to the preference which she had begun to entertain for him from the first, and was in a way to be very much in love; but she considered with pleasure that it was not likely to be discovered by the world in general, since Jane united, with great strength of feeling, a composure of temper and a uniform cheerfulness of manner which would guard her from the suspicions of the impertinent. She mentioned this to her friend Miss Lucas. “It may perhaps be pleasant,” replied Charlotte, “to be able to impose on the public in such a case; but it is sometimes a disadvantage to be so very guarded. 
If a woman conceals her affection with the same skill from the object of it, she may lose the opportunity of fixing him; and it will then be but poor consolation to believe the world equally in the dark. There is so much of gratitude or vanity in almost every attachment, that it is not safe to leave any to itself. We can all begin freely—a slight preference is natural enough; but there are very few of us who have heart enough to be really in love without encouragement. In nine cases out of ten a woman had better show more affection than she feels. Bingley likes your sister undoubtedly; but he may never do more than like her, if she does not help him on.” “But she does help him on, as much as her nature will allow. If I can perceive her regard for him, he must be a simpleton, indeed, not to discover it too.” “Remember, Eliza, that he does not know Jane’s disposition as you do.” “But if a woman is partial to a man, and does not endeavour to conceal it, he must find it out.” “Perhaps he must, if he sees enough of her. But, though Bingley and Jane meet tolerably often, it is never for many hours together; and, as they always see each other in large mixed parties, it is impossible that every moment should be employed in conversing together. Jane should therefore make the most of every half-hour in which she can command his attention. When she is secure of him, there will be more leisure for falling in love as much as she chooses.” “Your plan is a good one,” replied Elizabeth, “where nothing is in question but the desire of being well married, and if I were determined to get a rich husband, or any husband, I dare say I should adopt it. But these are not Jane’s feelings; she is not acting by design. As yet, she cannot even be certain of the degree of her own regard nor of its reasonableness. She has known him only a fortnight. She danced four dances with him at Meryton; she saw him one morning at his own house, and has since dined with him in company four times. 
This is not quite enough to make her understand his character.” “Not as you represent it. Had she merely dined with him, she might only have discovered whether he had a good appetite; but you must remember that four evenings have also been spent together—and four evenings may do a great deal.” “Yes; these four evenings have enabled them to ascertain that they both like Vingt-un better than Commerce; but with respect to any other leading characteristic, I do not imagine that much has been unfolded.” “Well,” said Charlotte, “I wish Jane success with all my heart; and if she were married to him to-morrow, I should think she had as good a chance of happiness as if she were to be studying his character for a twelvemonth. Happiness in marriage is entirely a matter of chance. If the dispositions of the parties are ever so well known to each other or ever so similar beforehand, it does not advance their felicity in the least. They always continue to grow sufficiently unlike afterwards to have their share of vexation; and it is better to know as little as possible of the defects of the person with whom you are to pass your life.” “You make me laugh, Charlotte; but it is not sound. You know it is not sound, and that you would never act in this way yourself.” Occupied in observing Mr. Bingley’s attentions to her sister, Elizabeth was far from suspecting that she was herself becoming an object of some interest in the eyes of his friend. Mr. Darcy had at first scarcely allowed her to be pretty; he had looked at her without admiration at the ball; and when they next met, he looked at her only to criticise. But no sooner had he made it clear to himself and his friends that she hardly had a good feature in her face, than he began to find it was rendered uncommonly intelligent by the beautiful expression of her dark eyes. To this discovery succeeded some others equally mortifying. 
Though he had detected with a critical eye more than one failure of perfect symmetry in her form, he was forced to acknowledge her figure to be light and pleasing; and in spite of his asserting that her manners were not those of the fashionable world, he was caught by their easy playfulness. Of this she was perfectly unaware; to her he was only the man who made himself agreeable nowhere, and who had not thought her handsome enough to dance with. He began to wish to know more of her, and as a step towards conversing with her himself, attended to her conversation with others. His doing so drew her notice. It was at Sir William Lucas’s, where a large party were assembled. “What does Mr. Darcy mean,” said she to Charlotte, “by listening to my conversation with Colonel Forster?” “That is a question which Mr. Darcy only can answer.” “But if he does it any more I shall certainly let him know that I see what he is about. He has a very satirical eye, and if I do not begin by being impertinent myself, I shall soon grow afraid of him.” On his approaching them soon afterwards, though without seeming to have any intention of speaking, Miss Lucas defied her friend to mention such a subject to him; which immediately provoking Elizabeth to do it, she turned to him and said: “Did you not think, Mr. Darcy, that I expressed myself uncommonly well just now, when I was teasing Colonel Forster to give us a ball at Meryton?” “With great energy; but it is always a subject which makes a lady energetic.” “You are severe on us.” “It will be her turn soon to be teased,” said Miss Lucas. “I am going to open the instrument, Eliza, and you know what follows.” “You are a very strange creature by way of a friend!—always wanting me to play and sing before anybody and everybody! 
If my vanity had taken a musical turn, you would have been invaluable; but as it is, I would really rather not sit down before those who must be in the habit of hearing the very best performers.” On Miss Lucas’s persevering, however, she added, “Very well, if it must be so, it must.” And gravely glancing at Mr. Darcy, “There is a fine old saying, which everybody here is of course familiar with: ‘Keep your breath to cool your porridge’; and I shall keep mine to swell my song.” Her performance was pleasing, though by no means capital. After a song or two, and before she could reply to the entreaties of several that she would sing again, she was eagerly succeeded at the instrument by her sister Mary, who having, in consequence of being the only plain one in the family, worked hard for knowledge and accomplishments, was always impatient for display. Mary had neither genius nor taste; and though vanity had given her application, it had given her likewise a pedantic air and conceited manner, which would have injured a higher degree of excellence than she had reached. Elizabeth, easy and unaffected, had been listened to with much more pleasure, though not playing half so well; and Mary, at the end of a long concerto, was glad to purchase praise and gratitude by Scotch and Irish airs, at the request of her younger sisters, who, with some of the Lucases, and two or three officers, joined eagerly in dancing at one end of the room. Mr. Darcy stood near them in silent indignation at such a mode of passing the evening, to the exclusion of all conversation, and was too much engrossed by his thoughts to perceive that Sir William Lucas was his neighbour, till Sir William thus began: “What a charming amusement for young people this is, Mr. Darcy! There is nothing like dancing after all. I consider it as one of the first refinements of polished society.” “Certainly, sir; and it has the advantage also of being in vogue amongst the less polished societies of the world. 
Every savage can dance.” Sir William only smiled. “Your friend performs delightfully,” he continued after a pause, on seeing Bingley join the group; “and I doubt not that you are an adept in the science yourself, Mr. Darcy.” “You saw me dance at Meryton, I believe, sir.” “Yes, indeed, and received no inconsiderable pleasure from the sight. Do you often dance at St. James’s?” “Never, sir.” “Do you not think it would be a proper compliment to the place?” “It is a compliment which I never pay to any place if I can avoid it.” “You have a house in town, I conclude?” Mr. Darcy bowed. “I had once had some thought of fixing in town myself—for I am fond of superior society; but I did not feel quite certain that the air of London would agree with Lady Lucas.” He paused in hopes of an answer; but his companion was not disposed to make any; and Elizabeth at that instant moving towards them, he was struck with the action of doing a very gallant thing, and called out to her: “My dear Miss Eliza, why are you not dancing? Mr. Darcy, you must allow me to present this young lady to you as a very desirable partner. You cannot refuse to dance, I am sure when so much beauty is before you.” And, taking her hand, he would have given it to Mr. Darcy who, though extremely surprised, was not unwilling to receive it, when she instantly drew back, and said with some discomposure to Sir William: “Indeed, sir, I have not the least intention of dancing. I entreat you not to suppose that I moved this way in order to beg for a partner.” Mr. Darcy, with grave propriety, requested to be allowed the honour of her hand, but in vain. Elizabeth was determined; nor did Sir William at all shake her purpose by his attempt at persuasion. “You excel so much in the dance, Miss Eliza, that it is cruel to deny me the happiness of seeing you; and though this gentleman dislikes the amusement in general, he can have no objection, I am sure, to oblige us for one half-hour.” “Mr. 
Darcy is all politeness,” said Elizabeth, smiling. “He is, indeed; but, considering the inducement, my dear Miss Eliza, we cannot wonder at his complaisance—for who would object to such a partner?” Elizabeth looked archly, and turned away. Her resistance had not injured her with the gentleman, and he was thinking of her with some complacency, when thus accosted by Miss Bingley: “I can guess the subject of your reverie.” “I should imagine not.” “You are considering how insupportable it would be to pass many evenings in this manner—in such society; and indeed I am quite of your opinion. I was never more annoyed! The insipidity, and yet the noise—the nothingness, and yet the self-importance of all those people! What would I give to hear your strictures on them!” “Your conjecture is totally wrong, I assure you. My mind was more agreeably engaged. I have been meditating on the very great pleasure which a pair of fine eyes in the face of a pretty woman can bestow.” Miss Bingley immediately fixed her eyes on his face, and desired he would tell her what lady had the credit of inspiring such reflections. Mr. Darcy replied with great intrepidity: “Miss Elizabeth Bennet.” “Miss Elizabeth Bennet!” repeated Miss Bingley. “I am all astonishment. How long has she been such a favourite?—and pray, when am I to wish you joy?” “That is exactly the question which I expected you to ask. A lady’s imagination is very rapid; it jumps from admiration to love, from love to matrimony, in a moment. I knew you would be wishing me joy.” “Nay, if you are serious about it, I shall consider the matter is absolutely settled. You will be having a charming mother-in-law, indeed; and, of course, she will always be at Pemberley with you.” He listened to her with perfect indifference while she chose to entertain herself in this manner; and as his composure convinced her that all was safe, her wit flowed long.

Chapter 7

Mr. 
Bennet’s property consisted almost entirely in an estate of two thousand a year, which, unfortunately for his daughters, was entailed, in default of heirs male, on a distant relation; and their mother’s fortune, though ample for her situation in life, could but ill supply the deficiency of his. Her father had been an attorney in Meryton, and had left her four thousand pounds. She had a sister married to a Mr. Phillips, who had been a clerk to their father and succeeded him in the business, and a brother settled in London in a respectable line of trade. The village of Longbourn was only one mile from Meryton; a most convenient distance for the young ladies, who were usually tempted thither three or four times a week, to pay their duty to their aunt and to a milliner’s shop just over the way. The two youngest of the family, Catherine and Lydia, were particularly frequent in these attentions; their minds were more vacant than their sisters’, and when nothing better offered, a walk to Meryton was necessary to amuse their morning hours and furnish conversation for the evening; and however bare of news the country in general might be, they always contrived to learn some from their aunt. At present, indeed, they were well supplied both with news and happiness by the recent arrival of a militia regiment in the neighbourhood; it was to remain the whole winter, and Meryton was the headquarters. Their visits to Mrs. Phillips were now productive of the most interesting intelligence. Every day added something to their knowledge of the officers’ names and connections. Their lodgings were not long a secret, and at length they began to know the officers themselves. Mr. Phillips visited them all, and this opened to his nieces a store of felicity unknown before. They could talk of nothing but officers; and Mr. Bingley’s large fortune, the mention of which gave animation to their mother, was worthless in their eyes when opposed to the regimentals of an ensign. 
After listening one morning to their effusions on this subject, Mr. Bennet coolly observed: “From all that I can collect by your manner of talking, you must be two of the silliest girls in the country. I have suspected it some time, but I am now convinced.” Catherine was disconcerted, and made no answer; but Lydia, with perfect indifference, continued to express her admiration of Captain Carter, and her hope of seeing him in the course of the day, as he was going the next morning to London. “I am astonished, my dear,” said Mrs. Bennet, “that you should be so ready to think your own children silly. If I wished to think slightingly of anybody’s children, it should not be of my own, however.” “If my children are silly, I must hope to be always sensible of it.” “Yes—but as it happens, they are all of them very clever.” “This is the only point, I flatter myself, on which we do not agree. I had hoped that our sentiments coincided in every particular, but I must so far differ from you as to think our two youngest daughters uncommonly foolish.” “My dear Mr. Bennet, you must not expect such girls to have the sense of their father and mother. When they get to our age, I dare say they will not think about officers any more than we do. I remember the time when I liked a red coat myself very well—and, indeed, so I do still at my heart; and if a smart young colonel, with five or six thousand a year, should want one of my girls I shall not say nay to him; and I thought Colonel Forster looked very becoming the other night at Sir William’s in his regimentals.” “Mamma,” cried Lydia, “my aunt says that Colonel Forster and Captain Carter do not go so often to Miss Watson’s as they did when they first came; she sees them now very often standing in Clarke’s library.” Mrs. Bennet was prevented replying by the entrance of the footman with a note for Miss Bennet; it came from Netherfield, and the servant waited for an answer. Mrs. 
Bennet’s eyes sparkled with pleasure, and she was eagerly calling out, while her daughter read, “Well, Jane, who is it from? What is it about? What does he say? Well, Jane, make haste and tell us; make haste, my love.” “It is from Miss Bingley,” said Jane, and then read it aloud. “MY DEAR FRIEND,—“If you are not so compassionate as to dine to-day with Louisa and me, we shall be in danger of hating each other for the rest of our lives, for a whole day’s tête-à-tête between two women can never end without a quarrel. Come as soon as you can on receipt of this. My brother and the gentlemen are to dine with the officers.—Yours ever, “CAROLINE BINGLEY” “With the officers!” cried Lydia. “I wonder my aunt did not tell us of that.” “Dining out,” said Mrs. Bennet, “that is very unlucky.” “Can I have the carriage?” said Jane. “No, my dear, you had better go on horseback, because it seems likely to rain; and then you must stay all night.” “That would be a good scheme,” said Elizabeth, “if you were sure that they would not offer to send her home.” “Oh! but the gentlemen will have Mr. Bingley’s chaise to go to Meryton, and the Hursts have no horses to theirs.” “I had much rather go in the coach.” “But, my dear, your father cannot spare the horses, I am sure. They are wanted in the farm, Mr. Bennet, are they not?” “They are wanted in the farm much oftener than I can get them.” “But if you have got them to-day,” said Elizabeth, “my mother’s purpose will be answered.” She did at last extort from her father an acknowledgment that the horses were engaged. Jane was therefore obliged to go on horseback, and her mother attended her to the door with many cheerful prognostics of a bad day. Her hopes were answered; Jane had not been gone long before it rained hard. Her sisters were uneasy for her, but her mother was delighted. The rain continued the whole evening without intermission; Jane certainly could not come back. “This was a lucky idea of mine, indeed!” said Mrs. 
Bennet more than once, as if the credit of making it rain were all her own. Till the next morning, however, she was not aware of all the felicity of her contrivance. Breakfast was scarcely over when a servant from Netherfield brought the following note for Elizabeth: “MY DEAREST LIZZY,—“I find myself very unwell this morning, which, I suppose, is to be imputed to my getting wet through yesterday. My kind friends will not hear of my returning till I am better. They insist also on my seeing Mr. Jones—therefore do not be alarmed if you should hear of his having been to me—and, excepting a sore throat and headache, there is not much the matter with me.—Yours, etc.” “Well, my dear,” said Mr. Bennet, when Elizabeth had read the note aloud, “if your daughter should have a dangerous fit of illness—if she should die, it would be a comfort to know that it was all in pursuit of Mr. Bingley, and under your orders.” “Oh! I am not afraid of her dying. People do not die of little trifling colds. She will be taken good care of. As long as she stays there, it is all very well. I would go and see her if I could have the carriage.” Elizabeth, feeling really anxious, was determined to go to her, though the carriage was not to be had; and as she was no horsewoman, walking was her only alternative. She declared her resolution. “How can you be so silly,” cried her mother, “as to think of such a thing, in all this dirt! You will not be fit to be seen when you get there.” “I shall be very fit to see Jane—which is all I want.” “Is this a hint to me, Lizzy,” said her father, “to send for the horses?” “No, indeed, I do not wish to avoid the walk. The distance is nothing when one has a motive; only three miles. 
I shall be back by dinner.” “I admire the activity of your benevolence,” observed Mary, “but every impulse of feeling should be guided by reason; and, in my opinion, exertion should always be in proportion to what is required.” “We will go as far as Meryton with you,” said Catherine and Lydia. Elizabeth accepted their company, and the three young ladies set off together. “If we make haste,” said Lydia, as they walked along, “perhaps we may see something of Captain Carter before he goes.” In Meryton they parted; the two youngest repaired to the lodgings of one of the officers’ wives, and Elizabeth continued her walk alone, crossing field after field at a quick pace, jumping over stiles and springing over puddles with impatient activity, and finding herself at last within view of the house, with weary ankles, dirty stockings, and a face glowing with the warmth of exercise. She was shown into the breakfast-parlour, where all but Jane were assembled, and where her appearance created a great deal of surprise. That she should have walked three miles so early in the day, in such dirty weather, and by herself, was almost incredible to Mrs. Hurst and Miss Bingley; and Elizabeth was convinced that they held her in contempt for it. She was received, however, very politely by them; and in their brother’s manners there was something better than politeness; there was good humour and kindness. Mr. Darcy said very little, and Mr. Hurst nothing at all. The former was divided between admiration of the brilliancy which exercise had given to her complexion, and doubt as to the occasion’s justifying her coming so far alone. The latter was thinking only of his breakfast. Her inquiries after her sister were not very favourably answered. Miss Bennet had slept ill, and though up, was very feverish, and not well enough to leave her room. 
Elizabeth was glad to be taken to her immediately; and Jane, who had only been withheld by the fear of giving alarm or inconvenience from expressing in her note how much she longed for such a visit, was delighted at her entrance. She was not equal, however, to much conversation, and when Miss Bingley left them together, could attempt little besides expressions of gratitude for the extraordinary kindness she was treated with. Elizabeth silently attended her. When breakfast was over they were joined by the sisters; and Elizabeth began to like them herself, when she saw how much affection and solicitude they showed for Jane. The apothecary came, and having examined his patient, said, as might be supposed, that she had caught a violent cold, and that they must endeavour to get the better of it; advised her to return to bed, and promised her some draughts. The advice was followed readily, for the feverish symptoms increased, and her head ached acutely. Elizabeth did not quit her room for a moment; nor were the other ladies often absent; the gentlemen being out, they had, in fact, nothing to do elsewhere. When the clock struck three, Elizabeth felt that she must go, and very unwillingly said so. Miss Bingley offered her the carriage, and she only wanted a little pressing to accept it, when Jane testified such concern in parting with her, that Miss Bingley was obliged to convert the offer of the chaise to an invitation to remain at Netherfield for the present. Elizabeth most thankfully consented, and a servant was dispatched to Longbourn to acquaint the family with her stay and bring back a supply of clothes.

Chapter 8

At five o’clock the two ladies retired to dress, and at half-past six Elizabeth was summoned to dinner. To the civil inquiries which then poured in, and amongst which she had the pleasure of distinguishing the much superior solicitude of Mr. Bingley’s, she could not make a very favourable answer. Jane was by no means better. 
The sisters, on hearing this, repeated three or four times how much they were grieved, how shocking it was to have a bad cold, and how excessively they disliked being ill themselves; and then thought no more of the matter: and their indifference towards Jane when not immediately before them restored Elizabeth to the enjoyment of all her former dislike. Their brother, indeed, was the only one of the party whom she could regard with any complacency. His anxiety for Jane was evident, and his attentions to herself most pleasing, and they prevented her feeling herself so much an intruder as she believed she was considered by the others. She had very little notice from any but him. Miss Bingley was engrossed by Mr. Darcy, her sister scarcely less so; and as for Mr. Hurst, by whom Elizabeth sat, he was an indolent man, who lived only to eat, drink, and play at cards; who, when he found her to prefer a plain dish to a ragout, had nothing to say to her. When dinner was over, she returned directly to Jane, and Miss Bingley began abusing her as soon as she was out of the room. Her manners were pronounced to be very bad indeed, a mixture of pride and impertinence; she had no conversation, no style, no beauty. Mrs. Hurst thought the same, and added: “She has nothing, in short, to recommend her, but being an excellent walker. I shall never forget her appearance this morning. She really looked almost wild.” “She did, indeed, Louisa. I could hardly keep my countenance. Very nonsensical to come at all! Why must she be scampering about the country, because her sister had a cold? Her hair, so untidy, so blowsy!” “Yes, and her petticoat; I hope you saw her petticoat, six inches deep in mud, I am absolutely certain; and the gown which had been let down to hide it not doing its office.” “Your picture may be very exact, Louisa,” said Bingley; “but this was all lost upon me. I thought Miss Elizabeth Bennet looked remarkably well when she came into the room this morning. 
Her dirty petticoat quite escaped my notice.” “You observed it, Mr. Darcy, I am sure,” said Miss Bingley; “and I am inclined to think that you would not wish to see your sister make such an exhibition.” “Certainly not.” “To walk three miles, or four miles, or five miles, or whatever it is, above her ankles in dirt, and alone, quite alone! What could she mean by it? It seems to me to show an abominable sort of conceited independence, a most country-town indifference to decorum.” “It shows an affection for her sister that is very pleasing,” said Bingley. “I am afraid, Mr. Darcy,” observed Miss Bingley in a half whisper, “that this adventure has rather affected your admiration of her fine eyes.” “Not at all,” he replied; “they were brightened by the exercise.” A short pause followed this speech, and Mrs. Hurst began again: “I have an excessive regard for Miss Jane Bennet, she is really a very sweet girl, and I wish with all my heart she were well settled. But with such a father and mother, and such low connections, I am afraid there is no chance of it.” “I think I have heard you say that their uncle is an attorney in Meryton.” “Yes; and they have another, who lives somewhere near Cheapside.” “That is capital,” added her sister, and they both laughed heartily. “If they had uncles enough to fill all Cheapside,” cried Bingley, “it would not make them one jot less agreeable.” “But it must very materially lessen their chance of marrying men of any consideration in the world,” replied Darcy. To this speech Bingley made no answer; but his sisters gave it their hearty assent, and indulged their mirth for some time at the expense of their dear friend’s vulgar relations. With a renewal of tenderness, however, they returned to her room on leaving the dining-parlour, and sat with her till summoned to coffee. 
She was still very poorly, and Elizabeth would not quit her at all, till late in the evening, when she had the comfort of seeing her sleep, and when it seemed to her rather right than pleasant that she should go downstairs herself. On entering the drawing-room she found the whole party at loo, and was immediately invited to join them; but suspecting them to be playing high she declined it, and making her sister the excuse, said she would amuse herself for the short time she could stay below, with a book. Mr. Hurst looked at her with astonishment. “Do you prefer reading to cards?” said he; “that is rather singular.” “Miss Eliza Bennet,” said Miss Bingley, “despises cards. She is a great reader, and has no pleasure in anything else.” “I deserve neither such praise nor such censure,” cried Elizabeth; “I am not a great reader, and I have pleasure in many things.” “In nursing your sister I am sure you have pleasure,” said Bingley; “and I hope it will be soon increased by seeing her quite well.” Elizabeth thanked him from her heart, and then walked towards the table where a few books were lying. He immediately offered to fetch her others—all that his library afforded. “And I wish my collection were larger for your benefit and my own credit; but I am an idle fellow, and though I have not many, I have more than I ever looked into.” Elizabeth assured him that she could suit herself perfectly with those in the room. “I am astonished,” said Miss Bingley, “that my father should have left so small a collection of books. What a delightful library you have at Pemberley, Mr. Darcy!” “It ought to be good,” he replied, “it has been the work of many generations.” “And then you have added so much to it yourself, you are always buying books.” “I cannot comprehend the neglect of a family library in such days as these.” “Neglect! I am sure you neglect nothing that can add to the beauties of that noble place. 
Charles, when you build your house, I wish it may be half as delightful as Pemberley.” “I wish it may.” “But I would really advise you to make your purchase in that neighbourhood, and take Pemberley for a kind of model. There is not a finer county in England than Derbyshire.” “With all my heart; I will buy Pemberley itself if Darcy will sell it.” “I am talking of possibilities, Charles.” “Upon my word, Caroline, I should think it more possible to get Pemberley by purchase than by imitation.” Elizabeth was so much caught with what passed, as to leave her very little attention for her book; and soon laying it wholly aside, she drew near the card-table, and stationed herself between Mr. Bingley and his eldest sister, to observe the game. “Is Miss Darcy much grown since the spring?” said Miss Bingley; “will she be as tall as I am?” “I think she will. She is now about Miss Elizabeth Bennet’s height, or rather taller.” “How I long to see her again! I never met with anybody who delighted me so much. Such a countenance, such manners! And so extremely accomplished for her age! Her performance on the pianoforte is exquisite.” “It is amazing to me,” said Bingley, “how young ladies can have patience to be so very accomplished as they all are.” “All young ladies accomplished! My dear Charles, what do you mean?” “Yes, all of them, I think. They all paint tables, cover screens, and net purses. I scarcely know anyone who cannot do all this, and I am sure I never heard a young lady spoken of for the first time, without being informed that she was very accomplished.” “Your list of the common extent of accomplishments,” said Darcy, “has too much truth. The word is applied to many a woman who deserves it no otherwise than by netting a purse or covering a screen. But I am very far from agreeing with you in your estimation of ladies in general. 
I cannot boast of knowing more than half-a-dozen, in the whole range of my acquaintance, that are really accomplished.” “Nor I, I am sure,” said Miss Bingley. “Then,” observed Elizabeth, “you must comprehend a great deal in your idea of an accomplished woman.” “Yes, I do comprehend a great deal in it.” “Oh! certainly,” cried his faithful assistant, “no one can be really esteemed accomplished who does not greatly surpass what is usually met with. A woman must have a thorough knowledge of music, singing, drawing, dancing, and the modern languages, to deserve the word; and besides all this, she must possess a certain something in her air and manner of walking, the tone of her voice, her address and expressions, or the word will be but half-deserved.” “All this she must possess,” added Darcy, “and to all this she must yet add something more substantial, in the improvement of her mind by extensive reading.” “I am no longer surprised at your knowing only six accomplished women. I rather wonder now at your knowing any.” “Are you so severe upon your own sex as to doubt the possibility of all this?” “I never saw such a woman. I never saw such capacity, and taste, and application, and elegance, as you describe united.” Mrs. Hurst and Miss Bingley both cried out against the injustice of her implied doubt, and were both protesting that they knew many women who answered this description, when Mr. Hurst called them to order, with bitter complaints of their inattention to what was going forward. As all conversation was thereby at an end, Elizabeth soon afterwards left the room. “Elizabeth Bennet,” said Miss Bingley, when the door was closed on her, “is one of those young ladies who seek to recommend themselves to the other sex by undervaluing their own; and with many men, I dare say, it succeeds. 
But, in my opinion, it is a paltry device, a very mean art.” “Undoubtedly,” replied Darcy, to whom this remark was chiefly addressed, “there is a meanness in all the arts which ladies sometimes condescend to employ for captivation. Whatever bears affinity to cunning is despicable.” Miss Bingley was not so entirely satisfied with this reply as to continue the subject. Elizabeth joined them again only to say that her sister was worse, and that she could not leave her. Bingley urged Mr. Jones being sent for immediately; while his sisters, convinced that no country advice could be of any service, recommended an express to town for one of the most eminent physicians. This she would not hear of; but she was not so unwilling to comply with their brother’s proposal; and it was settled that Mr. Jones should be sent for early in the morning, if Miss Bennet were not decidedly better. Bingley was quite uncomfortable; his sisters declared that they were miserable. They solaced their wretchedness, however, by duets after supper, while he could find no better relief to his feelings than by giving his housekeeper directions that every attention might be paid to the sick lady and her sister.

Chapter 9

Elizabeth passed the chief of the night in her sister’s room, and in the morning had the pleasure of being able to send a tolerable answer to the inquiries which she very early received from Mr. Bingley by a housemaid, and some time afterwards from the two elegant ladies who waited on his sisters. In spite of this amendment, however, she requested to have a note sent to Longbourn, desiring her mother to visit Jane, and form her own judgement of her situation. The note was immediately dispatched, and its contents as quickly complied with. Mrs. Bennet, accompanied by her two youngest girls, reached Netherfield soon after the family breakfast. Had she found Jane in any apparent danger, Mrs. 
Bennet would have been very miserable; but being satisfied on seeing her that her illness was not alarming, she had no wish of her recovering immediately, as her restoration to health would probably remove her from Netherfield. She would not listen, therefore, to her daughter’s proposal of being carried home; neither did the apothecary, who arrived about the same time, think it at all advisable. After sitting a little while with Jane, on Miss Bingley’s appearance and invitation, the mother and three daughters all attended her into the breakfast parlour. Bingley met them with hopes that Mrs. Bennet had not found Miss Bennet worse than she expected. “Indeed I have, sir,” was her answer. “She is a great deal too ill to be moved. Mr. Jones says we must not think of moving her. We must trespass a little longer on your kindness.” “Removed!” cried Bingley. “It must not be thought of. My sister, I am sure, will not hear of her removal.” “You may depend upon it, Madam,” said Miss Bingley, with cold civility, “that Miss Bennet will receive every possible attention while she remains with us.” Mrs. Bennet was profuse in her acknowledgments. “I am sure,” she added, “if it was not for such good friends I do not know what would become of her, for she is very ill indeed, and suffers a vast deal, though with the greatest patience in the world, which is always the way with her, for she has, without exception, the sweetest temper I have ever met with. I often tell my other girls they are nothing to her. You have a sweet room here, Mr. Bingley, and a charming prospect over the gravel walk. I do not know a place in the country that is equal to Netherfield. You will not think of quitting it in a hurry, I hope, though you have but a short lease.” “Whatever I do is done in a hurry,” replied he; “and therefore if I should resolve to quit Netherfield, I should probably be off in five minutes. 
At present, however, I consider myself as quite fixed here.” “That is exactly what I should have supposed of you,” said Elizabeth. “You begin to comprehend me, do you?” cried he, turning towards her. “Oh! yes—I understand you perfectly.” “I wish I might take this for a compliment; but to be so easily seen through I am afraid is pitiful.” “That is as it happens. It does not follow that a deep, intricate character is more or less estimable than such a one as yours.” “Lizzy,” cried her mother, “remember where you are, and do not run on in the wild manner that you are suffered to do at home.” “I did not know before,” continued Bingley immediately, “that you were a studier of character. It must be an amusing study.” “Yes, but intricate characters are the most amusing. They have at least that advantage.” “The country,” said Darcy, “can in general supply but a few subjects for such a study. In a country neighbourhood you move in a very confined and unvarying society.” “But people themselves alter so much, that there is something new to be observed in them for ever.” “Yes, indeed,” cried Mrs. Bennet, offended by his manner of mentioning a country neighbourhood. “I assure you there is quite as much of that going on in the country as in town.” Everybody was surprised, and Darcy, after looking at her for a moment, turned silently away. Mrs. Bennet, who fancied she had gained a complete victory over him, continued her triumph. “I cannot see that London has any great advantage over the country, for my part, except the shops and public places. The country is a vast deal pleasanter, is it not, Mr. Bingley?” “When I am in the country,” he replied, “I never wish to leave it; and when I am in town it is pretty much the same. They have each their advantages, and I can be equally happy in either.” “Aye—that is because you have the right disposition. 
But that gentleman,” looking at Darcy, “seemed to think the country was nothing at all.” “Indeed, Mamma, you are mistaken,” said Elizabeth, blushing for her mother. “You quite mistook Mr. Darcy. He only meant that there was not such a variety of people to be met with in the country as in the town, which you must acknowledge to be true.” “Certainly, my dear, nobody said there were; but as to not meeting with many people in this neighbourhood, I believe there are few neighbourhoods larger. I know we dine with four-and-twenty families.” Nothing but concern for Elizabeth could enable Bingley to keep his countenance. His sister was less delicate, and directed her eyes towards Mr. Darcy with a very expressive smile. Elizabeth, for the sake of saying something that might turn her mother’s thoughts, now asked her if Charlotte Lucas had been at Longbourn since her coming away. “Yes, she called yesterday with her father. What an agreeable man Sir William is, Mr. Bingley, is not he? So much the man of fashion! So genteel and easy! He has always something to say to everybody. That is my idea of good breeding; and those persons who fancy themselves very important, and never open their mouths, quite mistake the matter.” “Did Charlotte dine with you?” “No, she would go home. I fancy she was wanted about the mince-pies. For my part, Mr. Bingley, I always keep servants that can do their own work; my daughters are brought up very differently. But everybody is to judge for themselves, and the Lucases are a very good sort of girls, I assure you. It is a pity they are not handsome! Not that I think Charlotte so very plain—but then she is our particular friend.” “She seems a very pleasant young woman.” “Oh! dear, yes; but you must own she is very plain. Lady Lucas herself has often said so, and envied me Jane’s beauty. I do not like to boast of my own child, but to be sure, Jane—one does not often see anybody better looking. It is what everybody says. I do not trust my own partiality. 
When she was only fifteen, there was a man at my brother Gardiner’s in town so much in love with her that my sister-in-law was sure he would make her an offer before we came away. But, however, he did not. Perhaps he thought her too young. However, he wrote some verses on her, and very pretty they were.” “And so ended his affection,” said Elizabeth impatiently. “There has been many a one, I fancy, overcome in the same way. I wonder who first discovered the efficacy of poetry in driving away love!” “I have been used to consider poetry as the food of love,” said Darcy. “Of a fine, stout, healthy love it may. Everything nourishes what is strong already. But if it be only a slight, thin sort of inclination, I am convinced that one good sonnet will starve it entirely away.” Darcy only smiled; and the general pause which ensued made Elizabeth tremble lest her mother should be exposing herself again. She longed to speak, but could think of nothing to say; and after a short silence Mrs. Bennet began repeating her thanks to Mr. Bingley for his kindness to Jane, with an apology for troubling him also with Lizzy. Mr. Bingley was unaffectedly civil in his answer, and forced his younger sister to be civil also, and say what the occasion required. She performed her part indeed without much graciousness, but Mrs. Bennet was satisfied, and soon afterwards ordered her carriage. Upon this signal, the youngest of her daughters put herself forward. The two girls had been whispering to each other during the whole visit, and the result of it was, that the youngest should tax Mr. Bingley with having promised on his first coming into the country to give a ball at Netherfield. Lydia was a stout, well-grown girl of fifteen, with a fine complexion and good-humoured countenance; a favourite with her mother, whose affection had brought her into public at an early age. 
She had high animal spirits, and a sort of natural self-consequence, which the attention of the officers, to whom her uncle’s good dinners, and her own easy manners recommended her, had increased into assurance. She was very equal, therefore, to address Mr. Bingley on the subject of the ball, and abruptly reminded him of his promise; adding, that it would be the most shameful thing in the world if he did not keep it. His answer to this sudden attack was delightful to their mother’s ear: “I am perfectly ready, I assure you, to keep my engagement; and when your sister is recovered, you shall, if you please, name the very day of the ball. But you would not wish to be dancing when she is ill.” Lydia declared herself satisfied. “Oh! yes—it would be much better to wait till Jane was well, and by that time most likely Captain Carter would be at Meryton again. And when you have given your ball,” she added, “I shall insist on their giving one also. I shall tell Colonel Forster it will be quite a shame if he does not.” Mrs. Bennet and her daughters then departed, and Elizabeth returned instantly to Jane, leaving her own and her relations’ behaviour to the remarks of the two ladies and Mr. Darcy; the latter of whom, however, could not be prevailed on to join in their censure of her, in spite of all Miss Bingley’s witticisms on fine eyes. Chapter 10The day passed much as the day before had done. Mrs. Hurst and Miss Bingley had spent some hours of the morning with the invalid, who continued, though slowly, to mend; and in the evening Elizabeth joined their party in the drawing-room. The loo-table, however, did not appear. Mr. Darcy was writing, and Miss Bingley, seated near him, was watching the progress of his letter and repeatedly calling off his attention by messages to his sister. Mr. Hurst and Mr. Bingley were at piquet, and Mrs. Hurst was observing their game. 
Elizabeth took up some needlework, and was sufficiently amused in attending to what passed between Darcy and his companion. The perpetual commendations of the lady, either on his handwriting, or on the evenness of his lines, or on the length of his letter, with the perfect unconcern with which her praises were received, formed a curious dialogue, and was exactly in union with her opinion of each. “How delighted Miss Darcy will be to receive such a letter!” He made no answer. “You write uncommonly fast.” “You are mistaken. I write rather slowly.” “How many letters you must have occasion to write in the course of a year! Letters of business, too! How odious I should think them!” “It is fortunate, then, that they fall to my lot instead of yours.” “Pray tell your sister that I long to see her.” “I have already told her so once, by your desire.” “I am afraid you do not like your pen. Let me mend it for you. I mend pens remarkably well.” “Thank you—but I always mend my own.” “How can you contrive to write so even?” He was silent. “Tell your sister I am delighted to hear of her improvement on the harp; and pray let her know that I am quite in raptures with her beautiful little design for a table, and I think it infinitely superior to Miss Grantley’s.” “Will you give me leave to defer your raptures till I write again? At present I have not room to do them justice.” “Oh! it is of no consequence. I shall see her in January. But do you always write such charming long letters to her, Mr. Darcy?” “They are generally long; but whether always charming it is not for me to determine.” “It is a rule with me, that a person who can write a long letter with ease, cannot write ill.” “That will not do for a compliment to Darcy, Caroline,” cried her brother, “because he does not write with ease. He studies too much for words of four syllables. 
Do not you, Darcy?” “My style of writing is very different from yours.” “Oh!” cried Miss Bingley, “Charles writes in the most careless way imaginable. He leaves out half his words, and blots the rest.” “My ideas flow so rapidly that I have not time to express them—by which means my letters sometimes convey no ideas at all to my correspondents.” “Your humility, Mr. Bingley,” said Elizabeth, “must disarm reproof.” “Nothing is more deceitful,” said Darcy, “than the appearance of humility. It is often only carelessness of opinion, and sometimes an indirect boast.” “And which of the two do you call my little recent piece of modesty?” “The indirect boast; for you are really proud of your defects in writing, because you consider them as proceeding from a rapidity of thought and carelessness of execution, which, if not estimable, you think at least highly interesting. The power of doing anything with quickness is always prized much by the possessor, and often without any attention to the imperfection of the performance. When you told Mrs. Bennet this morning that if you ever resolved upon quitting Netherfield you should be gone in five minutes, you meant it to be a sort of panegyric, of compliment to yourself—and yet what is there so very laudable in a precipitance which must leave very necessary business undone, and can be of no real advantage to yourself or anyone else?” “Nay,” cried Bingley, “this is too much, to remember at night all the foolish things that were said in the morning. And yet, upon my honour, I believe what I said of myself to be true, and I believe it at this moment. At least, therefore, I did not assume the character of needless precipitance merely to show off before the ladies.” “I dare say you believed it; but I am by no means convinced that you would be gone with such celerity. 
Your conduct would be quite as dependent on chance as that of any man I know; and if, as you were mounting your horse, a friend were to say, ‘Bingley, you had better stay till next week,’ you would probably do it, you would probably not go—and at another word, might stay a month.” “You have only proved by this,” cried Elizabeth, “that Mr. Bingley did not do justice to his own disposition. You have shown him off now much more than he did himself.” “I am exceedingly gratified,” said Bingley, “by your converting what my friend says into a compliment on the sweetness of my temper. But I am afraid you are giving it a turn which that gentleman did by no means intend; for he would certainly think better of me, if under such a circumstance I were to give a flat denial, and ride off as fast as I could.” “Would Mr. Darcy then consider the rashness of your original intentions as atoned for by your obstinacy in adhering to it?” “Upon my word, I cannot exactly explain the matter; Darcy must speak for himself.” “You expect me to account for opinions which you choose to call mine, but which I have never acknowledged. Allowing the case, however, to stand according to your representation, you must remember, Miss Bennet, that the friend who is supposed to desire his return to the house, and the delay of his plan, has merely desired it, asked it without offering one argument in favour of its propriety.” “To yield readily—easily—to the persuasion of a friend is no merit with you.” “To yield without conviction is no compliment to the understanding of either.” “You appear to me, Mr. Darcy, to allow nothing for the influence of friendship and affection. A regard for the requester would often make one readily yield to a request, without waiting for arguments to reason one into it. I am not particularly speaking of such a case as you have supposed about Mr. Bingley. We may as well wait, perhaps, till the circumstance occurs before we discuss the discretion of his behaviour thereupon. 
But in general and ordinary cases between friend and friend, where one of them is desired by the other to change a resolution of no very great moment, should you think ill of that person for complying with the desire, without waiting to be argued into it?” “Will it not be advisable, before we proceed on this subject, to arrange with rather more precision the degree of importance which is to appertain to this request, as well as the degree of intimacy subsisting between the parties?” “By all means,” cried Bingley; “let us hear all the particulars, not forgetting their comparative height and size; for that will have more weight in the argument, Miss Bennet, than you may be aware of. I assure you, that if Darcy were not such a great tall fellow, in comparison with myself, I should not pay him half so much deference. I declare I do not know a more awful object than Darcy, on particular occasions, and in particular places; at his own house especially, and of a Sunday evening, when he has nothing to do.” Mr. Darcy smiled; but Elizabeth thought she could perceive that he was rather offended, and therefore checked her laugh. Miss Bingley warmly resented the indignity he had received, in an expostulation with her brother for talking such nonsense. “I see your design, Bingley,” said his friend. “You dislike an argument, and want to silence this.” “Perhaps I do. Arguments are too much like disputes. If you and Miss Bennet will defer yours till I am out of the room, I shall be very thankful; and then you may say whatever you like of me.” “What you ask,” said Elizabeth, “is no sacrifice on my side; and Mr. Darcy had much better finish his letter.” Mr. Darcy took her advice, and did finish his letter. When that business was over, he applied to Miss Bingley and Elizabeth for an indulgence of some music. 
Miss Bingley moved with some alacrity to the pianoforte; and, after a polite request that Elizabeth would lead the way which the other as politely and more earnestly negatived, she seated herself. Mrs. Hurst sang with her sister, and while they were thus employed, Elizabeth could not help observing, as she turned over some music-books that lay on the instrument, how frequently Mr. Darcy’s eyes were fixed on her. She hardly knew how to suppose that she could be an object of admiration to so great a man; and yet that he should look at her because he disliked her, was still more strange. She could only imagine, however, at last that she drew his notice because there was something more wrong and reprehensible, according to his ideas of right, than in any other person present. The supposition did not pain her. She liked him too little to care for his approbation. After playing some Italian songs, Miss Bingley varied the charm by a lively Scotch air; and soon afterwards Mr. Darcy, drawing near Elizabeth, said to her: “Do not you feel a great inclination, Miss Bennet, to seize such an opportunity of dancing a reel?” She smiled, but made no answer. He repeated the question, with some surprise at her silence. “Oh!” said she, “I heard you before, but I could not immediately determine what to say in reply. You wanted me, I know, to say ‘Yes,’ that you might have the pleasure of despising my taste; but I always delight in overthrowing those kind of schemes, and cheating a person of their premeditated contempt. I have, therefore, made up my mind to tell you, that I do not want to dance a reel at all—and now despise me if you dare.” “Indeed I do not dare.” Elizabeth, having rather expected to affront him, was amazed at his gallantry; but there was a mixture of sweetness and archness in her manner which made it difficult for her to affront anybody; and Darcy had never been so bewitched by any woman as he was by her. 
He really believed, that were it not for the inferiority of her connections, he should be in some danger. Miss Bingley saw, or suspected enough to be jealous; and her great anxiety for the recovery of her dear friend Jane received some assistance from her desire of getting rid of Elizabeth. She often tried to provoke Darcy into disliking her guest, by talking of their supposed marriage, and planning his happiness in such an alliance. “I hope,” said she, as they were walking together in the shrubbery the next day, “you will give your mother-in-law a few hints, when this desirable event takes place, as to the advantage of holding her tongue; and if you can compass it, do cure the younger girls of running after officers. And, if I may mention so delicate a subject, endeavour to check that little something, bordering on conceit and impertinence, which your lady possesses.” “Have you anything else to propose for my domestic felicity?” “Oh! yes. Do let the portraits of your uncle and aunt Phillips be placed in the gallery at Pemberley. Put them next to your great-uncle the judge. They are in the same profession, you know, only in different lines. As for your Elizabeth’s picture, you must not have it taken, for what painter could do justice to those beautiful eyes?” “It would not be easy, indeed, to catch their expression, but their colour and shape, and the eyelashes, so remarkably fine, might be copied.” At that moment they were met from another walk by Mrs. Hurst and Elizabeth herself. “I did not know that you intended to walk,” said Miss Bingley, in some confusion, lest they had been overheard. “You used us abominably ill,” answered Mrs. Hurst, “running away without telling us that you were coming out.” Then taking the disengaged arm of Mr. Darcy, she left Elizabeth to walk by herself. The path just admitted three. Mr. Darcy felt their rudeness, and immediately said: “This walk is not wide enough for our party. 
We had better go into the avenue.” But Elizabeth, who had not the least inclination to remain with them, laughingly answered: “No, no; stay where you are. You are charmingly grouped, and appear to uncommon advantage. The picturesque would be spoilt by admitting a fourth. Good-bye.” She then ran gaily off, rejoicing as she rambled about, in the hope of being at home again in a day or two. Jane was already so much recovered as to intend leaving her room for a couple of hours that evening.
| 86,379
| 86,379
| 0.780711
| 15,735
| 86,379
| 4.289228
| 0.143184
| 0.003556
| 0.001422
| 0.001037
| 0.012609
| 0.004623
| 0.000652
| 0
| 0
| 0
| 0
| 0.000141
| 0.179569
| 86,379
| 1
| 86,379
| 86,379
| 0.951445
| 0
| 0
| 0
| 0
| 1
| 0.999919
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| false
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
|
0
| 9
|
4d8be5562e5514fc9f0a81982ca78b7e642f771b
| 20,240
|
py
|
Python
|
sci_analysis/test/test_determine_analysis_type.py
|
cmmorrow/sci-analysis
|
de65ba29fe210eb950daa3dbc2e956963a4770ef
|
[
"MIT"
] | 17
|
2017-05-10T18:25:36.000Z
|
2021-12-23T14:43:49.000Z
|
sci_analysis/test/test_determine_analysis_type.py
|
cmmorrow/sci-analysis
|
de65ba29fe210eb950daa3dbc2e956963a4770ef
|
[
"MIT"
] | 57
|
2016-08-22T23:58:05.000Z
|
2019-07-31T06:54:22.000Z
|
sci_analysis/test/test_determine_analysis_type.py
|
cmmorrow/sci-analysis
|
de65ba29fe210eb950daa3dbc2e956963a4770ef
|
[
"MIT"
] | null | null | null |
import unittest
import numpy as np
import pandas as pd
import scipy.stats as st
from ..analysis import determine_analysis_type
from ..analysis.exc import NoDataError
from ..data import Vector, Categorical
class MyTestCase(unittest.TestCase):
def test_small_float_array(self):
np.random.seed(123456789)
input_array = st.norm.rvs(0, 1, 30)
self.assertIsInstance(determine_analysis_type(input_array), Vector)
def test_small_float_list(self):
np.random.seed(123456789)
input_array = st.norm.rvs(0, 1, 30).tolist()
self.assertIsInstance(determine_analysis_type(input_array), Vector)
def test_small_float_series(self):
np.random.seed(123456789)
input_array = pd.Series(st.norm.rvs(0, 1, 30))
self.assertIsInstance(determine_analysis_type(input_array), Vector)
def test_large_float_array(self):
np.random.seed(123456789)
input_array = st.norm.rvs(0, 1, 10000)
self.assertIsInstance(determine_analysis_type(input_array), Vector)
def test_large_float_list(self):
np.random.seed(123456789)
input_array = st.norm.rvs(0, 1, 10000).tolist()
self.assertIsInstance(determine_analysis_type(input_array), Vector)
def test_large_float_series(self):
np.random.seed(123456789)
input_array = pd.Series(st.norm.rvs(0, 1, 10000))
self.assertIsInstance(determine_analysis_type(input_array), Vector)
def test_small_float32_array(self):
np.random.seed(123456789)
input_array = st.norm.rvs(0, 1, 30).astype('float32')
self.assertIsInstance(determine_analysis_type(input_array), Vector)
def test_small_float32_list(self):
np.random.seed(123456789)
input_array = st.norm.rvs(0, 1, 30).astype('float32').tolist()
self.assertIsInstance(determine_analysis_type(input_array), Vector)
def test_small_float32_series(self):
np.random.seed(123456789)
input_array = pd.Series(st.norm.rvs(0, 1, 30).astype('float32'))
self.assertIsInstance(determine_analysis_type(input_array), Vector)
def test_small_float16_array(self):
np.random.seed(123456789)
input_array = st.norm.rvs(0, 1, 30).astype('float16')
self.assertIsInstance(determine_analysis_type(input_array), Vector)
def test_small_float16_list(self):
np.random.seed(123456789)
input_array = st.norm.rvs(0, 1, 30).astype('float16').tolist()
self.assertIsInstance(determine_analysis_type(input_array), Vector)
def test_small_float16_series(self):
np.random.seed(123456789)
input_array = pd.Series(st.norm.rvs(0, 1, 30).astype('float16'))
self.assertIsInstance(determine_analysis_type(input_array), Vector)
def test_small_single_float_array(self):
np.random.seed(123456789)
input_array = st.norm.rvs(0, 1, 1)
self.assertIsInstance(determine_analysis_type(input_array), Vector)
def test_small_single_float_list(self):
np.random.seed(123456789)
input_array = st.norm.rvs(0, 1, 1).tolist()
self.assertIsInstance(determine_analysis_type(input_array), Vector)
def test_small_single_float_series(self):
np.random.seed(123456789)
input_array = pd.Series(st.norm.rvs(0, 1, 1))
self.assertIsInstance(determine_analysis_type(input_array), Vector)
def test_small_vector(self):
np.random.seed(123456789)
input_array = Vector(st.norm.rvs(0, 1, 30))
self.assertIsInstance(determine_analysis_type(input_array), Vector)
def test_large_vector(self):
np.random.seed(123456789)
input_array = Vector(st.norm.rvs(0, 1, 10000))
self.assertIsInstance(determine_analysis_type(input_array), Vector)
def test_small_array_with_nan(self):
np.random.seed(123456789)
input_array = st.norm.rvs(0, 1, 30)
input_array[4] = np.nan
input_array[10] = np.nan
input_array[17] = np.nan
input_array[22] = np.nan
input_array[24] = np.nan
self.assertIsInstance(determine_analysis_type(input_array), Vector)
def test_small_list_with_nan(self):
np.random.seed(123456789)
input_array = st.norm.rvs(0, 1, 30)
input_array[4] = np.nan
input_array[10] = np.nan
input_array[17] = np.nan
input_array[22] = np.nan
input_array[24] = np.nan
self.assertIsInstance(determine_analysis_type(input_array.tolist()), Vector)
def test_small_series_with_nan(self):
np.random.seed(123456789)
input_array = st.norm.rvs(0, 1, 30)
input_array[4] = np.nan
input_array[10] = np.nan
input_array[17] = np.nan
input_array[22] = np.nan
input_array[24] = np.nan
self.assertIsInstance(determine_analysis_type(pd.Series(input_array)), Vector)
def test_none(self):
input_array = None
self.assertRaises(ValueError, lambda: determine_analysis_type(input_array))
def test_empty_list(self):
input_array = list()
self.assertRaises(NoDataError, lambda: determine_analysis_type(input_array))
def test_empty_array(self):
input_array = np.array([])
self.assertRaises(NoDataError, lambda: determine_analysis_type(input_array))
def test_empty_vector(self):
input_array = Vector([])
self.assertRaises(NoDataError, lambda: determine_analysis_type(input_array))
def test_float_scalar(self):
input_array = 3.14159256
self.assertRaises(ValueError, lambda: determine_analysis_type(input_array))
def test_small_int_array(self):
np.random.seed(123456789)
input_array = np.random.randint(-10, 11, 30)
self.assertIsInstance(determine_analysis_type(input_array), Vector)
def test_small_int_list(self):
np.random.seed(123456789)
input_array = np.random.randint(-10, 11, 30).tolist()
self.assertIsInstance(determine_analysis_type(input_array), Vector)
def test_small_int_series(self):
np.random.seed(123456789)
input_array = pd.Series(np.random.randint(-10, 11, 30))
self.assertIsInstance(determine_analysis_type(input_array), Vector)
def test_large_int_array(self):
np.random.seed(123456789)
input_array = np.random.randint(-10, 11, 10000)
self.assertIsInstance(determine_analysis_type(input_array), Vector)
def test_large_int_list(self):
np.random.seed(123456789)
input_array = np.random.randint(-10, 11, 10000).tolist()
self.assertIsInstance(determine_analysis_type(input_array), Vector)
def test_large_int_series(self):
np.random.seed(123456789)
input_array = pd.Series(np.random.randint(-10, 11, 10000))
self.assertIsInstance(determine_analysis_type(input_array), Vector)
def test_small_int32_array(self):
np.random.seed(123456789)
input_array = np.random.randint(-10, 11, 30).astype('int32')
self.assertIsInstance(determine_analysis_type(input_array), Vector)
def test_small_int32_list(self):
np.random.seed(123456789)
input_array = np.random.randint(-10, 11, 30).astype('int32').tolist()
self.assertIsInstance(determine_analysis_type(input_array), Vector)
def test_small_int32_series(self):
np.random.seed(123456789)
input_array = pd.Series(np.random.randint(-10, 11, 30).astype('int32'))
self.assertIsInstance(determine_analysis_type(input_array), Vector)
def test_small_int16_array(self):
np.random.seed(123456789)
input_array = np.random.randint(-10, 11, 30).astype('int16')
self.assertIsInstance(determine_analysis_type(input_array), Vector)
def test_small_int16_list(self):
np.random.seed(123456789)
input_array = np.random.randint(-10, 11, 30).astype('int16').tolist()
self.assertIsInstance(determine_analysis_type(input_array), Vector)
def test_small_int16_series(self):
np.random.seed(123456789)
input_array = pd.Series(np.random.randint(-10, 11, 30).astype('int16'))
self.assertIsInstance(determine_analysis_type(input_array), Vector)
def test_small_int8_array(self):
np.random.seed(123456789)
input_array = np.random.randint(-10, 11, 30).astype('int8')
self.assertIsInstance(determine_analysis_type(input_array), Vector)
def test_small_int8_list(self):
np.random.seed(123456789)
input_array = np.random.randint(-10, 11, 30).astype('int8').tolist()
self.assertIsInstance(determine_analysis_type(input_array), Vector)
def test_small_int8_series(self):
np.random.seed(123456789)
input_array = pd.Series(np.random.randint(-10, 11, 30).astype('int8'))
self.assertIsInstance(determine_analysis_type(input_array), Vector)
def test_int_scalar(self):
    # A bare int is not a sequence, so type detection raises ValueError.
    input_array = 3
    self.assertRaises(ValueError, lambda: determine_analysis_type(input_array))

def test_small_cat_list(self):
    # 30 random-length strings in a plain list -> Categorical, never Vector.
    np.random.seed(123456789)
    input_array = ['abcdefghijklmnopqrstuvwxyz'[:np.random.randint(1, 26)] for _ in range(30)]
    self.assertIsInstance(determine_analysis_type(input_array), Categorical)
    self.assertNotIsInstance(determine_analysis_type(input_array), Vector)

def test_small_cat_array(self):
    # Same strings as an ndarray -> Categorical, never Vector.
    np.random.seed(123456789)
    input_array = np.array(['abcdefghijklmnopqrstuvwxyz'[:np.random.randint(1, 26)] for _ in range(30)])
    self.assertIsInstance(determine_analysis_type(input_array), Categorical)
    self.assertNotIsInstance(determine_analysis_type(input_array), Vector)

def test_small_cat_series(self):
    # Same strings as a pandas Series -> Categorical, never Vector.
    np.random.seed(123456789)
    input_array = pd.Series(['abcdefghijklmnopqrstuvwxyz'[:np.random.randint(1, 26)] for _ in range(30)])
    self.assertIsInstance(determine_analysis_type(input_array), Categorical)
    self.assertNotIsInstance(determine_analysis_type(input_array), Vector)

def test_large_cat_list(self):
    # 10000-element string list: detection must not flip type with size.
    np.random.seed(123456789)
    input_array = ['abcdefghijklmnopqrstuvwxyz'[:np.random.randint(1, 26)] for _ in range(10000)]
    self.assertIsInstance(determine_analysis_type(input_array), Categorical)
    self.assertNotIsInstance(determine_analysis_type(input_array), Vector)

def test_large_cat_array(self):
    # 10000-element string ndarray -> Categorical.
    np.random.seed(123456789)
    input_array = np.array(['abcdefghijklmnopqrstuvwxyz'[:np.random.randint(1, 26)] for _ in range(10000)])
    self.assertIsInstance(determine_analysis_type(input_array), Categorical)
    self.assertNotIsInstance(determine_analysis_type(input_array), Vector)

def test_large_cat_series(self):
    # 10000-element string Series -> Categorical.
    np.random.seed(123456789)
    input_array = pd.Series(['abcdefghijklmnopqrstuvwxyz'[:np.random.randint(1, 26)] for _ in range(10000)])
    self.assertIsInstance(determine_analysis_type(input_array), Categorical)
    self.assertNotIsInstance(determine_analysis_type(input_array), Vector)

def test_single_cat_list(self):
    # Single-element containers are still accepted (unlike bare scalars).
    input_array = ['a']
    self.assertIsInstance(determine_analysis_type(input_array), Categorical)
    self.assertNotIsInstance(determine_analysis_type(input_array), Vector)

def test_single_cat_array(self):
    input_array = np.array(['a'])
    self.assertIsInstance(determine_analysis_type(input_array), Categorical)
    self.assertNotIsInstance(determine_analysis_type(input_array), Vector)

def test_single_cat_series(self):
    input_array = pd.Series(['a'])
    self.assertIsInstance(determine_analysis_type(input_array), Categorical)
    self.assertNotIsInstance(determine_analysis_type(input_array), Vector)
def test_small_categorical(self):
    # An existing Categorical passes through detection as a Categorical.
    np.random.seed(123456789)
    input_array = Categorical(['abcdefghijklmnopqrstuvwxyz'[:np.random.randint(1, 26)] for _ in range(30)])
    self.assertIsInstance(determine_analysis_type(input_array), Categorical)

def test_large_categorical(self):
    # Size does not change passthrough behavior for Categorical input.
    np.random.seed(123456789)
    input_array = Categorical(['abcdefghijklmnopqrstuvwxyz'[:np.random.randint(1, 26)] for _ in range(10000)])
    self.assertIsInstance(determine_analysis_type(input_array), Categorical)

def test_string_scalar(self):
    # A bare string is a scalar here, not a sequence of categories.
    input_array = 'a'
    self.assertRaises(ValueError, lambda: determine_analysis_type(input_array))

def test_empty_categorical(self):
    # An empty Categorical carries no data and must raise NoDataError.
    input_array = Categorical([])
    self.assertRaises(NoDataError, lambda: determine_analysis_type(input_array))
def test_small_cat_list_with_nan(self):
    # Sprinkling np.nan into a string list must not flip detection to Vector.
    np.random.seed(123456789)
    input_array = ['abcdefghijklmnopqrstuvwxyz'[:np.random.randint(1, 26)] for _ in range(30)]
    input_array[4] = np.nan
    input_array[10] = np.nan
    input_array[17] = np.nan
    input_array[22] = np.nan
    input_array[24] = np.nan
    self.assertIsInstance(determine_analysis_type(input_array), Categorical)
    self.assertNotIsInstance(determine_analysis_type(input_array), Vector)

def test_small_cat_array_with_nan(self):
    # Same NaN positions, but delivered as an ndarray.
    np.random.seed(123456789)
    input_array = ['abcdefghijklmnopqrstuvwxyz'[:np.random.randint(1, 26)] for _ in range(30)]
    input_array[4] = np.nan
    input_array[10] = np.nan
    input_array[17] = np.nan
    input_array[22] = np.nan
    input_array[24] = np.nan
    input_array = np.array(input_array)
    self.assertIsInstance(determine_analysis_type(input_array), Categorical)
    self.assertNotIsInstance(determine_analysis_type(input_array), Vector)

def test_small_cat_series_with_nan(self):
    # Same NaN positions, but delivered as a pandas Series.
    np.random.seed(123456789)
    input_array = ['abcdefghijklmnopqrstuvwxyz'[:np.random.randint(1, 26)] for _ in range(30)]
    input_array[4] = np.nan
    input_array[10] = np.nan
    input_array[17] = np.nan
    input_array[22] = np.nan
    input_array[24] = np.nan
    input_array = pd.Series(input_array)
    self.assertIsInstance(determine_analysis_type(input_array), Categorical)
    self.assertNotIsInstance(determine_analysis_type(input_array), Vector)
def test_small_string_num_list(self):
    # Numeric-looking strings stay Categorical — no implicit numeric coercion.
    input_array = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10']
    self.assertIsInstance(determine_analysis_type(input_array), Categorical)

def test_small_string_num_array(self):
    input_array = np.array(['1', '2', '3', '4', '5', '6', '7', '8', '9', '10'])
    self.assertIsInstance(determine_analysis_type(input_array), Categorical)

def test_small_string_num_series(self):
    input_array = pd.Series(['1', '2', '3', '4', '5', '6', '7', '8', '9', '10'])
    self.assertIsInstance(determine_analysis_type(input_array), Categorical)

def test_small_mixed_list(self):
    # Mixed str/int/float/NaN content is treated as categorical data.
    input_array = ['1', 'a', np.nan, 4, 5.0]
    self.assertIsInstance(determine_analysis_type(input_array), Categorical)

def test_small_mixed_array(self):
    input_array = np.array(['1', 'a', np.nan, 4, 5.0])
    self.assertIsInstance(determine_analysis_type(input_array), Categorical)

def test_small_mixed_series(self):
    input_array = pd.Series(['1', 'a', np.nan, 4, 5.0])
    self.assertIsInstance(determine_analysis_type(input_array), Categorical)
def test_arrays_with_other(self):
    # Paired ndarrays: 'other' must be preserved on the resulting Vector.
    np.random.seed(123456789)
    input_1_array = st.norm.rvs(0, 1, 10000)
    input_2_array = st.norm.rvs(1, 1, 10000)
    self.assertIsInstance(determine_analysis_type(input_1_array, other=input_2_array), Vector)
    self.assertTrue(pd.Series(input_1_array)
                    .equals(determine_analysis_type(input_1_array, other=input_2_array).data))
    self.assertTrue(pd.Series(input_2_array)
                    .equals(determine_analysis_type(input_1_array, other=input_2_array).other))

def test_series_with_other(self):
    # Paired Series inputs round-trip through .data and .other unchanged.
    np.random.seed(123456789)
    input_1_array = pd.Series(st.norm.rvs(0, 1, 10000))
    input_2_array = pd.Series(st.norm.rvs(1, 1, 10000))
    self.assertIsInstance(determine_analysis_type(input_1_array, other=input_2_array), Vector)
    self.assertTrue(input_1_array
                    .equals(determine_analysis_type(input_1_array, other=input_2_array).data))
    self.assertTrue(input_2_array
                    .equals(determine_analysis_type(input_1_array, other=input_2_array).other))

def test_list_with_other(self):
    # Paired plain lists are accepted and converted losslessly.
    np.random.seed(123456789)
    input_1_array = pd.Series(st.norm.rvs(0, 1, 10000)).tolist()
    input_2_array = pd.Series(st.norm.rvs(1, 1, 10000)).tolist()
    self.assertIsInstance(determine_analysis_type(input_1_array, other=input_2_array), Vector)
    self.assertListEqual(input_1_array, determine_analysis_type(input_1_array, other=input_2_array).data.tolist())
    self.assertListEqual(input_2_array, determine_analysis_type(input_1_array, other=input_2_array).other.tolist())

def test_vector_with_other(self):
    # A pre-built Vector with 'other' passes through with both members intact.
    np.random.seed(123456789)
    input_1_array = st.norm.rvs(0, 1, 10000)
    input_2_array = st.norm.rvs(1, 1, 10000)
    vector = Vector(input_1_array, other=input_2_array)
    self.assertIsInstance(determine_analysis_type(vector), Vector)
    self.assertTrue(vector.data
                    .equals(determine_analysis_type(input_1_array, other=input_2_array).data))
    self.assertTrue(vector.other
                    .equals(determine_analysis_type(input_1_array, other=input_2_array).other))

def test_vector_with_other_categorical(self):
    # Numeric data + categorical 'other': result is a Vector whose 'other'
    # is all-null (the strings cannot be coerced to numbers).
    np.random.seed(123456789)
    input_1_array = st.norm.rvs(0, 1, 10000)
    input_2_array = ['abcdefghijklmnopqrstuvwxyz'[:np.random.randint(1, 26)] for _ in range(30)]
    self.assertIsInstance(determine_analysis_type(input_1_array, other=input_2_array), Vector)
    self.assertTrue(pd.Series(input_1_array)
                    .equals(determine_analysis_type(input_1_array, other=input_2_array).data))
    self.assertTrue(all(determine_analysis_type(input_1_array, other=input_2_array).other.isnull()))

def test_categorical_with_other_vector(self):
    # The primary input decides the type: categorical data stays Categorical
    # even when 'other' is numeric.
    np.random.seed(123456789)
    input_1_array = ['abcdefghijklmnopqrstuvwxyz'[:np.random.randint(1, 26)] for _ in range(30)]
    input_2_array = st.norm.rvs(0, 1, 10000)
    self.assertIsInstance(determine_analysis_type(input_1_array, other=input_2_array), Categorical)
def test_float_with_groups(self):
    # Two labeled groups of floats -> Vector whose .groups has both labels.
    np.random.seed(123456789)
    input_1_array = pd.DataFrame({'input': st.norm.rvs(size=2000), 'group': ['Group 1'] * 2000})
    input_2_array = pd.DataFrame({'input': st.norm.rvs(1, size=2000), 'group': ['Group 2'] * 2000})
    df = pd.concat([input_1_array, input_2_array])
    self.assertIsInstance(determine_analysis_type(df['input'], groups=df['group']), Vector)
    self.assertEqual(len(determine_analysis_type(df['input'], groups=df['group']).groups), 2)

def test_float_with_other_with_groups(self):
    # groups= works together with other= on paired float columns.
    np.random.seed(123456789)
    input_1_array = pd.DataFrame({'input1': st.norm.rvs(size=2000),
                                  'input2': st.weibull_min.rvs(1.7, size=2000),
                                  'group': ['Group 1'] * 2000})
    input_2_array = pd.DataFrame({'input1': st.norm.rvs(1, size=2000),
                                  'input2': st.weibull_min.rvs(1.7, size=2000),
                                  'group': ['Group 2'] * 2000})
    df = pd.concat([input_1_array, input_2_array])
    self.assertIsInstance(determine_analysis_type(df['input1'], other=df['input2'], groups=df['group']), Vector)
    self.assertEqual(len(determine_analysis_type(df['input1'], other=df['input2'], groups=df['group']).groups), 2)

def test_categorical_with_groups(self):
    # groups= does not change type detection for categorical data.
    np.random.seed(123456789)
    input_array = ['abcdefghijklmnopqrstuvwxyz'[:np.random.randint(1, 26)] for _ in range(30)]
    grp = ['Group 1' for _ in range(30)]
    self.assertIsInstance(determine_analysis_type(input_array, groups=grp), Categorical)

# Standard script entry point for running this test module directly.
if __name__ == '__main__':
    unittest.main()
| 46.315789
| 119
| 0.69249
| 2,625
| 20,240
| 5.06019
| 0.041524
| 0.130995
| 0.153354
| 0.176165
| 0.94971
| 0.937665
| 0.931793
| 0.915004
| 0.906271
| 0.905819
| 0
| 0.068397
| 0.190959
| 20,240
| 436
| 120
| 46.422018
| 0.742779
| 0
| 0
| 0.525
| 0
| 0
| 0.031719
| 0.017984
| 0
| 0
| 0
| 0
| 0.266667
| 1
| 0.2
| false
| 0
| 0.019444
| 0
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4dbc1384a61c6f9ca30f3934c62fc1147d9adb40
| 4,605
|
py
|
Python
|
test.py
|
leonard-thong/dlwlrat
|
2b4b669a3b2f348d1b05125afd6b8a1907b6b212
|
[
"CC-BY-3.0"
] | 2
|
2020-10-08T06:01:22.000Z
|
2020-12-15T16:28:27.000Z
|
test.py
|
dreamyang-liu/SciAnnotate
|
dfa41ec5c2c4b6665b5d9b059895be20e9e0cfe2
|
[
"CC-BY-3.0"
] | 8
|
2020-12-09T02:55:20.000Z
|
2020-12-23T23:31:42.000Z
|
test.py
|
dreamyang-liu/SciAnnotate
|
dfa41ec5c2c4b6665b5d9b059895be20e9e0cfe2
|
[
"CC-BY-3.0"
] | 4
|
2021-02-02T04:51:53.000Z
|
2021-07-18T17:00:04.000Z
|
def prehandle_data(**kwargs):
    """Build the per-line sentence skeleton for one brat document and delegate
    annotation attachment to _prehandle_data.

    Expects 'collection' and 'document' in kwargs; derives the .txt, .ann and
    _func.ann paths from them.
    """
    real_dir = real_directory(kwargs['collection'])
    document = path_join(real_dir, kwargs['document'])
    txt_file_path = document + '.txt'
    stem = txt_file_path[:-4]
    ann_file_path = stem + '.ann'
    function_ann_file_path = stem + '_func.ann'
    # One {'sentence': ..., 'annotation': []} entry per text line.
    with open(txt_file_path, 'r') as txt_file:
        out = [{'sentence': text, 'annotation': []} for text in txt_file.readlines()]
    return _prehandle_data(out, txt_file_path, ann_file_path, function_ann_file_path)
def _prehandle_data(out, txt_file_path, ann_file_path, function_ann_file_path):
    """Attach annotations from both .ann files to the sentence list.

    out: list of {'sentence': <line text>, 'annotation': []} dicts, one per
    line of the text file.  Each annotation line becomes
    [source_name, label, start, end] with start/end rebased to the start of
    the sentence that contains the span.  Returns {'processedData': out}.
    """
    res = dict()
    # The line-offset map depends only on the text file, so build it once
    # instead of re-reading the whole file for every annotation line
    # (the original re-ran judge_line() inside the inner loop).
    line_dict = judge_line(txt_file_path)
    line_start_index = sorted(line_dict)
    # The two .ann files were previously handled by two copy-pasted loops;
    # they share one helper now.
    _apply_ann_file(out, ann_file_path, line_dict, line_start_index)
    _apply_ann_file(out, function_ann_file_path, line_dict, line_start_index)
    res['processedData'] = out
    return res


def _apply_ann_file(out, ann_path, line_dict, line_start_index):
    """Parse one brat .ann file and append its entries to the matching sentences."""
    with open(ann_path, 'r') as ann_file:
        for line in ann_file.readlines():
            line_num = -1
            data = []
            line = line.replace('\t', ' ')
            info = line.split(' ')
            # info[1] is '<source>_<label part>_<label part>...'
            pieces = info[1].split('_')
            data.append(pieces[0])          # source name
            data.append(''.join(pieces[1:]))  # label (underscore-joined parts fused)
            start = int(info[2])
            end = int(info[3])
            for i, offset in enumerate(line_start_index):
                # NOTE(review): 'start >' means a span beginning exactly at a
                # line's first character never matches and is appended to
                # out[-1] via line_num == -1; behavior preserved from the
                # original — confirm whether '>=' was intended.
                if start > offset and end < (offset + len(line_dict[offset])):
                    # Rebase the span to be sentence-relative.
                    start -= offset
                    end -= offset
                    line_num = i
                    break
            data.append(start)
            data.append(end)
            out[line_num]['annotation'].append(data)
def judge_line(txt_file_path):
    """Return {starting character offset: line text} for every line of the file.

    Offsets are cumulative character counts (newlines included), so the map
    can translate a document-level span into the line that contains it.
    """
    offsets = {}
    position = 0
    with open(txt_file_path, 'r') as handle:
        for text in handle:
            offsets[position] = text
            position += len(text)
    return offsets
# Ad-hoc manual smoke test that runs at import time.  It targets a
# developer-local brat data path and will raise FileNotFoundError on any
# other machine — NOTE(review): consider guarding with __main__ or removing.
txt_file_path = '/Users/robin/research/brat/data/Local/test.txt'
out = []
with open(txt_file_path, 'r') as txt_file:
    for line in txt_file.readlines():
        # One sentence record per text line, annotations filled in later.
        sentence = dict()
        sentence['sentence'] = line
        sentence['annotation'] = []
        out.append(sentence)
res = _prehandle_data(out, '/Users/robin/research/brat/data/Local/test.txt', '/Users/robin/research/brat/data/Local/test.ann','/Users/robin/research/brat/data/Local/test_func.ann')
print(res)
| 39.698276
| 180
| 0.549837
| 564
| 4,605
| 4.221631
| 0.120567
| 0.098278
| 0.152877
| 0.113398
| 0.838303
| 0.804704
| 0.804704
| 0.755985
| 0.724066
| 0.724066
| 0
| 0.005463
| 0.324213
| 4,605
| 116
| 181
| 39.698276
| 0.75964
| 0
| 0
| 0.732673
| 0
| 0
| 0.083816
| 0.04376
| 0
| 0
| 0
| 0
| 0
| 1
| 0.029703
| false
| 0
| 0
| 0
| 0.059406
| 0.009901
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
15134cb122c5dfed422dc54d51cec34d57ff07b8
| 10,489
|
py
|
Python
|
unit_test/test_ecole_plot.py
|
pandat8/ML4LocalBranch_extend
|
001839ace3506c8410a30d1f4d3188a3cd95e2dd
|
[
"MIT"
] | 4
|
2021-10-17T00:26:12.000Z
|
2021-12-06T08:41:02.000Z
|
unit_test/test_ecole_plot.py
|
pandat8/ML4LocalBranch
|
2fb38b12556ea5e62a0313f617e98cd163eaaf7f
|
[
"MIT"
] | null | null | null |
unit_test/test_ecole_plot.py
|
pandat8/ML4LocalBranch
|
2fb38b12556ea5e62a0313f617e98cd163eaaf7f
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
import numpy

# Benchmark instance families and Local-Branching experiment modes.
instancetypes = ['setcovering', 'capacitedfacility', 'independentset', 'combinatorialauction']
modes = ['repair-slackvars', 'repair-supportbinvars', 'repair-binvars', 'improve-supportbinvars', 'improve-binvars']
instancetype = instancetypes[2]
mode = modes[4]
directory = './result/generated_instances/' + instancetype + '/' + mode + '/'


def _accumulate_results(skip=(), print_obj0=False, print_name=False,
                        log_x=False, normalize_first=False,
                        minmax_objs=False, t_divisor=None):
    """Load every instance's '<type>-<i>.npz' result and average it.

    skip: instance indices to exclude (they also reduce the divisor).
    print_obj0 / print_name: reproduce the original per-instance prints.
    log_x: replace neigh_sizes with log10(neigh_sizes).
    normalize_first: divide objs by objs[0].
    minmax_objs: min-max normalize objs to [0, 1].
    t_divisor: divide t by this value before accumulating.
    Returns (neigh_sizes, objs_ave, t_ave, last_instance_name).
    """
    objs_all = None
    t_all = None
    neigh_sizes = None
    instance_name = None
    n_loaded = 0
    for i in range(100):
        if i in skip:
            continue
        instance_name = instancetype + '-' + str(i)
        if print_name:
            print(instance_name)
        data = numpy.load(directory + instance_name + '.npz')
        neigh_sizes = data['neigh_sizes']
        t = data['t']
        objs = data['objs']
        if print_obj0:
            print(objs[0])
        if log_x:
            neigh_sizes = numpy.log10(neigh_sizes)
        if normalize_first:
            objs = objs / objs[0]
        if t_divisor is not None:
            t = t / t_divisor
        if minmax_objs:
            objs = (objs - numpy.min(objs))
            objs = objs / numpy.max(objs)
        if n_loaded == 0:
            objs_all = objs
            t_all = t
        else:
            objs_all += objs
            t_all += t
        n_loaded += 1
    # n_loaded is 99 when one instance is skipped, 100 otherwise — matching
    # the hard-coded divisors in the original copy-pasted branches.
    return neigh_sizes, objs_all / n_loaded, t_all / n_loaded, instance_name


def _plot_results(title, xlabel, ylabel0, neigh_sizes, objs_ave, t_ave,
                  instance_name, perf=None):
    """Render the standard figure: objective + solving-time panels, plus a
    performance-score panel when perf is given."""
    plt.clf()
    fig, ax = plt.subplots(3 if perf is not None else 2, 1, figsize=(6.4, 6.4))
    fig.suptitle(title)
    fig.subplots_adjust(top=0.5)
    ax[0].plot(neigh_sizes, objs_ave)
    ax[0].set_title(instance_name, loc='right')
    ax[0].set_xlabel(xlabel)
    ax[0].set_ylabel(ylabel0)
    ax[1].plot(neigh_sizes, t_ave)
    ax[1].set_ylabel("Solving time")
    if perf is not None:
        ax[2].plot(neigh_sizes, perf)
        ax[2].set_ylabel("Performance score")
    plt.show()


# Axis labels shared by several modes.
_XLABEL_VIOLATIONS = r'$\alpha$ ' + '(Neighborhood size: ' + r'$K = \alpha \times N_{violations}$)'
_XLABEL_BINVARS = r'$\alpha$ ' + '(Neighborhood size: ' + r'$K = \alpha \times N_{binvars}$)'
_XLABEL_SUPPORT = r'$\alpha$ ' + '(Neighborhood size: ' + r'$K = \alpha \times N_{supportofbins}$)'

# Each branch below reproduces one of the original copy-pasted blocks;
# instance 38 is excluded from the 'repair' averages as in the original.
if mode == 'repair-nviolations':
    ns, objs_ave, t_ave, name = _accumulate_results(skip=(38,), print_obj0=True, log_x=True)
    _plot_results("LB to repair",
                  r'$log(\alpha)$ ' + '(Neighborhood size: ' + r'$K = \alpha \times N_{violations}$)',
                  r'$N_{violations}$', ns, objs_ave, t_ave, name)
elif mode == 'repair-nbinvars':
    ns, objs_ave, t_ave, name = _accumulate_results(skip=(38,), print_obj0=True)
    _plot_results("LB to repair", _XLABEL_BINVARS, r'$N_{violations}$',
                  ns, objs_ave, t_ave, name)
elif mode == 'improve':
    ns, objs_ave, t_ave, name = _accumulate_results()
    _plot_results("LB to improve", _XLABEL_BINVARS, "Objective",
                  ns, objs_ave, t_ave, name)
elif mode == 'repair-slackvars':
    ns, objs_ave, t_ave, name = _accumulate_results(skip=(38,), print_obj0=True)
    _plot_results("LB to repair (over slack variables)", _XLABEL_VIOLATIONS,
                  r'$N_{violations}$', ns, objs_ave, t_ave, name)
elif mode == 'repair-supportbinvars':
    ns, objs_ave, t_ave, name = _accumulate_results(skip=(38,), print_obj0=True)
    _plot_results("LB to repair (over support of binary vars)", _XLABEL_SUPPORT,
                  r'$N_{violations}$', ns, objs_ave, t_ave, name)
elif mode == 'repair-binvars':
    ns, objs_ave, t_ave, name = _accumulate_results(skip=(38,), normalize_first=True)
    _plot_results("LB to repair (over binary variables)", _XLABEL_BINVARS,
                  r'$N_{violations}$', ns, objs_ave, t_ave, name)
elif mode == 'improve-supportbinvars':
    ns, objs_ave, t_ave, name = _accumulate_results(print_name=True)
    _plot_results("LB to improve (over support of binary vars)", _XLABEL_SUPPORT,
                  "Objective", ns, objs_ave, t_ave, name)
elif mode == 'improve-binvars':
    # Times are scaled by 30 and objectives min-max normalized so the two
    # can be combined into one weighted performance score.
    ns, objs_ave, t_ave, name = _accumulate_results(print_name=True,
                                                    t_divisor=30,
                                                    minmax_objs=True)
    alpha = 1/3
    perf = alpha * t_ave + (1 - alpha) * objs_ave
    # Report the neighborhood size(s) achieving the best score.
    print(ns[numpy.where(perf == perf.min())])
    _plot_results("LB to improve (over all bins)", _XLABEL_BINVARS, "Objective",
                  ns, objs_ave, t_ave, name, perf=perf)
| 33.193038
| 116
| 0.543236
| 1,457
| 10,489
| 3.75429
| 0.075498
| 0.073126
| 0.029616
| 0.035101
| 0.871115
| 0.864717
| 0.85192
| 0.832176
| 0.832176
| 0.832176
| 0
| 0.037812
| 0.294022
| 10,489
| 315
| 117
| 33.298413
| 0.700878
| 0.15807
| 0
| 0.855932
| 0
| 0
| 0.169306
| 0.013102
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.008475
| 0
| 0.008475
| 0.029661
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
12a977a3f3bfad4e4d85d50fa271341bbd763dbb
| 94
|
py
|
Python
|
pytest_unittest_discovery_scenarios/under_subdir_of_root_plus_one_further_subdir/tests/test_base_stuff.py
|
d3r3kk/vscode-python-extras
|
e96da47ccf15dff8673c28bd3e8981b550a00a3d
|
[
"MIT"
] | null | null | null |
pytest_unittest_discovery_scenarios/under_subdir_of_root_plus_one_further_subdir/tests/test_base_stuff.py
|
d3r3kk/vscode-python-extras
|
e96da47ccf15dff8673c28bd3e8981b550a00a3d
|
[
"MIT"
] | null | null | null |
pytest_unittest_discovery_scenarios/under_subdir_of_root_plus_one_further_subdir/tests/test_base_stuff.py
|
d3r3kk/vscode-python-extras
|
e96da47ccf15dff8673c28bd3e8981b550a00a3d
|
[
"MIT"
] | null | null | null |
def test_do_test():
    """Trivial always-passing test used for discovery-scenario checks."""
    expected = 1
    assert expected == 1
def test_do_other_test():
    """Trivial always-passing string-equality test for discovery scenarios."""
    value = "blah"
    assert value == "blah"
| 13.428571
| 27
| 0.617021
| 15
| 94
| 3.533333
| 0.466667
| 0.264151
| 0.339623
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.027778
| 0.234043
| 94
| 6
| 28
| 15.666667
| 0.708333
| 0
| 0
| 0
| 0
| 0
| 0.085106
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0.5
| true
| 0
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
420057cf980bf87e242477bb6b78614c55464cd4
| 66,131
|
py
|
Python
|
cinder/tests/unit/volume/test_connection.py
|
lightsey/cinder
|
e03d68e42e57a63f8d0f3e177fb4287290612b24
|
[
"Apache-2.0"
] | 3
|
2015-04-02T21:44:36.000Z
|
2016-04-29T21:19:04.000Z
|
cinder/tests/unit/volume/test_connection.py
|
lightsey/cinder
|
e03d68e42e57a63f8d0f3e177fb4287290612b24
|
[
"Apache-2.0"
] | 3
|
2016-04-29T21:45:26.000Z
|
2016-05-04T19:41:23.000Z
|
cinder/tests/unit/volume/test_connection.py
|
lightsey/cinder
|
e03d68e42e57a63f8d0f3e177fb4287290612b24
|
[
"Apache-2.0"
] | 4
|
2016-01-27T00:25:52.000Z
|
2021-03-25T19:54:08.000Z
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for Volume connection test cases."""
from unittest import mock
import ddt
from cinder import context
from cinder import db
from cinder import exception
from cinder.message import message_field
from cinder import objects
from cinder.objects import fields
from cinder.tests import fake_driver
from cinder.tests.unit.api.v2 import fakes as v2_fakes
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import fake_volume
from cinder.tests.unit import utils as tests_utils
from cinder.tests.unit import volume as base
import cinder.volume
import cinder.volume.targets
import cinder.volume.targets.iscsi
@ddt.ddt
class DiscardFlagTestCase(base.BaseVolumeTestCase):
    # Exercises how initialize_connection() combines the driver-reported
    # 'discard' capability with the 'report_discard_supported' config option.

    def setUp(self):
        super(DiscardFlagTestCase, self).setUp()
        # Fully mocked driver: each test controls create_export /
        # initialize_connection / configuration.safe_get explicitly.
        self.volume.driver = mock.MagicMock()
        db.volume_type_create(self.context,
                              v2_fakes.fake_default_type_get(
                                  fake.VOLUME_TYPE2_ID))
        self.vol_type = db.volume_type_get_by_name(self.context,
                                                   'vol_type_name')

    # Truth table: config flag x driver flag -> expected 'discard' in the
    # returned connection info (config True forces it on; otherwise the
    # driver's value wins; None means the key is absent/unset).
    @ddt.data(dict(config_discard_flag=True,
                   driver_discard_flag=None,
                   expected_flag=True),
              dict(config_discard_flag=False,
                   driver_discard_flag=None,
                   expected_flag=None),
              dict(config_discard_flag=True,
                   driver_discard_flag=True,
                   expected_flag=True),
              dict(config_discard_flag=False,
                   driver_discard_flag=True,
                   expected_flag=True),
              dict(config_discard_flag=False,
                   driver_discard_flag=False,
                   expected_flag=False),
              dict(config_discard_flag=None,
                   driver_discard_flag=True,
                   expected_flag=True),
              dict(config_discard_flag=None,
                   driver_discard_flag=False,
                   expected_flag=False))
    @ddt.unpack
    def test_initialize_connection_discard_flag(self,
                                                config_discard_flag,
                                                driver_discard_flag,
                                                expected_flag):
        self.volume.driver.create_export.return_value = None
        connector = {'ip': 'IP', 'initiator': 'INITIATOR'}
        conn_info = {
            'driver_volume_type': 'iscsi',
            'data': {'access_mode': 'rw',
                     'encrypted': False}
        }
        # Only inject the driver flag when the scenario defines one.
        if driver_discard_flag is not None:
            conn_info['data']['discard'] = driver_discard_flag
        self.volume.driver.initialize_connection.return_value = conn_info

        def _safe_get(key):
            # Stub config lookup: only 'report_discard_supported' matters.
            if key == 'report_discard_supported':
                return config_discard_flag
            else:
                return None

        self.volume.driver.configuration.safe_get.side_effect = _safe_get
        with mock.patch.object(objects, 'Volume') as mock_vol:
            volume = tests_utils.create_volume(self.context)
            volume.volume_type_id = None
            mock_vol.get_by_id.return_value = volume
            conn_info = self.volume.initialize_connection(self.context,
                                                          volume,
                                                          connector)
        self.assertEqual(expected_flag, conn_info['data'].get('discard'))
class VolumeConnectionTestCase(base.BaseVolumeTestCase):
def setUp(self, *args, **kwargs):
    # Register the fake default volume type so connection tests can resolve
    # 'vol_type_name' from the DB like production code would.
    super(VolumeConnectionTestCase, self).setUp()
    db.volume_type_create(self.context,
                          v2_fakes.fake_default_type_get(
                              fake.VOLUME_TYPE2_ID))
    self.vol_type = db.volume_type_get_by_name(self.context,
                                               'vol_type_name')
@mock.patch.object(cinder.volume.targets.iscsi.ISCSITarget,
                   '_get_target_chap_auth')
@mock.patch.object(db, 'volume_admin_metadata_get')
@mock.patch.object(db.sqlalchemy.api, 'volume_get')
@mock.patch.object(db, 'volume_update')
def test_initialize_connection_fetchqos(self,
                                        _mock_volume_update,
                                        _mock_volume_get,
                                        _mock_volume_admin_metadata_get,
                                        mock_get_target):
    """Make sure initialize_connection returns correct information."""
    # Minimal fake volume record; DB access is fully stubbed via the
    # patched volume_get/volume_update/volume_admin_metadata_get.
    _fake_admin_meta = [{'key': 'fake-key', 'value': 'fake-value'}]
    _fake_volume = {'volume_type_id': fake.VOLUME_TYPE_ID,
                    'name': 'fake_name',
                    'host': 'fake_host',
                    'id': fake.VOLUME_ID,
                    'volume_admin_metadata': _fake_admin_meta}
    fake_volume_obj = fake_volume.fake_volume_obj(self.context,
                                                  **_fake_volume)
    _mock_volume_get.return_value = _fake_volume
    _mock_volume_update.return_value = _fake_volume
    _mock_volume_admin_metadata_get.return_value = {
        'fake-key': 'fake-value'}
    connector = {'ip': 'IP', 'initiator': 'INITIATOR'}
    qos_values = {'consumer': 'front-end',
                  'specs': {
                      'key1': 'value1',
                      'key2': 'value2'}
                  }
    with mock.patch.object(cinder.volume.volume_types,
                           'get_volume_type_qos_specs') as type_qos, \
        mock.patch.object(cinder.tests.fake_driver.FakeLoggingVolumeDriver,
                          'initialize_connection') as driver_init:
        type_qos.return_value = dict(qos_specs=qos_values)
        driver_init.return_value = {'data': {}}
        mock_get_target.return_value = None
        qos_specs_expected = {'key1': 'value1',
                              'key2': 'value2'}
        # initialize_connection() passes qos_specs that is designated to
        # be consumed by front-end or both front-end and back-end
        conn_info = self.volume.initialize_connection(
            self.context, fake_volume_obj, connector,)
        self.assertDictEqual(qos_specs_expected,
                             conn_info['data']['qos_specs'])
        qos_values.update({'consumer': 'both'})
        conn_info = self.volume.initialize_connection(
            self.context, fake_volume_obj, connector)
        self.assertDictEqual(qos_specs_expected,
                             conn_info['data']['qos_specs'])
        # initialize_connection() skips qos_specs that is designated to be
        # consumed by back-end only
        qos_values.update({'consumer': 'back-end'})
        type_qos.return_value = dict(qos_specs=qos_values)
        conn_info = self.volume.initialize_connection(
            self.context, fake_volume_obj, connector)
        self.assertIsNone(conn_info['data']['qos_specs'])
@mock.patch.object(cinder.volume.targets.iscsi.ISCSITarget,
                   '_get_target_chap_auth')
@mock.patch.object(db, 'volume_admin_metadata_get')
@mock.patch.object(db.sqlalchemy.api, 'volume_get')
@mock.patch.object(db, 'volume_update')
def test_initialize_connection_qos_per_gb(self,
                                          _mock_volume_update,
                                          _mock_volume_get,
                                          _mock_volume_admin_metadata_get,
                                          mock_get_target):
    """QoS test with no minimum value."""
    # Volume size is 3 GB, so every *_per_gb spec below is expected to be
    # scaled by 3 in the resulting qos_specs.
    _fake_admin_meta = [{'key': 'fake-key', 'value': 'fake-value'}]
    _fake_volume = {'size': 3,
                    'volume_type_id': fake.VOLUME_TYPE_ID,
                    'name': 'fake_name',
                    'host': 'fake_host',
                    'id': fake.VOLUME_ID,
                    'volume_admin_metadata': _fake_admin_meta}
    fake_volume_obj = fake_volume.fake_volume_obj(self.context,
                                                  **_fake_volume)
    _mock_volume_get.return_value = _fake_volume
    _mock_volume_update.return_value = _fake_volume
    _mock_volume_admin_metadata_get.return_value = {
        'fake-key': 'fake-value'}
    connector = {'ip': 'IP', 'initiator': 'INITIATOR'}
    qos_values = {'consumer': 'front-end',
                  'specs': {
                      'write_iops_sec_per_gb': 30,
                      'read_iops_sec_per_gb': 7700,
                      'total_iops_sec_per_gb': 300000,
                      'read_bytes_sec_per_gb': 10,
                      'write_bytes_sec_per_gb': 40,
                      'total_bytes_sec_per_gb': 1048576}
                  }
    with mock.patch.object(cinder.volume.volume_types,
                           'get_volume_type_qos_specs') as type_qos, \
        mock.patch.object(cinder.tests.fake_driver.FakeLoggingVolumeDriver,
                          'initialize_connection') as driver_init:
        type_qos.return_value = dict(qos_specs=qos_values)
        driver_init.return_value = {'data': {}}
        mock_get_target.return_value = None
        # Expected values are the per-gb specs multiplied by size (3).
        qos_specs_expected = {'write_iops_sec': 90,
                              'read_iops_sec': 23100,
                              'total_iops_sec': 900000,
                              'read_bytes_sec': 30,
                              'write_bytes_sec': 120,
                              'total_bytes_sec': 3145728}
        # initialize_connection() passes qos_specs that is designated to
        # be consumed by front-end or both front-end and back-end
        conn_info = self.volume.initialize_connection(
            self.context, fake_volume_obj, connector,)
        self.assertDictEqual(qos_specs_expected,
                             conn_info['data']['qos_specs'])
        qos_values.update({'consumer': 'both'})
        conn_info = self.volume.initialize_connection(
            self.context, fake_volume_obj, connector)
        self.assertDictEqual(qos_specs_expected,
                             conn_info['data']['qos_specs'])
    @mock.patch.object(cinder.volume.targets.iscsi.ISCSITarget,
                       '_get_target_chap_auth')
    @mock.patch.object(db, 'volume_admin_metadata_get')
    @mock.patch.object(db.sqlalchemy.api, 'volume_get')
    @mock.patch.object(db, 'volume_update')
    def test_initialize_connection_qos_per_gb_with_min_small(
            self, _mock_volume_update, _mock_volume_get,
            _mock_volume_admin_metadata_get, mock_get_target):
        """QoS test when volume size results in using minimum.

        With a 1 GB volume each ``*_per_gb`` spec scaled by size stays
        below its ``*_per_gb_min`` companion, so the minimum values are
        the ones expected in the resulting qos_specs.
        """
        _fake_admin_meta = [{'key': 'fake-key', 'value': 'fake-value'}]
        # 1 GB volume: per-gb values * 1 are all below the *_min specs.
        _fake_volume = {'size': 1,
                        'volume_type_id': fake.VOLUME_TYPE_ID,
                        'name': 'fake_name',
                        'host': 'fake_host',
                        'id': fake.VOLUME_ID,
                        'volume_admin_metadata': _fake_admin_meta}
        fake_volume_obj = fake_volume.fake_volume_obj(self.context,
                                                      **_fake_volume)
        _mock_volume_get.return_value = _fake_volume
        _mock_volume_update.return_value = _fake_volume
        _mock_volume_admin_metadata_get.return_value = {
            'fake-key': 'fake-value'}
        connector = {'ip': 'IP', 'initiator': 'INITIATOR'}
        qos_values = {'consumer': 'front-end',
                      'specs': {
                          'write_iops_sec_per_gb_min': 800,
                          'write_iops_sec_per_gb': 30,
                          'read_iops_sec_per_gb_min': 23100,
                          'read_iops_sec_per_gb': 7700,
                          'total_iops_sec_per_gb_min': 900000,
                          'total_iops_sec_per_gb': 300000,
                          'total_iops_sec_max': 15000000,
                          'read_bytes_sec_per_gb_min': 30,
                          'read_bytes_sec_per_gb': 10,
                          'write_bytes_sec_per_gb_min': 120,
                          'write_bytes_sec_per_gb': 40,
                          'total_bytes_sec_per_gb_min': 3145728,
                          'total_bytes_sec_per_gb': 1048576}
                      }
        with mock.patch.object(cinder.volume.volume_types,
                               'get_volume_type_qos_specs') as type_qos, \
            mock.patch.object(cinder.tests.fake_driver.FakeLoggingVolumeDriver,
                              'initialize_connection') as driver_init:
            type_qos.return_value = dict(qos_specs=qos_values)
            driver_init.return_value = {'data': {}}
            mock_get_target.return_value = None
            # Every expected value equals its *_per_gb_min spec; the
            # total_iops_sec_max cap (15000000) is not reached here.
            qos_specs_expected = {'write_iops_sec': 800,
                                  'read_iops_sec': 23100,
                                  'total_iops_sec': 900000,
                                  'read_bytes_sec': 30,
                                  'write_bytes_sec': 120,
                                  'total_bytes_sec': 3145728}
            # initialize_connection() passes qos_specs that is designated to
            # be consumed by front-end or both front-end and back-end
            conn_info = self.volume.initialize_connection(
                self.context, fake_volume_obj, connector,)
            self.assertDictEqual(qos_specs_expected,
                                 conn_info['data']['qos_specs'])
            qos_values.update({'consumer': 'both'})
            conn_info = self.volume.initialize_connection(
                self.context, fake_volume_obj, connector)
            self.assertDictEqual(qos_specs_expected,
                                 conn_info['data']['qos_specs'])
    @mock.patch.object(cinder.volume.targets.iscsi.ISCSITarget,
                       '_get_target_chap_auth')
    @mock.patch.object(db, 'volume_admin_metadata_get')
    @mock.patch.object(db.sqlalchemy.api, 'volume_get')
    @mock.patch.object(db, 'volume_update')
    def test_initialize_connection_qos_per_gb_with_min_large(
            self, _mock_volume_update, _mock_volume_get,
            _mock_volume_admin_metadata_get, mock_get_target):
        """QoS test when volume size results in using per-gb values.

        With a 100 GB volume each ``*_per_gb`` spec scaled by size
        exceeds its ``*_per_gb_min`` companion, so the scaled values win;
        total_iops additionally hits the ``total_iops_sec_max`` cap.
        """
        _fake_admin_meta = [{'key': 'fake-key', 'value': 'fake-value'}]
        # 100 GB volume: per-gb values * 100 are all above the *_min specs.
        _fake_volume = {'size': 100,
                        'volume_type_id': fake.VOLUME_TYPE_ID,
                        'name': 'fake_name',
                        'host': 'fake_host',
                        'id': fake.VOLUME_ID,
                        'volume_admin_metadata': _fake_admin_meta}
        fake_volume_obj = fake_volume.fake_volume_obj(self.context,
                                                      **_fake_volume)
        _mock_volume_get.return_value = _fake_volume
        _mock_volume_update.return_value = _fake_volume
        _mock_volume_admin_metadata_get.return_value = {
            'fake-key': 'fake-value'}
        connector = {'ip': 'IP', 'initiator': 'INITIATOR'}
        qos_values = {'consumer': 'front-end',
                      'specs': {
                          'write_iops_sec_per_gb_min': 800,
                          'write_iops_sec_per_gb': 30,
                          'read_iops_sec_per_gb_min': 23100,
                          'read_iops_sec_per_gb': 7700,
                          'total_iops_sec_per_gb_min': 900000,
                          'total_iops_sec_per_gb': 300000,
                          'total_iops_sec_max': 15000000,
                          'read_bytes_sec_per_gb_min': 30,
                          'read_bytes_sec_per_gb': 10,
                          'write_bytes_sec_per_gb_min': 120,
                          'write_bytes_sec_per_gb': 40,
                          'total_bytes_sec_per_gb_min': 3145728,
                          'total_bytes_sec_per_gb': 1048576}
                      }
        with mock.patch.object(cinder.volume.volume_types,
                               'get_volume_type_qos_specs') as type_qos, \
            mock.patch.object(cinder.tests.fake_driver.FakeLoggingVolumeDriver,
                              'initialize_connection') as driver_init:
            type_qos.return_value = dict(qos_specs=qos_values)
            driver_init.return_value = {'data': {}}
            mock_get_target.return_value = None
            # Each value is *_per_gb * 100, e.g. write_iops 30 * 100 = 3000;
            # total_iops would be 30000000 but is capped at
            # total_iops_sec_max = 15000000.
            qos_specs_expected = {'write_iops_sec': 3000,
                                  'read_iops_sec': 770000,
                                  'total_iops_sec': 15000000,
                                  'read_bytes_sec': 1000,
                                  'write_bytes_sec': 4000,
                                  'total_bytes_sec': 104857600}
            # initialize_connection() passes qos_specs that is designated to
            # be consumed by front-end or both front-end and back-end
            conn_info = self.volume.initialize_connection(
                self.context, fake_volume_obj, connector,)
            self.assertDictEqual(qos_specs_expected,
                                 conn_info['data']['qos_specs'])
            qos_values.update({'consumer': 'both'})
            conn_info = self.volume.initialize_connection(
                self.context, fake_volume_obj, connector)
            self.assertDictEqual(qos_specs_expected,
                                 conn_info['data']['qos_specs'])
@mock.patch.object(fake_driver.FakeLoggingVolumeDriver, 'create_export')
def test_initialize_connection_export_failure(self,
_mock_create_export):
"""Test exception path for create_export failure."""
volume = tests_utils.create_volume(
self.context, admin_metadata={'fake-key': 'fake-value'},
**self.volume_params)
_mock_create_export.side_effect = exception.CinderException
connector = {'ip': 'IP', 'initiator': 'INITIATOR'}
self.assertRaises(exception.VolumeBackendAPIException,
self.volume.initialize_connection,
self.context, volume, connector)
def test_initialize_connection_maintenance(self):
"""Test initialize connection in maintenance."""
test_meta1 = {'fake_key1': 'fake_value1', 'fake_key2': 'fake_value2'}
volume = tests_utils.create_volume(self.context, metadata=test_meta1,
**self.volume_params)
volume['status'] = 'maintenance'
volume_api = cinder.volume.api.API()
self.assertRaises(exception.InvalidVolume,
volume_api.initialize_connection,
self.context,
volume,
None)
@ddt.ddt
class VolumeAttachDetachTestCase(base.BaseVolumeTestCase):
    def setUp(self):
        """Prepare fixtures shared by the attach/detach tests."""
        super(VolumeAttachDetachTestCase, self).setUp()
        # Avoid actually wiping volume data during deletes.
        self.patch('cinder.volume.volume_utils.clear_volume', autospec=True)
        # Non-admin context used by tests that attach as a regular user.
        self.user_context = context.RequestContext(user_id=fake.USER_ID,
                                                   project_id=fake.PROJECT_ID)
        db.volume_type_create(self.context,
                              v2_fakes.fake_default_type_get(
                                  fake.VOLUME_TYPE2_ID))
        self.vol_type = db.volume_type_get_by_name(self.context,
                                                   'vol_type_name')
    @ddt.data(False, True)
    def test_run_attach_detach_volume_for_instance(self, volume_object):
        """Make sure volume can be attached and detached from instance.

        Runs twice via ddt: once passing the volume object down to
        attach/detach (volume_object=True) and once passing None.
        """
        mountpoint = "/dev/sdf"
        # attach volume to the instance then to detach
        instance_uuid = '12345678-1234-5678-1234-567812345678'
        volume = tests_utils.create_volume(self.user_context,
                                           **self.volume_params)
        with volume.obj_as_admin():
            volume.admin_metadata['readonly'] = True
            volume.save()
        volume_id = volume.id
        self.volume.create_volume(self.user_context,
                                  volume=volume)
        # Exercise both call signatures: with and without the volume object.
        volume_passed = volume if volume_object else None
        attachment = self.volume.attach_volume(self.user_context,
                                               volume_id,
                                               instance_uuid, None,
                                               mountpoint, 'ro',
                                               volume=volume_passed)
        attachment2 = self.volume.attach_volume(self.user_context,
                                                volume_id,
                                                instance_uuid, None,
                                                mountpoint, 'ro',
                                                volume=volume_passed)
        # Attaching the same instance twice yields the same attachment.
        self.assertEqual(attachment.id, attachment2.id)
        vol = objects.Volume.get_by_id(self.context, volume_id)
        self.assertEqual("in-use", vol.status)
        self.assertEqual(fields.VolumeAttachStatus.ATTACHED,
                         attachment.attach_status)
        self.assertEqual(mountpoint, attachment.mountpoint)
        self.assertEqual(instance_uuid, attachment.instance_uuid)
        self.assertIsNone(attachment.attached_host)
        admin_metadata = vol.volume_admin_metadata
        self.assertEqual(2, len(admin_metadata))
        # readonly was set above; attached_mode is recorded by the attach.
        expected = dict(readonly='True', attached_mode='ro')
        ret = {}
        for item in admin_metadata:
            ret.update({item['key']: item['value']})
        self.assertDictEqual(expected, ret)
        connector = {'initiator': 'iqn.2012-07.org.fake:01'}
        volume = volume if volume_object else vol
        conn_info = self.volume.initialize_connection(self.context,
                                                      volume, connector)
        self.assertEqual('ro', conn_info['data']['access_mode'])
        # An attached volume must not be deletable.
        self.assertRaises(exception.VolumeAttached,
                          self.volume.delete_volume,
                          self.context,
                          volume=volume)
        self.volume.detach_volume(self.context, volume_id,
                                  attachment.id,
                                  volume=volume_passed)
        vol = objects.Volume.get_by_id(self.context, volume_id)
        self.assertEqual('available', vol.status)
        self.volume.delete_volume(self.context, volume)
        self.assertRaises(exception.VolumeNotFound,
                          db.volume_get,
                          self.context,
                          volume_id)
@mock.patch('cinder.volume.manager.LOG', mock.Mock())
def test_initialize_connection(self):
volume = mock.Mock(save=mock.Mock(side_effect=Exception))
with mock.patch.object(self.volume, 'driver') as driver_mock:
self.assertRaises(exception.ExportFailure,
self.volume.initialize_connection, self.context,
volume, mock.Mock())
driver_mock.remove_export.assert_called_once_with(mock.ANY, volume)
def test_run_attach_detach_2volumes_for_instance(self):
"""Make sure volume can be attached and detached from instance."""
# attach first volume to the instance
mountpoint1 = "/dev/vdc"
instance_uuid = '12345678-1234-5678-1234-567812345678'
volume1 = tests_utils.create_volume(
self.context, admin_metadata={'readonly': 'True'},
**self.volume_params)
volume1_id = volume1['id']
self.volume.create_volume(self.context, volume1)
attachment = self.volume.attach_volume(self.context, volume1_id,
instance_uuid, None,
mountpoint1, 'ro')
vol1 = db.volume_get(context.get_admin_context(), volume1_id)
self.assertEqual("in-use", vol1['status'])
self.assertEqual('attached', attachment['attach_status'])
self.assertEqual(mountpoint1, attachment['mountpoint'])
self.assertEqual(instance_uuid, attachment['instance_uuid'])
self.assertIsNone(attachment['attached_host'])
admin_metadata = vol1['volume_admin_metadata']
self.assertEqual(2, len(admin_metadata))
expected = dict(readonly='True', attached_mode='ro')
ret = {}
for item in admin_metadata:
ret.update({item['key']: item['value']})
self.assertDictEqual(expected, ret)
connector = {'initiator': 'iqn.2012-07.org.fake:01'}
conn_info = self.volume.initialize_connection(self.context,
volume1, connector)
self.assertEqual('ro', conn_info['data']['access_mode'])
self.assertRaises(exception.VolumeAttached,
self.volume.delete_volume,
self.context,
volume1)
# attach 2nd volume to the instance
mountpoint2 = "/dev/vdd"
volume2 = tests_utils.create_volume(
self.context, admin_metadata={'readonly': 'False'},
**self.volume_params)
volume2_id = volume2['id']
self.volume.create_volume(self.context, volume2)
attachment2 = self.volume.attach_volume(self.context, volume2_id,
instance_uuid, None,
mountpoint2, 'rw')
vol2 = db.volume_get(context.get_admin_context(), volume2_id)
self.assertEqual("in-use", vol2['status'])
self.assertEqual('attached', attachment2['attach_status'])
self.assertEqual(mountpoint2, attachment2['mountpoint'])
self.assertEqual(instance_uuid, attachment2['instance_uuid'])
self.assertIsNone(attachment2['attached_host'])
admin_metadata = vol2['volume_admin_metadata']
self.assertEqual(2, len(admin_metadata))
expected = dict(readonly='False', attached_mode='rw')
ret = {}
for item in admin_metadata:
ret.update({item['key']: item['value']})
self.assertDictEqual(expected, ret)
connector = {'initiator': 'iqn.2012-07.org.fake:02'}
conn_info = self.volume.initialize_connection(self.context,
volume2, connector)
self.assertEqual('rw', conn_info['data']['access_mode'])
self.assertRaises(exception.VolumeAttached,
self.volume.delete_volume,
self.context,
volume2)
# detach first volume and then 2nd volume
self.volume.detach_volume(self.context, volume1_id, attachment['id'])
vol1 = db.volume_get(self.context, volume1_id)
self.assertEqual('available', vol1['status'])
self.volume.delete_volume(self.context, volume1)
self.assertRaises(exception.VolumeNotFound,
db.volume_get,
self.context,
volume1_id)
self.volume.detach_volume(self.context, volume2_id, attachment2['id'])
vol2 = db.volume_get(self.context, volume2_id)
self.assertEqual('available', vol2['status'])
self.volume.delete_volume(self.context, volume2)
self.assertRaises(exception.VolumeNotFound,
db.volume_get,
self.context,
volume2_id)
def test_detach_invalid_attachment_id(self):
"""Make sure if the attachment id isn't found we raise."""
attachment_id = "notfoundid"
volume = tests_utils.create_volume(self.context,
admin_metadata={'readonly': 'True'},
multiattach=False,
**self.volume_params)
self.volume.detach_volume(self.context, volume['id'],
attachment_id)
volume = db.volume_get(self.context, volume['id'])
self.assertEqual('available', volume['status'])
instance_uuid = '12345678-1234-5678-1234-567812345678'
attached_host = 'fake_host'
mountpoint = '/dev/fake'
tests_utils.attach_volume(self.context, volume['id'],
instance_uuid, attached_host,
mountpoint)
self.volume.detach_volume(self.context, volume['id'],
attachment_id)
volume = db.volume_get(self.context, volume['id'])
self.assertEqual('in-use', volume['status'])
def test_detach_no_attachments(self):
self.volume_params['status'] = 'detaching'
volume = tests_utils.create_volume(self.context,
admin_metadata={'readonly': 'True'},
multiattach=False,
**self.volume_params)
self.volume.detach_volume(self.context, volume['id'])
volume = db.volume_get(self.context, volume['id'])
self.assertEqual('available', volume['status'])
    def test_run_attach_detach_volume_for_instance_no_attachment_id(self):
        """Make sure volume can be attached and detached from instance.

        Covers detach without an attachment id: it fails with
        InvalidVolume while multiple attachments exist, and succeeds once
        only a single attachment remains.
        """
        mountpoint = "/dev/sdf"
        # attach volume to the instance then to detach
        instance_uuid = '12345678-1234-5678-1234-567812345678'
        instance_uuid_2 = '12345678-4321-8765-4321-567812345678'
        volume = tests_utils.create_volume(self.context,
                                           admin_metadata={'readonly': 'True'},
                                           multiattach=True,
                                           **self.volume_params)
        volume_id = volume['id']
        self.volume.create_volume(self.context, volume)
        attachment = self.volume.attach_volume(self.context, volume_id,
                                               instance_uuid, None,
                                               mountpoint, 'ro')
        vol = db.volume_get(context.get_admin_context(), volume_id)
        self.assertEqual('in-use', vol['status'])
        self.assertEqual(fields.VolumeAttachStatus.ATTACHED,
                         attachment['attach_status'])
        self.assertEqual(mountpoint, attachment['mountpoint'])
        self.assertEqual(instance_uuid, attachment['instance_uuid'])
        self.assertIsNone(attachment['attached_host'])
        admin_metadata = vol['volume_admin_metadata']
        self.assertEqual(2, len(admin_metadata))
        expected = dict(readonly='True', attached_mode='ro')
        ret = {}
        for item in admin_metadata:
            ret.update({item['key']: item['value']})
        self.assertDictEqual(expected, ret)
        # Second attachment for a different instance (multiattach volume).
        attachment2 = self.volume.attach_volume(self.context, volume_id,
                                                instance_uuid_2, None,
                                                mountpoint, 'ro')
        connector = {'initiator': 'iqn.2012-07.org.fake:01'}
        conn_info = self.volume.initialize_connection(self.context,
                                                      volume, connector)
        self.assertEqual('ro', conn_info['data']['access_mode'])
        self.assertRaises(exception.VolumeAttached,
                          self.volume.delete_volume,
                          self.context,
                          volume)
        # With two attachments, detaching without an id is ambiguous.
        self.assertRaises(exception.InvalidVolume,
                          self.volume.detach_volume,
                          self.context, volume_id)
        self.volume.detach_volume(self.context, volume_id, attachment['id'])
        vol = db.volume_get(self.context, volume_id)
        self.assertEqual('in-use', vol['status'])
        self.volume.detach_volume(self.context, volume_id, attachment2['id'])
        vol = db.volume_get(self.context, volume_id)
        self.assertEqual('available', vol['status'])
        attachment = self.volume.attach_volume(self.context, volume_id,
                                               instance_uuid, None,
                                               mountpoint, 'ro')
        vol = db.volume_get(self.context, volume_id)
        self.assertEqual('in-use', vol['status'])
        # Only one attachment left, so no id is required to detach.
        self.volume.detach_volume(self.context, volume_id)
        vol = db.volume_get(self.context, volume_id)
        self.assertEqual('available', vol['status'])
        self.volume.delete_volume(self.context, volume)
        self.assertRaises(exception.VolumeNotFound,
                          db.volume_get,
                          self.context,
                          volume_id)
    def test_run_attach_detach_multiattach_volume_for_instances(self):
        """Make sure volume can be attached to multiple instances.

        Two distinct instances attach the same multiattach volume; the
        volume stays 'in-use' until both attachments are removed.
        """
        mountpoint = "/dev/sdf"
        # attach volume to the instance then to detach
        instance_uuid = '12345678-1234-5678-1234-567812345678'
        volume = tests_utils.create_volume(self.context,
                                           admin_metadata={'readonly': 'True'},
                                           multiattach=True,
                                           **self.volume_params)
        volume_id = volume['id']
        self.volume.create_volume(self.context, volume)
        attachment = self.volume.attach_volume(self.context, volume_id,
                                               instance_uuid, None,
                                               mountpoint, 'ro')
        vol = db.volume_get(context.get_admin_context(), volume_id)
        self.assertEqual('in-use', vol['status'])
        self.assertTrue(vol['multiattach'])
        self.assertEqual(fields.VolumeAttachStatus.ATTACHED,
                         attachment['attach_status'])
        self.assertEqual(mountpoint, attachment['mountpoint'])
        self.assertEqual(instance_uuid, attachment['instance_uuid'])
        self.assertIsNone(attachment['attached_host'])
        admin_metadata = vol['volume_admin_metadata']
        self.assertEqual(2, len(admin_metadata))
        expected = dict(readonly='True', attached_mode='ro')
        ret = {}
        for item in admin_metadata:
            ret.update({item['key']: item['value']})
        self.assertDictEqual(expected, ret)
        connector = {'initiator': 'iqn.2012-07.org.fake:01'}
        conn_info = self.volume.initialize_connection(self.context,
                                                      volume, connector)
        self.assertEqual('ro', conn_info['data']['access_mode'])
        # Second instance gets its own, distinct attachment.
        instance2_uuid = '12345678-1234-5678-1234-567812345000'
        mountpoint2 = "/dev/sdx"
        attachment2 = self.volume.attach_volume(self.context, volume_id,
                                                instance2_uuid, None,
                                                mountpoint2, 'ro')
        vol = db.volume_get(context.get_admin_context(), volume_id)
        self.assertEqual('in-use', vol['status'])
        self.assertTrue(vol['multiattach'])
        self.assertEqual(fields.VolumeAttachStatus.ATTACHED,
                         attachment2['attach_status'])
        self.assertEqual(mountpoint2, attachment2['mountpoint'])
        self.assertEqual(instance2_uuid, attachment2['instance_uuid'])
        self.assertIsNone(attachment2['attached_host'])
        self.assertNotEqual(attachment, attachment2)
        self.assertRaises(exception.VolumeAttached,
                          self.volume.delete_volume,
                          self.context,
                          volume)
        # Removing one attachment leaves the volume in-use...
        self.volume.detach_volume(self.context, volume_id, attachment['id'])
        vol = db.volume_get(self.context, volume_id)
        self.assertEqual('in-use', vol['status'])
        self.assertRaises(exception.VolumeAttached,
                          self.volume.delete_volume,
                          self.context,
                          volume)
        # ... and removing the last one makes it available again.
        self.volume.detach_volume(self.context, volume_id, attachment2['id'])
        vol = db.volume_get(self.context, volume_id)
        self.assertEqual('available', vol['status'])
        self.volume.delete_volume(self.context, volume)
        self.assertRaises(exception.VolumeNotFound,
                          db.volume_get,
                          self.context,
                          volume_id)
def test_run_attach_twice_multiattach_volume_for_instances(self):
"""Make sure volume can be attached to multiple instances."""
mountpoint = "/dev/sdf"
# attach volume to the instance then to detach
instance_uuid = '12345678-1234-5678-1234-567812345699'
volume = tests_utils.create_volume(self.context,
admin_metadata={'readonly': 'True'},
multiattach=True,
**self.volume_params)
volume_id = volume['id']
self.volume.create_volume(self.context, volume)
attachment = self.volume.attach_volume(self.context, volume_id,
instance_uuid, None,
mountpoint, 'ro')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('in-use', vol['status'])
self.assertTrue(vol['multiattach'])
self.assertEqual(fields.VolumeAttachStatus.ATTACHED,
attachment['attach_status'])
self.assertEqual(mountpoint, attachment['mountpoint'])
self.assertEqual(instance_uuid, attachment['instance_uuid'])
self.assertIsNone(attachment['attached_host'])
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(2, len(admin_metadata))
expected = dict(readonly='True', attached_mode='ro')
ret = {}
for item in admin_metadata:
ret.update({item['key']: item['value']})
self.assertDictEqual(expected, ret)
connector = {'initiator': 'iqn.2012-07.org.fake:01'}
conn_info = self.volume.initialize_connection(self.context,
volume, connector)
self.assertEqual('ro', conn_info['data']['access_mode'])
mountpoint2 = "/dev/sdx"
attachment2 = self.volume.attach_volume(self.context, volume_id,
instance_uuid, None,
mountpoint2, 'ro')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('in-use', vol['status'])
self.assertTrue(vol['multiattach'])
self.assertEqual('attached', attachment2['attach_status'])
self.assertEqual(mountpoint, attachment2['mountpoint'])
self.assertEqual(instance_uuid, attachment2['instance_uuid'])
self.assertIsNone(attachment2['attached_host'])
self.assertRaises(exception.VolumeAttached,
self.volume.delete_volume,
self.context,
volume)
    def test_attach_detach_not_multiattach_volume_for_instances(self):
        """Make sure volume can't be attached to more than one instance.

        With multiattach=False the second instance's attach attempt must
        fail with InvalidVolume.
        """
        mountpoint = "/dev/sdf"
        # attach volume to the instance then to detach
        instance_uuid = '12345678-1234-5678-1234-567812345678'
        volume = tests_utils.create_volume(self.context,
                                           admin_metadata={'readonly': 'True'},
                                           multiattach=False,
                                           **self.volume_params)
        volume_id = volume['id']
        self.volume.create_volume(self.context, volume)
        attachment = self.volume.attach_volume(self.context, volume_id,
                                               instance_uuid, None,
                                               mountpoint, 'ro')
        vol = db.volume_get(context.get_admin_context(), volume_id)
        self.assertEqual('in-use', vol['status'])
        self.assertFalse(vol['multiattach'])
        self.assertEqual(fields.VolumeAttachStatus.ATTACHED,
                         attachment['attach_status'])
        self.assertEqual(mountpoint, attachment['mountpoint'])
        self.assertEqual(instance_uuid, attachment['instance_uuid'])
        self.assertIsNone(attachment['attached_host'])
        admin_metadata = vol['volume_admin_metadata']
        self.assertEqual(2, len(admin_metadata))
        expected = dict(readonly='True', attached_mode='ro')
        ret = {}
        for item in admin_metadata:
            ret.update({item['key']: item['value']})
        self.assertDictEqual(expected, ret)
        connector = {'initiator': 'iqn.2012-07.org.fake:01'}
        conn_info = self.volume.initialize_connection(self.context,
                                                      volume, connector)
        self.assertEqual('ro', conn_info['data']['access_mode'])
        # A different instance may not attach a non-multiattach volume.
        instance2_uuid = '12345678-1234-5678-1234-567812345000'
        mountpoint2 = "/dev/sdx"
        self.assertRaises(exception.InvalidVolume,
                          self.volume.attach_volume,
                          self.context,
                          volume_id,
                          instance2_uuid,
                          None,
                          mountpoint2, 'ro')
        self.assertRaises(exception.VolumeAttached,
                          self.volume.delete_volume,
                          self.context,
                          volume)
        self.volume.detach_volume(self.context, volume_id, attachment['id'])
        vol = db.volume_get(self.context, volume_id)
        self.assertEqual('available', vol['status'])
        self.volume.delete_volume(self.context, volume)
        self.assertRaises(exception.VolumeNotFound,
                          db.volume_get,
                          self.context,
                          volume_id)
    def test_run_attach_detach_volume_for_host(self):
        """Make sure volume can be attached and detached from host.

        Host attach: instance_uuid is None and the host name is
        sanitized before being stored as attached_host.
        """
        mountpoint = "/dev/sdf"
        volume = tests_utils.create_volume(
            self.context,
            admin_metadata={'readonly': 'False'},
            **self.volume_params)
        volume_id = volume['id']
        self.volume.create_volume(self.context, volume)
        attachment = self.volume.attach_volume(self.context, volume_id, None,
                                               'fake_host', mountpoint, 'rw')
        vol = db.volume_get(context.get_admin_context(), volume_id)
        self.assertEqual('in-use', vol['status'])
        self.assertEqual(fields.VolumeAttachStatus.ATTACHED,
                         attachment['attach_status'])
        self.assertEqual(mountpoint, attachment['mountpoint'])
        self.assertIsNone(attachment['instance_uuid'])
        # sanitized, conforms to RFC-952 and RFC-1123 specs.
        self.assertEqual('fake-host', attachment['attached_host'])
        admin_metadata = vol['volume_admin_metadata']
        self.assertEqual(2, len(admin_metadata))
        expected = dict(readonly='False', attached_mode='rw')
        ret = {}
        for item in admin_metadata:
            ret.update({item['key']: item['value']})
        self.assertDictEqual(expected, ret)
        connector = {'initiator': 'iqn.2012-07.org.fake:01'}
        conn_info = self.volume.initialize_connection(self.context,
                                                      volume, connector)
        self.assertEqual('rw', conn_info['data']['access_mode'])
        # Deletion is blocked while attached; allowed after detach.
        self.assertRaises(exception.VolumeAttached,
                          self.volume.delete_volume,
                          self.context,
                          volume)
        self.volume.detach_volume(self.context, volume_id, attachment['id'])
        vol = db.volume_get(self.context, volume_id)
        self.assertEqual("available", vol['status'])
        self.volume.delete_volume(self.context, volume)
        self.assertRaises(exception.VolumeNotFound,
                          db.volume_get,
                          self.context,
                          volume_id)
    def test_run_attach_detach_multiattach_volume_for_hosts(self):
        """Make sure volume can be attached and detached from hosts.

        Two different hosts attach the same multiattach volume; it stays
        'in-use' until both attachments are removed.
        """
        mountpoint = "/dev/sdf"
        volume = tests_utils.create_volume(
            self.context,
            admin_metadata={'readonly': 'False'},
            multiattach=True,
            **self.volume_params)
        volume_id = volume['id']
        self.volume.create_volume(self.context, volume)
        attachment = self.volume.attach_volume(self.context, volume_id, None,
                                               'fake_host', mountpoint, 'rw')
        vol = db.volume_get(context.get_admin_context(), volume_id)
        self.assertEqual('in-use', vol['status'])
        self.assertTrue(vol['multiattach'])
        self.assertEqual(fields.VolumeAttachStatus.ATTACHED,
                         attachment['attach_status'])
        self.assertEqual(mountpoint, attachment['mountpoint'])
        self.assertIsNone(attachment['instance_uuid'])
        # sanitized, conforms to RFC-952 and RFC-1123 specs.
        self.assertEqual('fake-host', attachment['attached_host'])
        admin_metadata = vol['volume_admin_metadata']
        self.assertEqual(2, len(admin_metadata))
        expected = dict(readonly='False', attached_mode='rw')
        ret = {}
        for item in admin_metadata:
            ret.update({item['key']: item['value']})
        self.assertDictEqual(expected, ret)
        connector = {'initiator': 'iqn.2012-07.org.fake:01'}
        conn_info = self.volume.initialize_connection(self.context,
                                                      volume, connector)
        self.assertEqual('rw', conn_info['data']['access_mode'])
        # Second host gets its own attachment on the same volume.
        mountpoint2 = "/dev/sdx"
        attachment2 = self.volume.attach_volume(self.context, volume_id, None,
                                                'fake_host2', mountpoint2,
                                                'rw')
        vol = db.volume_get(context.get_admin_context(), volume_id)
        self.assertEqual('in-use', vol['status'])
        self.assertEqual(fields.VolumeAttachStatus.ATTACHED,
                         attachment2['attach_status'])
        self.assertEqual(mountpoint2, attachment2['mountpoint'])
        self.assertIsNone(attachment2['instance_uuid'])
        # sanitized, conforms to RFC-952 and RFC-1123 specs.
        self.assertEqual('fake-host2', attachment2['attached_host'])
        self.assertRaises(exception.VolumeAttached,
                          self.volume.delete_volume,
                          self.context,
                          volume)
        # First detach leaves it in-use; second makes it available.
        self.volume.detach_volume(self.context, volume_id, attachment['id'])
        vol = db.volume_get(self.context, volume_id)
        self.assertEqual("in-use", vol['status'])
        self.volume.detach_volume(self.context, volume_id, attachment2['id'])
        vol = db.volume_get(self.context, volume_id)
        self.assertEqual("available", vol['status'])
        self.volume.delete_volume(self.context, volume)
        self.assertRaises(exception.VolumeNotFound,
                          db.volume_get,
                          self.context,
                          volume_id)
def test_run_attach_twice_multiattach_volume_for_hosts(self):
"""Make sure volume can be attached and detached from hosts."""
mountpoint = "/dev/sdf"
volume = tests_utils.create_volume(
self.context,
admin_metadata={'readonly': 'False'},
multiattach=True,
**self.volume_params)
volume_id = volume['id']
self.volume.create_volume(self.context, volume)
attachment = self.volume.attach_volume(self.context, volume_id, None,
'fake_host', mountpoint, 'rw')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('in-use', vol['status'])
self.assertTrue(vol['multiattach'])
self.assertEqual(fields.VolumeAttachStatus.ATTACHED,
attachment['attach_status'])
self.assertEqual(mountpoint, attachment['mountpoint'])
self.assertIsNone(attachment['instance_uuid'])
# sanitized, conforms to RFC-952 and RFC-1123 specs.
self.assertEqual('fake-host', attachment['attached_host'])
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(2, len(admin_metadata))
expected = dict(readonly='False', attached_mode='rw')
ret = {}
for item in admin_metadata:
ret.update({item['key']: item['value']})
self.assertDictEqual(expected, ret)
connector = {'initiator': 'iqn.2012-07.org.fake:01'}
conn_info = self.volume.initialize_connection(self.context,
volume, connector)
self.assertEqual('rw', conn_info['data']['access_mode'])
mountpoint2 = "/dev/sdx"
attachment2 = self.volume.attach_volume(self.context, volume_id, None,
'fake_host', mountpoint2,
'rw')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('in-use', vol['status'])
self.assertEqual('attached', attachment2['attach_status'])
self.assertEqual(mountpoint, attachment2['mountpoint'])
self.assertIsNone(attachment2['instance_uuid'])
self.assertRaises(exception.VolumeAttached,
self.volume.delete_volume,
self.context,
volume)
    def test_run_attach_detach_not_multiattach_volume_for_hosts(self):
        """Make sure volume can't be attached to more than one host.

        With multiattach=False the second host's attach attempt must
        fail with InvalidVolume and leave the original attachment intact.
        """
        mountpoint = "/dev/sdf"
        volume = tests_utils.create_volume(
            self.context,
            admin_metadata={'readonly': 'False'},
            multiattach=False,
            **self.volume_params)
        volume_id = volume['id']
        self.volume.create_volume(self.context, volume)
        attachment = self.volume.attach_volume(self.context, volume_id, None,
                                               'fake_host', mountpoint, 'rw')
        vol = db.volume_get(context.get_admin_context(), volume_id)
        self.assertEqual('in-use', vol['status'])
        self.assertFalse(vol['multiattach'])
        self.assertEqual(fields.VolumeAttachStatus.ATTACHED,
                         attachment['attach_status'])
        self.assertEqual(mountpoint, attachment['mountpoint'])
        self.assertIsNone(attachment['instance_uuid'])
        # sanitized, conforms to RFC-952 and RFC-1123 specs.
        self.assertEqual('fake-host', attachment['attached_host'])
        admin_metadata = vol['volume_admin_metadata']
        self.assertEqual(2, len(admin_metadata))
        expected = dict(readonly='False', attached_mode='rw')
        ret = {}
        for item in admin_metadata:
            ret.update({item['key']: item['value']})
        self.assertDictEqual(expected, ret)
        connector = {'initiator': 'iqn.2012-07.org.fake:01'}
        conn_info = self.volume.initialize_connection(self.context,
                                                      volume, connector)
        self.assertEqual('rw', conn_info['data']['access_mode'])
        # A second host may not attach a non-multiattach volume.
        mountpoint2 = "/dev/sdx"
        self.assertRaises(exception.InvalidVolume,
                          self.volume.attach_volume,
                          self.context,
                          volume_id,
                          None,
                          'fake_host2',
                          mountpoint2,
                          'rw')
        # The failed attach must not have disturbed the first attachment.
        vol = db.volume_get(context.get_admin_context(), volume_id)
        self.assertEqual('in-use', vol['status'])
        self.assertEqual(fields.VolumeAttachStatus.ATTACHED,
                         attachment['attach_status'])
        self.assertEqual(mountpoint, attachment['mountpoint'])
        self.assertIsNone(attachment['instance_uuid'])
        # sanitized, conforms to RFC-952 and RFC-1123 specs.
        self.assertEqual('fake-host', attachment['attached_host'])
        self.assertRaises(exception.VolumeAttached,
                          self.volume.delete_volume,
                          self.context,
                          volume)
        self.volume.detach_volume(self.context, volume_id, attachment['id'])
        vol = db.volume_get(self.context, volume_id)
        self.assertEqual('available', vol['status'])
        self.volume.delete_volume(self.context, volume)
        self.assertRaises(exception.VolumeNotFound,
                          db.volume_get,
                          self.context,
                          volume_id)
def test_run_attach_detach_volume_with_attach_mode(self):
    """Attach/detach a readonly volume with an explicit 'ro' attach mode.

    Exercises both attach targets in sequence — first an instance, then a
    host — verifying volume status, attachment fields, admin metadata and
    the connection's access mode after each attach, and that detach
    restores the volume to 'available' with only the readonly key left.
    """
    instance_uuid = '12345678-1234-5678-1234-567812345678'
    mountpoint = "/dev/sdf"
    volume = tests_utils.create_volume(self.context,
                                       admin_metadata={'readonly': 'True'},
                                       **self.volume_params)
    volume_id = volume['id']
    db.volume_update(self.context, volume_id, {'status': 'available', })
    # Attach to an instance in read-only mode.
    self.volume.attach_volume(self.context, volume_id, instance_uuid,
                              None, mountpoint, 'ro')
    vol = db.volume_get(context.get_admin_context(), volume_id)
    attachment = vol['volume_attachment'][0]
    self.assertEqual('in-use', vol['status'])
    self.assertEqual(fields.VolumeAttachStatus.ATTACHED,
                     vol['attach_status'])
    self.assertEqual(mountpoint, attachment['mountpoint'])
    self.assertEqual(instance_uuid, attachment['instance_uuid'])
    self.assertIsNone(attachment['attached_host'])
    # Admin metadata records both the readonly flag and the attach mode.
    admin_metadata = vol['volume_admin_metadata']
    self.assertEqual(2, len(admin_metadata))
    expected = dict(readonly='True', attached_mode='ro')
    ret = {}
    for item in admin_metadata:
        ret.update({item['key']: item['value']})
    self.assertDictEqual(expected, ret)
    connector = {'initiator': 'iqn.2012-07.org.fake:01'}
    conn_info = self.volume.initialize_connection(self.context,
                                                  volume, connector)
    # The initialized connection must expose the read-only access mode.
    self.assertEqual('ro', conn_info['data']['access_mode'])
    self.volume.detach_volume(self.context, volume_id, attachment['id'])
    vol = db.volume_get(self.context, volume_id)
    attachment = vol['volume_attachment']
    self.assertEqual('available', vol['status'])
    self.assertEqual(fields.VolumeAttachStatus.DETACHED,
                     vol['attach_status'])
    self.assertEqual([], attachment)
    # After detach only the readonly key remains in the admin metadata.
    admin_metadata = vol['volume_admin_metadata']
    self.assertEqual(1, len(admin_metadata))
    self.assertEqual('readonly', admin_metadata[0]['key'])
    self.assertEqual('True', admin_metadata[0]['value'])
    # Attach again, this time to a host instead of an instance.
    self.volume.attach_volume(self.context, volume_id, None,
                              'fake_host', mountpoint, 'ro')
    vol = db.volume_get(context.get_admin_context(), volume_id)
    attachment = vol['volume_attachment'][0]
    self.assertEqual('in-use', vol['status'])
    self.assertEqual(fields.VolumeAttachStatus.ATTACHED,
                     vol['attach_status'])
    self.assertEqual(mountpoint, attachment['mountpoint'])
    self.assertIsNone(attachment['instance_uuid'])
    # Host name is sanitized ('fake_host' -> 'fake-host').
    self.assertEqual('fake-host', attachment['attached_host'])
    admin_metadata = vol['volume_admin_metadata']
    self.assertEqual(2, len(admin_metadata))
    expected = dict(readonly='True', attached_mode='ro')
    ret = {}
    for item in admin_metadata:
        ret.update({item['key']: item['value']})
    self.assertDictEqual(expected, ret)
    connector = {'initiator': 'iqn.2012-07.org.fake:01'}
    conn_info = self.volume.initialize_connection(self.context,
                                                  volume, connector)
    self.assertEqual('ro', conn_info['data']['access_mode'])
    self.volume.detach_volume(self.context, volume_id,
                              attachment['id'])
    vol = db.volume_get(self.context, volume_id)
    attachment = vol['volume_attachment']
    self.assertEqual('available', vol['status'])
    self.assertEqual(fields.VolumeAttachStatus.DETACHED,
                     vol['attach_status'])
    self.assertEqual([], attachment)
    admin_metadata = vol['volume_admin_metadata']
    self.assertEqual(1, len(admin_metadata))
    self.assertEqual('readonly', admin_metadata[0]['key'])
    self.assertEqual('True', admin_metadata[0]['value'])
    # Deleting the now-detached volume must succeed and remove the record.
    self.volume.delete_volume(self.context, volume)
    self.assertRaises(exception.VolumeNotFound,
                      db.volume_get,
                      self.context,
                      volume_id)
def test_run_manager_attach_detach_volume_with_wrong_attach_mode(self):
    """Volume manager must reject 'rw' attach of a readonly volume.

    Attaching a readonly volume in read-write mode — to an instance and
    then to a host — must raise InvalidVolumeAttachMode, leave the
    attachment in ERROR_ATTACHING, keep the volume detached, and record
    a user-facing error message.
    """
    # Do not allow attaching a readonly volume in 'read-write' mode.
    instance_uuid = '12345678-1234-5678-1234-567812345678'
    mountpoint = "/dev/sdf"
    volume = tests_utils.create_volume(self.context,
                                       admin_metadata={'readonly': 'True'},
                                       **self.volume_params)
    volume_id = volume['id']
    self.volume.create_volume(self.context, volume)
    # Attaching to an instance in 'rw' mode must fail.
    self.assertRaises(exception.InvalidVolumeAttachMode,
                      self.volume.attach_volume,
                      self.context,
                      volume_id,
                      instance_uuid,
                      None,
                      mountpoint,
                      'rw')
    # Assert a user message was created
    self.volume.message_api.create.assert_called_once_with(
        self.context, message_field.Action.ATTACH_VOLUME,
        resource_uuid=volume['id'],
        exception=mock.ANY)
    attachment = objects.VolumeAttachmentList.get_all_by_volume_id(
        context.get_admin_context(), volume_id)[0]
    self.assertEqual(fields.VolumeAttachStatus.ERROR_ATTACHING,
                     attachment.attach_status)
    vol = db.volume_get(context.get_admin_context(), volume_id)
    self.assertEqual(fields.VolumeAttachStatus.DETACHED,
                     vol['attach_status'])
    # The failed attempt still records the requested mode in the
    # admin metadata alongside the readonly flag.
    admin_metadata = vol['volume_admin_metadata']
    self.assertEqual(2, len(admin_metadata))
    expected = dict(readonly='True', attached_mode='rw')
    ret = {}
    for item in admin_metadata:
        ret.update({item['key']: item['value']})
    self.assertDictEqual(expected, ret)
    db.volume_update(self.context, volume_id, {'status': 'available'})
    # Attaching to a host in 'rw' mode must fail the same way.
    self.assertRaises(exception.InvalidVolumeAttachMode,
                      self.volume.attach_volume,
                      self.context,
                      volume_id,
                      None,
                      'fake_host',
                      mountpoint,
                      'rw')
    attachment = objects.VolumeAttachmentList.get_all_by_volume_id(
        context.get_admin_context(), volume_id)[0]
    self.assertEqual(fields.VolumeAttachStatus.ERROR_ATTACHING,
                     attachment.attach_status)
    vol = db.volume_get(context.get_admin_context(), volume_id)
    self.assertEqual(fields.VolumeAttachStatus.DETACHED,
                     vol['attach_status'])
    admin_metadata = vol['volume_admin_metadata']
    self.assertEqual(2, len(admin_metadata))
    expected = dict(readonly='True', attached_mode='rw')
    ret = {}
    for item in admin_metadata:
        ret.update({item['key']: item['value']})
    self.assertDictEqual(expected, ret)
def test_run_api_attach_detach_volume_with_wrong_attach_mode(self):
    """Volume API must reject 'rw' attach of a readonly volume.

    Unlike the manager-level check above, the API-level rejection leaves
    no 'attached_mode' entry behind — only the original readonly key
    remains in the admin metadata.
    """
    # Do not allow attaching a readonly volume in 'read-write' mode.
    instance_uuid = '12345678-1234-5678-1234-567812345678'
    mountpoint = "/dev/sdf"
    volume = tests_utils.create_volume(self.context,
                                       admin_metadata={'readonly': 'True'},
                                       **self.volume_params)
    volume_id = volume['id']
    self.volume.create_volume(self.context, volume)
    volume_api = cinder.volume.api.API()
    # Attaching to an instance with 'rw' must be rejected.
    self.assertRaises(exception.InvalidVolumeAttachMode,
                      volume_api.attach,
                      self.context,
                      volume,
                      instance_uuid,
                      None,
                      mountpoint,
                      'rw')
    vol = db.volume_get(context.get_admin_context(), volume_id)
    self.assertEqual(fields.VolumeAttachStatus.DETACHED,
                     vol['attach_status'])
    admin_metadata = vol['volume_admin_metadata']
    self.assertEqual(1, len(admin_metadata))
    self.assertEqual('readonly', admin_metadata[0]['key'])
    self.assertEqual('True', admin_metadata[0]['value'])
    db.volume_update(self.context, volume_id, {'status': 'available'})
    # Attaching to a host with 'rw' must be rejected the same way.
    self.assertRaises(exception.InvalidVolumeAttachMode,
                      volume_api.attach,
                      self.context,
                      volume,
                      None,
                      'fake_host',
                      mountpoint,
                      'rw')
    vol = db.volume_get(context.get_admin_context(), volume_id)
    self.assertEqual(fields.VolumeAttachStatus.DETACHED,
                     vol['attach_status'])
    admin_metadata = vol['volume_admin_metadata']
    self.assertEqual(1, len(admin_metadata))
    self.assertEqual('readonly', admin_metadata[0]['key'])
    self.assertEqual('True', admin_metadata[0]['value'])
def test_detach_volume_while_uploading_to_image_is_in_progress(self):
    """Detach must not reset status while an image upload is in progress.

    If an instance is booted from a volume with the 'Terminate on Delete'
    flag set, deleting the instance detaches (and tries to delete) the
    volume even while it is 'uploading'. Detach used to set the status
    to 'available'; this verifies 'uploading' is preserved instead.
    """
    mountpoint = "/dev/sdf"
    # Attach the volume to an instance.
    instance_uuid = '12345678-1234-5678-1234-567812345678'
    volume = tests_utils.create_volume(self.context,
                                       admin_metadata={'readonly': 'True'},
                                       **self.volume_params)
    volume_id = volume['id']
    self.volume.create_volume(self.context, volume)
    self.volume.attach_volume(self.context, volume_id, instance_uuid,
                              None, mountpoint, 'ro')
    # Simulate an in-progress upload-to-image.
    db.volume_update(self.context, volume_id, {'status': 'uploading'})
    # Detach while uploading.
    self.volume.detach_volume(self.context, volume_id)
    vol = db.volume_get(self.context, volume_id)
    # Status must still be 'uploading', not 'available'.
    self.assertEqual("uploading", vol['status'])
    self.assertEqual(fields.VolumeAttachStatus.DETACHED,
                     vol['attach_status'])
def test_volume_attach_in_maintenance(self):
    """Attaching a volume in 'maintenance' status must be rejected."""
    metadata = {'fake_key1': 'fake_value1', 'fake_key2': 'fake_value2'}
    volume = tests_utils.create_volume(self.context, metadata=metadata,
                                       **self.volume_params)
    # Force the volume into maintenance and verify attach is refused.
    volume['status'] = 'maintenance'
    self.assertRaises(exception.InvalidVolume,
                      self.volume_api.attach,
                      self.context, volume,
                      None, None, None, None)
def test_volume_detach_in_maintenance(self):
    """Detaching a volume in 'maintenance' status must be rejected."""
    metadata = {'fake_key1': 'fake_value1', 'fake_key2': 'fake_value2'}
    volume = tests_utils.create_volume(self.context, metadata=metadata,
                                       **self.volume_params)
    # Force the volume into maintenance and verify detach is refused.
    volume['status'] = 'maintenance'
    api = cinder.volume.api.API()
    self.assertRaises(exception.InvalidVolume,
                      api.detach,
                      self.context, volume, None)
| 49.647898
| 79
| 0.566119
| 6,538
| 66,131
| 5.474763
| 0.055675
| 0.054087
| 0.056993
| 0.037688
| 0.877298
| 0.864447
| 0.854501
| 0.833492
| 0.819299
| 0.808264
| 0
| 0.022599
| 0.334881
| 66,131
| 1,331
| 80
| 49.685199
| 0.791179
| 0.052487
| 0
| 0.813264
| 0
| 0
| 0.1104
| 0.037499
| 0
| 0
| 0
| 0
| 0.201571
| 1
| 0.026178
| false
| 0.00349
| 0.014834
| 0
| 0.045375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
42145aadef6209503d3b265d7f9577475c6f6100
| 9,093
|
py
|
Python
|
Packs/DeHashed/Integrations/DeHashed/DeHashed_test.py
|
yyyogev/content
|
a4692b55873f900c4803f0cc020c1b1ad3e9e74c
|
[
"MIT"
] | 1
|
2020-07-22T05:55:11.000Z
|
2020-07-22T05:55:11.000Z
|
Packs/DeHashed/Integrations/DeHashed/DeHashed_test.py
|
nicoloereni/content
|
ddb88044c5b39a17894dd13e7ae260d9854afc30
|
[
"MIT"
] | null | null | null |
Packs/DeHashed/Integrations/DeHashed/DeHashed_test.py
|
nicoloereni/content
|
ddb88044c5b39a17894dd13e7ae260d9854afc30
|
[
"MIT"
] | 1
|
2020-07-22T23:24:05.000Z
|
2020-07-22T23:24:05.000Z
|
import json
import urllib
DEHASHED_URL = "https://url.com/" # disable-secrets-detection
INTEGRATION_CONTEXT_BRAND = "DeHashed"
def load_test_data(json_path):
    """Read the JSON fixture at *json_path* and return the parsed object."""
    with open(json_path) as fh:
        raw = fh.read()
    return json.loads(raw)
def test_module_command(requests_mock):
    """test_module returns "ok" when the search endpoint answers a GET.

    Given a mocked search API response, configuring the integration and
    running the connectivity check must succeed.
    """
    from DeHashed import Client, test_module

    fixture = load_test_data("test_data/search.json")
    query = urllib.parse.urlencode({"query": 'vin:"test" "test1"'})
    requests_mock.get(f"{DEHASHED_URL}search?{query}", json=fixture["api_response"])

    client = Client(base_url=f"{DEHASHED_URL}")
    client._headers = {}
    assert test_module(client) == "ok"
def test_search_command_using_is_operator_without_filter(requests_mock):
    """Search with the "is" operator and no filter yields the full context.

    Given an "is" operator and a value, searching must produce the expected
    Demisto context entries.
    """
    from DeHashed import Client, dehashed_search_command

    fixture = load_test_data("test_data/search.json")
    query = urllib.parse.urlencode({"query": '"testgamil.co"'})
    requests_mock.get(f"{DEHASHED_URL}search?{query}", json=fixture["api_response"])

    client = Client(base_url=f"{DEHASHED_URL}")
    client._headers = {}
    _, context, _ = dehashed_search_command(client, fixture["is_op_single"])

    assert context == {
        "DeHashed.Search(val.Id==obj.Id)": fixture["expected_results"][
            "full_results"
        ],
        "DeHashed.LastQuery(true)": {
            "ResultsFrom": 1,
            "ResultsTo": 2,
            "DisplayedResults": 2,
            "TotalResults": 2,
            "PageNumber": 1,
        },
    }
def test_search_command_using_contains_operator_without_filter(requests_mock):
    """Search with the "contains" operator and no filter yields the context.

    Given a "contains" operator and a value, searching must produce the
    expected Demisto context entries.
    """
    from DeHashed import Client, dehashed_search_command

    fixture = load_test_data("test_data/search.json")
    query = urllib.parse.urlencode({"query": "testgamil.co"})
    requests_mock.get(f"{DEHASHED_URL}search?{query}", json=fixture["api_response"])

    client = Client(base_url=f"{DEHASHED_URL}")
    client._headers = {}
    _, context, _ = dehashed_search_command(client, fixture["contains_op_single"])

    assert context == {
        "DeHashed.Search(val.Id==obj.Id)": fixture["expected_results"][
            "full_results"
        ],
        "DeHashed.LastQuery(true)": {
            "ResultsFrom": 1,
            "ResultsTo": 2,
            "DisplayedResults": 2,
            "TotalResults": 2,
            "PageNumber": 1,
        },
    }
def test_search_command_using_regex_operator_without_filter(requests_mock):
    """Search with the "regex" operator and no filter yields the context.

    Given a "regex" operator and a pattern, searching must produce the
    expected Demisto context entries.
    """
    from DeHashed import Client, dehashed_search_command

    fixture = load_test_data("test_data/search.json")
    query = urllib.parse.urlencode({"query": "/joh?n(ath[oa]n)/"})
    requests_mock.get(f"{DEHASHED_URL}search?{query}", json=fixture["api_response"])

    client = Client(base_url=f"{DEHASHED_URL}")
    client._headers = {}
    _, context, _ = dehashed_search_command(client, fixture["regex_op_single"])

    assert context == {
        "DeHashed.Search(val.Id==obj.Id)": fixture["expected_results"][
            "full_results"
        ],
        "DeHashed.LastQuery(true)": {
            "ResultsFrom": 1,
            "ResultsTo": 2,
            "DisplayedResults": 2,
            "TotalResults": 2,
            "PageNumber": 1,
        },
    }
def test_search_command_using_is_operator_with_filter_and_multi_values(requests_mock):
    """Search with "is", an "email" filter and multiple values.

    Given an "is" operator, multiple values and the "email" filter,
    searching must produce the expected Demisto context entries.
    """
    from DeHashed import Client, dehashed_search_command

    fixture = load_test_data("test_data/search.json")
    query = urllib.parse.urlencode({"query": 'email:"testgamil.co" "test1gmail.com"'})
    requests_mock.get(f"{DEHASHED_URL}search?{query}", json=fixture["api_response"])

    client = Client(base_url=f"{DEHASHED_URL}")
    client._headers = {}
    _, context, _ = dehashed_search_command(client, fixture["is_op_multi"])

    assert context == {
        "DeHashed.Search(val.Id==obj.Id)": fixture["expected_results"][
            "full_results"
        ],
        "DeHashed.LastQuery(true)": {
            "ResultsFrom": 1,
            "ResultsTo": 2,
            "DisplayedResults": 2,
            "TotalResults": 2,
            "PageNumber": 1,
        },
    }
def test_search_command_using_contains_operator_with_filter_and_multi_values(
    requests_mock,
):
    """Search with "contains", a "name" filter and multiple values.

    Given a "contains" operator, multiple values and the "name" filter,
    searching must produce the expected Demisto context entries.
    """
    from DeHashed import Client, dehashed_search_command

    fixture = load_test_data("test_data/search.json")
    query = urllib.parse.urlencode({"query": "name:(test1 OR test2)"})
    requests_mock.get(f"{DEHASHED_URL}search?{query}", json=fixture["api_response"])

    client = Client(base_url=f"{DEHASHED_URL}")
    client._headers = {}
    _, context, _ = dehashed_search_command(client, fixture["contains_op_multi"])

    assert context == {
        "DeHashed.Search(val.Id==obj.Id)": fixture["expected_results"][
            "full_results"
        ],
        "DeHashed.LastQuery(true)": {
            "ResultsFrom": 1,
            "ResultsTo": 2,
            "DisplayedResults": 2,
            "TotalResults": 2,
            "PageNumber": 1,
        },
    }
def test_search_command_using_regex_operator_with_filter_and_multi_values(
    requests_mock,
):
    """Search with "regex", a "vin" filter and multiple patterns.

    Given a "regex" operator, multiple patterns and the "vin" filter,
    searching must produce the expected Demisto context entries.
    """
    from DeHashed import Client, dehashed_search_command

    fixture = load_test_data("test_data/search.json")
    query = urllib.parse.urlencode({"query": "vin:/joh?n(ath[oa]n)/ /joh?n11(ath[oa]n)/"})
    requests_mock.get(f"{DEHASHED_URL}search?{query}", json=fixture["api_response"])

    client = Client(base_url=f"{DEHASHED_URL}")
    client._headers = {}
    _, context, _ = dehashed_search_command(client, fixture["regex_op_multi"])

    assert context == {
        "DeHashed.Search(val.Id==obj.Id)": fixture["expected_results"][
            "full_results"
        ],
        "DeHashed.LastQuery(true)": {
            "ResultsFrom": 1,
            "ResultsTo": 2,
            "DisplayedResults": 2,
            "TotalResults": 2,
            "PageNumber": 1,
        },
    }
def test_search_command_using_regex_operator_with_filter_and_change_result_range(
    requests_mock,
):
    """Search with "regex", a "vin" filter and a restricted result range.

    Given a "regex" operator, a "vin" filter and a result-range argument,
    searching must return only the requested slice of results.
    """
    from DeHashed import Client, dehashed_search_command

    fixture = load_test_data("test_data/search.json")
    query = urllib.parse.urlencode({"query": "vin:/joh?n(ath[oa]n)/ /joh?n11(ath[oa]n)/"})
    requests_mock.get(f"{DEHASHED_URL}search?{query}", json=fixture["api_response"])

    client = Client(base_url=f"{DEHASHED_URL}")
    client._headers = {}
    _, context, _ = dehashed_search_command(client, fixture["regex_op_multi_range"])

    # Only one of the two total results is displayed for this range.
    assert context == {
        "DeHashed.Search(val.Id==obj.Id)": fixture["expected_results_range"][
            "full_results"
        ],
        "DeHashed.LastQuery(true)": {
            "ResultsFrom": 1,
            "ResultsTo": 1,
            "DisplayedResults": 1,
            "TotalResults": 2,
            "PageNumber": 1,
        },
    }
| 30.719595
| 105
| 0.634004
| 1,049
| 9,093
| 5.237369
| 0.115348
| 0.068438
| 0.034947
| 0.034947
| 0.890062
| 0.888242
| 0.881143
| 0.874408
| 0.836731
| 0.795777
| 0
| 0.006286
| 0.247663
| 9,093
| 295
| 106
| 30.823729
| 0.796813
| 0.155834
| 0
| 0.716578
| 0
| 0.010695
| 0.268154
| 0.115889
| 0
| 0
| 0
| 0
| 0.042781
| 1
| 0.048128
| false
| 0
| 0.053476
| 0
| 0.106952
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
421b068045048162f2a91f87c80e3dfa2ee98eed
| 6,202
|
py
|
Python
|
env/lib/python3.9/site-packages/sklearn_genetic/utils/cv_scores.py
|
wphoong/flappy_doge
|
c778f0e4820c1ed46e50a56f989d57df4f386736
|
[
"MIT"
] | null | null | null |
env/lib/python3.9/site-packages/sklearn_genetic/utils/cv_scores.py
|
wphoong/flappy_doge
|
c778f0e4820c1ed46e50a56f989d57df4f386736
|
[
"MIT"
] | null | null | null |
env/lib/python3.9/site-packages/sklearn_genetic/utils/cv_scores.py
|
wphoong/flappy_doge
|
c778f0e4820c1ed46e50a56f989d57df4f386736
|
[
"MIT"
] | null | null | null |
import numpy as np
from scipy.stats import rankdata
def select_dict_keys(dictionary, keys):
    """Return a new dict holding only the entries of *dictionary* named in *keys*.

    Raises KeyError if any requested key is missing.
    """
    subset = {}
    for key in keys:
        subset[key] = dictionary[key]
    return subset
def create_gasearch_cv_results_(logbook, space, return_train_score, metrics):
    """Build a sklearn-style ``cv_results_`` dict from a GA search logbook.

    Parameters
    ----------
    logbook : object
        Evolution log; ``logbook.chapters["parameters"]`` supports
        ``select(key)`` returning one entry per evaluated candidate
        (DEAP-style Logbook — assumed, confirm against caller).
    space : object
        Search space; ``space.parameters`` lists the hyperparameter names.
    return_train_score : bool
        When True, also aggregate the ``train_<metric>`` entries.
    metrics : iterable of str
        Metric names to aggregate.

    Returns
    -------
    dict
        ``param_*``, ``split<i>_test_<metric>``, ``mean/std/rank_test_<metric>``
        columns (plus ``train`` counterparts when requested), fit/score
        timing statistics, and the per-candidate ``params`` dicts.
    """
    cv_results = {}
    # Number of CV splits, inferred from the first recorded cv_scores entry.
    n_splits = len(logbook.chapters["parameters"].select("cv_scores")[0])
    # One "param_<name>" column per hyperparameter in the search space.
    for parameter in space.parameters:
        cv_results[f"param_{parameter}"] = logbook.chapters["parameters"].select(
            parameter
        )
    # Keys that are extended per metric in multi-metric
    for metric in metrics:
        # Per-split raw test scores: split0_test_<metric>, split1_..., etc.
        for split in range(n_splits):
            cv_results[f"split{split}_test_{metric}"] = [
                cv_scores[split]
                for cv_scores in logbook.chapters["parameters"].select(f"test_{metric}")
            ]
        # nan-aware mean/std across splits for each candidate.
        cv_results[f"mean_test_{metric}"] = [
            np.nanmean(cv_scores)
            for cv_scores in logbook.chapters["parameters"].select(f"test_{metric}")
        ]
        cv_results[f"std_test_{metric}"] = [
            np.nanstd(cv_scores)
            for cv_scores in logbook.chapters["parameters"].select(f"test_{metric}")
        ]
        # Rank 1 = best: negate so the highest mean gets the lowest rank;
        # method="min" assigns ties the same (smallest) rank.
        cv_results[f"rank_test_{metric}"] = rankdata(
            -np.array(cv_results[f"mean_test_{metric}"]), method="min"
        ).astype(int)
        if return_train_score:
            # Same aggregation for the train_<metric> entries.
            for split in range(n_splits):
                cv_results[f"split{split}_train_{metric}"] = [
                    cv_scores[split]
                    for cv_scores in logbook.chapters["parameters"].select(
                        f"train_{metric}"
                    )
                ]
            cv_results[f"mean_train_{metric}"] = [
                np.nanmean(cv_scores)
                for cv_scores in logbook.chapters["parameters"].select(
                    f"train_{metric}"
                )
            ]
            cv_results[f"std_train_{metric}"] = [
                np.nanstd(cv_scores)
                for cv_scores in logbook.chapters["parameters"].select(
                    f"train_{metric}"
                )
            ]
            cv_results[f"rank_train_{metric}"] = rankdata(
                -np.array(cv_results[f"mean_train_{metric}"]), method="min"
            ).astype(int)
    # These values are only one even with multi-metric
    cv_results["mean_fit_time"] = [
        np.nanmean(fit_time)
        for fit_time in logbook.chapters["parameters"].select("fit_time")
    ]
    cv_results["std_fit_time"] = [
        np.nanstd(fit_time)
        for fit_time in logbook.chapters["parameters"].select("fit_time")
    ]
    cv_results["mean_score_time"] = [
        np.nanmean(score_time)
        for score_time in logbook.chapters["parameters"].select("score_time")
    ]
    cv_results["std_score_time"] = [
        np.nanstd(score_time)
        for score_time in logbook.chapters["parameters"].select("score_time")
    ]
    # Hyperparameter dict for each evaluated individual, restricted to the
    # names declared by the search space.
    cv_results["params"] = [
        select_dict_keys(individual, space.parameters)
        for individual in logbook.chapters["parameters"]
    ]
    return cv_results
def create_feature_selection_cv_results_(logbook, return_train_score, metrics):
    """Build a sklearn-style ``cv_results_`` dict for feature selection.

    Like the GASearchCV variant but without a hyperparameter space: instead
    of ``param_*`` columns it records, per candidate, the selected feature
    mask, the number of selected features and its rank.

    Parameters
    ----------
    logbook : object
        Evolution log; ``logbook.chapters["parameters"]`` supports
        ``select(key)`` returning one entry per evaluated candidate
        (DEAP-style Logbook — assumed, confirm against caller).
    return_train_score : bool
        When True, also aggregate the ``train_<metric>`` entries.
    metrics : iterable of str
        Metric names to aggregate.

    Returns
    -------
    dict
        ``split<i>_test_<metric>``, ``mean/std/rank_test_<metric>`` columns
        (plus ``train`` counterparts when requested), fit/score timing
        statistics, ``n_features``, ``rank_n_features`` and ``features``.
    """
    cv_results = {}
    # Number of CV splits, inferred from the first recorded cv_scores entry.
    n_splits = len(logbook.chapters["parameters"].select("cv_scores")[0])
    # Keys that are extended per metric in multi-metric
    for metric in metrics:
        # Per-split raw test scores: split0_test_<metric>, split1_..., etc.
        for split in range(n_splits):
            cv_results[f"split{split}_test_{metric}"] = [
                cv_scores[split]
                for cv_scores in logbook.chapters["parameters"].select(f"test_{metric}")
            ]
        # nan-aware mean/std across splits for each candidate.
        cv_results[f"mean_test_{metric}"] = [
            np.nanmean(cv_scores)
            for cv_scores in logbook.chapters["parameters"].select(f"test_{metric}")
        ]
        cv_results[f"std_test_{metric}"] = [
            np.nanstd(cv_scores)
            for cv_scores in logbook.chapters["parameters"].select(f"test_{metric}")
        ]
        # Rank 1 = best: negate so the highest mean gets the lowest rank;
        # method="min" assigns ties the same (smallest) rank.
        cv_results[f"rank_test_{metric}"] = rankdata(
            -np.array(cv_results[f"mean_test_{metric}"]), method="min"
        ).astype(int)
        if return_train_score:
            # Same aggregation for the train_<metric> entries.
            for split in range(n_splits):
                cv_results[f"split{split}_train_{metric}"] = [
                    cv_scores[split]
                    for cv_scores in logbook.chapters["parameters"].select(
                        f"train_{metric}"
                    )
                ]
            cv_results[f"mean_train_{metric}"] = [
                np.nanmean(cv_scores)
                for cv_scores in logbook.chapters["parameters"].select(
                    f"train_{metric}"
                )
            ]
            cv_results[f"std_train_{metric}"] = [
                np.nanstd(cv_scores)
                for cv_scores in logbook.chapters["parameters"].select(
                    f"train_{metric}"
                )
            ]
            cv_results[f"rank_train_{metric}"] = rankdata(
                -np.array(cv_results[f"mean_train_{metric}"]), method="min"
            ).astype(int)
    # These values are only one even with multi-metric
    cv_results["mean_fit_time"] = [
        np.nanmean(fit_time)
        for fit_time in logbook.chapters["parameters"].select("fit_time")
    ]
    cv_results["std_fit_time"] = [
        np.nanstd(fit_time)
        for fit_time in logbook.chapters["parameters"].select("fit_time")
    ]
    cv_results["mean_score_time"] = [
        np.nanmean(score_time)
        for score_time in logbook.chapters["parameters"].select("score_time")
    ]
    cv_results["std_score_time"] = [
        np.nanstd(score_time)
        for score_time in logbook.chapters["parameters"].select("score_time")
    ]
    # Count of selected features per candidate (sum of the boolean mask).
    cv_results["n_features"] = [
        np.sum(features)
        for features in logbook.chapters["parameters"].select("features")
    ]
    # Fewer features rank better: no negation here, method="min" for ties.
    cv_results["rank_n_features"] = rankdata(
        np.array(cv_results["n_features"]), method="min"
    ).astype(int)
    # Raw feature masks, one per candidate.
    cv_results["features"] = logbook.chapters["parameters"].select("features")
    return cv_results
| 34.455556
| 89
| 0.563367
| 700
| 6,202
| 4.722857
| 0.1
| 0.108893
| 0.196612
| 0.234422
| 0.850575
| 0.819117
| 0.819117
| 0.819117
| 0.819117
| 0.819117
| 0
| 0.000474
| 0.319574
| 6,202
| 179
| 90
| 34.648045
| 0.782938
| 0.031764
| 0
| 0.664286
| 0
| 0
| 0.191548
| 0.01821
| 0
| 0
| 0
| 0
| 0
| 1
| 0.021429
| false
| 0
| 0.014286
| 0.007143
| 0.057143
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
423da2bac92c2ab7035ad6c8d2716829a9fea096
| 6,511
|
py
|
Python
|
tests/test_targeted.py
|
rkeulemans/pymdown-extensions
|
e809f20f672cc350e2cb2c6c2d86e8438f329765
|
[
"MIT"
] | null | null | null |
tests/test_targeted.py
|
rkeulemans/pymdown-extensions
|
e809f20f672cc350e2cb2c6c2d86e8438f329765
|
[
"MIT"
] | 1
|
2021-02-07T05:27:54.000Z
|
2021-02-07T05:27:54.000Z
|
tests/test_targeted.py
|
rkeulemans/pymdown-extensions
|
e809f20f672cc350e2cb2c6c2d86e8438f329765
|
[
"MIT"
] | null | null | null |
"""Test `uniprops`."""
from pymdownx import util
import unittest
import pytest
import markdown
class TestUrlParse(unittest.TestCase):
    """Tests for `util.parse_url` across URL, fragment and path forms."""

    def _check(self, url, scheme=None, netloc=None, path=None,
               fragment=None, is_url=None, is_absolute=None):
        """Parse *url* and assert only the fields supplied as keywords."""
        parsed = util.parse_url(url)
        expected = (scheme, netloc, path, None, None, fragment,
                    is_url, is_absolute)
        labels = ('scheme', 'netloc', 'path', 'params', 'query',
                  'fragment', 'is_url', 'is_absolute')
        for label, want, got in zip(labels, expected, parsed):
            if want is not None:
                self.assertEqual(got, want, label)

    def test_url(self):
        """A plain http URL parses as a URL, not an absolute path."""
        self._check('http://www.google.com', scheme='http',
                    netloc='www.google.com', is_url=True, is_absolute=False)

    def test_fragment(self):
        """A bare fragment has empty scheme/netloc and is treated as a URL."""
        self._check('#header', scheme='', netloc='', fragment='header',
                    is_url=True, is_absolute=False)

    def test_file_windows(self):
        """A file:// Windows drive path parses as an absolute file path."""
        self._check('file://c:/path', scheme='file', path='/c:/path',
                    is_url=False, is_absolute=True)

    def test_file_windows_backslash(self):
        """Backslashes in a file:// Windows path are normalized."""
        self._check(r'file://c:\path', scheme='file', path='/c:/path',
                    is_url=False, is_absolute=True)

    def test_file_windows_start_backslash(self):
        """A leading backslash before the drive letter is handled."""
        self._check(r'file://\c:\path', scheme='file', path='/c:/path',
                    is_url=False, is_absolute=True)

    def test_file_windows_netpath(self):
        """A file:// UNC netpath keeps its double-slash prefix."""
        self._check('file://\\\\path', scheme='file', path='//path',
                    is_url=False, is_absolute=True)

    def test_nix_path(self):
        """A file:// Linux/Unix path parses as an absolute file path."""
        self._check('file:///path', scheme='file', path='/path',
                    is_url=False, is_absolute=True)

    def test_windows_path_forward_slash(self):
        """A bare Windows drive path with forward slashes."""
        self._check('c:/path', scheme='file', path='/c:/path',
                    is_url=False, is_absolute=True)

    def test_windows_path_backslash(self):
        """A bare Windows drive path with backslashes."""
        self._check(r'c:\path', scheme='file', path='/c:/path',
                    is_url=False, is_absolute=True)

    def test_windows_netpath_forward_slash(self):
        """A netpath written with forward slashes gets the file scheme."""
        self._check('//file/path', scheme='file', path='//file/path',
                    is_url=False, is_absolute=True)

    def test_windows_netpath_backslash(self):
        """A backslash netpath keeps its raw form and no scheme."""
        self._check('\\\\file\\path', scheme='', path='\\\\file\\path',
                    is_url=False, is_absolute=True)

    def test_relative_path(self):
        """A relative POSIX path is neither a URL nor absolute."""
        self._check('../file/path', scheme='', path='../file/path',
                    is_url=False, is_absolute=False)

    def test_windows_relative_path(self):
        """A relative backslash path is neither a URL nor absolute."""
        self._check('..\\file\\path', scheme='', path='..\\file\\path',
                    is_url=False, is_absolute=False)
class TestSnippets(unittest.TestCase):
    """Targeted tests for the Snippets extension."""

    def _render(self, text, **config):
        """Convert *text* with the snippets extension configured by *config*."""
        md = markdown.Markdown(
            extensions=['pymdownx.snippets'],
            extension_configs={'pymdownx.snippets': config}
        )
        return md.convert(text)

    def test_bad_file_checked(self):
        """A missing snippet file raises IOError when check_paths is on."""
        with self.assertRaises(IOError):
            self._render('--8<--- "bad.file"', check_paths=True)

    def test_good_file_checked(self):
        """An existing snippet file renders normally with check_paths on."""
        rendered = self._render(
            '--8<--- "d.txt"',
            check_paths=True,
            base_path='tests/extensions/_snippets'
        )
        self.assertEqual("<p>Snippet</p>", rendered)

    def test_bad_file_unchecked(self):
        """A missing snippet file renders as empty with check_paths off."""
        rendered = self._render('--8<--- "bad.file"', check_paths=False)
        self.assertEqual("", rendered)
def run():
    """Invoke pytest on this module with the coverage plugin disabled."""
    arguments = ['tests/test_targeted.py', '-p', 'no:pytest_cov']
    pytest.main(arguments)
| 34.818182
| 96
| 0.615574
| 754
| 6,511
| 5.157825
| 0.107427
| 0.212137
| 0.113654
| 0.073541
| 0.771149
| 0.732065
| 0.723836
| 0.722037
| 0.707123
| 0.707123
| 0
| 0.000611
| 0.245738
| 6,511
| 186
| 97
| 35.005376
| 0.791285
| 0.08524
| 0
| 0.507813
| 0
| 0
| 0.101281
| 0.008198
| 0
| 0
| 0
| 0
| 0.4375
| 1
| 0.132813
| false
| 0
| 0.03125
| 0
| 0.179688
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
424acb02ac1fde01ff9b5a4795a1c5f3f2126a9f
| 14,434
|
py
|
Python
|
tools/build/test/toolset-mock/src/clang-linux-3.9.0.py
|
anarthal/boost-unix-mirror
|
8c34eb2fe471d6c3113c680c1fbef29e7a8063a0
|
[
"BSL-1.0"
] | 1
|
2021-08-15T13:07:07.000Z
|
2021-08-15T13:07:07.000Z
|
tools/build/test/toolset-mock/src/clang-linux-3.9.0.py
|
anarthal/boost-unix-mirror
|
8c34eb2fe471d6c3113c680c1fbef29e7a8063a0
|
[
"BSL-1.0"
] | null | null | null |
tools/build/test/toolset-mock/src/clang-linux-3.9.0.py
|
anarthal/boost-unix-mirror
|
8c34eb2fe471d6c3113c680c1fbef29e7a8063a0
|
[
"BSL-1.0"
] | 1
|
2021-08-24T08:55:27.000Z
|
2021-08-24T08:55:27.000Z
|
#!/usr/bin/python
# coding: utf-8
#
# Copyright 2017 Steven Watanabe
# Copyright 2020 René Ferdinand Rivera Morell
#
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
from MockProgram import *
# Mock clang++ toolset for the b2 toolset tests: each command(...) registers the
# exact clang++ invocation expected for one build step, and allow_properties()
# gates each group of expectations on the b2 property set under test.
command('clang++', '-print-prog-name=ar', stdout=script('ar.py'))
command('clang++', '-print-prog-name=ranlib', stdout=script('ranlib.py'))
# target-os=linux ..
if allow_properties('target-os=linux', 'variant=debug', 'link=shared', 'threading=single', 'runtime-link=shared'):
    # compile lib.o / link libl1.so / compile main.o / link test
    command('clang++', unordered(ordered('-x', 'c++'), '-O0', '-fno-inline', '-Wall', '-g', '-fPIC', '-c'), '-o', output_file('bin/clang-linux-3.9.0/debug/lib.o'), input_file(source='lib.cpp'))
    command('clang++', '-o', output_file('bin/clang-linux-3.9.0/debug/libl1.so'), '-Wl,-soname', '-Wl,libl1.so', '-shared', '-Wl,--start-group', input_file('bin/clang-linux-3.9.0/debug/lib.o'), '-Wl,-Bstatic', '-Wl,-Bdynamic', '-Wl,--end-group', unordered('-g', '-fPIC'))
    command('clang++', unordered(ordered('-x', 'c++'), '-O0', '-fno-inline', '-Wall', '-g', '-fPIC', '-c'), '-o', output_file('bin/clang-linux-3.9.0/debug/main.o'), input_file(source='main.cpp'))
    command('clang++', '-o', output_file('bin/clang-linux-3.9.0/debug/test'), '-Wl,-R', arg('-Wl,', target_path('bin/clang-linux-3.9.0/debug/libl1.so')), '-Wl,-rpath-link', arg('-Wl,', target_path('bin/clang-linux-3.9.0/debug/libl1.so')), '-Wl,--start-group', input_file('bin/clang-linux-3.9.0/debug/main.o'), input_file('bin/clang-linux-3.9.0/debug/libl1.so'), '-Wl,-Bstatic', '-Wl,-Bdynamic', '-Wl,--end-group', unordered('-g', '-fPIC'))
if allow_properties('target-os=linux', 'variant=release', 'link=shared', 'threading=single', 'runtime-link=shared', 'strip=on'):
    command('clang++', unordered(ordered('-x', 'c++'), '-O3', '-Wno-inline', '-Wall', '-fPIC', '-DNDEBUG', '-c'), '-o', output_file('bin/clang-linux-3.9.0/release/lib.o'), input_file(source='lib.cpp'))
    command('clang++', '-o', output_file('bin/clang-linux-3.9.0/release/libl1.so'), '-Wl,-soname', '-Wl,libl1.so', '-shared', '-Wl,--start-group', input_file('bin/clang-linux-3.9.0/release/lib.o'), '-Wl,-Bstatic', '-Wl,-Bdynamic', '-Wl,--end-group', unordered('-fPIC', '-Wl,--strip-all'))
    command('clang++', unordered(ordered('-x', 'c++'), '-O3', '-Wno-inline', '-Wall', '-fPIC', '-DNDEBUG', '-c'), '-o', output_file('bin/clang-linux-3.9.0/release/main.o'), input_file(source='main.cpp'))
    command('clang++', '-o', output_file('bin/clang-linux-3.9.0/release/test'), '-Wl,-R', arg('-Wl,', target_path('bin/clang-linux-3.9.0/release/libl1.so')), '-Wl,-rpath-link', arg('-Wl,', target_path('bin/clang-linux-3.9.0/release/libl1.so')), '-Wl,--start-group', input_file('bin/clang-linux-3.9.0/release/main.o'), input_file('bin/clang-linux-3.9.0/release/libl1.so'), '-Wl,-Bstatic', '-Wl,-Bdynamic', '-Wl,--end-group', unordered('-fPIC', '-Wl,--strip-all'))
if allow_properties('target-os=linux', 'variant=debug', 'link=shared', 'threading=multi', 'runtime-link=shared'):
    # threading=multi adds -pthread to compiles/links and -lrt to the link lines
    command('clang++', unordered(ordered('-x', 'c++'), '-O0', '-fno-inline', '-Wall', '-g', '-pthread', '-fPIC', '-c'), '-o', output_file('bin/clang-linux-3.9.0/debug/threading-multi/lib.o'), input_file(source='lib.cpp'))
    command('clang++', '-o', output_file('bin/clang-linux-3.9.0/debug/threading-multi/libl1.so'), '-Wl,-soname', '-Wl,libl1.so', '-shared', '-Wl,--start-group', input_file('bin/clang-linux-3.9.0/debug/threading-multi/lib.o'), '-Wl,-Bstatic', '-Wl,-Bdynamic', '-lrt', '-Wl,--end-group', unordered('-g', '-pthread', '-fPIC'))
    command('clang++', unordered(ordered('-x', 'c++'), '-O0', '-fno-inline', '-Wall', '-g', '-pthread', '-fPIC', '-c'), '-o', output_file('bin/clang-linux-3.9.0/debug/threading-multi/main.o'), input_file(source='main.cpp'))
    command('clang++', '-o', output_file('bin/clang-linux-3.9.0/debug/threading-multi/test'), '-Wl,-R', arg('-Wl,', target_path('bin/clang-linux-3.9.0/debug/threading-multi/libl1.so')), '-Wl,-rpath-link', arg('-Wl,', target_path('bin/clang-linux-3.9.0/debug/threading-multi/libl1.so')), '-Wl,--start-group', input_file('bin/clang-linux-3.9.0/debug/threading-multi/main.o'), input_file('bin/clang-linux-3.9.0/debug/threading-multi/libl1.so'), '-Wl,-Bstatic', '-Wl,-Bdynamic', '-lrt', '-Wl,--end-group', unordered('-g', '-pthread', '-fPIC'))
if allow_properties('target-os=linux', 'variant=debug', 'link=static', 'threading=single', 'runtime-link=shared'):
    # static linking: no -fPIC, no shared-library step; test links libl1.a directly
    command('clang++', unordered(ordered('-x', 'c++'), '-O0', '-fno-inline', '-Wall', '-g', '-c'), '-o', output_file('bin/clang-linux-3.9.0/debug/link-static/lib.o'), input_file(source='lib.cpp'))
    command('clang++', unordered(ordered('-x', 'c++'), '-O0', '-fno-inline', '-Wall', '-g', '-c'), '-o', output_file('bin/clang-linux-3.9.0/debug/link-static/main.o'), input_file(source='main.cpp'))
    command('clang++', '-o', output_file('bin/clang-linux-3.9.0/debug/link-static/test'), '-Wl,--start-group', input_file('bin/clang-linux-3.9.0/debug/link-static/main.o'), input_file('bin/clang-linux-3.9.0/debug/link-static/libl1.a'), '-Wl,-Bstatic', '-Wl,-Bdynamic', '-Wl,--end-group', '-g')
if allow_properties('target-os=linux', 'variant=debug', 'link=static', 'threading=single', 'runtime-link=static'):
    command('clang++', unordered(ordered('-x', 'c++'), '-O0', '-fno-inline', '-Wall', '-g', '-c'), '-o', output_file('bin/clang-linux-3.9.0/debug/link-static/runtime-link-static/lib.o'), input_file(source='lib.cpp'))
    command('clang++', unordered(ordered('-x', 'c++'), '-O0', '-fno-inline', '-Wall', '-g', '-c'), '-o', output_file('bin/clang-linux-3.9.0/debug/link-static/runtime-link-static/main.o'), input_file(source='main.cpp'))
    command('clang++', '-o', output_file('bin/clang-linux-3.9.0/debug/link-static/runtime-link-static/test'), '-Wl,--start-group', input_file('bin/clang-linux-3.9.0/debug/link-static/runtime-link-static/main.o'), input_file('bin/clang-linux-3.9.0/debug/link-static/runtime-link-static/libl1.a'), '-Wl,--end-group', unordered('-g', '-static'))
if allow_properties('target-os=linux', 'variant=debug', 'link=shared', 'threading=single', 'runtime-link=shared', 'architecture=x86', 'address-model=32'):
    # 32-bit x86 build: adds -march=i686 -m32 everywhere
    command('clang++', unordered(ordered('-x', 'c++'), '-O0', '-fno-inline', '-Wall', '-g', '-march=i686', '-m32', '-fPIC', '-c'), '-o', output_file('bin/clang-linux-3.9.0/debug/lib.o'), input_file(source='lib.cpp'))
    command('clang++', '-o', output_file('bin/clang-linux-3.9.0/debug/libl1.so'), '-Wl,-soname', '-Wl,libl1.so', '-shared', '-Wl,--start-group', input_file('bin/clang-linux-3.9.0/debug/lib.o'), '-Wl,-Bstatic', '-Wl,-Bdynamic', '-Wl,--end-group', unordered('-g', '-march=i686', '-fPIC', '-m32'))
    command('clang++', unordered(ordered('-x', 'c++'), '-O0', '-fno-inline', '-Wall', '-g', '-march=i686', '-m32', '-fPIC', '-c'), '-o', output_file('bin/clang-linux-3.9.0/debug/main.o'), input_file(source='main.cpp'))
    command('clang++', '-o', output_file('bin/clang-linux-3.9.0/debug/test'), '-Wl,-R', arg('-Wl,', target_path('bin/clang-linux-3.9.0/debug/libl1.so')), '-Wl,-rpath-link', arg('-Wl,', target_path('bin/clang-linux-3.9.0/debug/libl1.so')), '-Wl,--start-group', input_file('bin/clang-linux-3.9.0/debug/main.o'), input_file('bin/clang-linux-3.9.0/debug/libl1.so'), '-Wl,-Bstatic', '-Wl,-Bdynamic', '-Wl,--end-group', unordered('-g', '-march=i686', '-fPIC', '-m32'))
# target-os=windows ..
if allow_properties('target-os=windows', 'variant=debug', 'link=shared', 'threading=single', 'runtime-link=shared'):
    # Windows cross-targets use .obj/.dll/.exe suffixes and no -fPIC
    command('clang++', unordered(ordered('-x', 'c++'), '-O0', '-fno-inline', '-Wall', '-g', '-c'), '-o', output_file('bin/clang-linux-3.9.0/debug/target-os-windows/lib.obj'), input_file(source='lib.cpp'))
    command('clang++', '-o', output_file('bin/clang-linux-3.9.0/debug/target-os-windows/l1.dll'), '-shared', '-Wl,--start-group', input_file('bin/clang-linux-3.9.0/debug/target-os-windows/lib.obj'), '-Wl,-Bstatic', '-Wl,-Bdynamic', '-Wl,--end-group', unordered('-g'))
    command('clang++', unordered(ordered('-x', 'c++'), '-O0', '-fno-inline', '-Wall', '-g', '-c'), '-o', output_file('bin/clang-linux-3.9.0/debug/target-os-windows/main.obj'), input_file(source='main.cpp'))
    command('clang++', '-o', output_file('bin/clang-linux-3.9.0/debug/target-os-windows/test.exe'), '-Wl,--start-group', input_file('bin/clang-linux-3.9.0/debug/target-os-windows/main.obj'), input_file('bin/clang-linux-3.9.0/debug/target-os-windows/l1.dll'), '-Wl,-Bstatic', '-Wl,-Bdynamic', '-Wl,--end-group', unordered('-g'))
if allow_properties('target-os=windows', 'variant=release', 'link=shared', 'threading=single', 'runtime-link=shared', 'strip=on'):
    command('clang++', unordered(ordered('-x', 'c++'), '-O3', '-Wno-inline', '-Wall', '-DNDEBUG', '-c'), '-o', output_file('bin/clang-linux-3.9.0/release/target-os-windows/lib.obj'), input_file(source='lib.cpp'))
    command('clang++', '-o', output_file('bin/clang-linux-3.9.0/release/strip-on/target-os-windows/l1.dll'), '-shared', '-Wl,--start-group', input_file('bin/clang-linux-3.9.0/release/target-os-windows/lib.obj'), '-Wl,-Bstatic', '-Wl,-Bdynamic', '-Wl,--end-group', unordered('-Wl,--strip-all'))
    command('clang++', unordered(ordered('-x', 'c++'), '-O3', '-Wno-inline', '-Wall', '-DNDEBUG', '-c'), '-o', output_file('bin/clang-linux-3.9.0/release/strip-on/target-os-windows/main.obj'), input_file(source='main.cpp'))
    # NOTE(review): this output path lacks the strip-on/target-os-windows segments
    # that its inputs use — presumably it mirrors b2's actual target layout; confirm.
    command('clang++', '-o', output_file('bin/clang-linux-3.9.0/release/test'), '-Wl,--start-group', input_file('bin/clang-linux-3.9.0/release/strip-on/target-os-windows/main.obj'), input_file('bin/clang-linux-3.9.0/release/strip-on/target-os-windows/l1.dll'), '-Wl,-Bstatic', '-Wl,-Bdynamic', '-Wl,--end-group', unordered('-Wl,--strip-all'))
if allow_properties('target-os=windows', 'variant=debug', 'link=shared', 'threading=multi', 'runtime-link=shared'):
    command('clang++', unordered(ordered('-x', 'c++'), '-O0', '-fno-inline', '-Wall', '-g', '-pthread', '-c'), '-o', output_file('bin/clang-linux-3.9.0/debug/target-os-windows/threading-multi/lib.obj'), input_file(source='lib.cpp'))
    command('clang++', '-o', output_file('bin/clang-linux-3.9.0/debug/target-os-windows/threading-multi/l1.dll'), '-shared', '-Wl,--start-group', input_file('bin/clang-linux-3.9.0/debug/target-os-windows/threading-multi/lib.obj'), '-Wl,-Bstatic', '-Wl,-Bdynamic', '-Wl,--end-group', unordered('-g', '-pthread'))
    command('clang++', unordered(ordered('-x', 'c++'), '-O0', '-fno-inline', '-Wall', '-g', '-pthread', '-c'), '-o', output_file('bin/clang-linux-3.9.0/debug/target-os-windows/threading-multi/main.obj'), input_file(source='main.cpp'))
    # NOTE(review): output goes to debug/threading-multi/test while the inputs live
    # under debug/target-os-windows/threading-multi — verify against real b2 output.
    command('clang++', '-o', output_file('bin/clang-linux-3.9.0/debug/threading-multi/test'), '-Wl,--start-group', input_file('bin/clang-linux-3.9.0/debug/target-os-windows/threading-multi/main.obj'), input_file('bin/clang-linux-3.9.0/debug/target-os-windows/threading-multi/l1.dll'), '-Wl,-Bstatic', '-Wl,-Bdynamic', '-Wl,--end-group', unordered('-g', '-pthread'))
if allow_properties('target-os=windows', 'variant=debug', 'link=static', 'threading=single', 'runtime-link=shared'):
    command('clang++', unordered(ordered('-x', 'c++'), '-O0', '-fno-inline', '-Wall', '-g', '-c'), '-o', output_file('bin/clang-linux-3.9.0/debug/link-static/target-os-windows/lib.obj'), input_file(source='lib.cpp'))
    command('clang++', unordered(ordered('-x', 'c++'), '-O0', '-fno-inline', '-Wall', '-g', '-c'), '-o', output_file('bin/clang-linux-3.9.0/debug/link-static/target-os-windows/main.obj'), input_file(source='main.cpp'))
    command('clang++', '-o', output_file('bin/clang-linux-3.9.0/debug/link-static/target-os-windows/test.exe'), '-Wl,--start-group', input_file('bin/clang-linux-3.9.0/debug/link-static/target-os-windows/main.obj'), input_file('bin/clang-linux-3.9.0/debug/link-static/target-os-windows/libl1.lib'), '-Wl,-Bstatic', '-Wl,-Bdynamic', '-Wl,--end-group', '-g')
if allow_properties('target-os=windows', 'variant=debug', 'link=static', 'threading=single', 'runtime-link=static'):
    command('clang++', unordered(ordered('-x', 'c++'), '-O0', '-fno-inline', '-Wall', '-g', '-c'), '-o', output_file('bin/clang-linux-3.9.0/debug/link-static/runtime-link-static/target-os-windows/lib.obj'), input_file(source='lib.cpp'))
    command('clang++', unordered(ordered('-x', 'c++'), '-O0', '-fno-inline', '-Wall', '-g', '-c'), '-o', output_file('bin/clang-linux-3.9.0/debug/link-static/runtime-link-static/target-os-windows/main.obj'), input_file(source='main.cpp'))
    # NOTE(review): the link consumes objects from the link-static dir, but the
    # compiles above emit them under link-static/runtime-link-static — confirm.
    command('clang++', '-o', output_file('bin/clang-linux-3.9.0/debug/link-static/runtime-link-static/target-os-windows/test.exe'), '-Wl,--start-group', input_file('bin/clang-linux-3.9.0/debug/link-static/target-os-windows/main.obj'), input_file('bin/clang-linux-3.9.0/debug/link-static/target-os-windows/libl1.lib'), '-Wl,--end-group', unordered('-g', '-static'))
if allow_properties('target-os=windows', 'variant=debug', 'link=shared', 'threading=single', 'runtime-link=shared', 'architecture=x86', 'address-model=32'):
    command('clang++', unordered(ordered('-x', 'c++'), '-O0', '-fno-inline', '-Wall', '-g', '-march=i686', '-m32', '-c'), '-o', output_file('bin/clang-linux-3.9.0/debug/address-model-32/architecture-x86/target-os-windows/lib.obj'), input_file(source='lib.cpp'))
    command('clang++', '-o', output_file('bin/clang-linux-3.9.0/debug/address-model-32/architecture-x86/target-os-windows/l1.dll'), '-shared', '-Wl,--start-group', input_file('bin/clang-linux-3.9.0/debug/address-model-32/architecture-x86/target-os-windows/lib.obj'), '-Wl,-Bstatic', '-Wl,-Bdynamic', '-Wl,--end-group', unordered('-g', '-march=i686', '-m32'))
    command('clang++', unordered(ordered('-x', 'c++'), '-O0', '-fno-inline', '-Wall', '-g', '-march=i686', '-m32', '-c'), '-o', output_file('bin/clang-linux-3.9.0/debug/address-model-32/architecture-x86/target-os-windows/main.obj'), input_file(source='main.cpp'))
    command('clang++', '-o', output_file('bin/clang-linux-3.9.0/debug/address-model-32/architecture-x86/target-os-windows/test.exe'), '-Wl,--start-group', input_file('bin/clang-linux-3.9.0/debug/address-model-32/architecture-x86/target-os-windows/main.obj'), input_file('bin/clang-linux-3.9.0/debug/address-model-32/architecture-x86/target-os-windows/l1.dll'), '-Wl,-Bstatic', '-Wl,-Bdynamic', '-Wl,--end-group', unordered('-g', '-march=i686', '-m32'))
# Replay recorded expectations against the actual command line (MockProgram entry point).
main()
| 162.179775
| 539
| 0.650062
| 2,276
| 14,434
| 4.068102
| 0.051845
| 0.072578
| 0.117939
| 0.127012
| 0.967815
| 0.961119
| 0.961119
| 0.961119
| 0.954423
| 0.954423
| 0
| 0.029397
| 0.059651
| 14,434
| 88
| 540
| 164.022727
| 0.652767
| 0.02023
| 0
| 0
| 0
| 0.883333
| 0.607034
| 0.321633
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.016667
| 0
| 0.016667
| 0.033333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
429dea3cfbfc5c6983ad4058292d2aa8db6357c6
| 12,986
|
py
|
Python
|
test/unit/mongo_class/masterrep_connect.py
|
deepcoder42/mongo-lib
|
fa2b65587ab88ee90c9d85f12dd642c6295e0d94
|
[
"MIT"
] | null | null | null |
test/unit/mongo_class/masterrep_connect.py
|
deepcoder42/mongo-lib
|
fa2b65587ab88ee90c9d85f12dd642c6295e0d94
|
[
"MIT"
] | null | null | null |
test/unit/mongo_class/masterrep_connect.py
|
deepcoder42/mongo-lib
|
fa2b65587ab88ee90c9d85f12dd642c6295e0d94
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# Classification (U)
"""Program: masterrep_connect.py
Description: Unit testing of MasterRep.connect in mongo_class.py.
Usage:
test/unit/mongo_class/masterrep_connect.py
Arguments:
"""
# Libraries and Global Variables
# Standard
import sys
import os
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
# Third-party
import mock
# Local
sys.path.append(os.getcwd())
import mongo_class
import version
__version__ = version.__version__
class UnitTest(unittest.TestCase):

    """Class: UnitTest

    Description: Unit tests for MasterRep.connect; Server.connect and
        fetch_ismaster are always mocked, so no live MongoDB is needed.

    Methods:
        setUp
        test_slaves_attr4
        test_slaves_attr3
        test_slaves_attr2
        test_slaves_attr
        test_repset_attr2
        test_repset_attr
        test_issecondary_attr2
        test_issecondary_attr
        test_ismaster_attr2
        test_ismaster_attr
        test_no_conn_list1
        test_no_conn_list
        test_fail_connection2
        test_fail_connection
        test_no_data2
        test_no_data
        test_default2
        test_default

    """

    def setUp(self):

        """Function: setUp

        Description: Initialization for unit testing.

        Arguments:

        """

        self.name = "Mongo_Server"
        self.user = "mongo_user"
        self.japd = "mongo_pd"
        self.host = "host_server"
        self.port = 27017
        self.dbs = "test"
        self.coll = None
        self.db_auth = None
        self.repset = "mongo_repset"
        # Canned fetch_ismaster() replies: self.data includes a "hosts" list
        # (slaves present); self.data2 omits it (no slaves).
        self.data = {"secondary": False, "ismaster": True,
                     "issecondary": False, "setName": "mongo_repset",
                     "hosts": ["slave1", "slave2"]}
        self.data2 = {"secondary": False, "ismaster": True,
                      "issecondary": False, "setName": "mongo_repset"}
        # Error message connect() is expected to return for a non-master.
        self.msg = "Error: This is not a Master Replication server."

    @mock.patch("mongo_class.Server.connect",
                mock.Mock(return_value=(True, None)))
    @mock.patch("mongo_class.fetch_ismaster")
    def test_slaves_attr4(self, mock_fetch):

        """Function: test_slaves_attr4

        Description: Test slaves attribute when no "hosts" key is returned.

        Arguments:

        """

        mock_fetch.return_value = self.data2
        mongo = mongo_class.MasterRep(self.name, self.user, self.japd,
                                      self.host, self.port)
        mongo.connect()

        self.assertEqual(mongo.slaves, [])

    @mock.patch("mongo_class.Server.connect",
                mock.Mock(return_value=(True, None)))
    @mock.patch("mongo_class.fetch_ismaster")
    def test_slaves_attr3(self, mock_fetch):

        """Function: test_slaves_attr3

        Description: Test slaves attribute.

        Arguments:

        """

        mock_fetch.return_value = self.data
        mongo = mongo_class.MasterRep(self.name, self.user, self.japd,
                                      self.host, self.port)

        self.assertEqual(mongo.connect(), (True, None))

    @mock.patch("mongo_class.Server.connect",
                mock.Mock(return_value=(True, None)))
    @mock.patch("mongo_class.fetch_ismaster")
    def test_slaves_attr2(self, mock_fetch):

        """Function: test_slaves_attr2

        Description: Test slaves attribute is populated from "hosts".

        Arguments:

        """

        mock_fetch.return_value = self.data
        mongo = mongo_class.MasterRep(self.name, self.user, self.japd,
                                      self.host, self.port)
        mongo.connect()

        self.assertEqual(mongo.slaves, ["slave1", "slave2"])

    @mock.patch("mongo_class.Server.connect",
                mock.Mock(return_value=(True, None)))
    @mock.patch("mongo_class.fetch_ismaster")
    def test_slaves_attr(self, mock_fetch):

        """Function: test_slaves_attr

        Description: Test slaves attribute.

        Arguments:

        """

        mock_fetch.return_value = self.data
        mongo = mongo_class.MasterRep(self.name, self.user, self.japd,
                                      self.host, self.port)

        self.assertEqual(mongo.connect(), (True, None))

    @mock.patch("mongo_class.Server.connect",
                mock.Mock(return_value=(True, None)))
    @mock.patch("mongo_class.fetch_ismaster")
    def test_repset_attr2(self, mock_fetch):

        """Function: test_repset_attr2

        Description: Test repset attribute after connect.

        Arguments:

        """

        mock_fetch.return_value = self.data
        mongo = mongo_class.MasterRep(self.name, self.user, self.japd,
                                      self.host, self.port)
        mongo.connect()

        self.assertEqual(mongo.repset, "mongo_repset")

    @mock.patch("mongo_class.Server.connect",
                mock.Mock(return_value=(True, None)))
    @mock.patch("mongo_class.fetch_ismaster")
    def test_repset_attr(self, mock_fetch):

        """Function: test_repset_attr

        Description: Test repset attribute.

        Arguments:

        """

        mock_fetch.return_value = self.data
        mongo = mongo_class.MasterRep(self.name, self.user, self.japd,
                                      self.host, self.port)

        self.assertEqual(mongo.connect(), (True, None))

    @mock.patch("mongo_class.Server.connect",
                mock.Mock(return_value=(True, None)))
    @mock.patch("mongo_class.fetch_ismaster")
    def test_issecondary_attr2(self, mock_fetch):

        """Function: test_issecondary_attr2

        Description: Test issecondary attribute after connect.

        Arguments:

        """

        mock_fetch.return_value = self.data
        mongo = mongo_class.MasterRep(self.name, self.user, self.japd,
                                      self.host, self.port)
        mongo.connect()

        self.assertFalse(mongo.issecondary)

    @mock.patch("mongo_class.Server.connect",
                mock.Mock(return_value=(True, None)))
    @mock.patch("mongo_class.fetch_ismaster")
    def test_issecondary_attr(self, mock_fetch):

        """Function: test_issecondary_attr

        Description: Test issecondary attribute.

        Arguments:

        """

        mock_fetch.return_value = self.data
        mongo = mongo_class.MasterRep(self.name, self.user, self.japd,
                                      self.host, self.port)

        self.assertEqual(mongo.connect(), (True, None))

    @mock.patch("mongo_class.Server.connect",
                mock.Mock(return_value=(True, None)))
    @mock.patch("mongo_class.fetch_ismaster")
    def test_ismaster_attr2(self, mock_fetch):

        """Function: test_ismaster_attr2

        Description: Test ismaster attribute after connect.

        Arguments:

        """

        mock_fetch.return_value = self.data
        mongo = mongo_class.MasterRep(self.name, self.user, self.japd,
                                      self.host, self.port)
        mongo.connect()

        self.assertTrue(mongo.ismaster)

    @mock.patch("mongo_class.Server.connect",
                mock.Mock(return_value=(True, None)))
    @mock.patch("mongo_class.fetch_ismaster")
    def test_ismaster_attr(self, mock_fetch):

        """Function: test_ismaster_attr

        Description: Test ismaster attribute.

        Arguments:

        """

        mock_fetch.return_value = self.data
        mongo = mongo_class.MasterRep(self.name, self.user, self.japd,
                                      self.host, self.port)

        self.assertEqual(mongo.connect(), (True, None))

    @mock.patch("mongo_class.Server.connect",
                mock.Mock(return_value=(True, None)))
    @mock.patch("mongo_class.fetch_ismaster")
    def test_no_conn_list1(self, mock_fetch):

        """Function: test_no_conn_list1

        Description: Test with no connections passed.

        Arguments:

        """

        mock_fetch.return_value = self.data
        mongo = mongo_class.MasterRep(self.name, self.user, self.japd,
                                      self.host, self.port)
        mongo.conn = True  # Simulate an already-established connection object.
        mongo.connect()

        self.assertEqual(
            (mongo.name, mongo.user, mongo.japd, mongo.host, mongo.port,
             mongo.ismaster, mongo.issecondary),
            (self.name, self.user, self.japd, self.host, self.port, True,
             False))

    @mock.patch("mongo_class.Server.connect",
                mock.Mock(return_value=(True, None)))
    @mock.patch("mongo_class.fetch_ismaster")
    def test_no_conn_list(self, mock_fetch):

        """Function: test_no_conn_list

        Description: Test with no connections passed.

        Arguments:

        """

        mock_fetch.return_value = self.data
        mongo = mongo_class.MasterRep(self.name, self.user, self.japd,
                                      self.host, self.port)
        mongo.conn = True  # Simulate an already-established connection object.

        self.assertEqual(mongo.connect(), (True, None))

    @mock.patch("mongo_class.Server.connect",
                mock.Mock(return_value=(False, "Error Message")))
    @mock.patch("mongo_class.fetch_ismaster")
    def test_fail_connection2(self, mock_fetch):

        """Function: test_fail_connection2

        Description: Test attributes stay unset after a failed connection.

        Arguments:

        """

        mock_fetch.return_value = self.data
        mongo = mongo_class.MasterRep(self.name, self.user, self.japd,
                                      self.host, self.port)

        self.assertEqual(
            (mongo.name, mongo.user, mongo.japd, mongo.host, mongo.port,
             mongo.ismaster, mongo.issecondary),
            (self.name, self.user, self.japd, self.host, self.port, None,
             None))

    @mock.patch("mongo_class.Server.connect",
                mock.Mock(return_value=(False, "Error Message")))
    @mock.patch("mongo_class.fetch_ismaster")
    def test_fail_connection(self, mock_fetch):

        """Function: test_fail_connection

        Description: Test with failed connection.

        Arguments:

        """

        mock_fetch.return_value = self.data
        mongo = mongo_class.MasterRep(self.name, self.user, self.japd,
                                      self.host, self.port)

        self.assertEqual(mongo.connect(), (False, "Error Message"))

    @mock.patch("mongo_class.Server.disconnect", mock.Mock(return_value=True))
    @mock.patch("mongo_class.Server.connect",
                mock.Mock(return_value=(True, None)))
    @mock.patch("mongo_class.fetch_ismaster", mock.Mock(return_value={}))
    def test_no_data2(self):

        """Function: test_no_data2

        Description: Test attributes when fetch_ismaster returns no data.

        Arguments:

        """

        mongo = mongo_class.MasterRep(self.name, self.user, self.japd,
                                      self.host, self.port)

        self.assertEqual(
            (mongo.name, mongo.user, mongo.japd, mongo.host, mongo.port,
             mongo.ismaster, mongo.issecondary),
            (self.name, self.user, self.japd, self.host, self.port, None,
             None))

    @mock.patch("mongo_class.Server.disconnect", mock.Mock(return_value=True))
    @mock.patch("mongo_class.Server.connect",
                mock.Mock(return_value=(True, None)))
    @mock.patch("mongo_class.fetch_ismaster", mock.Mock(return_value={}))
    def test_no_data(self):

        """Function: test_no_data

        Description: Test connect() returns the not-a-master error when
            fetch_ismaster returns no data.

        Arguments:

        """

        mongo = mongo_class.MasterRep(self.name, self.user, self.japd,
                                      self.host, self.port)

        self.assertEqual(mongo.connect(), (False, self.msg))

    @mock.patch("mongo_class.Server.connect",
                mock.Mock(return_value=(True, None)))
    @mock.patch("mongo_class.fetch_ismaster")
    def test_default2(self, mock_fetch):

        """Function: test_default2

        Description: Test connect method with default arguments.

        Arguments:

        """

        mock_fetch.return_value = self.data
        mongo = mongo_class.MasterRep(self.name, self.user, self.japd,
                                      self.host, self.port)
        mongo.connect()

        self.assertEqual(
            (mongo.name, mongo.user, mongo.japd, mongo.host, mongo.port,
             mongo.ismaster, mongo.issecondary),
            (self.name, self.user, self.japd, self.host, self.port, True,
             False))

    @mock.patch("mongo_class.Server.connect",
                mock.Mock(return_value=(True, None)))
    @mock.patch("mongo_class.fetch_ismaster")
    def test_default(self, mock_fetch):

        """Function: test_default

        Description: Test connect method with default arguments.

        Arguments:

        """

        mock_fetch.return_value = self.data
        mongo = mongo_class.MasterRep(self.name, self.user, self.japd,
                                      self.host, self.port)

        self.assertEqual(mongo.connect(), (True, None))
if __name__ == "__main__":
    # Run the test suite when this module is executed as a script.
    unittest.main()
| 28.108225
| 78
| 0.597105
| 1,440
| 12,986
| 5.188194
| 0.08125
| 0.078972
| 0.071209
| 0.09664
| 0.819837
| 0.813144
| 0.757194
| 0.74729
| 0.74729
| 0.731227
| 0
| 0.004785
| 0.29193
| 12,986
| 461
| 79
| 28.169197
| 0.807722
| 0.181349
| 0
| 0.718593
| 0
| 0
| 0.129159
| 0.100222
| 0
| 0
| 0
| 0
| 0.090452
| 1
| 0.095477
| false
| 0
| 0.035176
| 0
| 0.135678
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c45eaac8a40e0081ea0aa7ef65b1e693f3618fd1
| 419
|
py
|
Python
|
python/testData/formatter/fromImportTrailingCommaWithoutParentheses.py
|
truthiswill/intellij-community
|
fff88cfb0dc168eea18ecb745d3e5b93f57b0b95
|
[
"Apache-2.0"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/formatter/fromImportTrailingCommaWithoutParentheses.py
|
truthiswill/intellij-community
|
fff88cfb0dc168eea18ecb745d3e5b93f57b0b95
|
[
"Apache-2.0"
] | 173
|
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/testData/formatter/fromImportTrailingCommaWithoutParentheses.py
|
truthiswill/intellij-community
|
fff88cfb0dc168eea18ecb745d3e5b93f57b0b95
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
from module import foo
from module import foo, bar
from module import foo, bar,
# | margin
from module import foo, bar, baz
from module import foo, \
bar
from module import foo, \
bar,
from module import foo, \
bar # comment
from module import (foo,
bar)
from module import (foo,
bar,)
from module import (
foo,
bar # comment
)
| 18.217391
| 37
| 0.572792
| 53
| 419
| 4.528302
| 0.150943
| 0.416667
| 0.666667
| 0.791667
| 0.883333
| 0.791667
| 0.791667
| 0.791667
| 0.791667
| 0.791667
| 0
| 0
| 0.365155
| 419
| 23
| 38
| 18.217391
| 0.902256
| 0.057279
| 0
| 0.444444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.555556
| null | null | 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 11
|
c47deb999743e08e15b1c3504528bdf2d8c7910d
| 22,528
|
py
|
Python
|
AdventOfCode/Day11.py
|
JanStoltman/100DaysOfCode
|
1d18b76ed1e3e942e8392006a5d4bfb41484d047
|
[
"MIT"
] | null | null | null |
AdventOfCode/Day11.py
|
JanStoltman/100DaysOfCode
|
1d18b76ed1e3e942e8392006a5d4bfb41484d047
|
[
"MIT"
] | null | null | null |
AdventOfCode/Day11.py
|
JanStoltman/100DaysOfCode
|
1d18b76ed1e3e942e8392006a5d4bfb41484d047
|
[
"MIT"
] | null | null | null |
steps ="s,nw,s,nw,se,nw,nw,nw,nw,n,n,se,n,ne,n,ne,ne,n,se,ne,nw,ne,s,nw,ne,ne,sw,se,se,se,se,se,se,se,nw,se,se,sw,n,se,se,se,se,s,n,ne,se,nw,nw,nw,s,s,sw,se,se,s,s,s,se,s,s,n,sw,s,s,s,s,s,nw,s,se,sw,s,sw,s,s,s,ne,sw,sw,sw,s,sw,s,sw,sw,sw,s,sw,sw,sw,sw,se,sw,sw,sw,sw,ne,sw,ne,sw,sw,nw,sw,sw,ne,sw,se,sw,n,n,sw,sw,sw,nw,nw,s,nw,sw,nw,nw,sw,s,nw,sw,nw,nw,sw,sw,sw,nw,sw,sw,nw,nw,nw,sw,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,sw,nw,nw,nw,se,nw,s,nw,nw,nw,nw,nw,nw,n,ne,n,se,nw,nw,nw,nw,nw,n,nw,ne,se,n,nw,nw,nw,nw,n,n,nw,ne,nw,ne,ne,n,nw,n,nw,n,ne,n,s,n,ne,se,nw,n,n,n,nw,n,n,sw,n,n,nw,n,se,n,nw,n,n,n,n,n,n,n,n,n,n,se,n,nw,n,n,n,n,se,sw,n,n,n,n,n,n,ne,n,s,n,nw,se,n,n,n,n,ne,ne,ne,n,se,n,ne,sw,n,ne,ne,n,se,s,n,n,ne,se,n,ne,ne,n,ne,n,s,ne,n,ne,ne,n,se,ne,s,n,nw,ne,nw,n,ne,ne,ne,ne,ne,ne,n,ne,ne,ne,ne,ne,ne,sw,ne,n,ne,se,sw,se,n,ne,ne,n,ne,ne,ne,s,ne,sw,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,sw,ne,ne,ne,ne,ne,ne,se,se,ne,nw,ne,ne,ne,ne,ne,ne,ne,ne,ne,se,se,ne,ne,se,ne,ne,ne,se,ne,ne,se,se,sw,s,ne,sw,ne,ne,sw,se,ne,ne,se,sw,ne,se,ne,ne,se,ne,ne,ne,ne,ne,se,ne,ne,se,ne,se,ne,se,se,se,s,ne,se,ne,ne,se,ne,se,nw,ne,ne,ne,se,ne,se,ne,se,se,n,se,se,se,ne,se,se,se,se,se,ne,se,ne,se,se,se,ne,se,ne,sw,se,ne,n,se,se,se,ne,se,se,ne,sw,se,se,n,se,ne,se,se,se,ne,se,se,s,se,se,se,se,se,se,se,se,se,se,se,ne,nw,se,se,se,se,se,se,se,se,se,se,se,se,se,ne,se,se,ne,se,se,se,s,se,se,ne,nw,se,s,se,se,n,se,n,se,se,se,se,nw,s,se,se,se,se,s,s,s,sw,se,se,s,se,se,se,s,s,s,n,s,se,se,se,s,se,se,nw,n,se,se,s,se,se,se,se,s,se,se,nw,nw,s,se,se,se,se,se,ne,s,s,se,se,se,s,se,s,se,se,s,se,se,se,sw,se,se,se,se,se,se,s,s,sw,s,s,s,s,s,s,s,sw,se,s,s,s,se,se,se,se,s,s,n,s,nw,se,s,s,se,se,s,s,se,s,s,s,ne,se,ne,nw,n,s,s,s,se,sw,s,s,s,se,s,s,s,s,s,s,s,s,s,ne,s,s,s,se,s,s,s,s,s,n,ne,sw,se,s,se,s,s,nw,s,sw,s,s,s,s,se,se,s,s,ne,s,s,s,s,se,s,s,se,ne,s,s,s,s,s,nw,s,s,se,s,sw,n,n,s,s,s,ne,s,s,s,s,s,s,s,s,s,sw,s,s,nw,sw,s,s,s,sw,s,s,s,sw,s,s,s,s,n,s,s,s,s,s,s,sw,nw,s,sw,sw,s,s,n,sw,s,s,s,s,se,sw,ne,ne,s,se,sw,s,sw,ne,n,sw,sw,sw,ne,s,s,sw,
se,nw,s,s,s,se,nw,s,sw,nw,ne,sw,s,s,sw,sw,s,s,se,ne,s,s,sw,nw,sw,s,s,s,s,ne,s,sw,sw,s,sw,s,s,s,s,sw,sw,s,sw,s,s,s,sw,s,s,s,sw,sw,nw,sw,sw,ne,s,se,sw,sw,s,sw,sw,s,s,sw,s,s,s,n,s,s,sw,s,nw,sw,s,s,s,s,ne,ne,sw,s,sw,sw,sw,sw,s,sw,sw,se,ne,sw,s,sw,sw,s,s,sw,nw,sw,sw,s,nw,nw,s,sw,s,nw,sw,sw,n,sw,s,sw,sw,sw,s,sw,sw,sw,sw,ne,s,s,nw,sw,sw,sw,sw,sw,ne,s,sw,s,sw,ne,n,sw,sw,sw,s,sw,s,nw,sw,sw,sw,sw,sw,s,s,s,n,sw,sw,sw,se,s,ne,sw,s,s,sw,sw,sw,s,s,sw,sw,sw,sw,sw,sw,s,sw,sw,sw,ne,sw,s,s,sw,ne,sw,se,sw,sw,sw,ne,ne,sw,sw,ne,n,sw,ne,sw,sw,sw,sw,sw,sw,s,sw,sw,n,sw,nw,sw,sw,sw,sw,nw,sw,sw,sw,sw,sw,ne,sw,sw,sw,s,sw,sw,sw,sw,nw,sw,n,sw,sw,sw,sw,ne,ne,sw,sw,sw,sw,sw,sw,sw,sw,s,sw,se,sw,sw,sw,sw,sw,sw,sw,s,sw,sw,sw,sw,sw,sw,sw,sw,nw,sw,sw,sw,sw,sw,sw,sw,sw,se,sw,sw,nw,sw,sw,sw,sw,sw,nw,sw,sw,ne,sw,se,sw,sw,se,sw,nw,sw,nw,sw,nw,nw,nw,n,ne,nw,nw,sw,nw,ne,sw,sw,nw,sw,n,sw,s,sw,sw,sw,sw,sw,sw,sw,sw,nw,nw,sw,sw,sw,nw,sw,sw,sw,sw,sw,ne,sw,nw,sw,n,sw,s,n,sw,sw,sw,sw,sw,sw,nw,sw,sw,sw,sw,nw,sw,nw,nw,sw,sw,sw,s,sw,sw,s,sw,sw,nw,sw,ne,se,n,sw,sw,se,ne,sw,nw,nw,nw,sw,sw,sw,nw,sw,sw,nw,nw,nw,nw,s,sw,nw,sw,n,nw,s,sw,nw,n,sw,sw,sw,nw,nw,ne,nw,nw,sw,sw,sw,nw,nw,sw,nw,sw,sw,sw,nw,sw,s,nw,sw,nw,nw,sw,sw,nw,sw,s,nw,nw,ne,sw,sw,ne,sw,se,nw,sw,nw,sw,n,nw,sw,sw,nw,sw,sw,sw,sw,sw,nw,n,nw,nw,s,nw,nw,nw,sw,sw,n,nw,sw,nw,sw,se,sw,sw,s,nw,sw,se,nw,sw,nw,sw,ne,n,sw,ne,nw,sw,nw,sw,sw,nw,ne,nw,nw,nw,s,nw,sw,nw,se,sw,nw,s,sw,s,nw,n,nw,sw,se,nw,nw,nw,n,s,sw,nw,ne,n,sw,sw,nw,nw,sw,nw,nw,nw,nw,sw,sw,nw,ne,ne,sw,nw,n,nw,se,nw,nw,sw,nw,nw,nw,nw,nw,nw,s,nw,nw,sw,sw,nw,nw,sw,nw,nw,sw,sw,se,n,se,s,nw,nw,sw,nw,nw,nw,nw,nw,nw,nw,nw,nw,sw,s,sw,nw,nw,sw,sw,nw,nw,nw,nw,nw,nw,se,nw,n,sw,sw,nw,n,nw,nw,nw,ne,se,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,s,nw,nw,nw,s,nw,nw,nw,nw,nw,nw,ne,nw,nw,nw,nw,ne,nw,ne,nw,nw,nw,n,sw,nw,nw,sw,nw,nw,nw,s,nw,nw,s,nw,nw,nw,sw,nw,nw,s,s,nw,nw,nw,sw,sw,nw,n,nw,nw,nw,s,nw,nw,nw,nw,nw,nw,nw,nw,ne,nw,nw,n,nw,nw,nw,nw,nw,nw,nw,nw,s,n,nw,nw,nw,n,nw,nw,nw,nw,nw,nw,nw,n,nw,nw,nw,n,sw,se,nw,nw,ne,nw,sw
,nw,ne,nw,n,nw,nw,n,ne,nw,nw,nw,nw,nw,nw,nw,se,nw,nw,nw,nw,nw,nw,nw,nw,ne,nw,nw,se,nw,nw,n,nw,n,nw,nw,nw,nw,n,n,nw,se,n,n,nw,n,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,n,s,n,n,sw,nw,nw,nw,se,nw,ne,nw,nw,nw,sw,nw,nw,sw,n,nw,nw,nw,n,nw,nw,n,n,nw,nw,n,n,n,nw,nw,nw,n,nw,sw,ne,sw,sw,se,n,se,n,n,nw,n,n,n,nw,nw,nw,nw,nw,n,n,n,se,nw,n,n,sw,nw,nw,nw,nw,n,nw,s,nw,n,nw,nw,n,nw,nw,n,nw,nw,nw,n,nw,nw,s,nw,nw,nw,nw,nw,nw,n,nw,n,se,n,n,nw,nw,nw,nw,ne,n,se,sw,nw,n,n,nw,nw,n,nw,sw,sw,n,n,nw,se,nw,n,nw,nw,ne,nw,n,nw,nw,nw,n,n,nw,n,nw,nw,n,s,n,n,nw,n,n,n,nw,n,sw,n,se,sw,nw,n,n,sw,n,nw,nw,n,sw,sw,n,n,s,n,n,n,n,n,n,sw,nw,nw,n,sw,n,nw,nw,se,n,nw,ne,n,n,n,n,n,nw,sw,nw,nw,s,ne,se,nw,n,se,sw,nw,n,n,n,nw,n,nw,nw,n,n,n,nw,s,s,n,sw,n,nw,n,ne,nw,n,nw,nw,nw,sw,n,nw,n,n,sw,n,n,nw,n,n,se,n,nw,nw,nw,ne,nw,n,nw,n,nw,n,n,nw,nw,nw,n,nw,s,nw,n,s,nw,nw,n,nw,n,nw,n,n,sw,nw,n,nw,nw,n,sw,nw,n,nw,n,se,n,n,n,nw,n,nw,nw,se,nw,nw,nw,nw,n,nw,n,n,n,nw,n,n,nw,n,n,nw,n,n,n,n,n,nw,sw,n,nw,n,n,n,n,nw,n,nw,ne,nw,n,n,n,n,n,n,se,n,n,nw,n,n,n,n,s,nw,n,n,ne,n,n,se,n,n,nw,n,sw,n,s,n,nw,n,n,nw,nw,n,n,n,n,ne,n,n,se,ne,nw,n,n,nw,n,s,nw,nw,nw,n,sw,n,s,n,n,n,n,n,n,sw,n,ne,n,n,ne,n,n,n,se,sw,n,n,n,nw,n,n,n,nw,n,n,n,n,se,n,n,n,n,sw,ne,ne,n,n,n,ne,s,ne,n,n,n,nw,n,n,n,nw,n,n,n,n,n,nw,sw,nw,n,n,n,s,ne,n,n,ne,nw,n,n,n,n,n,n,nw,n,n,n,n,n,n,sw,n,nw,n,se,n,n,n,n,se,n,n,se,n,s,n,nw,n,se,n,n,sw,n,n,n,n,n,n,ne,n,sw,ne,n,n,n,n,n,n,n,n,n,n,ne,n,n,sw,sw,n,n,n,n,n,n,n,n,se,n,n,n,se,n,n,n,n,n,ne,se,n,n,n,n,n,n,n,n,n,n,ne,n,n,n,n,ne,n,n,ne,nw,se,n,n,n,n,n,n,n,n,n,ne,s,n,n,n,n,nw,n,n,n,ne,n,se,ne,ne,n,n,ne,n,sw,n,n,ne,n,n,se,ne,n,n,n,n,n,s,s,ne,nw,n,nw,ne,n,ne,nw,ne,ne,n,n,sw,n,n,ne,s,ne,n,sw,ne,n,se,nw,n,n,n,n,sw,s,se,ne,n,n,n,n,n,ne,sw,sw,s,n,nw,ne,n,n,ne,ne,ne,se,n,n,sw,n,se,n,n,se,ne,n,n,ne,nw,ne,n,sw,n,n,n,ne,ne,ne,ne,n,n,n,n,n,sw,n,n,n,nw,n,n,s,n,ne,ne,n,n,sw,sw,ne,n,n,n,n,ne,ne,ne,ne,nw,ne,se,ne,n,n,n,se,n,n,ne,n,sw,nw,n,n,s,n,n,n,n,n,n,n,n,sw,nw,n,n,n,n,n,n,n,n,ne,n,ne,n,ne,n,n,n,ne,ne,n,n,ne,n,n,n,n,ne,sw,n,se,n,n,n,n,ne,n,s,n,n,ne,ne,ne,n,n,ne,ne
,n,ne,n,ne,se,n,se,sw,sw,sw,se,nw,ne,nw,ne,ne,n,ne,n,ne,n,ne,ne,ne,ne,n,n,n,ne,ne,n,ne,nw,sw,ne,n,ne,n,n,ne,nw,n,n,n,ne,se,n,n,n,ne,ne,n,n,nw,ne,n,n,s,n,ne,s,s,ne,nw,n,ne,n,n,nw,ne,ne,ne,n,n,ne,s,ne,sw,n,n,n,n,ne,ne,ne,ne,ne,n,sw,n,nw,ne,ne,ne,n,se,ne,s,ne,ne,ne,s,ne,n,n,ne,n,s,ne,n,ne,n,n,ne,n,ne,ne,ne,n,ne,n,nw,ne,ne,ne,ne,ne,ne,nw,ne,ne,ne,ne,n,sw,sw,ne,n,se,s,ne,n,n,nw,ne,ne,sw,ne,n,se,ne,n,sw,ne,n,ne,ne,ne,ne,ne,ne,ne,ne,n,n,n,ne,ne,nw,n,ne,ne,n,ne,n,sw,ne,n,n,ne,ne,n,s,nw,n,se,ne,n,se,ne,ne,ne,sw,n,ne,ne,sw,ne,n,n,ne,n,nw,n,ne,ne,n,ne,ne,ne,ne,se,se,nw,n,ne,ne,n,ne,ne,n,ne,sw,ne,se,n,ne,ne,ne,ne,ne,ne,ne,n,nw,ne,se,ne,n,ne,ne,ne,ne,ne,ne,ne,n,n,n,nw,ne,n,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,nw,ne,s,n,ne,ne,ne,sw,sw,ne,sw,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,se,ne,ne,ne,ne,sw,ne,n,ne,ne,n,ne,ne,ne,nw,n,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,s,n,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,sw,ne,ne,ne,ne,n,nw,ne,ne,ne,ne,se,ne,ne,ne,ne,sw,ne,ne,nw,ne,n,ne,ne,ne,ne,sw,ne,ne,sw,ne,ne,ne,ne,ne,se,ne,n,ne,nw,ne,sw,ne,ne,sw,ne,ne,n,ne,ne,ne,nw,n,ne,ne,n,ne,nw,n,ne,ne,ne,s,s,ne,ne,n,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,sw,nw,ne,ne,ne,ne,ne,nw,ne,sw,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,n,n,ne,sw,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,sw,ne,ne,se,n,ne,ne,ne,ne,s,ne,sw,ne,ne,ne,ne,se,ne,ne,sw,ne,ne,s,n,ne,ne,ne,ne,n,ne,se,ne,ne,nw,s,ne,ne,ne,ne,ne,ne,ne,ne,ne,nw,ne,ne,s,ne,ne,ne,se,ne,ne,ne,ne,s,ne,se,ne,ne,ne,se,ne,nw,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,nw,ne,ne,se,se,ne,nw,ne,ne,ne,n,ne,ne,ne,s,ne,ne,ne,ne,ne,nw,n,ne,s,ne,se,s,ne,ne,ne,ne,se,ne,ne,ne,nw,ne,se,ne,ne,ne,ne,ne,ne,ne,nw,ne,se,ne,ne,ne,se,ne,se,ne,ne,ne,ne,ne,ne,se,ne,ne,ne,ne,n,ne,ne,ne,ne,ne,ne,ne,ne,sw,ne,ne,ne,s,ne,ne,ne,ne,ne,ne,ne,ne,ne,n,sw,ne,nw,ne,nw,ne,sw,ne,ne,ne,ne,ne,ne,ne,s,ne,ne,n,ne,n,sw,ne,ne,ne,se,ne,se,se,ne,ne,n,n,ne,nw,ne,s,ne,ne,ne,ne,ne,ne,se,sw,ne,se,se,s,nw,se,ne,ne,s,n,ne,ne,ne,ne,se,se,ne,ne,ne,nw,s
w,ne,ne,ne,ne,ne,se,ne,ne,se,se,se,se,se,se,ne,ne,se,ne,sw,ne,ne,ne,se,ne,ne,ne,ne,se,ne,se,ne,se,ne,sw,ne,ne,ne,se,ne,ne,ne,ne,ne,s,ne,ne,ne,ne,s,se,sw,ne,ne,s,ne,se,ne,se,ne,se,se,ne,ne,ne,ne,ne,ne,ne,ne,se,se,ne,ne,se,s,se,ne,ne,ne,ne,se,ne,se,se,nw,ne,se,ne,n,n,ne,ne,s,ne,n,nw,ne,nw,ne,ne,ne,s,s,ne,sw,ne,sw,se,se,se,ne,ne,sw,se,ne,ne,se,ne,se,ne,se,ne,se,ne,s,se,ne,ne,ne,se,ne,ne,se,ne,n,se,se,se,ne,nw,se,se,se,se,se,ne,n,ne,nw,ne,se,se,ne,ne,ne,ne,se,se,ne,ne,ne,ne,ne,se,se,ne,sw,s,se,nw,se,ne,se,n,ne,se,sw,ne,ne,ne,ne,ne,ne,n,ne,se,n,se,ne,ne,ne,n,ne,se,ne,se,ne,ne,se,n,ne,sw,s,se,se,se,sw,ne,se,ne,ne,se,se,nw,se,ne,se,ne,ne,se,se,nw,ne,nw,ne,ne,ne,n,se,ne,n,ne,sw,ne,ne,ne,sw,ne,se,se,se,ne,sw,ne,ne,sw,ne,ne,ne,ne,ne,se,se,ne,ne,ne,ne,ne,n,se,ne,ne,s,ne,ne,se,ne,nw,ne,s,nw,nw,se,se,ne,ne,n,ne,ne,nw,ne,ne,ne,se,ne,sw,s,ne,ne,ne,n,ne,ne,se,se,se,ne,sw,ne,se,ne,sw,se,se,se,ne,ne,ne,ne,ne,se,s,se,sw,se,ne,se,se,ne,se,ne,se,ne,se,ne,se,ne,ne,ne,s,se,se,n,se,se,ne,se,ne,n,s,se,se,se,se,se,nw,ne,ne,se,se,se,se,ne,se,ne,ne,se,se,se,ne,se,se,se,ne,se,ne,ne,se,ne,nw,ne,se,ne,se,ne,ne,se,se,ne,ne,ne,ne,se,ne,ne,se,n,ne,se,sw,ne,ne,ne,se,ne,sw,se,ne,se,ne,se,ne,se,se,ne,ne,nw,sw,se,ne,se,ne,se,ne,se,ne,ne,ne,se,ne,ne,se,n,se,se,se,nw,se,se,se,se,se,ne,se,ne,se,sw,sw,se,se,n,ne,ne,s,se,ne,ne,ne,se,ne,se,ne,se,se,ne,ne,se,se,se,se,ne,se,ne,ne,ne,se,ne,se,ne,ne,se,ne,ne,se,se,se,s,se,se,ne,ne,ne,nw,se,se,ne,nw,sw,ne,se,se,s,se,ne,ne,nw,se,se,n,se,se,se,se,s,se,se,se,ne,se,ne,se,se,se,ne,ne,ne,se,se,nw,se,ne,se,se,se,ne,ne,se,se,ne,ne,n,se,ne,se,ne,se,ne,ne,se,se,se,se,se,se,ne,se,ne,sw,ne,se,n,se,se,s,se,se,ne,ne,se,se,se,se,nw,se,se,se,s,sw,se,se,se,ne,se,ne,se,se,ne,ne,se,ne,ne,ne,se,sw,se,n,se,se,se,se,ne,nw,ne,se,se,s,se,se,ne,s,se,ne,s,se,se,ne,sw,sw,se,se,se,se,se,se,s,se,ne,ne,se,sw,ne,se,se,se,se,ne,se,se,se,se,ne,se,ne,se,se,se,se,ne,ne,ne,ne,ne,ne,n,se,ne,se,se,se,se,ne,nw,se,se,ne,se,n,sw,se,se,se,se,ne,se,se,se,s,se,ne,sw,se,se,se,se,n,se,ne,ne,se,se,se,se,se,se,
se,nw,se,ne,ne,se,ne,se,se,se,se,se,ne,se,s,se,sw,se,ne,se,se,ne,n,se,se,se,nw,nw,se,se,se,nw,se,se,s,ne,sw,se,se,ne,se,ne,se,ne,ne,sw,se,ne,nw,se,se,se,se,ne,ne,se,n,se,sw,se,se,nw,se,se,se,se,se,se,se,ne,ne,se,se,se,nw,se,se,se,n,s,n,se,ne,se,s,ne,s,se,se,se,se,ne,se,se,se,se,se,se,se,se,se,se,se,ne,nw,sw,se,ne,se,se,n,se,se,nw,se,se,ne,se,se,s,ne,se,ne,se,se,se,se,se,s,se,nw,se,se,se,se,n,se,ne,se,nw,se,se,se,se,se,s,se,se,se,n,ne,se,se,se,ne,ne,se,se,ne,sw,se,sw,n,se,sw,se,se,se,se,se,se,se,se,ne,se,se,se,se,se,ne,se,s,se,se,se,se,n,se,se,se,se,se,se,se,se,se,se,sw,se,se,sw,se,se,sw,se,se,sw,se,nw,se,se,se,se,se,se,se,se,se,n,se,ne,se,se,se,se,se,se,se,se,se,se,se,se,se,s,nw,ne,se,se,s,se,se,s,se,se,se,se,se,se,se,sw,se,se,se,se,se,se,se,se,se,se,se,se,se,se,se,s,se,se,se,se,se,sw,se,se,ne,s,se,se,se,se,se,se,sw,se,se,se,se,ne,se,se,s,se,se,n,se,se,se,ne,se,s,se,se,s,n,se,s,se,se,sw,se,se,se,se,se,se,se,sw,sw,se,se,se,se,se,se,s,se,se,nw,se,se,se,se,se,se,se,se,se,se,s,se,se,se,se,se,se,se,sw,se,se,se,n,se,se,se,se,s,s,se,se,se,se,n,s,se,n,se,se,se,se,se,nw,se,se,se,sw,se,se,se,s,s,s,se,se,se,se,nw,s,n,se,se,se,se,ne,se,se,sw,se,se,s,se,ne,ne,se,sw,s,s,se,se,s,nw,se,se,se,se,se,nw,se,se,se,se,s,s,se,s,n,se,se,se,se,se,se,s,se,s,se,nw,se,se,se,sw,se,s,s,se,s,se,se,se,se,se,se,se,nw,se,se,nw,se,se,se,se,se,s,se,se,s,se,se,s,s,se,se,se,se,s,se,sw,s,se,se,s,se,ne,se,se,s,se,se,ne,se,se,s,se,se,s,se,s,se,se,se,se,se,s,s,s,se,se,se,se,s,s,ne,n,se,se,s,sw,se,sw,se,sw,se,se,se,se,s,se,s,s,se,se,se,se,ne,se,se,s,s,se,se,se,se,se,n,ne,ne,se,se,s,se,se,se,se,se,s,se,se,se,se,sw,se,se,se,sw,s,se,se,se,ne,se,se,se,se,se,se,se,s,ne,se,se,se,se,sw,s,s,se,se,se,se,se,n,n,se,se,ne,se,se,sw,s,se,se,nw,ne,s,se,se,se,n,n,se,s,ne,se,se,se,se,se,se,se,se,se,se,se,se,se,s,nw,se,se,s,se,se,sw,se,se,s,sw,sw,se,se,s,sw,se,s,s,s,s,se,se,nw,se,n,se,n,se,s,se,s,ne,n,se,n,se,se,se,ne,s,se,se,ne,se,se,sw,se,s,nw,se,se,s,s,se,s,sw,sw,n,se,se,s,n,se,se,se,s,se,se,s,nw,nw,se,se,se,se,se,se,s,se,
se,se,se,s,nw,s,sw,se,sw,s,ne,nw,se,se,s,se,se,se,se,se,se,se,se,se,se,se,se,s,s,se,se,se,ne,se,ne,se,ne,se,se,se,se,se,se,s,se,s,n,se,se,n,s,s,s,nw,se,ne,se,se,s,se,se,sw,s,s,s,se,s,s,n,sw,s,se,se,se,s,ne,se,s,n,se,se,se,se,ne,se,nw,s,s,s,nw,se,n,s,n,se,se,se,s,se,s,se,nw,s,n,se,s,ne,s,se,s,se,s,s,sw,se,s,s,se,se,s,se,n,se,n,se,se,se,s,se,se,nw,se,se,n,se,se,ne,se,se,s,s,se,se,se,se,n,se,se,se,ne,nw,se,se,se,se,se,se,se,se,sw,s,se,s,se,s,s,sw,se,se,se,s,se,se,se,se,ne,s,s,se,se,s,se,se,se,se,sw,s,se,se,ne,nw,sw,se,s,se,s,se,n,s,sw,se,se,nw,nw,s,ne,s,se,se,s,sw,se,se,s,s,s,s,se,s,se,n,s,se,s,se,s,se,s,se,se,se,s,se,se,s,s,s,s,ne,se,se,se,ne,ne,s,sw,nw,se,se,s,s,se,s,nw,se,ne,se,se,s,sw,ne,se,se,s,sw,se,s,s,s,sw,s,se,se,ne,ne,se,sw,s,s,s,s,s,se,se,s,s,se,nw,se,s,s,s,se,s,s,se,sw,s,s,s,ne,se,se,s,se,s,se,s,se,nw,nw,s,se,s,ne,se,n,se,se,s,se,se,nw,s,s,se,s,s,se,n,se,se,s,se,se,se,se,s,se,se,se,s,s,se,s,se,ne,s,n,sw,n,sw,s,s,s,se,s,s,s,s,n,se,se,se,s,s,se,se,se,se,sw,s,s,s,se,ne,se,se,s,ne,s,s,se,sw,se,se,s,se,se,se,se,s,s,ne,s,s,s,se,s,se,s,ne,se,s,sw,nw,se,s,s,s,s,s,s,se,n,s,s,se,se,sw,se,s,se,s,se,se,se,s,sw,s,s,sw,se,se,se,se,s,se,sw,s,n,ne,se,s,s,se,n,s,sw,nw,se,s,nw,se,se,se,se,s,ne,s,sw,s,nw,s,se,se,s,se,ne,se,se,se,se,s,se,s,sw,s,n,ne,se,s,s,s,s,s,s,se,s,se,s,se,s,s,s,s,s,s,s,s,se,se,se,nw,se,se,s,s,s,s,se,s,s,nw,s,se,se,s,se,se,ne,sw,se,s,s,se,sw,s,s,se,se,se,s,se,n,sw,s,se,s,se,se,se,se,se,se,se,se,se,se,se,s,s,n,s,s,se,s,se,se,s,s,nw,se,s,se,s,se,s,s,s,ne,s,se,s,s,nw,s,se,se,se,s,n,s,se,s,s,s,s,se,s,nw,se,se,se,se,nw,nw,ne,s,se,se,s,sw,sw,se,se,s,s,s,se,s,s,s,se,se,s,se,se,ne,n,nw,s,s,s,se,se,nw,s,s,s,s,s,s,s,s,se,ne,se,s,s,se,se,s,s,s,s,ne,ne,s,sw,s,se,n,s,nw,nw,se,se,s,se,s,s,nw,s,ne,nw,se,s,s,se,se,se,s,se,s,sw,se,n,se,s,s,s,nw,se,sw,s,s,s,ne,s,s,s,s,s,s,sw,se,se,s,nw,s,se,se,s,s,s,s,s,s,s,se,nw,s,se,s,ne,s,se,s,s,n,s,s,s,se,s,s,s,s,s,se,s,s,sw,s,sw,s,n,s,nw,ne,se,s,ne,s,s,se,n,s,ne,se,s,se,s,s,s,sw,sw,s,s,s,s,sw,s,sw,se,s,n,n,s,s,se,s,se,s,ne,s,se,s,s,s,n
w,s,s,se,nw,s,s,s,s,s,s,s,s,s,nw,ne,n,s,n,s,s,se,s,s,se,se,s,s,s,s,s,ne,s,s,s,n,se,ne,ne,s,s,s,s,s,s,s,s,s,s,s,s,s,s,s,s,se,s,s,n,s,s,s,nw,se,ne,s,s,s,s,s,se,s,s,s,sw,s,s,s,s,s,s,s,s,se,s,n,ne,s,s,s,nw,n,s,s,s,s,sw,se,nw,s,se,n,s,s,sw,s,s,s,sw,s,s,s,se,s,s,s,s,n,s,s,s,s,s,se,sw,s,s,s,s,nw,s,s,s,nw,s,nw,s,s,se,s,s,se,s,sw,s,s,s,s,s,se,s,s,n,s,nw,s,s,s,ne,se,se,s,ne,s,sw,s,se,s,s,s,s,s,se,s,nw,nw,se,nw,s,s,s,nw,se,s,s,s,se,s,s,s,ne,se,s,s,s,n,s,s,s,se,s,s,s,s,sw,sw,s,nw,ne,s,s,n,s,s,s,se,ne,s,s,nw,s,s,s,s,s,n,nw,s,s,s,se,s,nw,s,ne,s,s,s,sw,s,s,n,s,s,s,s,s,s,s,n,nw,ne,se,sw,s,nw,n,s,s,s,se,s,s,s,s,s,s,sw,s,s,ne,s,n,nw,s,nw,ne,s,s,s,s,s,s,s,sw,s,s,s,s,s,s,s,sw,s,s,s,s,s,se,s,s,s,s,s,n,s,s,s,s,n,s,s,s,se,ne,nw,ne,ne,n,nw,n,n,nw,nw,nw,nw,sw,s,sw,ne,sw,nw,sw,s,sw,sw,sw,se,sw,sw,sw,sw,s,s,nw,sw,n,s,n,s,s,s,s,n,s,se,se,s,se,se,se,s,se,se,s,s,se,s,ne,n,se,nw,se,se,se,se,ne,se,se,se,n,se,se,sw,se,se,se,ne,se,ne,ne,n,se,s,se,ne,ne,ne,se,ne,s,ne,ne,ne,ne,ne,se,ne,ne,ne,ne,se,n,n,ne,s,s,ne,n,n,ne,ne,se,s,sw,ne,n,s,n,ne,ne,n,s,ne,ne,n,n,nw,n,n,n,s,ne,ne,n,ne,n,ne,n,n,n,ne,ne,nw,n,sw,n,n,nw,n,n,n,n,n,n,n,n,n,sw,n,n,n,n,n,n,n,nw,se,n,nw,n,n,n,n,se,nw,n,nw,n,n,nw,n,nw,n,nw,se,n,n,n,n,s,n,se,se,n,nw,nw,nw,n,n,nw,nw,s,n,nw,nw,nw,nw,nw,nw,nw,nw,nw,sw,n,nw,nw,nw,sw,nw,nw,nw,nw,nw,nw,nw,nw,nw,ne,nw,nw,nw,sw,nw,ne,ne,se,nw,nw,nw,nw,nw,sw,nw,sw,nw,sw,s,nw,nw,nw,nw,nw,nw,nw,nw,sw,sw,nw,nw,sw,nw,sw,n,nw,sw,nw,sw,nw,se,nw,se,s,s,nw,nw,nw,nw,s,sw,n,nw,n,nw,sw,nw,nw,n,sw,s,nw,sw,sw,nw,sw,sw,sw,ne,sw,n,s,sw,sw,sw,sw,sw,n,sw,sw,s,sw,sw,sw,ne,sw,sw,nw,sw,n,s,sw,sw,sw,sw,sw,s,sw,sw,sw,sw,sw,sw,sw,sw,sw,ne,sw,nw,sw,ne,sw,sw,n,sw,sw,sw,ne,sw,nw,sw,s,ne,sw,sw,sw,sw,s,sw,sw,se,sw,sw,sw,sw,s,sw,sw,s,ne,s,sw,s,sw,sw,s,n,s,se,sw,sw,se,se,nw,s,s,sw,sw,n,s,s,s,sw,s,se,sw,sw,se,sw,sw,sw,nw,n,s,sw,sw,s,sw,s,sw,s,s,sw,s,sw,s,sw,s,se,s,sw,sw,sw,sw,sw,s,s,sw,s,s,s,ne,s,s,ne,sw,sw,nw,s,s,se,sw,sw,s,s,s,n,s,s,s,ne,s,s,s,s,n,nw,s,s,s,s,s,s,s,s,sw,s,s,s,s,s,s,s,s,s,s,se,s,se,s,s,s,s,s,s,s,s,s,nw,s,s,nw,s,s,s,s,s,ne,n
,s,s,s,s,n,s,s,n,s,s,s,s,se,s,se,s,se,sw,se,s,s,se,s,s,se,s,s,s,s,se,nw,ne,n,se,s,s,s,s,s,sw,n,s,s,s,se,se,s,se,se,sw,se,ne,s,se,sw,se,nw,se,se,s,s,s,se,s,s,s,se,se,s,sw,se,s,n,s,s,se,n,se,se,s,se,s,ne,s,s,s,se,s,se,s,ne,s,se,se,nw,ne,se,se,se,s,s,s,se,s,nw,s,se,s,se,se,n,se,s,se,nw,se,se,se,se,se,se,se,s,se,s,nw,n,se,ne,se,s,se,se,ne,n,se,ne,se,se,se,s,se,ne,se,s,se,se,se,sw,s,s,se,se,s,se,nw,se,se,se,se,se,se,se,se,se,se,se,se,se,ne,se,se,s,se,se,se,s,se,se,se,se,se,se,se,se,se,se,se,s,se,sw,se,se,se,ne,se,se,se,se,se,se,ne,se,nw,se,se,se,se,ne,sw,sw,se,se,se,ne,n,se,ne,nw,se,sw,se,se,se,ne,ne,se,ne,se,se,se,sw,se,se,n,se,se,n,se,se,se,ne,se,se,sw,se,nw,s,se,se,n,ne,se,ne,ne,se,se,se,ne,ne,nw,se,se,s,sw,se,ne,se,ne,sw,s,se,ne,se,se,se,se,nw,se,se,se,se,se,se,se,se,ne,ne,ne,se,se,ne,se,n,se,se,ne,n,sw,se,ne,se,se,se,se,ne,se,sw,ne,ne,ne,ne,se,se,se,ne,se,se,se,ne,n,ne,se,se,ne,ne,ne,se,se,se,se,sw,nw,ne,ne,s,sw,ne,se,sw,se,sw,ne,nw,ne,ne,ne,ne,ne,se,ne,ne,ne,se,se,ne,ne,ne,s,se,ne,nw,se,se,ne,ne,ne,se,sw,ne,nw,sw,ne,ne,n,n,ne,ne,se,ne,nw,sw,se,ne,se,ne,se,se,ne,se,se,sw,ne,nw,s,ne,se,se,ne,sw,ne,ne,nw,s,ne,ne,ne,ne,ne,se,ne,nw,ne,ne,ne,se,ne,ne,ne,ne,sw,ne,ne,ne,ne,ne,ne,ne,ne,nw,ne,ne,se,se,ne,ne,ne,ne,se,ne,se,n,ne,ne,ne,s,se,sw,ne,ne,ne,ne,ne,ne,ne,ne,se,ne,sw,ne,ne,ne,ne,ne,ne,ne,n,n,ne,n,ne,ne,nw,ne,ne,ne,ne,se,ne,ne,ne,ne,ne,ne,ne,ne,se,ne,sw,ne,ne,ne,ne,ne,ne,ne,ne,ne,n,sw,ne,ne,ne,ne,ne,ne,sw,ne,ne,ne,s,ne,ne,ne,sw,ne,ne,n,ne,ne,ne,ne,ne,ne,ne,ne,s,n,ne,se,sw,ne,se,ne,ne,ne,ne,n,ne,ne,ne,se,n,nw,ne,ne,ne,sw,ne,n,nw,s,ne,ne,nw,s,ne,se,ne,ne,nw,n,ne,se,nw,ne,n,ne,ne,ne,ne,n,ne,ne,n,ne,n,ne,ne,n,ne,ne,n,n,ne,ne,ne,nw,ne,ne,ne,se,ne,ne,ne,n,ne,nw,ne,ne,ne,ne,n,n,ne,ne,ne,ne,sw,nw,ne,ne,n,ne,sw,n,ne,ne,ne,ne,ne,n,ne,n,n,n,s,n,ne,se,ne,n,ne,ne,s,s,ne,n,ne,n,nw,n,nw,ne,sw,s,ne,ne,ne,ne,ne,ne,n,ne,nw,s,ne,n,ne,se,ne,ne,n,ne,n,s,se,sw,ne,sw,n,ne,ne,sw,n,ne,n,n,sw,n,n,n,ne,n,sw,n,ne,ne,sw,sw,n,ne,n,ne,n,n,sw,n,n,ne,n,ne,n,ne,nw,ne,n,ne,n,n,ne,n,n,n,ne,n,ne,sw,n,n,se,
n,n,ne,ne,s,n,ne,n,ne,ne,ne,ne,n,ne,s,n,ne,n,nw,n,n,ne,n,ne,n,nw,n,n,ne,ne,n,n,n,ne,n,n,n,se,n,n,ne,n,n,n,n,n,n,n,n,n,n,n,s,n,ne,ne,n,n,n,n,n,ne,n,nw,nw,n,n,sw,ne,nw,ne,n,n,ne,n,s,n,ne,ne,n,sw,ne,n,ne,n,n,n,se,n,n,ne,n,n,n,ne,n,n,n,n,n,ne,nw,n,n,s,n,n,n,ne,n,se,n,n,s,n,n,ne,se,n,n,ne,n,ne,n,n,n,ne,se,n,n,sw,sw,n,s,n,se,n,n,n,n,sw,n,n,ne,n,n,n,n,n,n,n,n,n,n,n,n,n,nw,n,n,n,n,n,se,n,s,n,se,n,n,n,n,n,sw,n,ne,n,n,s,n,n,n,nw,n,n,n,n,nw,nw,n,n,n,n,n,s,n,n,ne,n,n,n,n,nw,sw,n,n,n,s,n,nw,s,n,s,n,n,n,n,n,n,n,n,n,n,n,n,n,se,nw,n,n,n,n,nw,n,sw,s,nw,n,n,n,n,n,n,se,n,n,n,n,n,n,ne,nw,n,n,n,n,n,se,n,n,n,n,n,n,n,n,n,n,n,n,sw,n,n,nw,se,se,nw,ne,n,n,n,nw,n,n,n,n,n,n,nw,n,nw,nw,s,nw,n,n,n,n,nw,nw,nw,nw,ne,n,nw,n,nw,n,ne,s,nw,se,n,nw,nw,nw,sw,n,n,se,nw,nw,nw,n,n,nw,nw,n,n,n,nw,nw,n,nw,sw,n,n,n,n,n,nw,n,nw,n,nw,s,n,nw,ne,nw,se,n,se,n,nw,n,n,s,n,s,n,n,nw,nw,ne,nw,n,sw,ne,nw,ne,nw,n,nw,nw,nw,se,nw,n,n,nw,se,nw,nw,n,se,n,n,n,n,n,nw,n,ne,n,n,n,sw,nw,nw,nw,nw,nw,n,n,n,s,nw,se,nw,nw,n,n,s,nw,n,ne,nw,n,se,n,nw,n,nw,nw,nw,n,n,nw,nw,nw,nw,n,nw,n,nw,nw,s,n,nw,n,nw,s,nw,n,sw,ne,nw,se,nw,nw,ne,n,n,n,se,nw,n,nw,nw,nw,nw,ne,nw,s,nw,nw,nw,nw,n,n,nw,se,nw,n,nw,ne,nw,sw,n,se,n,nw,sw,nw,nw,n,s,nw,nw,s,ne,nw,n,n,n,nw,n,n,nw,n,nw,n,nw,n,n,sw,nw,n,nw,n,nw,n,se,ne,nw,nw,n,sw,nw,n,nw,nw,nw,ne,nw,n,nw,nw,nw,nw,nw,nw,n,n,n,nw,ne,nw,n,n,s,nw,nw,ne,nw,nw,nw,nw,nw,nw,se,nw,n,nw,nw,nw,nw,nw,nw,n,nw,ne,n,nw,nw,nw,n,nw,nw,nw,nw,nw,n,nw,ne,nw,sw,s,nw,sw,nw,s,nw,nw,nw,se,n,n,sw,nw,nw,s,nw,nw,nw,s,n,ne,n,nw,s,n,n,nw,n,n,n,nw,nw,nw,nw,n,nw,nw,n,nw,nw,nw,s,sw,nw,nw,n,nw,n,nw,n,s,nw,nw,n,sw,n,nw,nw,n,nw,sw,se,n,nw,nw,nw,nw,n,n,nw,nw,n,nw,n,nw,nw,nw,nw,nw,se,nw,n,nw,nw,nw,n,nw,nw,nw,ne,s,nw,n,nw,s,se,n,nw,nw,n,nw,s,se,n,n,nw,nw,nw,sw,nw,n,nw,sw,nw,nw,s,nw,nw,nw,s,nw,s,sw,nw,nw,nw,nw,nw,nw,ne,nw,nw,se,nw,nw,nw,nw,n,nw,nw,se,nw,nw,nw,nw,nw,nw,n,s,nw,nw,n,nw,sw,nw,nw,nw,nw,nw,nw,s,nw,nw,nw,nw,sw,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,n,nw,nw,sw,nw,nw,nw,nw,nw,nw,nw,nw,nw,se,nw,nw,nw,nw,n,sw,nw,sw,n,nw,
nw,nw,se,nw,nw,sw,nw,sw,se,sw,nw,nw,sw,nw,nw,nw,nw,nw,n,s,ne,nw,s,nw,nw,nw,nw,n,nw,nw,s,nw,nw,nw,nw,nw,ne,ne,nw,nw,se,nw,sw,nw,nw,nw,nw,nw,ne,nw,nw,nw,nw,sw,ne,nw,nw,nw,sw,nw,nw,s,se,nw,nw,s,nw,nw,ne,nw,nw,nw,se,nw,se,n,nw,nw,sw,se,sw,nw,se,sw,nw,nw,nw,n,sw,nw,nw,nw,nw,nw,nw,se,nw,nw,sw,nw,sw,sw,nw,ne,nw,nw,sw,ne,nw,sw,sw,nw,nw,nw,sw,nw,nw,nw,n,sw,se,nw,sw,nw,nw,nw,se,nw,nw,sw,ne,nw,sw,sw,n,nw,nw,nw,sw,nw,nw,sw,nw,nw,sw,se,se,nw,nw,nw,nw,nw,nw,nw,sw,nw,sw,nw,nw,se,n,se,sw,nw,sw,nw,sw,se,nw,nw,sw,nw,nw,nw,n,sw,nw,nw,nw,sw,nw,sw,nw,nw,se,nw,nw,nw,nw,nw,ne,nw,ne,nw,nw,nw,nw,nw,nw,n,sw,nw,sw,n,nw,sw,sw,nw,nw,nw,n,n,nw,nw,sw,nw,nw,nw,sw,nw,nw,n,nw,n,nw,n,nw,nw,nw,nw,sw,sw,nw,sw,n,sw,sw,nw,nw,sw,sw,nw,sw,sw,nw,nw,nw,ne,nw,nw,nw,sw,n,nw,sw,nw,ne,sw,nw,ne,nw,nw,n,ne,nw,n,nw,s,n,sw,nw,nw,sw,nw,nw,s,nw,sw,sw,nw,se,sw,nw,sw,nw,sw,sw,sw,nw,s,se,sw,nw,nw,sw,se,sw,sw,ne,nw,nw,sw,sw,se,ne,nw,nw,s,se,ne,sw,nw,nw,nw,nw,sw,ne,ne,sw,nw,sw,nw,ne,nw,s,sw,sw,sw,nw,s,sw,nw,nw,nw,sw,nw,nw,ne,nw,n,nw,nw,nw,sw,nw,sw,sw,nw,nw,nw,nw,ne,ne,nw,nw,nw,s,s,sw,n,nw,sw,sw,sw,sw,sw,sw,nw,nw,s,s,nw,nw,sw,sw,sw,sw,nw,sw,sw,ne,nw,n,nw,nw,ne,sw,s,sw,ne,sw,ne,sw,sw,sw,sw,nw,sw,nw,sw,sw,nw,nw,s,nw,sw,nw,nw,n,sw,sw,sw,sw,nw,sw,nw,sw,n,nw,se,nw,sw,sw,sw,sw,nw,sw,sw,sw,sw,sw,nw,nw,sw,nw,sw,sw,sw,nw,sw,sw,sw,sw,sw,nw,ne,sw,nw,nw,nw,sw,nw,nw,nw,sw,sw,sw,nw,nw,sw,sw,sw,se,sw,nw,nw,sw,sw,ne,sw,sw,nw,sw,n,sw,nw,nw,sw,sw,sw,nw,sw,nw,sw,sw,s,nw,sw,sw,sw,se,s,sw,sw,sw,nw,s,sw,sw,sw,sw,sw,nw,sw,nw,sw,nw,nw,nw,sw,nw,sw,nw,nw,sw,nw,sw,sw,nw,sw,se,sw,sw,nw,sw,se,sw,ne,ne,sw,sw,sw,sw,nw,s,nw,nw,se,sw,nw,sw,nw,sw,sw,sw,sw,se,se,s,sw,sw,sw,nw,sw,nw,sw,nw,ne,nw,sw,sw,sw,s,s,sw,sw,sw,nw,se,sw,nw,sw,sw,se,nw,s,nw,sw,nw,sw,se,ne,sw,sw,sw,nw,nw,nw,nw,sw,nw,ne,sw,sw,nw,sw,sw,sw,sw,se,se,sw,sw,se,s,sw,se,ne,se,sw,sw,sw,sw,sw,sw,sw,sw,sw,sw,sw,sw,sw,sw,nw,nw,nw,nw,sw,sw,sw,sw,sw,sw,sw,nw,sw,s,sw,nw,sw,sw,sw,sw,sw,sw,sw,sw,sw,sw,sw,sw,sw,sw,sw,se,sw,n,sw,sw,sw,ne,sw,sw,sw,sw,s,ne,sw,sw,n,sw,sw,sw,nw,sw,sw,se,sw,sw,sw,sw,nw,sw,sw,sw,s
w,sw,sw,sw,sw,se,ne,sw,sw,sw,nw,nw,sw,sw,se,sw,sw,sw,sw,sw,sw,sw,sw,sw,sw,sw,sw,sw,sw,sw,nw,sw,n,sw,sw,sw,sw,sw,sw,sw,nw,sw,sw,sw,sw,sw,sw,se,sw,s,n,s,s,sw,sw,sw,sw,ne,sw,n,se,sw,sw,sw,sw".split(',')
# Walk the hex grid described by `steps` using axial coordinates:
# cy grows toward "north", cx toward "east".  With this mapping the
# distance from the origin is max(|cy|, |cx|, |cy - cx|).
cy = 0  # axial y coordinate of the current hex
cx = 0  # axial x coordinate of the current hex
mx = 0  # furthest distance from the origin reached at any point

# Each direction as a (dy, dx) delta; an unknown direction now fails
# loudly with a KeyError instead of being silently ignored.
MOVES = {
    'n':  (1, 0),
    's':  (-1, 0),
    'ne': (1, 1),
    'sw': (-1, -1),
    'nw': (0, -1),
    'se': (0, 1),
}

for step in steps:
    dy, dx = MOVES[step]
    cy += dy
    cx += dx
    # Track the maximum distance seen so far (part 2 of the puzzle).
    mx = max(abs(cy), abs(cx), abs(cy - cx), mx)

# print() with a single argument behaves identically on Python 2 and 3,
# unlike the original `print x` statements which are Python-2 only.
print(cy)
print(cx)
print(cy - cx)
print(mx)
| 776.827586
| 22,199
| 0.626953
| 8,288
| 22,528
| 1.704151
| 0.002534
| 0.226565
| 0.191164
| 0.161427
| 0.974936
| 0.911286
| 0.787737
| 0.628646
| 0.443005
| 0.321439
| 0
| 0.000491
| 0.004838
| 22,528
| 28
| 22,200
| 804.571429
| 0.629511
| 0
| 0
| 0.333333
| 0
| 0.041667
| 0.984996
| 0.984508
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.166667
| 0
| 0
| 1
| null | 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 12
|
67b5577ec1585868af7a815b30970983d7da5309
| 4,543
|
py
|
Python
|
utility/generate_subprocess_graph.py
|
CamLeon/Challenge
|
893502ec1722932fc264ab412bcffa8d06095e44
|
[
"MIT"
] | 2
|
2018-11-12T18:34:13.000Z
|
2018-11-12T18:36:21.000Z
|
utility/generate_subprocess_graph.py
|
CamLeon/Challenge
|
893502ec1722932fc264ab412bcffa8d06095e44
|
[
"MIT"
] | null | null | null |
utility/generate_subprocess_graph.py
|
CamLeon/Challenge
|
893502ec1722932fc264ab412bcffa8d06095e44
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
############################
# Last modified by Muguette#
############################
import networkx as nx
def generate_subprocess_graph(links):
    """Build the tree of subprocesses as a directed graph.

    :param links: list of childhood tuples (parent -> child)
    :return: a networkx DiGraph with one edge per parent/child relation
    """
    graph = nx.DiGraph(Type="Subprocesses")
    graph.add_edges_from(links)
    return graph
def generate_subprocess_tre(links):
    """Build the tree of subprocesses and identify its root.

    :param links: list of childhood tuples (parent -> child); the parent
        of the first tuple is taken to be the root process
    :return: tuple (graph, root) where graph is a networkx DiGraph of
        the relations and root is the parent of the first link
    """
    tree = nx.DiGraph(Type="Subprocesses")
    tree.add_edges_from(links)
    # Callers need the root process alongside the graph itself.
    root = links[0][0]
    return tree, root
def generate_RIP_behavior_graph(records):
    """Build one RIP-transition graph per subprocess.

    :param records: list of tuples (process, RIP, API_called), in
        chronological order
    :return: list of networkx DiGraphs, one per subprocess; nodes are
        RIP values carrying an ``API_calls`` list attribute, edges carry
        a ``weight`` counting how often one RIP followed another
    """
    # Each subprocess is associated to one graph, stored together with
    # the index of the last record seen for it (LIC = "Last I Checked").
    forest_and_LIC = {}  # {subprocess: (graph, index_of_last_record)}
    for i, record in enumerate(records):
        subprocess = record[0]
        RIP = record[1]
        API_call = record[2]
        if subprocess in forest_and_LIC:
            # This process had been seen before: link the previous RIP
            # to the current one.
            process_graph, process_LIC = forest_and_LIC[subprocess]
            # RIP of the last record (subprocess, _, _) for this process.
            lastRIP = records[process_LIC][1]
            if process_graph.has_edge(lastRIP, RIP):
                # Repeated transition: the edge weight is incremented.
                process_graph[lastRIP][RIP]['weight'] += 1
            else:
                # First time this transition is observed.
                process_graph.add_edge(lastRIP, RIP, weight=1)
            # BUG FIX: node attributes live in ``graph.nodes[RIP]``, not
            # ``graph[RIP]`` (which is the adjacency view of neighbors).
            # setdefault also gives the node its API_calls list when it
            # was created implicitly by add_edge above.
            api_calls = process_graph.nodes[RIP].setdefault('API_calls', [])
            if API_call not in api_calls:
                api_calls.append(API_call)
        else:
            # This process had not been seen before: start a new graph
            # with the first RIP node and its first API call recorded.
            process_graph = nx.DiGraph(process=subprocess)
            process_graph.add_node(RIP, API_calls=[API_call])
        # Remember the graph and the index of this record for next time.
        forest_and_LIC[subprocess] = (process_graph, i)
    return [forest_and_LIC[subprocess][0] for subprocess in forest_and_LIC.keys()]
def generate_API_behavior_graph(records):
    """Build one (RIP, API)-transition graph per subprocess.

    :param records: list of tuples (process, RIP, API_called), in
        chronological order
    :return: list of networkx DiGraphs, one per subprocess, whose nodes
        are (RIP, API) pairs and whose edges carry a ``weight`` counting
        how often one pair directly followed another
    """
    # Map each subprocess to (graph, index of its most recent record).
    # "LIC" stands for Last I Checked.
    forest_and_LIC = {}
    for index, record in enumerate(records):
        subprocess = record[0]
        RIP = record[1]
        API = record[2]
        if subprocess in forest_and_LIC.keys():
            # Subprocess already has a graph: connect the previous
            # (RIP, API) pair to the current one.
            graph, last_seen = forest_and_LIC[subprocess]
            previous = records[last_seen]
            src = (previous[1], previous[2])
            dst = (RIP, API)
            if graph.has_edge(src, dst):
                # Repeated transition: bump the edge weight.
                graph[src][dst]['weight'] += 1
            else:
                # First occurrence of this transition.
                graph.add_edge(src, dst, weight=1)
        else:
            # First record for this subprocess: start an empty graph
            # (nodes appear only once a transition is recorded).
            graph = nx.DiGraph(process=subprocess)
        # Store the graph together with this record's index.
        forest_and_LIC[subprocess] = (graph, index)
    return [forest_and_LIC[key][0] for key in forest_and_LIC.keys()]
| 37.237705
| 110
| 0.609949
| 558
| 4,543
| 4.777778
| 0.175627
| 0.076519
| 0.063016
| 0.066017
| 0.846212
| 0.824081
| 0.803076
| 0.790698
| 0.790698
| 0.790698
| 0
| 0.005547
| 0.285714
| 4,543
| 121
| 111
| 37.545455
| 0.816025
| 0.412283
| 0
| 0.595745
| 1
| 0
| 0.021635
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.085106
| false
| 0
| 0.021277
| 0
| 0.191489
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
67fae855d9bc494b92b6b66d608da6270812cb1a
| 21,942
|
py
|
Python
|
backend/billparser/tests/test_routes.py
|
Congress-Dev/congress-dev
|
a697b2590472e0e55f94cec35b3e57042649638d
|
[
"MIT"
] | 9
|
2020-03-09T01:18:43.000Z
|
2022-01-28T10:07:05.000Z
|
backend/billparser/tests/test_routes.py
|
Congress-Dev/congress-dev
|
a697b2590472e0e55f94cec35b3e57042649638d
|
[
"MIT"
] | 13
|
2020-03-24T12:57:38.000Z
|
2022-02-18T15:37:01.000Z
|
backend/billparser/tests/test_routes.py
|
Congress-Dev/congress-dev
|
a697b2590472e0e55f94cec35b3e57042649638d
|
[
"MIT"
] | 1
|
2022-02-15T20:52:55.000Z
|
2022-02-15T20:52:55.000Z
|
from unittest import TestCase, mock
import json
from billparser.__main__ import (
bills,
app,
bill_content,
bill_content_tree,
titles,
versions,
revisions,
version,
latest_sections,
sections,
contents,
)
from billparser.db.models import (
USCChapter,
USCSection,
USCContent,
USCContentDiff,
Version,
Legislation,
LegislationVersion,
LegislationContent,
LegislationVersionEnum,
LegislationChamber,
LegislationType,
)
# These tests pin the routes' exact serialized return values.  The
# frontend depends on these payload shapes, so any change here implies
# a corresponding frontend change.
class TestRoutes(TestCase):
@mock.patch("billparser.__main__.get_bills", return_value=[])
def test_bills_no_version(self, mock_get_bills):
"""
Should be returning a dict where the key is the bill, and the value is the bill metadata
"""
mock_get_bills.return_value = [
Legislation(
legislation_id=1,
legislation_type=LegislationType.Bill,
chamber=LegislationChamber.House,
title="Test House Bill",
number=1
),
Legislation(
legislation_id=2,
legislation_type=LegislationType.Bill,
chamber=LegislationChamber.Senate,
title="Test Senate Bill",
number=5
),
]
with app.app.test_request_context():
resp = bills()
self.assertEqual(
json.dumps(
{
"H-1": {
"bill_id": "1",
"chamber": "House",
"bill_type": "BillTypes.Bill",
"bill_number": "1",
"bill_title": "Test House Bill",
"versions": [],
},
"S-5": {
"bill_id": "2",
"chamber": "Senate",
"bill_type": "BillTypes.Bill",
"bill_number": "5",
"bill_title": "Test Senate Bill",
"versions": [],
},
}
),
resp,
)
@mock.patch("billparser.__main__.get_bills", return_value=[])
def test_bills_with_version(self, mock_get_bills):
"""
Should be returning a dict where the key is the bill, and the value is the bill metadata
Should also include the given bill versions for the bill
"""
mock_get_bills.return_value = [
Legislation(
legislation_id=1,
legislation_type=LegislationType.Bill,
chamber=LegislationChamber.House,
title="Test House Bill",
number=1,
versions=[
LegislationVersion(
legislation_version_id=1,
legislation_id=1,
legislation_version=LegislationVersionEnum.IH,
)
],
),
Legislation(
legislation_id=2,
legislation_type=LegislationType.Bill,
chamber=LegislationChamber.Senate,
title="Test Senate Bill",
number=5,
versions=[
LegislationVersion(
legislation_version_id=2,
legislation_id=2,
legislation_version=LegislationVersionEnum.IS,
)
],
),
]
with app.app.test_request_context():
resp = bills()
self.assertEqual(
json.dumps(
{
"H-1": {
"bill_id": "1",
"chamber": "House",
"bill_type": "BillTypes.Bill",
"bill_number": "1",
"bill_title": "Test House Bill",
"versions": [
{
"bill_version_id": "1",
"bill_id": "1",
"bill_version": "ih",
# "base_version_id": "1", # This was removed in the translation
}
],
},
"S-5": {
"bill_id": "2",
"chamber": "Senate",
"bill_type": "BillTypes.Bill",
"bill_number": "5",
"bill_title": "Test Senate Bill",
"versions": [
{
"bill_version_id": "2",
"bill_id": "2",
"bill_version": "is",
# "base_version_id": "1", # This was removed in the translation
}
],
},
}
),
resp,
resp,
)
@mock.patch("billparser.__main__.get_bill_contents", return_value=[])
def test_bill_content_1(self, mock_get_bill_contents):
"""
Should return the bill content objects
"""
mock_get_bill_contents.return_value = [
LegislationContent(
legislation_content_id=1,
parent_id=None,
order_number=0,
section_display="SS 1.)",
heading="Test heading",
content_str="Test content",
legislation_version_id=1,
content_type="section",
action_parse=[],
),
]
with app.app.test_request_context():
resp = bill_content("1")
self.assertEqual(
json.dumps(
[
{
"bill_content_id": 1,
"content_type": "section",
"order": 0,
# "number": "1", # Removed
"display": "SS 1.)",
"heading": "Test heading",
"content": "Test content",
"version": "1",
}
]
),
resp,
resp,
)
@mock.patch("billparser.__main__.get_bill_contents", return_value=[])
def test_bill_content_2(self, mock_get_bill_contents):
"""
Should return the bill content objects, multiple contents
"""
self.maxDiff = None
mock_get_bill_contents.return_value = [
LegislationContent(
legislation_content_id=1,
parent_id=None,
order_number=0,
section_display="SS 1.)",
heading="Test heading",
content_str="Test content",
legislation_version_id=1,
content_type="section",
action_parse=[],
),
LegislationContent(
legislation_content_id=2,
parent_id="1",
order_number=0,
section_display="a.)",
heading="",
content_str="Test subcontent",
legislation_version_id=1,
content_type="legis-body",
action_parse=[],
),
]
with app.app.test_request_context():
resp = bill_content("1")
self.assertEqual(
json.dumps(
[
{
"bill_content_id": 1,
"content_type": "section",
"order": 0,
# "number": "1", # Removed
"display": "SS 1.)",
"heading": "Test heading",
"content": "Test content",
"version": "1",
},
{
"bill_content_id": 2,
"content_type": "legis-body",
"order": 0,
"parent": "1",
# "number": "a", # Removed
"display": "a.)",
"heading": "",
"content": "Test subcontent",
"version": "1",
},
]
),
resp,
resp,
)
@mock.patch("billparser.__main__.get_bill_metadata", return_value=[])
@mock.patch("billparser.__main__.get_bill_contents", return_value=[])
def test_bill_content_tree_1(self, mock_get_bill_contents, mock_get_bill_metadata):
"""
Should return the bill content objects, and metadata
"""
mock_get_bill_metadata.return_value = {
"chamber": "House",
"number": "12",
"version": "1",
}
mock_get_bill_contents.return_value = [
LegislationContent(
legislation_content_id=1,
parent_id=None,
order_number=0,
section_display="SS 1.)",
heading="Test heading",
content_str="Test content",
legislation_version_id=1,
content_type="section",
action_parse=[],
),
LegislationContent(
legislation_content_id=2,
parent_id=1,
order_number=0,
section_display="a.)",
heading="",
content_str="Test subcontent",
legislation_version_id=1,
content_type="legis-body",
action_parse=[],
),
]
with app.app.test_request_context():
resp = bill_content_tree("1")
self.assertEqual(
json.dumps(
{
"content": {
"bill_content_id": 1,
"content_type": "section",
"order": 0,
#"number": "1", # Removed
"display": "SS 1.)",
"heading": "Test heading",
"content": "Test content",
"version": "1",
"child": [
{
"bill_content_id": 2,
"content_type": "legis-body",
"order": 0,
"parent": 1,
# "number": "a", # Removed
"display": "a.)",
"heading": "",
"content": "Test subcontent",
"version": "1",
"child": [],
}
],
},
"metadata": {
"chamber": "House",
"number": "12",
"version": "1",
},
}
),
resp,
resp,
)
@mock.patch("billparser.__main__.get_chapters", return_value=[])
def test_chapters(self, mock_get_chapters):
"""
Should return the chapter objects
"""
mock_get_chapters.return_value = [
USCChapter(
usc_chapter_id=1,
usc_ident="/usc/1",
short_title="01",
document="usc",
version_id=1,
)
]
with app.app.test_request_context():
resp = titles()
self.assertEqual(
json.dumps(
[{"chapter_id": 1, "ident": "/usc/1", "number": "01", "version": 1}]
),
resp,
resp,
)
@mock.patch("billparser.__main__.get_versions", return_value=[])
def test_versions(self, mock_get_versions):
"""
Should return the version objects
"""
mock_get_versions.return_value = [
Version(version_id=1, base_id=1)
]
with app.app.test_request_context():
resp = versions()
self.assertEqual(
json.dumps([{"version_id": 1, "title": "Legacy Title", "base_id": 1}]),
resp,
resp,
)
@mock.patch("billparser.__main__.get_revisions", return_value=[])
def test_revisions(self, mock_get_versions):
"""
Should return the version objects without a base id
"""
mock_get_versions.return_value = [
Version(
version_id=1, base_id=None
)
]
with app.app.test_request_context():
resp = revisions()
self.assertEqual(
json.dumps([{"version_id": 1, "title": "Legacy Title"}]), resp, resp,
)
@mock.patch("billparser.__main__.get_content_versions", return_value=[])
@mock.patch("billparser.__main__.get_diffs", return_value=[])
def test_get_version(self, mock_get_diffs, mock_get_content_versions):
"""
Should return the version objects without a base id
"""
mock_get_content_versions.return_value = [
USCContent(
usc_content_id=1,
usc_section_id=1,
parent_id=None,
usc_ident="/usc/s1/1",
usc_guid="1-2-3",
number="1",
section_display="S 1.)",
heading="Test - heading",
content_str="content - str",
version_id=1,
)
]
mock_get_diffs.return_value = [
USCContentDiff(
usc_content_diff_id=1,
usc_chapter_id=1,
usc_section_id=1,
usc_content_id=1,
order_number=0,
number="1",
section_display="test",
heading="test - heading",
content_str="test - content",
version_id=1,
)
]
with app.app.test_request_context(json={"version": 1}):
resp = version()
self.assertEqual(
json.dumps(
{
"diffs": [
{
"id": 1,
"content_id": 1,
"section_id": 1,
"chapter_id": 1,
"order": 0,
"number": "1",
"display": "test",
"heading": "test - heading",
"content": "test - content",
"version": 1,
}
],
"contents": [
{
"content_id": 1,
"section_id": 1,
"ident": "/usc/s1/1",
"number": "1",
"display": "S 1.)",
"heading": "Test - heading",
"content": "content - str",
"version": 1,
}
],
}
),
resp,
resp,
)
@mock.patch("billparser.__main__.get_latest_sections", return_value=[])
def test_latest_sections(self, mock_get_sections):
"""
Should return the section objects
"""
mock_get_sections.return_value = [
USCSection(
usc_section_id=1,
usc_ident="/usc/01/s1",
number="1",
section_display="S 1.)",
heading="Test - Heading",
usc_chapter_id=1,
version_id=1,
)
]
with app.app.test_request_context():
resp = latest_sections("1")
self.assertEqual(
json.dumps(
[
{
"section_id": 1,
"ident": "/usc/01/s1",
"number": "1",
"display": "S 1.)",
"heading": "Test - Heading",
"chapter_id": 1,
"version": 1,
}
]
),
resp,
resp,
)
@mock.patch("billparser.__main__.get_latest_sections", return_value=[])
def test_latest_sections(self, mock_get_sections):
"""
Should return the section objects
"""
mock_get_sections.return_value = [
USCSection(
usc_section_id=1,
usc_ident="/usc/01/s1",
number="1",
section_display="S 1.)",
heading="Test - Heading",
usc_chapter_id=1,
version_id=1,
)
]
with app.app.test_request_context():
resp = latest_sections("1")
self.assertEqual(
json.dumps(
[
{
"section_id": 1,
"ident": "/usc/01/s1",
"number": "1",
"display": "S 1.)",
"heading": "Test - Heading",
"chapter_id": 1,
"version": 1,
}
]
),
resp,
resp,
)
@mock.patch(
"billparser.__main__.get_latest_base", return_value=Version(version_id=1)
)
@mock.patch("billparser.__main__.get_sections", return_value=[])
def test_sections(self, mock_get_sections, mock_get_latest_base):
"""
Should return the section objects
"""
mock_get_sections.return_value = [
USCSection(
usc_section_id=1,
usc_ident="/usc/01/s1",
number="1",
section_display="S 1.)",
heading="Test - Heading",
usc_chapter_id=1,
version_id=1,
)
]
with app.app.test_request_context():
resp = sections("1")
self.assertEqual(
json.dumps(
[
{
"section_id": 1,
"ident": "/usc/01/s1",
"number": "1",
"display": "S 1.)",
"heading": "Test - Heading",
"chapter_id": 1,
"version": 1,
}
]
),
resp,
resp,
)
@mock.patch(
"billparser.__main__.get_latest_base", return_value=Version(version_id=1)
)
@mock.patch("billparser.__main__.get_content", return_value=[])
def test_contents(self, mock_get_content, mock_get_latest_base):
"""
Should return the content objects
"""
mock_get_content.return_value = [
USCContent(
usc_content_id=1,
usc_section_id=1,
parent_id=None,
order_number=0,
usc_ident="/usc/01/s1",
usc_guid="1-2-3",
number="1",
section_display="S 1.)",
heading="Test - Heading",
content_str="Content - Str",
version_id=1,
content_type="legis-body",
)
]
with app.app.test_request_context():
resp = contents("1")
self.assertEqual(
json.dumps(
[
{
"content_id": 1,
"content_type": "legis-body",
"section_id": 1,
"order": 0,
"ident": "/usc/01/s1",
"number": "1",
"display": "S 1.)",
"heading": "Test - Heading",
"content": "Content - Str",
"version": 1,
}
]
),
resp,
resp,
)
| 35.051118
| 100
| 0.383693
| 1,633
| 21,942
| 4.872015
| 0.080833
| 0.025641
| 0.027652
| 0.049145
| 0.823655
| 0.784565
| 0.74824
| 0.716315
| 0.704877
| 0.677979
| 0
| 0.019719
| 0.519278
| 21,942
| 625
| 101
| 35.1072
| 0.734547
| 0.04913
| 0
| 0.638989
| 0
| 0
| 0.135039
| 0.028463
| 0
| 0
| 0
| 0
| 0.023466
| 1
| 0.023466
| false
| 0
| 0.00722
| 0
| 0.032491
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c00eff01bb8b846ec60b3ab90fc6e20b088eb651
| 115
|
py
|
Python
|
jaitool/annotation/COCO/__init__.py
|
Jitesh17/jaitool
|
bcbb014808045d65c0f5b2bd587b1a418271f61e
|
[
"MIT"
] | 1
|
2021-01-22T00:38:41.000Z
|
2021-01-22T00:38:41.000Z
|
jaitool/annotation/COCO/__init__.py
|
Jitesh17/jaitool
|
bcbb014808045d65c0f5b2bd587b1a418271f61e
|
[
"MIT"
] | null | null | null |
jaitool/annotation/COCO/__init__.py
|
Jitesh17/jaitool
|
bcbb014808045d65c0f5b2bd587b1a418271f61e
|
[
"MIT"
] | 1
|
2021-02-26T05:23:23.000Z
|
2021-02-26T05:23:23.000Z
|
from .coco_dataset import *
from .coco_result_dataset import *
from .edit_coco_json import *
from .vis_ann import *
| 28.75
| 34
| 0.8
| 18
| 115
| 4.777778
| 0.5
| 0.348837
| 0.395349
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.130435
| 115
| 4
| 35
| 28.75
| 0.86
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
c06217b9ab0f7755adc860ea3d1999f131b06748
| 349
|
py
|
Python
|
tests/internal/instance_type/test_instance_type_m6_auto.py
|
frolovv/aws.ec2.compare
|
582805823492f833d65c0441c4a14dce697c12aa
|
[
"Apache-2.0"
] | null | null | null |
tests/internal/instance_type/test_instance_type_m6_auto.py
|
frolovv/aws.ec2.compare
|
582805823492f833d65c0441c4a14dce697c12aa
|
[
"Apache-2.0"
] | null | null | null |
tests/internal/instance_type/test_instance_type_m6_auto.py
|
frolovv/aws.ec2.compare
|
582805823492f833d65c0441c4a14dce697c12aa
|
[
"Apache-2.0"
] | 1
|
2021-12-15T11:58:22.000Z
|
2021-12-15T11:58:22.000Z
|
# Testing module instance_type.m6
import pytest
import ec2_compare.internal.instance_type.m6
def test_get_internal_data_instance_type_m6_get_instances_list():
assert len(ec2_compare.internal.instance_type.m6.get_instances_list()) > 0
def test_get_internal_data_instance_type_m6_get():
assert len(ec2_compare.internal.instance_type.m6.get) > 0
| 34.9
| 76
| 0.848138
| 56
| 349
| 4.839286
| 0.339286
| 0.265683
| 0.309963
| 0.250923
| 0.826568
| 0.826568
| 0.612546
| 0.612546
| 0.612546
| 0
| 0
| 0.034056
| 0.074499
| 349
| 9
| 77
| 38.777778
| 0.804954
| 0.088825
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 10
|
22593b7aaef4673c3bd78d1a3f74495167f49dc0
| 151
|
py
|
Python
|
onepay_new/commands.py
|
shaoren0110/onepay_flask
|
c736971113763ab5e1a67c85d5599137f3a373fc
|
[
"MIT"
] | null | null | null |
onepay_new/commands.py
|
shaoren0110/onepay_flask
|
c736971113763ab5e1a67c85d5599137f3a373fc
|
[
"MIT"
] | null | null | null |
onepay_new/commands.py
|
shaoren0110/onepay_flask
|
c736971113763ab5e1a67c85d5599137f3a373fc
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from flask import request, redirect, url_for
def redirect_back(html, **kwargs):
return redirect(url_for(html, **kwargs))
| 21.571429
| 44
| 0.688742
| 21
| 151
| 4.809524
| 0.714286
| 0.217822
| 0.277228
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007813
| 0.152318
| 151
| 7
| 45
| 21.571429
| 0.78125
| 0.139073
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 8
|
3f242f39616a288d30c663bd9ab5f87b6614d326
| 19,312
|
py
|
Python
|
src/Fig_2_supplement_5_Supralinear_network_with_initial_ISN.py
|
fmi-basel/gzenke-nonlinear-transient-amplification
|
f3b0c8c89b42c34f1aad740c7026865cf3164f1d
|
[
"MIT"
] | null | null | null |
src/Fig_2_supplement_5_Supralinear_network_with_initial_ISN.py
|
fmi-basel/gzenke-nonlinear-transient-amplification
|
f3b0c8c89b42c34f1aad740c7026865cf3164f1d
|
[
"MIT"
] | 3
|
2021-12-16T10:15:10.000Z
|
2021-12-16T12:54:24.000Z
|
src/Fig_2_supplement_5_Supralinear_network_with_initial_ISN.py
|
fmi-basel/gzenke-nonlinear-transient-amplification
|
f3b0c8c89b42c34f1aad740c7026865cf3164f1d
|
[
"MIT"
] | 1
|
2021-12-16T10:02:43.000Z
|
2021-12-16T10:02:43.000Z
|
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib import patches
import matplotlib.patches as mpatches
import scipy.io as sio
# plotting configuration
ratio = 1.5
figure_len, figure_width = 15*ratio, 12*ratio
font_size_1, font_size_2 = 36*ratio, 36*ratio
legend_size = 18*ratio
line_width, tick_len = 3*ratio, 10*ratio
marker_size = 15*ratio
plot_line_width = 5*ratio
hfont = {'fontname': 'Arial'}
b_plotting_activity = True
b_plotting_paradoxical_effect = False
b_plotting_frozen_inhibition = False
if b_plotting_activity:
# simulation setup
dt = 0.0001
T = int(9/dt)
l_g_e_ini = [1.55, 1.8]
for g_e_ini in l_g_e_ini:
Jacobian_mat = np.zeros((2, 2)) * np.nan
# neuronal parameters
tau_e, tau_i = 0.020, 0.010
alpha_e, alpha_i = 2, 2
# short-term depression
x, u_d = 1, 1
tau_x = 0.20
# network connectivity
Jee = 1.8
Jie = 1.0
Jei = 1.0
Jii = 0.6
r_e, r_i = 0, 0
z_e, z_i = 0, 0
l_r_e, l_r_i, l_x = [], [], []
l_ISN_index = []
for i in range(T):
if 50000 <= i < 70000:
g_e, g_i = 3.0, 2
else:
g_e, g_i = g_e_ini, 2
g_e = g_e * (g_e > 0)
g_i = g_i * (g_i > 0)
# SSN part
z_e = Jee * x * r_e - Jei * r_i + g_e
z_i = Jie * r_e - Jii * r_i + g_i
z_e = z_e * (z_e > 0)
z_i = z_i * (z_i > 0)
r_e = r_e + (-r_e + np.power(z_e, alpha_e)) / tau_e * dt
r_i = r_i + (-r_i + np.power(z_i, alpha_i)) / tau_i * dt
r_e = r_e * (r_e > 0)
r_i = r_i * (r_i > 0)
x = x + ((1 - x) / tau_x - u_d * x * r_e) * dt
x = np.clip(x, 0, 1)
Jacobian_mat[0, 0] = 1.0 / tau_e * (x * Jee * alpha_e * np.power(r_e, (alpha_e - 1.0) / alpha_e) - 1)
Jacobian_mat[0, 1] = 1.0 / tau_e * Jee * alpha_e * np.power(r_e, (2 * alpha_e - 1.0) / alpha_e)
Jacobian_mat[1, 0] = - u_d * x
Jacobian_mat[1, 1] = -1.0 / tau_x - u_d * r_e
lambda_1 = np.linalg.eig(Jacobian_mat)[0][0]
lambda_2 = np.linalg.eig(Jacobian_mat)[0][1]
l_ISN_index.append(np.max([lambda_1.real, lambda_2.real]))
l_r_e.append(r_e)
l_r_i.append(r_i)
l_x.append(x)
l_r_e = np.asarray(l_r_e)
l_r_i = np.asarray(l_r_i)
l_x = np.asarray(l_x)
l_ISN_index = np.asarray(l_ISN_index)
# plotting
plt.figure(figsize=(figure_len, figure_width))
ax = plt.gca()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(True)
ax.spines['left'].set_visible(True)
for axis in ['top', 'bottom', 'left', 'right']:
ax.spines[axis].set_linewidth(line_width)
plt.tick_params(width=line_width, length=tick_len)
plt.yscale('symlog', linthreshy=0.1)
plt.plot(l_r_e, color='blue', linewidth=plot_line_width)
plt.plot(l_r_i, color='red', linewidth=plot_line_width)
plt.xticks(np.arange(30000, 90000 + 5000, 20000), np.arange(0, 6 + 0.5, 2), fontsize=font_size_1, **hfont)
plt.yticks([0, 1, 100, 10000], fontsize=font_size_1, **hfont)
plt.xlabel('Time (s)', fontsize=font_size_1, **hfont)
plt.ylabel('Firing rate (Hz)', fontsize=font_size_1, **hfont)
plt.xlim([30000, 90000])
plt.ylim([0, 10000])
plt.legend(['Exc', 'Inh'], prop={"family": "Arial", 'size': font_size_1}, loc='upper right')
if g_e_ini == 1.55:
plt.savefig('paper_figures/png/Revision_Fig_Point_1_1_Supralinear_network_2D_EE_STP_initial_non_ISN.png')
plt.savefig('paper_figures/pdf/Revision_Fig_Point_1_1_Supralinear_network_2D_EE_STP_initial_non_ISN.pdf')
else:
plt.savefig('paper_figures/png/Revision_Fig_Point_1_1_Supralinear_network_2D_EE_STP_initial_ISN.png')
plt.savefig('paper_figures/pdf/Revision_Fig_Point_1_1_Supralinear_network_2D_EE_STP_initial_ISN.pdf')
# plotting
plt.figure(figsize=(figure_len, figure_width))
ax = plt.gca()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(True)
ax.spines['left'].set_visible(True)
for axis in ['top', 'bottom', 'left', 'right']:
ax.spines[axis].set_linewidth(line_width)
plt.tick_params(width=line_width, length=tick_len)
plt.plot(l_x, linewidth=plot_line_width)
plt.plot(np.sqrt(1.0/(Jee * alpha_e) * np.power(l_r_e, 1.0/alpha_e -1)), '--', linewidth=plot_line_width)
plt.xticks(np.arange(30000, 90000 + 5000, 20000), np.arange(0, 6 + 0.5, 2), fontsize=font_size_1, **hfont)
plt.yticks([0, 0.2, 0.4, 0.6, 0.8, 1.0], fontsize=font_size_1, **hfont)
plt.xlabel('Time (s)', fontsize=font_size_1, **hfont)
plt.ylabel('x', fontsize=font_size_1, **hfont)
plt.xlim([30000, 90000])
plt.ylim([0, 1])
if g_e_ini == 1.55:
plt.savefig('paper_figures/png/Revision_Fig_Point_1_1_Supralinear_network_x_initial_non_ISN.png')
plt.savefig('paper_figures/pdf/Revision_Fig_Point_1_1_Supralinear_network_x_initial_non_ISN.pdf')
else:
plt.savefig('paper_figures/png/Revision_Fig_Point_1_1_Supralinear_network_x_initial_ISN.png')
plt.savefig('paper_figures/pdf/Revision_Fig_Point_1_1_Supralinear_network_x_initial_ISN.pdf')
# plotting
plt.figure(figsize=(figure_len, figure_width))
ax = plt.gca()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(True)
ax.spines['left'].set_visible(True)
for axis in ['top', 'bottom', 'left', 'right']:
ax.spines[axis].set_linewidth(line_width)
plt.tick_params(width=line_width, length=tick_len)
plt.yscale('symlog', linthreshy=10)
plt.plot(l_ISN_index, linewidth=plot_line_width)
plt.xticks(np.arange(30000, 90000 + 5000, 20000), np.arange(0, 6 + 0.5, 2), fontsize=font_size_1, **hfont)
plt.yticks([-10000, -100, 0, 100, 10000], fontsize=font_size_1, **hfont)
plt.xlabel('Time (s)', fontsize=font_size_1, **hfont)
plt.ylabel('ISN index', fontsize=font_size_1, **hfont)
plt.xlim([30000, 90000])
plt.ylim([-10000, 10000])
plt.hlines(y=0, xmin=30000, xmax=90000, colors='k', linestyles=[(0, (6, 6, 6, 6))], linewidth=line_width)
if g_e_ini == 1.55:
plt.savefig('paper_figures/png/Revision_Fig_Point_1_1_Supralinear_network_ISN_index_initial_non_ISN.png')
plt.savefig('paper_figures/pdf/Revision_Fig_Point_1_1_Supralinear_network_ISN_index_initial_non_ISN.pdf')
else:
plt.savefig('paper_figures/png/Revision_Fig_Point_1_1_Supralinear_network_ISN_index_initial_ISN.png')
plt.savefig('paper_figures/pdf/Revision_Fig_Point_1_1_Supralinear_network_ISN_index_initial_ISN.pdf')
if b_plotting_paradoxical_effect:
# simulation setup
dt = 0.0001
T = int(9 / dt)
# neuronal parameters
tau_e, tau_i = 0.020, 0.010
alpha_e, alpha_i = 2, 2
# short-term depression
x, u_d = 1, 1
tau_x = 0.20
# network connectivity
Jee = 1.8
Jie = 1.0
Jei = 1.0
Jii = 0.6
l_b_before_stimulation = [True, False]
for b_before_stimulation in l_b_before_stimulation:
x = 1
r_e, r_i = 0, 0
z_e, z_i = 0, 0
l_r_e, l_r_i, l_x = [], [], []
for i in range(T):
if 50000 <= i < 70000:
g_e, g_i = 3.0, 2
else:
g_e, g_i = 1.8, 2
if b_before_stimulation:
if 42000 < i <= 49000:
g_i = 2.1
else:
pass
else:
if 62000 < i <= 69000:
g_i = 2.1
else:
pass
g_e = g_e * (g_e > 0)
g_i = g_i * (g_i > 0)
# SSN part
z_e = Jee * x * r_e - Jei * r_i + g_e
z_i = Jie * r_e - Jii * r_i + g_i
z_e = z_e * (z_e > 0)
z_i = z_i * (z_i > 0)
r_e = r_e + (-r_e + np.power(z_e, alpha_e)) / tau_e * dt
r_i = r_i + (-r_i + np.power(z_i, alpha_i)) / tau_i * dt
r_e = r_e * (r_e > 0)
r_i = r_i * (r_i > 0)
x = x + ((1 - x) / tau_x - u_d * x * r_e) * dt
x = np.clip(x, 0, 1)
l_r_e.append(r_e)
l_r_i.append(r_i)
l_x.append(x)
l_r_e = np.asarray(l_r_e)
l_r_i = np.asarray(l_r_i)
l_x = np.asarray(l_x)
if b_before_stimulation:
plt.figure(figsize=(figure_len, figure_width))
ax = plt.gca()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(True)
ax.spines['left'].set_visible(True)
for axis in ['top', 'bottom', 'left', 'right']:
ax.spines[axis].set_linewidth(line_width)
plt.tick_params(width=line_width, length=tick_len)
mean_e = l_r_e / np.mean(l_r_e[40000:42000])
mean_i = l_r_i / np.mean(l_r_i[40000:42000])
plt.plot(mean_e, color='blue', linewidth=plot_line_width)
plt.plot(mean_i, color='red', linewidth=plot_line_width)
plt.xticks([40000, 42000, 44000, 46000, 48000], [1.0, 1.2, 1.4, 1.6, 1.8], fontsize=font_size_1, **hfont)
plt.yticks([0, 0.2, 0.4, 0.6, 0.8, 1.0, 1.2], fontsize=font_size_1, **hfont)
plt.xlabel('Time (s)', fontsize=font_size_1, **hfont)
plt.ylabel('Normalized firing rate', fontsize=font_size_1, **hfont)
plt.xlim([40000, 48000])
plt.ylim([0, 1.2])
plt.legend(['Exc', 'Inh'], prop={"family": "Arial", 'size': font_size_1})
plt.hlines(y=1, xmin=42000, xmax=50000, colors='k', linestyles=[(0, (6, 6, 6, 6))], linewidth=line_width)
plt.savefig(
'paper_figures/png/Revision_Fig_Point_1_1_Supralinear_network_2D_EE_STP_normalized_activity_paradoxical_effect_before_stimulation_ISN.png')
plt.savefig(
'paper_figures/pdf/Revision_Fig_Point_1_1_Supralinear_network_2D_EE_STP_normalized_activity_paradoxical_effect_before_stimulation_ISN.pdf')
else:
plt.figure(figsize=(figure_len, figure_width))
ax = plt.gca()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(True)
ax.spines['left'].set_visible(True)
for axis in ['top', 'bottom', 'left', 'right']:
ax.spines[axis].set_linewidth(line_width)
plt.tick_params(width=line_width, length=tick_len)
mean_e = l_r_e / np.mean(l_r_e[60000:62000])
mean_i = l_r_i / np.mean(l_r_i[60000:62000])
plt.plot(mean_e, color='blue', linewidth=plot_line_width)
plt.plot(mean_i, color='red', linewidth=plot_line_width)
plt.xticks([60000, 62000, 64000, 66000, 68000], [3.0, 3.2, 3.4, 3.6, 3.8], fontsize=font_size_1,
**hfont)
plt.yticks([0.85, 0.9, 0.95, 1.0, 1.05], fontsize=font_size_1, **hfont)
plt.xlabel('Time (s)', fontsize=font_size_1, **hfont)
plt.ylabel('Normalized firing rate', fontsize=font_size_1, **hfont)
plt.xlim([60000, 68000])
plt.ylim([0.85, 1.05])
plt.legend(['Exc', 'Inh'], prop={"family": "Arial", 'size': font_size_1})
plt.hlines(y=1, xmin=62000, xmax=70000, colors='k', linestyles=[(0, (6, 6, 6, 6))], linewidth=line_width)
plt.savefig(
'paper_figures/png/Revision_Fig_Point_1_1_Supralinear_network_2D_EE_STP_normalized_activity_paradoxical_effect_during_stimulation_ISN.png')
plt.savefig(
'paper_figures/pdf/Revision_Fig_Point_1_1_Supralinear_network_2D_EE_STP_normalized_activity_paradoxical_effect_during_stimulation_ISN.pdf')
if b_plotting_frozen_inhibition:
# simulation setup
dt = 0.0001
T = int(9/dt)
# neuronal parameters
tau_e, tau_i = 0.020, 0.010
alpha_e, alpha_i = 2, 2
# adaptation
x, u_d = 1, 1
tau_x = 0.20
# network connectivity
Jee = 1.8
Jie = 1.0
Jei = 1.0
Jii = 0.6
l_b_before_stimulation = [True, False]
l_g_e_ini = [1.55, 1.8]
for g_e_ini in l_g_e_ini:
for b_before_stimulation in l_b_before_stimulation:
x = 1
r_e, r_i = 0, 0
z_e, z_i = 0, 0
l_r_e, l_r_i, l_x = [], [], []
for i in range(T):
if b_before_stimulation:
g_e, g_i = g_e_ini, 2
else:
if 50000 <= i:
g_e, g_i = 3.0, 2
else:
g_e, g_i = g_e_ini, 2
if b_before_stimulation:
if 42000 <= i < 42001:
r_e = r_e + 0.01
else:
pass
else:
if 62000 <= i < 62001:
r_e = r_e + 0.01
else:
pass
g_e = g_e * (g_e > 0)
g_i = g_i * (g_i > 0)
# SSN part
z_e = Jee * x * r_e - Jei * r_i + g_e
z_i = Jie * r_e - Jii * r_i + g_i
z_e = z_e * (z_e > 0)
z_i = z_i * (z_i > 0)
r_e = r_e + (-r_e + np.power(z_e, alpha_e)) / tau_e * dt
if b_before_stimulation:
if 42000 < i:
pass
else:
r_i = r_i + (-r_i + np.power(z_i, alpha_i)) / tau_i * dt
else:
if 62000 < i:
pass
else:
r_i = r_i + (-r_i + np.power(z_i, alpha_i)) / tau_i * dt
r_e = r_e * (r_e > 0)
r_i = r_i * (r_i > 0)
x = x + ((1 - x) / tau_x - u_d * x * r_e) * dt
x = np.clip(x, 0, 1)
l_r_e.append(r_e)
l_r_i.append(r_i)
l_x.append(x)
l_r_e = np.asarray(l_r_e)
l_r_i = np.asarray(l_r_i)
l_x = np.asarray(l_x)
if b_before_stimulation:
plt.figure(figsize=(figure_len, figure_width))
ax = plt.gca()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(True)
ax.spines['left'].set_visible(True)
for axis in ['top', 'bottom', 'left', 'right']:
ax.spines[axis].set_linewidth(line_width)
plt.tick_params(width=line_width, length=tick_len)
plt.yscale('symlog', linthreshy=0.1)
plt.plot(l_r_e, color='blue', linewidth=plot_line_width)
plt.plot(l_r_i, color='red', linewidth=plot_line_width)
plt.xticks(np.arange(30000, 90000 + 5000, 20000), np.arange(0, 6 + 0.5, 2), fontsize=font_size_1, **hfont)
plt.yticks([0, 1, 100, 10000], fontsize=font_size_1, **hfont)
plt.xlabel('Time (s)', fontsize=font_size_1, **hfont)
plt.ylabel('Firing rate (Hz)', fontsize=font_size_1, **hfont)
plt.xlim([30000, 90000])
plt.ylim([0, 10000])
plt.legend(['Exc', 'Inh'], prop={"family": "Arial", 'size': font_size_1}, loc='upper right')
plt.vlines(x=42001, ymin=0, ymax=10000, colors='k', linestyles=[(0, (6, 6, 6, 6))],
linewidth=line_width)
if g_e_ini == 1.55:
plt.savefig(
'paper_figures/png/Revision_Fig_Point_1_1_Supralinear_network_2D_EE_STP_normalized_activity_frozen_inhibition_before_stimulation_non_ISN.png')
plt.savefig(
'paper_figures/pdf/Revision_Fig_Point_1_1_Supralinear_network_2D_EE_STP_normalized_activity_frozen_inhibition_before_stimulation_non_ISN.pdf')
else:
plt.savefig(
'paper_figures/png/Revision_Fig_Point_1_1_Supralinear_network_2D_EE_STP_normalized_activity_frozen_inhibition_before_stimulation_ISN.png')
plt.savefig(
'paper_figures/pdf/Revision_Fig_Point_1_1_Supralinear_network_2D_EE_STP_normalized_activity_frozen_inhibition_before_stimulation_ISN.pdf')
else:
plt.figure(figsize=(figure_len, figure_width))
ax = plt.gca()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(True)
ax.spines['left'].set_visible(True)
for axis in ['top', 'bottom', 'left', 'right']:
ax.spines[axis].set_linewidth(line_width)
plt.tick_params(width=line_width, length=tick_len)
plt.yscale('symlog', linthreshy=0.1)
plt.plot(l_r_e, color='blue', linewidth=plot_line_width)
plt.plot(l_r_i, color='red', linewidth=plot_line_width)
plt.xticks(np.arange(30000, 90000 + 5000, 20000), np.arange(0, 6 + 0.5, 2), fontsize=font_size_1, **hfont)
plt.yticks([0, 1, 100, 10000], fontsize=font_size_1, **hfont)
plt.xlabel('Time (s)', fontsize=font_size_1, **hfont)
plt.ylabel('Firing rate (Hz)', fontsize=font_size_1, **hfont)
plt.xlim([30000, 90000])
plt.ylim([0, 10000])
plt.legend(['Exc', 'Inh'], prop={"family": "Arial", 'size': font_size_1}, loc='upper right')
plt.vlines(x=62001, ymin=0, ymax=10000, colors='k', linestyles=[(0, (6, 6, 6, 6))],
linewidth=line_width)
if g_e_ini == 1.55:
plt.savefig(
'paper_figures/png/Revision_Fig_Point_1_1_Supralinear_network_2D_EE_STP_normalized_activity_frozen_inhibition_during_stimulation_non_ISN.png')
plt.savefig(
'paper_figures/pdf/Revision_Fig_Point_1_1_Supralinear_network_2D_EE_STP_normalized_activity_frozen_inhibition_during_stimulation_non_ISN.pdf')
else:
plt.savefig(
'paper_figures/png/Revision_Fig_Point_1_1_Supralinear_network_2D_EE_STP_normalized_activity_frozen_inhibition_during_stimulation_ISN.png')
plt.savefig(
'paper_figures/pdf/Revision_Fig_Point_1_1_Supralinear_network_2D_EE_STP_normalized_activity_frozen_inhibition_during_stimulation_ISN.pdf')
| 40.233333
| 166
| 0.559911
| 2,863
| 19,312
| 3.452323
| 0.071603
| 0.012141
| 0.030959
| 0.048159
| 0.897208
| 0.890935
| 0.875354
| 0.868879
| 0.859875
| 0.853501
| 0
| 0.069263
| 0.315193
| 19,312
| 480
| 167
| 40.233333
| 0.67811
| 0.015793
| 0
| 0.756098
| 0
| 0
| 0.17332
| 0.140299
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.01626
| 0.01626
| 0
| 0.01626
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
58aca9f080ab575cbcc462f5aa1239ec8e7dec83
| 11,801
|
py
|
Python
|
lasagne/tests/layers/test_dense.py
|
Poberun/Lasagne31ac7d2bbc
|
d93eeeaf671377977144b7c8b978114e1cfb779a
|
[
"MIT"
] | 60
|
2015-01-29T21:54:04.000Z
|
2019-11-12T07:38:15.000Z
|
lasagne/tests/layers/test_dense.py
|
Poberun/Lasagne31ac7d2bbc
|
d93eeeaf671377977144b7c8b978114e1cfb779a
|
[
"MIT"
] | 5
|
2015-06-15T00:21:47.000Z
|
2017-09-14T10:24:40.000Z
|
lasagne/tests/layers/test_dense.py
|
Poberun/Lasagne31ac7d2bbc
|
d93eeeaf671377977144b7c8b978114e1cfb779a
|
[
"MIT"
] | 20
|
2015-04-28T00:21:41.000Z
|
2019-09-16T01:10:37.000Z
|
from mock import Mock
import numpy as np
import pytest
import theano
import lasagne
class TestDenseLayer:
@pytest.fixture
def DenseLayer(self):
from lasagne.layers.dense import DenseLayer
return DenseLayer
@pytest.fixture
def layer_vars(self, dummy_input_layer):
from lasagne.layers.dense import DenseLayer
W = Mock()
b = Mock()
nonlinearity = Mock()
W.return_value = np.ones((12, 3))
b.return_value = np.ones((3,)) * 3
layer = DenseLayer(
dummy_input_layer,
num_units=3,
W=W,
b=b,
nonlinearity=nonlinearity,
)
return {
'W': W,
'b': b,
'nonlinearity': nonlinearity,
'layer': layer,
}
@pytest.fixture
def layer(self, layer_vars):
return layer_vars['layer']
def test_init(self, layer_vars):
layer = layer_vars['layer']
assert (layer.W.get_value() == layer_vars['W'].return_value).all()
assert (layer.b.get_value() == layer_vars['b'].return_value).all()
layer_vars['W'].assert_called_with((12, 3))
layer_vars['b'].assert_called_with((3,))
def test_init_none_nonlinearity_bias(self, DenseLayer, dummy_input_layer):
layer = DenseLayer(
dummy_input_layer,
num_units=3,
nonlinearity=None,
b=None,
)
assert layer.nonlinearity == lasagne.nonlinearities.identity
assert layer.b is None
def test_get_params(self, layer):
assert layer.get_params() == [layer.W, layer.b]
assert layer.get_params(regularizable=False) == [layer.b]
assert layer.get_params(regularizable=True) == [layer.W]
assert layer.get_params(trainable=True) == [layer.W, layer.b]
assert layer.get_params(trainable=False) == []
assert layer.get_params(_nonexistent_tag=True) == []
assert layer.get_params(_nonexistent_tag=False) == [layer.W, layer.b]
def test_get_output_shape_for(self, layer):
assert layer.get_output_shape_for((5, 6, 7)) == (5, 3)
def test_get_output_for(self, layer_vars):
layer = layer_vars['layer']
nonlinearity = layer_vars['nonlinearity']
W = layer_vars['W']()
b = layer_vars['b']()
input = theano.shared(np.ones((2, 12)))
result = layer.get_output_for(input)
assert result is nonlinearity.return_value
# Check that the input to the nonlinearity was what we expect
# from dense layer, i.e. the dot product plus bias
nonlinearity_arg = nonlinearity.call_args[0][0]
assert (nonlinearity_arg.eval() ==
np.dot(input.get_value(), W) + b).all()
def test_get_output_for_flattens_input(self, layer_vars):
layer = layer_vars['layer']
nonlinearity = layer_vars['nonlinearity']
W = layer_vars['W']()
b = layer_vars['b']()
input = theano.shared(np.ones((2, 3, 4)))
result = layer.get_output_for(input)
assert result is nonlinearity.return_value
# Check that the input to the nonlinearity was what we expect
# from dense layer, i.e. the dot product plus bias
nonlinearity_arg = nonlinearity.call_args[0][0]
assert np.allclose(nonlinearity_arg.eval(),
np.dot(input.get_value().reshape(2, -1), W) + b)
def test_param_names(self, layer):
    """Unnamed layers expose bare parameter names."""
    for param, expected in [(layer.W, "W"), (layer.b, "b")]:
        assert param.name == expected
def test_named_layer_param_names(self, DenseLayer, dummy_input_layer):
    """A layer's name is dot-prefixed onto its parameter names."""
    layer = DenseLayer(dummy_input_layer, num_units=3, name="foo")
    assert layer.W.name == "foo.W"
    assert layer.b.name == "foo.b"
class TestNINLayer:
    """Tests for lasagne.layers.dense.NINLayer (per-location dense layer)."""

    @pytest.fixture
    def dummy_input_layer(self):
        # Real InputLayer wrapped in a Mock so only the attributes the
        # layer under test reads (shape, input_var, output_shape) exist.
        from lasagne.layers.input import InputLayer
        real = InputLayer((2, 3, 4, 5))
        fake = Mock(real)
        fake.shape = real.shape
        fake.input_var = real.input_var
        fake.output_shape = real.output_shape
        return fake

    @pytest.fixture
    def NINLayer(self):
        from lasagne.layers.dense import NINLayer
        return NINLayer

    @pytest.fixture
    def layer_vars(self, NINLayer, dummy_input_layer):
        # Initializers are mocks returning fixed arrays so the tests can
        # inspect both the stored values and the shapes requested.
        W, b, nonlinearity = Mock(), Mock(), Mock()
        W.return_value = np.ones((3, 5))
        b.return_value = np.ones((5,))
        layer = NINLayer(dummy_input_layer, num_units=5,
                         W=W, b=b, nonlinearity=nonlinearity)
        return dict(W=W, b=b, nonlinearity=nonlinearity, layer=layer)

    @pytest.fixture
    def layer(self, layer_vars):
        return layer_vars['layer']

    def test_init(self, layer_vars):
        """Initializers are called with the right shapes, values kept."""
        layer = layer_vars['layer']
        assert (layer.W.get_value() == layer_vars['W'].return_value).all()
        assert (layer.b.get_value() == layer_vars['b'].return_value).all()
        layer_vars['W'].assert_called_with((3, 5))
        layer_vars['b'].assert_called_with((5,))

    def test_init_none_nonlinearity_bias(self, NINLayer, dummy_input_layer):
        """nonlinearity=None falls back to identity; b=None drops bias."""
        layer = NINLayer(dummy_input_layer, num_units=3,
                         nonlinearity=None, b=None)
        assert layer.nonlinearity == lasagne.nonlinearities.identity
        assert layer.b is None

    def test_init_untie_biases(self, NINLayer, dummy_input_layer):
        """Untied biases have one entry per spatial position."""
        layer = NINLayer(dummy_input_layer, num_units=5, untie_biases=True)
        assert (layer.b.shape.eval() == (5, 4, 5)).all()

    def test_get_params(self, layer):
        """get_params honours the standard tags and ignores unknown ones."""
        W, b = layer.W, layer.b
        assert layer.get_params() == [W, b]
        assert layer.get_params(regularizable=False) == [b]
        assert layer.get_params(regularizable=True) == [W]
        assert layer.get_params(trainable=True) == [W, b]
        assert layer.get_params(trainable=False) == []
        assert layer.get_params(_nonexistent_tag=True) == []
        assert layer.get_params(_nonexistent_tag=False) == [W, b]

    def test_get_output_shape_for(self, layer):
        """Only the channel axis (axis 1) is replaced by num_units."""
        assert layer.get_output_shape_for((5, 6, 7, 8)) == (5, 5, 7, 8)

    @pytest.mark.parametrize("extra_kwargs", [
        {},
        {'untie_biases': True},
        {'b': None},
    ])
    def test_get_output_for(self, dummy_input_layer, extra_kwargs):
        """Output matches a NumPy reference of the per-location dot + bias."""
        from lasagne.layers.dense import NINLayer
        nonlinearity = Mock()
        layer = NINLayer(dummy_input_layer, num_units=6,
                         nonlinearity=nonlinearity, **extra_kwargs)
        input = theano.shared(np.random.uniform(-1, 1, (2, 3, 4, 5)))
        result = layer.get_output_for(input)
        assert result is nonlinearity.return_value
        nonlinearity_arg = nonlinearity.call_args[0][0]
        # Reference: move the channel axis, contract with W, add bias with
        # layout-appropriate broadcasting, then restore the axis order.
        X = np.rollaxis(input.get_value(), 1).T
        X = np.dot(X, layer.W.get_value())
        if layer.b is not None:
            if layer.untie_biases:
                X += layer.b.get_value()[:, np.newaxis].T
            else:
                X += layer.b.get_value()
        X = np.rollaxis(X.T, 0, 2)
        assert np.allclose(nonlinearity_arg.eval(), X)

    def test_param_names(self, layer):
        """Unnamed layers expose bare parameter names."""
        assert layer.W.name == "W"
        assert layer.b.name == "b"

    def test_named_layer_param_names(self, NINLayer, dummy_input_layer):
        """A layer's name is dot-prefixed onto its parameter names."""
        layer = NINLayer(dummy_input_layer, num_units=3, name="foo")
        assert layer.W.name == "foo.W"
        assert layer.b.name == "foo.b"
class TestNINLayer_c01b:
    """Tests for NINLayer_c01b from lasagne.layers.cuda_convnet.

    NOTE(review): the c01b layout appears to put channels first and batch
    last — the fixture's input shape (3, 4, 5, 2) and the shape assertions
    below are consistent with that; confirm against the layer docs.
    """
    @pytest.fixture
    def dummy_input_layer(self):
        # Real InputLayer wrapped in a Mock so only the attributes the
        # layer under test reads (shape, input_var, output_shape) exist.
        from lasagne.layers.input import InputLayer
        input_layer = InputLayer((3, 4, 5, 2))
        mock = Mock(input_layer)
        mock.shape = input_layer.shape
        mock.input_var = input_layer.input_var
        mock.output_shape = input_layer.output_shape
        return mock

    @pytest.fixture
    def NINLayer_c01b(self):
        # cuda_convnet is optional; skip these tests when unavailable.
        try:
            from lasagne.layers.cuda_convnet import NINLayer_c01b
        except ImportError:
            pytest.skip("cuda_convnet not available")
        return NINLayer_c01b

    @pytest.fixture
    def layer_vars(self, NINLayer_c01b, dummy_input_layer):
        # W/b initializers are mocks returning fixed arrays, so tests can
        # check both the produced values and the requested shapes.
        W = Mock()
        b = Mock()
        nonlinearity = Mock()
        W.return_value = np.ones((5, 3))
        b.return_value = np.ones((5,))
        layer = NINLayer_c01b(
            dummy_input_layer,
            num_units=5,
            W=W,
            b=b,
            nonlinearity=nonlinearity,
        )
        return {
            'W': W,
            'b': b,
            'nonlinearity': nonlinearity,
            'layer': layer,
        }

    @pytest.fixture
    def layer(self, layer_vars):
        return layer_vars['layer']

    def test_init(self, layer_vars):
        # Initializers are called with (num_units, channels) and their
        # return values are stored verbatim as the shared parameters.
        layer = layer_vars['layer']
        assert (layer.W.get_value() == layer_vars['W'].return_value).all()
        assert (layer.b.get_value() == layer_vars['b'].return_value).all()
        layer_vars['W'].assert_called_with((5, 3))
        layer_vars['b'].assert_called_with((5,))

    def test_init_none_nonlinearity_bias(self, NINLayer_c01b,
                                         dummy_input_layer):
        # nonlinearity=None falls back to identity; b=None omits the bias.
        layer = NINLayer_c01b(
            dummy_input_layer,
            num_units=3,
            nonlinearity=None,
            b=None,
        )
        assert layer.nonlinearity == lasagne.nonlinearities.identity
        assert layer.b is None

    def test_init_untie_biases(self, NINLayer_c01b, dummy_input_layer):
        # Untied biases get one entry per spatial position:
        # (num_units, rows, cols).
        layer = NINLayer_c01b(
            dummy_input_layer,
            num_units=5,
            untie_biases=True,
        )
        assert (layer.b.shape.eval() == (5, 4, 5)).all()

    def test_get_params(self, layer):
        # W is regularizable and trainable, b only trainable; a tag no
        # parameter carries selects nothing (True) / everything (False).
        assert layer.get_params() == [layer.W, layer.b]
        assert layer.get_params(regularizable=False) == [layer.b]
        assert layer.get_params(regularizable=True) == [layer.W]
        assert layer.get_params(trainable=True) == [layer.W, layer.b]
        assert layer.get_params(trainable=False) == []
        assert layer.get_params(_nonexistent_tag=True) == []
        assert layer.get_params(_nonexistent_tag=False) == [layer.W, layer.b]

    def test_get_output_shape_for(self, layer):
        # Only the leading (channel) axis is replaced by num_units.
        assert layer.get_output_shape_for((6, 7, 8, 5)) == (5, 7, 8, 5)

    @pytest.mark.parametrize("extra_kwargs", [
        {},
        {'untie_biases': True},
        {'b': None},
    ])
    def test_get_output_for(self, dummy_input_layer, NINLayer_c01b,
                            extra_kwargs):
        # Compare the layer output against a NumPy reference computation,
        # for tied biases, untied biases, and no bias at all.
        nonlinearity = Mock()
        layer = NINLayer_c01b(
            dummy_input_layer,
            num_units=6,
            nonlinearity=nonlinearity,
            **extra_kwargs
        )
        input = theano.shared(np.random.uniform(-1, 1, (3, 4, 5, 2)))
        result = layer.get_output_for(input)
        assert result is nonlinearity.return_value
        nonlinearity_arg = nonlinearity.call_args[0][0]
        # Reference: contract W with the flattened channel axis, restore
        # the spatial/batch axes, then add the bias with broadcasting that
        # matches the tied vs. untied layouts.
        X = input.get_value()
        W = layer.W.get_value()
        out = np.dot(W, X.reshape(X.shape[0], -1))
        out = out.reshape(W.shape[0], X.shape[1], X.shape[2], X.shape[3])
        if layer.b is not None:
            if layer.untie_biases:
                out += layer.b.get_value()[..., None]
            else:
                out += layer.b.get_value()[:, None, None, None]
        assert np.allclose(nonlinearity_arg.eval(), out)
| 32.599448
| 78
| 0.582578
| 1,456
| 11,801
| 4.504808
| 0.07967
| 0.077146
| 0.05946
| 0.064034
| 0.915536
| 0.908828
| 0.850587
| 0.826498
| 0.798902
| 0.79631
| 0
| 0.015623
| 0.300314
| 11,801
| 361
| 79
| 32.689751
| 0.778733
| 0.018388
| 0
| 0.727575
| 0
| 0
| 0.020986
| 0
| 0
| 0
| 0
| 0
| 0.199336
| 1
| 0.109635
| false
| 0
| 0.043189
| 0.009967
| 0.199336
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
58d97ee8b9da48b2e1db14afc2c4ef7d9799bb8a
| 38,759
|
py
|
Python
|
config.py
|
AlexanderFengler/nn_likelihoods
|
2d0f63a63eb50f026b9492acba14708b23dfcaa4
|
[
"MIT"
] | 2
|
2019-08-19T15:48:01.000Z
|
2020-03-13T12:47:23.000Z
|
config.py
|
AlexanderFengler/nn_likelihoods
|
2d0f63a63eb50f026b9492acba14708b23dfcaa4
|
[
"MIT"
] | null | null | null |
config.py
|
AlexanderFengler/nn_likelihoods
|
2d0f63a63eb50f026b9492acba14708b23dfcaa4
|
[
"MIT"
] | 6
|
2019-06-13T04:46:51.000Z
|
2021-01-27T18:26:59.000Z
|
import numpy as np
import yaml
import pickle
import cddm_data_simulation as cd
#import clba
import boundary_functions as bf
import os
# "output_folder": "/users/afengler/data/kde/test/method_comparison/",
# "model_folder": "/users/afengler/data/kde/test/keras_models/",
# #"custom_objects": {"huber_loss": tf.losses.huber_loss},
# #"fcn_custom_objects": {"heteroscedastic_loss": tf.losses.huber_loss},
# Global configuration registry shared by the simulators and trainers.
config = dict()

# Model-path lookup table. Use yaml.safe_load (the Loader-less yaml.load
# is deprecated and can execute arbitrary Python tags) and a context
# manager so the file handle is closed instead of leaked.
with open("model_paths_simple.yaml") as _model_paths_file:
    config['model_paths'] = yaml.safe_load(_model_paths_file)
# Per-machine base data directories, keyed by machine identifier.
config['base_data_folder'] = dict(
    af_x7='/media/data_cifs/projects/...',
    af_ccv='/users/afengler/data/proj_nn_likelihoods/',
    af_home='/Users/afengler/OneDrive/project_nn_likelihoods/data/nn_likelihoods/',
    af_home_test='/Users/afengler/OneDrive/project_nn_likelihoods/data/tests/',
    kle_ccv='',
    kle_home='',
    default='',
)
# Hyperparameters for the MLP likelihood network.
config['mlp_hyperparameters'] = dict(
    hidden_layers=[100, 100, 120, 1],
    hidden_activations=["tanh", "tanh", "tanh", "linear"],
    filters=[128, 128, 128, 128],
    batch_size=100000,
    n_epochs=100,          # CHANGE AGAIN
    learning_rate=.002,    # originally 0.0002
    momentum=.7,
    model_type="dnnregressor",
    optimizer="adam",
    log=True,
    loss="huber",
    gpu_x7='2',
)
# Filters applied to simulator output before it is accepted as training data.
config['mlp_simulation_filters'] = dict(
    mode=20,           # reject when the mode equals max_rt
    choice_cnt=10,     # each choice needs at least this many samples
    mean_rt=15,        # mean RT must stay below this value
    std=0,             # per-choice RT std must be positive
    mode_cnt_rel=0.5,  # mode may not absorb more than this share of samples
)
# Simulator hyperparameters shared by every data-generating process.
config['dgp_hyperparameters_global'] = dict(
    max_t=20,
    binned_max_t=10,
    s=1.0,
    delta_t=0.001,
    n_samples=100000,
)
config['model_data'] = {
"test":{
"dgp": cd.test,
"boundary": bf.constant,
"boundary_multiplicative": True,
"folder_suffix":'models/test/',
"param_names": ['v', 'a', 'w', 'ndt'],
"boundary_param_names": [],
"param_bounds_network": [[-2.0, 2.0], [0.3, 2], [0.2, 0.8], [0.0, 2.0]],
"param_bounds_sampler": [[-1.9, 1.9], [0.6, 1.4], [0.31, 0.69], [0.1, 0.9]],
"param_bounds_cnn": [[-2.5, 2.5], [0.2, 2], [0.1, 0.9], [0.0, 2.0]],
"boundary_param_bounds_network": [],
"boundary_param_bounds_sampler": [],
"boundary_param_bounds_cnn":[],
"dgp_hyperparameters": dict([['s', 1.0],
['delta_t', 0.01],
['max_t', config['dgp_hyperparameters_global']['max_t']],
['binned_max_t', config['dgp_hyperparameters_global']['binned_max_t']],
['n_samples', 20000],
['print_info', False],
['boundary', bf.constant],
['boundary_multiplicative', True],
['possible_choices', [-1, 1]]])
},
"levy":{
"dgp": cd.levy_flexbound,
"boundary": bf.constant,
"boundary_multiplicative": True,
"folder_suffix":'models/levy',
"param_names": ['v', 'a', 'w', 'alpha-diff', 'ndt'],
"boundary_param_names": [],
"param_bounds_network": [[-3.0, 3.0], [0.3, 2.0], [0.1, 0.9], [1.0, 2.0], [0.0, 2.0]],
"param_bounds_sampler": [[-2.7, 2.7], [0.4, 1.7], [0.3, 0.7], [1.1, 1.9], [0.1, 1.9]],
"param_bounds_cnn": [[-3, 3], [0.3, 2], [0.1, 0.9], [1.0, 2.0], [0.0, 2.0]],
"boundary_param_bounds_network": [],
"boundary_param_bounds_sampler": [],
"boundary_param_bounds_cnn":[],
"dgp_hyperparameters": dict([['s', 1.0],
['delta_t', 0.01],
['max_t', config['dgp_hyperparameters_global']['max_t']],
['binned_max_t', config['dgp_hyperparameters_global']['binned_max_t']],
['n_samples', 20000],
['print_info', False],
['boundary', bf.constant],
['boundary_multiplicative', True],
['possible_choices', [-1, 1]]])
},
"ddm":{
"dgp": cd.ddm_flexbound,
"boundary": bf.constant,
"boundary_multiplicative": True,
"folder_suffix": 'models/ddm/',
"param_names": ['v', 'a', 'w', 'ndt'],
"boundary_param_names": [],
"param_bounds_network": [[-3.0, 3.0], [0.3, 2.5], [0.1, 0.9], [0.0, 2.0]],
"param_bounds_sampler": [[-2.5, 2.5], [0.5, 2.2], [0.25, 0.75], [0.05, 1.95]],
"param_bounds_cnn": [[-3.0, 3.0], [0.3, 2.5], [0.1, 0.9], [0.0, 2.0]],
"boundary_param_bounds_network": [],
"boundary_param_bounds_sampler": [],
"boundary_param_bounds_cnn":[],
"dgp_hyperparameters": dict([['s', 1.0],
['delta_t', 0.01],
['max_t', config['dgp_hyperparameters_global']['max_t']],
['binned_max_t', config['dgp_hyperparameters_global']['binned_max_t']],
['n_samples', 20000],
['print_info', False],
['boundary', bf.constant],
['boundary_multiplicative', True],
['possible_choices', [-1, 1]]]),
},
"ddm_elife":{
"dgp": cd.ddm_flexbound,
"boundary": bf.constant,
"boundary_multiplicative": True,
"folder_suffix": 'models/ddm_elife',
"param_names": ['v', 'a', 'w', 'ndt'],
"boundary_param_names": [],
"param_bounds_network": [[-3.0, 3.0], [0.3, 2.5], [0.1, 0.9], [0.0, 2.0]],
"param_bounds_sampler": [[-2.5, 2.5], [0.5, 2.2], [0.25, 0.75], [0.05, 1.95]],
"param_bounds_cnn": [[-3.0, 3.0], [0.3, 2.5], [0.1, 0.9], [0.0, 2.0]],
"boundary_param_bounds_network": [],
"boundary_param_bounds_sampler": [],
"boundary_param_bounds_cnn":[],
"dgp_hyperparameters": dict([['s', 1.0],
['delta_t', 0.01],
['max_t', config['dgp_hyperparameters_global']['max_t']],
['binned_max_t', config['dgp_hyperparameters_global']['binned_max_t']],
['n_samples', 20000],
['print_info', False],
['boundary', bf.constant],
['boundary_multiplicative', True],
['possible_choices', [-1, 1]]]),
},
"ddm_analytic":{
"dgp": cd.ddm_flexbound,
"boundary": bf.constant,
"boundary_multiplicative": True,
"folder_suffix": 'models/ddm_analytic/',
"param_names": ['v', 'a', 'w', 'ndt'],
"boundary_param_names": [],
"boundary_param_bounds_sampler": [],
"boundary_param_bounds_network": [],
"boundary_param_bounds_cnn": [],
"param_bounds_network": [[-3.0, 3.0], [0.3, 2.5], [0.1, 0.9], [0.0, 2.0]],
"param_bounds_sampler": [[-2.5, 2.5], [0.5, 2.2], [0.25, 0.75], [0.05, 1.95]],
"param_bounds_cnn": [[-3.0, 3.0], [0.2, 2.2], [0.1, 0.9], [0.0, 2.0]],
"dgp_hyperparameters": dict([['s', 1.0],
['delta_t', 0.001],
['max_t', config['dgp_hyperparameters_global']['max_t']],
['binned_max_t', config['dgp_hyperparameters_global']['binned_max_t']],
['n_samples', 20000],
['print_info', False],
['boundary', bf.constant],
['boundary_multiplicative', True],
['possible_choices', [-1, 1]]]),
},
"ddm_analytic_elife":{
"dgp": cd.ddm_flexbound,
"boundary": bf.constant,
"boundary_multiplicative": True,
"folder_suffix": 'models/ddm_analytic_elife/',
"param_names": ['v', 'a', 'w', 'ndt'],
"boundary_param_names": [],
"boundary_param_bounds_sampler": [],
"boundary_param_bounds_network": [],
"boundary_param_bounds_cnn": [],
"param_bounds_network": [[-3.0, 3.0], [0.3, 2.5], [0.1, 0.9], [0.0, 2.0]],
"param_bounds_sampler": [[-2.5, 2.5], [0.5, 2.2], [0.25, 0.75], [0.05, 1.95]],
"param_bounds_cnn": [[-3.0, 3.0], [0.2, 2.2], [0.1, 0.9], [0.0, 2.0]],
"dgp_hyperparameters": dict([['s', 1.0],
['delta_t', 0.001],
['max_t', config['dgp_hyperparameters_global']['max_t']],
['binned_max_t', config['dgp_hyperparameters_global']['binned_max_t']],
['n_samples', 20000],
['print_info', False],
['boundary', bf.constant],
['boundary_multiplicative', True],
['possible_choices', [-1, 1]]]),
},
"ddm_sdv_analytic":{
"dgp": cd.ddm_sdv,
"boundary": bf.constant,
"boundary_multiplicative": True,
"folder_suffix": 'models/ddm_sdv_analytic/',
"param_names": ["v", "a", "w", "ndt", "sdv"],
"boundary_param_names": [],
"param_bounds_network": [[-3, 3], [0.3, 2.5], [0.1, 0.9], [0.0, 2.0], [0.0, 2.5]],
"param_bounds_sampler": [[-2.2, 2.2], [0.5, 2.2], [0.25, 0.75], [0.05, 1.95], [0.3, 2.2]],
"param_bounds_cnn": [[-3, 3], [0.3, 2.5], [0.1, 0.9], [0.0, 2.0], [0.0, 2.5]],
"boundary_param_bounds_network": [],
"boundary_param_bounds_sampler": [],
"boundary_param_bounds_cnn": [],
"dgp_hyperparameters": dict([['s', 1.0],
['delta_t', 0.001],
['max_t', config['dgp_hyperparameters_global']['max_t']],
['binned_max_t', config['dgp_hyperparameters_global']['binned_max_t']],
['n_samples', 20000],
['print_info', False],
['boundary', bf.constant],
['boundary_multiplicative', True],
['possible_choices', [-1, 1]]]),
},
"ddm_sdv_analytic_elife":{
"dgp": cd.ddm_sdv,
"boundary": bf.constant,
"boundary_multiplicative": True,
"folder_suffix": 'models/ddm_sdv_analytic_elife/',
"param_names": ["v", "a", "w", "ndt", "sdv"],
"boundary_param_names": [],
"param_bounds_network": [[-3, 3], [0.3, 2.5], [0.1, 0.9], [0.0, 2.0], [0.0, 2.5]],
"param_bounds_sampler": [[-2.2, 2.2], [0.5, 2.2], [0.25, 0.75], [0.05, 1.95], [0.3, 2.2]],
"param_bounds_cnn": [[-3, 3], [0.3, 2.5], [0.1, 0.9], [0.0, 2.0], [0.0, 2.5]],
"boundary_param_bounds_network": [],
"boundary_param_bounds_sampler": [],
"boundary_param_bounds_cnn": [],
"dgp_hyperparameters": dict([['s', 1.0],
['delta_t', 0.001],
['max_t', config['dgp_hyperparameters_global']['max_t']],
['binned_max_t', config['dgp_hyperparameters_global']['binned_max_t']],
['n_samples', 20000],
['print_info', False],
['boundary', bf.constant],
['boundary_multiplicative', True],
['possible_choices', [-1, 1]]]),
},
"ddm_sdv":{
"dgp": cd.ddm_sdv,
"boundary": bf.constant,
"boundary_multiplicative": True,
"folder_suffix": "models/ddm_sdv/",
"param_names": ["v", "a", "w", "ndt", "sdv"],
"boundary_param_names": [],
"param_bounds_network": [[- 2.5, 2.5], [0.3, 2.5], [0.1, 0.9], [0.0, 2.0], [0.0, 2.5]],
"param_bounds_sampler": [[- 2.2, 2.2], [0.5, 2.2], [0.25, 0.75], [0.05, 1.95], [0.3, 2.2]],
"param_bounds_cnn": [[- 3, 3], [0.3, 2.5], [0.1, 0.9], [0.0, 2.0], [0.0, 2.5]],
"boundary_param_bounds_network": [],
"boundary_param_bounds_sampler": [],
"boundary_param_bounds_cnn": [],
"dgp_hyperparameters": dict([['s', 1.0],
['delta_t', 0.001],
['max_t', config['dgp_hyperparameters_global']['max_t']],
['binned_max_t', config['dgp_hyperparameters_global']['binned_max_t']],
['n_samples', 20000],
['print_info', False],
['boundary', bf.constant],
['boundary_multiplicative', True],
['possible_choices', [-1, 1]]]),
},
"ddm_sdv_elife":{
"dgp": cd.ddm_sdv,
"boundary": bf.constant,
"boundary_multiplicative": True,
"folder_suffix": "models/ddm_sdv_elife/",
"param_names": ["v", "a", "w", "ndt", "sdv"],
"boundary_param_names": [],
"param_bounds_network": [[-2.5, 2.5], [0.3, 2.5], [0.1, 0.9], [0.0, 2.0], [0.0, 2.5]],
"param_bounds_sampler": [[-2.2, 2.2], [0.5, 2.2], [0.25, 0.75], [0.05, 1.95], [0.3, 2.2]],
"param_bounds_cnn": [[-3, 3], [0.3, 2.5], [0.1, 0.9], [0.0, 2.0], [0.0, 2.5]],
"boundary_param_bounds_network": [],
"boundary_param_bounds_sampler": [],
"boundary_param_bounds_cnn": [],
"dgp_hyperparameters": dict([['s', 1.0],
['delta_t', 0.001],
['max_t', config['dgp_hyperparameters_global']['max_t']],
['binned_max_t', config['dgp_hyperparameters_global']['binned_max_t']],
['n_samples', 20000],
['print_info', False],
['boundary', bf.constant],
['boundary_multiplicative', True],
['possible_choices', [-1, 1]]]),
},
"angle":{
"dgp": cd.ddm_flexbound,
"boundary": bf.angle,
"boundary_multiplicative": False,
"folder_suffix": "models/angle/",
"param_names": ["v", "a", "w", "ndt"],
"boundary_param_names": ["theta"],
"param_bounds_network": [[-2.0, 2.0], [0.3, 2], [0.2, 0.8], [0.0, 2.0]],
"param_bounds_sampler": [[-1.9, 1.9], [0.4, 1.9], [0.25, 0.75], [0.1, 1.9]],
"param_bounds_cnn": [[-2.5, 2.5], [0.2, 2.0], [0.1, 0.9], [0.0, 2.0]],
'boundary_param_bounds_network':[[0, (np.pi / 2 - .1)]],
"boundary_param_bounds_sampler": [[0.05, np.pi / 2 - .3]],
"boundary_param_bounds_cnn": [[0, (np.pi / 2 - .2)]],
"dgp_hyperparameters": dict([['s', 1.0],
['delta_t', 0.01],
['max_t', config['dgp_hyperparameters_global']['max_t']],
['binned_max_t', config['dgp_hyperparameters_global']['binned_max_t']],
['n_samples', 20000],
['print_info', False],
['boundary', bf.angle],
['boundary_multiplicative', False],
['possible_choices', [-1, 1]]]),
},
"angle2":{
"dgp": cd.ddm_flexbound,
"boundary": bf.angle,
"boundary_multiplicative": False,
"folder_suffix": "models/angle2/",
"param_names": ["v", "a", "w", "ndt"],
"boundary_param_names": ["theta"],
"param_bounds_network": [[-3.0, 3.0], [0.3, 2], [0.2, 0.8], [0.0, 2.0]],
"param_bounds_sampler": [[-2.7, 2.7], [0.4, 1.7], [0.3, 0.7], [0.1, 1.9]],
"param_bounds_cnn": [[-3.0, 3.0], [0.3, 2], [0.1, 0.9], [0.0, 2.0]],
'boundary_param_bounds_network':[[- .1, (np.pi / 2 - .1)]],
"boundary_param_bounds_sampler": [[- 0.05, np.pi / 2 - .3]],
"boundary_param_bounds_cnn": [[0, (np.pi / 2 - .2)]],
"dgp_hyperparameters": dict([['s', 1.0],
['delta_t', 0.01],
['max_t', config['dgp_hyperparameters_global']['max_t']],
['binned_max_t', config['dgp_hyperparameters_global']['binned_max_t']],
['n_samples', 20000],
['print_info', False],
['boundary', bf.angle],
['boundary_multiplicative', False],
['possible_choices', [-1, 1]]]),
},
"weibull_cdf":{
"dgp": cd.ddm_flexbound,
"boundary": bf.weibull_cdf,
"boundary_multiplicative": True,
"folder_suffix": "models/weibull_cdf/",
"param_names": ["v", "a", "w", "ndt"],
"boundary_param_names": ["alpha", "beta"],
"param_bounds_network": [[-2.0, 2.0], [0.3, 2], [0.2, 0.8], [0.0, 2.0]],
"param_bounds_sampler": [[-1.9, 1.9], [0.4, 1.9], [0.25, 0.75], [0.1, 1.9]],
"param_bounds_cnn": [[-2.5, 2.5], [0.2, 2.0], [0.1, 0.9], [0.0, 2.0]],
"boundary_param_bounds_network": [[0.3, 5.0], [0.3, 7.0]],
"boundary_param_bounds_sampler": [[0.55, 4.95], [0.55, 6.95]],
"boundary_param_bounds_cnn": [[0.5, 5.0], [0.5, 7.0]],
"dgp_hyperparameters": dict([['s', 1.0],
['delta_t', 0.01],
['max_t', config['dgp_hyperparameters_global']['max_t']],
['binned_max_t', config['dgp_hyperparameters_global']['binned_max_t']],
['n_samples', 20000],
['print_info', False],
['boundary', bf.weibull_cdf],
['boundary_multiplicative', True],
['possible_choices', [-1, 1]]]),
},
"weibull_cdf2":{
"dgp": cd.ddm_flexbound,
"boundary": bf.weibull_cdf,
"boundary_multiplicative": True,
"folder_suffix": "models/weibull_cdf2/",
"param_names": ["v", "a", "w", "ndt"],
"boundary_param_names": ["alpha", "beta"],
"param_bounds_network": [[-3.0, 3.0], [0.3, 2], [0.2, 0.8], [0.0, 2.0]],
"param_bounds_sampler": [[-2.7, 2.7], [0.4, 1.7], [0.3, 0.7], [0.1, 1.9]],
"param_bounds_cnn": [[-3.0, 3.0], [0.3, 2.0], [0.1, 0.9], [0.0, 2.0]],
"boundary_param_bounds_network": [[0.3, 5.0], [0.3, 7.0]],
"boundary_param_bounds_sampler": [[0.5, 4.5], [0.5, 6.5]],
"boundary_param_bounds_cnn": [[0.3, 5.0], [0.3, 7.0]],
"dgp_hyperparameters": dict([['s', 1.0],
['delta_t', 0.01],
['max_t', config['dgp_hyperparameters_global']['max_t']],
['binned_max_t', config['dgp_hyperparameters_global']['binned_max_t']],
['n_samples', 20000],
['print_info', False],
['boundary', bf.weibull_cdf],
['boundary_multiplicative', True],
['possible_choices', [-1, 1]]]),
},
"weibull_cdf_ext":{
"dgp": cd.ddm_flexbound,
"boundary": bf.weibull_cdf,
"boundary_multiplicative": True,
"folder_suffix": "models/weibull_cdf_ext/",
"param_names": ["v", "a", "w", "ndt"],
"boundary_param_names": ["alpha", "beta"],
"param_bounds_network": [[-2.5, 2.5], [0.3, 2.5], [0.2, 0.8], [0.0, 2.0]],
"param_bounds_sampler": [[-2.2, 2.2], [0.5, 2.2], [0.3, 0.7], [0.1, 1.9]],
"param_bounds_cnn": [[-2.5, 2.5], [0.3, 2.5], [0.2, 0.8], [0.0, 2.0]],
"boundary_param_bounds_network": [[0.3, 5.0], [0.3, 7.0]],
"boundary_param_bounds_sampler": [[0.5, 4.5], [0.5, 6.5]],
"boundary_param_bounds_cnn": [[0.3, 5.0], [0.3, 7.0]],
"dgp_hyperparameters": dict([['s', 1.0],
['delta_t', 0.01],
['max_t', config['dgp_hyperparameters_global']['max_t']],
['binned_max_t', config['dgp_hyperparameters_global']['binned_max_t']],
['n_samples', 20000],
['print_info', False],
['boundary', bf.weibull_cdf],
['boundary_multiplicative', True],
['possible_choices', [-1, 1]]]),
},
"ornstein":{
"dgp": cd.ornstein_uhlenbeck,
"boundary": bf.constant,
"boundary_multiplicative": True,
"folder_suffix": "models/ornstein/",
"param_names": ["v", "a", "w", "g", "ndt"],
'param_bounds_network': [[-2.0, 2.0], [0.3, 2], [0.2, 0.8], [-1.0, 1.0], [0.0, 2.0]],
"param_bounds_sampler": [[-1.9, 1.9], [0.4, 1.9], [0.25, 0.75], [-0.9, 0.9], [0.1, 1.9]],
"param_bounds_cnn": [[-2.5, 2.5], [0.2, 2.0], [0.1, 0.9], [-1.0, 1.0], [0.0, 2.0]],
"boundary_param_names": [],
"boundary_param_bounds_network": [],
"boundary_param_bounds_sampler": [],
"boundary_param_bounds_cnn": [],
"dgp_hyperparameters": dict([['s', 1.0],
['delta_t', 0.01],
['max_t', config['dgp_hyperparameters_global']['max_t']],
['binned_max_t', config['dgp_hyperparameters_global']['binned_max_t']],
['n_samples', 20000],
['print_info', False],
['boundary', bf.constant],
['boundary_multiplicative', True],
['possible_choices', [-1, 1]]]),
},
"full_ddm":{
"dgp": cd.full_ddm,
"boundary": bf.constant,
"boundary_multiplicative": True,
"folder_suffix": "models/full_ddm/",
"param_names": ["v", "a", "w", "ndt", "dw", "sdv", "dndt"],
"boundary_param_names": [],
"param_bounds_network": [[-2.0, 2.0], [0.3, 2], [0.3, 0.7], [0.25, 2.25], [0.0, 0.4], [0, 0.5], [0.0, 0.25]],
"param_bounds_sampler": [[-1.9, 1.9], [0.4, 1.9], [0.25, 0.65], [0.3, 2.2], [0.05, 0.35], [0.05, 0.45], [0.05, 0.2]],
"param_bounds_cnn": [[-2.5, 2.5], [0.2, 2.0], [0.1, 0.9], [0.25, 2.5], [0.0, 0.2], [0.0, 1.0], [0.0, 0.25]],
"boundary_param_bounds_network": [],
"boundary_param_bounds_sampler": [],
"boundary_param_bounds_cnn": [],
"dgp_hyperparameters": dict([['s', 1.0],
['delta_t', 0.01],
['max_t', config['dgp_hyperparameters_global']['max_t']],
['binned_max_t', config['dgp_hyperparameters_global']['binned_max_t']],
['n_samples', 20000],
['print_info', False],
['boundary', bf.constant],
['boundary_multiplicative', True],
['possible_choices', [-1, 1]]]),
},
"full_ddm2":{
"dgp": cd.full_ddm,
"boundary": bf.constant,
"boundary_multiplicative": True,
"folder_suffix": "models/full_ddm2/",
"param_names": ["v", "a", "w", "ndt", "dw", "sdv", "dndt"],
"boundary_param_names": [],
"param_bounds_network": [[-3.0, 3.0], [0.3, 2.5], [0.3, 0.7], [0.25, 2.25], [0.0, 0.2], [0.0, 2.0], [0.0, 0.25]],
"param_bounds_sampler": [[-2.5, 2.5], [0.5, 2.2], [0.35, 0.65], [0.3, 2.2], [0.05, 0.25], [0.0, 1.7], [0.05, 0.2]],
"param_bounds_cnn": [[-3.0, 3.0], [0.3, 2.5], [0.3, 0.7], [0.25, 2.25], [0.0, 0.2], [0.0, 2.0], [0.0, 0.25]],
"boundary_param_bounds_network": [],
"boundary_param_bounds_sampler": [],
"boundary_param_bounds_cnn": [],
"dgp_hyperparameters": dict([['s', 1.0],
['delta_t', 0.01],
['max_t', config['dgp_hyperparameters_global']['max_t']],
['binned_max_t', config['dgp_hyperparameters_global']['binned_max_t']],
['n_samples', 20000],
['print_info', False],
['boundary', bf.constant],
['boundary_multiplicative', True],
['possible_choices', [-1, 1]]]),
},
"race_3":{
"dgp": cd.race_model,
"boundary": bf.constant,
"boundary_multiplicative": True,
"folder_suffix": "models/race_3/",
"param_names": ["v_0", "v_1", "v_2", "a", "w_0", "w_1", "w_2", "ndt"],
"boundary_param_names": [],
"param_bounds_network":[[0, 2.0], [0, 2.0], [0, 2.0], [1.0, 3.0], [0.2, 0.8], [0.2, 0.8], [0.2, 0.8], [0.0, 1.0]],
"param_bounds_sampler": [[0.1, 1.9], [0.1, 1.9], [0.1, 1.9], [1.1, 2.9], [0.21, 0.79], [0.21, 0.79], [0.21, 0.79], [0.1, 0.9]],
"param_bounds_cnn": [[0.0, 2.5], [0.0, 2.5], [0.0, 2.5], [1.0, 3.0], [0.1, 0.9], [0.1, 0.9], [0.1, 0.9], [0.0, 2.0]],
"boundary_param_bounds_network": [],
"boundary_param_bounds_sampler": [],
"boundary_param_bounds_cnn": [],
"dgp_hyperparameters": dict([['s', 1.0],
['delta_t', 0.01],
['max_t', config['dgp_hyperparameters_global']['max_t']],
['binned_max_t', config['dgp_hyperparameters_global']['binned_max_t']],
['n_samples', 20000],
['print_info', False],
['boundary', bf.constant],
['boundary_multiplicative', True],
['possible_choices', [0, 1, 2]]]),
},
"race_4":{
"dgp": cd.race_model,
"boundary": bf.constant,
"boundary_multiplicative": True,
"folder_suffix": "models/race_4/",
"param_names": ["v_0", "v_1", "v_2", "v_3", "a", "w_0", "w_1", "w_2", "w_3", "ndt"],
"param_depends_on_n_choice": [1, 0, 1, 0],
"boundary_param_names": [],
"param_bounds_network":[[0, 2.0], [0, 2.0], [0, 2.0], [0, 2.0], [1.0, 3.0], [0.2, 0.8], [0.2, 0.8], [0.2, 0.8], [0.2, 0.8], [0.0, 1.0]],
"param_bounds_sampler": [[0.1, 1.9], [0.1, 1.9], [0.1, 1.9], [0.1, 1.9], [1.1, 2.9], [0.21, 0.79], [0.21, 0.79], [0.21, 0.79], [0.21, 0.79],[0.1, 0.9]],
"param_bounds_cnn": [[0.0, 2.5], [0.0, 2.5], [0.0, 2.5], [0.0, 2.5], [1.0, 3.0], [0.1, 0.9], [0.1, 0.9], [0.1, 0.9], [0.1, 0.9], [0.0, 2.0]],
"boundary_param_bounds_network": [],
"boundary_param_bounds_sampler": [],
"boundary_param_bounds_cnn": [],
"dgp_hyperparameters": dict([['s', 1.0],
['delta_t', 0.01],
['max_t', config['dgp_hyperparameters_global']['max_t']],
['binned_max_t', config['dgp_hyperparameters_global']['binned_max_t']],
['n_samples', 20000],
['print_info', False],
['boundary', bf.constant],
['boundary_multiplicative', True],
['possible_choices', [0, 1, 2, 3]]]),
},
"lca_3":{
"dgp": cd.lca,
"boundary": bf.constant,
"boundary_multiplicative": True,
"folder_suffix": "models/lca_3/",
"param_names": ['v_0', 'v_1', 'v_2', 'a', 'w_0', 'w_1', 'w_2', 'g', 'b', 'ndt'],
"param_depends_on_n_choice": [1, 0, 1, 0, 0, 0],
"boundary_param_names": [],
"param_bounds_network": [[0, 2.0], [0, 2.0], [0, 2.0], [1.0, 3.0], [0.2, 0.8], [0.2, 0.8], [0.2, 0.8], [-1.0, 1.0], [-1.0, 1.0], [0.0, 1.0]],
"param_bounds_sampler": [[0, 2.0], [0, 2.0], [0, 2.0], [1.0, 3.0], [0.2, 0.8], [0.2, 0.8], [0.2, 0.8], [-0.9, 0.9], [-0.9, 0.9], [0.05, 0.95]],
"param_bounds_cnn": [[0, 2.5], [0, 2.5], [0, 2.5], [1.0, 3.0], [0.1, 0.9], [0.1, 0.9], [0.1, 0.9], [-1.0, 1.0], [-1.0, 1.0], [0.0, 2.0]],
"boundary_param_bounds_cnn": [],
"boundary_param_bounds_network": [],
"boundary_param_bounds_sampler": [],
"dgp_hyperparameters": dict([['s', 1.0],
['delta_t', 0.01],
['max_t', config['dgp_hyperparameters_global']['max_t']],
['binned_max_t', config['dgp_hyperparameters_global']['binned_max_t']],
['n_samples', 20000],
['print_info', False],
['boundary', bf.constant],
['boundary_multiplicative', True],
['possible_choices', [0, 1, 2]]]),
},
"lca_4":{
"dgp": cd.lca,
"boundary": bf.constant,
"boundary_multiplicative": True,
"folder_suffix": "models/lca_4/",
"param_names": ['v_0', 'v_1', 'v_2', 'v_3', 'a', 'w_0', 'w_1', 'w_2', 'w_3', 'g', 'b', 'ndt'],
"param_depends_on_n_choice": [1, 0, 1, 0, 0, 0],
"boundary_param_names": [],
"param_bounds_network": [[0, 2.0], [0, 2.0], [0, 2.0], [0, 2.0], [1.0, 3.0], [0.2, 0.8], [0.2, 0.8], [0.2, 0.8], [0.2, 0.8], [-1.0, 1.0], [-1.0, 1.0], [0.0, 1.0]],
"param_bounds_sampler": [[0, 2.0], [0, 2.0], [0, 2.0], [0, 2.0], [1.0, 3.0], [0.2, 0.8], [0.2, 0.8], [0.2, 0.8], [0.2, 0.8], [-0.9, 0.9], [-0.9, 0.9], [0.05, 0.95]],
"param_bounds_cnn": [[0, 2.5], [0, 2.5], [0, 2.5], [0, 2.5], [1.0, 3.0], [0.1, 0.9], [0.1, 0.9], [0.1, 0.9], [0.1, 0.9], [-1.0, 1.0], [-1.0, 1.0], [0.0, 2.0]],
"boundary_param_bounds_cnn": [],
"boundary_param_bounds_network": [],
"boundary_param_bounds_sampler": [],
"dgp_hyperparameters": dict([['s', 1.0],
['delta_t', 0.01],
['max_t', config['dgp_hyperparameters_global']['max_t']],
['binned_max_t', config['dgp_hyperparameters_global']['binned_max_t']],
['n_samples', 20000],
['print_info', False],
['boundary', bf.constant],
['boundary_multiplicative', True],
['possible_choices', [0, 1, 2, 3]]]),
},
"ddm_seq2":{
"dgp": cd.ddm_flexbound_seq2,
"boundary": bf.constant,
"boundary_multiplicative": True,
"folder_suffix": "models/ddm_seq2/",
"param_names": ['v_h', 'v_l_1', 'v_l_2', 'a', 'w_h', 'w_l_1', 'w_l_2', 'ndt'],
"param_depends_on_n_choice": [0, 0, 0, 0, 0, 0, 0, 0],
"boundary_param_names": [],
"param_bounds_network": [[-2.0, 2.0], [-2.0, 2.0], [-2.0, 2.0],
[0.3, 2], [0.2, 0.8], [0.2, 0.8], [0.2, 0.8],[0.0, 2.0]],
"param_bounds_sampler": [[-1.9, 1.9], [0.6, 1.4], [0.31, 0.69], [0.1, 0.9]],
"param_bounds_cnn": [[-2.5, 2.5], [-2.5, 2.5], [-2.5, 2.5],
[0.2, 2], [0.1, 0.9], [0.1, 0.9], [0.1, 0.9], [0.0, 2.0]],
"boundary_param_bounds_network": [],
"boundary_param_bounds_sampler": [],
"boundary_param_bounds_cnn":[],
"dgp_hyperparameters": dict([['s', 1.0],
['delta_t', 0.01],
['max_t', config['dgp_hyperparameters_global']['max_t']],
['binned_max_t', config['dgp_hyperparameters_global']['binned_max_t']],
['n_samples', 20000],
['print_info', False],
['boundary', bf.constant],
['boundary_multiplicative', True],
['possible_choices', [0, 1, 2, 3]]]),
},
"ddm_seq2_angle":{
"dgp": cd.ddm_flexbound_seq2,
"boundary": bf.angle,
"boundary_multiplicative": False,
"folder_suffix": "models/ddm_seq2_angle/",
"param_names": ['v_h', 'v_l_1', 'v_l_2', 'a', 'w_h', 'w_l_1', 'w_l_2', 'ndt'],
"param_depends_on_n_choice": [0, 0, 0, 0, 0, 0, 0, 0],
"boundary_param_names": ['theta'],
"param_bounds_network": [[-2.0, 2.0], [-2.0, 2.0], [-2.0, 2.0],
[0.3, 2], [0.2, 0.8], [0.2, 0.8], [0.2, 0.8],[0.0, 2.0]],
"param_bounds_sampler": [[-1.9, 1.9], [0.6, 1.4], [0.31, 0.69], [0.1, 0.9]],
"param_bounds_cnn": [[-2.5, 2.5], [-2.5, 2.5], [-2.5, 2.5],
[0.2, 2], [0.1, 0.9], [0.1, 0.9], [0.1, 0.9], [0.0, 2.0]],
"boundary_param_bounds_network":[[0, (np.pi / 2 - .2)]],
"boundary_param_bounds_sampler": [[0.05, np.pi / 2 - .3]],
"boundary_param_bounds_cnn": [[0, (np.pi / 2 - .2)]],
"dgp_hyperparameters": dict([['s', 1.0],
['delta_t', 0.01],
['max_t', config['dgp_hyperparameters_global']['max_t']],
['binned_max_t', config['dgp_hyperparameters_global']['binned_max_t']],
['n_samples', 20000],
['print_info', False],
['boundary', bf.angle],
['boundary_multiplicative', False],
['possible_choices', [0, 1, 2, 3]]]),
},
"ddm_par2":{
"dgp": cd.ddm_flexbound_par2,
"boundary": bf.constant,
"boundary_multiplicative": True,
"folder_suffix": "models/ddm_par2/",
"param_names": ['v_h', 'v_l_1', 'v_l_2', 'a', 'w_h', 'w_l_1', 'w_l_2', 'ndt'],
"param_depends_on_n_choice": [0, 0, 0, 0, 0, 0, 0, 0],
"boundary_param_names": [],
"param_bounds_network": [[-2.0, 2.0], [-2.0, 2.0], [-2.0, 2.0],
[0.3, 2], [0.2, 0.8], [0.2, 0.8], [0.2, 0.8],[0.0, 2.0]],
"param_bounds_sampler": [[-1.9, 1.9], [0.6, 1.4], [0.31, 0.69], [0.1, 0.9]],
"param_bounds_cnn": [[-2.5, 2.5], [-2.5, 2.5], [-2.5, 2.5],
[0.2, 2], [0.1, 0.9], [0.1, 0.9], [0.1, 0.9], [0.0, 2.0]],
"boundary_param_bounds_network": [],
"boundary_param_bounds_sampler": [],
"boundary_param_bounds_cnn":[],
"dgp_hyperparameters": dict([['s', 1.0],
['delta_t', 0.01],
['max_t', config['dgp_hyperparameters_global']['max_t']],
['binned_max_t', config['dgp_hyperparameters_global']['binned_max_t']],
['n_samples', 20000],
['print_info', False],
['boundary', bf.constant],
['boundary_multiplicative', True],
['possible_choices', [0, 1, 2, 3]]]),
},
"ddm_par2_angle":{
"dgp": cd.ddm_flexbound_par2,
"boundary": bf.angle,
"boundary_multiplicative": False,
"folder_suffix": "models/ddm_par2_angle/",
"param_names": ['v_h', 'v_l_1', 'v_l_2', 'a', 'w_h', 'w_l_1', 'w_l_2', 'ndt'],
"param_depends_on_n_choice": [0, 0, 0, 0, 0, 0, 0, 0],
"boundary_param_names": ['theta'],
"param_bounds_network": [[-2.0, 2.0], [-2.0, 2.0], [-2.0, 2.0],
[0.3, 2], [0.2, 0.8], [0.2, 0.8], [0.2, 0.8],[0.0, 2.0]],
"param_bounds_sampler": [[-1.9, 1.9], [0.6, 1.4], [0.31, 0.69], [0.1, 0.9]],
"param_bounds_cnn": [[-2.5, 2.5], [-2.5, 2.5], [-2.5, 2.5],
[0.2, 2], [0.1, 0.9], [0.1, 0.9], [0.1, 0.9], [0.0, 2.0]],
"boundary_param_bounds_network":[[0, (np.pi / 2 - .2)]],
"boundary_param_bounds_sampler": [[0.05, np.pi / 2 - .3]],
"boundary_param_bounds_cnn": [[0, (np.pi / 2 - .2)]],
"dgp_hyperparameters": dict([['s', 1.0],
['delta_t', 0.01],
['max_t', config['dgp_hyperparameters_global']['max_t']],
['binned_max_t', config['dgp_hyperparameters_global']['binned_max_t']],
['n_samples', 20000],
['print_info', False],
['boundary', bf.angle],
['boundary_multiplicative', False],
['possible_choices', [0, 1, 2, 3]]]),
},
"ddm_mic2":{
"dgp": cd.ddm_flexbound_mic2,
"boundary": bf.constant,
"boundary_multiplicative": True,
"folder_suffix": "models/ddm_mic2/",
"param_names": ['v_h', 'v_l_1', 'v_l_2', 'a', 'w_h', 'w_l_1', 'w_l_2', 'd' ,'ndt'],
"param_depends_on_n_choice": [0, 0, 0, 0, 0, 0, 0, 0, 0],
"boundary_param_names": [],
"param_bounds_network": [[-2.0, 2.0], [-2.0, 2.0], [-2.0, 2.0],
[0.3, 2], [0.2, 0.8], [0.2, 0.8], [0.2, 0.8], [0.0, 1.0], [0.0, 2.0]],
"param_bounds_sampler": [[-1.9, 1.9], [0.6, 1.4], [0.31, 0.69], [0.1, 0.9]],
"param_bounds_cnn": [[-2.5, 2.5], [-2.5, 2.5], [-2.5, 2.5],
[0.2, 2], [0.1, 0.9], [0.1, 0.9], [0.1, 0.9], [0.0, 1.0], [0.0, 2.0]],
"boundary_param_bounds_network": [],
"boundary_param_bounds_sampler": [],
"boundary_param_bounds_cnn":[],
"dgp_hyperparameters": dict([['s', 1.0],
['delta_t', 0.01],
['max_t', config['dgp_hyperparameters_global']['max_t']],
['binned_max_t', config['dgp_hyperparameters_global']['binned_max_t']],
['n_samples', 20000],
['print_info', False],
['boundary', bf.constant],
['boundary_multiplicative', True],
['possible_choices', [0, 1, 2, 3]]]),
},
"ddm_mic2_angle":{
"dgp": cd.ddm_flexbound_mic2,
"boundary": bf.angle,
"boundary_multiplicative": False,
"folder_suffix": "models/ddm_mic2_angle/",
"param_names": ['v_h', 'v_l_1', 'v_l_2', 'a', 'w_h', 'w_l_1', 'w_l_2', 'd' ,'ndt'],
"param_depends_on_n_choice": [0, 0, 0, 0, 0, 0, 0, 0, 0],
"boundary_param_names": ['theta'],
"param_bounds_network": [[-2.0, 2.0], [-2.0, 2.0], [-2.0, 2.0],
[0.3, 2], [0.2, 0.8], [0.2, 0.8], [0.2, 0.8], [0.0, 1.0], [0.0, 2.0]],
"param_bounds_sampler": [[-1.9, 1.9], [0.6, 1.4], [0.31, 0.69], [0.1, 0.9]],
"param_bounds_cnn": [[-2.5, 2.5], [-2.5, 2.5], [-2.5, 2.5],
[0.2, 2], [0.1, 0.9], [0.1, 0.9], [0.1, 0.9], [0.0, 1.0], [0.0, 2.0]],
"boundary_param_bounds_network":[[0, (np.pi / 2 - .2)]],
"boundary_param_bounds_sampler": [[0.05, np.pi / 2 - .3]],
"boundary_param_bounds_cnn": [[0, (np.pi / 2 - .2)]],
"dgp_hyperparameters": dict([['s', 1.0],
['delta_t', 0.001],
['max_t', config['dgp_hyperparameters_global']['max_t']],
['binned_max_t', config['dgp_hyperparameters_global']['binned_max_t']],
['n_samples', 20000],
['print_info', False],
['boundary', bf.angle],
['boundary_multiplicative', False],
['possible_choices', [0, 1, 2, 3]]]),
},
}
| 53.46069
| 169
| 0.469491
| 5,187
| 38,759
| 3.259688
| 0.041835
| 0.026733
| 0.028034
| 0.017507
| 0.929442
| 0.921339
| 0.914301
| 0.899397
| 0.896913
| 0.888337
| 0
| 0.101634
| 0.308496
| 38,759
| 725
| 170
| 53.46069
| 0.529214
| 0.016745
| 0
| 0.760284
| 0
| 0
| 0.345382
| 0.151546
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.008511
| 0
| 0.008511
| 0.039716
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
58ef517c18b539e16905b821524295d7ae83859e
| 109
|
py
|
Python
|
rdoasis/algorithms/tasks/__init__.py
|
ResonantGeoData/RD-OASIS
|
6423aca34e5f4757279479b531241174e4cf98af
|
[
"Apache-2.0"
] | 2
|
2022-01-28T02:45:55.000Z
|
2022-02-08T22:09:29.000Z
|
rdoasis/algorithms/tasks/__init__.py
|
ResonantGeoData/RD-OASIS
|
6423aca34e5f4757279479b531241174e4cf98af
|
[
"Apache-2.0"
] | 31
|
2021-07-05T17:25:14.000Z
|
2022-03-29T14:36:07.000Z
|
rdoasis/algorithms/tasks/__init__.py
|
ResonantGeoData/RD-OASIS
|
6423aca34e5f4757279479b531241174e4cf98af
|
[
"Apache-2.0"
] | null | null | null |
from .docker import run_algorithm_task_docker # noqa
from .kubernetes import run_algorithm_task_k8s # noqa
| 36.333333
| 54
| 0.834862
| 16
| 109
| 5.3125
| 0.5625
| 0.211765
| 0.423529
| 0.517647
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010526
| 0.12844
| 109
| 2
| 55
| 54.5
| 0.884211
| 0.082569
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
4503be63b3e50a6f6828c0f24f0c1ee87047c3f9
| 205
|
py
|
Python
|
pyfiction/agents/agent.py
|
FPreta/pyfiction
|
a8af76c6badb11aa442122b1f2c4fbda1cf2ac53
|
[
"MIT"
] | 32
|
2016-05-28T06:12:38.000Z
|
2021-09-03T23:10:18.000Z
|
pyfiction/agents/agent.py
|
KailashDN/pyfiction
|
dc126d48578c53a3d2f95723c94da0afdd3282d0
|
[
"MIT"
] | 4
|
2019-12-16T20:18:25.000Z
|
2022-03-01T11:23:10.000Z
|
pyfiction/agents/agent.py
|
KailashDN/pyfiction
|
dc126d48578c53a3d2f95723c94da0afdd3282d0
|
[
"MIT"
] | 13
|
2017-08-15T13:14:00.000Z
|
2022-03-01T01:42:37.000Z
|
class Agent(object):
def __init__(self):
raise NotImplementedError("Agent is an abstract class.")
def act(self, **kwargs):
raise NotImplementedError("Agent is an abstract class.")
| 29.285714
| 64
| 0.678049
| 24
| 205
| 5.625
| 0.541667
| 0.355556
| 0.42963
| 0.459259
| 0.681481
| 0.681481
| 0.681481
| 0
| 0
| 0
| 0
| 0
| 0.214634
| 205
| 6
| 65
| 34.166667
| 0.838509
| 0
| 0
| 0.4
| 0
| 0
| 0.263415
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
452f91a064e4ecb98b5a81da17e344db546ad281
| 40
|
py
|
Python
|
src/lib/plistlib.py
|
DTenore/skulpt
|
098d20acfb088d6db85535132c324b7ac2f2d212
|
[
"MIT"
] | 2,671
|
2015-01-03T08:23:25.000Z
|
2022-03-31T06:15:48.000Z
|
src/lib/plistlib.py
|
wakeupmuyunhe/skulpt
|
a8fb11a80fb6d7c016bab5dfe3712517a350b347
|
[
"MIT"
] | 972
|
2015-01-05T08:11:00.000Z
|
2022-03-29T13:47:15.000Z
|
src/lib/plistlib.py
|
wakeupmuyunhe/skulpt
|
a8fb11a80fb6d7c016bab5dfe3712517a350b347
|
[
"MIT"
] | 845
|
2015-01-03T19:53:36.000Z
|
2022-03-29T18:34:22.000Z
|
import _sk_fail; _sk_fail._("plistlib")
| 20
| 39
| 0.775
| 6
| 40
| 4.333333
| 0.666667
| 0.461538
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.075
| 40
| 1
| 40
| 40
| 0.702703
| 0
| 0
| 0
| 0
| 0
| 0.2
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
1884940654a07681250d9b2926f193eb4fd05438
| 8,650
|
py
|
Python
|
model.py
|
Gonxolo/tarea3Sim
|
340e4b13e325b0b87fefd8fa323ed52ca5382c09
|
[
"MIT"
] | null | null | null |
model.py
|
Gonxolo/tarea3Sim
|
340e4b13e325b0b87fefd8fa323ed52ca5382c09
|
[
"MIT"
] | null | null | null |
model.py
|
Gonxolo/tarea3Sim
|
340e4b13e325b0b87fefd8fa323ed52ca5382c09
|
[
"MIT"
] | null | null | null |
"""
Model
"""
import numpy as np
from numpy.random import rand
import lib.basic_shapes as bs
import lib.local_shapes as loc_s
def generateSun(nTheta, nPhi):
vertices = []
indices = []
theta_angs = np.linspace(0, np.pi, nTheta, endpoint=True)
phi_angs = np.linspace(0, 2 * np.pi, nPhi, endpoint=True)
start_index = 0
for theta_ind in range(len(theta_angs)-1): # vertical
cos_theta = np.cos(theta_angs[theta_ind]) # z_top
cos_theta_next = np.cos(theta_angs[theta_ind + 1]) # z_bottom
sin_theta = np.sin(theta_angs[theta_ind])
sin_theta_next = np.sin(theta_angs[theta_ind + 1])
# d === c <---- z_top
# | |
# | |
# a === b <--- z_bottom
# ^ ^
# phi phi + dphi
for phi_ind in range(len(phi_angs)-1): # horizontal
cos_phi = np.cos(phi_angs[phi_ind])
cos_phi_next = np.cos(phi_angs[phi_ind + 1])
sin_phi = np.sin(phi_angs[phi_ind])
sin_phi_next = np.sin(phi_angs[phi_ind + 1])
# we will asume radius = 1, so scaling should be enough.
# x = cosφ sinθ
# y = sinφ sinθ
# z = cosθ
# X Y Z
a = np.array([cos_phi * sin_theta_next, sin_phi * sin_theta_next , cos_theta_next])
b = np.array([cos_phi_next * sin_theta_next, sin_phi_next * sin_theta_next, cos_theta_next])
c = np.array([cos_phi_next * sin_theta , sin_phi_next * sin_theta , cos_theta])
d = np.array([cos_phi * sin_theta , sin_phi * sin_theta , cos_theta])
_vertex, _indices = loc_s.createColorQuadIndexation(
start_index,
a, b, c, d,
color=[rand(), rand(), rand()]
)
vertices += _vertex
indices += _indices
start_index += 4
return bs.Shape(vertices, indices)
def generateSunNormals(nTheta, nPhi):
vertices = []
indices = []
theta_angs = np.linspace(0, np.pi, nTheta, endpoint=True)
phi_angs = np.linspace(0, 2 * np.pi, nPhi, endpoint=True)
start_index = 0
for theta_ind in range(len(theta_angs)-1): # vertical
cos_theta = np.cos(theta_angs[theta_ind]) # z_top
cos_theta_next = np.cos(theta_angs[theta_ind + 1]) # z_bottom
sin_theta = np.sin(theta_angs[theta_ind])
sin_theta_next = np.sin(theta_angs[theta_ind + 1])
# d === c <---- z_top
# | |
# | |
# a === b <--- z_bottom
# ^ ^
# phi phi + dphi
for phi_ind in range(len(phi_angs)-1): # horizontal
cos_phi = np.cos(phi_angs[phi_ind])
cos_phi_next = np.cos(phi_angs[phi_ind + 1])
sin_phi = np.sin(phi_angs[phi_ind])
sin_phi_next = np.sin(phi_angs[phi_ind + 1])
# we will asume radius = 1, so scaling should be enough.
# x = cosφ sinθ
# y = sinφ sinθ
# z = cosθ
# X Y Z
a = np.array([cos_phi * sin_theta_next, sin_phi * sin_theta_next , cos_theta_next])
b = np.array([cos_phi_next * sin_theta_next, sin_phi_next * sin_theta_next, cos_theta_next])
c = np.array([cos_phi_next * sin_theta , sin_phi_next * sin_theta , cos_theta])
d = np.array([cos_phi * sin_theta , sin_phi * sin_theta , cos_theta])
_vertex, _indices = loc_s.createColorQuadIndexation(
start_index,
a, b, c, d,
color=[rand(), rand(), rand()]
)
vertices += _vertex
indices += _indices
start_index += 4
return bs.Shape(vertices, indices)
def generateSphereShapeNormals(nTheta, nPhi):
vertices = []
indices = []
theta_angs = np.linspace(0, np.pi, nTheta, endpoint=True)
phi_angs = np.linspace(0, 2 * np.pi, nPhi, endpoint=True)
start_index = 0
for theta_ind in range(len(theta_angs)-1): # vertical
cos_theta = np.cos(theta_angs[theta_ind]) # z_top
cos_theta_next = np.cos(theta_angs[theta_ind + 1]) # z_bottom
sin_theta = np.sin(theta_angs[theta_ind])
sin_theta_next = np.sin(theta_angs[theta_ind + 1])
# d === c <---- z_top
# | |
# | |
# a === b <--- z_bottom
# ^ ^
# phi phi + dphi
for phi_ind in range(len(phi_angs)-1): # horizontal
cos_phi = np.cos(phi_angs[phi_ind])
cos_phi_next = np.cos(phi_angs[phi_ind + 1])
sin_phi = np.sin(phi_angs[phi_ind])
sin_phi_next = np.sin(phi_angs[phi_ind + 1])
# we will asume radius = 1, so scaling should be enough.
# x = cosφ sinθ
# y = sinφ sinθ
# z = cosθ
# X Y Z
a = np.array([cos_phi * sin_theta_next, sin_phi * sin_theta_next , cos_theta_next])
b = np.array([cos_phi_next * sin_theta_next, sin_phi_next * sin_theta_next, cos_theta_next])
c = np.array([cos_phi_next * sin_theta , sin_phi_next * sin_theta , cos_theta])
d = np.array([cos_phi * sin_theta , sin_phi * sin_theta , cos_theta])
a_n = 2*np.array([cos_phi * sin_theta_next, sin_phi * sin_theta_next , cos_theta_next])
b_n = 2*np.array([cos_phi_next * sin_theta_next, sin_phi_next * sin_theta_next, cos_theta_next])
c_n = 2*np.array([cos_phi_next * sin_theta , sin_phi_next * sin_theta , cos_theta])
d_n = 2*np.array([cos_phi * sin_theta , sin_phi * sin_theta , cos_theta])
mu = 0.5
sigma = 0.1
color = np.random.normal(mu, sigma, 3)
_vertex, _indices = loc_s.createColorSpecificNormals(start_index, a, b, c, d, a_n, b_n, c_n, d_n, color=color)
vertices += _vertex
indices += _indices
start_index += 4
return bs.Shape(vertices, indices)
def generateHumanNormals(nTheta, nPhi, color=(1.0,1.0,1.0)):
vertices = []
indices = []
theta_angs = np.linspace(0, np.pi, nTheta, endpoint=True)
phi_angs = np.linspace(0, 2 * np.pi, nPhi, endpoint=True)
start_index = 0
for theta_ind in range(len(theta_angs)-1): # vertical
cos_theta = np.cos(theta_angs[theta_ind]) # z_top
cos_theta_next = np.cos(theta_angs[theta_ind + 1]) # z_bottom
sin_theta = np.sin(theta_angs[theta_ind])
sin_theta_next = np.sin(theta_angs[theta_ind + 1])
# d === c <---- z_top
# | |
# | |
# a === b <--- z_bottom
# ^ ^
# phi phi + dphi
for phi_ind in range(len(phi_angs)-1): # horizontal
cos_phi = np.cos(phi_angs[phi_ind])
cos_phi_next = np.cos(phi_angs[phi_ind + 1])
sin_phi = np.sin(phi_angs[phi_ind])
sin_phi_next = np.sin(phi_angs[phi_ind + 1])
# we will asume radius = 1, so scaling should be enough.
# x = cosφ sinθ
# y = sinφ sinθ
# z = cosθ
# X Y Z
a = np.array([cos_phi * sin_theta_next, sin_phi * sin_theta_next , cos_theta_next])
b = np.array([cos_phi_next * sin_theta_next, sin_phi_next * sin_theta_next, cos_theta_next])
c = np.array([cos_phi_next * sin_theta , sin_phi_next * sin_theta , cos_theta])
d = np.array([cos_phi * sin_theta , sin_phi * sin_theta , cos_theta])
a_n = 2*np.array([cos_phi * sin_theta_next, sin_phi * sin_theta_next , cos_theta_next])
b_n = 2*np.array([cos_phi_next * sin_theta_next, sin_phi_next * sin_theta_next, cos_theta_next])
c_n = 2*np.array([cos_phi_next * sin_theta , sin_phi_next * sin_theta , cos_theta])
d_n = 2*np.array([cos_phi * sin_theta , sin_phi * sin_theta , cos_theta])
mu = 0.5
sigma = 0.1
_vertex, _indices = loc_s.createColorSpecificNormals(start_index, a, b, c, d, a_n, b_n, c_n, d_n, color=color)
vertices += _vertex
indices += _indices
start_index += 4
return bs.Shape(vertices, indices)
| 39.318182
| 122
| 0.535145
| 1,189
| 8,650
| 3.571068
| 0.069807
| 0.120584
| 0.079133
| 0.073481
| 0.947951
| 0.947951
| 0.947951
| 0.947951
| 0.947951
| 0.947951
| 0
| 0.012747
| 0.356069
| 8,650
| 219
| 123
| 39.497717
| 0.749551
| 0.134798
| 0
| 0.912
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032
| false
| 0
| 0.032
| 0
| 0.096
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
18fccbed1faab5ad00a26823d326b7350169f3c0
| 67
|
py
|
Python
|
jupyterlabpymolpysnips/Tragjectories/loadAmberTrajs.py
|
MooersLab/pymolpysnips
|
50a89c85adf8006d85c1d6cd3f8aad7e440a0b92
|
[
"MIT"
] | null | null | null |
jupyterlabpymolpysnips/Tragjectories/loadAmberTrajs.py
|
MooersLab/pymolpysnips
|
50a89c85adf8006d85c1d6cd3f8aad7e440a0b92
|
[
"MIT"
] | null | null | null |
jupyterlabpymolpysnips/Tragjectories/loadAmberTrajs.py
|
MooersLab/pymolpysnips
|
50a89c85adf8006d85c1d6cd3f8aad7e440a0b92
|
[
"MIT"
] | null | null | null |
cmd.do('load file.top, protein;')
cmd.do('load file.rst, protein')
| 22.333333
| 33
| 0.686567
| 12
| 67
| 3.833333
| 0.583333
| 0.217391
| 0.391304
| 0.565217
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.089552
| 67
| 2
| 34
| 33.5
| 0.754098
| 0
| 0
| 0
| 0
| 0
| 0.671642
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e146b74b9483e17cf89f845b0c38492561b36235
| 128
|
py
|
Python
|
python/testData/completion/heavyStarPropagation/lib/_pkg0/_pkg0_1/_pkg0_1_1/_pkg0_1_1_0/_pkg0_1_1_0_1/_mod0_1_1_0_1_2.py
|
truthiswill/intellij-community
|
fff88cfb0dc168eea18ecb745d3e5b93f57b0b95
|
[
"Apache-2.0"
] | 2
|
2018-12-29T09:53:39.000Z
|
2018-12-29T09:53:42.000Z
|
python/testData/completion/heavyStarPropagation/lib/_pkg0/_pkg0_1/_pkg0_1_1/_pkg0_1_1_0/_pkg0_1_1_0_1/_mod0_1_1_0_1_2.py
|
truthiswill/intellij-community
|
fff88cfb0dc168eea18ecb745d3e5b93f57b0b95
|
[
"Apache-2.0"
] | 173
|
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/testData/completion/heavyStarPropagation/lib/_pkg0/_pkg0_1/_pkg0_1_1/_pkg0_1_1_0/_pkg0_1_1_0_1/_mod0_1_1_0_1_2.py
|
truthiswill/intellij-community
|
fff88cfb0dc168eea18ecb745d3e5b93f57b0b95
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
name0_1_1_0_1_2_0 = None
name0_1_1_0_1_2_1 = None
name0_1_1_0_1_2_2 = None
name0_1_1_0_1_2_3 = None
name0_1_1_0_1_2_4 = None
| 14.222222
| 24
| 0.820313
| 40
| 128
| 1.875
| 0.175
| 0.4
| 0.466667
| 0.533333
| 0.88
| 0.88
| 0.746667
| 0
| 0
| 0
| 0
| 0.318182
| 0.140625
| 128
| 9
| 25
| 14.222222
| 0.363636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
e14973005ca59842fdee1ebd6ec69a774e2ba951
| 5,384
|
py
|
Python
|
tests/utils/test_password_manager.py
|
python-pitfalls/poetry
|
008ba9fbfcc329ea3c86bc7e0cb6a71855b672cf
|
[
"MIT"
] | 12,347
|
2019-12-12T07:07:32.000Z
|
2022-03-31T21:08:50.000Z
|
tests/utils/test_password_manager.py
|
python-pitfalls/poetry
|
008ba9fbfcc329ea3c86bc7e0cb6a71855b672cf
|
[
"MIT"
] | 3,483
|
2019-12-11T20:20:20.000Z
|
2022-03-31T23:18:18.000Z
|
tests/utils/test_password_manager.py
|
python-pitfalls/poetry
|
008ba9fbfcc329ea3c86bc7e0cb6a71855b672cf
|
[
"MIT"
] | 1,399
|
2019-12-12T12:27:46.000Z
|
2022-03-31T09:12:53.000Z
|
import os
import pytest
from poetry.utils.password_manager import KeyRing
from poetry.utils.password_manager import KeyRingError
from poetry.utils.password_manager import PasswordManager
def test_set_http_password(config, with_simple_keyring, dummy_keyring):
manager = PasswordManager(config)
assert manager.keyring.is_available()
manager.set_http_password("foo", "bar", "baz")
assert "baz" == dummy_keyring.get_password("poetry-repository-foo", "bar")
auth = config.get("http-basic.foo")
assert "bar" == auth["username"]
assert "password" not in auth
def test_get_http_auth(config, with_simple_keyring, dummy_keyring):
dummy_keyring.set_password("poetry-repository-foo", "bar", "baz")
config.auth_config_source.add_property("http-basic.foo", {"username": "bar"})
manager = PasswordManager(config)
assert manager.keyring.is_available()
auth = manager.get_http_auth("foo")
assert "bar" == auth["username"]
assert "baz" == auth["password"]
def test_delete_http_password(config, with_simple_keyring, dummy_keyring):
dummy_keyring.set_password("poetry-repository-foo", "bar", "baz")
config.auth_config_source.add_property("http-basic.foo", {"username": "bar"})
manager = PasswordManager(config)
assert manager.keyring.is_available()
manager.delete_http_password("foo")
assert dummy_keyring.get_password("poetry-repository-foo", "bar") is None
assert config.get("http-basic.foo") is None
def test_set_pypi_token(config, with_simple_keyring, dummy_keyring):
manager = PasswordManager(config)
assert manager.keyring.is_available()
manager.set_pypi_token("foo", "baz")
assert config.get("pypi-token.foo") is None
assert "baz" == dummy_keyring.get_password("poetry-repository-foo", "__token__")
def test_get_pypi_token(config, with_simple_keyring, dummy_keyring):
dummy_keyring.set_password("poetry-repository-foo", "__token__", "baz")
manager = PasswordManager(config)
assert manager.keyring.is_available()
assert "baz" == manager.get_pypi_token("foo")
def test_delete_pypi_token(config, with_simple_keyring, dummy_keyring):
dummy_keyring.set_password("poetry-repository-foo", "__token__", "baz")
manager = PasswordManager(config)
assert manager.keyring.is_available()
manager.delete_pypi_token("foo")
assert dummy_keyring.get_password("poetry-repository-foo", "__token__") is None
def test_set_http_password_with_unavailable_backend(config, with_fail_keyring):
manager = PasswordManager(config)
assert not manager.keyring.is_available()
manager.set_http_password("foo", "bar", "baz")
auth = config.get("http-basic.foo")
assert "bar" == auth["username"]
assert "baz" == auth["password"]
def test_get_http_auth_with_unavailable_backend(config, with_fail_keyring):
config.auth_config_source.add_property(
"http-basic.foo", {"username": "bar", "password": "baz"}
)
manager = PasswordManager(config)
assert not manager.keyring.is_available()
auth = manager.get_http_auth("foo")
assert "bar" == auth["username"]
assert "baz" == auth["password"]
def test_delete_http_password_with_unavailable_backend(config, with_fail_keyring):
config.auth_config_source.add_property(
"http-basic.foo", {"username": "bar", "password": "baz"}
)
manager = PasswordManager(config)
assert not manager.keyring.is_available()
manager.delete_http_password("foo")
assert config.get("http-basic.foo") is None
def test_set_pypi_token_with_unavailable_backend(config, with_fail_keyring):
manager = PasswordManager(config)
assert not manager.keyring.is_available()
manager.set_pypi_token("foo", "baz")
assert "baz" == config.get("pypi-token.foo")
def test_get_pypi_token_with_unavailable_backend(config, with_fail_keyring):
config.auth_config_source.add_property("pypi-token.foo", "baz")
manager = PasswordManager(config)
assert not manager.keyring.is_available()
assert "baz" == manager.get_pypi_token("foo")
def test_delete_pypi_token_with_unavailable_backend(config, with_fail_keyring):
config.auth_config_source.add_property("pypi-token.foo", "baz")
manager = PasswordManager(config)
assert not manager.keyring.is_available()
manager.delete_pypi_token("foo")
assert config.get("pypi-token.foo") is None
def test_keyring_raises_errors_on_keyring_errors(mocker, with_fail_keyring):
mocker.patch("poetry.utils.password_manager.KeyRing._check")
key_ring = KeyRing("poetry")
with pytest.raises(KeyRingError):
key_ring.set_password("foo", "bar", "baz")
with pytest.raises(KeyRingError):
key_ring.get_password("foo", "bar")
with pytest.raises(KeyRingError):
key_ring.delete_password("foo", "bar")
def test_keyring_with_chainer_backend_and_not_compatible_only_should_be_unavailable(
with_chained_keyring,
):
key_ring = KeyRing("poetry")
assert not key_ring.is_available()
def test_get_http_auth_from_environment_variables(environ, config, with_simple_keyring):
os.environ["POETRY_HTTP_BASIC_FOO_USERNAME"] = "bar"
os.environ["POETRY_HTTP_BASIC_FOO_PASSWORD"] = "baz"
manager = PasswordManager(config)
auth = manager.get_http_auth("foo")
assert "bar" == auth["username"]
assert "baz" == auth["password"]
| 31.48538
| 88
| 0.738856
| 694
| 5,384
| 5.409222
| 0.086455
| 0.040757
| 0.096963
| 0.108684
| 0.879861
| 0.84017
| 0.763186
| 0.763186
| 0.741343
| 0.688599
| 0
| 0
| 0.13893
| 5,384
| 170
| 89
| 31.670588
| 0.80975
| 0
| 0
| 0.654206
| 0
| 0
| 0.149331
| 0.05052
| 0
| 0
| 0
| 0
| 0.317757
| 1
| 0.140187
| false
| 0.411215
| 0.046729
| 0
| 0.186916
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
e180d1f78e22e5b24e43d25b8c4599ccb8578cdf
| 4,359
|
py
|
Python
|
ababe/stru/tests/test_clarifier.py
|
shaobinqiu/pyabc
|
eefd322bdd0bb04ae6d42554d24140c6ffbd5c34
|
[
"MIT"
] | null | null | null |
ababe/stru/tests/test_clarifier.py
|
shaobinqiu/pyabc
|
eefd322bdd0bb04ae6d42554d24140c6ffbd5c34
|
[
"MIT"
] | null | null | null |
ababe/stru/tests/test_clarifier.py
|
shaobinqiu/pyabc
|
eefd322bdd0bb04ae6d42554d24140c6ffbd5c34
|
[
"MIT"
] | null | null | null |
# coding: utf-8
# Distributed under the terms of the MIT License.
import unittest
import numpy as np
from ababe.stru.scaffold import ModifiedCell
from ababe.stru.clarifier import AtomRemoveClarifier, VerboseAtomRemoveClarifier
from ababe.stru.element import Specie
class TestAtomRemoveClarifier(unittest.TestCase):
def setUp(self):
self.latt = np.array([[4.898979, 0.000000, 0.000000],
[2.449490, 4.242641, 0.000000],
[1.632993, -0.000000, 4.618802]])
self.pos = np.array([[0.208333, 0.333333, 0.375000],
[0.375000, 0.000000, 0.875000],
[0.541667, 0.666667, 0.375000],
[0.708333, 0.333333, 0.875000],
[0.875000, 0.000000, 0.375000],
[0.000000, 0.000000, 0.000000],
[0.166667, 0.666667, 0.500000],
[0.333333, 0.333333, 0.000000],
[0.500000, 0.000000, 0.500000],
[0.666667, 0.666667, 0.000000],
[0.833333, 0.333333, 0.500000],
[0.041667, 0.666667, 0.875000]])
self.numbers = np.array([16,16,16,16,16,30,30,30,30,30,30,55])
self.modcell = ModifiedCell(self.latt, self.pos, self.numbers)
centers = np.array([[0.041667, 0.666667, 0.875000]])
ele = Specie('Zn')
r = 2
self.nearZnClarifier = AtomRemoveClarifier(centers, r, Specie('Zn'))
def test_clarify(self):
expect_pos = np.array([[0.208333, 0.333333, 0.375000],
[0.375000, 0.000000, 0.875000],
[0.541667, 0.666667, 0.375000],
[0.708333, 0.333333, 0.875000],
[0.875000, 0.000000, 0.375000],
[0.500000, 0.000000, 0.500000],
[0.833333, 0.333333, 0.500000],
[0.041667, 0.666667, 0.875000]])
expect_numbers = np.array([16,16,16,16,16,30,30,55])
expect_newcell = ModifiedCell(self.latt, expect_pos, expect_numbers)
newcell = self.nearZnClarifier.clarify(self.modcell)
self.assertEqual(newcell, expect_newcell)
class TestVerboseAtomRemoveClarifier(unittest.TestCase):
def setUp(self):
self.latt = np.array([[4.898979, 0.000000, 0.000000],
[2.449490, 4.242641, 0.000000],
[1.632993, -0.000000, 4.618802]])
self.pos = np.array([[0.208333, 0.333333, 0.375000],
[0.375000, 0.000000, 0.875000],
[0.541667, 0.666667, 0.375000],
[0.708333, 0.333333, 0.875000],
[0.875000, 0.000000, 0.375000],
[0.000000, 0.000000, 0.000000],
[0.166667, 0.666667, 0.500000],
[0.333333, 0.333333, 0.000000],
[0.500000, 0.000000, 0.500000],
[0.666667, 0.666667, 0.000000],
[0.833333, 0.333333, 0.500000],
[0.041667, 0.666667, 0.875000]])
self.numbers = np.array([16,16,16,16,55,30,30,30,30,30,30,55])
self.modcell = ModifiedCell(self.latt, self.pos, self.numbers)
ele = Specie('Zn')
r = 2
self.nearZnClarifier = VerboseAtomRemoveClarifier(Specie('Cs'), r, Specie('Zn'))
def test_clarify(self):
expect_pos = np.array([[0.208333, 0.333333, 0.375000],
[0.375000, 0.000000, 0.875000],
[0.541667, 0.666667, 0.375000],
[0.708333, 0.333333, 0.875000],
[0.875000, 0.000000, 0.375000],
[0.041667, 0.666667, 0.875000]])
expect_numbers = np.array([16,16,16,16,55,55])
expect_newcell = ModifiedCell(self.latt, expect_pos, expect_numbers)
newcell = self.nearZnClarifier.clarify(self.modcell)
self.assertEqual(newcell, expect_newcell)
if __name__ == "__main__":
import nose2
nose2.main()
| 45.884211
| 88
| 0.490479
| 499
| 4,359
| 4.240481
| 0.152305
| 0.095936
| 0.086957
| 0.039698
| 0.816163
| 0.816163
| 0.806238
| 0.766541
| 0.76465
| 0.76465
| 0
| 0.376514
| 0.374857
| 4,359
| 94
| 89
| 46.37234
| 0.4
| 0.013994
| 0
| 0.779221
| 0
| 0
| 0.004191
| 0
| 0
| 0
| 0
| 0
| 0.025974
| 1
| 0.051948
| false
| 0
| 0.077922
| 0
| 0.155844
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
e1ad0333c385ad536dd5441c979931a9da736795
| 5,382
|
py
|
Python
|
generate/generate_semeval_BERT_single.py
|
bubblemans/ABSA-BERT-pair
|
aced5582cefc6398e196da01773fbfee4cd9126b
|
[
"MIT"
] | 462
|
2019-03-25T06:48:12.000Z
|
2022-03-31T08:34:06.000Z
|
generate/generate_semeval_BERT_single.py
|
bubblemans/ABSA-BERT-pair
|
aced5582cefc6398e196da01773fbfee4cd9126b
|
[
"MIT"
] | 24
|
2019-04-27T16:35:19.000Z
|
2022-02-07T12:33:48.000Z
|
generate/generate_semeval_BERT_single.py
|
bubblemans/ABSA-BERT-pair
|
aced5582cefc6398e196da01773fbfee4cd9126b
|
[
"MIT"
] | 139
|
2019-03-25T06:48:16.000Z
|
2022-03-19T14:00:12.000Z
|
import os

data_dir = '../data/semeval2014/'
# Aspect names as used for output directories / CSV columns, paired with the
# exact `category` attribute values that appear in the SemEval-2014 XML
# ("anecdotes" is spelled "anecdotes/miscellaneous" in the XML).
aspect_name = ['price', 'anecdotes', 'food', 'ambience', 'service']
xml_category = ['price', 'anecdotes/miscellaneous', 'food', 'ambience', 'service']
dir_path = [data_dir + 'bert-single/' + i + '/' for i in aspect_name]
for path in dir_path:
    os.makedirs(path, exist_ok=True)  # race-safe replacement for exists()+makedirs


def _attr(line, attr):
    """Return the double-quote-delimited value of *attr* in an XML-ish line.

    The original script sliced values by hand-counted offsets and used
    inconsistent end offsets for the polarity value (right-2 for the test
    file vs right-1 for the train file), which truncates the last character
    or keeps the closing quote depending on whether a space precedes '/>'.
    Anchoring on the quotes themselves is robust to both layouts.
    """
    start = line.find(attr + '="') + len(attr) + 2
    return line[start:line.find('"', start)]


def _convert(xml_name, csv_name):
    """Stream one SemEval XML file and write one row per sentence into each
    aspect's <csv_name>: id \t polarity \t aspect \t text ("none" polarity
    when the sentence has no annotation for that aspect)."""
    outs = [open(p + csv_name, "w", encoding="utf-8") for p in dir_path]
    try:
        with open(data_dir + xml_name, "r", encoding="utf-8") as f:
            s = f.readline().strip()
            while s:
                if "<sentence id" in s:
                    sent_id = _attr(s, "id")
                    text = ""  # guard: don't carry text over from a previous sentence
                    category = []
                    polarity = []
                    while "</sentence>" not in s:
                        if "<text>" in s:
                            text = s[s.find("<text>") + 6:s.find("</text>")]
                        if "aspectCategory" in s:
                            category.append(_attr(s, "category"))
                            polarity.append(_attr(s, "polarity"))
                        s = f.readline().strip()
                    for g, aspect, key in zip(outs, aspect_name, xml_category):
                        label = polarity[category.index(key)] if key in category else "none"
                        g.write(sent_id + "\t" + label + "\t" + aspect + "\t" + text + "\n")
                s = f.readline().strip()
    finally:
        for g in outs:
            g.close()


_convert("Restaurants_Test_Gold.xml", "test.csv")
_convert("Restaurants_Train.xml", "train.csv")
print("Finished!")
| 47.210526
| 126
| 0.479933
| 682
| 5,382
| 3.71261
| 0.099707
| 0.055292
| 0.063191
| 0.066351
| 0.924171
| 0.911532
| 0.911137
| 0.911137
| 0.893365
| 0.893365
| 0
| 0.011812
| 0.307878
| 5,382
| 113
| 127
| 47.628319
| 0.667919
| 0
| 0
| 0.792453
| 0
| 0
| 0.181007
| 0.025646
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.009434
| 0
| 0.009434
| 0.009434
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8362e150712455f45e6bcd8d4e1c073ce1f17558
| 21,005
|
py
|
Python
|
src/genie/libs/parser/iosxe/tests/ShowNvePeers/cli/equal/golden_output2_expected.py
|
balmasea/genieparser
|
d1e71a96dfb081e0a8591707b9d4872decd5d9d3
|
[
"Apache-2.0"
] | 204
|
2018-06-27T00:55:27.000Z
|
2022-03-06T21:12:18.000Z
|
src/genie/libs/parser/iosxe/tests/ShowNvePeers/cli/equal/golden_output2_expected.py
|
balmasea/genieparser
|
d1e71a96dfb081e0a8591707b9d4872decd5d9d3
|
[
"Apache-2.0"
] | 468
|
2018-06-19T00:33:18.000Z
|
2022-03-31T23:23:35.000Z
|
src/genie/libs/parser/iosxe/tests/ShowNvePeers/cli/equal/golden_output2_expected.py
|
balmasea/genieparser
|
d1e71a96dfb081e0a8591707b9d4872decd5d9d3
|
[
"Apache-2.0"
] | 309
|
2019-01-16T20:21:07.000Z
|
2022-03-30T12:56:41.000Z
|
# Expected parse of the 'show nve peers' golden output (variant 2).
# VNI "3000101" has two L3CP peers with distinct rmac_num_rt values; VNIs
# 200051-200068 all carry the identical three-peer L2CP table (only the
# evni string differs), so that table is generated instead of being
# written out eighteen times.


def _l2cp_vni(evni):
    """Return the peer_ip table shared by every L2CP VNI (only evni varies)."""
    peers = (
        ("20.0.101.2", "1", "2d02h"),
        ("20.0.101.3", "1", "6d20h"),
        ("30.0.107.78", "4", "6d20h"),
    )
    return {
        "peer_ip": {
            ip: {
                "type": "L2CP",
                "rmac_num_rt": num_rt,
                "evni": evni,
                "state": "UP",
                "flags": "N/A",
                "uptime": uptime,
            }
            for ip, num_rt, uptime in peers
        }
    }


_vni_table = {
    "3000101": {
        "peer_ip": {
            "20.0.101.2": {
                "type": "L3CP",
                "rmac_num_rt": "5c71.0dfe.fb60",
                "evni": "3000101",
                "state": "UP",
                "flags": "A/M/4",
                "uptime": "1w0d",
            },
            "30.0.107.78": {
                "type": "L3CP",
                "rmac_num_rt": "ac3a.6767.049f",
                "evni": "3000101",
                "state": "UP",
                "flags": "A/M/4",
                "uptime": "1w0d",
            },
        }
    }
}
for _evni in range(200051, 200069):
    _vni_table[str(_evni)] = _l2cp_vni(str(_evni))

expected_output = {"interface": {"nve1": {"vni": _vni_table}}}
| 39.483083
| 60
| 0.180624
| 1,132
| 21,005
| 3.234982
| 0.05212
| 0.107045
| 0.13763
| 0.221191
| 0.95385
| 0.943474
| 0.943474
| 0.938831
| 0.938831
| 0.786455
| 0
| 0.177264
| 0.689264
| 21,005
| 532
| 61
| 39.483083
| 0.38379
| 0
| 0
| 0.768797
| 0
| 0
| 0.19085
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
3621959049b8b650e416e676f29d842d5d43b801
| 25,207
|
py
|
Python
|
tests/test_var.py
|
michelp/cxxheaderparser
|
83bb2903790cf448bf838cdb8a93ca96e758bd1a
|
[
"BSD-3-Clause"
] | 12
|
2020-12-28T09:40:53.000Z
|
2022-03-13T15:36:21.000Z
|
tests/test_var.py
|
michelp/cxxheaderparser
|
83bb2903790cf448bf838cdb8a93ca96e758bd1a
|
[
"BSD-3-Clause"
] | 28
|
2021-01-04T14:58:59.000Z
|
2022-01-03T03:00:16.000Z
|
tests/test_var.py
|
michelp/cxxheaderparser
|
83bb2903790cf448bf838cdb8a93ca96e758bd1a
|
[
"BSD-3-Clause"
] | 1
|
2021-11-06T03:44:53.000Z
|
2021-11-06T03:44:53.000Z
|
# Note: testcases generated via `python -m cxxheaderparser.gentest`
from cxxheaderparser.types import (
Array,
ClassDecl,
EnumDecl,
Enumerator,
Field,
FunctionType,
FundamentalSpecifier,
NameSpecifier,
PQName,
Parameter,
Pointer,
Reference,
Token,
Type,
Value,
Variable,
)
from cxxheaderparser.simple import ClassScope, NamespaceScope, ParsedData, parse_string
def test_var_unixwiz_ridiculous():
    """`char *(*(**foo[][8])())[];` — foo is an array of arrays of 8
    pointers to pointer-to-function() returning pointer to array of char*.
    """
    # http://unixwiz.net/techtips/reading-cdecl.html
    #
    # .. "we have no idea how this variable is useful, but at least we can
    # describe the type correctly"
    content = """
      char *(*(**foo[][8])())[];
    """
    data = parse_string(content, cleandoc=True)
    assert data == ParsedData(
        namespace=NamespaceScope(
            variables=[
                Variable(
                    name=PQName(segments=[NameSpecifier(name="foo")]),
                    type=Array(
                        array_of=Array(
                            array_of=Pointer(
                                ptr_to=Pointer(
                                    ptr_to=FunctionType(
                                        return_type=Pointer(
                                            ptr_to=Array(
                                                array_of=Pointer(
                                                    ptr_to=Type(
                                                        typename=PQName(
                                                            segments=[
                                                                FundamentalSpecifier(
                                                                    name="char"
                                                                )
                                                            ]
                                                        )
                                                    )
                                                ),
                                                # unsized inner array dimension
                                                size=None,
                                            )
                                        ),
                                        parameters=[],
                                    )
                                )
                            ),
                            size=Value(tokens=[Token(value="8")]),
                        ),
                        # outermost [] has no size either
                        size=None,
                    ),
                )
            ]
        )
    )
def test_var_ptr_to_array15_of_ptr_to_int():
    """`int *(*crocodile)[15];` — pointer to an array of 15 pointers to int."""
    content = """
      int *(*crocodile)[15];
    """
    parsed = parse_string(content, cleandoc=True)
    int_t = Type(typename=PQName(segments=[FundamentalSpecifier(name="int")]))
    expected_var = Variable(
        name=PQName(segments=[NameSpecifier(name="crocodile")]),
        type=Pointer(
            ptr_to=Array(
                array_of=Pointer(ptr_to=int_t),
                size=Value(tokens=[Token(value="15")]),
            )
        ),
    )
    assert parsed == ParsedData(
        namespace=NamespaceScope(variables=[expected_var])
    )
def test_var_ref_to_array():
    """An int[3] variable plus a reference to that array, initialised with it."""
    content = """
      int abase[3];
      int (&aname)[3] = abase;
    """
    parsed = parse_string(content, cleandoc=True)
    int_t = Type(typename=PQName(segments=[FundamentalSpecifier(name="int")]))
    arr3_of_int = Array(array_of=int_t, size=Value(tokens=[Token(value="3")]))
    assert parsed == ParsedData(
        namespace=NamespaceScope(
            variables=[
                Variable(
                    name=PQName(segments=[NameSpecifier(name="abase")]),
                    type=arr3_of_int,
                ),
                Variable(
                    name=PQName(segments=[NameSpecifier(name="aname")]),
                    type=Reference(ref_to=arr3_of_int),
                    value=Value(tokens=[Token(value="abase")]),
                ),
            ]
        )
    )
def test_var_ptr_to_array():
    """Comma declaration: a plain int plus a pointer-to-int[3] initialised
    with `&abase` (two Tokens: '&' and 'abase')."""
    content = """
      int zz, (*aname)[3] = &abase;
    """
    parsed = parse_string(content, cleandoc=True)
    int_t = Type(typename=PQName(segments=[FundamentalSpecifier(name="int")]))
    assert parsed == ParsedData(
        namespace=NamespaceScope(
            variables=[
                Variable(
                    name=PQName(segments=[NameSpecifier(name="zz")]),
                    type=int_t,
                ),
                Variable(
                    name=PQName(segments=[NameSpecifier(name="aname")]),
                    type=Pointer(
                        ptr_to=Array(
                            array_of=int_t,
                            size=Value(tokens=[Token(value="3")]),
                        )
                    ),
                    value=Value(tokens=[Token(value="&"), Token(value="abase")]),
                ),
            ]
        )
    )
def test_var_multi_1():
    """Comma declaration: a plain int plus a reference-to-int[3] bound to abase."""
    content = """
      int zz, (&aname)[3] = abase;
    """
    parsed = parse_string(content, cleandoc=True)
    int_t = Type(typename=PQName(segments=[FundamentalSpecifier(name="int")]))
    assert parsed == ParsedData(
        namespace=NamespaceScope(
            variables=[
                Variable(
                    name=PQName(segments=[NameSpecifier(name="zz")]),
                    type=int_t,
                ),
                Variable(
                    name=PQName(segments=[NameSpecifier(name="aname")]),
                    type=Reference(
                        ref_to=Array(
                            array_of=int_t,
                            size=Value(tokens=[Token(value="3")]),
                        )
                    ),
                    value=Value(tokens=[Token(value="abase")]),
                ),
            ]
        )
    )
def test_var_array_of_fnptr_varargs():
    """`void (*a3[3])(int, ...);` — array of 3 pointers to variadic
    function(int) returning void; the ellipsis surfaces as vararg=True."""
    content = """
      void (*a3[3])(int, ...);
    """
    data = parse_string(content, cleandoc=True)
    assert data == ParsedData(
        namespace=NamespaceScope(
            variables=[
                Variable(
                    name=PQName(segments=[NameSpecifier(name="a3")]),
                    type=Array(
                        array_of=Pointer(
                            ptr_to=FunctionType(
                                return_type=Type(
                                    typename=PQName(
                                        segments=[FundamentalSpecifier(name="void")]
                                    )
                                ),
                                parameters=[
                                    Parameter(
                                        type=Type(
                                            typename=PQName(
                                                segments=[
                                                    FundamentalSpecifier(name="int")
                                                ]
                                            )
                                        )
                                    )
                                ],
                                vararg=True,
                            )
                        ),
                        size=Value(tokens=[Token(value="3")]),
                    ),
                )
            ]
        )
    )
def test_var_double_fnptr_varargs():
    """`void (*(*a4))(int, ...);` — pointer to pointer to variadic
    function(int) returning void (the extra parens add a Pointer level)."""
    content = """
      void (*(*a4))(int, ...);
    """
    data = parse_string(content, cleandoc=True)
    assert data == ParsedData(
        namespace=NamespaceScope(
            variables=[
                Variable(
                    name=PQName(segments=[NameSpecifier(name="a4")]),
                    type=Pointer(
                        ptr_to=Pointer(
                            ptr_to=FunctionType(
                                return_type=Type(
                                    typename=PQName(
                                        segments=[FundamentalSpecifier(name="void")]
                                    )
                                ),
                                parameters=[
                                    Parameter(
                                        type=Type(
                                            typename=PQName(
                                                segments=[
                                                    FundamentalSpecifier(name="int")
                                                ]
                                            )
                                        )
                                    )
                                ],
                                vararg=True,
                            )
                        )
                    ),
                )
            ]
        )
    )
def test_var_fnptr_voidstar():
    """`void(*(*a5)(int));` — pointer to function(int) returning void*."""
    content = """
      void(*(*a5)(int));
    """
    data = parse_string(content, cleandoc=True)
    assert data == ParsedData(
        namespace=NamespaceScope(
            variables=[
                Variable(
                    name=PQName(segments=[NameSpecifier(name="a5")]),
                    type=Pointer(
                        ptr_to=FunctionType(
                            return_type=Pointer(
                                ptr_to=Type(
                                    typename=PQName(
                                        segments=[FundamentalSpecifier(name="void")]
                                    )
                                )
                            ),
                            parameters=[
                                Parameter(
                                    type=Type(
                                        typename=PQName(
                                            segments=[FundamentalSpecifier(name="int")]
                                        )
                                    )
                                )
                            ],
                        )
                    ),
                )
            ]
        )
    )
def test_var_fnptr_moreparens():
    """`void (*x)(int(p1), int);` — redundant parens around a parameter name
    still yield a named Parameter ("p1") alongside an unnamed int one."""
    content = """
      void (*x)(int(p1), int);
    """
    data = parse_string(content, cleandoc=True)
    assert data == ParsedData(
        namespace=NamespaceScope(
            variables=[
                Variable(
                    name=PQName(segments=[NameSpecifier(name="x")]),
                    type=Pointer(
                        ptr_to=FunctionType(
                            return_type=Type(
                                typename=PQName(
                                    segments=[FundamentalSpecifier(name="void")]
                                )
                            ),
                            parameters=[
                                Parameter(
                                    type=Type(
                                        typename=PQName(
                                            segments=[FundamentalSpecifier(name="int")]
                                        )
                                    ),
                                    name="p1",
                                ),
                                Parameter(
                                    type=Type(
                                        typename=PQName(
                                            segments=[FundamentalSpecifier(name="int")]
                                        )
                                    )
                                ),
                            ],
                        )
                    ),
                )
            ]
        )
    )
# From pycparser:
# Pointer decls nest from inside out. This is important when different
# levels have different qualifiers. For example:
#
# char * const * p;
#
# Means "pointer to const pointer to char"
#
# While:
#
# char ** const p;
#
# Means "const pointer to pointer to char"
def test_var_ptr_to_const_ptr_to_char():
    """`char *const *p;` — pointer to const pointer to char: the const
    attaches to the *inner* Pointer."""
    content = """
      char *const *p;
    """
    parsed = parse_string(content, cleandoc=True)
    char_t = Type(typename=PQName(segments=[FundamentalSpecifier(name="char")]))
    assert parsed == ParsedData(
        namespace=NamespaceScope(
            variables=[
                Variable(
                    name=PQName(segments=[NameSpecifier(name="p")]),
                    type=Pointer(ptr_to=Pointer(ptr_to=char_t, const=True)),
                )
            ]
        )
    )
def test_var_const_ptr_to_ptr_to_char():
    """`char **const p;` — const pointer to pointer to char: the const
    attaches to the *outer* Pointer."""
    content = """
      char **const p;
    """
    parsed = parse_string(content, cleandoc=True)
    char_t = Type(typename=PQName(segments=[FundamentalSpecifier(name="char")]))
    assert parsed == ParsedData(
        namespace=NamespaceScope(
            variables=[
                Variable(
                    name=PQName(segments=[NameSpecifier(name="p")]),
                    type=Pointer(ptr_to=Pointer(ptr_to=char_t), const=True),
                )
            ]
        )
    )
def test_var_array_initializer1():
    """Brace initialisation without '=': `int x[3]{1, 2, 3};` — the whole
    initialiser is kept as a raw token stream including the braces/commas."""
    content = """
      int x[3]{1, 2, 3};
    """
    parsed = parse_string(content, cleandoc=True)
    init_tokens = [Token(value=t) for t in ("{", "1", ",", "2", ",", "3", "}")]
    assert parsed == ParsedData(
        namespace=NamespaceScope(
            variables=[
                Variable(
                    name=PQName(segments=[NameSpecifier(name="x")]),
                    type=Array(
                        array_of=Type(
                            typename=PQName(
                                segments=[FundamentalSpecifier(name="int")]
                            )
                        ),
                        size=Value(tokens=[Token(value="3")]),
                    ),
                    value=Value(tokens=init_tokens),
                )
            ]
        )
    )
def test_var_array_initializer2():
    """`int x[3] = {1, 2, 3};` — '=' form parses to the same token stream as
    the brace-only form in test_var_array_initializer1."""
    content = """
      int x[3] = {1, 2, 3};
    """
    parsed = parse_string(content, cleandoc=True)
    init_tokens = [Token(value=t) for t in ("{", "1", ",", "2", ",", "3", "}")]
    assert parsed == ParsedData(
        namespace=NamespaceScope(
            variables=[
                Variable(
                    name=PQName(segments=[NameSpecifier(name="x")]),
                    type=Array(
                        array_of=Type(
                            typename=PQName(
                                segments=[FundamentalSpecifier(name="int")]
                            )
                        ),
                        size=Value(tokens=[Token(value="3")]),
                    ),
                    value=Value(tokens=init_tokens),
                )
            ]
        )
    )
def test_var_extern_c():
    """`extern "C" int x;` — only the extern flag is recorded."""
    content = """
      extern "C" int x;
    """
    parsed = parse_string(content, cleandoc=True)
    x_var = Variable(
        name=PQName(segments=[NameSpecifier(name="x")]),
        type=Type(typename=PQName(segments=[FundamentalSpecifier(name="int")])),
        # TODO: store linkage
        extern=True,
    )
    assert parsed == ParsedData(namespace=NamespaceScope(variables=[x_var]))
def test_var_ns_1():
    """Qualified definition `int N::x;` keeps both name segments in PQName."""
    content = """
      int N::x;
    """
    parsed = parse_string(content, cleandoc=True)
    qualified = PQName(segments=[NameSpecifier(name="N"), NameSpecifier(name="x")])
    int_t = Type(typename=PQName(segments=[FundamentalSpecifier(name="int")]))
    assert parsed == ParsedData(
        namespace=NamespaceScope(
            variables=[Variable(name=qualified, type=int_t)]
        )
    )
def test_var_ns_2():
    """`int N::x = 4;` — qualified name plus a plain '=' initialiser token."""
    content = """
      int N::x = 4;
    """
    parsed = parse_string(content, cleandoc=True)
    qualified = PQName(segments=[NameSpecifier(name="N"), NameSpecifier(name="x")])
    int_t = Type(typename=PQName(segments=[FundamentalSpecifier(name="int")]))
    assert parsed == ParsedData(
        namespace=NamespaceScope(
            variables=[
                Variable(
                    name=qualified,
                    type=int_t,
                    value=Value(tokens=[Token(value="4")]),
                )
            ]
        )
    )
def test_var_ns_3():
    """`int N::x{4};` — brace initialiser kept as '{', '4', '}' tokens."""
    content = """
      int N::x{4};
    """
    parsed = parse_string(content, cleandoc=True)
    qualified = PQName(segments=[NameSpecifier(name="N"), NameSpecifier(name="x")])
    int_t = Type(typename=PQName(segments=[FundamentalSpecifier(name="int")]))
    assert parsed == ParsedData(
        namespace=NamespaceScope(
            variables=[
                Variable(
                    name=qualified,
                    type=int_t,
                    value=Value(
                        tokens=[Token(value=t) for t in ("{", "4", "}")]
                    ),
                )
            ]
        )
    )
def test_var_static_struct():
    """`constexpr static struct SS {} s;` — the inline struct definition is
    recorded as its own ClassScope, while `s` is a variable of that struct
    type carrying the constexpr and static flags."""
    content = """
      constexpr static struct SS {} s;
    """
    data = parse_string(content, cleandoc=True)
    assert data == ParsedData(
        namespace=NamespaceScope(
            classes=[
                ClassScope(
                    class_decl=ClassDecl(
                        typename=PQName(
                            segments=[NameSpecifier(name="SS")], classkey="struct"
                        )
                    )
                )
            ],
            variables=[
                Variable(
                    name=PQName(segments=[NameSpecifier(name="s")]),
                    type=Type(
                        typename=PQName(
                            segments=[NameSpecifier(name="SS")], classkey="struct"
                        )
                    ),
                    constexpr=True,
                    static=True,
                )
            ],
        )
    )
def test_var_constexpr_enum():
    """`constexpr enum E { EE } e = EE;` — the inline enum definition is
    recorded as an EnumDecl, and `e` is a constexpr variable of that enum
    type initialised from its enumerator."""
    content = """
      constexpr enum E { EE } e = EE;
    """
    data = parse_string(content, cleandoc=True)
    assert data == ParsedData(
        namespace=NamespaceScope(
            enums=[
                EnumDecl(
                    typename=PQName(
                        segments=[NameSpecifier(name="E")], classkey="enum"
                    ),
                    values=[Enumerator(name="EE")],
                )
            ],
            variables=[
                Variable(
                    name=PQName(segments=[NameSpecifier(name="e")]),
                    type=Type(
                        typename=PQName(
                            segments=[NameSpecifier(name="E")], classkey="enum"
                        )
                    ),
                    value=Value(tokens=[Token(value="EE")]),
                    constexpr=True,
                )
            ],
        )
    )
def test_var_fnptr_in_class():
    """Function-pointer members of a struct become public Fields whose type
    is Pointer(ptr_to=FunctionType(...)); parameter names are preserved."""
    content = """
      struct DriverFuncs {
        void *(*init)();
        void (*write)(void *buf, int buflen);
      };
    """
    data = parse_string(content, cleandoc=True)
    assert data == ParsedData(
        namespace=NamespaceScope(
            classes=[
                ClassScope(
                    class_decl=ClassDecl(
                        typename=PQName(
                            segments=[NameSpecifier(name="DriverFuncs")],
                            classkey="struct",
                        )
                    ),
                    fields=[
                        Field(
                            access="public",
                            type=Pointer(
                                ptr_to=FunctionType(
                                    return_type=Pointer(
                                        ptr_to=Type(
                                            typename=PQName(
                                                segments=[
                                                    FundamentalSpecifier(name="void")
                                                ]
                                            )
                                        )
                                    ),
                                    parameters=[],
                                )
                            ),
                            name="init",
                        ),
                        Field(
                            access="public",
                            type=Pointer(
                                ptr_to=FunctionType(
                                    return_type=Type(
                                        typename=PQName(
                                            segments=[FundamentalSpecifier(name="void")]
                                        )
                                    ),
                                    parameters=[
                                        Parameter(
                                            type=Pointer(
                                                ptr_to=Type(
                                                    typename=PQName(
                                                        segments=[
                                                            FundamentalSpecifier(
                                                                name="void"
                                                            )
                                                        ]
                                                    )
                                                )
                                            ),
                                            name="buf",
                                        ),
                                        Parameter(
                                            type=Type(
                                                typename=PQName(
                                                    segments=[
                                                        FundamentalSpecifier(name="int")
                                                    ]
                                                )
                                            ),
                                            name="buflen",
                                        ),
                                    ],
                                )
                            ),
                            name="write",
                        ),
                    ],
                )
            ]
        )
    )
def test_var_extern():
    """Plain `extern int externVar;` sets the extern flag on the Variable."""
    content = """
      extern int externVar;
    """
    parsed = parse_string(content, cleandoc=True)
    expected_var = Variable(
        name=PQName(segments=[NameSpecifier(name="externVar")]),
        type=Type(typename=PQName(segments=[FundamentalSpecifier(name="int")])),
        extern=True,
    )
    assert parsed == ParsedData(
        namespace=NamespaceScope(variables=[expected_var])
    )
| 32.778934
| 88
| 0.325544
| 1,377
| 25,207
| 5.853304
| 0.111837
| 0.100744
| 0.095534
| 0.103226
| 0.823325
| 0.805831
| 0.797767
| 0.779901
| 0.751117
| 0.736228
| 0
| 0.004987
| 0.586305
| 25,207
| 768
| 89
| 32.821615
| 0.767932
| 0.019518
| 0
| 0.647577
| 1
| 0
| 0.043855
| 0.00085
| 0
| 0
| 0
| 0.001302
| 0.030837
| 1
| 0.030837
| false
| 0
| 0.002937
| 0
| 0.033774
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
7fc1908479e4b178881cf7e2038b85e65633179f
| 8,948
|
py
|
Python
|
tests/slack/slack_payloads.py
|
danpalmer/response
|
7699f14f9248636875dbf06a74e6ebdde018d0f2
|
[
"MIT"
] | null | null | null |
tests/slack/slack_payloads.py
|
danpalmer/response
|
7699f14f9248636875dbf06a74e6ebdde018d0f2
|
[
"MIT"
] | null | null | null |
tests/slack/slack_payloads.py
|
danpalmer/response
|
7699f14f9248636875dbf06a74e6ebdde018d0f2
|
[
"MIT"
] | null | null | null |
# Canned Slack Web API ``users.list`` payloads used as test fixtures.
#
# NOTE(review): in ``users_list_response`` and ``users_list_new`` the
# boolean-ish fields ("ok", "deleted", "is_admin", ...) are the *strings*
# "True"/"False" — so even "False" is truthy — whereas ``users_list_page_1``
# and ``users_list_page_2`` use real booleans. This looks inconsistent;
# confirm which shape the code under test expects before unifying.

# Two-member response (spengler + glinda), single page (empty next_cursor).
users_list_response = {
    "ok": "True",
    "members": [
        {
            "id": "W012A3CDE",
            "team_id": "T012AB3C4",
            "name": "spengler",
            "deleted": "False",
            "color": "9f69e7",
            "real_name": "spengler",
            "tz": "America/Los_Angeles",
            "tz_label": "Pacific Daylight Time",
            "tz_offset": -25200,
            "profile": {
                "avatar_hash": "ge3b51ca72de",
                "status_text": "Print is dead",
                "status_emoji": ":books:",
                "real_name": "Egon Spengler",
                "display_name": "spengler",
                "real_name_normalized": "Egon Spengler",
                "display_name_normalized": "spengler",
                "email": "spengler@ghostbusters.example.com",
                "image_24": "https://.../avatar/e3b51ca72dee4ef87916ae2b9240df50.jpg",
                "image_32": "https://.../avatar/e3b51ca72dee4ef87916ae2b9240df50.jpg",
                "image_48": "https://.../avatar/e3b51ca72dee4ef87916ae2b9240df50.jpg",
                "image_72": "https://.../avatar/e3b51ca72dee4ef87916ae2b9240df50.jpg",
                "image_192": "https://.../avatar/e3b51ca72dee4ef87916ae2b9240df50.jpg",
                "image_512": "https://.../avatar/e3b51ca72dee4ef87916ae2b9240df50.jpg",
                "team": "T012AB3C4",
            },
            "is_admin": "True",
            "is_owner": "False",
            "is_primary_owner": "False",
            "is_restricted": "False",
            "is_ultra_restricted": "False",
            "is_bot": "False",
            "updated": 1502138686,
            "is_app_user": "False",
            "has_2fa": "False",
        },
        {
            "id": "U12345678",
            "team_id": "T0G9PQBBK",
            "name": "glinda",
            "deleted": "False",
            "color": "9f69e7",
            "real_name": "Glinda Southgood",
            "tz": "America/Los_Angeles",
            "tz_label": "Pacific Daylight Time",
            "tz_offset": -25200,
            "profile": {
                "avatar_hash": "8fbdd10b41c6",
                "image_24": "https://a.slack-edge.com...png",
                "image_32": "https://a.slack-edge.com...png",
                "image_48": "https://a.slack-edge.com...png",
                "image_72": "https://a.slack-edge.com...png",
                "image_192": "https://a.slack-edge.com...png",
                "image_512": "https://a.slack-edge.com...png",
                "image_1024": "https://a.slack-edge.com...png",
                "image_original": "https://a.slack-edge.com...png",
                "first_name": "Glinda",
                "last_name": "Southgood",
                "title": "Glinda the Good",
                "phone": "",
                "skype": "",
                "real_name": "Glinda Southgood",
                "real_name_normalized": "Glinda Southgood",
                "display_name": "Glinda the Fairly Good",
                "display_name_normalized": "Glinda the Fairly Good",
                "email": "glenda@south.oz.coven",
            },
            "is_admin": "True",
            "is_owner": "False",
            "is_primary_owner": "False",
            "is_restricted": "False",
            "is_ultra_restricted": "False",
            "is_bot": "False",
            "updated": 1480527098,
            "has_2fa": "False",
        },
    ],
    "cache_ts": 1498777272,
    "response_metadata": {"next_cursor": ""},
}
# Single-member response introducing a user ("venkman") not present above.
users_list_new = {
    "ok": "True",
    "members": [
        {
            "id": "U10293847",
            "team_id": "T012AB3C4",
            "name": "venkman",
            "deleted": "False",
            "color": "9f69e7",
            "real_name": "venkman",
            "tz": "America/Los_Angeles",
            "tz_label": "Pacific Daylight Time",
            "tz_offset": -25200,
            "profile": {
                "avatar_hash": "ge3b51ca72de",
                "status_text": "Back off man, I'm a scientist!",
                "status_emoji": ":male-scientist:",
                "real_name": "Peter Venkman",
                "display_name": "venkman",
                "real_name_normalized": "Peter Venkman",
                "display_name_normalized": "venkman",
                "email": "venkman@ghostbusters.example.com",
                "image_24": "https://.../avatar/e3b51ca72dee4ef87916ae2b9240df50.jpg",
                "image_32": "https://.../avatar/e3b51ca72dee4ef87916ae2b9240df50.jpg",
                "image_48": "https://.../avatar/e3b51ca72dee4ef87916ae2b9240df50.jpg",
                "image_72": "https://.../avatar/e3b51ca72dee4ef87916ae2b9240df50.jpg",
                "image_192": "https://.../avatar/e3b51ca72dee4ef87916ae2b9240df50.jpg",
                "image_512": "https://.../avatar/e3b51ca72dee4ef87916ae2b9240df50.jpg",
                "team": "T012AB3C4",
            },
            "is_admin": "True",
            "is_owner": "False",
            "is_primary_owner": "False",
            "is_restricted": "False",
            "is_ultra_restricted": "False",
            "is_bot": "False",
            "updated": 1502138686,
            "is_app_user": "False",
            "has_2fa": "False",
        }
    ],
    "cache_ts": 1498777272,
    "response_metadata": {"next_cursor": ""},
}
# First page of a paginated pair; its next_cursor ("page2") requests page 2.
users_list_page_1 = {
    "ok": True,
    "members": [
        {
            "id": "W012A3CDE",
            "team_id": "T012AB3C4",
            "name": "spengler",
            "deleted": False,
            "color": "9f69e7",
            "real_name": "spengler",
            "tz": "America/Los_Angeles",
            "tz_label": "Pacific Daylight Time",
            "tz_offset": -25200,
            "profile": {
                "avatar_hash": "ge3b51ca72de",
                "status_text": "Print is dead",
                "status_emoji": ":books:",
                "real_name": "Egon Spengler",
                "display_name": "spengler",
                "real_name_normalized": "Egon Spengler",
                "display_name_normalized": "spengler",
                "email": "spengler@ghostbusters.example.com",
                "image_24": "https://.../avatar/e3b51ca72dee4ef87916ae2b9240df50.jpg",
                "image_32": "https://.../avatar/e3b51ca72dee4ef87916ae2b9240df50.jpg",
                "image_48": "https://.../avatar/e3b51ca72dee4ef87916ae2b9240df50.jpg",
                "image_72": "https://.../avatar/e3b51ca72dee4ef87916ae2b9240df50.jpg",
                "image_192": "https://.../avatar/e3b51ca72dee4ef87916ae2b9240df50.jpg",
                "image_512": "https://.../avatar/e3b51ca72dee4ef87916ae2b9240df50.jpg",
                "team": "T012AB3C4",
            },
            "is_admin": True,
            "is_owner": False,
            "is_primary_owner": False,
            "is_restricted": False,
            "is_ultra_restricted": False,
            "is_bot": False,
            "updated": 1502138686,
            "is_app_user": False,
            "has_2fa": False,
        }
    ],
    "cache_ts": 1498777272,
    "response_metadata": {"next_cursor": "page2"},
}
# Second (final) page: empty next_cursor terminates pagination.
users_list_page_2 = {
    "ok": True,
    "members": [
        {
            "id": "W07QCRPA4",
            "team_id": "T0G9PQBBK",
            "name": "glinda",
            "deleted": False,
            "color": "9f69e7",
            "real_name": "Glinda Southgood",
            "tz": "America/Los_Angeles",
            "tz_label": "Pacific Daylight Time",
            "tz_offset": -25200,
            "profile": {
                "avatar_hash": "8fbdd10b41c6",
                "image_24": "https://a.slack-edge.com...png",
                "image_32": "https://a.slack-edge.com...png",
                "image_48": "https://a.slack-edge.com...png",
                "image_72": "https://a.slack-edge.com...png",
                "image_192": "https://a.slack-edge.com...png",
                "image_512": "https://a.slack-edge.com...png",
                "image_1024": "https://a.slack-edge.com...png",
                "image_original": "https://a.slack-edge.com...png",
                "first_name": "Glinda",
                "last_name": "Southgood",
                "title": "Glinda the Good",
                "phone": "",
                "skype": "",
                "real_name": "Glinda Southgood",
                "real_name_normalized": "Glinda Southgood",
                "display_name": "Glinda the Fairly Good",
                "display_name_normalized": "Glinda the Fairly Good",
                "email": "glenda@south.oz.coven",
            },
            "is_admin": True,
            "is_owner": False,
            "is_primary_owner": False,
            "is_restricted": False,
            "is_ultra_restricted": False,
            "is_bot": False,
            "updated": 1480527098,
            "has_2fa": False,
        }
    ],
    "cache_ts": 1498777272,
    "response_metadata": {"next_cursor": ""},
}
| 39.946429
| 87
| 0.484913
| 756
| 8,948
| 5.505291
| 0.149471
| 0.033638
| 0.185968
| 0.198943
| 0.930322
| 0.930322
| 0.922874
| 0.922874
| 0.922874
| 0.922874
| 0
| 0.10895
| 0.350693
| 8,948
| 223
| 88
| 40.125561
| 0.607401
| 0
| 0
| 0.83105
| 0
| 0
| 0.497206
| 0.028498
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
7fcb5c51e2c8e522aa7af511ce357f004566f3eb
| 2,847
|
py
|
Python
|
benchmark/floating.py
|
tushushu/uvec
|
ab57251136d375a5e47a61af9a3262394795c0db
|
[
"BSD-3-Clause"
] | 7
|
2021-11-29T02:43:15.000Z
|
2022-01-03T13:59:11.000Z
|
benchmark/floating.py
|
tushushu/uvec
|
ab57251136d375a5e47a61af9a3262394795c0db
|
[
"BSD-3-Clause"
] | 87
|
2022-01-10T13:15:23.000Z
|
2022-03-31T12:10:15.000Z
|
benchmark/floating.py
|
tushushu/ulist
|
987d3a1bbcf2caab7ed2253d94921b1588e5175f
|
[
"BSD-3-Clause"
] | null | null | null |
from random import random, seed
import numpy as np
from ulist.utils import Benchmarker
# Seed the global PRNG so the randomly generated benchmark inputs are
# reproducible from run to run.
seed(100)
class AddOne(Benchmarker):
    """Benchmark adding the scalar 1.0 to every element of a float sequence."""

    def cases(self) -> list:
        """Return one single-argument case per input size (1e2 .. 1e6 elements)."""
        sizes = (100, 1000, 10000, 100000, 1000000)
        return [([float(i) for i in range(n)],) for n in sizes]

    def ulist_fn(self, args) -> None:
        # Result is deliberately discarded; only elapsed time is measured.
        args[0] + 1.0

    def other_fn(self, args) -> None:
        args[0] + 1.0
class ArraySum(Benchmarker):
    """Benchmark summing all elements of a random float sequence."""

    def cases(self) -> list:
        """Return one single-argument case per input size (1e2 .. 1e6 elements)."""
        sizes = (100, 1000, 10000, 100000, 1000000)
        return [([random() for _ in range(n)],) for n in sizes]

    def ulist_fn(self, args) -> None:
        # Result is deliberately discarded; only elapsed time is measured.
        args[0].sum()

    def other_fn(self, args) -> None:
        args[0].sum()
class LessThanOne(Benchmarker):
    """Benchmark element-wise comparison `x < 1` over floats drawn from [0, 2)."""

    def cases(self) -> list:
        """Return one single-argument case per input size (1e2 .. 1e6 elements)."""
        sizes = (100, 1000, 10000, 100000, 1000000)
        return [([random() * 2 for _ in range(n)],) for n in sizes]

    def ulist_fn(self, args) -> None:
        # Result is deliberately discarded; only elapsed time is measured.
        args[0] < 1

    def other_fn(self, args) -> None:
        args[0] < 1
class Max(Benchmarker):
    """Benchmark finding the maximum element of a float sequence."""

    def cases(self) -> list:
        """Return one single-argument case per input size (1e2 .. 1e6 elements)."""
        sizes = (100, 1000, 10000, 100000, 1000000)
        return [([float(i) for i in range(n)],) for n in sizes]

    def ulist_fn(self, args) -> None:
        # Result is deliberately discarded; only elapsed time is measured.
        args[0].max()

    def other_fn(self, args) -> None:
        args[0].max()
class MulTwo(Benchmarker):
    """Benchmark multiplying every element of a float sequence by 2.0."""

    def cases(self) -> list:
        """Return one single-argument case per input size (1e2 .. 1e6 elements)."""
        sizes = (100, 1000, 10000, 100000, 1000000)
        return [([float(i) for i in range(n)],) for n in sizes]

    def ulist_fn(self, args) -> None:
        # Result is deliberately discarded; only elapsed time is measured.
        args[0] * 2.0

    def other_fn(self, args) -> None:
        args[0] * 2.0
class Sort(Benchmarker):
    """Benchmark ascending sort: ulist's `sort(ascending=True)` vs `np.sort`."""

    def cases(self) -> list:
        """Return one single-argument case per input size (1e2 .. 1e6 elements)."""
        sizes = (100, 1000, 10000, 100000, 1000000)
        return [([random() for _ in range(n)],) for n in sizes]

    def ulist_fn(self, args) -> None:
        # Result is deliberately discarded; only elapsed time is measured.
        args[0].sort(ascending=True)

    def other_fn(self, args) -> None:
        np.sort(args[0])
| 26.119266
| 54
| 0.488584
| 364
| 2,847
| 3.747253
| 0.115385
| 0.153959
| 0.098974
| 0.109971
| 0.871701
| 0.807918
| 0.791789
| 0.754399
| 0.711877
| 0.64956
| 0
| 0.095491
| 0.3379
| 2,847
| 108
| 55
| 26.361111
| 0.628117
| 0
| 0
| 0.719512
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.219512
| false
| 0
| 0.036585
| 0.073171
| 0.402439
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
7fcbc76e00826f0d7381f3a5bde7e260b3266753
| 2,027
|
py
|
Python
|
test/test_edit_group.py
|
Droriel/python_training
|
e0fbbf3df4289e5af606d9c752e99cab82c653a6
|
[
"Apache-2.0"
] | null | null | null |
test/test_edit_group.py
|
Droriel/python_training
|
e0fbbf3df4289e5af606d9c752e99cab82c653a6
|
[
"Apache-2.0"
] | null | null | null |
test/test_edit_group.py
|
Droriel/python_training
|
e0fbbf3df4289e5af606d9c752e99cab82c653a6
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from random import randrange
from model.group import Group
import random
from test_addons import adjustments
def test_edit_first_group_top_edit(app, db, json_groups, check_ui):
    """Edit a randomly chosen group via the top edit button and verify the DB
    (and optionally the UI) reflects the change."""
    # Make sure there is at least one group available to edit.
    if app.group.count() == 0:
        app.group.create(Group(name="test"))
    old_groups = db.get_group_list()
    # Pick a random existing group as the edit target.
    target = random.choice(old_groups)
    edited = json_groups
    edited.id = target.id
    app.group.edit_group_by_id(edited.id, edited, edit_button='top')
    new_groups = db.get_group_list()
    # Build the expected list by substituting the edited group for the target.
    old_groups = [edited if g == target else g for g in old_groups]
    assert sorted(new_groups, key=Group.id_or_max) == sorted(old_groups, key=Group.id_or_max)
    if check_ui:
        # The UI collapses repeated whitespace, so normalize before comparing.
        def clean(group):
            return Group(id=group.id, name=adjustments.clear_multiple_spaces(group.name).strip())
        assert sorted(map(clean, new_groups), key=Group.id_or_max) == sorted(app.group.get_group_list(), key=Group.id_or_max)
def test_edit_first_group_bottom_edit(app, db, json_groups, check_ui):
    """Edit a randomly chosen group via the bottom edit button and verify the DB
    (and optionally the UI) reflects the change."""
    # Make sure there is at least one group available to edit.
    if app.group.count() == 0:
        app.group.create(Group(name="test"))
    old_groups = db.get_group_list()
    # Pick a random existing group as the edit target.
    target = random.choice(old_groups)
    edited = json_groups
    edited.id = target.id
    app.group.edit_group_by_id(edited.id, edited, edit_button='bottom')
    new_groups = db.get_group_list()
    # Build the expected list by substituting the edited group for the target.
    old_groups = [edited if g == target else g for g in old_groups]
    assert sorted(new_groups, key=Group.id_or_max) == sorted(old_groups, key=Group.id_or_max)
    if check_ui:
        # The UI collapses repeated whitespace, so normalize before comparing.
        def clean(group):
            return Group(id=group.id, name=adjustments.clear_multiple_spaces(group.name).strip())
        assert sorted(map(clean, new_groups), key=Group.id_or_max) == sorted(app.group.get_group_list(), key=Group.id_or_max)
| 40.54
| 119
| 0.701529
| 320
| 2,027
| 4.140625
| 0.190625
| 0.084528
| 0.060377
| 0.072453
| 0.869434
| 0.837736
| 0.837736
| 0.837736
| 0.837736
| 0.837736
| 0
| 0.002418
| 0.184016
| 2,027
| 49
| 120
| 41.367347
| 0.79867
| 0.041441
| 0
| 0.8
| 0
| 0
| 0.008767
| 0
| 0
| 0
| 0
| 0
| 0.1
| 1
| 0.1
| false
| 0
| 0.1
| 0.05
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7fdbe0aa2cc47d8ada644d20f9644573dc6e7203
| 33,305
|
py
|
Python
|
sdk/python/pulumi_azure/search/service.py
|
henriktao/pulumi-azure
|
f1cbcf100b42b916da36d8fe28be3a159abaf022
|
[
"ECL-2.0",
"Apache-2.0"
] | 109
|
2018-06-18T00:19:44.000Z
|
2022-02-20T05:32:57.000Z
|
sdk/python/pulumi_azure/search/service.py
|
henriktao/pulumi-azure
|
f1cbcf100b42b916da36d8fe28be3a159abaf022
|
[
"ECL-2.0",
"Apache-2.0"
] | 663
|
2018-06-18T21:08:46.000Z
|
2022-03-31T20:10:11.000Z
|
sdk/python/pulumi_azure/search/service.py
|
henriktao/pulumi-azure
|
f1cbcf100b42b916da36d8fe28be3a159abaf022
|
[
"ECL-2.0",
"Apache-2.0"
] | 41
|
2018-07-19T22:37:38.000Z
|
2022-03-14T10:56:26.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['ServiceArgs', 'Service']
# NOTE: this class is machine-generated by the Pulumi Terraform Bridge (tfgen)
# per the file header; hand edits are normally overwritten on regeneration.
# `@pulumi.input_type` introspects the `__init__` signature and the property
# pairs below, which all delegate storage to `pulumi.set`/`pulumi.get`.
@pulumi.input_type
class ServiceArgs:
    def __init__(__self__, *,
                 resource_group_name: pulumi.Input[str],
                 sku: pulumi.Input[str],
                 allowed_ips: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 identity: Optional[pulumi.Input['ServiceIdentityArgs']] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 partition_count: Optional[pulumi.Input[int]] = None,
                 public_network_access_enabled: Optional[pulumi.Input[bool]] = None,
                 replica_count: Optional[pulumi.Input[int]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
        """
        The set of arguments for constructing a Service resource.
        :param pulumi.Input[str] resource_group_name: The name of the Resource Group where the Search Service should exist. Changing this forces a new Search Service to be created.
        :param pulumi.Input[str] sku: The SKU which should be used for this Search Service. Possible values are `basic`, `free`, `standard`, `standard2`, `standard3`, `storage_optimized_l1` and `storage_optimized_l2`. Changing this forces a new Search Service to be created.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] allowed_ips: A list of IPv4 addresses or CIDRs that are allowed access to the search service endpoint.
        :param pulumi.Input['ServiceIdentityArgs'] identity: An `identity` block as defined below.
        :param pulumi.Input[str] location: The Azure Region where the Search Service should exist. Changing this forces a new Search Service to be created.
        :param pulumi.Input[str] name: The Name which should be used for this Search Service. Changing this forces a new Search Service to be created.
        :param pulumi.Input[int] partition_count: The number of partitions which should be created.
        :param pulumi.Input[bool] public_network_access_enabled: Whether or not public network access is allowed for this resource. Defaults to `true`.
        :param pulumi.Input[int] replica_count: The number of replica's which should be created.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags which should be assigned to the Search Service.
        """
        # The two required arguments are always recorded.
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        pulumi.set(__self__, "sku", sku)
        # Optional arguments are recorded only when explicitly supplied, so
        # unset values stay absent rather than being stored as None.
        if allowed_ips is not None:
            pulumi.set(__self__, "allowed_ips", allowed_ips)
        if identity is not None:
            pulumi.set(__self__, "identity", identity)
        if location is not None:
            pulumi.set(__self__, "location", location)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if partition_count is not None:
            pulumi.set(__self__, "partition_count", partition_count)
        if public_network_access_enabled is not None:
            pulumi.set(__self__, "public_network_access_enabled", public_network_access_enabled)
        if replica_count is not None:
            pulumi.set(__self__, "replica_count", replica_count)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)

    # --- generated accessors: each getter/setter pair reads and writes the
    # --- corresponding value via pulumi.get/pulumi.set.
    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        The name of the Resource Group where the Search Service should exist. Changing this forces a new Search Service to be created.
        """
        return pulumi.get(self, "resource_group_name")

    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)

    @property
    @pulumi.getter
    def sku(self) -> pulumi.Input[str]:
        """
        The SKU which should be used for this Search Service. Possible values are `basic`, `free`, `standard`, `standard2`, `standard3`, `storage_optimized_l1` and `storage_optimized_l2`. Changing this forces a new Search Service to be created.
        """
        return pulumi.get(self, "sku")

    @sku.setter
    def sku(self, value: pulumi.Input[str]):
        pulumi.set(self, "sku", value)

    @property
    @pulumi.getter(name="allowedIps")
    def allowed_ips(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        A list of IPv4 addresses or CIDRs that are allowed access to the search service endpoint.
        """
        return pulumi.get(self, "allowed_ips")

    @allowed_ips.setter
    def allowed_ips(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "allowed_ips", value)

    @property
    @pulumi.getter
    def identity(self) -> Optional[pulumi.Input['ServiceIdentityArgs']]:
        """
        An `identity` block as defined below.
        """
        return pulumi.get(self, "identity")

    @identity.setter
    def identity(self, value: Optional[pulumi.Input['ServiceIdentityArgs']]):
        pulumi.set(self, "identity", value)

    @property
    @pulumi.getter
    def location(self) -> Optional[pulumi.Input[str]]:
        """
        The Azure Region where the Search Service should exist. Changing this forces a new Search Service to be created.
        """
        return pulumi.get(self, "location")

    @location.setter
    def location(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "location", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The Name which should be used for this Search Service. Changing this forces a new Search Service to be created.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="partitionCount")
    def partition_count(self) -> Optional[pulumi.Input[int]]:
        """
        The number of partitions which should be created.
        """
        return pulumi.get(self, "partition_count")

    @partition_count.setter
    def partition_count(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "partition_count", value)

    @property
    @pulumi.getter(name="publicNetworkAccessEnabled")
    def public_network_access_enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether or not public network access is allowed for this resource. Defaults to `true`.
        """
        return pulumi.get(self, "public_network_access_enabled")

    @public_network_access_enabled.setter
    def public_network_access_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "public_network_access_enabled", value)

    @property
    @pulumi.getter(name="replicaCount")
    def replica_count(self) -> Optional[pulumi.Input[int]]:
        """
        The number of replica's which should be created.
        """
        return pulumi.get(self, "replica_count")

    @replica_count.setter
    def replica_count(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "replica_count", value)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        A mapping of tags which should be assigned to the Search Service.
        """
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
# NOTE: machine-generated (tfgen) state bag used when looking up / filtering
# existing Service resources; unlike ServiceArgs, every field here is optional
# and it additionally exposes the provider-computed outputs (primary_key,
# secondary_key, query_keys).
@pulumi.input_type
class _ServiceState:
    def __init__(__self__, *,
                 allowed_ips: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 identity: Optional[pulumi.Input['ServiceIdentityArgs']] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 partition_count: Optional[pulumi.Input[int]] = None,
                 primary_key: Optional[pulumi.Input[str]] = None,
                 public_network_access_enabled: Optional[pulumi.Input[bool]] = None,
                 query_keys: Optional[pulumi.Input[Sequence[pulumi.Input['ServiceQueryKeyArgs']]]] = None,
                 replica_count: Optional[pulumi.Input[int]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 secondary_key: Optional[pulumi.Input[str]] = None,
                 sku: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
        """
        Input properties used for looking up and filtering Service resources.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] allowed_ips: A list of IPv4 addresses or CIDRs that are allowed access to the search service endpoint.
        :param pulumi.Input['ServiceIdentityArgs'] identity: An `identity` block as defined below.
        :param pulumi.Input[str] location: The Azure Region where the Search Service should exist. Changing this forces a new Search Service to be created.
        :param pulumi.Input[str] name: The Name which should be used for this Search Service. Changing this forces a new Search Service to be created.
        :param pulumi.Input[int] partition_count: The number of partitions which should be created.
        :param pulumi.Input[str] primary_key: The Primary Key used for Search Service Administration.
        :param pulumi.Input[bool] public_network_access_enabled: Whether or not public network access is allowed for this resource. Defaults to `true`.
        :param pulumi.Input[Sequence[pulumi.Input['ServiceQueryKeyArgs']]] query_keys: A `query_keys` block as defined below.
        :param pulumi.Input[int] replica_count: The number of replica's which should be created.
        :param pulumi.Input[str] resource_group_name: The name of the Resource Group where the Search Service should exist. Changing this forces a new Search Service to be created.
        :param pulumi.Input[str] secondary_key: The Secondary Key used for Search Service Administration.
        :param pulumi.Input[str] sku: The SKU which should be used for this Search Service. Possible values are `basic`, `free`, `standard`, `standard2`, `standard3`, `storage_optimized_l1` and `storage_optimized_l2`. Changing this forces a new Search Service to be created.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags which should be assigned to the Search Service.
        """
        # All state fields are optional; each is recorded only when supplied,
        # so unset values stay absent rather than being stored as None.
        if allowed_ips is not None:
            pulumi.set(__self__, "allowed_ips", allowed_ips)
        if identity is not None:
            pulumi.set(__self__, "identity", identity)
        if location is not None:
            pulumi.set(__self__, "location", location)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if partition_count is not None:
            pulumi.set(__self__, "partition_count", partition_count)
        if primary_key is not None:
            pulumi.set(__self__, "primary_key", primary_key)
        if public_network_access_enabled is not None:
            pulumi.set(__self__, "public_network_access_enabled", public_network_access_enabled)
        if query_keys is not None:
            pulumi.set(__self__, "query_keys", query_keys)
        if replica_count is not None:
            pulumi.set(__self__, "replica_count", replica_count)
        if resource_group_name is not None:
            pulumi.set(__self__, "resource_group_name", resource_group_name)
        if secondary_key is not None:
            pulumi.set(__self__, "secondary_key", secondary_key)
        if sku is not None:
            pulumi.set(__self__, "sku", sku)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)

    # --- generated accessors: each getter/setter pair reads and writes the
    # --- corresponding value via pulumi.get/pulumi.set.
    @property
    @pulumi.getter(name="allowedIps")
    def allowed_ips(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        A list of IPv4 addresses or CIDRs that are allowed access to the search service endpoint.
        """
        return pulumi.get(self, "allowed_ips")

    @allowed_ips.setter
    def allowed_ips(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "allowed_ips", value)

    @property
    @pulumi.getter
    def identity(self) -> Optional[pulumi.Input['ServiceIdentityArgs']]:
        """
        An `identity` block as defined below.
        """
        return pulumi.get(self, "identity")

    @identity.setter
    def identity(self, value: Optional[pulumi.Input['ServiceIdentityArgs']]):
        pulumi.set(self, "identity", value)

    @property
    @pulumi.getter
    def location(self) -> Optional[pulumi.Input[str]]:
        """
        The Azure Region where the Search Service should exist. Changing this forces a new Search Service to be created.
        """
        return pulumi.get(self, "location")

    @location.setter
    def location(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "location", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The Name which should be used for this Search Service. Changing this forces a new Search Service to be created.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="partitionCount")
    def partition_count(self) -> Optional[pulumi.Input[int]]:
        """
        The number of partitions which should be created.
        """
        return pulumi.get(self, "partition_count")

    @partition_count.setter
    def partition_count(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "partition_count", value)

    @property
    @pulumi.getter(name="primaryKey")
    def primary_key(self) -> Optional[pulumi.Input[str]]:
        """
        The Primary Key used for Search Service Administration.
        """
        return pulumi.get(self, "primary_key")

    @primary_key.setter
    def primary_key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "primary_key", value)

    @property
    @pulumi.getter(name="publicNetworkAccessEnabled")
    def public_network_access_enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether or not public network access is allowed for this resource. Defaults to `true`.
        """
        return pulumi.get(self, "public_network_access_enabled")

    @public_network_access_enabled.setter
    def public_network_access_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "public_network_access_enabled", value)

    @property
    @pulumi.getter(name="queryKeys")
    def query_keys(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ServiceQueryKeyArgs']]]]:
        """
        A `query_keys` block as defined below.
        """
        return pulumi.get(self, "query_keys")

    @query_keys.setter
    def query_keys(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ServiceQueryKeyArgs']]]]):
        pulumi.set(self, "query_keys", value)

    @property
    @pulumi.getter(name="replicaCount")
    def replica_count(self) -> Optional[pulumi.Input[int]]:
        """
        The number of replica's which should be created.
        """
        return pulumi.get(self, "replica_count")

    @replica_count.setter
    def replica_count(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "replica_count", value)

    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the Resource Group where the Search Service should exist. Changing this forces a new Search Service to be created.
        """
        return pulumi.get(self, "resource_group_name")

    @resource_group_name.setter
    def resource_group_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "resource_group_name", value)

    @property
    @pulumi.getter(name="secondaryKey")
    def secondary_key(self) -> Optional[pulumi.Input[str]]:
        """
        The Secondary Key used for Search Service Administration.
        """
        return pulumi.get(self, "secondary_key")

    @secondary_key.setter
    def secondary_key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "secondary_key", value)

    @property
    @pulumi.getter
    def sku(self) -> Optional[pulumi.Input[str]]:
        """
        The SKU which should be used for this Search Service. Possible values are `basic`, `free`, `standard`, `standard2`, `standard3`, `storage_optimized_l1` and `storage_optimized_l2`. Changing this forces a new Search Service to be created.
        """
        return pulumi.get(self, "sku")

    @sku.setter
    def sku(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "sku", value)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        A mapping of tags which should be assigned to the Search Service.
        """
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
class Service(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
allowed_ips: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
identity: Optional[pulumi.Input[pulumi.InputType['ServiceIdentityArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
partition_count: Optional[pulumi.Input[int]] = None,
public_network_access_enabled: Optional[pulumi.Input[bool]] = None,
replica_count: Optional[pulumi.Input[int]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
sku: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
"""
Manages a Search Service.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_service = azure.search.Service("exampleService",
resource_group_name=example_resource_group.name,
location=example_resource_group.location,
sku="standard")
```
## Import
Search Services can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:search/service:Service example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.Search/searchServices/service1
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] allowed_ips: A list of IPv4 addresses or CIDRs that are allowed access to the search service endpoint.
:param pulumi.Input[pulumi.InputType['ServiceIdentityArgs']] identity: An `identity` block as defined below.
:param pulumi.Input[str] location: The Azure Region where the Search Service should exist. Changing this forces a new Search Service to be created.
:param pulumi.Input[str] name: The Name which should be used for this Search Service. Changing this forces a new Search Service to be created.
:param pulumi.Input[int] partition_count: The number of partitions which should be created.
:param pulumi.Input[bool] public_network_access_enabled: Whether or not public network access is allowed for this resource. Defaults to `true`.
:param pulumi.Input[int] replica_count: The number of replica's which should be created.
:param pulumi.Input[str] resource_group_name: The name of the Resource Group where the Search Service should exist. Changing this forces a new Search Service to be created.
:param pulumi.Input[str] sku: The SKU which should be used for this Search Service. Possible values are `basic`, `free`, `standard`, `standard2`, `standard3`, `storage_optimized_l1` and `storage_optimized_l2`. Changing this forces a new Search Service to be created.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags which should be assigned to the Search Service.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ServiceArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Manages a Search Service.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_service = azure.search.Service("exampleService",
resource_group_name=example_resource_group.name,
location=example_resource_group.location,
sku="standard")
```
## Import
Search Services can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:search/service:Service example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.Search/searchServices/service1
```
:param str resource_name: The name of the resource.
:param ServiceArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ServiceArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
allowed_ips: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
identity: Optional[pulumi.Input[pulumi.InputType['ServiceIdentityArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
partition_count: Optional[pulumi.Input[int]] = None,
public_network_access_enabled: Optional[pulumi.Input[bool]] = None,
replica_count: Optional[pulumi.Input[int]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
sku: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ServiceArgs.__new__(ServiceArgs)
__props__.__dict__["allowed_ips"] = allowed_ips
__props__.__dict__["identity"] = identity
__props__.__dict__["location"] = location
__props__.__dict__["name"] = name
__props__.__dict__["partition_count"] = partition_count
__props__.__dict__["public_network_access_enabled"] = public_network_access_enabled
__props__.__dict__["replica_count"] = replica_count
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
if sku is None and not opts.urn:
raise TypeError("Missing required property 'sku'")
__props__.__dict__["sku"] = sku
__props__.__dict__["tags"] = tags
__props__.__dict__["primary_key"] = None
__props__.__dict__["query_keys"] = None
__props__.__dict__["secondary_key"] = None
super(Service, __self__).__init__(
'azure:search/service:Service',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
allowed_ips: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
identity: Optional[pulumi.Input[pulumi.InputType['ServiceIdentityArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
partition_count: Optional[pulumi.Input[int]] = None,
primary_key: Optional[pulumi.Input[str]] = None,
public_network_access_enabled: Optional[pulumi.Input[bool]] = None,
query_keys: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServiceQueryKeyArgs']]]]] = None,
replica_count: Optional[pulumi.Input[int]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
secondary_key: Optional[pulumi.Input[str]] = None,
sku: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'Service':
"""
Get an existing Service resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] allowed_ips: A list of IPv4 addresses or CIDRs that are allowed access to the search service endpoint.
:param pulumi.Input[pulumi.InputType['ServiceIdentityArgs']] identity: An `identity` block as defined below.
:param pulumi.Input[str] location: The Azure Region where the Search Service should exist. Changing this forces a new Search Service to be created.
:param pulumi.Input[str] name: The Name which should be used for this Search Service. Changing this forces a new Search Service to be created.
:param pulumi.Input[int] partition_count: The number of partitions which should be created.
:param pulumi.Input[str] primary_key: The Primary Key used for Search Service Administration.
:param pulumi.Input[bool] public_network_access_enabled: Whether or not public network access is allowed for this resource. Defaults to `true`.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServiceQueryKeyArgs']]]] query_keys: A `query_keys` block as defined below.
:param pulumi.Input[int] replica_count: The number of replica's which should be created.
:param pulumi.Input[str] resource_group_name: The name of the Resource Group where the Search Service should exist. Changing this forces a new Search Service to be created.
:param pulumi.Input[str] secondary_key: The Secondary Key used for Search Service Administration.
:param pulumi.Input[str] sku: The SKU which should be used for this Search Service. Possible values are `basic`, `free`, `standard`, `standard2`, `standard3`, `storage_optimized_l1` and `storage_optimized_l2`. Changing this forces a new Search Service to be created.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags which should be assigned to the Search Service.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _ServiceState.__new__(_ServiceState)
__props__.__dict__["allowed_ips"] = allowed_ips
__props__.__dict__["identity"] = identity
__props__.__dict__["location"] = location
__props__.__dict__["name"] = name
__props__.__dict__["partition_count"] = partition_count
__props__.__dict__["primary_key"] = primary_key
__props__.__dict__["public_network_access_enabled"] = public_network_access_enabled
__props__.__dict__["query_keys"] = query_keys
__props__.__dict__["replica_count"] = replica_count
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["secondary_key"] = secondary_key
__props__.__dict__["sku"] = sku
__props__.__dict__["tags"] = tags
return Service(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="allowedIps")
def allowed_ips(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
A list of IPv4 addresses or CIDRs that are allowed access to the search service endpoint.
"""
return pulumi.get(self, "allowed_ips")
@property
@pulumi.getter
def identity(self) -> pulumi.Output[Optional['outputs.ServiceIdentity']]:
"""
An `identity` block as defined below.
"""
return pulumi.get(self, "identity")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
    """The Azure Region where the Search Service should exist. Changing this forces a new Search Service to be created."""
    return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
    """The Name which should be used for this Search Service. Changing this forces a new Search Service to be created."""
    return pulumi.get(self, "name")
@property
@pulumi.getter(name="partitionCount")
def partition_count(self) -> pulumi.Output[int]:
    """The number of partitions which should be created."""
    return pulumi.get(self, "partition_count")
@property
@pulumi.getter(name="primaryKey")
def primary_key(self) -> pulumi.Output[str]:
    """The Primary Key used for Search Service Administration."""
    return pulumi.get(self, "primary_key")
@property
@pulumi.getter(name="publicNetworkAccessEnabled")
def public_network_access_enabled(self) -> pulumi.Output[Optional[bool]]:
    """Whether or not public network access is allowed for this resource. Defaults to `true`."""
    return pulumi.get(self, "public_network_access_enabled")
@property
@pulumi.getter(name="queryKeys")
def query_keys(self) -> pulumi.Output[Sequence['outputs.ServiceQueryKey']]:
    """A `query_keys` block as defined below."""
    return pulumi.get(self, "query_keys")
@property
@pulumi.getter(name="replicaCount")
def replica_count(self) -> pulumi.Output[int]:
    """The number of replicas which should be created."""
    return pulumi.get(self, "replica_count")
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Output[str]:
    """The name of the Resource Group where the Search Service should exist. Changing this forces a new Search Service to be created."""
    return pulumi.get(self, "resource_group_name")
@property
@pulumi.getter(name="secondaryKey")
def secondary_key(self) -> pulumi.Output[str]:
    """The Secondary Key used for Search Service Administration."""
    return pulumi.get(self, "secondary_key")
@property
@pulumi.getter
def sku(self) -> pulumi.Output[str]:
    """The SKU which should be used for this Search Service. Possible values are `basic`, `free`, `standard`, `standard2`, `standard3`, `storage_optimized_l1` and `storage_optimized_l2`. Changing this forces a new Search Service to be created."""
    return pulumi.get(self, "sku")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
    """A mapping of tags which should be assigned to the Search Service."""
    return pulumi.get(self, "tags")
| 47.510699
| 274
| 0.662543
| 4,031
| 33,305
| 5.275862
| 0.055569
| 0.095171
| 0.085767
| 0.03931
| 0.909249
| 0.893168
| 0.881036
| 0.864062
| 0.849485
| 0.822636
| 0
| 0.00408
| 0.234649
| 33,305
| 700
| 275
| 47.578571
| 0.830247
| 0.354962
| 0
| 0.758706
| 1
| 0
| 0.106265
| 0.021857
| 0
| 0
| 0
| 0
| 0
| 1
| 0.164179
| false
| 0.002488
| 0.017413
| 0
| 0.281095
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
185c4b89d4fcd4fce137d5602176b546affa0461
| 30,789
|
py
|
Python
|
authentication_service/api/stubs.py
|
hedleyroos/core-authentication-service
|
4a59430cddf23c58322230dd1fe70998fcc46736
|
[
"BSD-3-Clause"
] | 1
|
2018-03-15T12:49:05.000Z
|
2018-03-15T12:49:05.000Z
|
authentication_service/api/stubs.py
|
hedleyroos/core-authentication-service
|
4a59430cddf23c58322230dd1fe70998fcc46736
|
[
"BSD-3-Clause"
] | 215
|
2017-12-07T09:11:52.000Z
|
2022-03-11T23:19:59.000Z
|
authentication_service/api/stubs.py
|
hedleyroos/core-authentication-service
|
4a59430cddf23c58322230dd1fe70998fcc46736
|
[
"BSD-3-Clause"
] | 1
|
2021-08-17T12:05:32.000Z
|
2021-08-17T12:05:32.000Z
|
"""
Do not modify this file. It is generated from the Swagger specification.
"""
import json
from apitools.datagenerator import DataGenerator
import authentication_service.api.schemas as schemas
class AbstractStubClass(object):
    """
    Implementations need to be derived from this class.

    One static method per Swagger operation; every method here raises
    NotImplementedError and must be overridden by a concrete subclass.
    The ``-- Synchronisation point for meld`` comments are markers used by
    the code generator's merge tooling — keep them intact.
    """

    # client_list -- Synchronisation point for meld
    @staticmethod
    def client_list(request, offset=None, limit=None, client_ids=None, client_token_id=None, *args, **kwargs):
        """
        :param request: An HttpRequest
        :param offset: (optional) An optional query parameter specifying the offset in the result set to start from.
        :type offset: integer
        :param limit: (optional) An optional query parameter to limit the number of results returned.
        :type limit: integer
        :param client_ids: (optional) An optional list of client ids
        :type client_ids: array
        :param client_token_id: (optional) An optional client id to filter on. This is not the primary key.
        :type client_token_id: string
        """
        raise NotImplementedError()

    # client_read -- Synchronisation point for meld
    @staticmethod
    def client_read(request, client_id, *args, **kwargs):
        """
        :param request: An HttpRequest
        :param client_id: A string value identifying the client
        :type client_id: string
        """
        raise NotImplementedError()

    # country_list -- Synchronisation point for meld
    @staticmethod
    def country_list(request, offset=None, limit=None, country_codes=None, *args, **kwargs):
        """
        :param request: An HttpRequest
        :param offset: (optional) An optional query parameter specifying the offset in the result set to start from.
        :type offset: integer
        :param limit: (optional) An optional query parameter to limit the number of results returned.
        :type limit: integer
        :param country_codes: (optional) An optional list of country codes
        :type country_codes: array
        """
        raise NotImplementedError()

    # country_read -- Synchronisation point for meld
    @staticmethod
    def country_read(request, country_code, *args, **kwargs):
        """
        :param request: An HttpRequest
        :param country_code: A string value identifying the country
        :type country_code: string
        """
        raise NotImplementedError()

    # invitation_send -- Synchronisation point for meld
    @staticmethod
    def invitation_send(request, invitation_id, language=None, *args, **kwargs):
        """
        :param request: An HttpRequest
        :param invitation_id:
        :type invitation_id: string
        :param language: (optional)
        :type language: string
        """
        raise NotImplementedError()

    # purge_expired_invitations -- Synchronisation point for meld
    @staticmethod
    def purge_expired_invitations(request, cutoff_date=None, *args, **kwargs):
        """
        :param request: An HttpRequest
        :param cutoff_date: (optional) An optional cutoff date to purge invites before this date
        :type cutoff_date: string
        """
        raise NotImplementedError()

    # organisation_list -- Synchronisation point for meld
    @staticmethod
    def organisation_list(request, offset=None, limit=None, organisation_ids=None, *args, **kwargs):
        """
        :param request: An HttpRequest
        :param offset: (optional) An optional query parameter specifying the offset in the result set to start from.
        :type offset: integer
        :param limit: (optional) An optional query parameter to limit the number of results returned.
        :type limit: integer
        :param organisation_ids: (optional) An optional list of organisation ids
        :type organisation_ids: array
        """
        raise NotImplementedError()

    # organisation_create -- Synchronisation point for meld
    @staticmethod
    def organisation_create(request, body, *args, **kwargs):
        """
        :param request: An HttpRequest
        :param body: A dictionary containing the parsed and validated body
        :type body: dict
        """
        raise NotImplementedError()

    # organisation_delete -- Synchronisation point for meld
    @staticmethod
    def organisation_delete(request, organisation_id, *args, **kwargs):
        """
        :param request: An HttpRequest
        :param organisation_id: An integer identifying an organisation a user belongs to
        :type organisation_id: integer
        """
        raise NotImplementedError()

    # organisation_read -- Synchronisation point for meld
    @staticmethod
    def organisation_read(request, organisation_id, *args, **kwargs):
        """
        :param request: An HttpRequest
        :param organisation_id: An integer identifying an organisation a user belongs to
        :type organisation_id: integer
        """
        raise NotImplementedError()

    # organisation_update -- Synchronisation point for meld
    @staticmethod
    def organisation_update(request, body, organisation_id, *args, **kwargs):
        """
        :param request: An HttpRequest
        :param body: A dictionary containing the parsed and validated body
        :type body: dict
        :param organisation_id: An integer identifying an organisation a user belongs to
        :type organisation_id: integer
        """
        raise NotImplementedError()

    # request_user_deletion -- Synchronisation point for meld
    @staticmethod
    def request_user_deletion(request, body, *args, **kwargs):
        """
        :param request: An HttpRequest
        :param body: A dictionary containing the parsed and validated body
        :type body: dict
        """
        raise NotImplementedError()

    # user_list -- Synchronisation point for meld
    @staticmethod
    def user_list(request, offset=None, limit=None, birth_date=None, country=None, date_joined=None, email=None, email_verified=None, first_name=None, gender=None, is_active=None, last_login=None, last_name=None, msisdn=None, msisdn_verified=None, nickname=None, organisation_id=None, updated_at=None, username=None, q=None, tfa_enabled=None, has_organisation=None, order_by=None, user_ids=None, site_ids=None, *args, **kwargs):
        """
        :param request: An HttpRequest
        :param offset: (optional) An optional query parameter specifying the offset in the result set to start from.
        :type offset: integer
        :param limit: (optional) An optional query parameter to limit the number of results returned.
        :type limit: integer
        :param birth_date: (optional) An optional birth_date range filter
        :type birth_date: string
        :param country: (optional) An optional country filter
        :type country: string
        :param date_joined: (optional) An optional date joined range filter
        :type date_joined: string
        :param email: (optional) An optional case insensitive email inner match filter
        :type email: string
        :param email_verified: (optional) An optional email verified filter
        :type email_verified: boolean
        :param first_name: (optional) An optional case insensitive first name inner match filter
        :type first_name: string
        :param gender: (optional) An optional gender filter
        :type gender: string
        :param is_active: (optional) An optional is_active filter
        :type is_active: boolean
        :param last_login: (optional) An optional last login range filter
        :type last_login: string
        :param last_name: (optional) An optional case insensitive last name inner match filter
        :type last_name: string
        :param msisdn: (optional) An optional case insensitive MSISDN inner match filter
        :type msisdn: string
        :param msisdn_verified: (optional) An optional MSISDN verified filter
        :type msisdn_verified: boolean
        :param nickname: (optional) An optional case insensitive nickname inner match filter
        :type nickname: string
        :param organisation_id: (optional) An optional filter on the organisation id
        :type organisation_id: integer
        :param updated_at: (optional) An optional updated_at range filter
        :type updated_at: string
        :param username: (optional) An optional case insensitive username inner match filter
        :type username: string
        :param q: (optional) An optional case insensitive inner match filter across all searchable text fields
        :type q: string
        :param tfa_enabled: (optional) An optional filter based on whether a user has 2FA enabled or not
        :type tfa_enabled: boolean
        :param has_organisation: (optional) An optional filter based on whether a user belongs to an organisation or not
        :type has_organisation: boolean
        :param order_by: (optional) Fields and directions to order by, e.g. "-created_at,username". Add "-" in front of a field name to indicate descending order.
        :type order_by: array
        :param user_ids: (optional) An optional list of user ids
        :type user_ids: array
        :param site_ids: (optional) An optional list of site ids
        :type site_ids: array
        """
        raise NotImplementedError()

    # user_delete -- Synchronisation point for meld
    @staticmethod
    def user_delete(request, user_id, *args, **kwargs):
        """
        :param request: An HttpRequest
        :param user_id: A UUID value identifying the user.
        :type user_id: string
        """
        raise NotImplementedError()

    # user_read -- Synchronisation point for meld
    @staticmethod
    def user_read(request, user_id, *args, **kwargs):
        """
        :param request: An HttpRequest
        :param user_id: A UUID value identifying the user.
        :type user_id: string
        """
        raise NotImplementedError()

    # user_update -- Synchronisation point for meld
    @staticmethod
    def user_update(request, body, user_id, *args, **kwargs):
        """
        :param request: An HttpRequest
        :param body: A dictionary containing the parsed and validated body
        :type body: dict
        :param user_id: A UUID value identifying the user.
        :type user_id: string
        """
        raise NotImplementedError()
class MockedStubClass(AbstractStubClass):
    """
    Provides a mocked implementation of the AbstractStubClass.

    Every method returns random data produced by ``DataGenerator`` from the
    operation's response schema (either an inline JSON schema or one of the
    dicts in ``schemas``). Each method normalises the schema the same way:
    it defaults a missing top-level "type" to "object" and, for arrays, a
    missing item "type" to "object".

    NOTE(review): methods that take their schema from ``schemas`` mutate
    that shared module-level dict in place when defaulting "type" keys —
    presumably harmless if those schemas already carry "type", but worth
    confirming against ``schemas``.
    """

    # Shared random-data generator used by every mocked endpoint.
    GENERATOR = DataGenerator()

    @staticmethod
    def client_list(request, offset=None, limit=None, client_ids=None, client_token_id=None, *args, **kwargs):
        """
        :param request: An HttpRequest
        :param offset: (optional) An optional query parameter specifying the offset in the result set to start from.
        :type offset: integer
        :param limit: (optional) An optional query parameter to limit the number of results returned.
        :type limit: integer
        :param client_ids: (optional) An optional list of client ids
        :type client_ids: array
        :param client_token_id: (optional) An optional client id to filter on. This is not the primary key.
        :type client_token_id: string
        """
        response_schema = json.loads("""{
    "items": {
        "properties": {
            "_post_logout_redirect_uris": {
                "description": "New-line delimited list of post-logout redirect URIs",
                "type": "string"
            },
            "_redirect_uris": {
                "description": "New-line delimited list of redirect URIs",
                "type": "string"
            },
            "client_id": {
                "description": "",
                "type": "string"
            },
            "contact_email": {
                "description": "",
                "type": "string"
            },
            "id": {
                "description": "",
                "type": "integer"
            },
            "logo": {
                "description": "",
                "format": "uri",
                "type": "string"
            },
            "name": {
                "description": "",
                "type": "string"
            },
            "require_consent": {
                "description": "If disabled, the Server will NEVER ask the user for consent.",
                "type": "boolean"
            },
            "response_type": {
                "description": "",
                "type": "string"
            },
            "reuse_consent": {
                "description": "If enabled, the Server will save the user consent given to a specific client, so that user won't be prompted for the same authorization multiple times.",
                "type": "boolean"
            },
            "terms_url": {
                "description": "External reference to the privacy policy of the client.",
                "type": "string"
            },
            "website_url": {
                "description": "",
                "type": "string"
            }
        },
        "required": [
            "id",
            "client_id",
            "response_type"
        ],
        "type": "object",
        "x-scope": [
            ""
        ]
    },
    "type": "array"
}""")
        # Default missing "type" keys so the generator always has a type to work with.
        if "type" not in response_schema:
            response_schema["type"] = "object"
        if response_schema["type"] == "array" and "type" not in response_schema["items"]:
            response_schema["items"]["type"] = "object"
        return MockedStubClass.GENERATOR.random_value(response_schema)

    @staticmethod
    def client_read(request, client_id, *args, **kwargs):
        """
        :param request: An HttpRequest
        :param client_id: A string value identifying the client
        :type client_id: string
        """
        response_schema = schemas.client
        if "type" not in response_schema:
            response_schema["type"] = "object"
        if response_schema["type"] == "array" and "type" not in response_schema["items"]:
            response_schema["items"]["type"] = "object"
        return MockedStubClass.GENERATOR.random_value(response_schema)

    @staticmethod
    def country_list(request, offset=None, limit=None, country_codes=None, *args, **kwargs):
        """
        :param request: An HttpRequest
        :param offset: (optional) An optional query parameter specifying the offset in the result set to start from.
        :type offset: integer
        :param limit: (optional) An optional query parameter to limit the number of results returned.
        :type limit: integer
        :param country_codes: (optional) An optional list of country codes
        :type country_codes: array
        """
        response_schema = json.loads("""{
    "items": {
        "properties": {
            "code": {
                "maxLength": 2,
                "minLength": 2,
                "type": "string"
            },
            "name": {
                "maxLength": 100,
                "type": "string"
            }
        },
        "required": [
            "code",
            "name"
        ],
        "type": "object",
        "x-scope": [
            ""
        ]
    },
    "type": "array"
}""")
        if "type" not in response_schema:
            response_schema["type"] = "object"
        if response_schema["type"] == "array" and "type" not in response_schema["items"]:
            response_schema["items"]["type"] = "object"
        return MockedStubClass.GENERATOR.random_value(response_schema)

    @staticmethod
    def country_read(request, country_code, *args, **kwargs):
        """
        :param request: An HttpRequest
        :param country_code: A string value identifying the country
        :type country_code: string
        """
        response_schema = schemas.country
        if "type" not in response_schema:
            response_schema["type"] = "object"
        if response_schema["type"] == "array" and "type" not in response_schema["items"]:
            response_schema["items"]["type"] = "object"
        return MockedStubClass.GENERATOR.random_value(response_schema)

    @staticmethod
    def invitation_send(request, invitation_id, language=None, *args, **kwargs):
        """
        :param request: An HttpRequest
        :param invitation_id:
        :type invitation_id: string
        :param language: (optional)
        :type language: string
        """
        # __UNSPECIFIED__ is the generator's placeholder for operations
        # whose response body is not defined in the Swagger spec.
        response_schema = schemas.__UNSPECIFIED__
        if "type" not in response_schema:
            response_schema["type"] = "object"
        if response_schema["type"] == "array" and "type" not in response_schema["items"]:
            response_schema["items"]["type"] = "object"
        return MockedStubClass.GENERATOR.random_value(response_schema)

    @staticmethod
    def purge_expired_invitations(request, cutoff_date=None, *args, **kwargs):
        """
        :param request: An HttpRequest
        :param cutoff_date: (optional) An optional cutoff date to purge invites before this date
        :type cutoff_date: string
        """
        response_schema = schemas.__UNSPECIFIED__
        if "type" not in response_schema:
            response_schema["type"] = "object"
        if response_schema["type"] == "array" and "type" not in response_schema["items"]:
            response_schema["items"]["type"] = "object"
        return MockedStubClass.GENERATOR.random_value(response_schema)

    @staticmethod
    def organisation_list(request, offset=None, limit=None, organisation_ids=None, *args, **kwargs):
        """
        :param request: An HttpRequest
        :param offset: (optional) An optional query parameter specifying the offset in the result set to start from.
        :type offset: integer
        :param limit: (optional) An optional query parameter to limit the number of results returned.
        :type limit: integer
        :param organisation_ids: (optional) An optional list of organisation ids
        :type organisation_ids: array
        """
        response_schema = json.loads("""{
    "items": {
        "properties": {
            "created_at": {
                "format": "date-time",
                "readOnly": true,
                "type": "string"
            },
            "description": {
                "type": "string"
            },
            "id": {
                "type": "integer"
            },
            "name": {
                "type": "string"
            },
            "updated_at": {
                "format": "date-time",
                "readOnly": true,
                "type": "string"
            }
        },
        "required": [
            "id",
            "name",
            "description",
            "created_at",
            "updated_at"
        ],
        "type": "object",
        "x-scope": [
            ""
        ]
    },
    "type": "array"
}""")
        if "type" not in response_schema:
            response_schema["type"] = "object"
        if response_schema["type"] == "array" and "type" not in response_schema["items"]:
            response_schema["items"]["type"] = "object"
        return MockedStubClass.GENERATOR.random_value(response_schema)

    @staticmethod
    def organisation_create(request, body, *args, **kwargs):
        """
        :param request: An HttpRequest
        :param body: A dictionary containing the parsed and validated body
        :type body: dict
        """
        response_schema = schemas.organisation
        if "type" not in response_schema:
            response_schema["type"] = "object"
        if response_schema["type"] == "array" and "type" not in response_schema["items"]:
            response_schema["items"]["type"] = "object"
        return MockedStubClass.GENERATOR.random_value(response_schema)

    @staticmethod
    def organisation_delete(request, organisation_id, *args, **kwargs):
        """
        :param request: An HttpRequest
        :param organisation_id: An integer identifying an organisation a user belongs to
        :type organisation_id: integer
        """
        response_schema = schemas.__UNSPECIFIED__
        if "type" not in response_schema:
            response_schema["type"] = "object"
        if response_schema["type"] == "array" and "type" not in response_schema["items"]:
            response_schema["items"]["type"] = "object"
        return MockedStubClass.GENERATOR.random_value(response_schema)

    @staticmethod
    def organisation_read(request, organisation_id, *args, **kwargs):
        """
        :param request: An HttpRequest
        :param organisation_id: An integer identifying an organisation a user belongs to
        :type organisation_id: integer
        """
        response_schema = schemas.organisation
        if "type" not in response_schema:
            response_schema["type"] = "object"
        if response_schema["type"] == "array" and "type" not in response_schema["items"]:
            response_schema["items"]["type"] = "object"
        return MockedStubClass.GENERATOR.random_value(response_schema)

    @staticmethod
    def organisation_update(request, body, organisation_id, *args, **kwargs):
        """
        :param request: An HttpRequest
        :param body: A dictionary containing the parsed and validated body
        :type body: dict
        :param organisation_id: An integer identifying an organisation a user belongs to
        :type organisation_id: integer
        """
        response_schema = schemas.organisation
        if "type" not in response_schema:
            response_schema["type"] = "object"
        if response_schema["type"] == "array" and "type" not in response_schema["items"]:
            response_schema["items"]["type"] = "object"
        return MockedStubClass.GENERATOR.random_value(response_schema)

    @staticmethod
    def request_user_deletion(request, body, *args, **kwargs):
        """
        :param request: An HttpRequest
        :param body: A dictionary containing the parsed and validated body
        :type body: dict
        """
        response_schema = schemas.__UNSPECIFIED__
        if "type" not in response_schema:
            response_schema["type"] = "object"
        if response_schema["type"] == "array" and "type" not in response_schema["items"]:
            response_schema["items"]["type"] = "object"
        return MockedStubClass.GENERATOR.random_value(response_schema)

    @staticmethod
    def user_list(request, offset=None, limit=None, birth_date=None, country=None, date_joined=None, email=None, email_verified=None, first_name=None, gender=None, is_active=None, last_login=None, last_name=None, msisdn=None, msisdn_verified=None, nickname=None, organisation_id=None, updated_at=None, username=None, q=None, tfa_enabled=None, has_organisation=None, order_by=None, user_ids=None, site_ids=None, *args, **kwargs):
        """
        :param request: An HttpRequest
        :param offset: (optional) An optional query parameter specifying the offset in the result set to start from.
        :type offset: integer
        :param limit: (optional) An optional query parameter to limit the number of results returned.
        :type limit: integer
        :param birth_date: (optional) An optional birth_date range filter
        :type birth_date: string
        :param country: (optional) An optional country filter
        :type country: string
        :param date_joined: (optional) An optional date joined range filter
        :type date_joined: string
        :param email: (optional) An optional case insensitive email inner match filter
        :type email: string
        :param email_verified: (optional) An optional email verified filter
        :type email_verified: boolean
        :param first_name: (optional) An optional case insensitive first name inner match filter
        :type first_name: string
        :param gender: (optional) An optional gender filter
        :type gender: string
        :param is_active: (optional) An optional is_active filter
        :type is_active: boolean
        :param last_login: (optional) An optional last login range filter
        :type last_login: string
        :param last_name: (optional) An optional case insensitive last name inner match filter
        :type last_name: string
        :param msisdn: (optional) An optional case insensitive MSISDN inner match filter
        :type msisdn: string
        :param msisdn_verified: (optional) An optional MSISDN verified filter
        :type msisdn_verified: boolean
        :param nickname: (optional) An optional case insensitive nickname inner match filter
        :type nickname: string
        :param organisation_id: (optional) An optional filter on the organisation id
        :type organisation_id: integer
        :param updated_at: (optional) An optional updated_at range filter
        :type updated_at: string
        :param username: (optional) An optional case insensitive username inner match filter
        :type username: string
        :param q: (optional) An optional case insensitive inner match filter across all searchable text fields
        :type q: string
        :param tfa_enabled: (optional) An optional filter based on whether a user has 2FA enabled or not
        :type tfa_enabled: boolean
        :param has_organisation: (optional) An optional filter based on whether a user belongs to an organisation or not
        :type has_organisation: boolean
        :param order_by: (optional) Fields and directions to order by, e.g. "-created_at,username". Add "-" in front of a field name to indicate descending order.
        :type order_by: array
        :param user_ids: (optional) An optional list of user ids
        :type user_ids: array
        :param site_ids: (optional) An optional list of site ids
        :type site_ids: array
        """
        response_schema = json.loads("""{
    "items": {
        "properties": {
            "avatar": {
                "format": "uri",
                "type": "string"
            },
            "birth_date": {
                "format": "date",
                "type": "string"
            },
            "country_code": {
                "maxLength": 2,
                "minLength": 2,
                "type": "string"
            },
            "created_at": {
                "format": "date-time",
                "readOnly": true,
                "type": "string"
            },
            "date_joined": {
                "description": "",
                "format": "date-time",
                "readOnly": true,
                "type": "string"
            },
            "email": {
                "description": "",
                "format": "email",
                "type": "string"
            },
            "email_verified": {
                "type": "boolean"
            },
            "first_name": {
                "description": "",
                "type": "string"
            },
            "gender": {
                "type": "string"
            },
            "id": {
                "description": "A UUID identifying the user",
                "format": "uuid",
                "readOnly": true,
                "type": "string"
            },
            "is_active": {
                "description": "Designates whether this user should be treated as active. Deselect this instead of deleting accounts.",
                "type": "boolean"
            },
            "last_login": {
                "description": "",
                "format": "date-time",
                "readOnly": true,
                "type": "string"
            },
            "last_name": {
                "description": "",
                "type": "string"
            },
            "msisdn": {
                "maxLength": 15,
                "type": "string"
            },
            "msisdn_verified": {
                "type": "boolean"
            },
            "organisation_id": {
                "readOnly": true,
                "type": "integer"
            },
            "updated_at": {
                "format": "date-time",
                "readOnly": true,
                "type": "string"
            },
            "username": {
                "description": "Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.",
                "readOnly": true,
                "type": "string"
            }
        },
        "required": [
            "id",
            "username",
            "is_active",
            "date_joined",
            "created_at",
            "updated_at"
        ],
        "type": "object",
        "x-scope": [
            ""
        ]
    },
    "type": "array"
}""")
        if "type" not in response_schema:
            response_schema["type"] = "object"
        if response_schema["type"] == "array" and "type" not in response_schema["items"]:
            response_schema["items"]["type"] = "object"
        return MockedStubClass.GENERATOR.random_value(response_schema)

    @staticmethod
    def user_delete(request, user_id, *args, **kwargs):
        """
        :param request: An HttpRequest
        :param user_id: A UUID value identifying the user.
        :type user_id: string
        """
        response_schema = schemas.__UNSPECIFIED__
        if "type" not in response_schema:
            response_schema["type"] = "object"
        if response_schema["type"] == "array" and "type" not in response_schema["items"]:
            response_schema["items"]["type"] = "object"
        return MockedStubClass.GENERATOR.random_value(response_schema)

    @staticmethod
    def user_read(request, user_id, *args, **kwargs):
        """
        :param request: An HttpRequest
        :param user_id: A UUID value identifying the user.
        :type user_id: string
        """
        response_schema = schemas.user
        if "type" not in response_schema:
            response_schema["type"] = "object"
        if response_schema["type"] == "array" and "type" not in response_schema["items"]:
            response_schema["items"]["type"] = "object"
        return MockedStubClass.GENERATOR.random_value(response_schema)

    @staticmethod
    def user_update(request, body, user_id, *args, **kwargs):
        """
        :param request: An HttpRequest
        :param body: A dictionary containing the parsed and validated body
        :type body: dict
        :param user_id: A UUID value identifying the user.
        :type user_id: string
        """
        response_schema = schemas.user
        if "type" not in response_schema:
            response_schema["type"] = "object"
        if response_schema["type"] == "array" and "type" not in response_schema["items"]:
            response_schema["items"]["type"] = "object"
        return MockedStubClass.GENERATOR.random_value(response_schema)
| 38.973418
| 428
| 0.598493
| 3,235
| 30,789
| 5.564142
| 0.07017
| 0.087111
| 0.068
| 0.039111
| 0.888778
| 0.887611
| 0.878667
| 0.850167
| 0.835722
| 0.825722
| 0
| 0.000653
| 0.304167
| 30,789
| 789
| 429
| 39.022814
| 0.839526
| 0.385203
| 0
| 0.736973
| 1
| 0.004963
| 0.411236
| 0.00175
| 0
| 0
| 0
| 0
| 0
| 1
| 0.079404
| false
| 0
| 0.007444
| 0
| 0.133995
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
a12d5cf97d06149a69f27cac653005b17e85655a
| 15,149
|
py
|
Python
|
python/cuxfilter/charts/bokeh/plots.py
|
kkraus14/cuxfilter
|
99d7cf67802270d24db0051162df4feb798f2e15
|
[
"Apache-2.0"
] | null | null | null |
python/cuxfilter/charts/bokeh/plots.py
|
kkraus14/cuxfilter
|
99d7cf67802270d24db0051162df4feb798f2e15
|
[
"Apache-2.0"
] | null | null | null |
python/cuxfilter/charts/bokeh/plots.py
|
kkraus14/cuxfilter
|
99d7cf67802270d24db0051162df4feb798f2e15
|
[
"Apache-2.0"
] | null | null | null |
from ..core.aggregate import BaseAggregateChart
import numpy as np
from bokeh import events
from bokeh.plotting import figure
from bokeh.models import ColumnDataSource
class Bar(BaseAggregateChart):
    """
    Description:
        Vertical bar chart backed by a bokeh ``figure`` + ``vbar`` glyph.
    """

    # Bokeh event class bound to chart resets — TODO confirm where the
    # base class wires this up; only the assignment is visible here.
    reset_event = events.Reset
    # ColumnDataSource column name holding bar heights (vbar's ``top``).
    data_y_axis = "top"
    # ColumnDataSource column name holding bar positions (vbar's ``x``).
    data_x_axis = "x"
def format_source_data(self, source_dict, patch_update=False):
    """
    Load ``source_dict`` into the chart's ColumnDataSource.

    Parameters:
    -----------
    source_dict: {'X': [], 'Y': []}
    patch_update: when not False, patch only the y column of the
        existing source instead of rebuilding it (and the backup).
    """
    y_values = np.array(source_dict["Y"])
    if patch_update is False:
        # Full rebuild: new source plus a dataframe backup used by reset.
        self.source = ColumnDataSource(
            {
                self.data_x_axis: np.array(source_dict["X"]),
                self.data_y_axis: y_values,
            }
        )
        self.source_backup = self.source.to_df()
    else:
        # Patch the entire y column in place.
        self.source.patch(
            {self.data_y_axis: [(slice(len(source_dict["Y"])), y_values)]}
        )
def get_source_y_axis(self):
    """
    Return the y-axis column values from the chart source,
    or the (unset) source itself when no source exists yet.
    """
    if self.source is None:
        return self.source
    return self.source.data[self.data_y_axis]
def generate_chart(self):
    """
    Build the bokeh figure and its vbar glyph from the current source.
    """
    # Categorical x axes need an explicit range; otherwise let bokeh
    # auto-range by passing None.
    x_range = None
    if self.x_dtype == "object":
        x_range = self.source.data[self.data_x_axis]
    self.chart = figure(
        title=self.title,
        x_range=x_range,
        tools="pan, wheel_zoom, reset",
        active_scroll="wheel_zoom",
        active_drag="pan",
    )
    # Assemble the vbar arguments once; add color only when set, so the
    # duplicate-keyword error semantics match passing it positionally.
    vbar_kwargs = {
        "x": self.data_x_axis,
        "top": self.data_y_axis,
        "width": 0.9,
        "source": self.source,
    }
    if self.color is not None:
        vbar_kwargs["color"] = self.color
    self.sub_chart = self.chart.vbar(
        **vbar_kwargs, **self.library_specific_params
    )
    self.chart.xaxis.axis_label = self.x
    if self.x_axis_tick_formatter:
        self.chart.xaxis.formatter = self.x_axis_tick_formatter
    if self.y_axis_tick_formatter:
        self.chart.yaxis.formatter = self.y_axis_tick_formatter
    if self.autoscaling is False:
        # Pin the top of the y range to the current data maximum.
        self.chart.y_range.end = self.source.data[self.data_y_axis].max()
    self.chart.yaxis.axis_label = (
        self.y if self.y != self.x else self.aggregate_fn
    )
def update_dimensions(self, width=None, height=None):
    """
    Resize the underlying bokeh plot; a None dimension is left unchanged.
    """
    for attr, value in (("plot_width", width), ("plot_height", height)):
        if value is not None:
            setattr(self.chart, attr, value)
def apply_mappers(self):
    """
    Apply dict mappers (tick-label overrides) to the x and y axes
    if provided.
    """
    axis_maps = (("xaxis", self.x_label_map), ("yaxis", self.y_label_map))
    for axis_name, label_map in axis_maps:
        if label_map is not None:
            getattr(self.chart, axis_name).major_label_overrides = label_map
def reload_chart(self, data, patch_update=True):
    """
    Recompute the chart source from ``data``; by default this
    patch-updates the existing source rather than rebuilding it.
    """
    self.calculate_source(data, patch_update=patch_update)
def reset_chart(self, data: np.ndarray = np.array([])):
    """
    Reset the y-axis column of the chart source.

    If ``data`` is empty, restore the y column from ``self.source_backup``;
    otherwise patch ``data`` (truncated to the x-axis length) in as the
    new y values.

    Parameters:
    -----------
    data: np.ndarray --> update self.data_y_axis in self.source;
        empty array means "restore from backup".
    """
    # NOTE: the mutable default array is safe here because ``data`` is
    # only reassigned below, never mutated in place.
    if data.size == 0:
        data = self.source_backup[self.data_y_axis]
    # Trim to the x-axis length so the patch stays aligned with the source.
    x_axis_len = self.source.data[self.data_x_axis].size
    data = data[:x_axis_len]
    patch_dict = {self.data_y_axis: [(slice(data.size), data)]}
    self.source.patch(patch_dict)
def apply_theme(self, properties_dict):
"""
apply thematic changes to the chart based on the input
properties dictionary.
"""
if self.color is None:
self.sub_chart.glyph.fill_color = properties_dict["chart_color"][
"color"
]
self.sub_chart.glyph.line_color = properties_dict["chart_color"][
"color"
]
self.chart.xgrid.grid_line_color = properties_dict["agg_charts_grids"][
"xgrid"
]
self.chart.ygrid.grid_line_color = properties_dict["agg_charts_grids"][
"ygrid"
]
# title
self.chart.title.text_color = properties_dict["title"]["text_color"]
self.chart.title.text_font = properties_dict["title"]["text_font"]
self.chart.title.text_font_style = properties_dict["title"][
"text_font_style"
]
self.chart.title.text_font_size = properties_dict["title"][
"text_font_size"
]
# background, border, padding
self.chart.background_fill_color = properties_dict[
"background_fill_color"
]
self.chart.border_fill_color = properties_dict["border_fill_color"]
self.chart.min_border = properties_dict["min_border"]
self.chart.outline_line_width = properties_dict["outline_line_width"]
self.chart.outline_line_alpha = properties_dict["outline_line_alpha"]
self.chart.outline_line_color = properties_dict["outline_line_color"]
# x axis title
self.chart.xaxis.axis_label_text_font_style = properties_dict["xaxis"][
"axis_label_text_font_style"
]
self.chart.xaxis.axis_label_text_color = properties_dict["xaxis"][
"axis_label_text_color"
]
self.chart.xaxis.axis_label_standoff = properties_dict["xaxis"][
"axis_label_standoff"
]
self.chart.xaxis.major_label_text_color = properties_dict["xaxis"][
"major_label_text_color"
]
self.chart.xaxis.axis_line_width = properties_dict["xaxis"][
"axis_line_width"
]
self.chart.xaxis.axis_line_color = properties_dict["xaxis"][
"axis_line_color"
]
# y axis title
self.chart.yaxis.axis_label_text_font_style = properties_dict["yaxis"][
"axis_label_text_font_style"
]
self.chart.yaxis.axis_label_text_color = properties_dict["yaxis"][
"axis_label_text_color"
]
self.chart.yaxis.axis_label_standoff = properties_dict["yaxis"][
"axis_label_standoff"
]
self.chart.yaxis.major_label_text_color = properties_dict["yaxis"][
"major_label_text_color"
]
self.chart.yaxis.axis_line_width = properties_dict["yaxis"][
"axis_line_width"
]
self.chart.yaxis.axis_line_color = properties_dict["yaxis"][
"axis_line_color"
]
# axis ticks
self.chart.axis.major_tick_line_color = properties_dict["axis"][
"major_tick_line_color"
]
self.chart.axis.minor_tick_line_color = properties_dict["axis"][
"minor_tick_line_color"
]
self.chart.axis.minor_tick_out = properties_dict["axis"][
"minor_tick_out"
]
self.chart.axis.major_tick_out = properties_dict["axis"][
"major_tick_out"
]
self.chart.axis.major_tick_in = properties_dict["axis"][
"major_tick_in"
]
# interactive slider
self.datatile_active_color = properties_dict["widgets"][
"datatile_active_color"
]
class Line(BaseAggregateChart):
    """
    Bokeh line chart for aggregated data: one x column plotted against
    one aggregated y column, both held in a shared ColumnDataSource.
    """

    reset_event = events.Reset
    # column names used inside the ColumnDataSource
    data_y_axis = "y"
    data_x_axis = "x"

    def format_source_data(self, source_dict, patch_update=False):
        """
        Build or patch the chart's ColumnDataSource.

        Parameters:
        -----------
        source_dict: {'X': [], 'Y': []}
        patch_update: when False, create a fresh ColumnDataSource and keep
            a DataFrame backup for resets; when True, patch only the y
            column of the existing source in place.
        """
        if patch_update is False:
            self.source = ColumnDataSource(
                {
                    self.data_x_axis: np.array(source_dict["X"]),
                    self.data_y_axis: np.array(source_dict["Y"]),
                }
            )
            self.source_backup = self.source.to_df()
        else:
            patch_dict = {
                self.data_y_axis: [
                    (slice(len(source_dict["Y"])), np.array(source_dict["Y"]))
                ]
            }
            self.source.patch(patch_dict)

    def get_source_y_axis(self):
        """
        Return the y-axis column of the current source, or None when no
        source has been set yet.
        """
        if self.source is not None:
            return self.source.data[self.data_y_axis]
        return self.source

    def generate_chart(self):
        """
        Create the bokeh figure and its line glyph from the current source.
        """
        self.chart = figure(
            title=self.title,
            # categorical x axes need the explicit category list as range
            x_range=(
                self.source.data[self.data_x_axis]
                if self.x_dtype == "object"
                else None
            ),
            tools="pan, wheel_zoom, reset",
            active_scroll="wheel_zoom",
            active_drag="pan",
        )
        if self.x_axis_tick_formatter:
            self.chart.xaxis.formatter = self.x_axis_tick_formatter
        if self.y_axis_tick_formatter:
            self.chart.yaxis.formatter = self.y_axis_tick_formatter
        if self.autoscaling is False:
            # pin the top of the y range to the current data maximum
            self.chart.y_range.end = self.source.data[self.data_y_axis].max()
        if self.color is None:
            self.sub_chart = self.chart.line(
                x=self.data_x_axis,
                y=self.data_y_axis,
                source=self.source,
                **self.library_specific_params,
            )
        else:
            self.sub_chart = self.chart.line(
                x=self.data_x_axis,
                y=self.data_y_axis,
                source=self.source,
                color=self.color,
                **self.library_specific_params,
            )

    def update_dimensions(self, width=None, height=None):
        """
        Resize the bokeh figure; unspecified dimensions keep their size.
        """
        if width is not None:
            self.chart.plot_width = width
        if height is not None:
            self.chart.plot_height = height

    def apply_mappers(self):
        """
        Apply dict mappers (tick-label overrides) to x and y axes
        if provided.
        """
        if self.x_label_map is not None:
            self.chart.xaxis.major_label_overrides = self.x_label_map
        if self.y_label_map is not None:
            self.chart.yaxis.major_label_overrides = self.y_label_map

    def reload_chart(self, data, patch_update=True):
        """
        Recompute the chart's source from ``data`` via calculate_source;
        with patch_update=True the existing source is patched in place.
        """
        self.calculate_source(data, patch_update=patch_update)

    def reset_chart(self, data: np.ndarray = np.array([])):
        """
        Reset the chart's y-axis column.

        Parameters:
        -----------
        data: 1-d array of new y values; when empty, the values saved in
            ``self.source_backup`` are restored instead.
        """
        if data.size == 0:
            data = self.source_backup[self.data_y_axis]
        # verifying length is same as x axis
        x_axis_len = self.source.data[self.data_x_axis].size
        data = data[:x_axis_len]
        patch_dict = {self.data_y_axis: [(slice(data.size), data)]}
        self.source.patch(patch_dict)

    def apply_theme(self, properties_dict):
        """
        Apply thematic changes to the chart based on the input
        properties dictionary.
        """
        if self.color is None:
            # only restyle the glyph when no explicit color was requested
            self.sub_chart.glyph.line_color = properties_dict[
                "chart_color"
            ]["color"]
        self.chart.xgrid.grid_line_color = properties_dict[
            "agg_charts_grids"
        ]["xgrid"]
        self.chart.ygrid.grid_line_color = properties_dict[
            "agg_charts_grids"
        ]["ygrid"]
        # title
        for prop in (
            "text_color",
            "text_font",
            "text_font_style",
            "text_font_size",
        ):
            setattr(self.chart.title, prop, properties_dict["title"][prop])
        # background, border, padding
        for prop in (
            "background_fill_color",
            "border_fill_color",
            "min_border",
            "outline_line_width",
            "outline_line_alpha",
            "outline_line_color",
        ):
            setattr(self.chart, prop, properties_dict[prop])
        # x and y axis titles/labels/lines
        axis_props = (
            "axis_label_text_font_style",
            "axis_label_text_color",
            "axis_label_standoff",
            "major_label_text_color",
            "axis_line_width",
            "axis_line_color",
        )
        for axis, key in (
            (self.chart.xaxis, "xaxis"),
            (self.chart.yaxis, "yaxis"),
        ):
            for prop in axis_props:
                setattr(axis, prop, properties_dict[key][prop])
        # axis ticks
        for prop in (
            "major_tick_line_color",
            "minor_tick_line_color",
            "minor_tick_out",
            "major_tick_out",
            "major_tick_in",
        ):
            setattr(self.chart.axis, prop, properties_dict["axis"][prop])
        # interactive slider
        self.datatile_active_color = properties_dict["widgets"][
            "datatile_active_color"
        ]
| 33.076419
| 79
| 0.575616
| 1,762
| 15,149
| 4.631101
| 0.077185
| 0.089338
| 0.076838
| 0.028676
| 0.973775
| 0.970956
| 0.964338
| 0.960172
| 0.960172
| 0.945711
| 0
| 0.000779
| 0.321935
| 15,149
| 457
| 80
| 33.148797
| 0.793614
| 0.072018
| 0
| 0.759878
| 0
| 0
| 0.105139
| 0.032828
| 0
| 0
| 0
| 0
| 0
| 1
| 0.048632
| false
| 0
| 0.015198
| 0
| 0.100304
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a176cef7bd4adfdd213efda5255576dc20fe8f1c
| 406
|
py
|
Python
|
optalg/opt_solver/_cbc/__init__.py
|
ttinoco/OPTALG
|
9103b99e6bc3517c7052ab075cbc5bdad310a25a
|
[
"BSD-2-Clause"
] | 10
|
2015-11-13T22:34:47.000Z
|
2020-01-31T17:54:02.000Z
|
optalg/opt_solver/_cbc/__init__.py
|
ttinoco/OPTALG
|
9103b99e6bc3517c7052ab075cbc5bdad310a25a
|
[
"BSD-2-Clause"
] | 40
|
2016-05-08T12:22:01.000Z
|
2019-04-01T01:39:28.000Z
|
optalg/opt_solver/_cbc/__init__.py
|
romcon/OPTALG
|
5ebe18c7a98e3a0feaa7be2658a2fc4f97eeeef3
|
[
"BSD-2-Clause"
] | 12
|
2016-06-30T19:30:12.000Z
|
2019-09-26T16:29:37.000Z
|
#****************************************************#
# This file is part of OPTALG. #
# #
# Copyright (c) 2015-2017, Tomas Tinoco De Rubira. #
# #
# OPTALG is released under the BSD 2-clause license. #
#****************************************************#
from .ccbc import *
| 40.6
| 54
| 0.280788
| 26
| 406
| 4.384615
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.03719
| 0.403941
| 406
| 9
| 55
| 45.111111
| 0.433884
| 0.896552
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
a18c1d7f6285839b59fe2045da9d42f8808cb003
| 128
|
py
|
Python
|
proauth2/__init__.py
|
charlesthomas/proauth2
|
f88c8df966a1802414047ed304d02df1dd520097
|
[
"MIT"
] | null | null | null |
proauth2/__init__.py
|
charlesthomas/proauth2
|
f88c8df966a1802414047ed304d02df1dd520097
|
[
"MIT"
] | null | null | null |
proauth2/__init__.py
|
charlesthomas/proauth2
|
f88c8df966a1802414047ed304d02df1dd520097
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from proauth2 import Proauth2
from async_proauth2 import AsyncProauth2
from proauth2 import Proauth2Error
| 25.6
| 40
| 0.851563
| 17
| 128
| 6.352941
| 0.588235
| 0.388889
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.052632
| 0.109375
| 128
| 4
| 41
| 32
| 0.894737
| 0.15625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
a1c28fbeb10eaa55176878a6a6f934de7ce0186f
| 227
|
py
|
Python
|
lib/galaxy/selenium/toolbox/filters.py
|
rikeshi/galaxy
|
c536a877e4a9b3d12aa0d00fd4d5e705109a0d0a
|
[
"CC-BY-3.0"
] | 1,085
|
2015-02-18T16:14:38.000Z
|
2022-03-30T23:52:07.000Z
|
lib/galaxy/selenium/toolbox/filters.py
|
rikeshi/galaxy
|
c536a877e4a9b3d12aa0d00fd4d5e705109a0d0a
|
[
"CC-BY-3.0"
] | 11,253
|
2015-02-18T17:47:32.000Z
|
2022-03-31T21:47:03.000Z
|
lib/galaxy/selenium/toolbox/filters.py
|
rikeshi/galaxy
|
c536a877e4a9b3d12aa0d00fd4d5e705109a0d0a
|
[
"CC-BY-3.0"
] | 1,000
|
2015-02-18T16:18:10.000Z
|
2022-03-29T08:22:56.000Z
|
def restrict_test(context, section):
    """
    Tool filter that disables the section named 'Test Section'.

    Returns False for that section (hiding it) and True for every other
    section.
    """
    return section.name != 'Test Section'
| 22.7
| 59
| 0.656388
| 28
| 227
| 5.285714
| 0.571429
| 0.222973
| 0.189189
| 0.283784
| 0.378378
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.268722
| 227
| 9
| 60
| 25.222222
| 0.891566
| 0.39207
| 0
| 0
| 0
| 0
| 0.101695
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
b813c1117b48cb3a8bf006f2f5039432a88b4813
| 2,120
|
py
|
Python
|
tests/test_localtree.py
|
byteskeptical/sftpretty
|
0b242f7d32086aa50a308d0df9ad4578b05f2701
|
[
"BSD-3-Clause"
] | 11
|
2021-06-04T21:27:35.000Z
|
2021-12-05T09:58:26.000Z
|
tests/test_localtree.py
|
byteskeptical/sftpretty
|
0b242f7d32086aa50a308d0df9ad4578b05f2701
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_localtree.py
|
byteskeptical/sftpretty
|
0b242f7d32086aa50a308d0df9ad4578b05f2701
|
[
"BSD-3-Clause"
] | 3
|
2021-08-30T09:17:27.000Z
|
2021-12-26T20:51:50.000Z
|
'''test sftpretty.localtree'''
from common import conn, rmdir, VFS
from sftpretty import Connection, localtree
from tempfile import mkdtemp
def test_localtree(sftpserver):
    '''test the localtree function, with recurse'''
    # serve the canned virtual file system over a local SFTP server,
    # mirror it into a fresh temporary directory, then index that
    # directory tree with localtree() and compare against the known layout
    with sftpserver.serve_content(VFS):
        with Connection(**conn(sftpserver)) as sftp:
            localpath = mkdtemp()
            sftp.get_r('.', localpath)
            cwd = sftp.pwd
            directories = {}
            # recursive walk (default): every directory level is recorded
            localtree(directories, localpath + cwd, '/')
            # expected keys: each directory that contains subdirectories
            dkeys = [f'{localpath}/home/test',
                     f'{localpath}/home/test/pub',
                     f'{localpath}/home/test/pub/foo2']
            # expected values: (path, path) tuples for each subdirectory
            dvalues = [[(f'{localpath}/home/test/pub',
                         f'{localpath}/home/test/pub')],
                       [(f'{localpath}/home/test/pub/foo1',
                         f'{localpath}/home/test/pub/foo1'),
                        (f'{localpath}/home/test/pub/foo2',
                         f'{localpath}/home/test/pub/foo2')],
                       [(f'{localpath}/home/test/pub/foo2/bar1',
                         f'{localpath}/home/test/pub/foo2/bar1')]]
            assert sorted(directories.keys()) == dkeys
            assert sorted(directories.values()) == dvalues
            # cleanup local
            rmdir(localpath)
def test_localtree_no_recurse(sftpserver):
    '''test the localtree function, without recursing'''
    # same setup as test_localtree, but mirror only the pub/foo2 subtree
    # and call localtree() with recurse=False: only the top level of the
    # mirrored tree should be indexed
    with sftpserver.serve_content(VFS):
        with Connection(**conn(sftpserver)) as sftp:
            localpath = mkdtemp()
            sftp.chdir('pub/foo2')
            sftp.get_r('.', localpath)
            cwd = sftp.pwd
            directories = {}
            localtree(directories, localpath + cwd, '/', recurse=False)
            # a single key/value pair: foo2 and its one subdirectory bar1
            dkeys = [f'{localpath}/home/test/pub/foo2']
            dvalues = [[(f'{localpath}/home/test/pub/foo2/bar1',
                         f'{localpath}/home/test/pub/foo2/bar1')]]
            assert sorted(directories.keys()) == dkeys
            assert sorted(directories.values()) == dvalues
            # cleanup local
            rmdir(localpath)
| 33.650794
| 71
| 0.538679
| 211
| 2,120
| 5.374408
| 0.222749
| 0.123457
| 0.17284
| 0.222222
| 0.819224
| 0.734568
| 0.734568
| 0.734568
| 0.734568
| 0.734568
| 0
| 0.010468
| 0.324057
| 2,120
| 62
| 72
| 34.193548
| 0.780879
| 0.066981
| 0
| 0.5
| 0
| 0
| 0.218256
| 0.212137
| 0
| 0
| 0
| 0
| 0.1
| 1
| 0.05
| false
| 0
| 0.075
| 0
| 0.125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
62960466f78b57982fde14d87dff2decc2ec00ee
| 121,028
|
py
|
Python
|
tools/models.py
|
UnIcOrn7618/MonthlyRunoffForecastByAutoReg
|
2d66c628141f001e4ffb3dc3b7520a0f0f0ff239
|
[
"MIT"
] | 2
|
2020-09-24T13:31:06.000Z
|
2020-11-11T09:08:16.000Z
|
tools/models.py
|
UnIcOrn7618/MonthlyRunoffForecastByAutoReg
|
2d66c628141f001e4ffb3dc3b7520a0f0f0ff239
|
[
"MIT"
] | null | null | null |
tools/models.py
|
UnIcOrn7618/MonthlyRunoffForecastByAutoReg
|
2d66c628141f001e4ffb3dc3b7520a0f0f0ff239
|
[
"MIT"
] | 1
|
2020-12-16T07:29:32.000Z
|
2020-12-16T07:29:32.000Z
|
#### import basic external libs
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.set_cmap("viridis")
import datetime
import time
#### import libs for optimize SVR or GBRT
from sklearn.svm import SVR,NuSVR
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.model_selection import cross_val_score
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error, mean_squared_log_error
from sklearn.externals.joblib import Parallel, delayed
from skopt.space import Real, Integer
from skopt.utils import use_named_args
from skopt import gp_minimize,forest_minimize, dummy_minimize
from skopt.plots import plot_convergence,plot_objective,plot_evaluations
from skopt import dump, load
from skopt import Optimizer
from skopt.benchmarks import branin
from functools import partial
from statsmodels.tsa.arima_model import ARIMA
from random import seed
from random import random
seed(1)
# from skopt.callbacks import CheckpointSaver
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
print(tf.__version__)
from tensorflow.keras.callbacks import ReduceLROnPlateau,EarlyStopping
import os
root_path = os.path.dirname(os.path.abspath('_file_'))
import sys
sys.path.append(root_path)
from config.globalLog import logger
# import own coding libs
from tools.plot_utils import plot_convergence_
from tools.plot_utils import plot_evaluations_
from tools.plot_utils import plot_objective_
from tools.plot_utils import plot_rela_pred
from tools.plot_utils import plot_history
from tools.plot_utils import plot_error_distribution
from tools.dump_data import dum_pred_results
# Bayesian-optimization search space for the epsilon-SVR hyper-parameters
# (consumed by skopt's gp_minimize / forest_minimize / dummy_minimize)
ESVR_SPACE = [
    # Penalty parameter `C` of the error term
    Real(0.1, 200, name='C'),
    # `epsilon` in epsilon-SVR model. It specifies the epsilon-tube
    # within which no penalty is associated in the training loss
    # function with points predicted within a distance epsilon from the actual value.
    Real(10**-6, 10**0, name='epsilon'),
    # kernel coefficient for 'rbf','poly' and 'sigmoid'
    Real(10**-6, 10**0, name='gamma'),
]
# dimension labels used by the skopt diagnostic plots
# (plot_objective_/plot_evaluations_)
DIMENSION_ESVR = ['C','epsilon','gamma']
DIMENSION_GBRT = ['max depth','learning rate','max features','min samples split','min samples leaf']
# figure export resolutions in dots per inch
EPS_DPI = 2000
TIFF_DPI=1200
def multi_optimizer_esvr(root_path,station,predict_pattern,n_calls=100,cv=10):
    """
    Compare several skopt optimizers (random, Gaussian-process, random
    forest, extra trees) on epsilon-SVR hyper-parameter tuning and plot
    their convergence curves.

    Parameters:
    -----------
    root_path: project root directory containing the per-station folders
    station: station (sub-directory) name
    predict_pattern: name of the data sub-directory with the learning samples
    n_calls: number of optimizer evaluations per run
    cv: number of cross-validation folds for the tuning objective

    NOTE(review): when the optimized-params CSV exists but was tuned with
    a different n_calls, this function does nothing — presumably intended
    to re-tune; confirm against callers.
    """
    # Set the time series and model parameters
    predictor = 'esvr'
    data_path = root_path + '/'+station+'/data/'+predict_pattern+'/'
    model_path = root_path+'/'+station+'/projects/'+predictor+'/'+predict_pattern+'/multi_optimizer/history/'
    if not os.path.exists(model_path):
        os.makedirs(model_path)
    model_name = 'nc'+str(n_calls)+'_cv'+str(cv)
    logger.info("Build multiple optimizer epsilon SVR...")
    logger.info("Root path:{}".format(root_path))
    logger.info("Station:{}".format(station))
    logger.info("Predict pattern:{}".format(predict_pattern))
    logger.info("Number of calls:{}".format(n_calls))
    logger.info("Data Path:{}".format(data_path))
    logger.info("Model Path:{}".format(model_path))
    # skip tuning entirely if results for this n_calls already exist
    if os.path.exists(model_path +model_name+'_optimized_params.csv') :
        optimal_params = pd.read_csv(model_path +model_name+'_optimized_params.csv')
        pre_n_calls = optimal_params['n_calls'][0]
        if pre_n_calls==n_calls:
            logger.info("The n_calls="+str(n_calls)+" was already tuned")
    else:
        logger.info('Load learning samples...')
        # Load the training, development and testing samples
        train = pd.read_csv(data_path+'minmax_unsample_train.csv',index_col=False)
        dev = pd.read_csv(data_path+'minmax_unsample_dev.csv',index_col=False)
        test = pd.read_csv(data_path+'minmax_unsample_test.csv',index_col=False)
        train_dev = pd.concat([train,dev],axis=0)
        # shuffle the training samples
        train_dev = train_dev.sample(frac=1)
        train_y = train['Y']
        train_x = train.drop('Y', axis=1)
        dev_y = dev['Y']
        dev_x = dev.drop('Y', axis=1)
        test_y = test['Y']
        test_x = test.drop('Y', axis=1)
        train_dev_y = train_dev['Y']
        train_dev_x = train_dev.drop('Y', axis=1)
        logger.info('Build SVR model and set the evaluation space of Bayesian optimization.')
        reg = SVR(tol=1e-4)
        # Set the space of hyper-parameters for tuning them
        space = ESVR_SPACE
        # Define an objective function of hyper-parameters tuning:
        # negated CV score so minimizers can minimize it
        @use_named_args(space)
        def objective(**params):
            reg.set_params(**params)
            return -np.mean(cross_val_score(reg,train_dev_x,train_dev_y,cv=cv,n_jobs=-1,scoring='neg_mean_squared_error'))
        # run one minimizer n_iter times with different random seeds
        def run(minimizer, n_iter=5):
            return [minimizer(objective, space, n_calls=n_calls, random_state=n)
                    for n in range(n_iter)]
        #checkpoint_saver = CheckpointSaver(model_path+model_name+'/checkpoint.pkl',compress=9)
        # Random search
        dummy_res = run(dummy_minimize)
        # Gaussian processes
        gp_res = run(gp_minimize)
        # Random forest
        rf_res = run(partial(forest_minimize, base_estimator="RF"))
        # Extra trees
        et_res = run(partial(forest_minimize, base_estimator="ET"))
        # overlay the convergence traces of all four optimizers
        plot = plot_convergence(("dummy_minimize", dummy_res),
                                ("gp_minimize", gp_res),
                                ("forest_minimize('rf')", rf_res),
                                ("forest_minimize('et)", et_res),
                                true_minimum=0.397887, yscale="log")
        plot.legend(loc="best", prop={'size': 6}, numpoints=1);
    plt.close('all')
def esvr(root_path,station,predict_pattern,optimizer='gp',n_calls=100,cv=10):
    """
    Tune, fit and evaluate a monoscale epsilon-SVR runoff model.

    If tuned hyper-parameters for this configuration are already stored on
    disk they are reloaded; otherwise they are found with the requested
    skopt optimizer ('gp', 'fr_et', 'fr_rf' or 'dm') and persisted.
    Predictions for train/dev/test are renormalized, capped at zero and
    dumped to CSV; diagnostic plots are written next to the model files.

    Parameters:
    -----------
    root_path: project root directory containing the per-station folders
    station: station (sub-directory) name
    predict_pattern: name of the data sub-directory with the learning samples
    optimizer: which skopt minimizer to use for hyper-parameter tuning
    n_calls: number of optimizer evaluations
    cv: number of cross-validation folds for the tuning objective
    """
    logger.info("Build monoscale epsilon SVR model ...")
    logger.info("Root path:{}".format(root_path))
    logger.info("Station:{}".format(station))
    logger.info("Predict pattern:{}".format(predict_pattern))
    logger.info("Optimizer:{}".format(optimizer))
    logger.info("Number of calls:{}".format(n_calls))
    predictor = 'esvr'
    data_path = root_path + '/'+station+'/data/'+predict_pattern+'/'
    model_path = root_path+'/'+station+'/projects/'+predictor+'/'+predict_pattern+'/history/'
    if not os.path.exists(model_path):
        os.makedirs(model_path)
    model_name = optimizer+'_nc'+str(n_calls)+'_cv'+str(cv)
    logger.info("Data Path:{}".format(data_path))
    logger.info("Model Path:{}".format(model_path))
    logger.info("Model name:{}".format(model_name))
    # Load the training, development and testing samples
    logger.info('Load learning samples...')
    train = pd.read_csv(data_path+'minmax_unsample_train.csv',index_col=False)
    dev = pd.read_csv(data_path+'minmax_unsample_dev.csv',index_col=False)
    test = pd.read_csv(data_path+'minmax_unsample_test.csv',index_col=False)
    train_dev = pd.concat([train,dev],axis=0)
    # shuffle the training samples
    train_dev = train_dev.sample(frac=1)
    train_y = train['Y']
    train_x = train.drop('Y', axis=1)
    dev_y = dev['Y']
    dev_x = dev.drop('Y', axis=1)
    test_y = test['Y']
    test_x = test.drop('Y', axis=1)
    train_dev_y = train_dev['Y']
    train_dev_x = train_dev.drop('Y', axis=1)
    if os.path.exists(model_path +model_name+'_optimized_params.csv'):
        # hyper-parameters already tuned: reload and refit only
        optimal_params = pd.read_csv(model_path +model_name+'_optimized_params.csv')
        pre_n_calls = optimal_params['n_calls'][0]
        if pre_n_calls==n_calls:
            logger.info("The n_calls="+str(n_calls)+" was already tuned")
        esvr = SVR(C=optimal_params['C'][0], epsilon=optimal_params['epsilon'][0], gamma=optimal_params['gamma'][0])
        # refit on train+dev for each prediction set
        train_predictions = esvr.fit(train_dev_x,train_dev_y).predict(train_x)
        dev_predictions = esvr.fit(train_dev_x,train_dev_y).predict(dev_x)
        test_predictions = esvr.fit(train_dev_x,train_dev_y).predict(test_x)
        train_y=(train_y.values).flatten()
        dev_y=(dev_y.values).flatten()
        test_y=(test_y.values).flatten()
        # min/max of the original (un-normalized) series, last row of the id file
        norm_id = pd.read_csv(data_path + 'norm_unsample_id.csv')
        sMin = norm_id['series_min'][norm_id.shape[0]-1]
        sMax = norm_id['series_max'][norm_id.shape[0]-1]
        logger.debug('Series Min:\n {}'.format(sMin))
        logger.debug('Series Max:\n {}'.format(sMax))
        # renormalize records and predictions from [-1,1] back to the
        # original scale and cap negative predictions at 0
        train_y = np.multiply(train_y + 1,sMax - sMin) / 2 + sMin
        dev_y = np.multiply(dev_y + 1,sMax - sMin) / 2 + sMin
        test_y = np.multiply(test_y + 1,sMax - sMin) / 2 + sMin
        train_predictions = np.multiply(train_predictions + 1, sMax -sMin) / 2 + sMin
        train_predictions[train_predictions<0.0]=0.0
        dev_predictions = np.multiply(dev_predictions + 1, sMax -sMin) / 2 + sMin
        dev_predictions[dev_predictions<0.0]=0.0
        test_predictions = np.multiply(test_predictions + 1, sMax -sMin) / 2 + sMin
        test_predictions[test_predictions<0.0]=0.0
        dum_pred_results(
            path = model_path+model_name+'.csv',
            train_y = train_y,
            train_predictions=train_predictions,
            dev_y = dev_y,
            dev_predictions = dev_predictions,
            test_y = test_y,
            test_predictions = test_predictions,
            time_cost = optimal_params['time_cost'][0],
        )
    else:
        reg = SVR(tol=1e-4)
        # Set the space of hyper-parameters for tuning them
        space = ESVR_SPACE
        # Define an objective function of hyper-parameters tuning:
        # negated CV score so the minimizers can minimize it
        @use_named_args(space)
        def objective(**params):
            reg.set_params(**params)
            return -np.mean(cross_val_score(reg,train_dev_x,train_dev_y,cv=cv,n_jobs=-1,scoring='neg_mean_squared_error'))
        # Tune the hyper-parameters with the selected skopt minimizer
        start = time.process_time()
        if optimizer=='gp':
            res = gp_minimize(objective,space,n_calls=n_calls ,random_state=0,verbose=True,n_jobs=-1)
        elif optimizer=='fr_et':
            res = forest_minimize(objective,space,n_calls=n_calls,base_estimator='ET',random_state=0,verbose=True,n_jobs=-1)
        elif optimizer=='fr_rf':
            res = forest_minimize(objective,space,n_calls=n_calls,base_estimator='RF',random_state=0,verbose=True,n_jobs=-1)
        elif optimizer=='dm':
            res = dummy_minimize(objective,space,n_calls=n_calls)
        end = time.process_time()
        time_cost = end-start
        # persist (and sanity-reload) the optimization result
        dump(res,model_path+model_name+'_result.pkl',store_objective=False)
        returned_results = load(model_path+model_name+'_result.pkl')
        # Visualizing the results of hyper-parameters tuning
        plot_objective_(res,dimensions=DIMENSION_ESVR,fig_savepath=model_path+model_name+'_objective.png')
        plot_evaluations_(res,dimensions=DIMENSION_ESVR,fig_savepath=model_path+model_name+'_evaluation.png')
        plot_convergence_(res,fig_savepath=model_path+model_name+'_convergence.png')
        # Log the optimal hyperparameters
        logger.info('Best score=%.4f'%res.fun)
        logger.info(""" Best parameters:
        -C = %.8f
        -epsilon = %.8f
        -gamma = %.8f
        """%(res.x[0],res.x[1],res.x[2]))
        logger.info('Time cost:{} seconds'.format(time_cost))
        # Construct the optimal hyperparameters to restore them
        params_dict={
            'C':res.x[0],
            'epsilon':res.x[1],
            'gamma':res.x[2],
            'time_cost':time_cost,
            'n_calls':n_calls,
        }
        # Transform the optimal hyperparameters dict to pandas DataFrame and restore it
        params_df = pd.DataFrame(params_dict,index=[0])
        params_df.to_csv(model_path +model_name+'_optimized_params.csv')
        # Initialize a SVR with the optimal hyperparameters
        esvr = SVR(C=res.x[0], epsilon=res.x[1], gamma=res.x[2])
        # Do prediction with the optimal model
        train_predictions = esvr.fit(train_dev_x,train_dev_y).predict(train_x)
        dev_predictions = esvr.fit(train_dev_x,train_dev_y).predict(dev_x)
        test_predictions = esvr.fit(train_dev_x,train_dev_y).predict(test_x)
        train_y=(train_y.values).flatten()
        dev_y=(dev_y.values).flatten()
        test_y=(test_y.values).flatten()
        norm_id = pd.read_csv(data_path + 'norm_unsample_id.csv')
        sMin = norm_id['series_min'][norm_id.shape[0]-1]
        sMax = norm_id['series_max'][norm_id.shape[0]-1]
        logger.debug('Series Min:\n {}'.format(sMin))
        logger.debug('Series Max:\n {}'.format(sMax))
        # Renormalized the records and predictions and cap the negative predictions to 0
        train_y = np.multiply(train_y + 1,sMax - sMin) / 2 + sMin
        dev_y = np.multiply(dev_y + 1,sMax - sMin) / 2 + sMin
        test_y = np.multiply(test_y + 1,sMax - sMin) / 2 + sMin
        train_predictions = np.multiply(train_predictions + 1, sMax -sMin) / 2 + sMin
        train_predictions[train_predictions<0.0]=0.0
        dev_predictions = np.multiply(dev_predictions + 1, sMax -sMin) / 2 + sMin
        dev_predictions[dev_predictions<0.0]=0.0
        test_predictions = np.multiply(test_predictions + 1, sMax -sMin) / 2 + sMin
        test_predictions[test_predictions<0.0]=0.0
        dum_pred_results(
            path = model_path+model_name+'.csv',
            train_y = train_y,
            train_predictions=train_predictions,
            dev_y = dev_y,
            dev_predictions = dev_predictions,
            test_y = test_y,
            test_predictions = test_predictions,
            time_cost = time_cost,
        )
    # predicted-vs-observed and error-distribution plots for both branches
    plot_rela_pred(train_y,train_predictions,fig_savepath=model_path +model_name + '_train_pred.png')
    plot_rela_pred(dev_y,dev_predictions,fig_savepath=model_path +model_name + "_dev_pred.png")
    plot_rela_pred(test_y,test_predictions,fig_savepath=model_path +model_name + "_test_pred.png")
    plot_error_distribution(test_y,test_predictions,fig_savepath=model_path+model_name+"_test_error.png")
    plt.close('all')
def esvr_multi_seed(root_path,station,predict_pattern,optimizer='gp',n_calls=100,cv=10,iterations=10):
logger.info("Build epsilon SVR with multiple seed...")
logger.info("Root path:{}".format(root_path))
logger.info("Station:{}".format(station))
logger.info("Predict pattern:{}".format(predict_pattern))
logger.info("Optimizer:{}".format(optimizer))
logger.info("Number of calls:{}".format(n_calls))
# Set the time series and model parameters
predictor = 'esvr'
data_path = root_path + '/'+station+'/data/'+predict_pattern+'/'
model_path = root_path+'/'+station+'/projects/'+predictor+'/'+predict_pattern+'/history/'
if not os.path.exists(model_path):
os.makedirs(model_path)
logger.info("Data Path:{}".format(data_path))
logger.info("Model Path:{}".format(model_path))
for random_state in range(1,iterations+1):
model_name = optimizer+'_nc'+str(n_calls)+'_cv'+str(cv)+'_seed'+str(random_state)
logger.info('Model Name:{}'.format(model_name))
# Load the training, development and testing samples
train = pd.read_csv(data_path+'minmax_unsample_train.csv',index_col=False)
dev = pd.read_csv(data_path+'minmax_unsample_dev.csv',index_col=False)
test = pd.read_csv(data_path+'minmax_unsample_test.csv',index_col=False)
train_dev = pd.concat([train,dev],axis=0)
# shuffle the training samples
train_dev = train_dev.sample(frac=1)
train_y = train['Y']
train_x = train.drop('Y', axis=1)
dev_y = dev['Y']
dev_x = dev.drop('Y', axis=1)
test_y = test['Y']
test_x = test.drop('Y', axis=1)
train_dev_y = train_dev['Y']
train_dev_x = train_dev.drop('Y', axis=1)
logger.info("Optimized params:{}".format(model_path +model_name+'_optimized_params.csv'))
if os.path.exists(model_path +model_name+'_optimized_params.csv'):
optimal_params = pd.read_csv(model_path +model_name+'_optimized_params.csv')
pre_n_calls = optimal_params['n_calls'][0]
if pre_n_calls==n_calls:
logger.info("The n_calls="+str(n_calls)+" was already tuned")
esvr = SVR(C=optimal_params['C'][0], epsilon=optimal_params['epsilon'][0], gamma=optimal_params['gamma'][0])
# Do prediction with the opyimal model
train_predictions = esvr.fit(train_dev_x,train_dev_y).predict(train_x)
dev_predictions = esvr.fit(train_dev_x,train_dev_y).predict(dev_x)
test_predictions = esvr.fit(train_dev_x,train_dev_y).predict(test_x)
train_y=(train_y.values).flatten()
dev_y=(dev_y.values).flatten()
test_y=(test_y.values).flatten()
norm_id = pd.read_csv(data_path + 'norm_unsample_id.csv')
sMin = norm_id['series_min'][norm_id.shape[0]-1]
sMax = norm_id['series_max'][norm_id.shape[0]-1]
logger.debug('Series Min:\n {}'.format(sMin))
logger.debug('Series Max:\n {}'.format(sMax))
train_y = np.multiply(train_y + 1,sMax - sMin) / 2 + sMin
dev_y = np.multiply(dev_y + 1,sMax - sMin) / 2 + sMin
test_y = np.multiply(test_y + 1,sMax - sMin) / 2 + sMin
train_predictions = np.multiply(train_predictions + 1, sMax -sMin) / 2 + sMin
train_predictions[train_predictions<0.0]=0.0
dev_predictions = np.multiply(dev_predictions + 1, sMax -sMin) / 2 + sMin
dev_predictions[dev_predictions<0.0]=0.0
test_predictions = np.multiply(test_predictions + 1, sMax -sMin) / 2 + sMin
test_predictions[test_predictions<0.0]=0.0
dum_pred_results(
path = model_path+model_name+'.csv',
train_y = train_y,
train_predictions=train_predictions,
dev_y = dev_y,
dev_predictions = dev_predictions,
test_y = test_y,
test_predictions = test_predictions,
time_cost = optimal_params['time_cost'][0],
)
else:
reg = SVR(tol=1e-4)
# Set the space of hyper-parameters for tuning them
space = ESVR_SPACE
# Define an objective function of hyper-parameters tuning
@use_named_args(space)
def objective(**params):
reg.set_params(**params)
return -np.mean(cross_val_score(reg,train_dev_x,train_dev_y,cv=cv,n_jobs=-1,scoring='neg_mean_squared_error'))
# Tuning the hyper-parameters using Bayesian Optimization based on Gaussion Process
start = time.process_time()
if optimizer=='gp':
res = gp_minimize(objective,space,n_calls=n_calls ,random_state=random_state,verbose=True,n_jobs=-1)
elif optimizer=='fr_bt':
res = forest_minimize(objective,space,n_calls=n_calls,base_estimator='ET',random_state=random_state,verbose=True,n_jobs=-1)
elif optimizer=='fr_rf':
res = forest_minimize(objective,space,n_calls=n_calls,base_estimator='RF',random_state=random_state,verbose=True,n_jobs=-1)
elif optimizer=='dm':
res = dummy_minimize(objective,space,n_calls=n_calls)
end = time.process_time()
time_cost = end-start
dump(res,model_path+model_name+'_result_seed'+str(random_state)+'.pkl',store_objective=False)
returned_results = load(model_path+model_name+'_result_seed'+str(random_state)+'.pkl')
# Visualizing the results of hyper-parameaters tuning
plot_objective_(res,dimensions=DIMENSION_ESVR,fig_savepath=model_path+model_name+'_objective.png')
plot_evaluations_(res,dimensions=DIMENSION_ESVR,fig_savepath=model_path+model_name+'_evaluation.png')
plot_convergence_(res,fig_savepath=model_path+model_name+'_convergence.png')
# Plot the optimal hyperparameters
logger.info('Best score=%.4f'%res.fun)
logger.info(""" Best parameters:
-C = %.8f
-epsilon = %.8f
-gamma = %.8f
"""%(res.x[0],res.x[1],res.x[2]))
logger.info('Time cost:{} seconds'.format(time_cost))
# Construct the optimal hyperparameters to restore them
params_dict={
'C':res.x[0],
'epsilon':res.x[1],
'gamma':res.x[2],
'time_cost':time_cost,
'n_calls':n_calls,
}
# Transform the optimal hyperparameters dict to pandas DataFrame and restore it
params_df = pd.DataFrame(params_dict,index=[0])
params_df.to_csv(model_path +model_name+'_optimized_params.csv')
# Initialize a SVR with the optimal hyperparameters
esvr = SVR(C=res.x[0], epsilon=res.x[1], gamma=res.x[2])
# Do prediction with the opyimal model
train_predictions = esvr.fit(train_dev_x,train_dev_y).predict(train_x)
dev_predictions = esvr.fit(train_dev_x,train_dev_y).predict(dev_x)
test_predictions = esvr.fit(train_dev_x,train_dev_y).predict(test_x)
train_y=(train_y.values).flatten()
dev_y=(dev_y.values).flatten()
test_y=(test_y.values).flatten()
norm_id = pd.read_csv(data_path + 'norm_unsample_id.csv')
sMin = norm_id['series_min'][norm_id.shape[0]-1]
sMax = norm_id['series_max'][norm_id.shape[0]-1]
logger.debug('Series Min:\n {}'.format(sMin))
logger.debug('Series Max:\n {}'.format(sMax))
# Renormalized the records and predictions and cap the negative predictions to 0
train_y = np.multiply(train_y + 1,sMax - sMin) / 2 + sMin
dev_y = np.multiply(dev_y + 1,sMax - sMin) / 2 + sMin
test_y = np.multiply(test_y + 1,sMax - sMin) / 2 + sMin
train_predictions = np.multiply(train_predictions + 1, sMax -sMin) / 2 + sMin
train_predictions[train_predictions<0.0]=0.0
dev_predictions = np.multiply(dev_predictions + 1, sMax -sMin) / 2 + sMin
dev_predictions[dev_predictions<0.0]=0.0
test_predictions = np.multiply(test_predictions + 1, sMax -sMin) / 2 + sMin
test_predictions[test_predictions<0.0]=0.0
dum_pred_results(
path = model_path+model_name+'.csv',
train_y = train_y,
train_predictions=train_predictions,
dev_y = dev_y,
dev_predictions = dev_predictions,
test_y = test_y,
test_predictions = test_predictions,
time_cost = time_cost,
)
plot_rela_pred(train_y,train_predictions,fig_savepath=model_path +model_name + '_train_pred.png')
plot_rela_pred(dev_y,dev_predictions,fig_savepath=model_path +model_name + "_dev_pred.png")
plot_rela_pred(test_y,test_predictions,fig_savepath=model_path +model_name + "_test_pred.png")
plot_error_distribution(test_y,test_predictions,fig_savepath=model_path+model_name+"_test_error.png")
plt.close('all')
def one_step_esvr(root_path,station,decomposer,predict_pattern,optimizer='gp',wavelet_level='db10-2',n_calls=100,cv=10):
    """Tune and evaluate a one-step epsilon-SVR forecaster for one station.

    The hyper-parameters (C, epsilon, gamma) are tuned with Bayesian
    optimization, minimizing the ``cv``-fold cross-validation MSE on the
    shuffled train+dev set. The optimum is cached to a CSV next to the model;
    when a cache produced with the same ``n_calls`` exists, tuning is skipped
    and the cached optimum is reused. Records and predictions are mapped from
    [-1, 1] back to the original scale, negative predictions are capped at
    zero, and results plus diagnostic figures are written to ``model_path``.

    Args:
        root_path: Root directory of the project tree.
        station: Station (gauge) name.
        decomposer: Signal decomposition algorithm (e.g. 'dwt', 'modwt').
        predict_pattern: Prediction pattern (data sub-directory name).
        optimizer: 'gp' (Gaussian process), 'fr_bt' (forest, extra-trees),
            'fr_rf' (forest, random forest) or 'dm' (random search).
        wavelet_level: Mother wavelet and decomposition level; only used for
            the wavelet-based decomposers ('dwt'/'modwt').
        n_calls: Number of objective evaluations for the optimizer.
        cv: Number of cross-validation folds for the tuning objective.

    Raises:
        ValueError: If ``optimizer`` is not one of the supported names.
    """
    # Set project parameters
    logger.info('Build one-step epsilon SVR model...')
    logger.info('Root path:{}'.format(root_path))
    logger.info('Station:{}'.format(station))
    logger.info('Decomposer:{}'.format(decomposer))
    logger.info('Predict pattern:{}'.format(predict_pattern))
    logger.info('Optimizer:{}'.format(optimizer))
    logger.info('Monther wavelet and decomposition level of WA:{}'.format(wavelet_level))
    logger.info('Number of calls:{}'.format(n_calls))
    predictor = 'esvr'
    signals = station+'_'+decomposer
    # Wavelet decomposers keep data/models in a per-wavelet sub-directory.
    if decomposer == 'dwt' or decomposer=='modwt':
        data_path = root_path + '/'+signals+'/data/'+wavelet_level+'/'+predict_pattern+'/'
        model_path = root_path+'/'+signals+'/projects/'+predictor+'/'+wavelet_level+'/'+predict_pattern+'/history/'
    else:
        data_path = root_path + '/'+signals+'/data/'+predict_pattern+'/'
        model_path = root_path+'/'+signals+'/projects/'+predictor+'/'+predict_pattern+'/history/'
    if not os.path.exists(model_path):
        os.makedirs(model_path)
    model_name = optimizer+'_nc'+str(n_calls)+'_cv'+str(cv)
    logger.info("Data Path:{}".format(data_path))
    logger.info("Model Path:{}".format(model_path))
    # Load the normalized samples.
    train = pd.read_csv(data_path+'minmax_unsample_train.csv')
    dev = pd.read_csv(data_path+'minmax_unsample_dev.csv')
    test = pd.read_csv(data_path+'minmax_unsample_test.csv')
    train_dev = pd.concat([train,dev],axis=0)
    # Shuffle train+dev so the CV folds mix training and development samples.
    train_dev = train_dev.sample(frac=1)
    train_y = train['Y']
    train_x = train.drop('Y', axis=1)
    dev_y = dev['Y']
    dev_x = dev.drop('Y', axis=1)
    test_y = test['Y']
    test_x = test.drop('Y', axis=1)
    train_dev_y = train_dev['Y']
    train_dev_x = train_dev.drop('Y', axis=1)
    params_file = model_path + model_name+'_optimized_params.csv'
    optimal_params = None
    if os.path.exists(params_file):
        cached = pd.read_csv(params_file)
        # Reuse the cache only when it was produced with the same tuning
        # budget. (The original silently did nothing on a mismatch, leaving
        # the predictions undefined; a mismatch now triggers re-tuning.)
        if cached['n_calls'][0]==n_calls:
            logger.info("The n_calls="+str(n_calls)+" was already tuned")
            optimal_params = cached
    if optimal_params is not None:
        # Rebuild the SVR from the cached optimum; no tuning needed.
        esvr = SVR(C=optimal_params['C'][0], epsilon=optimal_params['epsilon'][0], gamma=optimal_params['gamma'][0])
        time_cost = optimal_params['time_cost'][0]
    else:
        reg = SVR(tol=1e-4)
        # Search space of (C, epsilon, gamma) shared by all ESVR models.
        space = ESVR_SPACE
        @use_named_args(space)
        def objective(**params):
            # Negated because skopt minimizes while sklearn returns neg-MSE.
            reg.set_params(**params)
            return -np.mean(cross_val_score(reg,train_dev_x,train_dev_y,cv=cv,n_jobs=-1,scoring='neg_mean_squared_error'))
        start = time.process_time()
        if optimizer=='gp':
            res = gp_minimize(objective,space,n_calls=n_calls,random_state=0,verbose=True,n_jobs=-1)
        elif optimizer=='fr_bt':
            res = forest_minimize(objective,space,n_calls=n_calls,base_estimator='ET',random_state=0,verbose=True,n_jobs=-1)
        elif optimizer=='fr_rf':
            res = forest_minimize(objective,space,n_calls=n_calls,base_estimator='RF',random_state=0,verbose=True,n_jobs=-1)
        elif optimizer=='dm':
            res = dummy_minimize(objective,space,n_calls=n_calls)
        else:
            # Fail fast instead of hitting a NameError on `res` later.
            raise ValueError('Unknown optimizer:{}'.format(optimizer))
        end = time.process_time()
        time_cost = end - start
        dump(res,model_path+model_name+'_result.pkl',store_objective=False)
        # Visualize the tuning process.
        plot_objective_(res,dimensions=DIMENSION_ESVR,fig_savepath=model_path+model_name+'_objective.png')
        plot_evaluations_(res,dimensions=DIMENSION_ESVR,fig_savepath=model_path+model_name+'_evaluation.png')
        plot_convergence_(res,fig_savepath=model_path+model_name+'_convergence.png')
        logger.info('Best score=%.4f'%res.fun)
        logger.info(""" Best parameters:
        -C = %.8f
        -epsilon = %.8f
        -gamma = %.8f
        """%(res.x[0],res.x[1],res.x[2]))
        logger.info('Time cost:{}'.format(time_cost))
        # Persist the optimum so later runs with the same n_calls skip tuning.
        params_dict={
            'C':res.x[0],
            'epsilon':res.x[1],
            'gamma':res.x[2],
            'time_cost':(time_cost),
            'n_calls':n_calls,
        }
        params_df = pd.DataFrame(params_dict,index=[0])
        params_df.to_csv(params_file)
        esvr = SVR(C=res.x[0], epsilon=res.x[1], gamma=res.x[2])
    # Fit once on train+dev and predict every split (the original refit the
    # identical model once per split, tripling the training work).
    esvr.fit(train_dev_x,train_dev_y)
    train_predictions = esvr.predict(train_x)
    dev_predictions = esvr.predict(dev_x)
    test_predictions = esvr.predict(test_x)
    train_y=(train_y.values).flatten()
    dev_y=(dev_y.values).flatten()
    test_y=(test_y.values).flatten()
    # Renormalization bounds of the target series (last row of the id file).
    norm_id = pd.read_csv(data_path + 'norm_unsample_id.csv')
    sMin = norm_id['series_min'][norm_id.shape[0]-1]
    sMax = norm_id['series_max'][norm_id.shape[0]-1]
    logger.debug('Series Min:\n {}'.format(sMin))
    logger.debug('Series Max:\n {}'.format(sMax))
    # Map records and predictions from [-1,1] back to the original scale and
    # cap negative predictions at zero.
    train_y = np.multiply(train_y + 1,sMax - sMin) / 2 + sMin
    dev_y = np.multiply(dev_y + 1,sMax - sMin) / 2 + sMin
    test_y = np.multiply(test_y + 1,sMax - sMin) / 2 + sMin
    train_predictions = np.multiply(train_predictions + 1, sMax -sMin) / 2 + sMin
    train_predictions[train_predictions<0.0]=0.0
    dev_predictions = np.multiply(dev_predictions + 1, sMax -sMin) / 2 + sMin
    dev_predictions[dev_predictions<0.0]=0.0
    test_predictions = np.multiply(test_predictions + 1, sMax -sMin) / 2 + sMin
    test_predictions[test_predictions<0.0]=0.0
    dum_pred_results(
        path = model_path+model_name+'.csv',
        train_y = train_y,
        train_predictions=train_predictions,
        dev_y = dev_y,
        dev_predictions = dev_predictions,
        test_y = test_y,
        test_predictions = test_predictions,
        time_cost=time_cost)
    plot_rela_pred(train_y,train_predictions,fig_savepath=model_path +model_name + '_train_pred.png')
    plot_rela_pred(dev_y,dev_predictions,fig_savepath=model_path +model_name + "_dev_pred.png")
    plot_rela_pred(test_y,test_predictions,fig_savepath=model_path +model_name + "_test_pred.png")
    plot_error_distribution(test_y,test_predictions,fig_savepath=model_path +model_name + "_test_error.png")
    plt.close('all')
def one_step_esvr_multi_seed(root_path,station,decomposer,predict_pattern,optimizer='gp',wavelet_level='db10-2',n_calls=100,cv=10,iterations=10):
    """Tune one-step epsilon-SVR models for several optimizer random seeds.

    Runs the same pipeline as ``one_step_esvr`` once per seed in
    ``range(1, iterations+1)``, passing the seed as the optimizer's
    ``random_state`` and embedding it in the model name so each seed gets
    its own cache, result files and figures. Per-seed tuned parameters are
    cached to CSV and reused when a cache with the same ``n_calls`` exists.

    Args:
        root_path: Root directory of the project tree.
        station: Station (gauge) name.
        decomposer: Signal decomposition algorithm (e.g. 'dwt', 'modwt').
        predict_pattern: Prediction pattern (data sub-directory name).
        optimizer: 'gp', 'fr_bt', 'fr_rf' or 'dm' (see ``one_step_esvr``).
        wavelet_level: Mother wavelet and decomposition level; only used for
            the wavelet-based decomposers ('dwt'/'modwt').
        n_calls: Number of objective evaluations for the optimizer.
        cv: Number of cross-validation folds for the tuning objective.
        iterations: Number of random seeds (1..iterations) to run.

    Raises:
        ValueError: If ``optimizer`` is not one of the supported names.
    """
    logger.info('Build one-step epsilon SVR model with multiple seed...')
    logger.info('Root path:{}'.format(root_path))
    logger.info('Station:{}'.format(station))
    logger.info('Decomposer:{}'.format(decomposer))
    logger.info('Predict pattern:{}'.format(predict_pattern))
    logger.info('Optimizer:{}'.format(optimizer))
    logger.info('Monther wavelet and decomposition level of WA:{}'.format(wavelet_level))
    logger.info('Number of calls:{}'.format(n_calls))
    logger.info('Seed iterations:{}'.format(iterations))
    predictor = 'esvr'
    signals = station+'_'+decomposer
    # Wavelet decomposers keep data/models in a per-wavelet sub-directory.
    if decomposer == 'dwt' or decomposer=='modwt':
        data_path = root_path + '/'+signals+'/data/'+wavelet_level+'/'+predict_pattern+'/'
        model_path = root_path+'/'+signals+'/projects/'+predictor+'/'+wavelet_level+'/'+predict_pattern+'/history/'
    else:
        data_path = root_path + '/'+signals+'/data/'+predict_pattern+'/'
        model_path = root_path+'/'+signals+'/projects/'+predictor+'/'+predict_pattern+'/history/'
    if not os.path.exists(model_path):
        os.makedirs(model_path)
    logger.info("Data Path:{}".format(data_path))
    logger.info("Model Path:{}".format(model_path))
    # The samples do not change with the seed: load them once, outside the
    # loop (the original re-read the CSVs on every iteration).
    train = pd.read_csv(data_path+'minmax_unsample_train.csv')
    dev = pd.read_csv(data_path+'minmax_unsample_dev.csv')
    test = pd.read_csv(data_path+'minmax_unsample_test.csv')
    for random_state in range(1,iterations+1):
        model_name = optimizer+'_nc'+str(n_calls)+'_cv'+str(cv)+'_seed'+str(random_state)
        logger.info('Model Name:{}'.format(model_name))
        train_dev = pd.concat([train,dev],axis=0)
        # Re-shuffle per seed so each run sees differently mixed CV folds.
        train_dev = train_dev.sample(frac=1)
        # Targets are re-derived each iteration because they are overwritten
        # by the renormalized arrays below.
        train_y = train['Y']
        train_x = train.drop('Y', axis=1)
        dev_y = dev['Y']
        dev_x = dev.drop('Y', axis=1)
        test_y = test['Y']
        test_x = test.drop('Y', axis=1)
        train_dev_y = train_dev['Y']
        train_dev_x = train_dev.drop('Y', axis=1)
        params_file = model_path + model_name+'_optimized_params.csv'
        logger.info("Optimized params:{}".format(params_file))
        optimal_params = None
        if os.path.exists(params_file):
            cached = pd.read_csv(params_file)
            # Reuse the cache only when it matches the requested budget;
            # a mismatch now triggers re-tuning instead of doing nothing.
            if cached['n_calls'][0]==n_calls:
                logger.info("The n_calls="+str(n_calls)+" was already tuned")
                optimal_params = cached
        if optimal_params is not None:
            esvr = SVR(C=optimal_params['C'][0], epsilon=optimal_params['epsilon'][0], gamma=optimal_params['gamma'][0])
            time_cost = optimal_params['time_cost'][0]
        else:
            reg = SVR(tol=1e-4)
            # Shared (C, epsilon, gamma) search space.
            space = ESVR_SPACE
            @use_named_args(space)
            def objective(**params):
                # Negated: skopt minimizes, sklearn returns neg-MSE.
                reg.set_params(**params)
                return -np.mean(cross_val_score(reg,train_dev_x,train_dev_y,cv=cv,n_jobs=-1,scoring='neg_mean_squared_error'))
            start = time.process_time()
            if optimizer=='gp':
                res = gp_minimize(objective,space,n_calls=n_calls,random_state=random_state,verbose=True,n_jobs=-1)
            elif optimizer=='fr_bt':
                res = forest_minimize(objective,space,n_calls=n_calls,base_estimator='ET',random_state=random_state,verbose=True,n_jobs=-1)
            elif optimizer=='fr_rf':
                res = forest_minimize(objective,space,n_calls=n_calls,base_estimator='RF',random_state=random_state,verbose=True,n_jobs=-1)
            elif optimizer=='dm':
                res = dummy_minimize(objective,space,n_calls=n_calls)
            else:
                # Fail fast instead of hitting a NameError on `res` later.
                raise ValueError('Unknown optimizer:{}'.format(optimizer))
            end = time.process_time()
            time_cost = end - start
            dump(res,model_path+model_name+'_result_seed'+str(random_state)+'.pkl',store_objective=False)
            # Visualize the tuning process for this seed.
            plot_objective_(res,dimensions=DIMENSION_ESVR,fig_savepath=model_path+model_name+'_objective.png')
            plot_evaluations_(res,dimensions=DIMENSION_ESVR,fig_savepath=model_path+model_name+'_evaluation.png')
            plot_convergence_(res,fig_savepath=model_path+model_name+'_convergence.png')
            logger.info('Best score=%.4f'%res.fun)
            logger.info(""" Best parameters:
            -C = %.8f
            -epsilon = %.8f
            -gamma = %.8f
            """%(res.x[0],res.x[1],res.x[2]))
            logger.info('Time cost:{}'.format(time_cost))
            # Cache the optimum so re-runs with the same n_calls skip tuning.
            params_dict={
                'C':res.x[0],
                'epsilon':res.x[1],
                'gamma':res.x[2],
                'time_cost':(time_cost),
                'n_calls':n_calls,
            }
            params_df = pd.DataFrame(params_dict,index=[0])
            params_df.to_csv(params_file)
            esvr = SVR(C=res.x[0], epsilon=res.x[1], gamma=res.x[2])
        # Fit once on train+dev and predict every split (the original refit
        # the identical model once per split).
        esvr.fit(train_dev_x,train_dev_y)
        train_predictions = esvr.predict(train_x)
        dev_predictions = esvr.predict(dev_x)
        test_predictions = esvr.predict(test_x)
        train_y=(train_y.values).flatten()
        dev_y=(dev_y.values).flatten()
        test_y=(test_y.values).flatten()
        # Renormalization bounds of the target series (last row of id file).
        norm_id = pd.read_csv(data_path + 'norm_unsample_id.csv')
        sMin = norm_id['series_min'][norm_id.shape[0]-1]
        sMax = norm_id['series_max'][norm_id.shape[0]-1]
        logger.debug('Series Min:\n {}'.format(sMin))
        logger.debug('Series Max:\n {}'.format(sMax))
        # Map records and predictions from [-1,1] back to the original scale
        # and cap negative predictions at zero.
        train_y = np.multiply(train_y + 1,sMax - sMin) / 2 + sMin
        dev_y = np.multiply(dev_y + 1,sMax - sMin) / 2 + sMin
        test_y = np.multiply(test_y + 1,sMax - sMin) / 2 + sMin
        train_predictions = np.multiply(train_predictions + 1, sMax -sMin) / 2 + sMin
        train_predictions[train_predictions<0.0]=0.0
        dev_predictions = np.multiply(dev_predictions + 1, sMax -sMin) / 2 + sMin
        dev_predictions[dev_predictions<0.0]=0.0
        test_predictions = np.multiply(test_predictions + 1, sMax -sMin) / 2 + sMin
        test_predictions[test_predictions<0.0]=0.0
        dum_pred_results(
            path = model_path+model_name+'.csv',
            train_y = train_y,
            train_predictions=train_predictions,
            dev_y = dev_y,
            dev_predictions = dev_predictions,
            test_y = test_y,
            test_predictions = test_predictions,
            time_cost=time_cost)
        plot_rela_pred(train_y,train_predictions,fig_savepath=model_path +model_name + '_train_pred.png')
        plot_rela_pred(dev_y,dev_predictions,fig_savepath=model_path +model_name + "_dev_pred.png")
        plot_rela_pred(test_y,test_predictions,fig_savepath=model_path +model_name + "_test_pred.png")
        plot_error_distribution(test_y,test_predictions,fig_savepath=model_path +model_name + "_test_error.png")
        plt.close('all')
def multi_step_esvr(root_path,station,decomposer,predict_pattern,lags,model_id,optimizer='gp',wavelet_level='db10-2',n_calls=100,cv=10):
    """Tune and evaluate an epsilon-SVR forecaster for one decomposed sub-signal.

    Same pipeline as ``one_step_esvr`` but operating on the data files of
    sub-signal ``model_id`` (suffix ``_s<model_id>``). Unlike the one-step
    variant, negative predictions are NOT capped at zero — decomposed
    sub-signals can legitimately take negative values. Tuned parameters are
    cached per sub-signal and reused when ``n_calls`` matches the cache.

    Args:
        root_path: Root directory of the project tree.
        station: Station (gauge) name.
        decomposer: Signal decomposition algorithm (e.g. 'dwt', 'modwt').
        predict_pattern: Prediction pattern (data sub-directory name).
        lags: Per-sub-signal lag list; only its length is used here, to
            validate ``model_id``.
        model_id: 1-based index of the sub-signal to model.
        optimizer: 'gp', 'fr_bt', 'fr_rf' or 'dm' (see ``one_step_esvr``).
        wavelet_level: Mother wavelet and decomposition level; only used for
            the wavelet-based decomposers ('dwt'/'modwt').
        n_calls: Number of objective evaluations for the optimizer.
        cv: Number of cross-validation folds for the tuning objective.

    Raises:
        Exception: If ``model_id`` exceeds the number of sub-signals.
        ValueError: If ``optimizer`` is not one of the supported names.
    """
    logger.info('Build multi-step epsilon SVR model...')
    logger.info('Root path:{}'.format(root_path))
    logger.info('Station:{}'.format(station))
    logger.info('Decomposer:{}'.format(decomposer))
    logger.info('Predict pattern:{}'.format(predict_pattern))
    logger.info('Lags:{}'.format(lags))
    logger.info('Model index:{}'.format(model_id))
    logger.info('Optimizer:{}'.format(optimizer))
    logger.info('Mother wavelet and decomposition level of WA:{}'.format(wavelet_level))
    logger.info('Number of calls:{}'.format(n_calls))
    if model_id>len(lags):
        raise Exception("The model id exceed the number of sub-signals")
    predictor = 'esvr'
    signals = station+'_'+decomposer
    # Wavelet decomposers keep data/models in a per-wavelet sub-directory;
    # each sub-signal gets its own 's<model_id>' project folder.
    if decomposer=='dwt' or decomposer=='modwt':
        data_path = root_path + '/'+signals+'/data/'+wavelet_level+'/'+predict_pattern+'/'
        model_path = root_path+'/'+signals+'/projects/'+predictor+'/'+wavelet_level+'/'+predict_pattern+'/s'+str(model_id)+'/history/'
    else:
        data_path = root_path + '/'+signals+'/data/'+predict_pattern+'/'
        model_path = root_path+'/'+signals+'/projects/'+predictor+'/'+predict_pattern+'/s'+str(model_id)+'/history/'
    if not os.path.exists(model_path):
        os.makedirs(model_path)
    model_name = optimizer+'_nc'+str(n_calls)+'_cv'+str(cv)+'_s'+str(model_id)
    logger.info("Data Path:{}".format(data_path))
    logger.info("Model Path:{}".format(model_path))
    # Load the normalized samples of this sub-signal.
    train = pd.read_csv(data_path+'minmax_unsample_train_s'+str(model_id)+'.csv')
    dev = pd.read_csv(data_path+'minmax_unsample_dev_s'+str(model_id)+'.csv')
    test = pd.read_csv(data_path+'minmax_unsample_test_s'+str(model_id)+'.csv')
    train_dev = pd.concat([train,dev],axis=0)
    # Shuffle train+dev so the CV folds mix training and development samples.
    train_dev = train_dev.sample(frac=1)
    train_y = train['Y']
    train_x = train.drop('Y', axis=1)
    dev_y = dev['Y']
    dev_x = dev.drop('Y', axis=1)
    test_y = test['Y']
    test_x = test.drop('Y', axis=1)
    train_dev_y = train_dev['Y']
    train_dev_x = train_dev.drop('Y', axis=1)
    params_file = model_path + model_name +'_optimized_params_s' + str(model_id) +'.csv'
    logger.info("Optimized params:{}".format(params_file))
    optimal_params = None
    if os.path.exists(params_file):
        cached = pd.read_csv(params_file)
        # Reuse the cache only when it matches the requested budget; a
        # mismatch now triggers re-tuning instead of doing nothing.
        if cached['n_calls'][0]==n_calls:
            logger.info("The n_calls="+str(n_calls)+" was already tuned")
            optimal_params = cached
    if optimal_params is not None:
        esvr = SVR(C=optimal_params['C'][0], epsilon=optimal_params['epsilon'][0], gamma=optimal_params['gamma'][0])
        time_cost = optimal_params['time_cost'][0]
    else:
        reg = SVR(tol=1e-4)
        # Shared (C, epsilon, gamma) search space.
        space = ESVR_SPACE
        @use_named_args(space)
        def objective(**params):
            # Negated: skopt minimizes, sklearn returns neg-MSE.
            reg.set_params(**params)
            return -np.mean(cross_val_score(reg,train_dev_x,train_dev_y,cv=cv,n_jobs=-1,scoring='neg_mean_squared_error'))
        start = time.process_time()
        if optimizer=='gp':
            res = gp_minimize(objective,space,n_calls=n_calls,random_state=0,verbose=True,n_jobs=-1)
        elif optimizer=='fr_bt':
            res = forest_minimize(objective,space,n_calls=n_calls,base_estimator='ET',random_state=0,verbose=True,n_jobs=-1)
        elif optimizer=='fr_rf':
            res = forest_minimize(objective,space,n_calls=n_calls,base_estimator='RF',random_state=0,verbose=True,n_jobs=-1)
        elif optimizer=='dm':
            res = dummy_minimize(objective,space,n_calls=n_calls)
        else:
            # Fail fast instead of hitting a NameError on `res` later.
            raise ValueError('Unknown optimizer:{}'.format(optimizer))
        end=time.process_time()
        time_cost = end - start
        dump(res,model_path+model_name+'_result.pkl',store_objective=False)
        # Visualize the tuning process.
        plot_objective_(res,dimensions=DIMENSION_ESVR,fig_savepath=model_path+model_name+'_objective.png')
        plot_evaluations_(res,dimensions=DIMENSION_ESVR,fig_savepath=model_path+model_name+'_evaluation.png')
        plot_convergence_(res,fig_savepath=model_path+model_name+'_convergence.png')
        logger.info('Best score=%.4f'%res.fun)
        logger.info(""" Best parameters:
        -C = %.8f
        -epsilon = %.8f
        -gamma = %.8f
        """%(res.x[0],res.x[1],res.x[2]))
        logger.info('Time cost:{}'.format(time_cost))
        # Cache the optimum so re-runs with the same n_calls skip tuning.
        params_dict={
            'C':res.x[0],
            'epsilon':res.x[1],
            'gamma':res.x[2],
            'time_cost':(time_cost),
            'n_calls':n_calls,
        }
        params_df = pd.DataFrame(params_dict,index=[0])
        params_df.to_csv(params_file)
        esvr = SVR(C=res.x[0], epsilon=res.x[1], gamma=res.x[2])
    # Fit once on train+dev and predict every split (the original refit the
    # identical model once per split).
    esvr.fit(train_dev_x,train_dev_y)
    train_predictions = esvr.predict(train_x)
    dev_predictions = esvr.predict(dev_x)
    test_predictions = esvr.predict(test_x)
    train_y=(train_y.values).flatten()
    dev_y=(dev_y.values).flatten()
    test_y=(test_y.values).flatten()
    # Renormalization bounds of this sub-signal (last row of the id file).
    norm_id = pd.read_csv(data_path + 'norm_unsample_id_s'+str(model_id)+'.csv')
    sMin = norm_id['series_min'][norm_id.shape[0]-1]
    sMax = norm_id['series_max'][norm_id.shape[0]-1]
    logger.debug('Series Min:\n {}'.format(sMin))
    logger.debug('Series Max:\n {}'.format(sMax))
    # Map records and predictions from [-1,1] back to the original scale.
    # No zero-capping here: sub-signals may be negative (matches original).
    train_y = np.multiply(train_y + 1,sMax - sMin) / 2 + sMin
    dev_y = np.multiply(dev_y + 1,sMax - sMin) / 2 + sMin
    test_y = np.multiply(test_y + 1,sMax - sMin) / 2 + sMin
    train_predictions = np.multiply(train_predictions + 1, sMax -sMin) / 2 + sMin
    dev_predictions = np.multiply(dev_predictions + 1, sMax -sMin) / 2 + sMin
    test_predictions = np.multiply(test_predictions + 1, sMax -sMin) / 2 + sMin
    dum_pred_results(
        path = model_path+model_name+'.csv',
        train_y = train_y,
        train_predictions=train_predictions,
        dev_y = dev_y,
        dev_predictions = dev_predictions,
        test_y = test_y,
        test_predictions = test_predictions,
        time_cost=time_cost)
    plot_rela_pred(train_y,train_predictions,fig_savepath=model_path + model_name + '_train_pred.png')
    plot_rela_pred(dev_y,dev_predictions,fig_savepath=model_path + model_name + "_dev_pred.png")
    plot_rela_pred(test_y,test_predictions,fig_savepath=model_path + model_name + "_test_pred.png")
    plot_error_distribution(test_y,test_predictions,fig_savepath=model_path + model_name + "_test_error.png",)
    plt.close('all')
def multi_step_esvr_multi_seed(root_path,station,decomposer,predict_pattern,lags,model_id,optimizer='gp',wavelet_level='db10-2',n_calls=100,cv=10,iterations=10):
    """Tune epsilon-SVR models for one sub-signal across several random seeds.

    Runs the ``multi_step_esvr`` pipeline once per seed in
    ``range(1, iterations+1)``, passing the seed as the optimizer's
    ``random_state`` (the original passed ``random_state=0`` for every seed,
    so all iterations repeated the same search; fixed here to match
    ``one_step_esvr_multi_seed``). As in the single-seed multi-step variant,
    negative predictions are NOT capped — sub-signals can be negative.

    Args:
        root_path: Root directory of the project tree.
        station: Station (gauge) name.
        decomposer: Signal decomposition algorithm (e.g. 'dwt', 'modwt').
        predict_pattern: Prediction pattern (data sub-directory name).
        lags: Per-sub-signal lag list; only its length is used here, to
            validate ``model_id``.
        model_id: 1-based index of the sub-signal to model.
        optimizer: 'gp', 'fr_bt', 'fr_rf' or 'dm' (see ``one_step_esvr``).
        wavelet_level: Mother wavelet and decomposition level; only used for
            the wavelet-based decomposers ('dwt'/'modwt').
        n_calls: Number of objective evaluations for the optimizer.
        cv: Number of cross-validation folds for the tuning objective.
        iterations: Number of random seeds (1..iterations) to run.

    Raises:
        Exception: If ``model_id`` exceeds the number of sub-signals.
        ValueError: If ``optimizer`` is not one of the supported names.
    """
    logger.info('Roo path:{}'.format(root_path))
    logger.info('Station:{}'.format(station))
    logger.info('Decomposer:{}'.format(decomposer))
    logger.info('Predict pattern:{}'.format(predict_pattern))
    logger.info('Lags:{}'.format(lags))
    logger.info('Model index:{}'.format(model_id))
    logger.info('Optimizer:{}'.format(optimizer))
    if model_id>len(lags):
        raise Exception("The model id exceed the number of sub-signals")
    predictor = 'esvr'
    signals = station+'_'+decomposer
    # Wavelet decomposers keep data/models in a per-wavelet sub-directory;
    # each sub-signal gets its own 's<model_id>' project folder.
    if decomposer=='dwt' or decomposer=='modwt':
        data_path = root_path + '/'+signals+'/data/'+wavelet_level+'/'+predict_pattern+'/'
        model_path = root_path+'/'+signals+'/projects/'+predictor+'/'+wavelet_level+'/'+predict_pattern+'/s'+str(model_id)+'/history/'
    else:
        data_path = root_path + '/'+signals+'/data/'+predict_pattern+'/'
        model_path = root_path+'/'+signals+'/projects/'+predictor+'/'+predict_pattern+'/s'+str(model_id)+'/history/'
    if not os.path.exists(model_path):
        os.makedirs(model_path)
    logger.info("Data Path:{}".format(data_path))
    logger.info("Model Path:{}".format(model_path))
    # The samples do not change with the seed: load them once, outside the
    # loop (the original re-read the CSVs on every iteration).
    train = pd.read_csv(data_path+'minmax_unsample_train_s'+str(model_id)+'.csv')
    dev = pd.read_csv(data_path+'minmax_unsample_dev_s'+str(model_id)+'.csv')
    test = pd.read_csv(data_path+'minmax_unsample_test_s'+str(model_id)+'.csv')
    for random_state in range(1,iterations+1):
        model_name = optimizer+'_nc'+str(n_calls)+'_cv'+str(cv)+'_s'+str(model_id)+'_seed'+str(random_state)
        train_dev = pd.concat([train,dev],axis=0)
        # Re-shuffle per seed so each run sees differently mixed CV folds.
        train_dev = train_dev.sample(frac=1)
        # Targets are re-derived each iteration because they are overwritten
        # by the renormalized arrays below.
        train_y = train['Y']
        train_x = train.drop('Y', axis=1)
        dev_y = dev['Y']
        dev_x = dev.drop('Y', axis=1)
        test_y = test['Y']
        test_x = test.drop('Y', axis=1)
        train_dev_y = train_dev['Y']
        train_dev_x = train_dev.drop('Y', axis=1)
        params_file = model_path + model_name +'_optimized_params_s' + str(model_id) +'.csv'
        logger.info("Optimized params:{}".format(params_file))
        optimal_params = None
        if os.path.exists(params_file):
            cached = pd.read_csv(params_file)
            # Reuse the cache only when it matches the requested budget; a
            # mismatch now triggers re-tuning instead of doing nothing.
            if cached['n_calls'][0]==n_calls:
                logger.info("The n_calls="+str(n_calls)+" was already tuned")
                optimal_params = cached
        if optimal_params is not None:
            esvr = SVR(C=optimal_params['C'][0], epsilon=optimal_params['epsilon'][0], gamma=optimal_params['gamma'][0])
            time_cost = optimal_params['time_cost'][0]
        else:
            reg = SVR(tol=1e-4)
            # Shared (C, epsilon, gamma) search space.
            space = ESVR_SPACE
            @use_named_args(space)
            def objective(**params):
                # Negated: skopt minimizes, sklearn returns neg-MSE.
                reg.set_params(**params)
                return -np.mean(cross_val_score(reg,train_dev_x,train_dev_y,cv=cv,n_jobs=-1,scoring='neg_mean_squared_error'))
            start = time.process_time()
            # BUG FIX: the original passed random_state=0 for every seed,
            # making all iterations identical; pass the loop seed, matching
            # one_step_esvr_multi_seed.
            if optimizer=='gp':
                res = gp_minimize(objective,space,n_calls=n_calls,random_state=random_state,verbose=True,n_jobs=-1)
            elif optimizer=='fr_bt':
                res = forest_minimize(objective,space,n_calls=n_calls,base_estimator='ET',random_state=random_state,verbose=True,n_jobs=-1)
            elif optimizer=='fr_rf':
                res = forest_minimize(objective,space,n_calls=n_calls,base_estimator='RF',random_state=random_state,verbose=True,n_jobs=-1)
            elif optimizer=='dm':
                res = dummy_minimize(objective,space,n_calls=n_calls)
            else:
                # Fail fast instead of hitting a NameError on `res` later.
                raise ValueError('Unknown optimizer:{}'.format(optimizer))
            end=time.process_time()
            time_cost = end - start
            dump(res,model_path+model_name+'_result_seed'+str(random_state)+'.pkl',store_objective=False)
            # Visualize the tuning process for this seed.
            plot_objective_(res,dimensions=DIMENSION_ESVR,fig_savepath=model_path+model_name+'_objective.png')
            plot_evaluations_(res,dimensions=DIMENSION_ESVR,fig_savepath=model_path+model_name+'_evaluation.png')
            plot_convergence_(res,fig_savepath=model_path+model_name+'_convergence.png')
            logger.info('Best score=%.4f'%res.fun)
            logger.info(""" Best parameters:
            -C = %.8f
            -epsilon = %.8f
            -gamma = %.8f
            """%(res.x[0],res.x[1],res.x[2]))
            logger.info('Time cost:{}'.format(time_cost))
            # Cache the optimum so re-runs with the same n_calls skip tuning.
            params_dict={
                'C':res.x[0],
                'epsilon':res.x[1],
                'gamma':res.x[2],
                'time_cost':(time_cost),
                'n_calls':n_calls,
            }
            params_df = pd.DataFrame(params_dict,index=[0])
            params_df.to_csv(params_file)
            esvr = SVR(C=res.x[0], epsilon=res.x[1], gamma=res.x[2])
        # Fit once on train+dev and predict every split (the original refit
        # the identical model once per split).
        esvr.fit(train_dev_x,train_dev_y)
        train_predictions = esvr.predict(train_x)
        dev_predictions = esvr.predict(dev_x)
        test_predictions = esvr.predict(test_x)
        train_y=(train_y.values).flatten()
        dev_y=(dev_y.values).flatten()
        test_y=(test_y.values).flatten()
        # Renormalization bounds of this sub-signal (last row of the id file).
        norm_id = pd.read_csv(data_path + 'norm_unsample_id_s' + str(model_id) + '.csv')
        sMin = norm_id['series_min'][norm_id.shape[0]-1]
        sMax = norm_id['series_max'][norm_id.shape[0]-1]
        logger.debug('Series Min:\n {}'.format(sMin))
        logger.debug('Series Max:\n {}'.format(sMax))
        # Map records and predictions from [-1,1] back to the original scale.
        # No zero-capping here: sub-signals may be negative (matches original).
        train_y = np.multiply(train_y + 1,sMax - sMin) / 2 + sMin
        dev_y = np.multiply(dev_y + 1,sMax - sMin) / 2 + sMin
        test_y = np.multiply(test_y + 1,sMax - sMin) / 2 + sMin
        train_predictions = np.multiply(train_predictions + 1, sMax -sMin) / 2 + sMin
        dev_predictions = np.multiply(dev_predictions + 1, sMax -sMin) / 2 + sMin
        test_predictions = np.multiply(test_predictions + 1, sMax -sMin) / 2 + sMin
        dum_pred_results(
            path = model_path+model_name+'.csv',
            train_y = train_y,
            train_predictions=train_predictions,
            dev_y = dev_y,
            dev_predictions = dev_predictions,
            test_y = test_y,
            test_predictions = test_predictions,
            time_cost=time_cost)
        plot_rela_pred(train_y,train_predictions,fig_savepath=model_path + model_name + '_train_pred.png')
        plot_rela_pred(dev_y,dev_predictions,fig_savepath=model_path + model_name + "_dev_pred.png")
        plot_rela_pred(test_y,test_predictions,fig_savepath=model_path + model_name + "_test_pred.png")
        plot_error_distribution(test_y,test_predictions,fig_savepath=model_path + model_name + "_test_error.png",)
        plt.close('all')
def gbrt(root_path, station, predict_pattern, optimizer='gp', n_calls=100, cv=10):
    """Tune, fit and evaluate a GBRT model on monoscale (non-decomposed) data.

    Hyper-parameters are searched with scikit-optimize by minimizing the
    cross-validated MSE on the shuffled train+dev set. The tuned model is then
    fit once on train+dev, its predictions for train/dev/test are renormalized
    back to the original scale, dumped to CSV and plotted. If an optimized
    parameter file with the same ``n_calls`` already exists, tuning is skipped.

    Args:
        root_path: Project root directory.
        station: Station (catchment) name; selects the data sub-folder.
        predict_pattern: Name of the prediction-pattern sub-folder.
        optimizer: 'gp' (Gaussian process), 'fr_bt' (forest/extra-trees),
            'fr_rf' (forest/random-forest) or 'dm' (dummy/random search).
        n_calls: Number of optimization calls.
        cv: Number of cross-validation folds.

    Raises:
        ValueError: If ``optimizer`` is not one of the supported values.
    """
    logger.info('Root path:{}'.format(root_path))
    logger.info('Station:{}'.format(station))
    logger.info('Predict pattern:{}'.format(predict_pattern))
    logger.info('Optimizer:{}'.format(optimizer))
    logger.info('Number of calls:{}'.format(n_calls))
    predictor = 'gbrt'
    data_path = root_path + '/' + station + '/data/'
    model_path = root_path + '/' + station + '/projects/' + predictor + '/history/'
    if not os.path.exists(model_path):
        os.makedirs(model_path)
    model_name = optimizer + '_nc' + str(n_calls) + '_cv' + str(cv)
    logger.info("Data Path:{}".format(data_path))
    logger.info("Model Path:{}".format(model_path))
    # Load the normalized learning samples.
    train = pd.read_csv(data_path + 'minmax_unsample_train.csv')
    dev = pd.read_csv(data_path + 'minmax_unsample_dev.csv')
    test = pd.read_csv(data_path + 'minmax_unsample_test.csv')
    train_dev = pd.concat([train, dev], axis=0)
    # Shuffle train+dev before cross-validation.
    train_dev = train_dev.sample(frac=1)
    assert train.shape[1] == dev.shape[1] == test.shape[1] == train_dev.shape[1]
    train_y = train['Y']
    train_x = train.drop('Y', axis=1)
    dev_y = dev['Y']
    dev_x = dev.drop('Y', axis=1)
    test_y = test['Y']
    test_x = test.drop('Y', axis=1)
    train_dev_y = train_dev['Y']
    train_dev_x = train_dev.drop('Y', axis=1)
    if os.path.exists(model_path + model_name + '_optimized_params.csv'):
        optimal_params = pd.read_csv(model_path + model_name + '_optimized_params.csv')
        pre_n_calls = optimal_params['n_calls'][0]
        if pre_n_calls == n_calls:
            logger.info("The n_calls=" + str(n_calls) + " was already tuned")
    else:
        # Number of input features bounds the 'max_features' dimension.
        n_features = train_dev_x.shape[1]
        reg = GradientBoostingRegressor(n_estimators=100, random_state=0)
        # The hyper-parameter search space.
        space = [
            Integer(1, 25, name='max_depth'),
            Real(10**-5, 10**0, 'log-uniform', name='learning_rate'),
            Integer(1, n_features, name='max_features'),
            Integer(2, 100, name='min_samples_split'),
            Integer(1, 100, name='min_samples_leaf'),
        ]

        @use_named_args(space)
        def objective(**params):
            # cross_val_score returns *negative* MSE, hence the sign flip:
            # the optimizer minimizes the mean cross-validated MSE.
            reg.set_params(**params)
            return -np.mean(cross_val_score(
                reg, train_dev_x, train_dev_y, cv=cv, n_jobs=-1,
                scoring='neg_mean_squared_error'))

        start = time.process_time()
        if optimizer == 'gp':
            res = gp_minimize(objective, space, n_calls=n_calls, random_state=0, verbose=True, n_jobs=-1)
        elif optimizer == 'fr_bt':
            res = forest_minimize(objective, space, n_calls=n_calls, base_estimator='ET', random_state=0, verbose=True, n_jobs=-1)
        elif optimizer == 'fr_rf':
            res = forest_minimize(objective, space, n_calls=n_calls, base_estimator='RF', random_state=0, verbose=True, n_jobs=-1)
        elif optimizer == 'dm':
            res = dummy_minimize(objective, space, n_calls=n_calls)
        else:
            # Fail fast: the original fell through and later crashed with
            # NameError on 'res' for an unrecognized optimizer.
            raise ValueError('Unknown optimizer:{}'.format(optimizer))
        end = time.process_time()
        time_cost = end - start
        dump(res, model_path + model_name + '_result.pkl', store_objective=False)
        # Sanity check that the dumped result can be reloaded.
        returned_results = load(model_path + model_name + '_result.pkl')
        plot_objective_(res, dimensions=DIMENSION_GBRT, fig_savepath=model_path + model_name + '_objective.png')
        plot_evaluations_(res, dimensions=DIMENSION_GBRT, fig_savepath=model_path + model_name + '_evaluation.png')
        plot_convergence_(res, fig_savepath=model_path + model_name + '_convergence.png')
        logger.info('Best score=%.4f' % res.fun)
        logger.info("""Best parameters:
        - max_depth=%d
        - learning_rate=%.6f
        - max_features=%d
        - min_samples_split=%d
        - min_samples_leaf=%d""" % (res.x[0], res.x[1], res.x[2], res.x[3], res.x[4]))
        logger.info('Time cost:{}'.format(time_cost))
        params_dict = {
            'max_depth': res.x[0],
            'learning_rate': res.x[1],
            'max_features': res.x[2],
            'min_samples_split': res.x[3],
            'min_samples_leaf': res.x[4],
            'time_cost': time_cost,
            'n_calls': n_calls,
        }
        params_df = pd.DataFrame(params_dict, index=[0])
        params_df.to_csv(model_path + model_name + '_optimized_params.csv')
        GBR = GradientBoostingRegressor(
            max_depth=res.x[0],
            learning_rate=res.x[1],
            max_features=res.x[2],
            min_samples_split=res.x[3],
            min_samples_leaf=res.x[4])
        # Fit the optimal model ONCE and reuse it for all three splits.
        # The original refit before every predict, which is wasteful and,
        # since no random_state is fixed here, could yield three slightly
        # different models.
        GBR.fit(train_dev_x, train_dev_y)
        train_predictions = GBR.predict(train_x)
        dev_predictions = GBR.predict(dev_x)
        test_predictions = GBR.predict(test_x)
        train_y = (train_y.values).flatten()
        dev_y = (dev_y.values).flatten()
        test_y = (test_y.values).flatten()
        # Renormalization bounds come from the last row of the norm-id table.
        norm_id = pd.read_csv(data_path + 'norm_unsample_id.csv')
        sMin = norm_id['series_min'][norm_id.shape[0] - 1]
        sMax = norm_id['series_max'][norm_id.shape[0] - 1]
        logger.debug('Series Min:\n {}'.format(sMin))
        logger.debug('Series Max:\n {}'.format(sMax))
        # Map records and predictions from [-1,1] back to the original scale,
        # then clip physically impossible negative predictions to zero.
        train_y = np.multiply(train_y + 1, sMax - sMin) / 2 + sMin
        dev_y = np.multiply(dev_y + 1, sMax - sMin) / 2 + sMin
        test_y = np.multiply(test_y + 1, sMax - sMin) / 2 + sMin
        train_predictions = np.multiply(train_predictions + 1, sMax - sMin) / 2 + sMin
        train_predictions[train_predictions < 0.0] = 0.0
        dev_predictions = np.multiply(dev_predictions + 1, sMax - sMin) / 2 + sMin
        dev_predictions[dev_predictions < 0.0] = 0.0
        test_predictions = np.multiply(test_predictions + 1, sMax - sMin) / 2 + sMin
        test_predictions[test_predictions < 0.0] = 0.0
        dum_pred_results(
            path=model_path + model_name + '.csv',
            train_y=train_y,
            train_predictions=train_predictions,
            dev_y=dev_y,
            dev_predictions=dev_predictions,
            test_y=test_y,
            test_predictions=test_predictions,
            time_cost=time_cost)
        plot_rela_pred(train_y, train_predictions, fig_savepath=model_path + model_name + '_train_pred.png')
        plot_rela_pred(dev_y, dev_predictions, fig_savepath=model_path + model_name + "_dev_pred.png")
        plot_rela_pred(test_y, test_predictions, fig_savepath=model_path + model_name + "_test_pred.png")
        plot_error_distribution(test_y, test_predictions, fig_savepath=model_path + model_name + "_test_error.png")
        plt.close('all')
def one_step_gbrt(root_path, station, decomposer, predict_pattern, optimizer='gp', wavelet_level='db10-2', n_calls=100, cv=10):
    """Tune, fit and evaluate a one-step GBRT model on decomposed signals.

    Same pipeline as :func:`gbrt`, but the data/model folders are rooted at
    the station's decomposed-signal project ('<station>_<decomposer>'); for
    wavelet decomposers ('dwt'/'modwt') the wavelet level is an extra path
    segment. Tuning is skipped if an optimized parameter file with the same
    ``n_calls`` already exists.

    Args:
        root_path: Project root directory.
        station: Station (catchment) name.
        decomposer: Decomposition method (e.g. 'dwt', 'modwt', 'eemd', ...).
        predict_pattern: Name of the prediction-pattern sub-folder.
        optimizer: 'gp', 'fr_bt', 'fr_rf' or 'dm'.
        wavelet_level: Mother wavelet and decomposition level for WA.
        n_calls: Number of optimization calls.
        cv: Number of cross-validation folds.

    Raises:
        ValueError: If ``optimizer`` is not one of the supported values.
    """
    logger.info('Root path:{}'.format(root_path))
    logger.info('Station:{}'.format(station))
    logger.info('Decomposer:{}'.format(decomposer))
    logger.info('Predict pattern:{}'.format(predict_pattern))
    logger.info('Optimizer:{}'.format(optimizer))
    predictor = 'gbrt'
    signals = station + '_' + decomposer
    if decomposer == 'dwt' or decomposer == 'modwt':
        data_path = root_path + '/' + signals + '/data/' + wavelet_level + '/' + predict_pattern + '/'
        model_path = root_path + '/' + signals + '/projects/' + predictor + '/' + wavelet_level + '/' + predict_pattern + '/history/'
    else:
        data_path = root_path + '/' + signals + '/data/' + predict_pattern + '/'
        model_path = root_path + '/' + signals + '/projects/' + predictor + '/' + predict_pattern + '/history/'
    if not os.path.exists(model_path):
        os.makedirs(model_path)
    model_name = optimizer + '_nc' + str(n_calls) + '_cv' + str(cv)
    logger.info("Data Path:{}".format(data_path))
    logger.info("Model Path:{}".format(model_path))
    # Load the normalized learning samples.
    train = pd.read_csv(data_path + 'minmax_unsample_train.csv')
    dev = pd.read_csv(data_path + 'minmax_unsample_dev.csv')
    test = pd.read_csv(data_path + 'minmax_unsample_test.csv')
    train_dev = pd.concat([train, dev], axis=0)
    # Shuffle train+dev before cross-validation.
    train_dev = train_dev.sample(frac=1)
    train_y = train['Y']
    train_x = train.drop('Y', axis=1)
    dev_y = dev['Y']
    dev_x = dev.drop('Y', axis=1)
    test_y = test['Y']
    test_x = test.drop('Y', axis=1)
    train_dev_y = train_dev['Y']
    train_dev_x = train_dev.drop('Y', axis=1)
    if os.path.exists(model_path + model_name + '_optimized_params.csv'):
        optimal_params = pd.read_csv(model_path + model_name + '_optimized_params.csv')
        pre_n_calls = optimal_params['n_calls'][0]
        if pre_n_calls == n_calls:
            logger.info("The n_calls=" + str(n_calls) + " was already tuned")
    else:
        # Number of input features bounds the 'max_features' dimension.
        n_features = train_dev_x.shape[1]
        reg = GradientBoostingRegressor(n_estimators=100, random_state=0)
        # The hyper-parameter search space.
        space = [
            Integer(1, 25, name='max_depth'),
            Real(10**-5, 10**0, 'log-uniform', name='learning_rate'),
            Integer(1, n_features, name='max_features'),
            Integer(2, 100, name='min_samples_split'),
            Integer(1, 100, name='min_samples_leaf'),
        ]

        @use_named_args(space)
        def objective(**params):
            # Minimize mean cross-validated MSE (sign flip: scorer is
            # 'neg_mean_squared_error').
            reg.set_params(**params)
            return -np.mean(cross_val_score(
                reg, train_dev_x, train_dev_y, cv=cv, n_jobs=-1,
                scoring='neg_mean_squared_error'))

        start = time.process_time()
        if optimizer == 'gp':
            res = gp_minimize(objective, space, n_calls=n_calls, random_state=0, verbose=True, n_jobs=-1)
        elif optimizer == 'fr_bt':
            res = forest_minimize(objective, space, n_calls=n_calls, base_estimator='ET', random_state=0, verbose=True, n_jobs=-1)
        elif optimizer == 'fr_rf':
            res = forest_minimize(objective, space, n_calls=n_calls, base_estimator='RF', random_state=0, verbose=True, n_jobs=-1)
        elif optimizer == 'dm':
            res = dummy_minimize(objective, space, n_calls=n_calls)
        else:
            # Fail fast instead of raising NameError on 'res' below.
            raise ValueError('Unknown optimizer:{}'.format(optimizer))
        end = time.process_time()
        time_cost = end - start
        dump(res, model_path + model_name + '_result.pkl', store_objective=False)
        # Sanity check that the dumped result can be reloaded.
        returned_results = load(model_path + model_name + '_result.pkl')
        plot_objective_(res, dimensions=DIMENSION_GBRT, fig_savepath=model_path + model_name + '_objective.png')
        plot_evaluations_(res, dimensions=DIMENSION_GBRT, fig_savepath=model_path + model_name + '_evaluation.png')
        plot_convergence_(res, fig_savepath=model_path + model_name + '_convergence.png')
        logger.info('Best score=%.4f' % res.fun)
        logger.info("""Best parameters:
        - max_depth=%d
        - learning_rate=%.6f
        - max_features=%d
        - min_samples_split=%d
        - min_samples_leaf=%d""" % (res.x[0], res.x[1], res.x[2], res.x[3], res.x[4]))
        logger.info('Time cost:{}'.format(time_cost))
        params_dict = {
            'max_depth': res.x[0],
            'learning_rate': res.x[1],
            'max_features': res.x[2],
            'min_samples_split': res.x[3],
            'min_samples_leaf': res.x[4],
            'time_cost': time_cost,
            'n_calls': n_calls,
        }
        params_df = pd.DataFrame(params_dict, index=[0])
        params_df.to_csv(model_path + model_name + '_optimized_params.csv')
        GBR = GradientBoostingRegressor(
            max_depth=res.x[0],
            learning_rate=res.x[1],
            max_features=res.x[2],
            min_samples_split=res.x[3],
            min_samples_leaf=res.x[4])
        # Fit the optimal model ONCE and reuse it for all three splits
        # (the original refit per predict; without a fixed random_state the
        # three fits could differ).
        GBR.fit(train_dev_x, train_dev_y)
        train_predictions = GBR.predict(train_x)
        dev_predictions = GBR.predict(dev_x)
        test_predictions = GBR.predict(test_x)
        train_y = (train_y.values).flatten()
        dev_y = (dev_y.values).flatten()
        test_y = (test_y.values).flatten()
        # Renormalization bounds come from the last row of the norm-id table.
        norm_id = pd.read_csv(data_path + 'norm_unsample_id.csv')
        sMin = norm_id['series_min'][norm_id.shape[0] - 1]
        sMax = norm_id['series_max'][norm_id.shape[0] - 1]
        logger.debug('Series Min:\n {}'.format(sMin))
        logger.debug('Series Max:\n {}'.format(sMax))
        # Map records and predictions from [-1,1] back to the original scale,
        # then clip negative predictions to zero.
        train_y = np.multiply(train_y + 1, sMax - sMin) / 2 + sMin
        dev_y = np.multiply(dev_y + 1, sMax - sMin) / 2 + sMin
        test_y = np.multiply(test_y + 1, sMax - sMin) / 2 + sMin
        train_predictions = np.multiply(train_predictions + 1, sMax - sMin) / 2 + sMin
        train_predictions[train_predictions < 0.0] = 0.0
        dev_predictions = np.multiply(dev_predictions + 1, sMax - sMin) / 2 + sMin
        dev_predictions[dev_predictions < 0.0] = 0.0
        test_predictions = np.multiply(test_predictions + 1, sMax - sMin) / 2 + sMin
        test_predictions[test_predictions < 0.0] = 0.0
        dum_pred_results(
            path=model_path + model_name + '.csv',
            train_y=train_y,
            train_predictions=train_predictions,
            dev_y=dev_y,
            dev_predictions=dev_predictions,
            test_y=test_y,
            test_predictions=test_predictions,
            time_cost=time_cost)
        plot_rela_pred(train_y, train_predictions, fig_savepath=model_path + model_name + '_train_pred.png')
        plot_rela_pred(dev_y, dev_predictions, fig_savepath=model_path + model_name + "_dev_pred.png")
        plot_rela_pred(test_y, test_predictions, fig_savepath=model_path + model_name + "_test_pred.png")
        plot_error_distribution(test_y, test_predictions, fig_savepath=model_path + model_name + "_test_error.png")
        plt.close('all')
def multi_step_gbrt(root_path, station, decomposer, predict_pattern, lags, model_id, optimizer='gp', wavelet_level='db10-2', n_calls=100, cv=10):
    """Tune, fit and evaluate a GBRT model for ONE sub-signal ('_s<id>').

    Multi-step variant of :func:`one_step_gbrt`: one model per decomposed
    sub-signal, selected by ``model_id`` (1-based, at most ``len(lags)``).
    All sample files, parameter files and the model folder carry the
    's<model_id>' suffix. Tuning is skipped if an optimized parameter file
    with the same ``n_calls`` already exists.

    Args:
        root_path: Project root directory.
        station: Station (catchment) name.
        decomposer: Decomposition method (e.g. 'dwt', 'modwt', 'eemd', ...).
        predict_pattern: Name of the prediction-pattern sub-folder.
        lags: Lag list, one entry per sub-signal (its length bounds model_id).
        model_id: 1-based index of the sub-signal to model.
        optimizer: 'gp', 'fr_bt', 'fr_rf' or 'dm'.
        wavelet_level: Mother wavelet and decomposition level for WA.
        n_calls: Number of optimization calls.
        cv: Number of cross-validation folds.

    Raises:
        Exception: If ``model_id`` exceeds the number of sub-signals.
        ValueError: If ``optimizer`` is not one of the supported values.
    """
    logger.info('Root path:{}'.format(root_path))
    logger.info('Station:{}'.format(station))
    logger.info('Decomposer:{}'.format(decomposer))
    logger.info('Predict pattern:{}'.format(predict_pattern))
    logger.info('Lags:{}'.format(lags))
    logger.info('Model index:{}'.format(model_id))
    logger.info('Optimizer:{}'.format(optimizer))
    logger.info('Mother wavelet and decomposition level of WA:{}'.format(wavelet_level))
    logger.info('Number of calls:{}'.format(n_calls))
    if model_id > len(lags):
        raise Exception("The model id exceed the number of sub-signals")
    # Set project parameters.
    predictor = 'gbrt'
    signals = station + '_' + decomposer
    # Wavelet decomposers carry an extra wavelet-level path segment.
    if decomposer == 'dwt' or decomposer == 'modwt':
        data_path = root_path + '/' + signals + '/data/' + wavelet_level + '/' + predict_pattern + '/'
        model_path = root_path + '/' + signals + '/projects/' + predictor + '/' + wavelet_level + '/' + predict_pattern + '/s' + str(model_id) + '/history/'
    else:
        data_path = root_path + '/' + signals + '/data/' + predict_pattern + '/'
        model_path = root_path + '/' + signals + '/projects/' + predictor + '/' + predict_pattern + '/s' + str(model_id) + '/history/'
    if not os.path.exists(model_path):
        os.makedirs(model_path)
    model_name = station + '_' + decomposer + '_' + predictor + '_' + predict_pattern + '_s' + str(model_id)
    logger.info("Data Path:{}".format(data_path))
    logger.info("Model Path:{}".format(model_path))
    # Load the normalized learning samples of this sub-signal.
    train = pd.read_csv(data_path + 'minmax_unsample_train_s' + str(model_id) + '.csv')
    dev = pd.read_csv(data_path + 'minmax_unsample_dev_s' + str(model_id) + '.csv')
    test = pd.read_csv(data_path + 'minmax_unsample_test_s' + str(model_id) + '.csv')
    train_dev = pd.concat([train, dev], axis=0)
    # Shuffle train+dev before cross-validation.
    train_dev = train_dev.sample(frac=1)
    train_y = train['Y']
    train_x = train.drop('Y', axis=1)
    dev_y = dev['Y']
    dev_x = dev.drop('Y', axis=1)
    test_y = test['Y']
    test_x = test.drop('Y', axis=1)
    train_dev_y = train_dev['Y']
    train_dev_x = train_dev.drop('Y', axis=1)
    if os.path.exists(model_path + model_name + '_optimized_params_s' + str(model_id) + '.csv'):
        optimal_params = pd.read_csv(model_path + model_name + '_optimized_params_s' + str(model_id) + '.csv')
        pre_n_calls = optimal_params['n_calls'][0]
        if pre_n_calls == n_calls:
            logger.info("The n_calls=" + str(n_calls) + " was already tuned")
    else:
        # Number of input features bounds the 'max_features' dimension.
        n_features = train_dev_x.shape[1]
        reg = GradientBoostingRegressor(n_estimators=100, random_state=0)
        # The hyper-parameter search space.
        space = [
            Integer(1, 25, name='max_depth'),
            Real(10**-5, 10**0, 'log-uniform', name='learning_rate'),
            Integer(1, n_features, name='max_features'),
            Integer(2, 100, name='min_samples_split'),
            Integer(1, 100, name='min_samples_leaf'),
        ]

        @use_named_args(space)
        def objective(**params):
            # Minimize mean cross-validated MSE (sign flip: scorer is
            # 'neg_mean_squared_error').
            reg.set_params(**params)
            return -np.mean(cross_val_score(
                reg, train_dev_x, train_dev_y, cv=cv, n_jobs=-1,
                scoring='neg_mean_squared_error'))

        start = time.process_time()
        if optimizer == 'gp':
            res = gp_minimize(objective, space, n_calls=n_calls, random_state=0, verbose=True, n_jobs=-1)
        elif optimizer == 'fr_bt':
            res = forest_minimize(objective, space, n_calls=n_calls, base_estimator='ET', random_state=0, verbose=True, n_jobs=-1)
        elif optimizer == 'fr_rf':
            res = forest_minimize(objective, space, n_calls=n_calls, base_estimator='RF', random_state=0, verbose=True, n_jobs=-1)
        elif optimizer == 'dm':
            res = dummy_minimize(objective, space, n_calls=n_calls)
        else:
            # Fail fast instead of raising NameError on 'res' below.
            raise ValueError('Unknown optimizer:{}'.format(optimizer))
        end = time.process_time()
        time_cost = end - start
        dump(res, model_path + model_name + '_result.pkl', store_objective=False)
        # Sanity check that the dumped result can be reloaded.
        returned_results = load(model_path + model_name + '_result.pkl')
        plot_objective_(res, dimensions=DIMENSION_GBRT, fig_savepath=model_path + model_name + '_objective.png')
        plot_evaluations_(res, dimensions=DIMENSION_GBRT, fig_savepath=model_path + model_name + '_evaluation.png')
        plot_convergence_(res, fig_savepath=model_path + model_name + '_convergence.png')
        logger.info('Best score=%.4f' % res.fun)
        logger.info("""Best parameters:
        - max_depth=%d
        - learning_rate=%.6f
        - max_features=%d
        - min_samples_split=%d
        - min_samples_leaf=%d""" % (res.x[0], res.x[1], res.x[2], res.x[3], res.x[4]))
        logger.info('Time cost:{}'.format(time_cost))
        params_dict = {
            'max_depth': res.x[0],
            'learning_rate': res.x[1],
            'max_features': res.x[2],
            'min_samples_split': res.x[3],
            'min_samples_leaf': res.x[4],
            'time_cost': time_cost,
            'n_calls': n_calls,
        }
        params_df = pd.DataFrame(params_dict, index=[0])
        params_df.to_csv(model_path + model_name + '_optimized_params_s' + str(model_id) + '.csv')
        GBR = GradientBoostingRegressor(
            max_depth=res.x[0],
            learning_rate=res.x[1],
            max_features=res.x[2],
            min_samples_split=res.x[3],
            min_samples_leaf=res.x[4])
        # Fit the optimal model ONCE and reuse it for all three splits
        # (the original refit per predict; without a fixed random_state the
        # three fits could differ).
        GBR.fit(train_dev_x, train_dev_y)
        train_predictions = GBR.predict(train_x)
        dev_predictions = GBR.predict(dev_x)
        test_predictions = GBR.predict(test_x)
        train_y = (train_y.values).flatten()
        dev_y = (dev_y.values).flatten()
        test_y = (test_y.values).flatten()
        # Renormalization bounds of THIS sub-signal (last row of its table).
        norm_id = pd.read_csv(data_path + 'norm_unsample_id_s' + str(model_id) + '.csv')
        sMin = norm_id['series_min'][norm_id.shape[0] - 1]
        sMax = norm_id['series_max'][norm_id.shape[0] - 1]
        logger.debug('Series Min:\n {}'.format(sMin))
        logger.debug('Series Max:\n {}'.format(sMax))
        # Map records and predictions from [-1,1] back to the original scale.
        # NOTE(review): unlike the monoscale/one-step variants, negative
        # predictions are NOT clipped here — presumably because decomposed
        # sub-signals may legitimately be negative; confirm before changing.
        train_y = np.multiply(train_y + 1, sMax - sMin) / 2 + sMin
        dev_y = np.multiply(dev_y + 1, sMax - sMin) / 2 + sMin
        test_y = np.multiply(test_y + 1, sMax - sMin) / 2 + sMin
        train_predictions = np.multiply(train_predictions + 1, sMax - sMin) / 2 + sMin
        dev_predictions = np.multiply(dev_predictions + 1, sMax - sMin) / 2 + sMin
        test_predictions = np.multiply(test_predictions + 1, sMax - sMin) / 2 + sMin
        dum_pred_results(
            path=model_path + model_name + '.csv',
            train_y=train_y,
            train_predictions=train_predictions,
            dev_y=dev_y,
            dev_predictions=dev_predictions,
            test_y=test_y,
            test_predictions=test_predictions,
            time_cost=time_cost)
        plot_rela_pred(train_y, train_predictions, fig_savepath=model_path + model_name + '_train_pred.png')
        plot_rela_pred(dev_y, dev_predictions, fig_savepath=model_path + model_name + "_dev_pred.png")
        plot_rela_pred(test_y, test_predictions, fig_savepath=model_path + model_name + "_test_pred.png")
        plot_error_distribution(test_y, test_predictions, fig_savepath=model_path + model_name + "_test_error.png")
        plt.close('all')
def lstm(root_path, station, predict_pattern, seed,
         n_epochs=1000,
         batch_size=128,
         learn_rate=0.007,
         decay_rate=0.0,
         n_hidden_layers=1,
         hidden_units=None,
         dropout_rates=None,
         early_stop=True,
         retrain=False,
         warm_up=False,
         initial_epoch=None,
         ):
    """Build, train and evaluate a monoscale LSTM model with Keras.

    Trains a 1- or 2-layer LSTM (with dropout) on the normalized samples,
    checkpoints the best weights, then renormalizes and dumps/plots the
    train/dev/test predictions. Training modes: ``retrain`` forces a fresh
    fit; otherwise a model with no prior checkpoint is trained from scratch
    or warm-started from a previously trained model (``warm_up`` with
    ``initial_epoch``); a model with an existing checkpoint just reloads
    its weights and recorded time cost.

    Args:
        root_path: Project root directory.
        station: Station (catchment) name.
        predict_pattern: Name of the prediction-pattern sub-folder.
        seed: Random seed used by the dropout layers.
        n_epochs: Maximum number of training epochs.
        batch_size: Mini-batch size.
        learn_rate: Adam learning rate.
        decay_rate: Adam learning-rate decay.
        n_hidden_layers: 1 or 2 LSTM layers.
        hidden_units: Units per LSTM layer (default [8]).
        dropout_rates: Dropout rate per LSTM layer (default [0.0]).
        early_stop: Use EarlyStopping on val_loss (patience=100).
        retrain: Force retraining even if a checkpoint exists.
        warm_up: Initialize weights from a previously trained model.
        initial_epoch: Epoch to resume from when warming up.
    """
    # Avoid mutable default arguments: default lists are created per call.
    if hidden_units is None:
        hidden_units = [8]
    if dropout_rates is None:
        dropout_rates = [0.0]
    logger.info('Build monoscale LSTM model...')
    logger.info('Model information:')
    logger.info('Root path:{}'.format(root_path))
    logger.info('Station:{}'.format(station))
    logger.info('Predict pattern:{}'.format(predict_pattern))
    logger.info('Seed:{}'.format(seed))
    logger.info('Number of epochs:{}'.format(n_epochs))
    logger.info('Batch size:{}'.format(batch_size))
    logger.info('Learning rate:{}'.format(learn_rate))
    logger.info('Decay rate of learning rate:{}'.format(decay_rate))
    logger.info('Number of hidden layers:{}'.format(n_hidden_layers))
    logger.info('Number of hidden units:{}'.format(hidden_units))
    logger.info('Dropout rates:{}'.format(dropout_rates))
    logger.info('Early stopping:{}'.format(early_stop))
    logger.info('Retrain model:{}'.format(retrain))
    logger.info('Warm up:{}'.format(warm_up))
    logger.info('Initial epoch of warm up:{}'.format(initial_epoch))
    predictor = 'lstm'
    data_path = root_path + '/' + station + '/data/' + predict_pattern + '/'
    model_path = root_path + '/' + station + '/projects/' + predictor + '/' + predict_pattern + '/'
    if not os.path.exists(model_path):
        os.makedirs(model_path)
    logger.info('Data path:{}'.format(data_path))
    logger.info('Model path:{}'.format(model_path))
    # 1. Import the sampled normalized data set from disk.
    logger.info('Load learning samples...')
    train = pd.read_csv(data_path + 'minmax_unsample_train.csv')
    dev = pd.read_csv(data_path + 'minmax_unsample_dev.csv')
    test = pd.read_csv(data_path + 'minmax_unsample_test.csv')
    # Split features from labels. NOTE: .values replaces the deprecated
    # .as_matrix(), which was removed in pandas 0.25 (identical result).
    train_x = train
    train_y = train.pop('Y')
    train_y = train_y.values
    dev_x = dev
    dev_y = dev.pop('Y')
    dev_y = dev_y.values
    test_x = test
    test_y = test.pop('Y')
    test_y = test_y.values
    # Reshape the input features to (samples, timesteps=1, features) for LSTM.
    train_x = (train_x.values).reshape(train_x.shape[0], 1, train_x.shape[1])
    dev_x = (dev_x.values).reshape(dev_x.shape[0], 1, dev_x.shape[1])
    test_x = (test_x.values).reshape(test_x.shape[0], 1, test_x.shape[1])
    # 2. Build the LSTM model with Keras. The model name encodes every
    # hyper-parameter so each configuration gets its own files.
    model_name = 'LSTM-LR[' + str(learn_rate) + \
        ']-HU' + str(hidden_units) + \
        '-EPS[' + str(n_epochs) + \
        ']-BS[' + str(batch_size) + \
        ']-DR' + str(dropout_rates) + \
        '-DC[' + str(decay_rate) + \
        ']-SEED[' + str(seed) + ']'

    def build_model():
        """Assemble and compile the (1- or 2-layer) LSTM network."""
        logger.info('Define LSTM model...')
        if n_hidden_layers == 2:
            model = keras.Sequential(
                [
                    layers.LSTM(hidden_units[0], activation=tf.nn.relu, return_sequences=True, input_shape=(train_x.shape[1], train_x.shape[2])),
                    layers.Dropout(dropout_rates[0], noise_shape=None, seed=seed),
                    # Second (last) LSTM layer does not return sequences.
                    layers.LSTM(hidden_units[1], activation=tf.nn.relu, return_sequences=False),
                    layers.Dropout(dropout_rates[1], noise_shape=None, seed=seed),
                    layers.Dense(1)
                ]
            )
        else:
            model = keras.Sequential(
                [
                    layers.LSTM(hidden_units[0], activation=tf.nn.relu, input_shape=(train_x.shape[1], train_x.shape[2])),
                    layers.Dropout(dropout_rates[0], noise_shape=None, seed=seed),
                    layers.Dense(1)
                ]
            )
        optimizer = keras.optimizers.Adam(learn_rate, decay=decay_rate)
        model.compile(loss='mean_squared_error', optimizer=optimizer, metrics=['mean_absolute_error', 'mean_squared_error'])
        return model

    logger.info('Set model parameters restore path...')
    # NOTE(review): '\\' makes these checkpoint paths Windows-specific —
    # consider os.path.join if this must run on other platforms.
    cp_path = model_path + model_name + '\\'
    if not os.path.exists(cp_path):
        os.makedirs(cp_path)
    # Restore only the latest (best) checkpoint after every update.
    checkpoint_path = model_path + model_name + '\\cp.ckpt'
    checkpoint_dir = os.path.dirname(checkpoint_path)
    logger.info('checkpoint dir:{}'.format(checkpoint_dir))
    cp_callback = keras.callbacks.ModelCheckpoint(checkpoint_path, save_best_only=True, mode='min', save_weights_only=True, verbose=1)
    # Initialize a new model and print a summary.
    model = build_model()
    model.summary()
    # 3. Train the model.
    files = os.listdir(checkpoint_dir)
    early_stopping = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=100, restore_best_weights=True)
    # Name of a previously trained model (same config but trained for
    # 'initial_epoch' epochs) whose weights can warm-start this one.
    warm_dir = 'LSTM-LR[' + str(learn_rate) + \
        ']-HU' + str(hidden_units) + \
        '-EPS[' + str(initial_epoch) + \
        ']-BS[' + str(batch_size) + \
        ']-DR' + str(dropout_rates) + \
        '-DC[' + str(decay_rate) + \
        ']-SEED[' + str(seed) + ']'
    logger.info("WARM UP PATH:{}".format(os.path.exists(model_path + warm_dir)))
    logger.info('Train the LSTM model ...')
    if retrain:  # Retraining the LSTM model from scratch.
        logger.info('retrain the model')
        if early_stop:
            start = time.process_time()
            history = model.fit(train_x, train_y, epochs=n_epochs, batch_size=batch_size, validation_data=(dev_x, dev_y), verbose=1,
                                callbacks=[
                                    cp_callback,
                                    early_stopping,
                                ])
            end = time.process_time()
            time_cost = end - start
        else:
            start = time.process_time()
            history = model.fit(train_x, train_y, epochs=n_epochs, batch_size=batch_size, validation_data=(dev_x, dev_y), verbose=1, callbacks=[cp_callback])
            end = time.process_time()
            time_cost = end - start
        # Persist and visualize the training progress stored in 'history'.
        hist = pd.DataFrame(history.history)
        hist.to_csv(model_path + model_name + '-HISTORY-TRAIN-TEST.csv')
        hist['epoch'] = history.epoch
        plot_history(history, model_path + model_name + '-MAE-ERRORS-TRAINTEST.png', model_path + model_name + '-MSE-ERRORS-TRAINTEST.png')
    elif len(files) == 0:  # The current model has not been trained yet.
        if os.path.exists(model_path + warm_dir) and warm_up:
            # Warm up: resume from the last epoch of the previously trained
            # model, reusing its weights and accumulating its time cost.
            logger.info('WARM UP FROM EPOCH ' + str(initial_epoch))
            prev_time_cost = (pd.read_csv(model_path + warm_dir + '.csv')['time_cost'])[0]
            warm_path = model_path + warm_dir + '\\cp.ckpt'
            model.load_weights(warm_path)
            if early_stop:
                start = time.process_time()
                history = model.fit(train_x, train_y, initial_epoch=initial_epoch, epochs=n_epochs, batch_size=batch_size, validation_data=(dev_x, dev_y), verbose=1,
                                    callbacks=[
                                        cp_callback,
                                        early_stopping,
                                    ])
                end = time.process_time()
                time_cost = end - start + prev_time_cost
            else:
                start = time.process_time()
                history = model.fit(train_x, train_y, initial_epoch=initial_epoch, epochs=n_epochs, batch_size=batch_size, validation_data=(dev_x, dev_y), verbose=1,
                                    callbacks=[
                                        cp_callback,
                                    ])
                end = time.process_time()
                time_cost = end - start + prev_time_cost
            hist = pd.DataFrame(history.history)
            hist.to_csv(model_path + model_name + '-HISTORY-TRAIN-TEST.csv')
            hist['epoch'] = history.epoch
            plot_history(history, model_path + model_name + '-MAE-ERRORS-TRAINTEST.png', model_path + model_name + '-MSE-ERRORS-TRAINTEST.png')
        else:  # Training an entirely new model.
            logger.info('new train')
            if early_stop:
                start = time.process_time()
                history = model.fit(train_x, train_y, epochs=n_epochs, batch_size=batch_size, validation_data=(dev_x, dev_y), verbose=1, callbacks=[
                    cp_callback,
                    early_stopping,
                ])
                end = time.process_time()
                time_cost = end - start
            else:
                start = time.process_time()
                history = model.fit(train_x, train_y, epochs=n_epochs, batch_size=batch_size, validation_data=(dev_x, dev_y), verbose=1,
                                    callbacks=[
                                        cp_callback,
                                    ])
                end = time.process_time()
                time_cost = end - start
            hist = pd.DataFrame(history.history)
            hist.to_csv(model_path + model_name + '-HISTORY-TRAIN-TEST.csv')
            hist['epoch'] = history.epoch
            plot_history(history, model_path + model_name + '-MAE-ERRORS-TRAINTEST.png', model_path + model_name + '-MSE-ERRORS-TRAINTEST.png')
    else:
        # Already trained: reload best weights and the recorded time cost.
        logger.info('#' * 10 + 'Already Trained')
        time_cost = (pd.read_csv(model_path + model_name + '.csv')['time_cost'])[0]
        model.load_weights(checkpoint_path)
    logger.info('Predict the training, development and testing samples...')
    train_predictions = model.predict(train_x).flatten()
    dev_predictions = model.predict(dev_x).flatten()
    test_predictions = model.predict(test_x).flatten()
    # Renormalize the predictions and labels using the train+dev
    # normalization indicators (last row of the norm-id table).
    norm = pd.read_csv(data_path + 'norm_unsample_id.csv')
    sMax = norm['series_max'][norm.shape[0] - 1]
    sMin = norm['series_min'][norm.shape[0] - 1]
    logger.debug('Series min:{}'.format(sMin))
    logger.debug('Series max:{}'.format(sMax))
    # Map from [-1,1] back to the original scale; clip negatives to zero.
    train_y = np.multiply(train_y + 1, sMax - sMin) / 2 + sMin
    train_predictions = np.multiply(train_predictions + 1, sMax - sMin) / 2 + sMin
    train_predictions[train_predictions < 0.0] = 0.0
    dev_y = np.multiply(dev_y + 1, sMax - sMin) / 2 + sMin
    dev_predictions = np.multiply(dev_predictions + 1, sMax - sMin) / 2 + sMin
    dev_predictions[dev_predictions < 0.0] = 0.0
    test_y = np.multiply(test_y + 1, sMax - sMin) / 2 + sMin
    test_predictions = np.multiply(test_predictions + 1, sMax - sMin) / 2 + sMin
    test_predictions[test_predictions < 0.0] = 0.0
    logger.info('Dump the prediction results...')
    dum_pred_results(
        path=model_path + model_name + '.csv',
        train_y=train_y,
        train_predictions=train_predictions,
        dev_y=dev_y,
        dev_predictions=dev_predictions,
        test_y=test_y,
        test_predictions=test_predictions,
        time_cost=time_cost,
    )
    logger.info('Plot the prediction results...')
    plot_rela_pred(train_y, train_predictions, fig_savepath=model_path + model_name + '-TRAIN-PRED.png')
    plot_rela_pred(dev_y, dev_predictions, fig_savepath=model_path + model_name + "-DEV-PRED.png")
    plot_rela_pred(test_y, test_predictions, fig_savepath=model_path + model_name + "-TEST-PRED.png")
    # NOTE(review): argument order (predictions, y) differs from the GBRT
    # variants, which pass (y, predictions) — confirm the intended order.
    plot_error_distribution(test_predictions, test_y, model_path + model_name + '-TEST-ERROR-DSTRI.png')
    plt.close('all')
def one_step_lstm(
root_path,station,decomposer,predict_pattern,seed,
wavelet_level='db10-2',
n_epochs=1000,
batch_size=128,
learn_rate=0.007,
decay_rate=0.0,
n_hidden_layers=1,
hidden_units=[8],
dropout_rates=[0.0],
early_stop=True,
retrain=False,
warm_up=False,
initial_epoch=None,
):
logger.info('Build one-step LSTM model...')
logger.info('Model informattion:')
logger.info('Root path:{}'.format(root_path))
logger.info('Station:{}'.format(station))
logger.info('Decomposer:{}'.format(decomposer))
logger.info('Predict pattern:{}'.format(predict_pattern))
logger.info('Seed:{}'.format(seed))
logger.info('Monther wavelet and decomposition level of WA:{}'.format(wavelet_level))
logger.info('Number of epochs:{}'.format(n_epochs))
logger.info('Batch size:{}'.format(batch_size))
logger.info('Learning rate:{}'.format(learn_rate))
logger.info('Decay rate of learning rate:{}'.format(decay_rate))
logger.info('Number of hidden layers:{}'.format(n_hidden_layers))
logger.info('Number of hidden units:{}'.format(hidden_units))
logger.info('Dropout rates:{}'.format(dropout_rates))
logger.info('Early stoping:{}'.format(early_stop))
logger.info('Retrain model:{}'.format(retrain))
logger.info('Warm up:{}'.format(warm_up))
logger.info('Initial epoch of warm up:{}'.format(initial_epoch))
# Set project parameters
predictor = 'lstm'
predict_pattern = predict_pattern # hindcast or forecast
signals = station+'_'+decomposer
if decomposer=='dwt' or decomposer=='modwt':
data_path = root_path + '/'+signals+'/data/'+wavelet_level+'/'+predict_pattern+'/'
model_path = root_path+'/'+signals+'/projects/'+predictor+'/'+wavelet_level+'/'+predict_pattern+'/history/'
else:
data_path = root_path + '/'+signals+'/data/'+predict_pattern+'/'
model_path = root_path+'/'+signals+'/projects/'+predictor+'/'+predict_pattern+'/history/'
if not os.path.exists(model_path):
os.makedirs(model_path)
logger.info('Data path:{}'.format(data_path))
logger.info('Model path:{}'.format(model_path))
######################################################
logger.info('Load learning samples...')
# 1.Import the sampled normalized data set from disk
train = pd.read_csv(data_path+'minmax_unsample_train.csv')
dev = pd.read_csv(data_path+'minmax_unsample_dev.csv')
test = pd.read_csv(data_path+'minmax_unsample_test.csv')
# Split features from labels
train_x = train
train_y = train.pop('Y')
train_y = train_y.as_matrix()
dev_x = dev
dev_y = dev.pop('Y')
dev_y = dev_y.as_matrix()
test_x = test
test_y = test.pop('Y')
test_y = test_y.as_matrix()
# reshape the input features for LSTM
train_x = (train_x.values).reshape(train_x.shape[0],1,train_x.shape[1])
dev_x = (dev_x.values).reshape(dev_x.shape[0],1,dev_x.shape[1])
test_x = (test_x.values).reshape(test_x.shape[0],1,test_x.shape[1])
# 2.Build LSTM model with keras
model_name = 'LSTM-LR['+str(learn_rate)+\
']-HU'+str(hidden_units)+\
'-EPS['+str(n_epochs)+\
']-BS['+str(batch_size)+\
']-DR'+str(dropout_rates)+\
'-DC['+str(decay_rate)+\
']-SEED['+str(seed)+']'
# RESUME_TRAINING = True
def build_model():
    """Build and compile a 1- or 2-layer LSTM regression model.

    Reads the closure variables ``n_hidden_layers``, ``hidden_units``,
    ``dropout_rates``, ``seed``, ``learn_rate`` and ``decay_rate``, and
    uses ``train_x`` only for its (timesteps, features) input shape.

    Returns:
        A compiled ``keras.Sequential`` model with MSE loss, Adam
        optimizer, and MAE/MSE metrics.
    """
    logger.info('Build LSTM model...')
    if n_hidden_layers==2:
        # Two stacked LSTM layers: the first returns full sequences so the
        # second LSTM can consume them; each is followed by dropout.
        model = keras.Sequential(
            [
                layers.LSTM(hidden_units[0],activation=tf.nn.relu,return_sequences=True,input_shape=(train_x.shape[1],train_x.shape[2])),
                layers.Dropout(dropout_rates[0], noise_shape=None, seed=seed),
                layers.LSTM(hidden_units[1],activation=tf.nn.relu,return_sequences=False), # first hidden layer if hasnext hidden layer
                layers.Dropout(dropout_rates[1], noise_shape=None, seed=seed),
                layers.Dense(1)
            ]
        )
    else:
        # Single LSTM layer followed by dropout and a linear output unit.
        model = keras.Sequential(
            [
                layers.LSTM(hidden_units[0],activation=tf.nn.relu,input_shape=(train_x.shape[1],train_x.shape[2])),
                layers.Dropout(dropout_rates[0], noise_shape=None, seed=seed),
                layers.Dense(1)
            ]
        )
    # NOTE(review): `decay` is the legacy learning-rate-decay argument of
    # older Keras Adam — confirm it exists in the installed TF/Keras version.
    optimizer = keras.optimizers.Adam(learn_rate,decay=decay_rate)
    model.compile(loss='mean_squared_error',optimizer=optimizer,
        metrics=['mean_absolute_error','mean_squared_error'])
    return model
logger.info('Set model parameters restore path...')
# set model's parameters restore path
cp_path = model_path+model_name+'\\'
if not os.path.exists(cp_path):
os.makedirs(cp_path)
checkpoint_path = model_path+model_name+'\\cp.ckpt' #restore only the latest checkpoint after every update
# checkpoint_path = model_path+'cp-{epoch:04d}.ckpt' #restore the checkpoint every period=x epoch
checkpoint_dir = os.path.dirname(checkpoint_path)
logger.info('checkpoint dir:{}'.format(checkpoint_dir))
cp_callback = keras.callbacks.ModelCheckpoint(checkpoint_path,save_best_only=True,mode='min',save_weights_only=True,verbose=1)
# cp_callback = keras.callbacks.ModelCheckpoint(checkpoint_path,save_weights_only=True,period=5,verbose=1)
# if not RESUME_TRAINING:
# print("Removing previous artifacts...")
# shutil.rmtree(checkpoint_dir, ignore_errors=True)
# else:
# print("Resuming training...")
# initialize a new model
model = build_model()
model.summary() #print a simple description for the model
"""
# Evaluate before training or load trained weights and biases
loss, mae, mse = model.evaluate(test_x, test_y, verbose=1)
# Try the model with initial weights and biases
example_batch = train_x[:10]
example_result = model.predict(example_batch)
print(example_result)
"""
# 3.Train the model
# Display training progress by printing a single dot for each completed epoch
class PrintDot(keras.callbacks.Callback):
    """Keras callback that prints one dot per completed epoch.

    NOTE(review): defined here but never passed to any ``model.fit`` call
    in the visible code, so it currently has no effect.
    """
    def on_epoch_end(self, epoch, logs):
        # Start a new line every 100 epochs so the dots stay readable.
        if epoch % 100 == 0: print('')
        print('.', end='')
files = os.listdir(checkpoint_dir)
from tensorflow.keras.callbacks import ReduceLROnPlateau,EarlyStopping
# reduce_lr = ReduceLROnPlateau(monitor='val_loss', patience=10, mode='auto')
reduce_lr = ReduceLROnPlateau(monitor='val_loss',min_lr=0.00001,factor=0.2, verbose=1,patience=10, mode='min')
early_stopping = EarlyStopping(monitor='val_loss', mode='min',verbose=1,patience=200,restore_best_weights=True)
warm_dir = 'LSTM-LR['+str(learn_rate)+\
']-HU'+str(hidden_units)+\
'-EPS['+str(initial_epoch)+\
']-BS['+str(batch_size)+\
']-DR'+str(dropout_rates)+\
'-DC['+str(decay_rate)+\
']-SEED['+str(seed)+']'
logger.info("WARM UP PATH:{}".format(os.path.exists(model_path+warm_dir)))
# Training models
logger.info('Train the LSTM model...')
if retrain: # Retraining the LSTM model
print('retrain the model')
if early_stop:
start = time.process_time()
history = model.fit(train_x,train_y,epochs=n_epochs,
batch_size=batch_size ,
validation_data=(dev_x,dev_y),
verbose=1,
callbacks=[
cp_callback,
early_stopping,
])
end = time.process_time()
time_cost = end-start
else:
start=time.process_time()
history = model.fit(train_x,train_y,epochs=n_epochs,
batch_size=batch_size ,
validation_data=(dev_x,dev_y),
verbose=1,
callbacks=[
cp_callback,
])
end = time.process_time()
time_cost = end - start
hist = pd.DataFrame(history.history)
hist.to_csv(model_path+model_name+'-HISTORY-TRAIN-TEST.csv')
hist['epoch']=history.epoch
# print(hist.tail())
plot_history(history,
model_path+model_name+'-MAE-ERRORS-TRAINTEST.png',
model_path+model_name+'-MSE-ERRORS-TRAINTEST.png')
elif len(files)==0:# The current model has not been trained
# Training the model using the trained weights and biases as initialized parameters
if os.path.exists(model_path+warm_dir) and warm_up:
# Warm up from the last epoch of the target model
logger.info('WARM UP FROM EPOCH '+str(initial_epoch))
prev_time_cost = (pd.read_csv(model_path+warm_dir+'.csv')['time_cost'])[0]
warm_path=model_path+warm_dir+'\\cp.ckpt'
model.load_weights(warm_path)
if early_stop:
start = time.process_time()
history = model.fit(train_x,train_y,
initial_epoch=initial_epoch,
epochs=n_epochs,
batch_size=batch_size ,
validation_data=(dev_x,dev_y),
verbose=1,
callbacks=[
cp_callback,
early_stopping,
])
end = time.process_time()
time_cost = end-start+prev_time_cost
else:
start = time.process_time()
history = model.fit(train_x,train_y,
initial_epoch=initial_epoch,
epochs=n_epochs,
batch_size=batch_size ,
validation_data=(dev_x,dev_y),
verbose=1,
callbacks=[
cp_callback,
])
end = time.process_time()
time_cost = end - start + prev_time_cost
hist = pd.DataFrame(history.history)
hist.to_csv(model_path+model_name+'-HISTORY-TRAIN-TEST.csv')
hist['epoch']=history.epoch
# print(hist.tail())
plot_history(history,
model_path+model_name+'-MAE-ERRORS-TRAINTEST.png',
model_path+model_name+'-MSE-ERRORS-TRAINTEST.png')
else:
print('new train')
if early_stop:
start = time.process_time()
history = model.fit(train_x,train_y,
epochs=n_epochs,
batch_size=batch_size ,
validation_data=(dev_x,dev_y),
verbose=1,
callbacks=[
cp_callback,
early_stopping,
])
end = time.process_time()
time_cost = end - start
else:
start = time.process_time()
history = model.fit(train_x,train_y,
epochs=n_epochs,
batch_size=batch_size ,
validation_data=(dev_x,dev_y),
verbose=1,
callbacks=[cp_callback,])
end = time.process_time()
time_cost = end - start
hist = pd.DataFrame(history.history)
hist.to_csv(model_path+model_name+'-HISTORY-TRAIN-TEST.csv')
hist['epoch']=history.epoch
# print(hist.tail())
plot_history(history,
model_path+model_name+'-MAE-ERRORS-TRAINTEST.png',
model_path+model_name+'-MSE-ERRORS-TRAINTEST.png')
else:
logger.info('#'*10+'Already Trained')
time_cost = (pd.read_csv(model_path+model_name+'.csv')['time_cost'])[0]
model.load_weights(checkpoint_path)
# loss, mae, mse = model.evaluate(test_x, test_y, verbose=1)
"""
# Evaluate after training or load trained weights and biases
loss, mae, mse = model.evaluate(test_x, test_y, verbose=1)
print("Testing set Mean Abs Error: {:5.2f} ".format(mae))
"""
# 4. Predict the model
# load the unsample data
logger.info('Predict the training, development and testing samples...')
train_predictions = model.predict(train_x).flatten()
dev_predictions = model.predict(dev_x).flatten()
test_predictions = model.predict(test_x).flatten()
# renormized the predictions and labels
# load the normalized traindev indicators
norm = pd.read_csv(data_path+'norm_unsample_id.csv')
sMax = norm['series_max'][norm.shape[0]-1]
sMin = norm['series_min'][norm.shape[0]-1]
logger.debug('Series min:{}'.format(sMin))
logger.debug('Series max:{}'.format(sMax))
train_y = np.multiply(train_y + 1,sMax - sMin) / 2 + sMin
dev_y = np.multiply(dev_y + 1,sMax - sMin) / 2 + sMin
test_y = np.multiply(test_y + 1,sMax - sMin) / 2 + sMin
train_predictions = np.multiply(train_predictions + 1, sMax -sMin) / 2 + sMin
train_predictions[train_predictions<0.0]=0.0
dev_predictions = np.multiply(dev_predictions + 1, sMax -sMin) / 2 + sMin
dev_predictions[dev_predictions<0.0]=0.0
test_predictions = np.multiply(test_predictions + 1, sMax -sMin) / 2 + sMin
test_predictions[test_predictions<0.0]=0.0
logger.info('Dump prediction results...')
dum_pred_results(
path = model_path+model_name+'.csv',
train_y = train_y,
train_predictions=train_predictions,
dev_y = dev_y,
dev_predictions = dev_predictions,
test_y = test_y,
test_predictions = test_predictions,
time_cost = time_cost)
logger.info('Plot the prediction results...')
plot_rela_pred(train_y,train_predictions,fig_savepath=model_path + model_name + '-TRAIN-PRED.png')
plot_rela_pred(dev_y,dev_predictions,fig_savepath=model_path + model_name + "-DEV-PRED.png")
plot_rela_pred(test_y,test_predictions,fig_savepath=model_path + model_name + "-TEST-PRED.png")
plot_error_distribution(test_predictions,test_y,model_path+model_name+'-TEST-ERROR-DSTRI.png')
plt.close('all')
def multi_step_lstm(
    root_path,station,decomposer,predict_pattern,lags,model_id,seed,
    wavelet_level='db10-2',
    n_epochs=1000,
    batch_size=128,
    learn_rate=0.007,
    decay_rate=0.0,
    n_hidden_layers=1,
    hidden_units=[8],
    dropout_rates=[0.0],
    early_stop=True,
    retrain=False,
    warm_up=False,
    initial_epoch=None,
    ):
    """Train (or restore) a one-step LSTM for one decomposed sub-signal.

    Learning samples are read from ``minmax_unsample_{train,dev,test}_s<id>.csv``
    under the decomposer's data directory; trained weights, history, plots
    and de-normalized predictions are written below
    ``<root>/<station>_<decomposer>/projects/lstm/.../s<id>/history/``.

    Args:
        root_path: Project root directory.
        station: Station name (used to build the signal directory).
        decomposer: Decomposition method ('dwt', 'modwt', 'eemd', ...).
        predict_pattern: 'hindcast' or 'forecast'.
        lags: Lag list; its length is the number of sub-signals.
        model_id: 1-based index of the sub-signal to model.
        seed: Random seed passed to the dropout layers.
        wavelet_level: Mother wavelet and level for wavelet decomposers.
        n_epochs: Maximum number of training epochs.
        batch_size: Mini-batch size.
        learn_rate: Adam learning rate.
        decay_rate: Adam learning-rate decay.
        n_hidden_layers: 1 or 2 LSTM layers.
        hidden_units: Units per LSTM layer (list, one entry per layer).
        dropout_rates: Dropout rate after each LSTM layer.
        early_stop: Use EarlyStopping on validation loss when True.
        retrain: Force retraining even if a checkpoint already exists.
        warm_up: Resume from a previous run's checkpoint when available.
        initial_epoch: Epoch count of the warm-up source run.

    Raises:
        Exception: If ``model_id`` exceeds ``len(lags)``.
    """
    # FIX: EarlyStopping was previously imported only inside a sibling
    # function's local scope; import it here so this function is
    # self-contained (the unused ReduceLROnPlateau was dropped).
    from tensorflow.keras.callbacks import EarlyStopping
    logger.info('Build multi-step LSTM model...')
    logger.info('Model information:')
    logger.info('Root path:{}'.format(root_path))
    logger.info('Station:{}'.format(station))
    logger.info('Decomposer:{}'.format(decomposer))
    logger.info('Predict pattern:{}'.format(predict_pattern))
    logger.info('Lags:{}'.format(lags))
    logger.info('Model index:{}'.format(model_id))
    logger.info('Seed:{}'.format(seed))
    logger.info('Mother wavelet and decomposition level of WA:{}'.format(wavelet_level))
    logger.info('Number of epochs:{}'.format(n_epochs))
    logger.info('Batch size:{}'.format(batch_size))
    logger.info('Learning rate:{}'.format(learn_rate))
    logger.info('Decay rate of learning rate:{}'.format(decay_rate))
    logger.info('Number of hidden layers:{}'.format(n_hidden_layers))
    logger.info('Number of hidden units:{}'.format(hidden_units))
    logger.info('Dropout rates:{}'.format(dropout_rates))
    logger.info('Early stopping:{}'.format(early_stop))
    logger.info('Retrain model:{}'.format(retrain))
    logger.info('Warm up:{}'.format(warm_up))
    logger.info('Initial epoch of warm up:{}'.format(initial_epoch))
    if model_id > len(lags):
        raise Exception("The model id exceed the number of sub-signals")
    # Set project parameters and the per-sub-signal data/model directories.
    predictor = 'lstm'
    signals = station + '_' + decomposer
    if decomposer in ('dwt', 'modwt'):
        data_path = root_path + '/'+signals+'/data/'+wavelet_level+'/'+predict_pattern+'/'
        model_path = root_path+'/'+signals+'/projects/'+predictor+'/'+wavelet_level+'/'+predict_pattern+'/s'+str(model_id)+'/history/'
    else:
        data_path = root_path + '/'+signals+'/data/'+predict_pattern+'/'
        model_path = root_path+'/'+signals+'/projects/'+predictor+'/'+predict_pattern+'/s'+str(model_id)+'/history/'
    if not os.path.exists(model_path):
        os.makedirs(model_path)
    logger.info('Data path:{}'.format(data_path))
    logger.info('Model path:{}'.format(model_path))
    ######################################################
    # 1. Import the sampled, min-max normalized data set from disk.
    logger.info('Load learning samples...')
    train = pd.read_csv(data_path+'minmax_unsample_train_s'+str(model_id)+'.csv')
    dev = pd.read_csv(data_path+'minmax_unsample_dev_s'+str(model_id)+'.csv')
    test = pd.read_csv(data_path+'minmax_unsample_test_s'+str(model_id)+'.csv')
    # Split features from labels.
    # FIX: Series.as_matrix() was removed in pandas 1.0; .values is the
    # equivalent that works across pandas versions.
    train_y = train.pop('Y').values
    dev_y = dev.pop('Y').values
    test_y = test.pop('Y').values
    # Reshape features to (samples, timesteps=1, features) for the LSTM.
    train_x = train.values.reshape(train.shape[0], 1, train.shape[1])
    dev_x = dev.values.reshape(dev.shape[0], 1, dev.shape[1])
    test_x = test.values.reshape(test.shape[0], 1, test.shape[1])
    # 2. Build the LSTM model with keras.
    model_name = 'LSTM-S'+str(model_id)+\
        '-LR['+str(learn_rate)+\
        ']-HU'+str(hidden_units)+\
        '-EPS['+str(n_epochs)+\
        ']-BS['+str(batch_size)+\
        ']-DR'+str(dropout_rates)+\
        '-DC['+str(decay_rate)+\
        ']-SEED['+str(seed)+']'
    def build_model():
        # 2-layer variant stacks LSTMs (first returns sequences); otherwise a
        # single LSTM. Each LSTM is followed by dropout; output is Dense(1).
        logger.info('Build LSTM model...')
        if n_hidden_layers == 2:
            model = keras.Sequential([
                layers.LSTM(hidden_units[0], activation=tf.nn.relu, return_sequences=True,
                            input_shape=(train_x.shape[1], train_x.shape[2])),
                layers.Dropout(dropout_rates[0], noise_shape=None, seed=seed),
                layers.LSTM(hidden_units[1], activation=tf.nn.relu, return_sequences=False),
                layers.Dropout(dropout_rates[1], noise_shape=None, seed=seed),
                layers.Dense(1),
            ])
        else:
            model = keras.Sequential([
                layers.LSTM(hidden_units[0], activation=tf.nn.relu,
                            input_shape=(train_x.shape[1], train_x.shape[2])),
                layers.Dropout(dropout_rates[0], noise_shape=None, seed=seed),
                layers.Dense(1),
            ])
        optimizer = keras.optimizers.Adam(learn_rate, decay=decay_rate)
        model.compile(loss='mean_squared_error', optimizer=optimizer,
                      metrics=['mean_absolute_error', 'mean_squared_error'])
        return model
    # Set the checkpoint restore path.
    # FIX: build paths with os.path.join instead of hard-coded '\\' mixed
    # with '/', so checkpoints also work on POSIX systems.
    logger.info('Set model parameters restore path...')
    cp_path = os.path.join(model_path, model_name)
    if not os.path.exists(cp_path):
        os.makedirs(cp_path)
    # Keep only the best (lowest val_loss) checkpoint, weights only.
    checkpoint_path = os.path.join(cp_path, 'cp.ckpt')
    checkpoint_dir = os.path.dirname(checkpoint_path)
    logger.info('checkpoint dir:{}'.format(checkpoint_dir))
    cp_callback = keras.callbacks.ModelCheckpoint(
        checkpoint_path, save_best_only=True, mode='min',
        save_weights_only=True, verbose=1)
    # Initialize a new model and show its architecture.
    model = build_model()
    model.summary()
    # 3. Train the model (or restore previously trained weights).
    files = os.listdir(checkpoint_dir)
    early_stopping = EarlyStopping(monitor='val_loss', mode='min', verbose=1,
                                   patience=200, restore_best_weights=True)
    # Name of a previous (shorter) run usable as a warm start.
    warm_dir = 'LSTM-S'+str(model_id)+\
        '-LR['+str(learn_rate)+\
        ']-HU'+str(hidden_units)+\
        '-EPS['+str(initial_epoch)+\
        ']-BS['+str(batch_size)+\
        ']-DR'+str(dropout_rates)+\
        '-DC['+str(decay_rate)+\
        ']-SEED['+str(seed)+']'
    logger.info("WARM UP PATH:{}".format(os.path.exists(model_path+warm_dir)))
    def _fit(start_epoch=0):
        # Run model.fit with the shared settings; return (history, seconds).
        callbacks = [cp_callback, early_stopping] if early_stop else [cp_callback]
        start = time.process_time()
        history = model.fit(train_x, train_y,
                            initial_epoch=start_epoch,
                            epochs=n_epochs,
                            batch_size=batch_size,
                            validation_data=(dev_x, dev_y),
                            verbose=1,
                            callbacks=callbacks)
        return history, time.process_time() - start
    def _dump_history(history):
        # Persist the training history and plot the MAE/MSE learning curves.
        hist = pd.DataFrame(history.history)
        hist.to_csv(model_path+model_name+'-HISTORY-TRAIN-TEST.csv')
        hist['epoch'] = history.epoch
        logger.debug(hist.tail())
        plot_history(history,
                     model_path+model_name+'-MAE-ERRORS-TRAINTEST.png',
                     model_path+model_name+'-MSE-ERRORS-TRAINTEST.png')
    logger.info('Train the LSTM model...')
    if retrain:
        # Retrain from scratch even though a checkpoint may already exist.
        print('retrain the model')
        history, time_cost = _fit()
        _dump_history(history)
    elif len(files) == 0:
        # No checkpoint yet: warm up from a previous run, or train fresh.
        if os.path.exists(model_path+warm_dir) and warm_up:
            print('WARM UP FROM EPOCH '+str(initial_epoch))
            # Carry the previous run's training time into the total.
            prev_time_cost = (pd.read_csv(model_path+warm_dir+'.csv')['time_cost'])[0]
            model.load_weights(os.path.join(model_path, warm_dir, 'cp.ckpt'))
            history, time_cost = _fit(start_epoch=initial_epoch)
            time_cost += prev_time_cost
            _dump_history(history)
        else:
            logger.info('new train')
            history, time_cost = _fit()
            _dump_history(history)
    else:
        # Already trained: restore best weights and the recorded time cost.
        logger.info('#'*10+'Already Trained')
        time_cost = (pd.read_csv(model_path+model_name+'.csv')['time_cost'])[0]
        model.load_weights(checkpoint_path)
    # 4. Predict the training, development and testing samples.
    logger.info('Predict the training, development and testing samples...')
    train_predictions = model.predict(train_x).flatten()
    dev_predictions = model.predict(dev_x).flatten()
    test_predictions = model.predict(test_x).flatten()
    # Load the normalization indicators and invert the [-1, 1] min-max
    # scaling back to the original units.
    norm = pd.read_csv(data_path+'norm_unsample_id_s'+str(model_id)+'.csv')
    sMax = norm['series_max'][norm.shape[0]-1]
    sMin = norm['series_min'][norm.shape[0]-1]
    print('Series min:{}'.format(sMin))
    print('Series max:{}'.format(sMax))
    train_y = np.multiply(train_y + 1, sMax - sMin) / 2 + sMin
    train_predictions = np.multiply(train_predictions + 1, sMax - sMin) / 2 + sMin
    dev_y = np.multiply(dev_y + 1, sMax - sMin) / 2 + sMin
    dev_predictions = np.multiply(dev_predictions + 1, sMax - sMin) / 2 + sMin
    test_y = np.multiply(test_y + 1, sMax - sMin) / 2 + sMin
    test_predictions = np.multiply(test_predictions + 1, sMax - sMin) / 2 + sMin
    # NOTE(review): sub-signal predictions are deliberately not clipped at
    # zero here — decomposed components may legitimately be negative.
    logger.info('Dump prediction results...')
    dum_pred_results(
        path=model_path+model_name+'.csv',
        train_y=train_y,
        train_predictions=train_predictions,
        dev_y=dev_y,
        dev_predictions=dev_predictions,
        test_y=test_y,
        test_predictions=test_predictions,
        time_cost=time_cost)
    logger.info('Plot the prediction results...')
    plot_rela_pred(train_y, train_predictions, fig_savepath=model_path + model_name + '-TRAIN-PRED.png')
    plot_rela_pred(dev_y, dev_predictions, fig_savepath=model_path + model_name + "-DEV-PRED.png")
    plot_rela_pred(test_y, test_predictions, fig_savepath=model_path + model_name + "-TEST-PRED.png")
    plot_error_distribution(test_predictions, test_y, model_path+model_name+'-TEST-ERROR-DSTRI.png')
    plt.close('all')
| 48.391843
| 161
| 0.631408
| 16,267
| 121,028
| 4.429274
| 0.029815
| 0.034476
| 0.03789
| 0.048715
| 0.968203
| 0.965969
| 0.96125
| 0.957364
| 0.955282
| 0.952922
| 0
| 0.012982
| 0.23559
| 121,028
| 2,501
| 162
| 48.391843
| 0.765822
| 0.065976
| 0
| 0.926175
| 0
| 0
| 0.123782
| 0.021218
| 0
| 0
| 0
| 0
| 0.000479
| 1
| 0.014382
| false
| 0
| 0.017258
| 0.000479
| 0.039789
| 0.006232
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
62a5a116c2a40c47892a3c35a37dcb4a40b8f3ae
| 113
|
py
|
Python
|
boa3_test/test_sc/interop_test/stdlib/Base58EncodeMismatchedType.py
|
hal0x2328/neo3-boa
|
6825a3533384cb01660773050719402a9703065b
|
[
"Apache-2.0"
] | 25
|
2020-07-22T19:37:43.000Z
|
2022-03-08T03:23:55.000Z
|
boa3_test/test_sc/interop_test/stdlib/Base58EncodeMismatchedType.py
|
hal0x2328/neo3-boa
|
6825a3533384cb01660773050719402a9703065b
|
[
"Apache-2.0"
] | 419
|
2020-04-23T17:48:14.000Z
|
2022-03-31T13:17:45.000Z
|
boa3_test/test_sc/interop_test/stdlib/Base58EncodeMismatchedType.py
|
hal0x2328/neo3-boa
|
6825a3533384cb01660773050719402a9703065b
|
[
"Apache-2.0"
] | 15
|
2020-05-21T21:54:24.000Z
|
2021-11-18T06:17:24.000Z
|
from boa3.builtin.interop.stdlib import base58_encode
def Main(key: int) -> str:
    """Smart-contract entry point: base58-encode the given key.

    NOTE(review): judging by the file name (Base58EncodeMismatchedType),
    this is a deliberate negative test case — ``base58_encode`` is not
    meant to accept an ``int``, so the boa3 compiler should reject this.
    """
    return base58_encode(key)
| 18.833333
| 53
| 0.752212
| 17
| 113
| 4.882353
| 0.823529
| 0.289157
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.052083
| 0.150442
| 113
| 5
| 54
| 22.6
| 0.8125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
62b1e45cff35d277a481146c6bc53a2c086638fe
| 2,320
|
py
|
Python
|
test/nn/conv/test_general_conv.py
|
rusty1s/pytorch-geometric
|
ded9a7b10ad8ebc19c97e567c7bb1ae6605253db
|
[
"MIT"
] | 2,350
|
2021-09-12T08:32:50.000Z
|
2022-03-31T18:09:36.000Z
|
test/nn/conv/test_general_conv.py
|
rusty1s/pytorch-geometric
|
ded9a7b10ad8ebc19c97e567c7bb1ae6605253db
|
[
"MIT"
] | 588
|
2021-09-12T08:49:08.000Z
|
2022-03-31T21:02:13.000Z
|
test/nn/conv/test_general_conv.py
|
rusty1s/pytorch-geometric
|
ded9a7b10ad8ebc19c97e567c7bb1ae6605253db
|
[
"MIT"
] | 505
|
2021-09-13T13:13:32.000Z
|
2022-03-31T15:54:00.000Z
|
import torch
from torch_geometric.nn import GeneralConv
def test_general_conv():
    """Exercise GeneralConv under every supported configuration.

    For each configuration the layer must report the same repr, produce a
    (num_nodes, out_channels) output, and be deterministic across two
    identical forward passes.
    """
    feats = torch.randn(4, 8)
    edge_feats = torch.randn(4, 16)
    edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]])

    configurations = [
        {},
        {'skip_linear': True},
        {'directed_msg': False},
        {'heads': 3},
        {'attention': True},
        {'heads': 3, 'attention': True},
        {'heads': 3, 'attention': True, 'attention_type': 'dot_product'},
        {'l2_normalize': True},
    ]
    for extra_kwargs in configurations:
        conv = GeneralConv(8, 32, 16, **extra_kwargs)
        assert conv.__repr__() == 'GeneralConv(8, 32)'
        out = conv(feats, edge_index, edge_attr=edge_feats)
        assert out.size() == (4, 32)
        # A second forward pass with identical inputs must match the first.
        assert torch.allclose(conv(feats, edge_index, edge_attr=edge_feats),
                              out, atol=1e-7)
| 39.322034
| 77
| 0.64181
| 363
| 2,320
| 3.900826
| 0.126722
| 0.072034
| 0.158192
| 0.169492
| 0.870763
| 0.856638
| 0.856638
| 0.856638
| 0.856638
| 0.856638
| 0
| 0.082888
| 0.193966
| 2,320
| 58
| 78
| 40
| 0.674332
| 0
| 0
| 0.680851
| 0
| 0
| 0.06681
| 0
| 0
| 0
| 0
| 0
| 0.510638
| 1
| 0.021277
| false
| 0
| 0.042553
| 0
| 0.06383
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
62b49304e7ee39f19375c5546a88a27a88f5e32c
| 15,936
|
py
|
Python
|
struntho/inference/maxmin_spmp_sequence.py
|
alexnowakvila/maxminloss
|
15c45da5b8c4c214ba2aa596931aff998e3f1c92
|
[
"Apache-2.0"
] | 6
|
2020-07-28T12:13:50.000Z
|
2022-01-06T10:35:10.000Z
|
struntho/inference/maxmin_spmp_sequence.py
|
alexnowakvila/maxminloss
|
15c45da5b8c4c214ba2aa596931aff998e3f1c92
|
[
"Apache-2.0"
] | 1
|
2021-07-12T15:10:19.000Z
|
2021-07-12T15:10:19.000Z
|
struntho/inference/maxmin_spmp_sequence.py
|
alexnowakvila/maxminloss
|
15c45da5b8c4c214ba2aa596931aff998e3f1c92
|
[
"Apache-2.0"
] | 4
|
2020-10-05T16:48:13.000Z
|
2021-05-04T13:59:24.000Z
|
import sys
# sys.path.append("..")
import cvxopt as cvx
from cvxopt import matrix, solvers
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from scipy.linalg import toeplitz
import scipy.special as sp
from struntho.inference.sum_product_chain import sum_product_p
from struntho.inference._sum_product_chain import viterbi, sum_product_c
def softmax(a, b):
    """Row-wise weighted softmax: normalize ``b * exp(a)`` over each row.

    Args:
        a: 2-D array of scores (rows are distributions).
        b: 2-D array of non-negative row weights (the prox center in the
           mirror-prox updates), same shape as ``a``.

    Returns:
        Array of the same shape whose rows sum to 1.
    """
    # FIX: subtract the per-row max before exponentiating so large scores
    # cannot overflow exp(); the normalization cancels the shift exactly.
    shifted = a - a.max(axis=1, keepdims=True)
    c = b * np.exp(shifted)
    return c / c.sum(1, keepdims=True)
def maxmin_spmp_sequence_p(nu_nodes,
                           nu_edges,
                           p,
                           unary_potentials,
                           pairwise_potentials,
                           Loss,
                           max_iter,
                           eta,
                           sum_product_cython=False):
    """Max-min saddle-point mirror prox for chain (sequence) models.

    INPUT
        nu_nodes: length * n_states initial node marginals
        nu_edges: (length - 1) * n_states * n_states initial edge marginals
        p: length * n_states initial distribution of the adversary
        unary_potentials: length * n_states
        pairwise_potentials: n_states * n_states (same at every edge)
        Loss: n_states * n_states loss matrix
        max_iter: iterations per node (total iterations = length * max_iter)
        eta: proximal step size
        sum_product_cython: use the Cython sum-product when True
    OUTPUT
        out1 = [[mu_avg_nodes, mu_avg_edges], q_avg]
        out2 = [[nu_avg_nodes, nu_avg_edges], p_avg]
        dual_gaps: per-iteration duality-gap estimates
    """
    # Choose the sum-product implementation (Cython or pure Python).
    sum_product = sum_product_c if sum_product_cython else sum_product_p
    n_states = pairwise_potentials.shape[0]
    length = unary_potentials.shape[0]
    # Work in log space; the epsilon keeps log() finite at zero entries.
    nu_nodes = np.log(nu_nodes + 1e-16)
    nu_edges = np.log(nu_edges + 1e-16)
    # Auxiliary (intermediate) variables.
    q = np.zeros((length, n_states))
    mu_nodes = np.zeros((length, n_states))
    mu_edges = np.zeros((length - 1, n_states, n_states))
    # Running averages of the iterates (the algorithm returns averages).
    q_avg = np.zeros((length, n_states))
    mu_avg_nodes = np.zeros((length, n_states))
    mu_avg_edges = np.zeros((length - 1, n_states, n_states))
    p_avg = np.zeros((length, n_states))
    nu_avg_nodes = np.zeros((length, n_states))
    nu_avg_edges = np.zeros((length - 1, n_states, n_states))
    # One copy of the pairwise potentials per edge of the chain.
    repeated_potentials = np.repeat(pairwise_potentials[np.newaxis, :, :], length - 1, axis=0)
    dual_gaps = []
    max_iter = length * max_iter
    for k in range(max_iter):
        # FIRST PROXIMAL MAPPING
        q = softmax(-eta * np.dot(np.exp(nu_nodes), Loss.T), p)
        uscores = eta * np.dot(p, Loss) + eta * unary_potentials - nu_nodes
        # Boundary nodes have no incoming/outgoing edge message to cancel.
        uscores[0] = uscores[0] + nu_nodes[0]
        uscores[-1] = uscores[-1] + nu_nodes[-1]
        bscores = eta * repeated_potentials + nu_edges
        sum_product(uscores, bscores, mu_nodes, mu_edges)
        # SECOND PROXIMAL MAPPING
        # NOTE(review): this variant recenters on (p, nu_*); the _p2 variant
        # uses the freshly computed (q, mu_*) instead — confirm which scheme
        # is intended before changing either.
        p = softmax(-eta * np.dot(np.exp(mu_nodes), Loss.T), p)
        uscores = eta * np.dot(q, Loss) + eta * unary_potentials - nu_nodes
        uscores[0] = uscores[0] + nu_nodes[0]
        uscores[-1] = uscores[-1] + nu_nodes[-1]
        bscores = eta * repeated_potentials + nu_edges
        sum_product(uscores, bscores, nu_nodes, nu_edges)
        # UPDATE RUNNING AVERAGES (uniform average over iterations).
        q_avg = k * q_avg / (k+1) + q / (k+1)
        mu_avg_nodes = k * mu_avg_nodes / (k+1) + np.exp(mu_nodes) / (k+1)
        mu_avg_edges = k * mu_avg_edges / (k+1) + np.exp(mu_edges) / (k+1)
        p_avg = k * p_avg / (k+1) + p / (k+1)
        nu_avg_nodes = k * nu_avg_nodes / (k+1) + np.exp(nu_nodes) / (k+1)
        nu_avg_edges = k * nu_avg_edges / (k+1) + np.exp(nu_edges) / (k+1)
        # COMPUTE DUAL GAP: max side via Viterbi decoding of the averaged q.
        ymax = np.zeros(length, dtype=np.int32)
        viterbi(np.dot(q_avg, Loss) + unary_potentials, pairwise_potentials, ymax)
        # One-hot encode the Viterbi labeling.
        # FIX: np.int was removed in NumPy 1.24; the builtin int is the
        # exact equivalent of the old alias.
        node_embeddings = np.zeros((length, n_states), dtype=int)
        gx = np.ogrid[:length]
        node_embeddings[gx, ymax] = 1
        # Accumulated pairwise indicator counts over consecutive nodes.
        edges = np.stack((np.arange(0, length - 1), np.arange(1, length)), 1)
        sum_edge_embeddings = np.dot(node_embeddings[edges[:, 0]].T,
                                     node_embeddings[edges[:, 1]])
        # Objective value of y_max (max side).
        m1 = (np.dot(q_avg, Loss) + unary_potentials)[np.arange(length), ymax].sum()
        m2 = (pairwise_potentials * sum_edge_embeddings).sum()
        maxval = m1 + m2
        # Min side evaluated at the averaged marginals.
        en1 = (unary_potentials * mu_avg_nodes).sum()
        en2 = (repeated_potentials * mu_avg_edges).sum()
        minval = np.min(np.dot(mu_avg_nodes, Loss), axis=1).sum() + en1 + en2
        dual_gaps.append(maxval - minval)
    out1 = [[mu_avg_nodes, mu_avg_edges], q_avg]
    out2 = [[nu_avg_nodes, nu_avg_edges], p_avg]
    return out1, out2, dual_gaps
def maxmin_spmp_sequence_p2(nu_nodes,
                            nu_edges,
                            p,
                            unary_potentials,
                            pairwise_potentials,
                            Loss,
                            max_iter,
                            eta,
                            sum_product_cython=False):
    """Variant of :func:`maxmin_spmp_sequence_p` whose second proximal
    mapping recenters on the freshly computed (q, mu_*) iterates.

    INPUT
        nu_nodes: length * n_states initial node marginals
        nu_edges: (length - 1) * n_states * n_states initial edge marginals
        p: length * n_states initial distribution of the adversary
        unary_potentials: length * n_states
        pairwise_potentials: n_states * n_states (same at every edge)
        Loss: n_states * n_states loss matrix
        max_iter: iterations per node (total iterations = length * max_iter)
        eta: proximal step size
        sum_product_cython: use the Cython sum-product when True
    OUTPUT
        out1 = [[mu_avg_nodes, mu_avg_edges], q_avg]
        out2 = [[nu_avg_nodes, nu_avg_edges], p_avg]
        dual_gaps: per-iteration duality-gap estimates
    """
    # Choose the sum-product implementation (Cython or pure Python).
    sum_product = sum_product_c if sum_product_cython else sum_product_p
    n_states = pairwise_potentials.shape[0]
    length = unary_potentials.shape[0]
    # Work in log space; the epsilon keeps log() finite at zero entries.
    nu_nodes = np.log(nu_nodes + 1e-16)
    nu_edges = np.log(nu_edges + 1e-16)
    # Auxiliary (intermediate) variables.
    q = np.zeros((length, n_states))
    mu_nodes = np.zeros((length, n_states))
    mu_edges = np.zeros((length - 1, n_states, n_states))
    # Running averages of the iterates (the algorithm returns averages).
    q_avg = np.zeros((length, n_states))
    mu_avg_nodes = np.zeros((length, n_states))
    mu_avg_edges = np.zeros((length - 1, n_states, n_states))
    p_avg = np.zeros((length, n_states))
    nu_avg_nodes = np.zeros((length, n_states))
    nu_avg_edges = np.zeros((length - 1, n_states, n_states))
    # One copy of the pairwise potentials per edge of the chain.
    repeated_potentials = np.repeat(pairwise_potentials[np.newaxis, :, :], length - 1, axis=0)
    dual_gaps = []
    max_iter = length * max_iter
    for k in range(max_iter):
        # FIRST PROXIMAL MAPPING
        q = softmax(-eta * np.dot(np.exp(nu_nodes), Loss.T), p)
        uscores = eta * np.dot(p, Loss) + eta * unary_potentials - nu_nodes
        # Boundary nodes have no incoming/outgoing edge message to cancel.
        uscores[0] = uscores[0] + nu_nodes[0]
        uscores[-1] = uscores[-1] + nu_nodes[-1]
        bscores = eta * repeated_potentials + nu_edges
        sum_product(uscores, bscores, mu_nodes, mu_edges)
        # SECOND PROXIMAL MAPPING — recenters on the fresh (q, mu_*) pair,
        # unlike the _p variant which reuses (p, nu_*).
        p = softmax(-eta * np.dot(np.exp(mu_nodes), Loss.T), q)
        uscores = eta * np.dot(q, Loss) + eta * unary_potentials - mu_nodes
        uscores[0] = uscores[0] + mu_nodes[0]
        uscores[-1] = uscores[-1] + mu_nodes[-1]
        bscores = eta * repeated_potentials + mu_edges
        sum_product(uscores, bscores, nu_nodes, nu_edges)
        # UPDATE RUNNING AVERAGES (uniform average over iterations).
        q_avg = k * q_avg / (k+1) + q / (k+1)
        mu_avg_nodes = k * mu_avg_nodes / (k+1) + np.exp(mu_nodes) / (k+1)
        mu_avg_edges = k * mu_avg_edges / (k+1) + np.exp(mu_edges) / (k+1)
        p_avg = k * p_avg / (k+1) + p / (k+1)
        nu_avg_nodes = k * nu_avg_nodes / (k+1) + np.exp(nu_nodes) / (k+1)
        nu_avg_edges = k * nu_avg_edges / (k+1) + np.exp(nu_edges) / (k+1)
        # COMPUTE DUAL GAP: max side via Viterbi decoding of the averaged q.
        ymax = np.zeros(length, dtype=np.int32)
        viterbi(np.dot(q_avg, Loss) + unary_potentials, pairwise_potentials, ymax)
        # One-hot encode the Viterbi labeling.
        # FIX: np.int was removed in NumPy 1.24; the builtin int is the
        # exact equivalent of the old alias.
        node_embeddings = np.zeros((length, n_states), dtype=int)
        gx = np.ogrid[:length]
        node_embeddings[gx, ymax] = 1
        # Accumulated pairwise indicator counts over consecutive nodes.
        edges = np.stack((np.arange(0, length - 1), np.arange(1, length)), 1)
        sum_edge_embeddings = np.dot(node_embeddings[edges[:, 0]].T,
                                     node_embeddings[edges[:, 1]])
        # Objective value of y_max (max side).
        m1 = (np.dot(q_avg, Loss) + unary_potentials)[np.arange(length), ymax].sum()
        m2 = (pairwise_potentials * sum_edge_embeddings).sum()
        maxval = m1 + m2
        # Min side evaluated at the averaged marginals.
        en1 = (unary_potentials * mu_avg_nodes).sum()
        en2 = (repeated_potentials * mu_avg_edges).sum()
        minval = np.min(np.dot(mu_avg_nodes, Loss), axis=1).sum() + en1 + en2
        dual_gaps.append(maxval - minval)
    out1 = [[mu_avg_nodes, mu_avg_edges], q_avg]
    out2 = [[nu_avg_nodes, nu_avg_edges], p_avg]
    return out1, out2, dual_gaps
# def CVXOPT(unary_potentials, pairwise_potentials, Loss):
# Loss = matrix(Loss)
# n_states = pairwise_potentials.shape[0]
# length = unary_potentials.shape[0]
# # COMPUTE MATRIX A
# A1 = np.zeros((n_states, n_states ** 2))
# A2 = np.tile(np.arange(n_states), (n_states, n_states))
# for j in range(n_states):
# A1[j, j * n_states: (j+1) * n_states] = 1.
# A2[j] = -1 * (A2[j] % n_states == j).astype(float)
# A3 = np.concatenate((A1, A2), axis=1)
# A = np.zeros((n_states * (length - 2), (length - 1) * n_states ** 2))
# for l in range(length - 2):
# A[l * n_states: (l+1) * n_states, l * n_states ** 2: (l+2) * n_states ** 2] = A3
# # A has shape n_states * (length - 1) X length * n_states ** 2
# # A4 = np.zeros((length - 1, (length - 1) * n_states ** 2))
# # for l in range(length - 1):
# # A4[l, l * n_states ** 2 : (l+1) * n_states ** 2] = 1.
# # A = np.concatenate((A, A4), axis=0)
# # # insert part associated to z
# # A = np.concatenate((A, np.zeros((A.shape[0], length))), axis=1)
# # assert A.shape[0] == (length - 2) * n_states + length - 1
# A4 = np.zeros((1, (length - 1) * n_states ** 2))
# A4[0,:n_states ** 2] = 1.
# A = np.concatenate((A, A4), axis=0)
# # insert part associated to z
# A = np.concatenate((A, np.zeros((A.shape[0], length))), axis=1)
# assert A.shape[0] == (length - 2) * n_states + 1
# # COMPUTE VECTOR b
# b = np.zeros(A.shape[0])
# b[(length - 2) * n_states:] = 1.
# # COMPUTE MATRIX G
# # we separate the computation between G1, G2, G3, G4
# G0 = np.zeros((n_states, n_states ** 2))
# for j in range(n_states):
# g = np.ones((n_states, 1)).dot(Loss[[j], :])
# G0[j] = g.flatten()
# G1 = np.zeros((length * n_states, (length - 1) * n_states ** 2))
# for l in range(length - 1):
# G1[l * n_states: (l+1) * n_states, l * n_states ** 2: (l+1) * n_states ** 2] = G0
# G1[(length - 1) * n_states : length * n_states, (length - 2) * n_states ** 2 : (length - 1) * n_states ** 2] = G0
# G1 = -1 * G1
# G2 = np.zeros((length * n_states, length))
# for l in range(length):
# G2[l * n_states: (l+1)* n_states, l] = 1.
# G3 = -1 * np.eye((length - 1) * n_states ** 2)
# G4 = np.zeros(((length - 1) * n_states ** 2, length))
# G = np.concatenate((G1, G2), axis=1)
# G = np.concatenate((G, np.concatenate((G3, G4), axis=1)), axis=0)
# # COMPUTE VECTOR h
# h = np.zeros(G.shape[0])
# # COMPUTE COST VECTOR c
# C1 = np.tile(pairwise_potentials.flatten(), (length - 1, 1))
# for l in range(length - 1):
# C1[l] += unary_potentials[[l], :].transpose().dot(np.ones((1, n_states))).flatten()
# C1[length - 2] += unary_potentials[[length - 1], :].transpose().dot(np.ones((1, n_states))).flatten()
# # C1 has shape length - 1 X n_states ** 2
# C2 = np.ones(length)
# c = -1 * np.concatenate((C1.flatten(), C2), axis=0)
# # PRINT SHAPES
# print("c has shape (length - 1) * n_states * n_states + length = {}".format(c.shape[0]))
# print("G has shape (length * n_states + (length - 1) * n_states * n_states) X ((length - 1) * n_states * n_states + length) = {}".format(G.shape))
# print("h has shape length * n_states + (length - 1) * n_states * n_states = {}".format(h.shape[0]))
# print("A has shape ((length - 2) * n_states + (length - 1)) X ((length - 1) * n_states * n_states + length) = {}".format(A.shape))
# print("b has shape (length - 2) * n_states + (length - 1) = {}".format(b.shape[0]))
# assert c.shape[0] == (length - 1) * n_states * n_states + length
# assert G.shape[0] == (length * n_states + (length - 1) * n_states * n_states)
# assert G.shape[1] == ((length - 1) * n_states * n_states + length)
# assert h.shape[0] == length * n_states + (length - 1) * n_states * n_states
# # assert A.shape[0] == ((length - 2) * n_states + (length - 1))
# assert A.shape[0] == ((length - 2) * n_states + 1)
# assert A.shape[1] == ((length - 1) * n_states * n_states + length)
# # assert b.shape[0] == (length - 2) * n_states + (length - 1)
# assert b.shape[0] == (length - 2) * n_states + 1
# # check rank
# # print(A.shape[0] - np.linalg.matrix_rank(A))
# # pass to matrix format
# c, G, h, A, b = matrix(c), matrix(G), matrix(h), matrix(A), matrix(b)
# sol=solvers.lp(c, G, h, A, b)
# en = -1 * sol['primal objective']
# dual_gap = sol['gap']
# mu = np.array(sol['x'][: (length - 1) * n_states ** 2]).flatten()
# mu_edges = np.reshape(mu, (length - 1, n_states, n_states))
# mu_nodes = np.zeros((length, n_states))
# for l in range(length - 1):
# mu_nodes[l] = mu_edges[l].sum(0)
# mu_nodes[length - 1] = mu_edges[-1].sum(1)
# out = [[mu_nodes, mu_edges], None]
# return out, en, dual_gap
if __name__ == "__main__":
    # Smoke-test the max-min saddle-point solver on a random chain CRF.
    np.random.seed(1)
    n_states = 5
    length = 10
    # Absolute-difference loss: Loss[i, j] = |i - j| (Toeplitz of 0..n-1).
    # The previous 0/1-loss initialization (np.ones + fill_diagonal) was dead
    # code — it was immediately overwritten by this matrix — so it is removed,
    # along with the unused `eps` and `edges` locals.
    Loss = toeplitz(np.arange(n_states))
    unary_potentials = np.random.random_sample((length, n_states))
    pairwise_potentials = np.random.random_sample((n_states, n_states))
    # Uniform initialization of the marginals / dual variables.
    p = np.ones((length, n_states)) / n_states
    nu_nodes = np.ones((length, n_states)) / n_states
    nu_edges = np.ones((length - 1, n_states, n_states)) / (n_states ** 2)
    max_iter = 50
    # Step size: inverse of twice the largest loss entry.
    eta = 1 / (2 * np.max(Loss))
    # NOTE(review): the solver body visible above returns THREE values
    # (out1, out2, dual_gaps); the original two-name unpacking would raise
    # ValueError, so all three are unpacked here — confirm against the
    # function's actual signature.
    out1, out2, dual_gaps = maxmin_spmp_sequence_p(nu_nodes,
                                                   nu_edges,
                                                   p,
                                                   unary_potentials,
                                                   pairwise_potentials,
                                                   Loss,
                                                   max_iter,
                                                   eta,
                                                   sum_product_cython=True)
| 40.446701
| 152
| 0.569654
| 2,278
| 15,936
| 3.775241
| 0.092186
| 0.113953
| 0.034419
| 0.053721
| 0.818837
| 0.789419
| 0.750233
| 0.726977
| 0.701395
| 0.675233
| 0
| 0.029052
| 0.287211
| 15,936
| 393
| 153
| 40.549618
| 0.728057
| 0.401355
| 0
| 0.755682
| 0
| 0
| 0.000866
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028409
| false
| 0
| 0.056818
| 0
| 0.113636
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
62b789628fb8711ec050ed22195efc1a42b1376c
| 1,625
|
py
|
Python
|
test/docstrings/codetag2.py
|
Setonas/MagicSetonas
|
ef76da5f27a0506b194c58072b81424e3ce985d7
|
[
"MIT"
] | 5
|
2017-02-22T10:17:39.000Z
|
2021-04-06T16:36:13.000Z
|
test/docstrings/codetag2.py
|
Setonas/MagicSetonas
|
ef76da5f27a0506b194c58072b81424e3ce985d7
|
[
"MIT"
] | null | null | null |
test/docstrings/codetag2.py
|
Setonas/MagicSetonas
|
ef76da5f27a0506b194c58072b81424e3ce985d7
|
[
"MIT"
] | 1
|
2020-08-29T02:30:52.000Z
|
2020-08-29T02:30:52.000Z
|
' foo bar XXX baz '
apibrėžti foo():
' foo FIXME baz '
' : punctuation.definition.string.begin.python, source.python, string.quoted.docstring.single.python
foo bar : source.python, string.quoted.docstring.single.python
XXX : keyword.codetag.notation.python, source.python, string.quoted.docstring.single.python
baz : source.python, string.quoted.docstring.single.python
' : punctuation.definition.string.end.python, source.python, string.quoted.docstring.single.python
: source.python
apibrėžti : meta.function.python, source.python, storage.type.function.python
: meta.function.python, source.python
foo : entity.name.function.python, meta.function.python, source.python
( : meta.function.parameters.python, meta.function.python, punctuation.definition.parameters.begin.python, source.python
) : meta.function.parameters.python, meta.function.python, punctuation.definition.parameters.end.python, source.python
: : meta.function.python, punctuation.section.function.begin.python, source.python
: source.python
' : punctuation.definition.string.begin.python, source.python, string.quoted.docstring.single.python
foo : source.python, string.quoted.docstring.single.python
FIXME : keyword.codetag.notation.python, source.python, string.quoted.docstring.single.python
baz : source.python, string.quoted.docstring.single.python
' : punctuation.definition.string.end.python, source.python, string.quoted.docstring.single.python
| 62.5
| 132
| 0.708923
| 181
| 1,625
| 6.364641
| 0.149171
| 0.1875
| 0.21875
| 0.208333
| 0.87066
| 0.803819
| 0.803819
| 0.664931
| 0.664931
| 0.664931
| 0
| 0
| 0.182154
| 1,625
| 25
| 133
| 65
| 0.866817
| 0
| 0
| 0.380952
| 0
| 0.190476
| 0.019692
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
62c1b8d0e89dfbac955c76e93d4d715344e87b5d
| 17,179
|
py
|
Python
|
unit/protoss_unit.py
|
jixiaozhong/mind-SC2
|
eece7f165a74c7e448361e19b20327e38309ce81
|
[
"MIT"
] | 30
|
2019-03-05T09:50:27.000Z
|
2019-08-28T11:33:43.000Z
|
unit/protoss_unit.py
|
jixiaozhong/mind-SC2
|
eece7f165a74c7e448361e19b20327e38309ce81
|
[
"MIT"
] | 10
|
2020-01-05T15:22:37.000Z
|
2021-08-25T15:29:40.000Z
|
unit/protoss_unit.py
|
mindgameSC2/Thought-SC2
|
9c138416a65fd3c4093b2841b6163e81c60b9be5
|
[
"MIT"
] | 6
|
2019-09-19T07:51:04.000Z
|
2022-01-23T11:02:51.000Z
|
from unit.units import Building, Creature, Queue
class ProtossBuilding(Building):
    """A Protoss structure: a Building extended with a regenerating shield."""

    def __init__(self):
        super().__init__()
        # Shields default to zero; concrete buildings override them in
        # their specialization() method.
        self.shield = 0
        self.shield_armor = 0

    def getEquivalentHP(self, attack):
        """Return effective durability against hits of strength *attack*.

        For attack == 0 the raw pool (hp + shield) is returned; otherwise
        each pool is inflated by attack / (attack - armor), with the divisor
        clamped to at least 1 so armor can never make a pool infinite.
        """
        if attack == 0:
            return self.hp + self.shield
        hull_part = self.hp * attack / max(attack - self.armor, 1)
        shield_part = self.shield * attack / max(attack - self.shield_armor, 1)
        return hull_part + shield_part
class ProtossCreature(Creature):
    # A Protoss unit: a Creature extended with a shield pool and shield armor.
    def __init__(self):
        super().__init__()
        # Shields default to zero; concrete units set them in specialization().
        self.shield = 0
        self.shield_armor = 0

    def getEquivalentHP(self, attack):
        # Effective durability against hits of strength `attack`.  For
        # attack == 0 the raw pool is returned; otherwise each pool is scaled
        # by attack / (attack - armor), with the divisor clamped to >= 1.
        if attack == 0:
            return self.hp + self.shield
        else:
            return self.hp * attack / max(attack - self.armor, 1) + \
                self.shield * attack / max(attack - self.shield_armor, 1)
# Basic Buildings
class Nexus(ProtossBuilding):
    # Main Protoss base structure: grants supply and trains workers,
    # hence the production queue.
    def __init__(self):
        super().__init__()
        self.specialization()

    def specialization(self):
        # Stat block (StarCraft II values): cost, build time, supply, durability.
        self.mineral_price = 400
        self.gas_price = 0
        self.build_time = 71
        self.food_supply = 15
        self.hp = 1000
        self.shield = 1000
        self.armor = 1
        self.shield_armor = 0
        self.queue = Queue()  # production queue for units being trained
class Pylon(ProtossBuilding):
def __init__(self):
super().__init__()
self.specialization()
def specialization(self):
self.mineral_price = 100
self.gas_price = 0
self.build_time = 18
self.food_supply = 8
self.hp = 200
self.shield = 200
self.armor = 1
self.shield_armor = 0
class Assimilator(ProtossBuilding):
def __init__(self):
super().__init__()
self.specialization()
def specialization(self):
self.mineral_price = 75
self.gas_price = 0
self.build_time = 21
self.hp = 300
self.shield = 300
self.armor = 1
self.shield_armor = 0
class PhotonCannon(ProtossBuilding):
def __init__(self):
super().__init__()
self.specialization()
def specialization(self):
self.mineral_price = 150
self.gas_price = 0
self.build_time = 29
self.hp = 150
self.shield = 150
self.armor = 1
self.shield_armor = 0
self.attack = 20
self.range = 7
self.dps = 22.4
class ShieldBattery(ProtossBuilding):
def __init__(self):
super().__init__()
self.specialization()
def specialization(self):
self.mineral_price = 100
self.gas_price = 0
self.build_time = 29
self.hp = 150
self.shield = 150
self.armor = 1
self.shield_armor = 0
# Producing Buildings
class Gateway(ProtossBuilding):
def __init__(self):
super().__init__()
self.specialization()
def specialization(self):
self.mineral_price = 150
self.gas_price = 0
self.build_time = 46
self.hp = 500
self.shield = 500
self.armor = 1
self.shield_armor = 0
self.max_size = 5
self.queue = Queue()
class Warpgate(ProtossBuilding):
def __init__(self):
super().__init__()
self.specialization()
def specialization(self):
self.mineral_price = 0
self.gas_price = 0
self.build_time = 7
self.hp = 500
self.shield = 500
self.armor = 1
self.shield_armor = 0
class RoboticsFacility(ProtossBuilding):
def __init__(self):
super().__init__()
self.specialization()
def specialization(self):
self.mineral_price = 200
self.gas_price = 100
self.build_time = 46
self.hp = 450
self.shield = 450
self.armor = 1
self.shield_armor = 0
class Stargate(ProtossBuilding):
def __init__(self):
super().__init__()
self.specialization()
def specialization(self):
self.mineral_price = 150
self.gas_price = 150
self.build_time = 43
self.hp = 600
self.shield = 600
self.armor = 1
self.shield_armor = 0
# Technologic Buildings
class Forge(ProtossBuilding):
def __init__(self):
super().__init__()
self.specialization()
def specialization(self):
self.mineral_price = 150
self.gas_price = 0
self.build_time = 32
self.hp = 400
self.shield = 400
self.armor = 1
self.shield_armor = 0
class CyberneticsCore(ProtossBuilding):
def __init__(self):
super().__init__()
self.specialization()
def specialization(self):
self.mineral_price = 150
self.gas_price = 0
self.build_time = 36
self.hp = 550
self.shield = 550
self.armor = 1
self.shield_armor = 0
class TwilightCouncil(ProtossBuilding):
def __init__(self):
super().__init__()
self.specialization()
def specialization(self):
self.mineral_price = 150
self.gas_price = 0
self.build_time = 36
self.hp = 550
self.shield = 550
self.armor = 1
self.shield_armor = 0
class TemplarArchives(ProtossBuilding):
def __init__(self):
super().__init__()
self.specialization()
def specialization(self):
self.mineral_price = 150
self.gas_price = 200
self.build_time = 36
self.hp = 500
self.shield = 500
self.armor = 1
self.shield_armor = 0
class DarkShrine(ProtossBuilding):
def __init__(self):
super().__init__()
self.specialization()
def specialization(self):
self.mineral_price = 150
self.gas_price = 150
self.build_time = 71
self.hp = 500
self.shield = 500
self.armor = 1
self.shield_armor = 0
class RoboticsBBay(ProtossBuilding):
def __init__(self):
super().__init__()
self.specialization()
def specialization(self):
self.mineral_price = 150
self.gas_price = 150
self.build_time = 46
self.hp = 500
self.shield = 500
self.armor = 1
self.shield_armor = 0
class FleetBeacon(ProtossBuilding):
def __init__(self):
super().__init__()
self.specialization()
def specialization(self):
self.mineral_price = 300
self.gas_price = 200
self.build_time = 43
self.hp = 500
self.shield = 500
self.armor = 1
self.shield_armor = 0
# Worker
class Probe(ProtossCreature):
    """Protoss worker unit."""

    def __init__(self):
        super().__init__()
        self.specialization()

    def specialization(self):
        """Assign the Probe's stat block onto the instance."""
        stats = {
            'mineral_price': 50,
            'gas_price': 0,
            'build_time': 12,
            'food_used': 1,
            'hp': 20,
            'shield': 20,
            'armor': 0,
            'shield_armor': 0,
            'attribute': ['L', 'M'],  # Light, Mechanical
            'attack': 5,
            'range': 0,
            'dps': 4.7,
            'movement': 3.94,
        }
        for field, value in stats.items():
            setattr(self, field, value)
# Gateway Units
class Zealot(ProtossCreature):
def __init__(self):
super().__init__()
self.specialization()
def specialization(self):
self.mineral_price = 100
self.gas_price = 0
self.build_time = 27
self.food_used = 2
self.hp = 100
self.shield = 50
self.armor = 1
self.shield_armor = 0
self.attribute = ['L', 'B']
self.attack = 16
self.range = 0
self.dps = 18.6
self.bonus_attack = {}
self.movement = 3.15
class Stalker(ProtossCreature):
def __init__(self):
super().__init__()
self.specialization()
def specialization(self):
self.mineral_price = 125
self.gas_price = 50
self.build_time = 30
self.food_used = 2
self.hp = 80
self.shield = 80
self.armor = 1
self.shield_armor = 0
self.attribute = ['A', 'M']
self.attack = 13
self.range = 6
self.dps = 9.7
self.bonus_attack = {'A': 5}
self.movement = 4.13
class Sentry(ProtossCreature):
def __init__(self):
super().__init__()
self.specialization()
def specialization(self):
self.mineral_price = 50
self.gas_price = 100
self.build_time = 26
self.food_used = 2
self.hp = 40
self.shield = 40
self.armor = 1
self.shield_armor = 0
self.attribute = ['L', 'M', 'P']
self.attack = 6
self.range = 5
self.dps = 8.5
self.movement = 3.15
class Adept(ProtossCreature):
def __init__(self):
super().__init__()
self.specialization()
def specialization(self):
self.mineral_price = 100
self.gas_price = 25
self.build_time = 27
self.food_used = 2
self.hp = 70
self.shield = 70
self.armor = 1
self.shield_armor = 0
self.attribute = ['L', 'B']
self.attack = 10
self.range = 4
self.dps = 6.2
self.bonus_attack = {'L': 12}
self.movement = 3.5
class HighTemplar(ProtossCreature):
def __init__(self):
super().__init__()
self.specialization()
def specialization(self):
self.mineral_price = 50
self.gas_price = 150
self.build_time = 39
self.food_used = 2
self.hp = 40
self.shield = 40
self.armor = 0
self.shield_armor = 0
self.attribute = ['L', 'B', 'P']
self.attack = 4
self.range = 6
self.dps = 3.2
self.movement = 2.62
class DarkTemplar(ProtossCreature):
def __init__(self):
super().__init__()
self.specialization()
def specialization(self):
self.mineral_price = 125
self.gas_price = 125
self.build_time = 39
self.food_used = 2
self.hp = 40
self.shield = 80
self.armor = 1
self.shield_armor = 0
self.attribute = ['L', 'B', 'P']
self.attack = 45
self.range = 0
self.dps = 37.2
self.movement = 3.94
class Archon(ProtossCreature):
def __init__(self):
super().__init__()
self.specialization()
def specialization(self):
self.mineral_price = 0
self.gas_price = 0
self.build_time = 9
self.food_used = 4
self.hp = 10
self.shield = 350
self.armor = 0
self.shield_armor = 0
self.attribute = ['P', 'Ma']
self.attack = 25
self.range = 3
self.dps = 20
self.bonus_attack = {'B': 10}
self.movement = 3.94
# Robotics Facility Units
class Observer(ProtossCreature):
def __init__(self):
super().__init__()
self.specialization()
def specialization(self):
self.mineral_price = 25
self.gas_price = 75
self.build_time = 21
self.food_used = 1
self.hp = 40
self.shield = 20
self.armor = 0
self.shield_armor = 0
self.attack = 0
self.range = 0
self.dps = 0
self.movement = 2.62
class WarpPrism(ProtossCreature):
def __init__(self):
super().__init__()
self.specialization()
def specialization(self):
self.mineral_price = 200
self.gas_price = 0
self.build_time = 36
self.food_used = 2
self.hp = 80
self.shield = 100
self.armor = 0
self.shield_armor = 0
self.attribute = ['A', 'M', 'P']
self.attack = 0
self.range = 0
self.dps = 0
self.movement = 4.13
class Immortal(ProtossCreature):
def __init__(self):
super().__init__()
self.specialization()
def specialization(self):
self.mineral_price = 250
self.gas_price = 100
self.build_time = 39
self.food_used = 4
self.hp = 250
self.shield = 100
self.armor = 1
self.shield_armor = 0
self.attribute = ['A', 'M']
self.attack = 20
self.range = 6
self.dps = 19.2
self.bonus_attack = {'A': 30}
self.movement = 3.15
class Colossus(ProtossCreature):
def __init__(self):
super().__init__()
self.specialization()
def specialization(self):
self.mineral_price = 300
self.gas_price = 200
self.build_time = 54
self.food_used = 6
self.hp = 200
self.shield = 150
self.armor = 1
self.shield_armor = 0
self.attribute = ['A', 'M', 'Ma']
self.attack = 20
self.range = 7
self.dps = 18.7
self.bonus_attack = {'L': 5}
self.movement = 3.15
class Disruptor(ProtossCreature):
def __init__(self):
super().__init__()
self.specialization()
def specialization(self):
self.mineral_price = 150
self.gas_price = 150
self.build_time = 36
self.food_used = 3
self.hp = 100
self.shield = 100
self.armor = 1
self.shield_armor = 0
self.attribute = ['A', 'M']
self.attack = 0
self.range = 0
self.dps = 0
self.movement = 3.15
# Stargate Units
class Phoenix(ProtossCreature):
def __init__(self):
super().__init__()
self.specialization()
def specialization(self):
self.mineral_price = 150
self.gas_price = 100
self.build_time = 25
self.food_used = 2
self.hp = 120
self.shield = 60
self.armor = 0
self.shield_armor = 0
self.attribute = ['L', 'M']
self.attack = 10
self.range = 5
self.dps = 12.6
self.bonus_attack = {'L': 5}
self.movement = 5.95
class VoidRay(ProtossCreature):
def __init__(self):
super().__init__()
self.specialization()
def specialization(self):
self.mineral_price = 250
self.gas_price = 150
self.build_time = 43
self.food_used = 4
self.hp = 150
self.shield = 100
self.armor = 0
self.shield_armor = 0
self.attribute = ['A', 'M']
self.attack = 6
self.range = 6
self.dps = 16.8
self.bonus_attack = {'A': 4}
self.movement = 3.5
class Oracle(ProtossCreature):
def __init__(self):
super().__init__()
self.specialization()
def specialization(self):
self.mineral_price = 150
self.gas_price = 150
self.build_time = 36
self.food_used = 3
self.hp = 100
self.shield = 60
self.armor = 0
self.shield_armor = 0
self.attribute = ['L', 'M', 'P']
self.attack = 15
self.range = 4
self.dps = 24.4
self.bonus_attack = {'L': 7}
self.movement = 5.6
class Tempest(ProtossCreature):
def __init__(self):
super().__init__()
self.specialization()
def specialization(self):
self.mineral_price = 250
self.gas_price = 175
self.build_time = 43
self.food_used = 5
self.hp = 150
self.shield = 125
self.armor = 2
self.shield_armor = 0
self.attribute = ['A', 'M', 'Ma']
self.attack = 40
self.range = 15
self.dps = 16.97
self.bonus_attack = {'Ma': 22}
self.movement = 3.5
class Carrier(ProtossCreature):
def __init__(self):
super().__init__()
self.specialization()
def specialization(self):
self.mineral_price = 350
self.gas_price = 250
self.build_time = 64
self.food_used = 6
self.hp = 300
self.shield = 150
self.armor = 2
self.shield_armor = 0
self.attribute = ['A', 'M', 'Ma']
self.attack = 10
self.range = 8
self.dps = 37.8
self.movement = 2.62
class Interceptor(ProtossCreature):
# created by Carrier
def __init__(self):
super().__init__()
self.specialization()
def specialization(self):
self.mineral_price = 15
self.gas_price = 0
self.build_time = 11
self.food_used = 0
self.hp = 40
self.shield = 40
self.armor = 0
self.shield_armor = 0
self.attribute = ['L', 'M']
self.attack = 10
self.range = 2
self.dps = 4.7
self.movement = 10.5
class Mothership(ProtossCreature):
# producted by Nexus
def __init__(self):
super().__init__()
self.specialization()
def specialization(self):
self.mineral_price = 300
self.gas_price = 300
self.build_time = 71
self.food_used = 8
self.hp = 350
self.shield = 350
self.armor = 2
self.shield_armor = 0
self.attribute = ['A', 'M', 'P', 'Ma']
self.attack = 36
self.range = 7
self.dps = 22.8
self.movement = 2.62
| 20.772672
| 73
| 0.549566
| 2,005
| 17,179
| 4.466833
| 0.07581
| 0.091559
| 0.066994
| 0.067887
| 0.858531
| 0.800804
| 0.770992
| 0.736601
| 0.714046
| 0.699866
| 0
| 0.064657
| 0.349089
| 17,179
| 826
| 74
| 20.797821
| 0.736273
| 0.009023
| 0
| 0.780405
| 0
| 0
| 0.003703
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.128378
| false
| 0
| 0.001689
| 0
| 0.201014
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
62e80a7c75d4538a8532bc9bd03d65c579aacd18
| 32
|
py
|
Python
|
python/quine.py
|
ahuglajbclajep/quine
|
5d8d41461f7bdec1dafba2b1939c0c2b2022c9a8
|
[
"MIT"
] | null | null | null |
python/quine.py
|
ahuglajbclajep/quine
|
5d8d41461f7bdec1dafba2b1939c0c2b2022c9a8
|
[
"MIT"
] | null | null | null |
python/quine.py
|
ahuglajbclajep/quine
|
5d8d41461f7bdec1dafba2b1939c0c2b2022c9a8
|
[
"MIT"
] | null | null | null |
q='q=%r;print(q%%q)';print(q%q)  # classic one-line Python quine: %r re-embeds the template into itself
| 16
| 31
| 0.53125
| 9
| 32
| 1.888889
| 0.333333
| 0.352941
| 0.823529
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.03125
| 32
| 1
| 32
| 32
| 0.548387
| 0
| 0
| 0
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
1a28c56859c561eb00b0d006dddb2ed83b0b5924
| 217
|
py
|
Python
|
trochilidae/tests/test_interoperable_filter.py
|
MATTHEWFRAZER/trochilidae
|
35e907ba9dcb1f283f79f4f32d61db6b53a1ca97
|
[
"MIT"
] | null | null | null |
trochilidae/tests/test_interoperable_filter.py
|
MATTHEWFRAZER/trochilidae
|
35e907ba9dcb1f283f79f4f32d61db6b53a1ca97
|
[
"MIT"
] | null | null | null |
trochilidae/tests/test_interoperable_filter.py
|
MATTHEWFRAZER/trochilidae
|
35e907ba9dcb1f283f79f4f32d61db6b53a1ca97
|
[
"MIT"
] | 1
|
2021-11-12T18:49:15.000Z
|
2021-11-12T18:49:15.000Z
|
import pytest
def test_interoperable_filter_import():
    """Smoke test: the interoperable_filter symbol must import cleanly."""
    try:
        from trochilidae.interoperable_filter import interoperable_filter
    except Exception as ex:  # broad on purpose: any import-time failure should fail the test
        pytest.fail("import failed:{0}".format(ex))
| 31
| 73
| 0.741935
| 26
| 217
| 6
| 0.653846
| 0.365385
| 0.320513
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005618
| 0.179724
| 217
| 7
| 74
| 31
| 0.870787
| 0
| 0
| 0
| 0
| 0
| 0.077982
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| true
| 0
| 0.666667
| 0
| 0.833333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
a7ef51e0c4cf71bcc5f9f53ff2982019fd8d6d8a
| 98
|
py
|
Python
|
brainlit/algorithms/connect_fragments/__init__.py
|
NeuroDataDesign/brainl
|
fc99f59a9d835039dac713a028ac2521ac217e95
|
[
"Apache-2.0"
] | null | null | null |
brainlit/algorithms/connect_fragments/__init__.py
|
NeuroDataDesign/brainl
|
fc99f59a9d835039dac713a028ac2521ac217e95
|
[
"Apache-2.0"
] | null | null | null |
brainlit/algorithms/connect_fragments/__init__.py
|
NeuroDataDesign/brainl
|
fc99f59a9d835039dac713a028ac2521ac217e95
|
[
"Apache-2.0"
] | null | null | null |
import brainlit.algorithms.connect_fragments
from brainlit.algorithms.connect_fragments import *
| 24.5
| 51
| 0.877551
| 11
| 98
| 7.636364
| 0.545455
| 0.428571
| 0.595238
| 0.809524
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.071429
| 98
| 3
| 52
| 32.666667
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
c50088fd368b2171cc613d837e2b6ffd9c5216e5
| 76,206
|
py
|
Python
|
try/data_processing/resume_pb2.py
|
searobbersduck/ResumeAnalyze
|
984484dd1c6af090ae1b7854bc931e06a9294586
|
[
"MIT"
] | null | null | null |
try/data_processing/resume_pb2.py
|
searobbersduck/ResumeAnalyze
|
984484dd1c6af090ae1b7854bc931e06a9294586
|
[
"MIT"
] | null | null | null |
try/data_processing/resume_pb2.py
|
searobbersduck/ResumeAnalyze
|
984484dd1c6af090ae1b7854bc931e06a9294586
|
[
"MIT"
] | null | null | null |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: resume.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='resume.proto',
package='com.inmind.idmg.search.common',
syntax='proto3',
serialized_pb=_b('\n\x0cresume.proto\x12\x1d\x63om.inmind.idmg.search.common\"\xdd\x12\n\x06Resume\x12\x13\n\x0b\x63hineseName\x18\x01 \x01(\t\x12\x13\n\x0b\x65nglishName\x18\x02 \x01(\t\x12\x10\n\x08\x62irthday\x18\x03 \x01(\t\x12\x0e\n\x06gender\x18\x04 \x01(\x05\x12\x17\n\x0f\x63urrentJobTitle\x18\x05 \x01(\t\x12<\n\x0f\x65xpectFunctions\x18\x06 \x03(\x0b\x32#.com.inmind.idmg.search.common.Dict\x12\x0c\n\x04qqNo\x18\x07 \x01(\t\x12\x10\n\x08wechatNo\x18\x08 \x01(\t\x12\x37\n\nindustries\x18\t \x03(\x0b\x32#.com.inmind.idmg.search.common.Dict\x12=\n\x10\x65xpectIndustries\x18\n \x03(\x0b\x32#.com.inmind.idmg.search.common.Dict\x12@\n\x0f\x65xpectLocations\x18\x0b \x03(\x0b\x32\'.com.inmind.idmg.search.common.Location\x12\x41\n\x12\x65xpectSalaryDetail\x18\x0c \x01(\x0b\x32%.com.inmind.idmg.search.common.Salary\x12\x1b\n\x13\x65xpectSalaryInMonth\x18\r \x01(\x02\x12;\n\x0csalaryDetail\x18\x0e \x01(\x0b\x32%.com.inmind.idmg.search.common.Salary\x12\x0e\n\x06salary\x18\x0f \x01(\x02\x12\x17\n\x0fjobSearchStatus\x18\x10 \x01(\x05\x12\x37\n\x07\x61\x64\x64ress\x18\x11 \x01(\x0b\x32&.com.inmind.idmg.search.common.Address\x12\x11\n\tseniority\x18\x12 \x01(\x05\x12\x15\n\rmaritalStatus\x18\x13 \x01(\x05\x12\x17\n\x0f\x65\x64ucationDegree\x18\x14 \x01(\x05\x12\x10\n\x08siteUrls\x18\x15 \x03(\t\x12\x1a\n\x12registerLocationId\x18\x16 \x01(\x05\x12\x41\n\x10registerLocation\x18\x17 \x01(\x0b\x32\'.com.inmind.idmg.search.common.Location\x12\x11\n\tpartyInfo\x18\x18 \x01(\t\x12\x10\n\x08typeCode\x18\x19 \x01(\x05\x12\x0e\n\x06source\x18\x1a \x01(\x05\x12\x11\n\tintroduce\x18\x1b \x01(\t\x12\x15\n\rlanguageCodes\x18\x1c \x03(\x05\x12?\n\x0elanguageSkills\x18\x1d \x03(\x0b\x32\'.com.inmind.idmg.search.common.Language\x12\x33\n\x06skills\x18\x1e \x03(\x0b\x32#.com.inmind.idmg.search.common.Dict\x12;\n\x0e\x63\x65rtifications\x18\x1f \x03(\x0b\x32#.com.inmind.idmg.search.common.Dict\x12\x14\n\x0cprivateEmail\x18 \x01(\t\x12\x0f\n\x07workTel\x18! 
\x01(\t\x12\x0e\n\x06mobile\x18\" \x01(\t\x12\x10\n\x08otherTel\x18# \x01(\t\x12\x15\n\rjobGradeCodes\x18$ \x03(\x05\x12L\n\x15\x63urrentWorkExperience\x18% \x01(\x0b\x32-.com.inmind.idmg.search.common.WorkExperience\x12J\n\x13pastWorkExperiences\x18& \x03(\x0b\x32-.com.inmind.idmg.search.common.WorkExperience\x12L\n\x12projectExperiences\x18\' \x03(\x0b\x32\x30.com.inmind.idmg.search.common.ProjectExperience\x12P\n\x14\x65\x64ucationExperiences\x18( \x03(\x0b\x32\x32.com.inmind.idmg.search.common.EducationExperience\x12\x10\n\x08idNumber\x18) \x01(\t\x12\x15\n\rstartWorkDate\x18* \x01(\t\x12\x12\n\nupdateTime\x18+ \x01(\t\x12\x11\n\tavatarUrl\x18, \x01(\t\x12\x46\n\x0fworkExperiences\x18- \x03(\x0b\x32-.com.inmind.idmg.search.common.WorkExperience\x12\x16\n\x0e\x65xpectPosition\x18. \x01(\t\x12\r\n\x05score\x18/ \x01(\x02\x12\x30\n\x04tags\x18\x30 \x03(\x0b\x32\".com.inmind.idmg.search.common.Tag\x12\x13\n\x0b\x65xtralInfos\x18\x31 \x01(\t\x12?\n\x0enativeLocation\x18\x32 \x01(\x0b\x32\'.com.inmind.idmg.search.common.Location\x12\x18\n\x10nativeLocationId\x18\x33 \x01(\x05\x12\x12\n\nresumeLang\x18\x34 \x01(\x05\x12\x38\n\x07\x63ountry\x18\x35 \x01(\x0b\x32\'.com.inmind.idmg.search.common.Location\x12\x11\n\tcountryId\x18\x36 \x01(\x05\x12\x12\n\nnegotiable\x18\x37 \x01(\x05\x12\x0b\n\x03\x61ge\x18\x38 \x01(\x05\x12\x13\n\x0bskillDetail\x18\x39 \x01(\t\x12\x41\n\x14\x65xpectIndustriesNorm\x18: \x03(\x0b\x32#.com.inmind.idmg.search.common.Dict\x12\x1a\n\x12\x65xpectPositionNorm\x18; \x01(\t\x12;\n\x0eindustriesNorm\x18< \x03(\x0b\x32#.com.inmind.idmg.search.common.Dict\x12\x1b\n\x13\x63urrentJobTitleNorm\x18= \x01(\t\x12@\n\x13\x65xpectFunctionsNorm\x18> \x03(\x0b\x32#.com.inmind.idmg.search.common.Dict\x12\x43\n\x12languageSkillsNorm\x18? 
\x03(\x0b\x32\'.com.inmind.idmg.search.common.Language\x12?\n\x12\x63\x65rtificationsNorm\x18@ \x03(\x0b\x32#.com.inmind.idmg.search.common.Dict\"[\n\x03Tag\x12\x11\n\ttopSchool\x18\x01 \x01(\x05\x12\x10\n\x08topMajor\x18\x02 \x01(\x05\x12\x0b\n\x03\x61ge\x18\x03 \x01(\t\x12\x12\n\ntopCompany\x18\x04 \x01(\x05\x12\x0e\n\x06stable\x18\x05 \x01(\x05\"#\n\x04\x44ict\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\r\n\x05title\x18\x02 \x01(\t\">\n\tMajorDict\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\r\n\x05title\x18\x02 \x01(\t\x12\x14\n\x0csuggestMajor\x18\x03 \x01(\t\"\xaf\x01\n\x08Location\x12\n\n\x02id\x18\x01 \x01(\x05\x12\x0c\n\x04\x63ode\x18\x02 \x01(\x05\x12\x0c\n\x04name\x18\x03 \x01(\t\x12\x10\n\x08\x66ullname\x18\x04 \x03(\t\x12:\n\tancestors\x18\x05 \x03(\x0b\x32\'.com.inmind.idmg.search.common.Location\x12\x0c\n\x04type\x18\x06 \x01(\t\x12\r\n\x05level\x18\x07 \x01(\x05\x12\x10\n\x08parentId\x18\x08 \x01(\x05\";\n\x06Salary\x12\x12\n\nsalaryType\x18\x01 \x01(\x05\x12\x0e\n\x06salary\x18\x02 \x01(\x02\x12\r\n\x05month\x18\x03 \x01(\x05\"T\n\x07\x41\x64\x64ress\x12\x39\n\x08location\x18\x01 \x01(\x0b\x32\'.com.inmind.idmg.search.common.Location\x12\x0e\n\x06\x64\x65tail\x18\x02 \x01(\t\"P\n\x08Language\x12\x35\n\x08language\x18\x01 \x01(\x0b\x32#.com.inmind.idmg.search.common.Dict\x12\r\n\x05grade\x18\x02 \x01(\x05\"\x9e\x06\n\x0eWorkExperience\x12/\n\x03org\x18\x01 \x01(\x0b\x32\".com.inmind.idmg.search.common.Org\x12\x10\n\x08jobTitle\x18\x02 \x01(\t\x12\x10\n\x08jobgrade\x18\x03 \x01(\x05\x12\x11\n\tstartedAt\x18\x04 \x01(\t\x12\x0f\n\x07\x65ndedAt\x18\x05 \x01(\t\x12\x0f\n\x07isOnJob\x18\x06 \x01(\x08\x12\x37\n\ndepartment\x18\x07 \x01(\x0b\x32#.com.inmind.idmg.search.common.Dept\x12;\n\x0csalaryDetail\x18\x08 \x01(\x0b\x32%.com.inmind.idmg.search.common.Salary\x12\x0e\n\x06salary\x18\t \x01(\x02\x12\x16\n\x0eleaderJobTitle\x18\n \x01(\t\x12\x14\n\x0cnSubordinate\x18\x0b \x01(\x05\x12\x13\n\x0b\x64\x65scription\x18\x0c \x01(\t\x12\x36\n\tfunctions\x18\r 
\x03(\x0b\x32#.com.inmind.idmg.search.common.Dict\x12\x39\n\x0cindustryDict\x18\x0e \x01(\x0b\x32#.com.inmind.idmg.search.common.Dict\x12\x12\n\nsuggestJob\x18\x0f \x01(\t\x12\x14\n\x0cisBigCompany\x18\x10 \x01(\x05\x12\r\n\x05jobId\x18\x11 \x01(\x05\x12\x15\n\rskillKeyWords\x18\x12 \x03(\t\x12\x10\n\x08\x63orpDesc\x18\x13 \x01(\t\x12\x17\n\x0fworkPerformance\x18\x14 \x01(\t\x12\x13\n\x0bleaveReason\x18\x15 \x01(\t\x12\x10\n\x08isIntern\x18\x16 \x01(\x08\x12\x13\n\x0borgNameNorm\x18\x17 \x01(\t\x12=\n\x10industryDictNorm\x18\x18 \x01(\x0b\x32#.com.inmind.idmg.search.common.Dict\x12\x14\n\x0cjobTitleNorm\x18\x19 \x01(\t\x12:\n\rfunctionsNorm\x18\x1a \x03(\x0b\x32#.com.inmind.idmg.search.common.Dict\"\xba\x01\n\x03Org\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0cindustryText\x18\x02 \x01(\t\x12\x10\n\x08\x63oreName\x18\x03 \x01(\t\x12\x10\n\x08industry\x18\x04 \x01(\t\x12\x10\n\x08location\x18\x05 \x01(\t\x12\x0f\n\x07suggest\x18\x06 \x01(\t\x12\x12\n\nnameAndInd\x18\x07 \x01(\t\x12\x18\n\x10locAndNameAndInd\x18\x08 \x01(\t\x12\x1a\n\x12locAndNameAndIndID\x18\t \x01(\x05\"\x15\n\x04\x44\x65pt\x12\r\n\x05title\x18\x01 \x01(\t\"\xf0\x01\n\x11ProjectExperience\x12\x11\n\tstartedAt\x18\x01 \x01(\t\x12\x0f\n\x07\x65ndedAt\x18\x02 \x01(\t\x12\x13\n\x0bisOnProject\x18\x03 \x01(\x08\x12/\n\x03org\x18\x04 \x01(\x0b\x32\".com.inmind.idmg.search.common.Org\x12\x10\n\x08jobTitle\x18\x05 \x01(\t\x12\r\n\x05title\x18\x06 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x07 \x01(\t\x12\x11\n\ttechnique\x18\x08 \x01(\t\x12\x13\n\x0bperformance\x18\t \x01(\t\x12\x13\n\x0borgNameNorm\x18\n \x01(\t\"\xd8\x02\n\x13\x45\x64ucationExperience\x12\x11\n\tstartedAt\x18\x01 \x01(\t\x12\x0f\n\x07\x65ndedAt\x18\x02 \x01(\t\x12\x12\n\nisOnSchool\x18\x03 \x01(\x08\x12\x0c\n\x04type\x18\x04 \x01(\x05\x12\x32\n\x05major\x18\x05 \x01(\x0b\x32#.com.inmind.idmg.search.common.Dict\x12\x35\n\x06school\x18\x06 \x01(\x0b\x32%.com.inmind.idmg.search.common.School\x12\x1d\n\x15\x62yEntranceExamination\x18\x07 
\x01(\x05\x12\x39\n\nschoolNorm\x18\x08 \x01(\x0b\x32%.com.inmind.idmg.search.common.School\x12\x36\n\tmajorNorm\x18\t \x01(\x0b\x32#.com.inmind.idmg.search.common.Dict\"V\n\x06School\x12\n\n\x02id\x18\x01 \x01(\x05\x12\x0c\n\x04\x63ode\x18\x02 \x01(\x05\x12\r\n\x05title\x18\x03 \x01(\t\x12\x12\n\nschoolType\x18\x04 \x01(\x05\x12\x0f\n\x07suggest\x18\x05 \x01(\tB\x02P\x01\x62\x06proto3')
)
_RESUME = _descriptor.Descriptor(
name='Resume',
full_name='com.inmind.idmg.search.common.Resume',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='chineseName', full_name='com.inmind.idmg.search.common.Resume.chineseName', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='englishName', full_name='com.inmind.idmg.search.common.Resume.englishName', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='birthday', full_name='com.inmind.idmg.search.common.Resume.birthday', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='gender', full_name='com.inmind.idmg.search.common.Resume.gender', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='currentJobTitle', full_name='com.inmind.idmg.search.common.Resume.currentJobTitle', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='expectFunctions', full_name='com.inmind.idmg.search.common.Resume.expectFunctions', index=5,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='qqNo', full_name='com.inmind.idmg.search.common.Resume.qqNo', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='wechatNo', full_name='com.inmind.idmg.search.common.Resume.wechatNo', index=7,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='industries', full_name='com.inmind.idmg.search.common.Resume.industries', index=8,
number=9, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='expectIndustries', full_name='com.inmind.idmg.search.common.Resume.expectIndustries', index=9,
number=10, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='expectLocations', full_name='com.inmind.idmg.search.common.Resume.expectLocations', index=10,
number=11, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='expectSalaryDetail', full_name='com.inmind.idmg.search.common.Resume.expectSalaryDetail', index=11,
number=12, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='expectSalaryInMonth', full_name='com.inmind.idmg.search.common.Resume.expectSalaryInMonth', index=12,
number=13, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='salaryDetail', full_name='com.inmind.idmg.search.common.Resume.salaryDetail', index=13,
number=14, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='salary', full_name='com.inmind.idmg.search.common.Resume.salary', index=14,
number=15, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='jobSearchStatus', full_name='com.inmind.idmg.search.common.Resume.jobSearchStatus', index=15,
number=16, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='address', full_name='com.inmind.idmg.search.common.Resume.address', index=16,
number=17, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='seniority', full_name='com.inmind.idmg.search.common.Resume.seniority', index=17,
number=18, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='maritalStatus', full_name='com.inmind.idmg.search.common.Resume.maritalStatus', index=18,
number=19, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='educationDegree', full_name='com.inmind.idmg.search.common.Resume.educationDegree', index=19,
number=20, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='siteUrls', full_name='com.inmind.idmg.search.common.Resume.siteUrls', index=20,
number=21, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='registerLocationId', full_name='com.inmind.idmg.search.common.Resume.registerLocationId', index=21,
number=22, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='registerLocation', full_name='com.inmind.idmg.search.common.Resume.registerLocation', index=22,
number=23, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='partyInfo', full_name='com.inmind.idmg.search.common.Resume.partyInfo', index=23,
number=24, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='typeCode', full_name='com.inmind.idmg.search.common.Resume.typeCode', index=24,
number=25, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='source', full_name='com.inmind.idmg.search.common.Resume.source', index=25,
number=26, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='introduce', full_name='com.inmind.idmg.search.common.Resume.introduce', index=26,
number=27, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='languageCodes', full_name='com.inmind.idmg.search.common.Resume.languageCodes', index=27,
number=28, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='languageSkills', full_name='com.inmind.idmg.search.common.Resume.languageSkills', index=28,
number=29, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='skills', full_name='com.inmind.idmg.search.common.Resume.skills', index=29,
number=30, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='certifications', full_name='com.inmind.idmg.search.common.Resume.certifications', index=30,
number=31, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='privateEmail', full_name='com.inmind.idmg.search.common.Resume.privateEmail', index=31,
number=32, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='workTel', full_name='com.inmind.idmg.search.common.Resume.workTel', index=32,
number=33, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='mobile', full_name='com.inmind.idmg.search.common.Resume.mobile', index=33,
number=34, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='otherTel', full_name='com.inmind.idmg.search.common.Resume.otherTel', index=34,
number=35, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='jobGradeCodes', full_name='com.inmind.idmg.search.common.Resume.jobGradeCodes', index=35,
number=36, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='currentWorkExperience', full_name='com.inmind.idmg.search.common.Resume.currentWorkExperience', index=36,
number=37, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='pastWorkExperiences', full_name='com.inmind.idmg.search.common.Resume.pastWorkExperiences', index=37,
number=38, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='projectExperiences', full_name='com.inmind.idmg.search.common.Resume.projectExperiences', index=38,
number=39, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='educationExperiences', full_name='com.inmind.idmg.search.common.Resume.educationExperiences', index=39,
number=40, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='idNumber', full_name='com.inmind.idmg.search.common.Resume.idNumber', index=40,
number=41, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='startWorkDate', full_name='com.inmind.idmg.search.common.Resume.startWorkDate', index=41,
number=42, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='updateTime', full_name='com.inmind.idmg.search.common.Resume.updateTime', index=42,
number=43, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='avatarUrl', full_name='com.inmind.idmg.search.common.Resume.avatarUrl', index=43,
number=44, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='workExperiences', full_name='com.inmind.idmg.search.common.Resume.workExperiences', index=44,
number=45, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='expectPosition', full_name='com.inmind.idmg.search.common.Resume.expectPosition', index=45,
number=46, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='score', full_name='com.inmind.idmg.search.common.Resume.score', index=46,
number=47, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='tags', full_name='com.inmind.idmg.search.common.Resume.tags', index=47,
number=48, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='extralInfos', full_name='com.inmind.idmg.search.common.Resume.extralInfos', index=48,
number=49, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='nativeLocation', full_name='com.inmind.idmg.search.common.Resume.nativeLocation', index=49,
number=50, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='nativeLocationId', full_name='com.inmind.idmg.search.common.Resume.nativeLocationId', index=50,
number=51, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='resumeLang', full_name='com.inmind.idmg.search.common.Resume.resumeLang', index=51,
number=52, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='country', full_name='com.inmind.idmg.search.common.Resume.country', index=52,
number=53, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='countryId', full_name='com.inmind.idmg.search.common.Resume.countryId', index=53,
number=54, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='negotiable', full_name='com.inmind.idmg.search.common.Resume.negotiable', index=54,
number=55, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='age', full_name='com.inmind.idmg.search.common.Resume.age', index=55,
number=56, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='skillDetail', full_name='com.inmind.idmg.search.common.Resume.skillDetail', index=56,
number=57, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='expectIndustriesNorm', full_name='com.inmind.idmg.search.common.Resume.expectIndustriesNorm', index=57,
number=58, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='expectPositionNorm', full_name='com.inmind.idmg.search.common.Resume.expectPositionNorm', index=58,
number=59, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='industriesNorm', full_name='com.inmind.idmg.search.common.Resume.industriesNorm', index=59,
number=60, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='currentJobTitleNorm', full_name='com.inmind.idmg.search.common.Resume.currentJobTitleNorm', index=60,
number=61, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='expectFunctionsNorm', full_name='com.inmind.idmg.search.common.Resume.expectFunctionsNorm', index=61,
number=62, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='languageSkillsNorm', full_name='com.inmind.idmg.search.common.Resume.languageSkillsNorm', index=62,
number=63, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='certificationsNorm', full_name='com.inmind.idmg.search.common.Resume.certificationsNorm', index=63,
number=64, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=48,
serialized_end=2445,
)
_TAG = _descriptor.Descriptor(
name='Tag',
full_name='com.inmind.idmg.search.common.Tag',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='topSchool', full_name='com.inmind.idmg.search.common.Tag.topSchool', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='topMajor', full_name='com.inmind.idmg.search.common.Tag.topMajor', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='age', full_name='com.inmind.idmg.search.common.Tag.age', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='topCompany', full_name='com.inmind.idmg.search.common.Tag.topCompany', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='stable', full_name='com.inmind.idmg.search.common.Tag.stable', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2447,
serialized_end=2538,
)
# Descriptor for the proto3 message `Dict` — a simple code/title lookup pair.
# Field type codes follow google.protobuf.descriptor: type=5 is int32, type=9 is
# string; label=1 is optional/singular.
# NOTE: protoc-generated code — regenerate from the .proto rather than editing.
_DICT = _descriptor.Descriptor(
  name='Dict',
  full_name='com.inmind.idmg.search.common.Dict',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='code', full_name='com.inmind.idmg.search.common.Dict.code', index=0,
      number=1, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='title', full_name='com.inmind.idmg.search.common.Dict.title', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  # Byte offsets of this message definition inside the serialized FileDescriptorProto.
  serialized_start=2540,
  serialized_end=2575,
)
# Descriptor for the proto3 message `MajorDict` — a `Dict`-like code/title pair
# plus a `suggestMajor` string. Field type codes per google.protobuf.descriptor:
# type=5 int32, type=9 string; label=1 singular.
# NOTE: protoc-generated code — regenerate from the .proto rather than editing.
_MAJORDICT = _descriptor.Descriptor(
  name='MajorDict',
  full_name='com.inmind.idmg.search.common.MajorDict',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='code', full_name='com.inmind.idmg.search.common.MajorDict.code', index=0,
      number=1, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='title', full_name='com.inmind.idmg.search.common.MajorDict.title', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='suggestMajor', full_name='com.inmind.idmg.search.common.MajorDict.suggestMajor', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  # Byte offsets of this message definition inside the serialized FileDescriptorProto.
  serialized_start=2577,
  serialized_end=2639,
)
# Descriptor for the proto3 message `Location` — a geographic node with id/code/
# name plus a repeated `fullname` and a repeated self-referential `ancestors`
# chain (its message_type is wired to _LOCATION after all descriptors exist).
# Type codes per google.protobuf.descriptor: type=5 int32, type=9 string,
# type=11 message; label=1 singular, label=3 repeated.
# NOTE: protoc-generated code — regenerate from the .proto rather than editing.
_LOCATION = _descriptor.Descriptor(
  name='Location',
  full_name='com.inmind.idmg.search.common.Location',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='id', full_name='com.inmind.idmg.search.common.Location.id', index=0,
      number=1, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='code', full_name='com.inmind.idmg.search.common.Location.code', index=1,
      number=2, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='name', full_name='com.inmind.idmg.search.common.Location.name', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='fullname', full_name='com.inmind.idmg.search.common.Location.fullname', index=3,
      number=4, type=9, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='ancestors', full_name='com.inmind.idmg.search.common.Location.ancestors', index=4,
      number=5, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='type', full_name='com.inmind.idmg.search.common.Location.type', index=5,
      number=6, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='level', full_name='com.inmind.idmg.search.common.Location.level', index=6,
      number=7, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='parentId', full_name='com.inmind.idmg.search.common.Location.parentId', index=7,
      number=8, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  # Byte offsets of this message definition inside the serialized FileDescriptorProto.
  serialized_start=2642,
  serialized_end=2817,
)
# Descriptor for the proto3 message `Salary` — salaryType/month (int32) and a
# `salary` amount declared as type=2 (float per google.protobuf.descriptor).
# NOTE: protoc-generated code — regenerate from the .proto rather than editing.
_SALARY = _descriptor.Descriptor(
  name='Salary',
  full_name='com.inmind.idmg.search.common.Salary',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='salaryType', full_name='com.inmind.idmg.search.common.Salary.salaryType', index=0,
      number=1, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='salary', full_name='com.inmind.idmg.search.common.Salary.salary', index=1,
      number=2, type=2, cpp_type=6, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='month', full_name='com.inmind.idmg.search.common.Salary.month', index=2,
      number=3, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  # Byte offsets of this message definition inside the serialized FileDescriptorProto.
  serialized_start=2819,
  serialized_end=2878,
)
# Descriptor for the proto3 message `Address` — a message-typed `location`
# (wired to _LOCATION after all descriptors exist) plus a free-text `detail`.
# Type codes per google.protobuf.descriptor: type=11 message, type=9 string.
# NOTE: protoc-generated code — regenerate from the .proto rather than editing.
_ADDRESS = _descriptor.Descriptor(
  name='Address',
  full_name='com.inmind.idmg.search.common.Address',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='location', full_name='com.inmind.idmg.search.common.Address.location', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='detail', full_name='com.inmind.idmg.search.common.Address.detail', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  # Byte offsets of this message definition inside the serialized FileDescriptorProto.
  serialized_start=2880,
  serialized_end=2964,
)
# Descriptor for the proto3 message `Language` — a message-typed `language`
# (wired to _DICT after all descriptors exist) and an int32 `grade`.
# Type codes per google.protobuf.descriptor: type=11 message, type=5 int32.
# NOTE: protoc-generated code — regenerate from the .proto rather than editing.
_LANGUAGE = _descriptor.Descriptor(
  name='Language',
  full_name='com.inmind.idmg.search.common.Language',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='language', full_name='com.inmind.idmg.search.common.Language.language', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='grade', full_name='com.inmind.idmg.search.common.Language.grade', index=1,
      number=2, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  # Byte offsets of this message definition inside the serialized FileDescriptorProto.
  serialized_start=2966,
  serialized_end=3046,
)
# Descriptor for the proto3 message `WorkExperience` — the largest record here:
# 26 fields covering the employer (`org`, `department`), the role (`jobTitle`,
# `jobgrade`, dates, salary), free-text descriptions, and "*Norm" normalized
# variants of several fields. Message-typed fields (type=11) are wired to their
# descriptors (_ORG, _DEPT, _SALARY, _DICT) after all descriptors exist.
# Type codes per google.protobuf.descriptor: type=2 float, type=5 int32,
# type=8 bool, type=9 string, type=11 message; label=3 is repeated.
# NOTE: protoc-generated code — regenerate from the .proto rather than editing.
_WORKEXPERIENCE = _descriptor.Descriptor(
  name='WorkExperience',
  full_name='com.inmind.idmg.search.common.WorkExperience',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='org', full_name='com.inmind.idmg.search.common.WorkExperience.org', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='jobTitle', full_name='com.inmind.idmg.search.common.WorkExperience.jobTitle', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='jobgrade', full_name='com.inmind.idmg.search.common.WorkExperience.jobgrade', index=2,
      number=3, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='startedAt', full_name='com.inmind.idmg.search.common.WorkExperience.startedAt', index=3,
      number=4, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='endedAt', full_name='com.inmind.idmg.search.common.WorkExperience.endedAt', index=4,
      number=5, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='isOnJob', full_name='com.inmind.idmg.search.common.WorkExperience.isOnJob', index=5,
      number=6, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='department', full_name='com.inmind.idmg.search.common.WorkExperience.department', index=6,
      number=7, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='salaryDetail', full_name='com.inmind.idmg.search.common.WorkExperience.salaryDetail', index=7,
      number=8, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='salary', full_name='com.inmind.idmg.search.common.WorkExperience.salary', index=8,
      number=9, type=2, cpp_type=6, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='leaderJobTitle', full_name='com.inmind.idmg.search.common.WorkExperience.leaderJobTitle', index=9,
      number=10, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='nSubordinate', full_name='com.inmind.idmg.search.common.WorkExperience.nSubordinate', index=10,
      number=11, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='description', full_name='com.inmind.idmg.search.common.WorkExperience.description', index=11,
      number=12, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='functions', full_name='com.inmind.idmg.search.common.WorkExperience.functions', index=12,
      number=13, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='industryDict', full_name='com.inmind.idmg.search.common.WorkExperience.industryDict', index=13,
      number=14, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='suggestJob', full_name='com.inmind.idmg.search.common.WorkExperience.suggestJob', index=14,
      number=15, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='isBigCompany', full_name='com.inmind.idmg.search.common.WorkExperience.isBigCompany', index=15,
      number=16, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='jobId', full_name='com.inmind.idmg.search.common.WorkExperience.jobId', index=16,
      number=17, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='skillKeyWords', full_name='com.inmind.idmg.search.common.WorkExperience.skillKeyWords', index=17,
      number=18, type=9, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='corpDesc', full_name='com.inmind.idmg.search.common.WorkExperience.corpDesc', index=18,
      number=19, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='workPerformance', full_name='com.inmind.idmg.search.common.WorkExperience.workPerformance', index=19,
      number=20, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='leaveReason', full_name='com.inmind.idmg.search.common.WorkExperience.leaveReason', index=20,
      number=21, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='isIntern', full_name='com.inmind.idmg.search.common.WorkExperience.isIntern', index=21,
      number=22, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='orgNameNorm', full_name='com.inmind.idmg.search.common.WorkExperience.orgNameNorm', index=22,
      number=23, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='industryDictNorm', full_name='com.inmind.idmg.search.common.WorkExperience.industryDictNorm', index=23,
      number=24, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='jobTitleNorm', full_name='com.inmind.idmg.search.common.WorkExperience.jobTitleNorm', index=24,
      number=25, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='functionsNorm', full_name='com.inmind.idmg.search.common.WorkExperience.functionsNorm', index=25,
      number=26, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  # Byte offsets of this message definition inside the serialized FileDescriptorProto.
  serialized_start=3049,
  serialized_end=3847,
)
# Descriptor for the proto3 message `Org` — organization info: eight string
# fields (name, industry variants, location, several "suggest"/combined keys —
# presumably search/autocomplete inputs; verify against consumers) plus one
# int32 id. Type codes per google.protobuf.descriptor: type=9 string, type=5 int32.
# NOTE: protoc-generated code — regenerate from the .proto rather than editing.
_ORG = _descriptor.Descriptor(
  name='Org',
  full_name='com.inmind.idmg.search.common.Org',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='name', full_name='com.inmind.idmg.search.common.Org.name', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='industryText', full_name='com.inmind.idmg.search.common.Org.industryText', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='coreName', full_name='com.inmind.idmg.search.common.Org.coreName', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='industry', full_name='com.inmind.idmg.search.common.Org.industry', index=3,
      number=4, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='location', full_name='com.inmind.idmg.search.common.Org.location', index=4,
      number=5, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='suggest', full_name='com.inmind.idmg.search.common.Org.suggest', index=5,
      number=6, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='nameAndInd', full_name='com.inmind.idmg.search.common.Org.nameAndInd', index=6,
      number=7, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='locAndNameAndInd', full_name='com.inmind.idmg.search.common.Org.locAndNameAndInd', index=7,
      number=8, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='locAndNameAndIndID', full_name='com.inmind.idmg.search.common.Org.locAndNameAndIndID', index=8,
      number=9, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  # Byte offsets of this message definition inside the serialized FileDescriptorProto.
  serialized_start=3850,
  serialized_end=4036,
)
# Descriptor for the proto3 message `Dept` — a single string `title` field
# (type=9 per google.protobuf.descriptor).
# NOTE: protoc-generated code — regenerate from the .proto rather than editing.
_DEPT = _descriptor.Descriptor(
  name='Dept',
  full_name='com.inmind.idmg.search.common.Dept',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='title', full_name='com.inmind.idmg.search.common.Dept.title', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  # Byte offsets of this message definition inside the serialized FileDescriptorProto.
  serialized_start=4038,
  serialized_end=4059,
)
# Descriptor for the proto3 message `ProjectExperience` — dates, an ongoing
# flag, a message-typed `org` (wired to _ORG after all descriptors exist), and
# free-text role/description fields. Type codes per google.protobuf.descriptor:
# type=8 bool, type=9 string, type=11 message.
# NOTE: protoc-generated code — regenerate from the .proto rather than editing.
_PROJECTEXPERIENCE = _descriptor.Descriptor(
  name='ProjectExperience',
  full_name='com.inmind.idmg.search.common.ProjectExperience',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='startedAt', full_name='com.inmind.idmg.search.common.ProjectExperience.startedAt', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='endedAt', full_name='com.inmind.idmg.search.common.ProjectExperience.endedAt', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='isOnProject', full_name='com.inmind.idmg.search.common.ProjectExperience.isOnProject', index=2,
      number=3, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='org', full_name='com.inmind.idmg.search.common.ProjectExperience.org', index=3,
      number=4, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='jobTitle', full_name='com.inmind.idmg.search.common.ProjectExperience.jobTitle', index=4,
      number=5, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='title', full_name='com.inmind.idmg.search.common.ProjectExperience.title', index=5,
      number=6, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='description', full_name='com.inmind.idmg.search.common.ProjectExperience.description', index=6,
      number=7, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='technique', full_name='com.inmind.idmg.search.common.ProjectExperience.technique', index=7,
      number=8, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='performance', full_name='com.inmind.idmg.search.common.ProjectExperience.performance', index=8,
      number=9, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='orgNameNorm', full_name='com.inmind.idmg.search.common.ProjectExperience.orgNameNorm', index=9,
      number=10, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  # Byte offsets of this message definition inside the serialized FileDescriptorProto.
  serialized_start=4062,
  serialized_end=4302,
)
# Descriptor for the proto3 message `EducationExperience` — dates, enrolment
# flag, degree `type`, and message-typed `major`/`school` fields plus their
# "*Norm" normalized variants (wired to _DICT/_SCHOOL after all descriptors
# exist). Type codes per google.protobuf.descriptor: type=5 int32, type=8 bool,
# type=9 string, type=11 message.
# NOTE: protoc-generated code — regenerate from the .proto rather than editing.
_EDUCATIONEXPERIENCE = _descriptor.Descriptor(
  name='EducationExperience',
  full_name='com.inmind.idmg.search.common.EducationExperience',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='startedAt', full_name='com.inmind.idmg.search.common.EducationExperience.startedAt', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='endedAt', full_name='com.inmind.idmg.search.common.EducationExperience.endedAt', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='isOnSchool', full_name='com.inmind.idmg.search.common.EducationExperience.isOnSchool', index=2,
      number=3, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='type', full_name='com.inmind.idmg.search.common.EducationExperience.type', index=3,
      number=4, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='major', full_name='com.inmind.idmg.search.common.EducationExperience.major', index=4,
      number=5, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='school', full_name='com.inmind.idmg.search.common.EducationExperience.school', index=5,
      number=6, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='byEntranceExamination', full_name='com.inmind.idmg.search.common.EducationExperience.byEntranceExamination', index=6,
      number=7, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='schoolNorm', full_name='com.inmind.idmg.search.common.EducationExperience.schoolNorm', index=7,
      number=8, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='majorNorm', full_name='com.inmind.idmg.search.common.EducationExperience.majorNorm', index=8,
      number=9, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  # Byte offsets of this message definition inside the serialized FileDescriptorProto.
  serialized_start=4305,
  serialized_end=4649,
)
# Descriptor for the proto3 message `School` — id/code/schoolType (int32) plus
# title and a `suggest` string. Type codes per google.protobuf.descriptor:
# type=5 int32, type=9 string.
# NOTE: protoc-generated code — regenerate from the .proto rather than editing.
_SCHOOL = _descriptor.Descriptor(
  name='School',
  full_name='com.inmind.idmg.search.common.School',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='id', full_name='com.inmind.idmg.search.common.School.id', index=0,
      number=1, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='code', full_name='com.inmind.idmg.search.common.School.code', index=1,
      number=2, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='title', full_name='com.inmind.idmg.search.common.School.title', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='schoolType', full_name='com.inmind.idmg.search.common.School.schoolType', index=3,
      number=4, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='suggest', full_name='com.inmind.idmg.search.common.School.suggest', index=4,
      number=5, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  # Byte offsets of this message definition inside the serialized FileDescriptorProto.
  serialized_start=4651,
  serialized_end=4737,
)
_RESUME.fields_by_name['expectFunctions'].message_type = _DICT
_RESUME.fields_by_name['industries'].message_type = _DICT
_RESUME.fields_by_name['expectIndustries'].message_type = _DICT
_RESUME.fields_by_name['expectLocations'].message_type = _LOCATION
_RESUME.fields_by_name['expectSalaryDetail'].message_type = _SALARY
_RESUME.fields_by_name['salaryDetail'].message_type = _SALARY
_RESUME.fields_by_name['address'].message_type = _ADDRESS
_RESUME.fields_by_name['registerLocation'].message_type = _LOCATION
_RESUME.fields_by_name['languageSkills'].message_type = _LANGUAGE
_RESUME.fields_by_name['skills'].message_type = _DICT
_RESUME.fields_by_name['certifications'].message_type = _DICT
_RESUME.fields_by_name['currentWorkExperience'].message_type = _WORKEXPERIENCE
_RESUME.fields_by_name['pastWorkExperiences'].message_type = _WORKEXPERIENCE
_RESUME.fields_by_name['projectExperiences'].message_type = _PROJECTEXPERIENCE
_RESUME.fields_by_name['educationExperiences'].message_type = _EDUCATIONEXPERIENCE
_RESUME.fields_by_name['workExperiences'].message_type = _WORKEXPERIENCE
_RESUME.fields_by_name['tags'].message_type = _TAG
_RESUME.fields_by_name['nativeLocation'].message_type = _LOCATION
_RESUME.fields_by_name['country'].message_type = _LOCATION
_RESUME.fields_by_name['expectIndustriesNorm'].message_type = _DICT
_RESUME.fields_by_name['industriesNorm'].message_type = _DICT
_RESUME.fields_by_name['expectFunctionsNorm'].message_type = _DICT
_RESUME.fields_by_name['languageSkillsNorm'].message_type = _LANGUAGE
_RESUME.fields_by_name['certificationsNorm'].message_type = _DICT
# Message-typed field wiring for the remaining message descriptors.
# Location is self-referential: 'ancestors' holds parent Location entries,
# forming a recursive containment chain (e.g. city -> province -> country).
_LOCATION.fields_by_name['ancestors'].message_type = _LOCATION
_ADDRESS.fields_by_name['location'].message_type = _LOCATION
_LANGUAGE.fields_by_name['language'].message_type = _DICT
# WorkExperience composes an Org, a Dept, a Salary, and several Dict-typed
# taxonomy fields (raw and "Norm" normalized variants).
_WORKEXPERIENCE.fields_by_name['org'].message_type = _ORG
_WORKEXPERIENCE.fields_by_name['department'].message_type = _DEPT
_WORKEXPERIENCE.fields_by_name['salaryDetail'].message_type = _SALARY
_WORKEXPERIENCE.fields_by_name['functions'].message_type = _DICT
_WORKEXPERIENCE.fields_by_name['industryDict'].message_type = _DICT
_WORKEXPERIENCE.fields_by_name['industryDictNorm'].message_type = _DICT
_WORKEXPERIENCE.fields_by_name['functionsNorm'].message_type = _DICT
_PROJECTEXPERIENCE.fields_by_name['org'].message_type = _ORG
# EducationExperience keeps both raw and normalized major/school fields.
_EDUCATIONEXPERIENCE.fields_by_name['major'].message_type = _DICT
_EDUCATIONEXPERIENCE.fields_by_name['school'].message_type = _SCHOOL
_EDUCATIONEXPERIENCE.fields_by_name['schoolNorm'].message_type = _SCHOOL
_EDUCATIONEXPERIENCE.fields_by_name['majorNorm'].message_type = _DICT
# Expose every top-level message descriptor on the file descriptor by its
# proto message name, then register the completed file descriptor with the
# default symbol database so it is resolvable by fully-qualified name.
DESCRIPTOR.message_types_by_name['Resume'] = _RESUME
DESCRIPTOR.message_types_by_name['Tag'] = _TAG
DESCRIPTOR.message_types_by_name['Dict'] = _DICT
DESCRIPTOR.message_types_by_name['MajorDict'] = _MAJORDICT
DESCRIPTOR.message_types_by_name['Location'] = _LOCATION
DESCRIPTOR.message_types_by_name['Salary'] = _SALARY
DESCRIPTOR.message_types_by_name['Address'] = _ADDRESS
DESCRIPTOR.message_types_by_name['Language'] = _LANGUAGE
DESCRIPTOR.message_types_by_name['WorkExperience'] = _WORKEXPERIENCE
DESCRIPTOR.message_types_by_name['Org'] = _ORG
DESCRIPTOR.message_types_by_name['Dept'] = _DEPT
DESCRIPTOR.message_types_by_name['ProjectExperience'] = _PROJECTEXPERIENCE
DESCRIPTOR.message_types_by_name['EducationExperience'] = _EDUCATIONEXPERIENCE
DESCRIPTOR.message_types_by_name['School'] = _SCHOOL
# Registration must happen after all cross-links above are in place.
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Materialize a concrete Python message class for each descriptor and register
# it with the default symbol database. Each class dict carries its descriptor,
# the owning module name, and the protoc insertion-point marker for that class
# scope (proto package com.inmind.idmg.search.common).
Resume = _reflection.GeneratedProtocolMessageType('Resume', (_message.Message,), {
    'DESCRIPTOR': _RESUME,
    '__module__': 'resume_pb2',
    # @@protoc_insertion_point(class_scope:com.inmind.idmg.search.common.Resume)
})
_sym_db.RegisterMessage(Resume)

Tag = _reflection.GeneratedProtocolMessageType('Tag', (_message.Message,), {
    'DESCRIPTOR': _TAG,
    '__module__': 'resume_pb2',
    # @@protoc_insertion_point(class_scope:com.inmind.idmg.search.common.Tag)
})
_sym_db.RegisterMessage(Tag)

Dict = _reflection.GeneratedProtocolMessageType('Dict', (_message.Message,), {
    'DESCRIPTOR': _DICT,
    '__module__': 'resume_pb2',
    # @@protoc_insertion_point(class_scope:com.inmind.idmg.search.common.Dict)
})
_sym_db.RegisterMessage(Dict)

MajorDict = _reflection.GeneratedProtocolMessageType('MajorDict', (_message.Message,), {
    'DESCRIPTOR': _MAJORDICT,
    '__module__': 'resume_pb2',
    # @@protoc_insertion_point(class_scope:com.inmind.idmg.search.common.MajorDict)
})
_sym_db.RegisterMessage(MajorDict)

Location = _reflection.GeneratedProtocolMessageType('Location', (_message.Message,), {
    'DESCRIPTOR': _LOCATION,
    '__module__': 'resume_pb2',
    # @@protoc_insertion_point(class_scope:com.inmind.idmg.search.common.Location)
})
_sym_db.RegisterMessage(Location)

Salary = _reflection.GeneratedProtocolMessageType('Salary', (_message.Message,), {
    'DESCRIPTOR': _SALARY,
    '__module__': 'resume_pb2',
    # @@protoc_insertion_point(class_scope:com.inmind.idmg.search.common.Salary)
})
_sym_db.RegisterMessage(Salary)

Address = _reflection.GeneratedProtocolMessageType('Address', (_message.Message,), {
    'DESCRIPTOR': _ADDRESS,
    '__module__': 'resume_pb2',
    # @@protoc_insertion_point(class_scope:com.inmind.idmg.search.common.Address)
})
_sym_db.RegisterMessage(Address)

Language = _reflection.GeneratedProtocolMessageType('Language', (_message.Message,), {
    'DESCRIPTOR': _LANGUAGE,
    '__module__': 'resume_pb2',
    # @@protoc_insertion_point(class_scope:com.inmind.idmg.search.common.Language)
})
_sym_db.RegisterMessage(Language)

WorkExperience = _reflection.GeneratedProtocolMessageType('WorkExperience', (_message.Message,), {
    'DESCRIPTOR': _WORKEXPERIENCE,
    '__module__': 'resume_pb2',
    # @@protoc_insertion_point(class_scope:com.inmind.idmg.search.common.WorkExperience)
})
_sym_db.RegisterMessage(WorkExperience)

Org = _reflection.GeneratedProtocolMessageType('Org', (_message.Message,), {
    'DESCRIPTOR': _ORG,
    '__module__': 'resume_pb2',
    # @@protoc_insertion_point(class_scope:com.inmind.idmg.search.common.Org)
})
_sym_db.RegisterMessage(Org)

Dept = _reflection.GeneratedProtocolMessageType('Dept', (_message.Message,), {
    'DESCRIPTOR': _DEPT,
    '__module__': 'resume_pb2',
    # @@protoc_insertion_point(class_scope:com.inmind.idmg.search.common.Dept)
})
_sym_db.RegisterMessage(Dept)

ProjectExperience = _reflection.GeneratedProtocolMessageType('ProjectExperience', (_message.Message,), {
    'DESCRIPTOR': _PROJECTEXPERIENCE,
    '__module__': 'resume_pb2',
    # @@protoc_insertion_point(class_scope:com.inmind.idmg.search.common.ProjectExperience)
})
_sym_db.RegisterMessage(ProjectExperience)

EducationExperience = _reflection.GeneratedProtocolMessageType('EducationExperience', (_message.Message,), {
    'DESCRIPTOR': _EDUCATIONEXPERIENCE,
    '__module__': 'resume_pb2',
    # @@protoc_insertion_point(class_scope:com.inmind.idmg.search.common.EducationExperience)
})
_sym_db.RegisterMessage(EducationExperience)

School = _reflection.GeneratedProtocolMessageType('School', (_message.Message,), {
    'DESCRIPTOR': _SCHOOL,
    '__module__': 'resume_pb2',
    # @@protoc_insertion_point(class_scope:com.inmind.idmg.search.common.School)
})
_sym_db.RegisterMessage(School)
# Attach serialized FileOptions to the file descriptor. The bytes b'P\001'
# encode one option from the original .proto (field number 10 varint = 1;
# presumably java_multiple_files or similar — verify against the .proto source).
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('P\001'))
# @@protoc_insertion_point(module_scope)
| 48.756238
| 8,121
| 0.729588
| 10,340
| 76,206
| 5.154545
| 0.049613
| 0.069196
| 0.065443
| 0.089985
| 0.80958
| 0.775639
| 0.768322
| 0.745431
| 0.644302
| 0.63736
| 0
| 0.047009
| 0.131302
| 76,206
| 1,562
| 8,122
| 48.787452
| 0.758097
| 0.01614
| 0
| 0.713615
| 1
| 0.004695
| 0.226143
| 0.186187
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.004024
| 0
| 0.004024
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.