Schema of the table below, reconstructed by pairing each column name with the dtype that followed it in the flattened header:

| column | dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| effective | string |
| hits | int64 |

Each of the 41 quality signals appears as two columns: `qsc_<stem>_quality_signal` holds the measured value, and the bare `qsc_<stem>` column apparently holds a per-signal 0/1 filter flag (in every row below, `hits` equals the number of flags set). Column order in the original header: the metadata columns through `alphanum_fraction`, then the 41 `*_quality_signal` columns, then the 41 bare `qsc_*` columns, then `effective` and `hits`.

| signal stem | value dtype (`qsc_<stem>_quality_signal`) | flag dtype (`qsc_<stem>`) |
|---|---|---|
| code_num_words | int64 | int64 |
| code_num_chars | float64 | int64 |
| code_mean_word_length | float64 | int64 |
| code_frac_words_unique | float64 | null |
| code_frac_chars_top_2grams | float64 | int64 |
| code_frac_chars_top_3grams | float64 | int64 |
| code_frac_chars_top_4grams | float64 | int64 |
| code_frac_chars_dupe_5grams | float64 | int64 |
| code_frac_chars_dupe_6grams | float64 | int64 |
| code_frac_chars_dupe_7grams | float64 | int64 |
| code_frac_chars_dupe_8grams | float64 | int64 |
| code_frac_chars_dupe_9grams | float64 | int64 |
| code_frac_chars_dupe_10grams | float64 | int64 |
| code_frac_chars_replacement_symbols | float64 | int64 |
| code_frac_chars_digital | float64 | int64 |
| code_frac_chars_whitespace | float64 | int64 |
| code_size_file_byte | float64 | int64 |
| code_num_lines | float64 | int64 |
| code_num_chars_line_max | float64 | int64 |
| code_num_chars_line_mean | float64 | int64 |
| code_frac_chars_alphabet | float64 | int64 |
| code_frac_chars_comments | float64 | int64 |
| code_cate_xml_start | float64 | int64 |
| code_frac_lines_dupe_lines | float64 | int64 |
| code_cate_autogen | float64 | int64 |
| code_frac_lines_long_string | float64 | int64 |
| code_frac_chars_string_length | float64 | int64 |
| code_frac_chars_long_word_length | float64 | int64 |
| code_frac_lines_string_concat | float64 | null |
| code_cate_encoded_data | float64 | int64 |
| code_frac_chars_hex_words | float64 | int64 |
| code_frac_lines_prompt_comments | float64 | int64 |
| code_frac_lines_assert | float64 | int64 |
| codepython_cate_ast | float64 | int64 |
| codepython_frac_lines_func_ratio | float64 | int64 |
| codepython_cate_var_zero | bool | int64 |
| codepython_frac_lines_pass | float64 | int64 |
| codepython_frac_lines_import | float64 | int64 |
| codepython_frac_lines_simplefunc | float64 | int64 |
| codepython_score_lines_no_logic | float64 | int64 |
| codepython_frac_lines_print | float64 | int64 |

In the row listings below, `0 ×n` abbreviates a run of n consecutive zeros.
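For orientation, a minimal sketch of how rows with this schema could be inspected. The file name and the use of pandas are assumptions for illustration; the dump itself does not say how the split is stored on disk.

```python
import pandas as pd

# "data.parquet" is a hypothetical path; the dump does not name the file.
df = pd.read_parquet("data.parquet")

# Raw measurements live in the *_quality_signal columns; the bare qsc_*
# columns look like per-signal 0/1 flags, with "hits" counting flags per row.
cols = [
    "max_stars_repo_name",
    "max_stars_repo_path",
    "qsc_code_frac_chars_dupe_5grams_quality_signal",
    "hits",
]

# Rows with many flagged signals are the likeliest boilerplate/duplicate files.
print(df.sort_values("hits", ascending=False)[cols].head())
```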
---

**Row 1** — hexsha `ed82a4a78a0149a1bab4715f105e3af1a4cafcec` · size 199 · ext `py` · lang Python

- repo `EitoZX/yondaime-hokage` · path `minato_namikaze/lib/classes/__init__.py` · head `c86285b385a60e3e47b9a7205ae36e7249b47eee` · licenses `["Apache-2.0"]` (identical across the max_stars/max_issues/max_forks column groups)
- max_stars_count: null · max_issues_count: null · max_forks_count: null (all event datetimes null)

content:

```python
from .badge_entry import *
from .barcode import *
from .converter import *
from .games import *
from .select_help import *
from .time_class import *
from .reaction_roles import *
from .music import *
```

- avg_line_length: 24.875 · max_line_length: 29 · alphanum_fraction: 0.763819
- signal values (41, schema order): 28, 199, 5.285714, 0.5, 0.472973, 0 ×10, 0.155779, 199, 8, 30, 24.875, 0.880952, 0 ×12, 1, 0, true, 0, 1, 0, 1, 0
- signal flags (41, schema order): 1, 0, 0, null, 1, 0 ×12, 1, 0 ×10, null, 0 ×6, 1, 0, 1, 0, 1, 0
- effective: 0 · hits: 6
---

**Row 2** — hexsha `131398ce55c93ef8604eff57218350f5249a2d65` · size 109 · ext `py` · lang Python

- repo `mug-auth/ssl-chewing` · path `src/utilities/commonutils.py` · head `90b9717c862087a727ef56a04e048b70174b8918` · licenses `["Apache-2.0"]` (identical across the max_stars/max_issues/max_forks column groups)
- max_stars_count: null · max_issues_count: null · max_forks_count: null (all event datetimes null)

content:

```python
import numpy as np
def sec2samples(sec: float, fs_hz: float) -> int:
return int(np.floor(sec * fs_hz))
```

- avg_line_length: 18.166667 · max_line_length: 49 · alphanum_fraction: 0.678899
- signal values (41, schema order): 19, 109, 3.789474, 0.684211, 0.111111, 0 ×9, 0.011364, 0.192661, 109, 5, 50, 21.8, 0.806818, 0 ×12, 1, 0.333333, false, 0, 0.333333, 0.333333, 1, 0
- signal flags (41, schema order): 1, 0, 0, null, 0, 0 ×12, 1, 0 ×10, null, 0 ×5, 1, 0, 0, 1, 1, 1, 0
- effective: 0 · hits: 6
---

**Row 3** — hexsha `13304b6034bf9c0b6c596b924da41809bb3ab3bd` · size 55 · ext `py` · lang Python

- repo `KoyanagiHitoshi/AtCoder-Python-Introduction` · path `code/sample_4-3-17.py` · head `6d014e333a873f545b4d32d438e57cf428b10b96` · licenses `["MIT"]` (identical across the max_stars/max_issues/max_forks column groups)
- max_stars_count: 1 (events 2022-03-29T13:50:12.000Z → 2022-03-29T13:50:12.000Z) · max_issues_count: null · max_forks_count: null

content:

```python
x = ["a", "b", "b", "c", "c", "c"]
print(x.count("d"))
```

- avg_line_length: 18.333333 · max_line_length: 34 · alphanum_fraction: 0.345455
- signal values (41, schema order): 11, 55, 1.727273, 0.636364, 0.210526, 0 ×10, 0.163636, 55, 2, 35, 27.5, 0.413043, 0 ×5, 0.127273, 0 ×6, 1, 0, false, 0, 0, 0, 0, 0.5
- signal flags (41, schema order): 1, 0, 1, null, 1, 0 ×12, 1, 0, 0, 1, 0 ×7, null, 0 ×11, 1
- effective: 0 · hits: 6
---

**Row 4** — hexsha `1335972549488556e11e14df8ef5168fd25e1ac2` · size 152 · ext `py` · lang Python

- repo `lum4chi/chinltk` · path `__init__.py` · head `f129394984858e7789bec39a2900ebff6f9ae380` · licenses `["Apache-2.0"]` (identical across the max_stars/max_issues/max_forks column groups)
- max_stars_count: null · max_issues_count: null · max_forks_count: null (all event datetimes null)

content:

```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Francesco Lumachi <francesco.lumachi@gmail.com>
from .vocabulary import Vocabulary
```

- avg_line_length: 25.333333 · max_line_length: 68 · alphanum_fraction: 0.717105
- signal values (41, schema order): 20, 152, 5.45, 0.85, 0.293578, 0 ×9, 0.037594, 0.125, 152, 5, 69, 30.4, 0.781955, 0.717105, 0 ×11, 1, 0, true, 0, 1, 0, 1, 0
- signal flags (41, schema order): 1, 0, 0, null, 1, 0 ×12, 1, 0 ×10, null, 0 ×6, 1, 0, 1, 0, 1, 0
- effective: 0 · hits: 6
---

**Row 5** — hexsha `133a67acd7d55f43f7e5091faba00d9d780aa67d` · size 60,863 · ext `py` · lang Python

- repo `elisa-aleman/MachineLearning` · path `Model_methods.py` · head `136f660041313913fee435eb84c42a12dbb9e4dd` · licenses `["MIT"]` (identical across the max_stars/max_issues/max_forks column groups)
- max_stars_count: 2 (events 2019-09-06T01:40:54.000Z → 2019-11-03T15:46:43.000Z) · max_issues_count: null · max_forks_count: 2 (events 2019-07-04T13:04:25.000Z → 2019-07-15T15:55:40.000Z)

content:

```python
#-*- coding: utf-8 -*-
import scipy
import numpy
import gensim
import sklearn.manifold  # tSNE() below uses sklearn.manifold.TSNE; a bare "import sklearn" does not load the submodule
from sklearn.feature_extraction.text import CountVectorizer
from sklearn import svm
from sklearn.ensemble import GradientBoostingClassifier
import xgboost
from sklearn.linear_model import LogisticRegression as SKLogisticRegression  # aliased: a wrapper function below shadows the name LogisticRegression
import lightgbm
import random
from Model_metrics import F_score_Kfolds, F_score_multiclass_Kfolds  # F_score_Kfolds is called throughout but was never imported
from sklearn.model_selection import train_test_split
from scipy.special import softmax
##############################
### Support Vector Machine ###
##############################
def SVM_Train(x, y, test_size=None, shuffle=True, C=1.0, kernel ='linear', gamma=0.001):
'''
Trains a Support Vector Classifier using the data and test_size given to split it into training data and testing data.
Returns the classifier, and the predictions and true values for performance testing.
Parameters for train_test_split:
Originally, the method has more parameters available, but for simplicity I only use the following:
*arrays: sequence of indexables with same length / shape[0]
Allowed inputs are lists, numpy arrays, scipy-sparse matrices or pandas dataframes.
:param (array or indexable) x: input data
:param (array or indexable) y: target data
:param (float or int) test_size: If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the test split.
If int, represents the absolute number of test samples.
If None, the value is set to the complement of the train size.
If train_size is also None, it will be set to 0.25.
:param (bool) shuffle: Whether or not to shuffle the data before splitting. If shuffle=False then stratify must be None.
Parameters for SVC:
Originally, the method has more parameters available, but for simplicity I only use the following:
:param (float) C: Regularization parameter. The strength of the regularization is inversely proportional to C.
Must be strictly positive. The penalty is a squared l2 penalty.
:param (str) kernel: Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or a callable.
If none is given, 'linear' will be used. If a callable is given it is
used to pre-compute the kernel matrix from data matrices; that matrix
should be an array of shape ``(n_samples, n_samples)``.
:param (str or float) gamma: {'scale', 'auto'} or float, default=0.001. Kernel coefficient for 'rbf', 'poly' and 'sigmoid'
:return:
(SVC) clf: The SVC classifier object
(list) test_y: true values of y, used for model performance testing purposes
(list) y_preds: predicted values of y, used for model performance testing purposes
'''
    if test_size:  # test_size may be None (the default); "test_size > 0" raises TypeError on Python 3
train_x, test_x, train_y, test_y = train_test_split(x,y, test_size=test_size, shuffle=shuffle)
else:
train_x = x
train_y = y
test_x = []
test_y = []
testsize = len(test_y)
#Define classifier
clf = svm.SVC(
kernel = kernel,
C = C,
gamma = gamma
)
clf.fit(train_x,train_y)
#Test data
y_preds = []
    if test_size:  # same None-safe check as above
for i in range(testsize):
predicted = clf.predict(test_x[i].reshape(1,-1))[0]
y_preds.append(predicted)
return clf, test_y, y_preds
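# --- Usage sketch (added illustration, not part of the original file) ---
# Assumes scikit-learn's synthetic-data helper; the names below are illustrative.
#   from sklearn.datasets import make_classification
#   X, y = make_classification(n_samples=200, n_features=10, random_state=0)
#   clf, test_y, y_preds = SVM_Train(X, y, test_size=0.25)
#   accuracy = sum(int(p == t) for p, t in zip(y_preds, test_y)) / len(test_y)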
def SVM_Kfolds(x, y, k, kernel='linear', C=1.0, gamma=0.001, multiclass=False, with_counts=True, with_lists=True, with_confusion_matrix=True):
'''
Trains a Support Vector Classifier using the shuffled and split data for each cycle of a K-folds cross validation process.
Then it calculates the performance of the SVC for each cycle and outputs the average performance results.
*arrays: sequence of indexables with same length / shape[0]
Allowed inputs are lists, numpy arrays, scipy-sparse matrices or pandas dataframes.
:param (array or indexable) x: input data
:param (array or indexable) y: target data
:param (int) k: Number of cycles for the k-folds cross validation. Test size is len(y)//k, and the data is shuffled each cycle.
Parameters for SVC:
Originally, the method has more parameters available, but for simplicity I only use the following:
:param (float) C: Regularization parameter. The strength of the regularization is inversely proportional to C.
Must be strictly positive. The penalty is a squared l2 penalty.
:param (str) kernel: Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or a callable.
If none is given, 'linear' will be used. If a callable is given it is
used to pre-compute the kernel matrix from data matrices; that matrix
should be an array of shape ``(n_samples, n_samples)``.
:param (str or float) gamma: {'scale', 'auto'} or float, default=0.001. Kernel coefficient for 'rbf', 'poly' and 'sigmoid'
Parameters for the F-score method:
:param (bool) multiclass: if true, uses the F_score_multiclass_Kfolds() method. If false, uses F_score_Kfolds() for output.
:param (bool) with_counts: if true, returns a list of the counts dictionaries as part of the resulting output for each cycle in the k-folds operation.
:param (bool) with_lists: if true, returns the list of values used to calculate the average and standard deviation of each result.
:param (bool) with_confusion_matrix: if true, returns the confusion matrix used in the multi-class analysis.
:return:
if multiclass:
results dictionary with shape:
{
0: {
"precision":{
"average":_,
"std": _,
"list": [...],
},
"recall": {
"average":_,
"std": _,
"list": [...],
},
"accuracy": {
"average":_,
"std": _,
"list": [...],
},
"F1": {
"average":_,
"std": _,
"list": [...],
},
"counts": [
{
"CP":_,
"TP":_,
"TN":_,
"IP":_,
"FP":_,
"FN":_
}, {...} ...
]
"confusion_matrix": {
"sum":_,
"average":_,
"std":_,
"list": [...]
}
},
1: {...},
2: {...},
...
class_index_n: {...}
}
if not multiclass:
results dictionary with shape:
{
"precision":{
"average":_,
"std": _,
"list": [...],
},
"recall": {
"average":_,
"std": _,
"list": [...],
},
"accuracy": {
"average":_,
"std": _,
"list": [...],
},
"F1": {
"average":_,
"std": _,
"list": [...],
},
"counts": [
{
"CP":_,
"TP":_,
"TN":_,
"IP":_,
"FP":_,
"FN":_
}, {...} ...
]
}
'''
test_size = len(y)//k
y_pred_list = []
true_ys_list = []
for t in range(k):
clf, test_y, y_preds = SVM_Train(x, y, test_size, shuffle=True, kernel=kernel, C=C, gamma=gamma)
y_pred_list.append(y_preds)
true_ys_list.append(test_y)
if multiclass:
results = F_score_multiclass_Kfolds(true_ys_list, y_pred_list, with_counts=with_counts, with_lists=with_lists, with_confusion_matrix=with_confusion_matrix)
else:
results = F_score_Kfolds(true_ys_list, y_pred_list, with_counts=with_counts, with_lists=with_lists)
return results
def SVM_weights_trained(clf,keyword_list, min_df=1, token_pattern='(?u)\\b\\w+\\b'):
'''
For knowing the weight vector in an SVM used in text classification with the Bag Of Words method.
Input a trained SVC classifier, the keyword list used to classify text and the settings used in the BOW process.
Words with stronger weight will be closer to the dividing hyperplane, and will have a stronger impact on the decision for either class.
High weighted keywords can be interpreted as vital for classification.
:param (SVC) clf: SVC classifier object.
:param (list of strings) keyword_list: list of keywords used in the Bag Of Words as features in the training process.
Parameters of the CountVectorizer:
:param (float [0.0, 1.0] or int) min_df: When building the vocabulary ignore terms that have a document frequency strictly lower than the given threshold. This value is also called cut-off in the literature. If float, the parameter represents a proportion of documents, integer absolute counts. This parameter is ignored if vocabulary is not None.
:param (string) token_pattern: Regular expression denoting what constitutes a “token”, only used if analyzer == 'word'. The default regexp select tokens of 2 or more alphanumeric characters (punctuation is completely ignored and always treated as a token separator).
:return:
influences: zipped list of feature names and weight values
'''
weights = clf.coef_.tolist()[0]
vectorizer = CountVectorizer(min_df=min_df, token_pattern=token_pattern)
IM = vectorizer.fit_transform(keyword_list)
feature_names = vectorizer.get_feature_names()
influences = list(zip(feature_names, weights))
return influences
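# --- Usage sketch (added illustration, not part of the original file) ---
# For a linear-kernel SVC trained on Bag-of-Words features:
#   influences = SVM_weights_trained(clf, keyword_list)
#   influences.sort(key=lambda pair: abs(pair[1]), reverse=True)  # strongest keywords first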
#################################
### Gradient Boosting Machine ###
#################################
def GBM_Train(x, y, test_size, shuffle=True, n_estimators=100, subsample=0.8, max_depth=3):
'''
Trains a Gradient Boosting Machine using the data and test_size given to split it into training data and testing data.
Returns the classifier, and the predictions and true values for performance testing.
Parameters for train_test_split:
Originally, the method has more parameters available, but for simplicity I only use the following:
*arrays: sequence of indexables with same length / shape[0]
Allowed inputs are lists, numpy arrays, scipy-sparse matrices or pandas dataframes.
:param (array or indexable) x: input data
:param (array or indexable) y: target data
:param (float or int) test_size: If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the test split.
If int, represents the absolute number of test samples.
If None, the value is set to the complement of the train size.
If train_size is also None, it will be set to 0.25.
:param (bool) shuffle: Whether or not to shuffle the data before splitting. If shuffle=False then stratify must be None.
Parameters for GradientBoostingClassifier:
Originally, the method has more parameters available, but for simplicity I only use the following:
:param (int) n_estimators: The number of boosting stages to perform. Gradient boosting is fairly robust to over-fitting so a large number usually results in better performance.
:param (float) subsample: The fraction of samples to be used for fitting the individual base learners.
If smaller than 1.0 this results in Stochastic Gradient Boosting.
subsample interacts with the parameter n_estimators.
Choosing subsample < 1.0 leads to a reduction of variance and an increase in bias.
:param (int) max_depth: maximum depth of the individual regression estimators.
The maximum depth limits the number of nodes in the tree.
Tune this parameter for best performance; the best value depends on the interaction of the input variables.
:return:
(GBC) clf: The GradientBoostingClassifier object
(list) test_y: true values of y, used for model performance testing purposes
(list) y_preds: predicted values of y, used for model performance testing purposes
'''
if test_size>0:
train_x, test_x, train_y, test_y = train_test_split(x,y, test_size=test_size, shuffle=shuffle)
else:
train_x = x
train_y = y
test_x = []
test_y = []
testsize = len(test_y)
#Define classifier
clf = GradientBoostingClassifier(n_estimators=n_estimators, subsample=subsample, max_depth=max_depth)
clf.fit(train_x,train_y)
#Test data
y_preds = []
if test_size>0:
for i in range(testsize):
predicted = clf.predict(test_x[i].reshape(1,-1))[0]
y_preds.append(predicted)
return clf, test_y, y_preds
def GBM_Kfolds(x, y, k, n_estimators=100, subsample=0.8, max_depth=3, multiclass=False, with_counts= True, with_lists= True, with_confusion_matrix=True):
'''
Trains a Gradient Boosting Classifier using the shuffled and split data for each cycle of a K-folds cross validation process.
Then it calculates the performance of the GBC for each cycle and outputs the average performance results.
*arrays: sequence of indexables with same length / shape[0]
Allowed inputs are lists, numpy arrays, scipy-sparse matrices or pandas dataframes.
:param (array or indexable) x: input data
:param (array or indexable) y: target data
:param (int) k: Number of cycles for the k-folds cross validation. Test size is len(y)//k, and the data is shuffled each cycle.
Parameters for GradientBoostingClassifier:
Originally, the method has more parameters available, but for simplicity I only use the following:
:param (int) n_estimators: The number of boosting stages to perform. Gradient boosting is fairly robust to over-fitting so a large number usually results in better performance.
:param (float) subsample: The fraction of samples to be used for fitting the individual base learners.
If smaller than 1.0 this results in Stochastic Gradient Boosting.
subsample interacts with the parameter n_estimators.
Choosing subsample < 1.0 leads to a reduction of variance and an increase in bias.
:param (int) max_depth: maximum depth of the individual regression estimators.
The maximum depth limits the number of nodes in the tree.
Tune this parameter for best performance; the best value depends on the interaction of the input variables.
Parameters for the F-score method:
:param (bool) multiclass: if true, uses the F_score_multiclass_Kfolds() method. If false, uses F_score_Kfolds() for output.
:param (bool) with_counts: if true, returns a list of the counts dictionaries as part of the resulting output for each cycle in the k-folds operation.
:param (bool) with_lists: if true, returns the list of values used to calculate the average and standard deviation of each result.
:param (bool) with_confusion_matrix: if true, returns the confusion matrix used in the multi-class analysis.
:return:
if multiclass:
results dictionary with shape:
{
0: {
"precision":{
"average":_,
"std": _,
"list": [...],
},
"recall": {
"average":_,
"std": _,
"list": [...],
},
"accuracy": {
"average":_,
"std": _,
"list": [...],
},
"F1": {
"average":_,
"std": _,
"list": [...],
},
"counts": [
{
"CP":_,
"TP":_,
"TN":_,
"IP":_,
"FP":_,
"FN":_
}, {...} ...
]
"confusion_matrix": {
"sum":_,
"average":_,
"std":_,
"list": [...]
}
},
1: {...},
2: {...},
...
class_index_n: {...}
}
if not multiclass:
results dictionary with shape:
{
"precision":{
"average":_,
"std": _,
"list": [...],
},
"recall": {
"average":_,
"std": _,
"list": [...],
},
"accuracy": {
"average":_,
"std": _,
"list": [...],
},
"F1": {
"average":_,
"std": _,
"list": [...],
},
"counts": [
{
"CP":_,
"TP":_,
"TN":_,
"IP":_,
"FP":_,
"FN":_
}, {...} ...
]
}
'''
test_size = len(y)//k
y_pred_list = []
true_ys_list = []
for t in range(k):
clf, test_y, y_preds = GBM_Train(x, y, test_size, shuffle=True, n_estimators=n_estimators, subsample=subsample, max_depth=max_depth)
y_pred_list.append(y_preds)
true_ys_list.append(test_y)
if multiclass:
results = F_score_multiclass_Kfolds(true_ys_list, y_pred_list, with_counts=with_counts, with_lists=with_lists, with_confusion_matrix=with_confusion_matrix)
else:
results = F_score_Kfolds(true_ys_list, y_pred_list, with_counts=with_counts, with_lists=with_lists)
return results
########################
### XGBoost Learning ###
########################
# http://xgboost.readthedocs.io/en/latest/parameter.html
def XGBoost_Train(x, y, test_size, shuffle=True, probability_cutoff=0.5, max_depth=3, learning_rate=0.1, eta=0.1, n_estimators=100, verbosity=1, objective='binary:logistic', min_child_weight=1, num_round=2):
'''
Trains an XGBoost using the data and test_size given to split it into training data and testing data.
Returns the classifier, and the predictions and true values for performance testing.
Parameters for train_test_split:
Originally, the method has more parameters available, but for simplicity I only use the following:
*arrays: sequence of indexables with same length / shape[0]
Allowed inputs are lists, numpy arrays, scipy-sparse matrices or pandas dataframes.
:param (array or indexable) x: input data
:param (array or indexable) y: target data
:param (float or int) test_size: If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the test split.
If int, represents the absolute number of test samples.
If None, the value is set to the complement of the train size.
If train_size is also None, it will be set to 0.25.
:param (bool) shuffle: Whether or not to shuffle the data before splitting. If shuffle=False then stratify must be None.
Parameters for parse_predictions_binary_Probability_Cutoff:
:param (float) probability_cutoff: Probability cutoff point for binary class decisions.
XGBoost returns probabilities of belonging to either class. In the case of binary predictions, it just returns one probability.
To be able to run performance tests, the cutoff decides it is class 1 when above it, or class 0 when below it.
Parameters for XGBoost:
Originally, the method has more parameters available, but for simplicity I only use the following:
:param (int) max_depth: Maximum depth of a tree. Increasing this value will make the model more complex and more likely to overfit. 0 is only accepted in lossguided growing policy when tree_method is set as hist and it indicates no limit on depth. Beware that XGBoost aggressively consumes memory when training a deep tree.
:param (float) learning_rate (alias eta): Step size shrinkage used in update to prevents overfitting. After each boosting step, we can directly get the weights of new features, and eta shrinks the feature weights to make the boosting process more conservative.
:param (int) n_estimators: Number of gradient boosted trees. Equivalent to number of boosting rounds.
:param (int) verbosity: Verbosity of printing messages. Valid values are 0 (silent), 1 (warning), 2 (info), 3 (debug). Sometimes XGBoost tries to change configurations based on heuristics, which is displayed as warning message. If there’s unexpected behaviour, please try to increase value of verbosity.
:param (str) objective: There's more options in XGBoost, but since I only know binary or multiclass uses, my method only accepts these:
binary:logistic: logistic regression for binary classification, output probability
binary:logitraw: logistic regression for binary classification, output score before logistic transformation
binary:hinge: hinge loss for binary classification. This makes predictions of 0 or 1, rather than producing probabilities.
multi:softmax: set XGBoost to do multiclass classification using the softmax objective, you also need to set num_class(number of classes)
multi:softprob: same as softmax, but output a vector of ndata * nclass, which can be further reshaped to ndata * nclass matrix. The result contains predicted probability of each data point belonging to each class.
:param (int) min_child_weight: Minimum sum of instance weight (hessian) needed in a child. If the tree partition step results in a leaf node with the sum of instance weight less than min_child_weight, then the building process will give up further partitioning. In linear regression task, this simply corresponds to minimum number of instances needed to be in each node. The larger min_child_weight is, the more conservative the algorithm will be.
:param (int) num_round: The number of rounds for boosting
:return:
(XGBoost) clf: The XGBoost object
(list) test_y: true values of y, used for model performance testing purposes
(list) y_preds: predicted values of y, used for model performance testing purposes
'''
if test_size>0:
train_x, test_x, train_y, test_y = train_test_split(x,y, test_size=test_size, shuffle=shuffle)
else:
train_x = x
train_y = y
test_x = []
test_y = []
#Define classifier
# specify parameters via map
param = {'max_depth':max_depth, 'learning_rate':learning_rate, 'eta':eta, 'verbosity':verbosity, 'objective':objective, 'n_estimators':n_estimators}
dtrain = xgboost.DMatrix(train_x, label=train_y)
clf = xgboost.train(param, dtrain, num_round)
#Test data
if test_size>0:
dtest = xgboost.DMatrix(test_x)
predicted_probs = clf.predict(dtest)
if param["objective"].startswith("binary"):
y_preds = parse_predictions_binary_Probability_Cutoff(predicted_probs, probability_cutoff=probability_cutoff)
elif param["objective"].startswith("multi"):
y_preds = predicted_probs.argmax(axis=1)
else:
y_preds = []
else:
y_preds = []
return clf, test_y, y_preds
def XGBoost_Kfolds(x, y, k, probability_cutoff=0.5, max_depth=3, learning_rate=0.1, n_estimators=100, eta=1, silent=1, objective='binary:logistic', min_child_weight=1, num_round=2, with_counts=True, with_lists=True, with_confusion_matrix=True):
'''
Trains an XGBoost using the shuffled and split data for each cycle of a K-folds cross validation process.
Then it calculates the performance of the GBC for each cycle and outputs the average performance results.
*arrays: sequence of indexables with same length / shape[0]
Allowed inputs are lists, numpy arrays, scipy-sparse matrices or pandas dataframes.
:param (array or indexable) x: input data
:param (array or indexable) y: target data
:param (int) k: Number of cycles for the k-folds cross validation. Test size is len(y)//k, and the data is shuffled each cycle.
Parameters for parse_predictions_binary_Probability_Cutoff:
:param (float) probability_cutoff: Probability cutoff point for binary class decisions.
XGBoost returns probabilities of belonging to either class. In the case of binary predictions, it just returns one probability.
To be able to run performance tests, the cutoff decides it is class 1 when above it, or class 0 when below it.
Parameters for XGBoost:
Originally, the method has more parameters available, but for simplicity I only use the following:
:param (int) max_depth: Maximum depth of a tree. Increasing this value will make the model more complex and more likely to overfit. 0 is only accepted in lossguided growing policy when tree_method is set as hist and it indicates no limit on depth. Beware that XGBoost aggressively consumes memory when training a deep tree.
:param (float) learning_rate (alias eta): Step size shrinkage used in update to prevents overfitting. After each boosting step, we can directly get the weights of new features, and eta shrinks the feature weights to make the boosting process more conservative.
:param (int) n_estimators: Number of gradient boosted trees. Equivalent to number of boosting rounds.
:param (int) verbosity: Verbosity of printing messages. Valid values are 0 (silent), 1 (warning), 2 (info), 3 (debug). Sometimes XGBoost tries to change configurations based on heuristics, which is displayed as warning message. If there’s unexpected behaviour, please try to increase value of verbosity.
:param (str) objective: There's more options in XGBoost, but since I only know binary or multiclass uses, my method only accepts these:
binary:logistic: logistic regression for binary classification, output probability
binary:logitraw: logistic regression for binary classification, output score before logistic transformation
binary:hinge: hinge loss for binary classification. This makes predictions of 0 or 1, rather than producing probabilities.
multi:softmax: set XGBoost to do multiclass classification using the softmax objective, you also need to set num_class(number of classes)
multi:softprob: same as softmax, but output a vector of ndata * nclass, which can be further reshaped to ndata * nclass matrix. The result contains predicted probability of each data point belonging to each class.
:param (int) min_child_weight: Minimum sum of instance weight (hessian) needed in a child. If the tree partition step results in a leaf node with the sum of instance weight less than min_child_weight, then the building process will give up further partitioning. In linear regression task, this simply corresponds to minimum number of instances needed to be in each node. The larger min_child_weight is, the more conservative the algorithm will be.
:param (int) num_round: The number of rounds for boosting
Parameters for the F-score method:
:param (bool) multiclass: if true, uses the F_score_multiclass_Kfolds() method. If false, uses F_score_Kfolds() for output.
:param (bool) with_counts: if true, returns a list of the counts dictionaries as part of the resulting output for each cycle in the k-folds operation.
:param (bool) with_lists: if true, returns the list of values used to calculate the average and standard deviation of each result.
:param (bool) with_confusion_matrix: if true, returns the confusion matrix used in the multi-class analysis.
:return:
if objective starts with multi (multiclass):
results dictionary with shape:
{
0: {
"precision":{
"average":_,
"std": _,
"list": [...],
},
"recall": {
"average":_,
"std": _,
"list": [...],
},
"accuracy": {
"average":_,
"std": _,
"list": [...],
},
"F1": {
"average":_,
"std": _,
"list": [...],
},
"counts": [
{
"CP":_,
"TP":_,
"TN":_,
"IP":_,
"FP":_,
"FN":_
}, {...} ...
]
"confusion_matrix": {
"sum":_,
"average":_,
"std":_,
"list": [...]
}
},
1: {...},
2: {...},
...
class_index_n: {...}
}
if objective starts with binary:
results dictionary with shape:
{
"precision":{
"average":_,
"std": _,
"list": [...],
},
"recall": {
"average":_,
"std": _,
"list": [...],
},
"accuracy": {
"average":_,
"std": _,
"list": [...],
},
"F1": {
"average":_,
"std": _,
"list": [...],
},
"counts": [
{
"CP":_,
"TP":_,
"TN":_,
"IP":_,
"FP":_,
"FN":_
}, {...} ...
]
}
'''
test_size = len(y)//k
y_pred_list = []
true_ys_list = []
for t in range(k):
        clf, test_y, y_preds = XGBoost_Train(x, y, test_size, shuffle=True, probability_cutoff=probability_cutoff, max_depth=max_depth, learning_rate=learning_rate, n_estimators=n_estimators, eta=eta, verbosity=silent, objective=objective, min_child_weight=min_child_weight, num_round=num_round)  # XGBoost_Train takes "verbosity", not "silent"
y_pred_list.append(y_preds)
true_ys_list.append(test_y)
if objective.startswith("multi"):
results = F_score_multiclass_Kfolds(true_ys_list, y_pred_list, with_counts=with_counts, with_lists=with_lists, with_confusion_matrix=with_confusion_matrix)
elif objective.startswith("binary"):
results = F_score_Kfolds(true_ys_list, y_pred_list, with_counts=with_counts, with_lists=with_lists)
else:
results = []
return results
#########################
### LightGBM Learning ###
#########################
def LightGBM_train(x, y, test_size = 0.1, shuffle=True, binary=True, multiclass=False, n_class=2, params = None):
'''
Trains a LightGBM using the data and test_size given to split it into training data and testing data.
Returns the classifier, and the predictions and true values for performance testing.
The method has some default parameters, but they can be overwritten by the dictionary params.
Parameters for train_test_split:
Originally, the method has more parameters available, but for simplicity I only use the following:
*arrays: sequence of indexables with same length / shape[0]
Allowed inputs are lists, numpy arrays, scipy-sparse matrices or pandas dataframes.
:param (array or indexable) x: input data
:param (array or indexable) y: target data
:param (float or int) test_size: If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the test split.
If int, represents the absolute number of test samples.
If None, the value is set to the complement of the train size.
If train_size is also None, it will be set to 0.25.
:param (bool) shuffle: Whether or not to shuffle the data before splitting. If shuffle=False then stratify must be None.
Parameter for binary or multiclass decision:
:param (bool) binary: Vestigial parameter. if false, it sets parameters for LightGBM to use several classes.
It is the opposite of the newer param multiclass, but some old projects are using this older param.
:param (bool) multiclass: if true, it sets parameters for LightGBM to use several classes.
:param (int) n_class: if multiclass, will pass to LightGBM the number of classes to use.
Parameters for LightGBM:
Originally, the method has more parameters available, but for simplicity I only use the following:
:param (dict) params: Parameters for LightGBM. Consult https://lightgbm.readthedocs.io/en/latest/Parameters.html
:return:
(LightGBM) clf: The LightGBM object
(list) test_y: true values of y, used for model performance testing purposes
(list) y_preds: predicted values of y, used for model performance testing purposes
'''
if binary and multiclass:
binary=False
if not binary and not multiclass:
multiclass=True
x = numpy.array(x)
y = numpy.array(y)
if test_size>0:
train_x, test_x, train_y, test_y = train_test_split(x,y, test_size=test_size, shuffle=shuffle)
else:
train_x = numpy.array(x)
train_y = numpy.array(y)
test_x = numpy.array([])
test_y = numpy.array([])
train_data = lightgbm.Dataset(train_x, label=train_y)
    validation_data = train_data.create_valid(test_x, label=test_y) if test_size > 0 else None  # only build a validation set when data was actually held out
#Define classifier
default_params = {
"objective": "multiclass",
"metric": "multi_logloss",
"num_class": 2,
"learning_rate": 0.05,
"min_data": 10,
"num_leaves": 31,
"verbose": -1,
"num_threads": 1,
"max_bin": 255
}
if multiclass:
default_params["objective"]="multiclass"
default_params["num_class"]= n_class
default_params["metric"]="multi_logloss"
    if params:
        default_params.update(params)  # dict.update() mutates in place and returns None
    params = default_params  # the old "params = default_params.update(params)" always produced None
if test_size>0:
clf = lightgbm.train(params, train_data, valid_sets=validation_data)
else:
clf = lightgbm.train(params, train_data)
# Test data
if test_size>0:
predicted_probs = clf.predict(test_x, num_iteration=clf.best_iteration)
# LightGBM's prediction output is always multi-class shaped even in binary, so:
y_preds = predicted_probs.argmax(axis=1)
else:
y_preds=[]
return clf, test_y, y_preds
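# --- Usage sketch (added illustration, not part of the original file) ---
# Only the keys passed in `params` override the defaults defined above:
#   clf, test_y, y_preds = LightGBM_train(x, y, test_size=0.2, multiclass=True,
#                                         n_class=3, params={"learning_rate": 0.1})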
def LightGBM_Kfolds(x, y, k, binary=True, multiclass=False, n_class=2, params = None, with_counts=True, with_lists=True, with_confusion_matrix=True):
'''
Trains an LightGBM using the shuffled and split data for each cycle of a K-folds cross validation process.
The method has some default parameters, but they can be overwritten by the dictionary params.
Then it calculates the performance of the GBC for each cycle and outputs the average performance results.
*arrays: sequence of indexables with same length / shape[0]
Allowed inputs are lists, numpy arrays, scipy-sparse matrices or pandas dataframes.
:param (array or indexable) x: input data
:param (array or indexable) y: target data
:param (int) k: Number of cycles for the k-folds cross validation. Test size is len(y)//k, and the data is shuffled each cycle.
Parameter for binary or multiclass decision:
:param (bool) binary: Vestigial parameter. if false, it sets parameters for LightGBM to use several classes.
It is the opposite of the newer param multiclass, but some old projects are using this older param.
:param (bool) multiclass: if true, it sets parameters for LightGBM to use several classes.
:param (int) n_class: if multiclass, will pass to LightGBM the number of classes to use.
Parameters for LightGBM:
Originally, the method has more parameters available, but for simplicity I only use the following:
:param (dict) params: Parameters for LightGBM. Consult https://lightgbm.readthedocs.io/en/latest/Parameters.html
Parameters for the F-score method:
:param (bool) multiclass: if true, uses the F_score_multiclass_Kfolds() method. If false, uses F_score_Kfolds() for output.
:param (bool) with_counts: if true, returns a list of the counts dictionaries as part of the resulting output for each cycle in the k-folds operation.
:param (bool) with_lists: if true, returns the list of values used to calculate the average and standard deviation of each result.
:param (bool) with_confusion_matrix: if true, returns the confusion matrix used in the multi-class analysis.
:return:
if objective starts with multi (multiclass):
results dictionary with shape:
{
0: {
"precision":{
"average":_,
"std": _,
"list": [...],
},
"recall": {
"average":_,
"std": _,
"list": [...],
},
"accuracy": {
"average":_,
"std": _,
"list": [...],
},
"F1": {
"average":_,
"std": _,
"list": [...],
},
"counts": [
{
"CP":_,
"TP":_,
"TN":_,
"IP":_,
"FP":_,
"FN":_
}, {...} ...
]
"confusion_matrix": {
"sum":_,
"average":_,
"std":_,
"list": [...]
}
},
1: {...},
2: {...},
...
class_index_n: {...}
}
if objective starts with binary:
results dictionary with shape:
{
"precision":{
"average":_,
"std": _,
"list": [...],
},
"recall": {
"average":_,
"std": _,
"list": [...],
},
"accuracy": {
"average":_,
"std": _,
"list": [...],
},
"F1": {
"average":_,
"std": _,
"list": [...],
},
"counts": [
{
"CP":_,
"TP":_,
"TN":_,
"IP":_,
"FP":_,
"FN":_
}, {...} ...
]
}
'''
if binary and multiclass:
binary=False
if not binary and not multiclass:
multiclass=True
test_size = len(y)//k
y_pred_list = []
true_ys_list = []
for t in range(k):
clf, test_y, y_preds = LightGBM_train(x, y, test_size, shuffle=True, binary=binary, multiclass=multiclass, n_class=n_class, params=params)
y_pred_list.append(y_preds)
true_ys_list.append(test_y)
if multiclass:
results = F_score_multiclass_Kfolds(true_ys_list, y_pred_list, with_counts=with_counts, with_lists=with_lists, with_confusion_matrix=with_confusion_matrix)
else:
results = F_score_Kfolds(true_ys_list, y_pred_list, with_counts=with_counts, with_lists=with_lists)
return results
def LightGBM_importance(clf, feature_names):
'''
For knowing the importance vector in a LightGBM with their feature names.
:param (LightGBM object) clf: The LightGBM object
:param (list of strings) feature_names: List of features used in the LightGBM training
:return:
importance_list: zipped list of feature names and importance values
'''
importance = clf.feature_importance()
importance_list = list(zip(feature_names, importance))
return importance_list
###########################
### Logistic Regression ###
###########################
def LogisticRegression(x,y, test_size, shuffle=True):
'''
Performs a logistic regression using the data and test_size given to split it into training data and testing data.
Returns the classifier, and the predictions and true values for performance testing.
Parameters for train_test_split:
Originally, the method has more parameters available, but for simplicity I only use the following:
*arrays: sequence of indexables with same length / shape[0]
Allowed inputs are lists, numpy arrays, scipy-sparse matrices or pandas dataframes.
:param (array or indexable) x: input data
:param (array or indexable) y: target data
:param (float or int) test_size: If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the test split.
If int, represents the absolute number of test samples.
If None, the value is set to the complement of the train size.
If train_size is also None, it will be set to 0.25.
:param (bool) shuffle: Whether or not to shuffle the data before splitting. If shuffle=False then stratify must be None.
:return:
(LogisticRegression) clf: The LogisticRegression classifier object
(list) test_y: true values of y, used for model performance testing purposes
(list) y_preds: predicted values of y, used for model performance testing purposes
'''
if test_size>0:
train_x, test_x, train_y, test_y = train_test_split(x,y, test_size=test_size, shuffle=shuffle)
else:
train_x = x
train_y = y
test_x = []
test_y = []
testsize = len(test_y)
#Define classifier
    clf = SKLogisticRegression()  # the sklearn class; calling LogisticRegression() here would recurse into this wrapper
clf.fit(train_x,train_y)
#Test data
y_preds = []
if test_size>0:
        for i in range(testsize):  # 0-based indexing; range(1, testsize + 1) skipped sample 0 and ran past the end
predicted = clf.predict(test_x[i].reshape(1,-1))[0]
y_preds.append(predicted)
return clf, test_y, y_preds
def LogisticRegression_Kfolds(x,y,k, multiclass=False, with_counts=True, with_lists=True, with_confusion_matrix=True):
'''
Performs a logistic regression using the shuffled and split data for each cycle of a K-folds cross validation process.
Then it calculates the performance of the logistic regression for each cycle and outputs the average performance results.
*arrays: sequence of indexables with same length / shape[0]
Allowed inputs are lists, numpy arrays, scipy-sparse matrices or pandas dataframes.
:param (array or indexable) x: input data
:param (array or indexable) y: target data
:param (int) k: Number of cycles for the k-folds cross validation. Test size is len(y)//k, and the data is shuffled each cycle.
Parameters for the F-score method:
:param (bool) multiclass: if true, uses the F_score_multiclass_Kfolds() method. If false, uses F_score_Kfolds() for output.
:param (bool) with_counts: if true, returns a list of the counts dictionaries as part of the resulting output for each cycle in the k-folds operation.
:param (bool) with_lists: if true, returns the list of values used to calculate the average and standard deviation of each result.
:param (bool) with_confusion_matrix: if true, returns the confusion matrix used in the multi-class analysis.
:return:
if multiclass:
results dictionary with shape:
{
0: {
"precision":{
"average":_,
"std": _,
"list": [...],
},
"recall": {
"average":_,
"std": _,
"list": [...],
},
"accuracy": {
"average":_,
"std": _,
"list": [...],
},
"F1": {
"average":_,
"std": _,
"list": [...],
},
"counts": [
{
"CP":_,
"TP":_,
"TN":_,
"IP":_,
"FP":_,
"FN":_
}, {...} ...
]
"confusion_matrix": {
"sum":_,
"average":_,
"std":_,
"list": [...]
}
},
1: {...},
2: {...},
...
class_index_n: {...}
}
if not multiclass:
results dictionary with shape:
{
"precision":{
"average":_,
"std": _,
"list": [...],
},
"recall": {
"average":_,
"std": _,
"list": [...],
},
"accuracy": {
"average":_,
"std": _,
"list": [...],
},
"F1": {
"average":_,
"std": _,
"list": [...],
},
"counts": [
{
"CP":_,
"TP":_,
"TN":_,
"IP":_,
"FP":_,
"FN":_
}, {...} ...
]
}
'''
test_size = len(y)//k
y_pred_list = []
true_ys_list = []
for t in range(k):
clf, test_y, y_preds = LogisticRegression(x,y, test_size, shuffle=True)
y_pred_list.append(y_preds)
true_ys_list.append(test_y)
if multiclass:
results = F_score_multiclass_Kfolds(true_ys_list, y_pred_list, with_counts=with_counts, with_lists=with_lists, with_confusion_matrix=with_confusion_matrix)
else:
results = F_score_Kfolds(true_ys_list, y_pred_list, with_counts=with_counts, with_lists=with_lists)
return results
######################################
############## Useful ################
######################################
def OneHot(Y):
'''
Change a list of integers like [1,2,0] into a One-Hot encoding, [[0,1,0],[0,0,1],[1,0,0]]
:param (list or 1d array) Y: list denoting multiclass targets by their index number, like [1,2,0]
:return:
(2d numpy array) oneHotY: One-Hot encoding of the input, like [[0,1,0],[0,0,1],[1,0,0]]
'''
    Y = numpy.asarray(Y)  # accept plain lists, as the docstring promises
    uniqueY = numpy.unique(Y)
    oneHotY = numpy.zeros([Y.shape[0], uniqueY.shape[0]])
for num, i in enumerate(Y):
oneHotY[num][i] = 1
return oneHotY
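# --- Worked example (added illustration, not part of the original file) ---
#   OneHot(numpy.array([1, 2, 0]))
#   -> array([[0., 1., 0.],
#             [0., 0., 1.],
#             [1., 0., 0.]])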
# get X, Y, test_x, test_y
def ReadyData(data, test_size = 1000, do_shuffle=True):
'''
get X, Y, test_x, test_y from a numpy data file
:param (numpy array) data: array with pairs of x and y data points:
[[[x1],[y1]],
[[x2],[y2]],
...
[[xn],[yn]]
]
:param (float or int) test_size: If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the test split.
If int, represents the absolute number of test samples.
If None, the value is set to the complement of the train size.
If train_size is also None, it will be set to 0.25.
:param (bool) shuffle: Whether or not to shuffle the data before splitting. If shuffle=False then stratify must be None.
:return:
X,Y,test_x,test_y: split numpy arrays
'''
x, y = zip(*data)
X, test_x, Y, test_y = train_test_split(x,y, test_size=test_size, shuffle=do_shuffle)
return X,Y,test_x,test_y
def getCurrentAverageError(model,test_x,test_y):
'''
Test training average loss squared
:param (model) model: Any classifier that has a .predict() method.
:param (numpy array) test_x, test_y: test data
:return:
(float) av_error: average loss squared
'''
pred_y = [p[0] for p in model.predict(test_x)]
losses = [(i[0]-i[1])**2 for i in zip(pred_y,test_y)]
mean_square_man = numpy.average(losses)
av_error = mean_square_man**0.5
return av_error
def parse_predictions_binary_Probability_Cutoff(predicted_probs, probability_cutoff=0.5):
'''
Probability results converted to binary 0 or 1 using the cutoff value.
(e.g. 0.85 confidence being class 1, 0.15 being class 0 with threshold 0.5)
:param (list or 1d-array) predicted_probs: Results from a classifier in the shape of [0.0 to 1.0] probability of belonging to class 1.
:param (float) probability_cutoff: Probability cutoff point for binary class decisions.
To be able to run performance tests, the cutoff decides it is class 1 when above it, or class 0 when below it.
:return:
(list) y_preds: Predictions in binary form.
'''
y_preds = []
for ypred in predicted_probs:
if ypred>probability_cutoff:
y_preds.append(1)
elif ypred==probability_cutoff:
y_preds.append(random.randint(0,1))
else:
y_preds.append(0)
return y_preds
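# --- Worked example (added illustration, not part of the original file) ---
#   parse_predictions_binary_Probability_Cutoff([0.85, 0.15, 0.5], probability_cutoff=0.5)
#   -> [1, 0, <0 or 1 at random>]  (ties at the cutoff are broken randomly)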
##############################################
####### Tensorboard helpful methods ##########
##############################################
def print_log_instructions():
'''
Instructions to check the tensorboard log from a local machine after training on a server.
'''
print("To be able to see Tensorboard on your local machine after training on a server")
print(" 1. exit current server session")
print(" 2. connect again with the following command:")
print(" ssh -L 16006:127.0.0.1:6006 -p [port] [user]@[server]")
print(" 3. execute in terminal")
print(" tensorboard --logdir='{}'".format(MakeLogFile('', server=True)))
print(" 4. on local machine, open browser on:")
print(" http://127.0.0.1:16006")
##################################
########## Other Models ##########
##################################
def LDA(vectorized, num_topics, vec_titles):
'''
Returns Latent Dirichlet allocation model for text analysis and topic detection. Still not sure how it works fully. No warranties.
corpus = vector >> using each title and its types of answers as different dimensions or "words"
id2word = titles >> the column titles_answer are the "words" in our data
:param (array) vectorized: Vectorized Bag of Words for the corpus.
:param (int) num_topics: Number of topics to split the data into.
:param (list) vec_titles: list of words used as features in the Bag of Words vector.
:return:
(LdaModel) lda: LdaModel object after applying to corpus.
'''
vec_titles = [[i] for i in vec_titles]
titles = gensim.corpora.Dictionary(vec_titles)
vector = [[(key,int(val)) for key,val in enumerate(row) if int(val)!=0] for row in vectorized]
lda = gensim.models.ldamodel.LdaModel(corpus=vector, num_topics=num_topics, id2word=titles)
return lda
def HDP(vectorized, vec_titles):
'''
Returns Hierarchical Dirichlet process model for clustering data. Still not sure how it works fully. No warranties.
corpus = vector >> using each title and its types of answers as different dimensions or "words"
id2word = titles >> the column titles_answer are the "words" in our data
:param (array) vectorized: Vectorized Bag of Words for the corpus.
:param (list) vec_titles: list of words used as features in the Bag of Words vector.
:return:
(HdpModel) hdp: HdpModel object after applying to corpus.
'''
vec_titles = [[i] for i in vec_titles]
titles = gensim.corpora.Dictionary(vec_titles)
vector = [[(key,int(val)) for key,val in enumerate(row) if int(val)!=0] for row in vectorized]
hdp = gensim.models.hdpmodel.HdpModel(corpus=vector, id2word=titles)
return hdp
def tSNE(input_filename, output_filename, header=True, n_dim=2):
'''
t-distributed stochastic neighbor embedding
This method is for visualizing multidimensional data in a lower dimension by using compression via the embedding method.
:param (path) input_filename: path to the data input file
:param (path) output_filename: path to the output file
:param (bool) header: if true, skips the first row of the input file
:param (int) n_dim: Number of dimensions to compress the data into.
:output:
Not returned but saved to output file, the tSNE compressed data form
'''
    if header:
        raw_data = numpy.genfromtxt(input_filename, delimiter=",", filling_values=(0, 0, 0), skip_header=1)
    else:
        raw_data = numpy.genfromtxt(input_filename, delimiter=",", filling_values=(0, 0, 0))
compressed_data = sklearn.manifold.TSNE(n_dim).fit_transform(raw_data)
numpy.savetxt(output_filename, compressed_data, delimiter=",")
if __name__ == '__main__':
pass
```

- avg_line_length: 51.059564 · max_line_length: 455 · alphanum_fraction: 0.531653
- signal values (41, schema order): 6728, 60863, 4.681629, 0.099287, 0.013969, 0.020001, 0.013334, 0.80675, 0.799987, 0.789764, 0.780462, 0.775446, 0.765953, 0, 0.008625, 0.382778, 60863, 1191, 456, 51.102435, 0.829846, 0.680923, 0, 0.514706, 0, 0.003676, 0.052349, 0 ×6, 1, 0.073529, false, 0.003676, 0.066176, 0, 0.205882, 0.033088
- signal flags (41, schema order): 0, 0, 0, null, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0 ×15, null, 0 ×12
- effective: 0 · hits: 6
---

**Row 6** — hexsha `1387ac966362c58b11ce5e38d301af8276a91b5e` · size 25 · ext `py` · lang Python

- repo `christroutner/Electron-Cash` · path `electroncash/utils/__init__.py` · head `d5217ed3e878bd56977181f022f9e5c43f449241` · licenses `["MIT"]` (identical across the max_stars/max_issues/max_forks column groups)
- max_stars_count: 208 (events 2017-07-25T19:52:15.000Z → 2018-09-21T13:44:58.000Z) · max_issues_count: 1,478 (events 2018-09-24T09:30:13.000Z → 2022-03-29T15:48:17.000Z) · max_forks_count: 159 (events 2018-09-24T12:56:47.000Z → 2022-03-28T23:52:17.000Z)

content:

```python
from .event import Event
```

- avg_line_length: 12.5 · max_line_length: 24 · alphanum_fraction: 0.8
- signal values (41, schema order): 4, 25, 5, 0.75, 0 ×11, 0.16, 25, 1, 25, 25, 0.952381, 0 ×12, 1, 0, true, 0, 1, 0, 1, 0
- signal flags (41, schema order): 1, 1, 0, null, 0, 0 ×12, 1, 0 ×10, null, 0 ×6, 1, 0, 1, 0, 1, 0
- effective: 0 · hits: 6
---

**Row 7** — hexsha `13e506bf8e251c17b85f4c96706a27ee04f616af` · size 104 · ext `py` · lang Python

- repo `alvinchow86/python-backend-template` · path `alvinchow_backend/app/flask/exceptions.py` · head `46c07d733d68bc8682afd8510a17bc2aa360c606` · licenses `["MIT"]` (identical across the max_stars/max_issues/max_forks column groups)
- max_stars_count: 6 (events 2021-01-07T00:20:49.000Z → 2022-01-13T04:53:12.000Z) · max_issues_count: 4 (events 2021-01-06T22:07:43.000Z → 2021-06-02T01:52:41.000Z) · max_forks_count: 1 (events 2021-11-09T07:46:44.000Z → 2021-11-09T07:46:44.000Z)

content:

```python
from alvinchow.lib.exceptions import BaseException
class CSRFValidationError(BaseException):
pass
```

- avg_line_length: 17.333333 · max_line_length: 50 · alphanum_fraction: 0.826923
- signal values (41, schema order): 10, 104, 8.6, 0.9, 0 ×11, 0.125, 104, 5, 51, 20.8, 0.945055, 0 ×12, 1, 0, true, 0.333333, 0.333333, 0, 0.666667, 0
- signal flags (41, schema order): 1, 0, 0, null, 0, 0 ×12, 1, 0 ×10, null, 0 ×6, 1, 1, 1, 0, 1, 0
- effective: 0 · hits: 6
---

**Row 8** — hexsha `b91756b28dd1587b86133a878aac9a43eab5a34d` · size 296 · ext `py` · lang Python

- repo `ahmetcagriakca/pythondataintegrator` · path `src/process/process/application/CheckDatabaseConnection/CheckDatabaseConnectionCommand.py` · head `079b968d6c893008f02c88dbe34909a228ac1c7b` · licenses `["MIT"]` (identical across the max_stars/max_issues/max_forks column groups)
- max_stars_count: 1 (events 2020-12-18T21:37:28.000Z, both) · max_issues_count: null · max_forks_count: 1 (events 2020-12-18T21:37:31.000Z, both)

content:

```python
from dataclasses import dataclass

from pdip.cqrs import ICommand
from process.application.CheckDatabaseConnection.CheckDatabaseConnectionRequest import CheckDatabaseConnectionRequest


@dataclass
class CheckDatabaseConnectionCommand(ICommand):
    request: CheckDatabaseConnectionRequest = None
| 29.6
| 117
| 0.878378
| 23
| 296
| 11.304348
| 0.652174
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.087838
| 296
| 9
| 118
| 32.888889
| 0.962963
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.833333
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b940c4304e7a0ead35a342f49548c86a5f245b14
| 94
|
py
|
Python
|
applicationLib/__init__.py
|
AntonioConsiglio/5D-robot-consolle
|
8422e788da64391722283fd08fcb8ecd3eb6068b
|
[
"Apache-2.0"
] | null | null | null |
applicationLib/__init__.py
|
AntonioConsiglio/5D-robot-consolle
|
8422e788da64391722283fd08fcb8ecd3eb6068b
|
[
"Apache-2.0"
] | null | null | null |
applicationLib/__init__.py
|
AntonioConsiglio/5D-robot-consolle
|
8422e788da64391722283fd08fcb8ecd3eb6068b
|
[
"Apache-2.0"
] | null | null | null |
from .hmiLib import *
from .utilsLib import *
from .robotLib import *
from .cameraLib import *
| 23.5
| 24
| 0.755319
| 12
| 94
| 5.916667
| 0.5
| 0.422535
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.159574
| 94
| 4
| 24
| 23.5
| 0.898734
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b96870b1a2852044425f800f3d9b385bcf11bce8
| 39
|
py
|
Python
|
__init__.py
|
praeludo/aws_osc_matrix
|
c82e5a64482680e86552a7ce6c1df51a65462746
|
[
"MIT"
] | 1
|
2018-04-03T15:38:26.000Z
|
2018-04-03T15:38:26.000Z
|
__init__.py
|
praeludo/aws_osc_matrix
|
c82e5a64482680e86552a7ce6c1df51a65462746
|
[
"MIT"
] | null | null | null |
__init__.py
|
praeludo/aws_osc_matrix
|
c82e5a64482680e86552a7ce6c1df51a65462746
|
[
"MIT"
] | null | null | null |
from aws_osc_matrix.resources import *
| 19.5
| 38
| 0.846154
| 6
| 39
| 5.166667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102564
| 39
| 1
| 39
| 39
| 0.885714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b97025abc63e832e57711798cac247930ba64ca9
| 39
|
py
|
Python
|
pretty_downloader/__init__.py
|
DEADSEC-SECURITY/pretty-downloader
|
704f3475468d3c4632b7704b8ef0685856ee7c00
|
[
"MIT"
] | 5
|
2021-02-26T22:02:26.000Z
|
2021-07-28T09:40:21.000Z
|
pretty_downloader/__init__.py
|
DEADSEC-SECURITY/pretty-downloader
|
704f3475468d3c4632b7704b8ef0685856ee7c00
|
[
"MIT"
] | null | null | null |
pretty_downloader/__init__.py
|
DEADSEC-SECURITY/pretty-downloader
|
704f3475468d3c4632b7704b8ef0685856ee7c00
|
[
"MIT"
] | 2
|
2021-02-26T21:01:00.000Z
|
2021-03-01T10:01:55.000Z
|
from .pretty_downloader import download
| 39
| 39
| 0.897436
| 5
| 39
| 6.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 39
| 1
| 39
| 39
| 0.944444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b98bdf586050937096df474fe156f6fbd9cb68c3
| 28
|
py
|
Python
|
bot/__init__.py
|
ModFest/modfest-site
|
c522d8702020b4ddcae42baf4027b061f4b18860
|
[
"MIT"
] | null | null | null |
bot/__init__.py
|
ModFest/modfest-site
|
c522d8702020b4ddcae42baf4027b061f4b18860
|
[
"MIT"
] | 2
|
2020-09-21T23:28:40.000Z
|
2020-10-11T10:27:48.000Z
|
bot/__init__.py
|
ModFest/modfest-site
|
c522d8702020b4ddcae42baf4027b061f4b18860
|
[
"MIT"
] | null | null | null |
from bot import discord_bot
| 14
| 27
| 0.857143
| 5
| 28
| 4.6
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 28
| 1
| 28
| 28
| 0.958333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b99625316a0135b31438f1b3c690d87d5da312f7
| 29
|
py
|
Python
|
src/MemeEngine/__init__.py
|
yanxx422/Python-Meme-Generator
|
ebd6907ce52df2a1bcb18cd0d4970ad2afc406e1
|
[
"CC0-1.0"
] | null | null | null |
src/MemeEngine/__init__.py
|
yanxx422/Python-Meme-Generator
|
ebd6907ce52df2a1bcb18cd0d4970ad2afc406e1
|
[
"CC0-1.0"
] | null | null | null |
src/MemeEngine/__init__.py
|
yanxx422/Python-Meme-Generator
|
ebd6907ce52df2a1bcb18cd0d4970ad2afc406e1
|
[
"CC0-1.0"
] | null | null | null |
from .Meme import MemeEngine
| 14.5
| 28
| 0.827586
| 4
| 29
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137931
| 29
| 1
| 29
| 29
| 0.96
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b9b0f8c823e7882d66f73d4ccaba858157810d31
| 24,107
|
py
|
Python
|
sympy/parsing/latex/_antlr/latexlexer.py
|
Knewton/sympy
|
a2ce003faaa504d3ad7aa57bbc53d5c1b37812bb
|
[
"BSD-3-Clause"
] | null | null | null |
sympy/parsing/latex/_antlr/latexlexer.py
|
Knewton/sympy
|
a2ce003faaa504d3ad7aa57bbc53d5c1b37812bb
|
[
"BSD-3-Clause"
] | null | null | null |
sympy/parsing/latex/_antlr/latexlexer.py
|
Knewton/sympy
|
a2ce003faaa504d3ad7aa57bbc53d5c1b37812bb
|
[
"BSD-3-Clause"
] | null | null | null |
# encoding: utf-8
# *** GENERATED BY `setup.py antlr`, DO NOT EDIT BY HAND ***
#
# Generated from ../LaTeX.g4, derived from latex2sympy
# latex2sympy is licensed under the MIT license
# https://github.com/augustt198/latex2sympy/blob/master/LICENSE.txt
#
# Generated with antlr4
# antlr4 is licensed under the BSD-3-Clause License
# https://github.com/antlr/antlr4/blob/master/LICENSE.txt
from __future__ import print_function
from antlr4 import *
from io import StringIO
import sys
def serializedATN():
with StringIO() as buf:
buf.write(u"\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\2")
buf.write(u"D\u023b\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4")
buf.write(u"\7\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r")
buf.write(u"\t\r\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22")
buf.write(u"\4\23\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4")
buf.write(u"\30\t\30\4\31\t\31\4\32\t\32\4\33\t\33\4\34\t\34\4\35")
buf.write(u"\t\35\4\36\t\36\4\37\t\37\4 \t \4!\t!\4\"\t\"\4#\t#\4")
buf.write(u"$\t$\4%\t%\4&\t&\4\'\t\'\4(\t(\4)\t)\4*\t*\4+\t+\4,\t")
buf.write(u",\4-\t-\4.\t.\4/\t/\4\60\t\60\4\61\t\61\4\62\t\62\4\63")
buf.write(u"\t\63\4\64\t\64\4\65\t\65\4\66\t\66\4\67\t\67\48\t8\4")
buf.write(u"9\t9\4:\t:\4;\t;\4<\t<\4=\t=\4>\t>\4?\t?\4@\t@\4A\tA")
buf.write(u"\4B\tB\4C\tC\4D\tD\4E\tE\3\2\3\2\3\3\6\3\u008f\n\3\r")
buf.write(u"\3\16\3\u0090\3\3\3\3\3\4\3\4\3\5\3\5\3\6\3\6\3\7\3\7")
buf.write(u"\3\b\3\b\3\t\3\t\3\n\3\n\3\13\3\13\3\f\3\f\3\r\3\r\3")
buf.write(u"\16\3\16\3\17\3\17\3\17\3\17\3\17\3\20\3\20\3\20\3\20")
buf.write(u"\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3")
buf.write(u"\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20")
buf.write(u"\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3")
buf.write(u"\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20")
buf.write(u"\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\5\20\u00e7")
buf.write(u"\n\20\3\21\3\21\3\21\3\21\3\21\3\22\3\22\3\22\3\22\3")
buf.write(u"\22\3\23\3\23\3\23\3\23\3\23\3\23\3\24\3\24\3\24\3\24")
buf.write(u"\3\24\3\25\3\25\3\25\3\25\3\26\3\26\3\26\3\26\3\26\3")
buf.write(u"\27\3\27\3\27\3\27\3\27\3\30\3\30\3\30\3\30\3\30\3\31")
buf.write(u"\3\31\3\31\3\31\3\31\3\32\3\32\3\32\3\32\3\32\3\33\3")
buf.write(u"\33\3\33\3\33\3\33\3\34\3\34\3\34\3\34\3\34\3\34\3\34")
buf.write(u"\3\34\3\35\3\35\3\35\3\35\3\35\3\35\3\35\3\35\3\36\3")
buf.write(u"\36\3\36\3\36\3\36\3\36\3\36\3\36\3\37\3\37\3\37\3\37")
buf.write(u"\3\37\3\37\3\37\3\37\3 \3 \3 \3 \3 \3 \3 \3 \3!\3!\3")
buf.write(u"!\3!\3!\3!\3!\3!\3\"\3\"\3\"\3\"\3\"\3\"\3#\3#\3#\3#")
buf.write(u"\3#\3#\3$\3$\3$\3$\3$\3$\3%\3%\3%\3%\3%\3%\3&\3&\3&\3")
buf.write(u"&\3&\3&\3\'\3\'\3\'\3\'\3\'\3\'\3(\3(\3(\3(\3(\3(\3(")
buf.write(u"\3(\3)\3)\3)\3)\3)\3)\3)\3)\3*\3*\3*\3*\3*\3*\3*\3*\3")
buf.write(u"+\3+\3+\3+\3+\3+\3+\3+\3,\3,\3,\3,\3,\3,\3,\3,\3-\3-")
buf.write(u"\3-\3-\3-\3-\3-\3-\3.\3.\3.\3.\3.\3.\3/\3/\3/\3/\3/\3")
buf.write(u"/\3/\3\60\3\60\3\60\3\60\3\60\3\60\3\61\3\61\3\61\3\61")
buf.write(u"\3\61\3\62\3\62\3\62\3\62\3\62\3\62\3\63\3\63\3\63\3")
buf.write(u"\63\3\63\3\63\3\63\3\64\3\64\3\64\3\64\3\64\3\64\3\64")
buf.write(u"\3\64\3\65\3\65\3\65\3\65\3\65\3\65\3\65\3\65\3\66\3")
buf.write(u"\66\3\66\3\66\3\66\3\66\3\66\3\66\3\67\3\67\38\38\39")
buf.write(u"\39\3:\3:\3;\3;\7;\u01eb\n;\f;\16;\u01ee\13;\3;\3;\3")
buf.write(u";\6;\u01f3\n;\r;\16;\u01f4\5;\u01f7\n;\3<\3<\3=\3=\3")
buf.write(u">\6>\u01fe\n>\r>\16>\u01ff\3>\3>\3>\3>\3>\7>\u0207\n")
buf.write(u">\f>\16>\u020a\13>\3>\7>\u020d\n>\f>\16>\u0210\13>\3")
buf.write(u">\3>\3>\3>\3>\7>\u0217\n>\f>\16>\u021a\13>\3>\3>\6>\u021e")
buf.write(u"\n>\r>\16>\u021f\5>\u0222\n>\3?\3?\3@\3@\3A\3A\3A\3A")
buf.write(u"\3A\3B\3B\3C\3C\3C\3C\3C\3D\3D\3E\3E\6E\u0238\nE\rE\16")
buf.write(u"E\u0239\3\u01ec\2F\3\3\5\4\7\5\t\6\13\7\r\b\17\t\21\n")
buf.write(u"\23\13\25\f\27\r\31\16\33\17\35\20\37\21!\22#\23%\24")
buf.write(u"\'\25)\26+\27-\30/\31\61\32\63\33\65\34\67\359\36;\37")
buf.write(u"= ?!A\"C#E$G%I&K\'M(O)Q*S+U,W-Y.[/]\60_\61a\62c\63e\64")
buf.write(u"g\65i\66k\67m8o9q:s\2u;w<y\2{=}>\177?\u0081@\u0083A\u0085")
buf.write(u"B\u0087C\u0089D\3\2\5\5\2\13\f\17\17\"\"\4\2C\\c|\3\2")
buf.write(u"\62;\2\u0247\2\3\3\2\2\2\2\5\3\2\2\2\2\7\3\2\2\2\2\t")
buf.write(u"\3\2\2\2\2\13\3\2\2\2\2\r\3\2\2\2\2\17\3\2\2\2\2\21\3")
buf.write(u"\2\2\2\2\23\3\2\2\2\2\25\3\2\2\2\2\27\3\2\2\2\2\31\3")
buf.write(u"\2\2\2\2\33\3\2\2\2\2\35\3\2\2\2\2\37\3\2\2\2\2!\3\2")
buf.write(u"\2\2\2#\3\2\2\2\2%\3\2\2\2\2\'\3\2\2\2\2)\3\2\2\2\2+")
buf.write(u"\3\2\2\2\2-\3\2\2\2\2/\3\2\2\2\2\61\3\2\2\2\2\63\3\2")
buf.write(u"\2\2\2\65\3\2\2\2\2\67\3\2\2\2\29\3\2\2\2\2;\3\2\2\2")
buf.write(u"\2=\3\2\2\2\2?\3\2\2\2\2A\3\2\2\2\2C\3\2\2\2\2E\3\2\2")
buf.write(u"\2\2G\3\2\2\2\2I\3\2\2\2\2K\3\2\2\2\2M\3\2\2\2\2O\3\2")
buf.write(u"\2\2\2Q\3\2\2\2\2S\3\2\2\2\2U\3\2\2\2\2W\3\2\2\2\2Y\3")
buf.write(u"\2\2\2\2[\3\2\2\2\2]\3\2\2\2\2_\3\2\2\2\2a\3\2\2\2\2")
buf.write(u"c\3\2\2\2\2e\3\2\2\2\2g\3\2\2\2\2i\3\2\2\2\2k\3\2\2\2")
buf.write(u"\2m\3\2\2\2\2o\3\2\2\2\2q\3\2\2\2\2u\3\2\2\2\2w\3\2\2")
buf.write(u"\2\2{\3\2\2\2\2}\3\2\2\2\2\177\3\2\2\2\2\u0081\3\2\2")
buf.write(u"\2\2\u0083\3\2\2\2\2\u0085\3\2\2\2\2\u0087\3\2\2\2\2")
buf.write(u"\u0089\3\2\2\2\3\u008b\3\2\2\2\5\u008e\3\2\2\2\7\u0094")
buf.write(u"\3\2\2\2\t\u0096\3\2\2\2\13\u0098\3\2\2\2\r\u009a\3\2")
buf.write(u"\2\2\17\u009c\3\2\2\2\21\u009e\3\2\2\2\23\u00a0\3\2\2")
buf.write(u"\2\25\u00a2\3\2\2\2\27\u00a4\3\2\2\2\31\u00a6\3\2\2\2")
buf.write(u"\33\u00a8\3\2\2\2\35\u00aa\3\2\2\2\37\u00e6\3\2\2\2!")
buf.write(u"\u00e8\3\2\2\2#\u00ed\3\2\2\2%\u00f2\3\2\2\2\'\u00f8")
buf.write(u"\3\2\2\2)\u00fd\3\2\2\2+\u0101\3\2\2\2-\u0106\3\2\2\2")
buf.write(u"/\u010b\3\2\2\2\61\u0110\3\2\2\2\63\u0115\3\2\2\2\65")
buf.write(u"\u011a\3\2\2\2\67\u011f\3\2\2\29\u0127\3\2\2\2;\u012f")
buf.write(u"\3\2\2\2=\u0137\3\2\2\2?\u013f\3\2\2\2A\u0147\3\2\2\2")
buf.write(u"C\u014f\3\2\2\2E\u0155\3\2\2\2G\u015b\3\2\2\2I\u0161")
buf.write(u"\3\2\2\2K\u0167\3\2\2\2M\u016d\3\2\2\2O\u0173\3\2\2\2")
buf.write(u"Q\u017b\3\2\2\2S\u0183\3\2\2\2U\u018b\3\2\2\2W\u0193")
buf.write(u"\3\2\2\2Y\u019b\3\2\2\2[\u01a3\3\2\2\2]\u01a9\3\2\2\2")
buf.write(u"_\u01b0\3\2\2\2a\u01b6\3\2\2\2c\u01bb\3\2\2\2e\u01c1")
buf.write(u"\3\2\2\2g\u01c8\3\2\2\2i\u01d0\3\2\2\2k\u01d8\3\2\2\2")
buf.write(u"m\u01e0\3\2\2\2o\u01e2\3\2\2\2q\u01e4\3\2\2\2s\u01e6")
buf.write(u"\3\2\2\2u\u01e8\3\2\2\2w\u01f8\3\2\2\2y\u01fa\3\2\2\2")
buf.write(u"{\u0221\3\2\2\2}\u0223\3\2\2\2\177\u0225\3\2\2\2\u0081")
buf.write(u"\u0227\3\2\2\2\u0083\u022c\3\2\2\2\u0085\u022e\3\2\2")
buf.write(u"\2\u0087\u0233\3\2\2\2\u0089\u0235\3\2\2\2\u008b\u008c")
buf.write(u"\7.\2\2\u008c\4\3\2\2\2\u008d\u008f\t\2\2\2\u008e\u008d")
buf.write(u"\3\2\2\2\u008f\u0090\3\2\2\2\u0090\u008e\3\2\2\2\u0090")
buf.write(u"\u0091\3\2\2\2\u0091\u0092\3\2\2\2\u0092\u0093\b\3\2")
buf.write(u"\2\u0093\6\3\2\2\2\u0094\u0095\7-\2\2\u0095\b\3\2\2\2")
buf.write(u"\u0096\u0097\7/\2\2\u0097\n\3\2\2\2\u0098\u0099\7,\2")
buf.write(u"\2\u0099\f\3\2\2\2\u009a\u009b\7\61\2\2\u009b\16\3\2")
buf.write(u"\2\2\u009c\u009d\7*\2\2\u009d\20\3\2\2\2\u009e\u009f")
buf.write(u"\7+\2\2\u009f\22\3\2\2\2\u00a0\u00a1\7}\2\2\u00a1\24")
buf.write(u"\3\2\2\2\u00a2\u00a3\7\177\2\2\u00a3\26\3\2\2\2\u00a4")
buf.write(u"\u00a5\7]\2\2\u00a5\30\3\2\2\2\u00a6\u00a7\7_\2\2\u00a7")
buf.write(u"\32\3\2\2\2\u00a8\u00a9\7~\2\2\u00a9\34\3\2\2\2\u00aa")
buf.write(u"\u00ab\7^\2\2\u00ab\u00ac\7n\2\2\u00ac\u00ad\7k\2\2\u00ad")
buf.write(u"\u00ae\7o\2\2\u00ae\36\3\2\2\2\u00af\u00b0\7^\2\2\u00b0")
buf.write(u"\u00b1\7v\2\2\u00b1\u00e7\7q\2\2\u00b2\u00b3\7^\2\2\u00b3")
buf.write(u"\u00b4\7t\2\2\u00b4\u00b5\7k\2\2\u00b5\u00b6\7i\2\2\u00b6")
buf.write(u"\u00b7\7j\2\2\u00b7\u00b8\7v\2\2\u00b8\u00b9\7c\2\2\u00b9")
buf.write(u"\u00ba\7t\2\2\u00ba\u00bb\7t\2\2\u00bb\u00bc\7q\2\2\u00bc")
buf.write(u"\u00e7\7y\2\2\u00bd\u00be\7^\2\2\u00be\u00bf\7T\2\2\u00bf")
buf.write(u"\u00c0\7k\2\2\u00c0\u00c1\7i\2\2\u00c1\u00c2\7j\2\2\u00c2")
buf.write(u"\u00c3\7v\2\2\u00c3\u00c4\7c\2\2\u00c4\u00c5\7t\2\2\u00c5")
buf.write(u"\u00c6\7t\2\2\u00c6\u00c7\7q\2\2\u00c7\u00e7\7y\2\2\u00c8")
buf.write(u"\u00c9\7^\2\2\u00c9\u00ca\7n\2\2\u00ca\u00cb\7q\2\2\u00cb")
buf.write(u"\u00cc\7p\2\2\u00cc\u00cd\7i\2\2\u00cd\u00ce\7t\2\2\u00ce")
buf.write(u"\u00cf\7k\2\2\u00cf\u00d0\7i\2\2\u00d0\u00d1\7j\2\2\u00d1")
buf.write(u"\u00d2\7v\2\2\u00d2\u00d3\7c\2\2\u00d3\u00d4\7t\2\2\u00d4")
buf.write(u"\u00d5\7t\2\2\u00d5\u00d6\7q\2\2\u00d6\u00e7\7y\2\2\u00d7")
buf.write(u"\u00d8\7^\2\2\u00d8\u00d9\7N\2\2\u00d9\u00da\7q\2\2\u00da")
buf.write(u"\u00db\7p\2\2\u00db\u00dc\7i\2\2\u00dc\u00dd\7t\2\2\u00dd")
buf.write(u"\u00de\7k\2\2\u00de\u00df\7i\2\2\u00df\u00e0\7j\2\2\u00e0")
buf.write(u"\u00e1\7v\2\2\u00e1\u00e2\7c\2\2\u00e2\u00e3\7t\2\2\u00e3")
buf.write(u"\u00e4\7t\2\2\u00e4\u00e5\7q\2\2\u00e5\u00e7\7y\2\2\u00e6")
buf.write(u"\u00af\3\2\2\2\u00e6\u00b2\3\2\2\2\u00e6\u00bd\3\2\2")
buf.write(u"\2\u00e6\u00c8\3\2\2\2\u00e6\u00d7\3\2\2\2\u00e7 \3\2")
buf.write(u"\2\2\u00e8\u00e9\7^\2\2\u00e9\u00ea\7k\2\2\u00ea\u00eb")
buf.write(u"\7p\2\2\u00eb\u00ec\7v\2\2\u00ec\"\3\2\2\2\u00ed\u00ee")
buf.write(u"\7^\2\2\u00ee\u00ef\7u\2\2\u00ef\u00f0\7w\2\2\u00f0\u00f1")
buf.write(u"\7o\2\2\u00f1$\3\2\2\2\u00f2\u00f3\7^\2\2\u00f3\u00f4")
buf.write(u"\7r\2\2\u00f4\u00f5\7t\2\2\u00f5\u00f6\7q\2\2\u00f6\u00f7")
buf.write(u"\7f\2\2\u00f7&\3\2\2\2\u00f8\u00f9\7^\2\2\u00f9\u00fa")
buf.write(u"\7n\2\2\u00fa\u00fb\7q\2\2\u00fb\u00fc\7i\2\2\u00fc(")
buf.write(u"\3\2\2\2\u00fd\u00fe\7^\2\2\u00fe\u00ff\7n\2\2\u00ff")
buf.write(u"\u0100\7p\2\2\u0100*\3\2\2\2\u0101\u0102\7^\2\2\u0102")
buf.write(u"\u0103\7u\2\2\u0103\u0104\7k\2\2\u0104\u0105\7p\2\2\u0105")
buf.write(u",\3\2\2\2\u0106\u0107\7^\2\2\u0107\u0108\7e\2\2\u0108")
buf.write(u"\u0109\7q\2\2\u0109\u010a\7u\2\2\u010a.\3\2\2\2\u010b")
buf.write(u"\u010c\7^\2\2\u010c\u010d\7v\2\2\u010d\u010e\7c\2\2\u010e")
buf.write(u"\u010f\7p\2\2\u010f\60\3\2\2\2\u0110\u0111\7^\2\2\u0111")
buf.write(u"\u0112\7e\2\2\u0112\u0113\7u\2\2\u0113\u0114\7e\2\2\u0114")
buf.write(u"\62\3\2\2\2\u0115\u0116\7^\2\2\u0116\u0117\7u\2\2\u0117")
buf.write(u"\u0118\7g\2\2\u0118\u0119\7e\2\2\u0119\64\3\2\2\2\u011a")
buf.write(u"\u011b\7^\2\2\u011b\u011c\7e\2\2\u011c\u011d\7q\2\2\u011d")
buf.write(u"\u011e\7v\2\2\u011e\66\3\2\2\2\u011f\u0120\7^\2\2\u0120")
buf.write(u"\u0121\7c\2\2\u0121\u0122\7t\2\2\u0122\u0123\7e\2\2\u0123")
buf.write(u"\u0124\7u\2\2\u0124\u0125\7k\2\2\u0125\u0126\7p\2\2\u0126")
buf.write(u"8\3\2\2\2\u0127\u0128\7^\2\2\u0128\u0129\7c\2\2\u0129")
buf.write(u"\u012a\7t\2\2\u012a\u012b\7e\2\2\u012b\u012c\7e\2\2\u012c")
buf.write(u"\u012d\7q\2\2\u012d\u012e\7u\2\2\u012e:\3\2\2\2\u012f")
buf.write(u"\u0130\7^\2\2\u0130\u0131\7c\2\2\u0131\u0132\7t\2\2\u0132")
buf.write(u"\u0133\7e\2\2\u0133\u0134\7v\2\2\u0134\u0135\7c\2\2\u0135")
buf.write(u"\u0136\7p\2\2\u0136<\3\2\2\2\u0137\u0138\7^\2\2\u0138")
buf.write(u"\u0139\7c\2\2\u0139\u013a\7t\2\2\u013a\u013b\7e\2\2\u013b")
buf.write(u"\u013c\7e\2\2\u013c\u013d\7u\2\2\u013d\u013e\7e\2\2\u013e")
buf.write(u">\3\2\2\2\u013f\u0140\7^\2\2\u0140\u0141\7c\2\2\u0141")
buf.write(u"\u0142\7t\2\2\u0142\u0143\7e\2\2\u0143\u0144\7u\2\2\u0144")
buf.write(u"\u0145\7g\2\2\u0145\u0146\7e\2\2\u0146@\3\2\2\2\u0147")
buf.write(u"\u0148\7^\2\2\u0148\u0149\7c\2\2\u0149\u014a\7t\2\2\u014a")
buf.write(u"\u014b\7e\2\2\u014b\u014c\7e\2\2\u014c\u014d\7q\2\2\u014d")
buf.write(u"\u014e\7v\2\2\u014eB\3\2\2\2\u014f\u0150\7^\2\2\u0150")
buf.write(u"\u0151\7u\2\2\u0151\u0152\7k\2\2\u0152\u0153\7p\2\2\u0153")
buf.write(u"\u0154\7j\2\2\u0154D\3\2\2\2\u0155\u0156\7^\2\2\u0156")
buf.write(u"\u0157\7e\2\2\u0157\u0158\7q\2\2\u0158\u0159\7u\2\2\u0159")
buf.write(u"\u015a\7j\2\2\u015aF\3\2\2\2\u015b\u015c\7^\2\2\u015c")
buf.write(u"\u015d\7v\2\2\u015d\u015e\7c\2\2\u015e\u015f\7p\2\2\u015f")
buf.write(u"\u0160\7j\2\2\u0160H\3\2\2\2\u0161\u0162\7^\2\2\u0162")
buf.write(u"\u0163\7e\2\2\u0163\u0164\7u\2\2\u0164\u0165\7e\2\2\u0165")
buf.write(u"\u0166\7j\2\2\u0166J\3\2\2\2\u0167\u0168\7^\2\2\u0168")
buf.write(u"\u0169\7u\2\2\u0169\u016a\7g\2\2\u016a\u016b\7e\2\2\u016b")
buf.write(u"\u016c\7j\2\2\u016cL\3\2\2\2\u016d\u016e\7^\2\2\u016e")
buf.write(u"\u016f\7e\2\2\u016f\u0170\7q\2\2\u0170\u0171\7v\2\2\u0171")
buf.write(u"\u0172\7j\2\2\u0172N\3\2\2\2\u0173\u0174\7^\2\2\u0174")
buf.write(u"\u0175\7c\2\2\u0175\u0176\7t\2\2\u0176\u0177\7u\2\2\u0177")
buf.write(u"\u0178\7k\2\2\u0178\u0179\7p\2\2\u0179\u017a\7j\2\2\u017a")
buf.write(u"P\3\2\2\2\u017b\u017c\7^\2\2\u017c\u017d\7c\2\2\u017d")
buf.write(u"\u017e\7t\2\2\u017e\u017f\7e\2\2\u017f\u0180\7q\2\2\u0180")
buf.write(u"\u0181\7u\2\2\u0181\u0182\7j\2\2\u0182R\3\2\2\2\u0183")
buf.write(u"\u0184\7^\2\2\u0184\u0185\7c\2\2\u0185\u0186\7t\2\2\u0186")
buf.write(u"\u0187\7v\2\2\u0187\u0188\7c\2\2\u0188\u0189\7p\2\2\u0189")
buf.write(u"\u018a\7j\2\2\u018aT\3\2\2\2\u018b\u018c\7^\2\2\u018c")
buf.write(u"\u018d\7c\2\2\u018d\u018e\7t\2\2\u018e\u018f\7e\2\2\u018f")
buf.write(u"\u0190\7u\2\2\u0190\u0191\7e\2\2\u0191\u0192\7j\2\2\u0192")
buf.write(u"V\3\2\2\2\u0193\u0194\7^\2\2\u0194\u0195\7c\2\2\u0195")
buf.write(u"\u0196\7t\2\2\u0196\u0197\7u\2\2\u0197\u0198\7g\2\2\u0198")
buf.write(u"\u0199\7e\2\2\u0199\u019a\7j\2\2\u019aX\3\2\2\2\u019b")
buf.write(u"\u019c\7^\2\2\u019c\u019d\7c\2\2\u019d\u019e\7t\2\2\u019e")
buf.write(u"\u019f\7e\2\2\u019f\u01a0\7q\2\2\u01a0\u01a1\7v\2\2\u01a1")
buf.write(u"\u01a2\7j\2\2\u01a2Z\3\2\2\2\u01a3\u01a4\7^\2\2\u01a4")
buf.write(u"\u01a5\7u\2\2\u01a5\u01a6\7s\2\2\u01a6\u01a7\7t\2\2\u01a7")
buf.write(u"\u01a8\7v\2\2\u01a8\\\3\2\2\2\u01a9\u01aa\7^\2\2\u01aa")
buf.write(u"\u01ab\7v\2\2\u01ab\u01ac\7k\2\2\u01ac\u01ad\7o\2\2\u01ad")
buf.write(u"\u01ae\7g\2\2\u01ae\u01af\7u\2\2\u01af^\3\2\2\2\u01b0")
buf.write(u"\u01b1\7^\2\2\u01b1\u01b2\7e\2\2\u01b2\u01b3\7f\2\2\u01b3")
buf.write(u"\u01b4\7q\2\2\u01b4\u01b5\7v\2\2\u01b5`\3\2\2\2\u01b6")
buf.write(u"\u01b7\7^\2\2\u01b7\u01b8\7f\2\2\u01b8\u01b9\7k\2\2\u01b9")
buf.write(u"\u01ba\7x\2\2\u01bab\3\2\2\2\u01bb\u01bc\7^\2\2\u01bc")
buf.write(u"\u01bd\7h\2\2\u01bd\u01be\7t\2\2\u01be\u01bf\7c\2\2\u01bf")
buf.write(u"\u01c0\7e\2\2\u01c0d\3\2\2\2\u01c1\u01c2\7^\2\2\u01c2")
buf.write(u"\u01c3\7d\2\2\u01c3\u01c4\7k\2\2\u01c4\u01c5\7p\2\2\u01c5")
buf.write(u"\u01c6\7q\2\2\u01c6\u01c7\7o\2\2\u01c7f\3\2\2\2\u01c8")
buf.write(u"\u01c9\7^\2\2\u01c9\u01ca\7f\2\2\u01ca\u01cb\7d\2\2\u01cb")
buf.write(u"\u01cc\7k\2\2\u01cc\u01cd\7p\2\2\u01cd\u01ce\7q\2\2\u01ce")
buf.write(u"\u01cf\7o\2\2\u01cfh\3\2\2\2\u01d0\u01d1\7^\2\2\u01d1")
buf.write(u"\u01d2\7v\2\2\u01d2\u01d3\7d\2\2\u01d3\u01d4\7k\2\2\u01d4")
buf.write(u"\u01d5\7p\2\2\u01d5\u01d6\7q\2\2\u01d6\u01d7\7o\2\2\u01d7")
buf.write(u"j\3\2\2\2\u01d8\u01d9\7^\2\2\u01d9\u01da\7o\2\2\u01da")
buf.write(u"\u01db\7c\2\2\u01db\u01dc\7v\2\2\u01dc\u01dd\7j\2\2\u01dd")
buf.write(u"\u01de\7k\2\2\u01de\u01df\7v\2\2\u01dfl\3\2\2\2\u01e0")
buf.write(u"\u01e1\7a\2\2\u01e1n\3\2\2\2\u01e2\u01e3\7`\2\2\u01e3")
buf.write(u"p\3\2\2\2\u01e4\u01e5\7<\2\2\u01e5r\3\2\2\2\u01e6\u01e7")
buf.write(u"\t\2\2\2\u01e7t\3\2\2\2\u01e8\u01ec\7f\2\2\u01e9\u01eb")
buf.write(u"\5s:\2\u01ea\u01e9\3\2\2\2\u01eb\u01ee\3\2\2\2\u01ec")
buf.write(u"\u01ed\3\2\2\2\u01ec\u01ea\3\2\2\2\u01ed\u01f6\3\2\2")
buf.write(u"\2\u01ee\u01ec\3\2\2\2\u01ef\u01f7\t\3\2\2\u01f0\u01f2")
buf.write(u"\7^\2\2\u01f1\u01f3\t\3\2\2\u01f2\u01f1\3\2\2\2\u01f3")
buf.write(u"\u01f4\3\2\2\2\u01f4\u01f2\3\2\2\2\u01f4\u01f5\3\2\2")
buf.write(u"\2\u01f5\u01f7\3\2\2\2\u01f6\u01ef\3\2\2\2\u01f6\u01f0")
buf.write(u"\3\2\2\2\u01f7v\3\2\2\2\u01f8\u01f9\t\3\2\2\u01f9x\3")
buf.write(u"\2\2\2\u01fa\u01fb\t\4\2\2\u01fbz\3\2\2\2\u01fc\u01fe")
buf.write(u"\5y=\2\u01fd\u01fc\3\2\2\2\u01fe\u01ff\3\2\2\2\u01ff")
buf.write(u"\u01fd\3\2\2\2\u01ff\u0200\3\2\2\2\u0200\u0208\3\2\2")
buf.write(u"\2\u0201\u0202\7.\2\2\u0202\u0203\5y=\2\u0203\u0204\5")
buf.write(u"y=\2\u0204\u0205\5y=\2\u0205\u0207\3\2\2\2\u0206\u0201")
buf.write(u"\3\2\2\2\u0207\u020a\3\2\2\2\u0208\u0206\3\2\2\2\u0208")
buf.write(u"\u0209\3\2\2\2\u0209\u0222\3\2\2\2\u020a\u0208\3\2\2")
buf.write(u"\2\u020b\u020d\5y=\2\u020c\u020b\3\2\2\2\u020d\u0210")
buf.write(u"\3\2\2\2\u020e\u020c\3\2\2\2\u020e\u020f\3\2\2\2\u020f")
buf.write(u"\u0218\3\2\2\2\u0210\u020e\3\2\2\2\u0211\u0212\7.\2\2")
buf.write(u"\u0212\u0213\5y=\2\u0213\u0214\5y=\2\u0214\u0215\5y=")
buf.write(u"\2\u0215\u0217\3\2\2\2\u0216\u0211\3\2\2\2\u0217\u021a")
buf.write(u"\3\2\2\2\u0218\u0216\3\2\2\2\u0218\u0219\3\2\2\2\u0219")
buf.write(u"\u021b\3\2\2\2\u021a\u0218\3\2\2\2\u021b\u021d\7\60\2")
buf.write(u"\2\u021c\u021e\5y=\2\u021d\u021c\3\2\2\2\u021e\u021f")
buf.write(u"\3\2\2\2\u021f\u021d\3\2\2\2\u021f\u0220\3\2\2\2\u0220")
buf.write(u"\u0222\3\2\2\2\u0221\u01fd\3\2\2\2\u0221\u020e\3\2\2")
buf.write(u"\2\u0222|\3\2\2\2\u0223\u0224\7?\2\2\u0224~\3\2\2\2\u0225")
buf.write(u"\u0226\7>\2\2\u0226\u0080\3\2\2\2\u0227\u0228\7^\2\2")
buf.write(u"\u0228\u0229\7n\2\2\u0229\u022a\7g\2\2\u022a\u022b\7")
buf.write(u"s\2\2\u022b\u0082\3\2\2\2\u022c\u022d\7@\2\2\u022d\u0084")
buf.write(u"\3\2\2\2\u022e\u022f\7^\2\2\u022f\u0230\7i\2\2\u0230")
buf.write(u"\u0231\7g\2\2\u0231\u0232\7s\2\2\u0232\u0086\3\2\2\2")
buf.write(u"\u0233\u0234\7#\2\2\u0234\u0088\3\2\2\2\u0235\u0237\7")
buf.write(u"^\2\2\u0236\u0238\t\3\2\2\u0237\u0236\3\2\2\2\u0238\u0239")
buf.write(u"\3\2\2\2\u0239\u0237\3\2\2\2\u0239\u023a\3\2\2\2\u023a")
buf.write(u"\u008a\3\2\2\2\17\2\u0090\u00e6\u01ec\u01f4\u01f6\u01ff")
buf.write(u"\u0208\u020e\u0218\u021f\u0221\u0239\3\b\2\2")
return buf.getvalue()
class LaTeXLexer(Lexer):
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
T__0 = 1
WS = 2
ADD = 3
SUB = 4
MUL = 5
DIV = 6
L_PAREN = 7
R_PAREN = 8
L_BRACE = 9
R_BRACE = 10
L_BRACKET = 11
R_BRACKET = 12
BAR = 13
FUNC_LIM = 14
LIM_APPROACH_SYM = 15
FUNC_INT = 16
FUNC_SUM = 17
FUNC_PROD = 18
FUNC_LOG = 19
FUNC_LN = 20
FUNC_SIN = 21
FUNC_COS = 22
FUNC_TAN = 23
FUNC_CSC = 24
FUNC_SEC = 25
FUNC_COT = 26
FUNC_ARCSIN = 27
FUNC_ARCCOS = 28
FUNC_ARCTAN = 29
FUNC_ARCCSC = 30
FUNC_ARCSEC = 31
FUNC_ARCCOT = 32
FUNC_SINH = 33
FUNC_COSH = 34
FUNC_TANH = 35
FUNC_CSCH = 36
FUNC_SECH = 37
FUNC_COTH = 38
FUNC_ARSINH = 39
FUNC_ARCOSH = 40
FUNC_ARTANH = 41
FUNC_ARCSCH = 42
FUNC_ARSECH = 43
FUNC_ARCOTH = 44
FUNC_SQRT = 45
CMD_TIMES = 46
CMD_CDOT = 47
CMD_DIV = 48
CMD_FRAC = 49
CMD_BINOM = 50
CMD_DBINOM = 51
CMD_TBINOM = 52
CMD_MATHIT = 53
UNDERSCORE = 54
CARET = 55
COLON = 56
DIFFERENTIAL = 57
LETTER = 58
NUMBER = 59
EQUAL = 60
LT = 61
LTE = 62
GT = 63
GTE = 64
BANG = 65
SYMBOL = 66
channelNames = [ u"DEFAULT_TOKEN_CHANNEL", u"HIDDEN" ]
modeNames = [ u"DEFAULT_MODE" ]
literalNames = [ u"<INVALID>",
u"','", u"'+'", u"'-'", u"'*'", u"'/'", u"'('", u"')'", u"'{'",
u"'}'", u"'['", u"']'", u"'|'", u"'\\lim'", u"'\\int'", u"'\\sum'",
u"'\\prod'", u"'\\log'", u"'\\ln'", u"'\\sin'", u"'\\cos'",
u"'\\tan'", u"'\\csc'", u"'\\sec'", u"'\\cot'", u"'\\arcsin'",
u"'\\arccos'", u"'\\arctan'", u"'\\arccsc'", u"'\\arcsec'",
u"'\\arccot'", u"'\\sinh'", u"'\\cosh'", u"'\\tanh'", u"'\\csch'",
u"'\\sech'", u"'\\coth'", u"'\\arsinh'", u"'\\arcosh'", u"'\\artanh'",
u"'\\arcsch'", u"'\\arsech'", u"'\\arcoth'", u"'\\sqrt'", u"'\\times'",
u"'\\cdot'", u"'\\div'", u"'\\frac'", u"'\\binom'", u"'\\dbinom'",
u"'\\tbinom'", u"'\\mathit'", u"'_'", u"'^'", u"':'", u"'='",
u"'<'", u"'\\leq'", u"'>'", u"'\\geq'", u"'!'" ]
symbolicNames = [ u"<INVALID>",
u"WS", u"ADD", u"SUB", u"MUL", u"DIV", u"L_PAREN", u"R_PAREN",
u"L_BRACE", u"R_BRACE", u"L_BRACKET", u"R_BRACKET", u"BAR",
u"FUNC_LIM", u"LIM_APPROACH_SYM", u"FUNC_INT", u"FUNC_SUM",
u"FUNC_PROD", u"FUNC_LOG", u"FUNC_LN", u"FUNC_SIN", u"FUNC_COS",
u"FUNC_TAN", u"FUNC_CSC", u"FUNC_SEC", u"FUNC_COT", u"FUNC_ARCSIN",
u"FUNC_ARCCOS", u"FUNC_ARCTAN", u"FUNC_ARCCSC", u"FUNC_ARCSEC",
u"FUNC_ARCCOT", u"FUNC_SINH", u"FUNC_COSH", u"FUNC_TANH", u"FUNC_CSCH",
u"FUNC_SECH", u"FUNC_COTH", u"FUNC_ARSINH", u"FUNC_ARCOSH",
u"FUNC_ARTANH", u"FUNC_ARCSCH", u"FUNC_ARSECH", u"FUNC_ARCOTH",
u"FUNC_SQRT", u"CMD_TIMES", u"CMD_CDOT", u"CMD_DIV", u"CMD_FRAC",
u"CMD_BINOM", u"CMD_DBINOM", u"CMD_TBINOM", u"CMD_MATHIT", u"UNDERSCORE",
u"CARET", u"COLON", u"DIFFERENTIAL", u"LETTER", u"NUMBER", u"EQUAL",
u"LT", u"LTE", u"GT", u"GTE", u"BANG", u"SYMBOL" ]
ruleNames = [ u"T__0", u"WS", u"ADD", u"SUB", u"MUL", u"DIV", u"L_PAREN",
u"R_PAREN", u"L_BRACE", u"R_BRACE", u"L_BRACKET", u"R_BRACKET",
u"BAR", u"FUNC_LIM", u"LIM_APPROACH_SYM", u"FUNC_INT",
u"FUNC_SUM", u"FUNC_PROD", u"FUNC_LOG", u"FUNC_LN", u"FUNC_SIN",
u"FUNC_COS", u"FUNC_TAN", u"FUNC_CSC", u"FUNC_SEC", u"FUNC_COT",
u"FUNC_ARCSIN", u"FUNC_ARCCOS", u"FUNC_ARCTAN", u"FUNC_ARCCSC",
u"FUNC_ARCSEC", u"FUNC_ARCCOT", u"FUNC_SINH", u"FUNC_COSH",
u"FUNC_TANH", u"FUNC_CSCH", u"FUNC_SECH", u"FUNC_COTH",
u"FUNC_ARSINH", u"FUNC_ARCOSH", u"FUNC_ARTANH", u"FUNC_ARCSCH",
u"FUNC_ARSECH", u"FUNC_ARCOTH", u"FUNC_SQRT", u"CMD_TIMES",
u"CMD_CDOT", u"CMD_DIV", u"CMD_FRAC", u"CMD_BINOM", u"CMD_DBINOM",
u"CMD_TBINOM", u"CMD_MATHIT", u"UNDERSCORE", u"CARET",
u"COLON", u"WS_CHAR", u"DIFFERENTIAL", u"LETTER", u"DIGIT",
u"NUMBER", u"EQUAL", u"LT", u"LTE", u"GT", u"GTE", u"BANG",
u"SYMBOL" ]
grammarFileName = u"LaTeX.g4"
def __init__(self, input=None, output=sys.stdout):
super(LaTeXLexer, self).__init__(input, output=output)
self.checkVersion("4.7.2")
self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())
self._actions = None
self._predicates = None
| 60.418546
| 103
| 0.571452
| 5,608
| 24,107
| 2.426177
| 0.140514
| 0.122152
| 0.057328
| 0.063501
| 0.2665
| 0.219682
| 0.206968
| 0.166544
| 0.139203
| 0.132074
| 0
| 0.319728
| 0.151367
| 24,107
| 398
| 104
| 60.570352
| 0.345342
| 0.015805
| 0
| 0.01084
| 1
| 0.612466
| 0.612812
| 0.545378
| 0
| 0
| 0
| 0
| 0
| 1
| 0.00542
| false
| 0
| 0.01084
| 0
| 0.222222
| 0.00271
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b9d9eba24d5ea1d630bc1392beeccdd1233a1f7a
| 7,814
|
py
|
Python
|
tests/unit/utils/test_templates.py
|
yuriks/salt
|
d2a5bd8adddb98ec1718d79384aa13b4f37e8028
|
[
"Apache-2.0",
"MIT"
] | 1
|
2019-09-17T17:48:55.000Z
|
2019-09-17T17:48:55.000Z
|
tests/unit/utils/test_templates.py
|
yuriks/salt
|
d2a5bd8adddb98ec1718d79384aa13b4f37e8028
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
tests/unit/utils/test_templates.py
|
yuriks/salt
|
d2a5bd8adddb98ec1718d79384aa13b4f37e8028
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
Unit tests for salt.utils.templates.py
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import os
import sys
import logging
# Import Salt libs
import salt.utils.templates
import salt.utils.files
# Import Salt Testing Libs
from tests.support.helpers import with_tempdir
from tests.support.unit import TestCase, skipIf
log = logging.getLogger(__name__)
### Here we go!
class RenderTestCase(TestCase):
def setUp(self):
# Default context for salt.utils.templates.render_*_tmpl to work
self.context = {
'opts': {
'cachedir': '/D',
'__cli': 'salt',
},
'saltenv': None,
}
### Tests for Jinja (whitespace-friendly)
def test_render_jinja_sanity(self):
tmpl = '''OK'''
res = salt.utils.templates.render_jinja_tmpl(tmpl, dict(self.context))
self.assertEqual(res, 'OK')
def test_render_jinja_evaluate(self):
tmpl = '''{{ "OK" }}'''
res = salt.utils.templates.render_jinja_tmpl(tmpl, dict(self.context))
self.assertEqual(res, 'OK')
def test_render_jinja_evaluate_multi(self):
tmpl = '''{% if 1 -%}OK{%- endif %}'''
res = salt.utils.templates.render_jinja_tmpl(tmpl, dict(self.context))
self.assertEqual(res, 'OK')
def test_render_jinja_variable(self):
tmpl = '''{{ var }}'''
ctx = dict(self.context)
ctx['var'] = 'OK'
res = salt.utils.templates.render_jinja_tmpl(tmpl, ctx)
self.assertEqual(res, 'OK')
### Tests for mako template
def test_render_mako_sanity(self):
tmpl = '''OK'''
res = salt.utils.templates.render_mako_tmpl(tmpl, dict(self.context))
self.assertEqual(res, 'OK')
def test_render_mako_evaluate(self):
tmpl = '''${ "OK" }'''
res = salt.utils.templates.render_mako_tmpl(tmpl, dict(self.context))
self.assertEqual(res, 'OK')
def test_render_mako_evaluate_multi(self):
tmpl = '''
% if 1:
OK
% endif
'''
res = salt.utils.templates.render_mako_tmpl(tmpl, dict(self.context))
stripped = res.strip()
self.assertEqual(stripped, 'OK')
def test_render_mako_variable(self):
tmpl = '''${ var }'''
ctx = dict(self.context)
ctx['var'] = 'OK'
res = salt.utils.templates.render_mako_tmpl(tmpl, ctx)
self.assertEqual(res, 'OK')
### Tests for wempy template
@skipIf(sys.version_info > (3,), 'The wempy module is currently unsupported under Python3')
def test_render_wempy_sanity(self):
tmpl = '''OK'''
res = salt.utils.templates.render_wempy_tmpl(tmpl, dict(self.context))
self.assertEqual(res, 'OK')
@skipIf(sys.version_info > (3,), 'The wempy module is currently unsupported under Python3')
def test_render_wempy_evaluate(self):
tmpl = '''{{="OK"}}'''
res = salt.utils.templates.render_wempy_tmpl(tmpl, dict(self.context))
self.assertEqual(res, 'OK')
@skipIf(sys.version_info > (3,), 'The wempy module is currently unsupported under Python3')
def test_render_wempy_evaluate_multi(self):
tmpl = '''{{if 1:}}OK{{pass}}'''
res = salt.utils.templates.render_wempy_tmpl(tmpl, dict(self.context))
self.assertEqual(res, 'OK')
@skipIf(sys.version_info > (3,), 'The wempy module is currently unsupported under Python3')
def test_render_wempy_variable(self):
tmpl = '''{{=var}}'''
ctx = dict(self.context)
ctx['var'] = 'OK'
res = salt.utils.templates.render_wempy_tmpl(tmpl, ctx)
self.assertEqual(res, 'OK')
### Tests for genshi template (xml-based)
def test_render_genshi_sanity(self):
tmpl = '''<RU>OK</RU>'''
res = salt.utils.templates.render_genshi_tmpl(tmpl, dict(self.context))
self.assertEqual(res, '<RU>OK</RU>')
def test_render_genshi_evaluate(self):
tmpl = '''<RU>${ "OK" }</RU>'''
res = salt.utils.templates.render_genshi_tmpl(tmpl, dict(self.context))
self.assertEqual(res, '<RU>OK</RU>')
def test_render_genshi_evaluate_condition(self):
tmpl = '''<RU xmlns:py="http://genshi.edgewall.org/" py:if="1">OK</RU>'''
res = salt.utils.templates.render_genshi_tmpl(tmpl, dict(self.context))
self.assertEqual(res, '<RU>OK</RU>')
def test_render_genshi_variable(self):
tmpl = '''<RU>$var</RU>'''
ctx = dict(self.context)
ctx['var'] = 'OK'
res = salt.utils.templates.render_genshi_tmpl(tmpl, ctx)
self.assertEqual(res, '<RU>OK</RU>')
def test_render_genshi_variable_replace(self):
tmpl = '''<RU xmlns:py="http://genshi.edgewall.org/" py:content="var">not ok</RU>'''
ctx = dict(self.context)
ctx['var'] = 'OK'
res = salt.utils.templates.render_genshi_tmpl(tmpl, ctx)
self.assertEqual(res, '<RU>OK</RU>')
### Tests for cheetah template (line-oriented and xml-friendly)
def test_render_cheetah_sanity(self):
tmpl = '''OK'''
res = salt.utils.templates.render_cheetah_tmpl(tmpl, dict(self.context))
self.assertEqual(res, 'OK')
def test_render_cheetah_evaluate(self):
tmpl = '''<%="OK"%>'''
res = salt.utils.templates.render_cheetah_tmpl(tmpl, dict(self.context))
self.assertEqual(res, 'OK')
def test_render_cheetah_evaluate_xml(self):
tmpl = '''
<% if 1: %>
OK
<% pass %>
'''
res = salt.utils.templates.render_cheetah_tmpl(tmpl, dict(self.context))
stripped = res.strip()
self.assertEqual(stripped, 'OK')
def test_render_cheetah_evaluate_text(self):
tmpl = '''
#if 1
OK
#end if
'''
res = salt.utils.templates.render_cheetah_tmpl(tmpl, dict(self.context))
stripped = res.strip()
self.assertEqual(stripped, 'OK')
def test_render_cheetah_variable(self):
tmpl = '''$var'''
ctx = dict(self.context)
ctx['var'] = 'OK'
res = salt.utils.templates.render_cheetah_tmpl(tmpl, ctx)
self.assertEqual(res.strip(), 'OK')
class MockRender(object):
def __call__(self, tplstr, context, tmplpath=None):
self.tplstr = tplstr
self.context = context
self.tmplpath = tmplpath
return tplstr
class WrapRenderTestCase(TestCase):
@with_tempdir()
def test_wrap_issue_56119_a(self, tempdir):
slsfile = os.path.join(tempdir, 'foo')
with salt.utils.files.fopen(slsfile, 'w') as fp:
fp.write('{{ slspath }}')
context = {'opts': {}, 'saltenv': 'base', 'sls': 'foo.bar'}
render = MockRender()
wrapped = salt.utils.templates.wrap_tmpl_func(render)
res = wrapped(
slsfile,
context=context,
tmplpath='/tmp/foo/bar/init.sls'
)
assert render.context['slspath'] == 'foo/bar', render.context['slspath']
assert render.context['tpldir'] == 'foo/bar', render.context['tpldir']
@with_tempdir()
def test_wrap_issue_56119_b(self, tempdir):
slsfile = os.path.join(tempdir, 'foo')
with salt.utils.files.fopen(slsfile, 'w') as fp:
fp.write('{{ slspath }}')
context = {'opts': {}, 'saltenv': 'base', 'sls': 'foo.bar.bang'}
render = MockRender()
wrapped = salt.utils.templates.wrap_tmpl_func(render)
res = wrapped(
slsfile,
context=context,
tmplpath='/tmp/foo/bar/bang.sls'
)
assert render.context['slspath'] == 'foo/bar', render.context['slspath']
assert render.context['tpldir'] == 'foo/bar', render.context['tpldir']
| 33.973913
| 95
| 0.607243
| 950
| 7,814
| 4.831579
| 0.144211
| 0.058824
| 0.105882
| 0.120261
| 0.801961
| 0.798257
| 0.791939
| 0.776906
| 0.776906
| 0.739869
| 0
| 0.004223
| 0.242385
| 7,814
| 229
| 96
| 34.122271
| 0.771115
| 0.048887
| 0
| 0.534483
| 0
| 0.011494
| 0.139422
| 0.005674
| 0
| 0
| 0
| 0
| 0.149425
| 1
| 0.149425
| false
| 0.011494
| 0.045977
| 0
| 0.218391
| 0.005747
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b9edbdf12d817c50e2a93887a3cab9e30fec2290
| 2,126
|
py
|
Python
|
test/test_token.py
|
ulf1/nlptasks
|
07d36448b517a18f76088f5d9cfb853e7602b079
|
[
"Apache-2.0"
] | 2
|
2020-12-30T13:11:09.000Z
|
2021-11-04T19:40:31.000Z
|
test/test_token.py
|
ulf1/nlptasks
|
07d36448b517a18f76088f5d9cfb853e7602b079
|
[
"Apache-2.0"
] | 99
|
2020-11-02T14:58:04.000Z
|
2021-04-09T18:01:34.000Z
|
test/test_token.py
|
ulf1/nlptasks
|
07d36448b517a18f76088f5d9cfb853e7602b079
|
[
"Apache-2.0"
] | null | null | null |
import nlptasks as nt
import nlptasks.token


def test_01():
    target = [["Die", "Kuh", "ist", "bunt", "."],
              ["Die", "Bäuerin", "mäht", "die", "Wiese", "."]]
    sentences = ["Die Kuh ist bunt.", "Die Bäuerin mäht die Wiese."]
    tokensequences = nt.token.spacy_de(sentences)
    assert tokensequences == target


def test_02():
    target = [["Die", "Kuh", "ist", "bunt", "."],
              ["Die", "Bäuerin", "mäht", "die", "Wiese", "."]]
    sentences = ["Die Kuh ist bunt.", "Die Bäuerin mäht die Wiese."]
    tokenizer_fn = nt.token.factory("spacy")
    assert tokenizer_fn.__name__ == "spacy_de"
    sequences = tokenizer_fn(sentences)
    assert sequences == target


def test_03():
    target = [["Die", "Kuh", "ist", "bunt", "."],
              ["Die", "Bäuerin", "mäht", "die", "Wiese", "."]]
    sentences = ["Die Kuh ist bunt.", "Die Bäuerin mäht die Wiese."]
    identifier = "spacy-de"
    model = nt.token.get_model(identifier)
    fn = nt.token.factory(identifier)
    tokensequences = fn(sentences, model=model)
    assert tokensequences == target


def test_11():
    target = [["Die", "Kuh", "ist", "bunt", "."],
              ["Die", "Bäuerin", "mäht", "die", "Wiese", "."]]
    sentences = ["Die Kuh ist bunt.", "Die Bäuerin mäht die Wiese."]
    tokensequences = nt.token.stanza_de(sentences)
    assert tokensequences == target


def test_22():
    target = [["Die", "Kuh", "ist", "bunt", "."],
              ["Die", "Bäuerin", "mäht", "die", "Wiese", "."]]
    sentences = ["Die Kuh ist bunt.", "Die Bäuerin mäht die Wiese."]
    tokenizer_fn = nt.token.factory("stanza")
    assert tokenizer_fn.__name__ == "stanza_de"
    sequences = tokenizer_fn(sentences)
    assert sequences == target


def test_23():
    target = [["Die", "Kuh", "ist", "bunt", "."],
              ["Die", "Bäuerin", "mäht", "die", "Wiese", "."]]
    sentences = ["Die Kuh ist bunt.", "Die Bäuerin mäht die Wiese."]
    identifier = "stanza-de"
    model = nt.token.get_model(identifier)
    fn = nt.token.factory(identifier)
    tokensequences = fn(sentences, model=model)
    assert tokensequences == target
| 34.852459
| 68
| 0.583725
| 245
| 2,126
| 4.959184
| 0.142857
| 0.059259
| 0.088889
| 0.128395
| 0.883128
| 0.877366
| 0.877366
| 0.804938
| 0.804938
| 0.804938
| 0
| 0.007246
| 0.221072
| 2,126
| 60
| 69
| 35.433333
| 0.726449
| 0
| 0
| 0.666667
| 0
| 0
| 0.249765
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 1
| 0.125
| false
| 0
| 0.041667
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
dbead83ddecfc429cad3cd961dea3d0c45feb412
| 219
|
py
|
Python
|
OBlog/blueprint/images/admin.py
|
OhYee/OBlog
|
a9d7e4fda5651cf9c5afd4c128c4df4442794e97
|
[
"BSD-3-Clause"
] | 23
|
2018-02-23T12:56:43.000Z
|
2021-12-20T13:21:47.000Z
|
OBlog/blueprint/images/admin.py
|
OhYee/OBlog
|
a9d7e4fda5651cf9c5afd4c128c4df4442794e97
|
[
"BSD-3-Clause"
] | 17
|
2018-02-23T12:52:39.000Z
|
2018-12-04T05:50:58.000Z
|
OBlog/blueprint/images/admin.py
|
OhYee/OBlog
|
a9d7e4fda5651cf9c5afd4c128c4df4442794e97
|
[
"BSD-3-Clause"
] | 2
|
2018-06-16T20:52:23.000Z
|
2021-04-08T15:29:44.000Z
|
from . import imagesAdminBP
from flask import render_template, abort
from .main import getImageList

@imagesAdminBP.route('/')
def index():
    return render_template("admin/images.html",images = getImageList())
| 27.375
| 72
| 0.748858
| 25
| 219
| 6.48
| 0.64
| 0.17284
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.146119
| 219
| 7
| 73
| 31.285714
| 0.86631
| 0
| 0
| 0
| 0
| 0
| 0.084906
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| true
| 0
| 0.5
| 0.166667
| 0.833333
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
dbf9fad2e4c8d776d82956f70facb243fc0e8e21
| 12,879
|
py
|
Python
|
tests/test_interface.py
|
sam-austin-sri/atmosense-abcgan
|
b046676f70da69313126aaa323145af2a0e7b404
|
[
"MIT"
] | null | null | null |
tests/test_interface.py
|
sam-austin-sri/atmosense-abcgan
|
b046676f70da69313126aaa323145af2a0e7b404
|
[
"MIT"
] | null | null | null |
tests/test_interface.py
|
sam-austin-sri/atmosense-abcgan
|
b046676f70da69313126aaa323145af2a0e7b404
|
[
"MIT"
] | 1
|
2022-01-04T21:20:18.000Z
|
2022-01-04T21:20:18.000Z
|
import unittest
import abcgan # import interface components directly from abcgan
import abcgan.constants as const
import numpy as np
import os
import h5py
from abcgan.interface import estimate_drivers, load_h5_data, generate_multi
from abcgan.interface import hellinger_scores_hfp, hellinger_scores_bv
dir_path = os.path.dirname(os.path.realpath(__file__))
fname = os.path.join(dir_path, "..", "tutorials", "tutorial_all.h5")
with h5py.File(fname, 'r') as hf:
nSamples = hf['Drivers'][abcgan.driver_names[0]].shape[0]
def fake_drivers(n):
return np.exp(np.random.normal(size=(n, const.n_driver)))
def fake_bvs(n):
bvs = [np.random.uniform(low=const.bv_thresholds[i, 0],
high=const.bv_thresholds[i, 1],
size=(n, const.max_alt)) for i in range(const.n_bv)]
return np.stack(bvs, axis=-1)
def fake_hfp(n):
hfps = [np.random.uniform(low=const.hfp_thresholds[i, 0],
high=const.hfp_thresholds[i, 1],
size=n) for i in range(const.n_hfp)]
return np.stack(hfps, axis=-1)[:, None, :]
def fake_lidar_bvs(n):
return np.exp(np.random.normal(size=(n, const.max_alt_lidar, const.n_lidar_bv)))
class TestInterface(unittest.TestCase):
def test_driver_estimation(self):
drivers = fake_drivers(5)
est_drs = estimate_drivers(drivers)
self.assertEqual(est_drs.shape, drivers.shape)
def test_generator(self):
drivers = fake_drivers(5)
bvs = abcgan.generate(drivers, verbose=0)
self.assertEqual(bvs.shape[0], drivers.shape[0])
def test_generator_zscale(self):
drivers = fake_drivers(5)
bvs = abcgan.generate(drivers, return_z_scale=True, verbose=0)
self.assertEqual(bvs.shape[0], drivers.shape[0])
self.assertTrue(np.isclose(np.std(bvs), 1, rtol=1, atol=1))
self.assertTrue(np.isclose(np.mean(bvs), 0, rtol=1, atol=1))
def test_generator_bv_inputs(self):
top_alt = np.random.randint(const.max_alt - 1)
drivers = fake_drivers(5)
bvs = fake_bvs(5)[:, :top_alt, :]
G_bvs = abcgan.generate(drivers, measurements=bvs, verbose=0)
self.assertEqual(G_bvs.shape[0], drivers.shape[0])
self.assertTrue((bvs == G_bvs[:, :top_alt, :]).all())
self.assertEqual(G_bvs.shape, (len(bvs), const.max_alt, const.n_bv))
def test_hfp_generator(self):
drivers = fake_drivers(5)
bvs, hfps, b = abcgan.generate(drivers, generate_hfps=True, verbose=0)
self.assertEqual(bvs.shape, (drivers.shape[0], const.max_alt, const.n_bv))
self.assertEqual(hfps.shape, (drivers.shape[0], const.n_waves, const.n_hfp))
self.assertEqual(b.shape, (drivers.shape[0], ))
def test_generator_hfp_zscale(self):
drivers = fake_drivers(5)
bvs, hfps, b = abcgan.generate(drivers, return_z_scale=True, generate_hfps=True, verbose=0)
self.assertTrue(np.isclose(np.std(bvs), 1, rtol=1, atol=1))
self.assertTrue(np.isclose(np.mean(bvs), 0, rtol=1, atol=1))
self.assertTrue(np.isclose(np.std(hfps), 1, rtol=1, atol=1))
self.assertTrue(np.isclose(np.mean(hfps), 0, rtol=1, atol=1))
def test_hfp_generator_bv_inputs(self):
top_alt = np.random.randint(const.max_alt - 1)
drivers = fake_drivers(5)
bvs = fake_bvs(5)[:, :top_alt, :]
G_bvs, hfps, b = abcgan.generate(drivers, measurements=bvs,
generate_hfps=True, verbose=0)
self.assertEqual(G_bvs.shape, (drivers.shape[0], const.max_alt, const.n_bv))
self.assertTrue((bvs == G_bvs[:, :top_alt, :]).all())
self.assertEqual(hfps.shape, (drivers.shape[0], const.n_waves, const.n_hfp))
self.assertEqual(b.shape, (drivers.shape[0], ))
def test_bv_gen_multi_inputs(self):
n_repeat = 5
drivers = fake_drivers(5)
G_bvs = generate_multi(drivers, n_repeat=n_repeat, verbose=0)
self.assertEqual(G_bvs.shape, (drivers.shape[0], n_repeat, const.max_alt, const.n_bv))
def test_gen_multi_bv_inputs(self):
n_repeat = 5
top_alt = np.random.randint(const.max_alt - 1)
drivers = fake_drivers(5)
bvs = fake_bvs(5)[:, :top_alt, :]
G_bvs = generate_multi(drivers, bvs=bvs, n_repeat=n_repeat, verbose=0)
self.assertEqual(G_bvs.shape, (drivers.shape[0], n_repeat, const.max_alt, const.n_bv))
self.assertTrue((bvs == G_bvs[:, 0, :top_alt, :]).all())
def test_hfp_gen_multi_inputs(self):
n_repeat = 5
drivers = fake_drivers(5)
G_bvs, G_hfps, G_bs = generate_multi(drivers, generate_hfps=True, n_repeat=n_repeat, verbose=0)
self.assertEqual(G_bvs.shape, (drivers.shape[0], n_repeat, const.max_alt, const.n_bv))
self.assertEqual(G_hfps.shape, (drivers.shape[0], n_repeat, const.n_waves, const.n_hfp))
self.assertEqual(G_bs.shape, (drivers.shape[0], n_repeat, ))
def test_hfp_gen_multi_bv_inputs(self):
n_repeat = 5
top_alt = np.random.randint(const.max_alt - 1)
drivers = fake_drivers(5)
bvs = fake_bvs(5)[:, :top_alt, :]
G_bvs, G_hfps, G_bs = generate_multi(drivers, bvs=bvs, n_repeat=n_repeat,
generate_hfps=True, verbose=0)
self.assertEqual(G_bvs.shape, (drivers.shape[0], n_repeat, const.max_alt, const.n_bv))
self.assertTrue((bvs == G_bvs[:, 0, :top_alt, :]).all())
self.assertEqual(G_hfps.shape, (drivers.shape[0], n_repeat, const.n_waves, const.n_hfp))
self.assertEqual(G_bs.shape, (drivers.shape[0], n_repeat, ))
def test_hfp_discriminator(self):
drivers = fake_drivers(5)
bvs = fake_bvs(5)
hfps = fake_hfp(5)
bv_scores, hfp_scores = abcgan.discriminate(drivers, bvs, hfps=hfps)
self.assertEqual(bv_scores.shape, (drivers.shape[0], bvs.shape[1]))
self.assertEqual(hfp_scores.shape, (drivers.shape[0], hfps.shape[1]))
def test_lidar_generator(self):
drivers = fake_drivers(5)
bvs = abcgan.generate(drivers, n_alt=const.max_alt_lidar,
bv_model='bv_lidar_gan', bv_type='lidar', verbose=0)
self.assertEqual(bvs.shape[0], drivers.shape[0])
def test_lidar_discriminator(self):
drivers = fake_drivers(5)
bvs = fake_lidar_bvs(5)
disc = abcgan.discriminate(drivers, bvs, bv_model='bv_lidar_gan', bv_type='lidar')
self.assertEqual(disc.shape, (drivers.shape[0], bvs.shape[1]))
def test_discriminator(self):
drivers = fake_drivers(5)
bvs = fake_bvs(5)
disc = abcgan.discriminate(drivers, bvs, bv_model='bv_gan')
self.assertEqual(disc.shape, (drivers.shape[0], bvs.shape[1]))
def test_stack_drivers(self):
with h5py.File(fname, 'r') as hf:
drivers = abcgan.stack_drivers(hf['Drivers'])
self.assertTrue(isinstance(drivers, np.ndarray))
self.assertEqual(drivers.shape, (nSamples, const.n_driver))
def test_stack_lidar_bvs(self):
with h5py.File(fname, 'r') as hf:
bvs = abcgan.stack_bvs(hf['BackgroundValues'], bv_type='lidar')
self.assertTrue(isinstance(bvs, np.ndarray))
self.assertEqual(bvs.shape,
(nSamples, const.max_alt_lidar, const.n_lidar_bv))
def test_stack_bvs_radar(self):
with h5py.File(fname, 'r') as hf:
bvs = abcgan.stack_bvs(hf['BackgroundValues'], bv_type='radar')
self.assertTrue(isinstance(bvs, np.ndarray))
self.assertEqual(bvs.shape,
(nSamples, const.max_alt, const.n_bv))
def test_load_h5_data_bv(self):
drivers, bvs, alt_mask, unix_time = load_h5_data(fname)
n_samples = len(drivers)
self.assertTrue(len(drivers) == len(bvs) == len(alt_mask) == len(unix_time))
self.assertEqual(drivers.shape, (n_samples, const.n_driver))
self.assertEqual(bvs.shape, (n_samples, const.max_alt, const.n_bv))
self.assertEqual(alt_mask.shape, (n_samples, const.max_alt))
self.assertEqual(unix_time.shape, (n_samples,))
n_samples = 500
drivers, bvs, alt_mask, unix_time = load_h5_data(fname, n_samples=n_samples)
self.assertTrue(len(drivers) == len(bvs) == len(alt_mask) == len(unix_time))
self.assertEqual(drivers.shape, (n_samples, const.n_driver))
self.assertEqual(bvs.shape, (n_samples, const.max_alt, const.n_bv))
self.assertEqual(alt_mask.shape, (n_samples, const.max_alt))
self.assertEqual(unix_time.shape, (n_samples,))
drivers, bvs, alt_mask, hfps, wave_mask, unix_time = load_h5_data(fname, load_hfp=True)
n_samples = len(drivers)
self.assertTrue(len(drivers) == len(bvs) == len(alt_mask) == len(unix_time) == len(hfps) == len(wave_mask))
self.assertEqual(drivers.shape, (n_samples, const.n_driver))
self.assertEqual(bvs.shape, (n_samples, const.max_alt, const.n_bv))
self.assertEqual(alt_mask.shape, (n_samples, const.max_alt))
self.assertEqual(unix_time.shape, (n_samples,))
n_samples = 500
drivers, bvs, alt_mask, hfps, wave_mask, unix_time = load_h5_data(fname, n_samples=n_samples, load_hfp=True)
self.assertTrue(len(drivers) == len(bvs) == len(alt_mask) == len(unix_time) == len(hfps) == len(wave_mask))
self.assertEqual(drivers.shape, (n_samples, const.n_driver))
self.assertEqual(bvs.shape, (n_samples, const.max_alt, const.n_bv))
self.assertEqual(alt_mask.shape, (n_samples, const.max_alt))
self.assertEqual(unix_time.shape, (n_samples,))
def test_bv_hellinger_scoring(self):
n_samples = 50
drivers, bvs, alt_mask, unix_time = load_h5_data(fname, n_samples=n_samples)
G_bvs = abcgan.generate(drivers, verbose=0)
dist = hellinger_scores_bv(bvs, G_bvs, alt_mask)
self.assertEqual(dist.shape, (const.max_alt, const.n_bv))
dist = hellinger_scores_bv(bvs, G_bvs, alt_mask, filter_length=0)
self.assertEqual(dist.shape, (const.max_alt, const.n_bv))
dist = hellinger_scores_bv(bvs, G_bvs, alt_mask, z_scale=False)
self.assertEqual(dist.shape, (const.max_alt, const.n_bv))
def test_hfp_hellinger_scoring(self):
n_samples = 50
drivers, bvs, alt_mask, hfps, wave_mask, unix_time = load_h5_data(fname, n_samples=n_samples, load_hfp=True)
G_bvs, G_hfps, G_bs = abcgan.generate(drivers, generate_hfps=True, verbose=0)
Gb_mask = (G_bs < np.random.uniform(size=len(G_bs)))[:, None]
dist = hellinger_scores_hfp(hfps, G_hfps, r_mask=wave_mask, f_mask=Gb_mask)
self.assertEqual(dist.shape, (const.n_waves, const.n_hfp))
dist = hellinger_scores_hfp(hfps, G_hfps, r_mask=wave_mask, f_mask=Gb_mask, filter_length=0)
self.assertEqual(dist.shape, (const.n_waves, const.n_hfp))
dist = hellinger_scores_hfp(hfps, G_hfps, r_mask=wave_mask, f_mask=Gb_mask, z_scale=False)
self.assertEqual(dist.shape, (const.n_waves, const.n_hfp))
def test_type(self):
with h5py.File(fname, 'r') as hf:
driver_dict = {k: v[()] for k, v in hf['Drivers'].items()}
bv_dict = {k: v[()] for k, v in
hf['BackgroundValues'].items()}
driver_dict[const.driver_names[0]] = None
with self.assertRaises(ValueError):
abcgan.stack_drivers(driver_dict)
bv_dict[const.bv_names[0]] = None
with self.assertRaises(ValueError):
abcgan.stack_bvs(bv_dict)
def test_missing_name(self):
with h5py.File(fname, 'r') as hf:
driver_dict = {k: v[()] for k, v in hf['Drivers'].items()}
bv_dict = {k: v[()] for k, v in
hf['BackgroundValues'].items()}
del driver_dict[const.driver_names[0]]
with self.assertRaises(KeyError):
abcgan.stack_drivers(driver_dict)
del bv_dict[const.bv_names[0]]
with self.assertRaises(KeyError):
abcgan.stack_bvs(bv_dict)
def test_wrong_shape(self):
with h5py.File(fname, 'r') as hf:
driver_dict = {k: v[()] for k, v in hf['Drivers'].items()}
bv_dict = {k: v[()] for k, v in
hf['BackgroundValues'].items()}
driver_dict[const.driver_names[0]] = \
driver_dict[const.driver_names[0]][:10]
with self.assertRaises(ValueError):
abcgan.stack_drivers(driver_dict)
bv_dict[const.bv_names[0]] = \
bv_dict[const.bv_names[0]][:5]
with self.assertRaises(ValueError):
abcgan.stack_bvs(bv_dict)
if __name__ == "__main__":
unittest.main(argv=['first-arg-is-ignored'], exit=False)
| 47.877323
| 116
| 0.641975
| 1,849
| 12,879
| 4.232558
| 0.078421
| 0.093918
| 0.03795
| 0.0414
| 0.849732
| 0.812292
| 0.783031
| 0.758881
| 0.712752
| 0.66113
| 0
| 0.014187
| 0.222843
| 12,879
| 268
| 117
| 48.05597
| 0.767709
| 0.003727
| 0
| 0.541485
| 0
| 0
| 0.017616
| 0
| 0
| 0
| 0
| 0
| 0.31441
| 1
| 0.122271
| false
| 0
| 0.034935
| 0.008734
| 0.179039
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e02041a471c06f31a6da300349ccb2a5355ee986
| 28
|
py
|
Python
|
contrib/python/numpy/numpy/linalg/_umath_linalg/__init__.py
|
HeyLey/catboost
|
f472aed90604ebe727537d9d4a37147985e10ec2
|
[
"Apache-2.0"
] | 1
|
2019-01-26T02:58:50.000Z
|
2019-01-26T02:58:50.000Z
|
contrib/python/numpy/numpy/linalg/_umath_linalg/__init__.py
|
HeyLey/catboost
|
f472aed90604ebe727537d9d4a37147985e10ec2
|
[
"Apache-2.0"
] | null | null | null |
contrib/python/numpy/numpy/linalg/_umath_linalg/__init__.py
|
HeyLey/catboost
|
f472aed90604ebe727537d9d4a37147985e10ec2
|
[
"Apache-2.0"
] | 1
|
2019-02-01T12:17:04.000Z
|
2019-02-01T12:17:04.000Z
|
from _umath_linalg import *
| 14
| 27
| 0.821429
| 4
| 28
| 5.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 28
| 1
| 28
| 28
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1649b17530ad621388fd339f09c04e9a32beb13d
| 106,630
|
py
|
Python
|
tests/test_ecs/test_ecs_boto3.py
|
Preskton/moto
|
c9c30b82867294030833cef292db167955bc8240
|
[
"Apache-2.0"
] | 1
|
2021-04-06T12:48:00.000Z
|
2021-04-06T12:48:00.000Z
|
tests/test_ecs/test_ecs_boto3.py
|
Preskton/moto
|
c9c30b82867294030833cef292db167955bc8240
|
[
"Apache-2.0"
] | null | null | null |
tests/test_ecs/test_ecs_boto3.py
|
Preskton/moto
|
c9c30b82867294030833cef292db167955bc8240
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import unicode_literals
from datetime import datetime
from botocore.exceptions import ClientError
import boto3
import sure # noqa
import json
from moto.ec2 import utils as ec2_utils
from uuid import UUID
from moto import mock_ecs
from moto import mock_ec2
from moto.ecs.exceptions import (
ClusterNotFoundException,
ServiceNotFoundException,
InvalidParameterException,
TaskDefinitionNotFoundException,
RevisionNotFoundException,
)
import pytest
@mock_ecs
def test_create_cluster():
client = boto3.client("ecs", region_name="us-east-1")
response = client.create_cluster(clusterName="test_ecs_cluster")
response["cluster"]["clusterName"].should.equal("test_ecs_cluster")
response["cluster"]["clusterArn"].should.equal(
"arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster"
)
response["cluster"]["status"].should.equal("ACTIVE")
response["cluster"]["registeredContainerInstancesCount"].should.equal(0)
response["cluster"]["runningTasksCount"].should.equal(0)
response["cluster"]["pendingTasksCount"].should.equal(0)
response["cluster"]["activeServicesCount"].should.equal(0)
@mock_ecs
def test_list_clusters():
client = boto3.client("ecs", region_name="us-east-2")
_ = client.create_cluster(clusterName="test_cluster0")
_ = client.create_cluster(clusterName="test_cluster1")
response = client.list_clusters()
response["clusterArns"].should.contain(
"arn:aws:ecs:us-east-2:012345678910:cluster/test_cluster0"
)
response["clusterArns"].should.contain(
"arn:aws:ecs:us-east-2:012345678910:cluster/test_cluster1"
)
@mock_ecs
def test_describe_clusters():
client = boto3.client("ecs", region_name="us-east-1")
response = client.describe_clusters(clusters=["some-cluster"])
response["failures"].should.contain(
{
"arn": "arn:aws:ecs:us-east-1:012345678910:cluster/some-cluster",
"reason": "MISSING",
}
)
@mock_ecs
def test_delete_cluster():
client = boto3.client("ecs", region_name="us-east-1")
_ = client.create_cluster(clusterName="test_ecs_cluster")
response = client.delete_cluster(cluster="test_ecs_cluster")
response["cluster"]["clusterName"].should.equal("test_ecs_cluster")
response["cluster"]["clusterArn"].should.equal(
"arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster"
)
response["cluster"]["status"].should.equal("ACTIVE")
response["cluster"]["registeredContainerInstancesCount"].should.equal(0)
response["cluster"]["runningTasksCount"].should.equal(0)
response["cluster"]["pendingTasksCount"].should.equal(0)
response["cluster"]["activeServicesCount"].should.equal(0)
response = client.list_clusters()
len(response["clusterArns"]).should.equal(0)
@mock_ecs
def test_delete_cluster_exceptions():
client = boto3.client("ecs", region_name="us-east-1")
client.delete_cluster.when.called_with(cluster="not_a_cluster").should.throw(
ClientError, ClusterNotFoundException().message
)
@mock_ecs
def test_register_task_definition():
client = boto3.client("ecs", region_name="us-east-1")
response = client.register_task_definition(
family="test_ecs_task",
containerDefinitions=[
{
"name": "hello_world",
"image": "docker/hello-world:latest",
"cpu": 1024,
"memory": 400,
"essential": True,
"environment": [
{"name": "AWS_ACCESS_KEY_ID", "value": "SOME_ACCESS_KEY"}
],
"logConfiguration": {"logDriver": "json-file"},
}
],
networkMode="bridge",
tags=[
{"key": "createdBy", "value": "moto-unittest"},
{"key": "foo", "value": "bar"},
],
)
type(response["taskDefinition"]).should.be(dict)
response["taskDefinition"]["revision"].should.equal(1)
response["taskDefinition"]["taskDefinitionArn"].should.equal(
"arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1"
)
response["taskDefinition"]["containerDefinitions"][0]["name"].should.equal(
"hello_world"
)
response["taskDefinition"]["containerDefinitions"][0]["image"].should.equal(
"docker/hello-world:latest"
)
response["taskDefinition"]["containerDefinitions"][0]["cpu"].should.equal(1024)
response["taskDefinition"]["containerDefinitions"][0]["memory"].should.equal(400)
response["taskDefinition"]["containerDefinitions"][0]["essential"].should.equal(
True
)
response["taskDefinition"]["containerDefinitions"][0]["environment"][0][
"name"
].should.equal("AWS_ACCESS_KEY_ID")
response["taskDefinition"]["containerDefinitions"][0]["environment"][0][
"value"
].should.equal("SOME_ACCESS_KEY")
response["taskDefinition"]["containerDefinitions"][0]["logConfiguration"][
"logDriver"
].should.equal("json-file")
response["taskDefinition"]["networkMode"].should.equal("bridge")
@mock_ecs
def test_list_task_definitions():
client = boto3.client("ecs", region_name="us-east-1")
_ = client.register_task_definition(
family="test_ecs_task",
containerDefinitions=[
{
"name": "hello_world",
"image": "docker/hello-world:latest",
"cpu": 1024,
"memory": 400,
"essential": True,
"environment": [
{"name": "AWS_ACCESS_KEY_ID", "value": "SOME_ACCESS_KEY"}
],
"logConfiguration": {"logDriver": "json-file"},
}
],
)
_ = client.register_task_definition(
family="test_ecs_task",
containerDefinitions=[
{
"name": "hello_world2",
"image": "docker/hello-world2:latest",
"cpu": 1024,
"memory": 400,
"essential": True,
"environment": [
{"name": "AWS_ACCESS_KEY_ID", "value": "SOME_ACCESS_KEY2"}
],
"logConfiguration": {"logDriver": "json-file"},
}
],
)
response = client.list_task_definitions()
len(response["taskDefinitionArns"]).should.equal(2)
response["taskDefinitionArns"][0].should.equal(
"arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1"
)
response["taskDefinitionArns"][1].should.equal(
"arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:2"
)
@mock_ecs
def test_list_task_definitions_with_family_prefix():
client = boto3.client("ecs", region_name="us-east-1")
_ = client.register_task_definition(
family="test_ecs_task_a",
containerDefinitions=[
{
"name": "hello_world",
"image": "docker/hello-world:latest",
"cpu": 1024,
"memory": 400,
"essential": True,
"environment": [
{"name": "AWS_ACCESS_KEY_ID", "value": "SOME_ACCESS_KEY"}
],
"logConfiguration": {"logDriver": "json-file"},
}
],
)
_ = client.register_task_definition(
family="test_ecs_task_a",
containerDefinitions=[
{
"name": "hello_world",
"image": "docker/hello-world:latest",
"cpu": 1024,
"memory": 400,
"essential": True,
"environment": [
{"name": "AWS_ACCESS_KEY_ID", "value": "SOME_ACCESS_KEY"}
],
"logConfiguration": {"logDriver": "json-file"},
}
],
)
_ = client.register_task_definition(
family="test_ecs_task_b",
containerDefinitions=[
{
"name": "hello_world2",
"image": "docker/hello-world2:latest",
"cpu": 1024,
"memory": 400,
"essential": True,
"environment": [
{"name": "AWS_ACCESS_KEY_ID", "value": "SOME_ACCESS_KEY2"}
],
"logConfiguration": {"logDriver": "json-file"},
}
],
)
empty_response = client.list_task_definitions(familyPrefix="test_ecs_task")
len(empty_response["taskDefinitionArns"]).should.equal(0)
filtered_response = client.list_task_definitions(familyPrefix="test_ecs_task_a")
len(filtered_response["taskDefinitionArns"]).should.equal(2)
filtered_response["taskDefinitionArns"][0].should.equal(
"arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task_a:1"
)
filtered_response["taskDefinitionArns"][1].should.equal(
"arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task_a:2"
)
@mock_ecs
def test_describe_task_definition():
client = boto3.client("ecs", region_name="us-east-1")
_ = client.register_task_definition(
family="test_ecs_task",
containerDefinitions=[
{
"name": "hello_world",
"image": "docker/hello-world:latest",
"cpu": 1024,
"memory": 400,
"essential": True,
"environment": [
{"name": "AWS_ACCESS_KEY_ID", "value": "SOME_ACCESS_KEY"}
],
"logConfiguration": {"logDriver": "json-file"},
}
],
tags=[{"key": "Name", "value": "test_ecs_task"}],
)
_ = client.register_task_definition(
family="test_ecs_task",
containerDefinitions=[
{
"name": "hello_world2",
"image": "docker/hello-world2:latest",
"cpu": 1024,
"memory": 400,
"essential": True,
"environment": [
{"name": "AWS_ACCESS_KEY_ID", "value": "SOME_ACCESS_KEY2"}
],
"logConfiguration": {"logDriver": "json-file"},
}
],
)
_ = client.register_task_definition(
family="test_ecs_task",
containerDefinitions=[
{
"name": "hello_world3",
"image": "docker/hello-world3:latest",
"cpu": 1024,
"memory": 400,
"essential": True,
"environment": [
{"name": "AWS_ACCESS_KEY_ID", "value": "SOME_ACCESS_KEY3"}
],
"logConfiguration": {"logDriver": "json-file"},
}
],
)
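    # A bare family name resolves to the latest revision; "family:revision" pins a specific one.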
response = client.describe_task_definition(taskDefinition="test_ecs_task")
response["taskDefinition"]["taskDefinitionArn"].should.equal(
"arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:3"
)
response = client.describe_task_definition(taskDefinition="test_ecs_task:2")
response["taskDefinition"]["taskDefinitionArn"].should.equal(
"arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:2"
)
response = client.describe_task_definition(
taskDefinition="test_ecs_task:1", include=["TAGS"]
)
response["tags"].should.equal([{"key": "Name", "value": "test_ecs_task"}])
@mock_ecs
def test_deregister_task_definition():
client = boto3.client("ecs", region_name="us-east-1")
_ = client.register_task_definition(
family="test_ecs_task",
containerDefinitions=[
{
"name": "hello_world",
"image": "docker/hello-world:latest",
"cpu": 1024,
"memory": 400,
"essential": True,
"environment": [
{"name": "AWS_ACCESS_KEY_ID", "value": "SOME_ACCESS_KEY"}
],
"logConfiguration": {"logDriver": "json-file"},
}
],
)
response = client.deregister_task_definition(taskDefinition="test_ecs_task:1")
type(response["taskDefinition"]).should.be(dict)
response["taskDefinition"]["taskDefinitionArn"].should.equal(
"arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1"
)
response["taskDefinition"]["containerDefinitions"][0]["name"].should.equal(
"hello_world"
)
response["taskDefinition"]["containerDefinitions"][0]["image"].should.equal(
"docker/hello-world:latest"
)
response["taskDefinition"]["containerDefinitions"][0]["cpu"].should.equal(1024)
response["taskDefinition"]["containerDefinitions"][0]["memory"].should.equal(400)
response["taskDefinition"]["containerDefinitions"][0]["essential"].should.equal(
True
)
response["taskDefinition"]["containerDefinitions"][0]["environment"][0][
"name"
].should.equal("AWS_ACCESS_KEY_ID")
response["taskDefinition"]["containerDefinitions"][0]["environment"][0][
"value"
].should.equal("SOME_ACCESS_KEY")
response["taskDefinition"]["containerDefinitions"][0]["logConfiguration"][
"logDriver"
].should.equal("json-file")
@mock_ecs
def test_deregister_task_definition_exceptions():
client = boto3.client("ecs", region_name="us-east-1")
client.deregister_task_definition.when.called_with(
taskDefinition="fake_task"
).should.throw(ClientError, RevisionNotFoundException().message)
client.deregister_task_definition.when.called_with(
taskDefinition="fake_task:foo"
).should.throw(
ClientError,
InvalidParameterException("Invalid revision number. Number: foo").message,
)
client.deregister_task_definition.when.called_with(
taskDefinition="fake_task:1"
).should.throw(ClientError, TaskDefinitionNotFoundException().message)
@mock_ecs
def test_create_service():
client = boto3.client("ecs", region_name="us-east-1")
_ = client.create_cluster(clusterName="test_ecs_cluster")
_ = client.register_task_definition(
family="test_ecs_task",
containerDefinitions=[
{
"name": "hello_world",
"image": "docker/hello-world:latest",
"cpu": 1024,
"memory": 400,
"essential": True,
"environment": [
{"name": "AWS_ACCESS_KEY_ID", "value": "SOME_ACCESS_KEY"}
],
"logConfiguration": {"logDriver": "json-file"},
}
],
)
response = client.create_service(
cluster="test_ecs_cluster",
serviceName="test_ecs_service",
taskDefinition="test_ecs_task",
desiredCount=2,
)
response["service"]["clusterArn"].should.equal(
"arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster"
)
response["service"]["desiredCount"].should.equal(2)
len(response["service"]["events"]).should.equal(0)
len(response["service"]["loadBalancers"]).should.equal(0)
response["service"]["pendingCount"].should.equal(0)
response["service"]["runningCount"].should.equal(0)
response["service"]["serviceArn"].should.equal(
"arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service"
)
response["service"]["serviceName"].should.equal("test_ecs_service")
response["service"]["status"].should.equal("ACTIVE")
response["service"]["taskDefinition"].should.equal(
"arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1"
)
response["service"]["schedulingStrategy"].should.equal("REPLICA")
@mock_ecs
def test_create_service_scheduling_strategy():
client = boto3.client("ecs", region_name="us-east-1")
_ = client.create_cluster(clusterName="test_ecs_cluster")
_ = client.register_task_definition(
family="test_ecs_task",
containerDefinitions=[
{
"name": "hello_world",
"image": "docker/hello-world:latest",
"cpu": 1024,
"memory": 400,
"essential": True,
"environment": [
{"name": "AWS_ACCESS_KEY_ID", "value": "SOME_ACCESS_KEY"}
],
"logConfiguration": {"logDriver": "json-file"},
}
],
)
response = client.create_service(
cluster="test_ecs_cluster",
serviceName="test_ecs_service",
taskDefinition="test_ecs_task",
desiredCount=2,
schedulingStrategy="DAEMON",
)
response["service"]["clusterArn"].should.equal(
"arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster"
)
response["service"]["desiredCount"].should.equal(2)
len(response["service"]["events"]).should.equal(0)
len(response["service"]["loadBalancers"]).should.equal(0)
response["service"]["pendingCount"].should.equal(0)
response["service"]["runningCount"].should.equal(0)
response["service"]["serviceArn"].should.equal(
"arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service"
)
response["service"]["serviceName"].should.equal("test_ecs_service")
response["service"]["status"].should.equal("ACTIVE")
response["service"]["taskDefinition"].should.equal(
"arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1"
)
response["service"]["schedulingStrategy"].should.equal("DAEMON")
@mock_ecs
def test_list_services():
client = boto3.client("ecs", region_name="us-east-1")
_ = client.create_cluster(clusterName="test_ecs_cluster")
_ = client.register_task_definition(
family="test_ecs_task",
containerDefinitions=[
{
"name": "hello_world",
"image": "docker/hello-world:latest",
"cpu": 1024,
"memory": 400,
"essential": True,
"environment": [
{"name": "AWS_ACCESS_KEY_ID", "value": "SOME_ACCESS_KEY"}
],
"logConfiguration": {"logDriver": "json-file"},
}
],
)
_ = client.create_service(
cluster="test_ecs_cluster",
serviceName="test_ecs_service1",
taskDefinition="test_ecs_task",
schedulingStrategy="REPLICA",
desiredCount=2,
)
_ = client.create_service(
cluster="test_ecs_cluster",
serviceName="test_ecs_service2",
taskDefinition="test_ecs_task",
schedulingStrategy="DAEMON",
desiredCount=2,
)
unfiltered_response = client.list_services(cluster="test_ecs_cluster")
len(unfiltered_response["serviceArns"]).should.equal(2)
unfiltered_response["serviceArns"][0].should.equal(
"arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service1"
)
unfiltered_response["serviceArns"][1].should.equal(
"arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service2"
)
filtered_response = client.list_services(
cluster="test_ecs_cluster", schedulingStrategy="REPLICA"
)
len(filtered_response["serviceArns"]).should.equal(1)
filtered_response["serviceArns"][0].should.equal(
"arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service1"
)
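# Against real ECS (and moto, once result sets grow) list_services responses are
# paginated; a minimal sketch using boto3's built-in paginator, assuming an
# existing `client` and cluster name:
def _list_all_service_arns(client, cluster):
    """Collect service ARNs across every page (illustrative sketch)."""
    arns = []
    for page in client.get_paginator("list_services").paginate(cluster=cluster):
        arns.extend(page["serviceArns"])
    return arns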
@mock_ecs
def test_describe_services():
client = boto3.client("ecs", region_name="us-east-1")
_ = client.create_cluster(clusterName="test_ecs_cluster")
_ = client.register_task_definition(
family="test_ecs_task",
containerDefinitions=[
{
"name": "hello_world",
"image": "docker/hello-world:latest",
"cpu": 1024,
"memory": 400,
"essential": True,
"environment": [
{"name": "AWS_ACCESS_KEY_ID", "value": "SOME_ACCESS_KEY"}
],
"logConfiguration": {"logDriver": "json-file"},
}
],
)
_ = client.create_service(
cluster="test_ecs_cluster",
serviceName="test_ecs_service1",
taskDefinition="test_ecs_task",
desiredCount=2,
tags=[{"key": "Name", "value": "test_ecs_service1"}],
)
_ = client.create_service(
cluster="test_ecs_cluster",
serviceName="test_ecs_service2",
taskDefinition="test_ecs_task",
desiredCount=2,
)
_ = client.create_service(
cluster="test_ecs_cluster",
serviceName="test_ecs_service3",
taskDefinition="test_ecs_task",
desiredCount=2,
)
response = client.describe_services(
cluster="test_ecs_cluster",
services=[
"test_ecs_service1",
"arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service2",
],
)
len(response["services"]).should.equal(2)
response["services"][0]["serviceArn"].should.equal(
"arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service1"
)
response["services"][0]["serviceName"].should.equal("test_ecs_service1")
response["services"][1]["serviceArn"].should.equal(
"arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service2"
)
response["services"][1]["serviceName"].should.equal("test_ecs_service2")
response["services"][0]["deployments"][0]["desiredCount"].should.equal(2)
response["services"][0]["deployments"][0]["pendingCount"].should.equal(2)
response["services"][0]["deployments"][0]["runningCount"].should.equal(0)
response["services"][0]["deployments"][0]["status"].should.equal("PRIMARY")
(
datetime.now()
- response["services"][0]["deployments"][0]["createdAt"].replace(tzinfo=None)
).seconds.should.be.within(0, 10)
(
datetime.now()
- response["services"][0]["deployments"][0]["updatedAt"].replace(tzinfo=None)
).seconds.should.be.within(0, 10)
response = client.describe_services(
cluster="test_ecs_cluster",
services=[
"test_ecs_service1",
"arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service2",
],
include=["TAGS"],
)
response["services"][0]["tags"].should.equal(
[{"key": "Name", "value": "test_ecs_service1"}]
)
response["services"][1]["tags"].should.equal([])
@mock_ecs
def test_describe_services_scheduling_strategy():
client = boto3.client("ecs", region_name="us-east-1")
_ = client.create_cluster(clusterName="test_ecs_cluster")
_ = client.register_task_definition(
family="test_ecs_task",
containerDefinitions=[
{
"name": "hello_world",
"image": "docker/hello-world:latest",
"cpu": 1024,
"memory": 400,
"essential": True,
"environment": [
{"name": "AWS_ACCESS_KEY_ID", "value": "SOME_ACCESS_KEY"}
],
"logConfiguration": {"logDriver": "json-file"},
}
],
)
_ = client.create_service(
cluster="test_ecs_cluster",
serviceName="test_ecs_service1",
taskDefinition="test_ecs_task",
desiredCount=2,
)
_ = client.create_service(
cluster="test_ecs_cluster",
serviceName="test_ecs_service2",
taskDefinition="test_ecs_task",
desiredCount=2,
schedulingStrategy="DAEMON",
)
_ = client.create_service(
cluster="test_ecs_cluster",
serviceName="test_ecs_service3",
taskDefinition="test_ecs_task",
desiredCount=2,
)
response = client.describe_services(
cluster="test_ecs_cluster",
services=[
"test_ecs_service1",
"arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service2",
"test_ecs_service3",
],
)
len(response["services"]).should.equal(3)
response["services"][0]["serviceArn"].should.equal(
"arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service1"
)
response["services"][0]["serviceName"].should.equal("test_ecs_service1")
response["services"][1]["serviceArn"].should.equal(
"arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service2"
)
response["services"][1]["serviceName"].should.equal("test_ecs_service2")
response["services"][0]["deployments"][0]["desiredCount"].should.equal(2)
response["services"][0]["deployments"][0]["pendingCount"].should.equal(2)
response["services"][0]["deployments"][0]["runningCount"].should.equal(0)
response["services"][0]["deployments"][0]["status"].should.equal("PRIMARY")
response["services"][0]["schedulingStrategy"].should.equal("REPLICA")
response["services"][1]["schedulingStrategy"].should.equal("DAEMON")
response["services"][2]["schedulingStrategy"].should.equal("REPLICA")
@mock_ecs
def test_update_service():
client = boto3.client("ecs", region_name="us-east-1")
_ = client.create_cluster(clusterName="test_ecs_cluster")
_ = client.register_task_definition(
family="test_ecs_task",
containerDefinitions=[
{
"name": "hello_world",
"image": "docker/hello-world:latest",
"cpu": 1024,
"memory": 400,
"essential": True,
"environment": [
{"name": "AWS_ACCESS_KEY_ID", "value": "SOME_ACCESS_KEY"}
],
"logConfiguration": {"logDriver": "json-file"},
}
],
)
response = client.create_service(
cluster="test_ecs_cluster",
serviceName="test_ecs_service",
taskDefinition="test_ecs_task",
desiredCount=2,
)
response["service"]["desiredCount"].should.equal(2)
response = client.update_service(
cluster="test_ecs_cluster",
service="test_ecs_service",
taskDefinition="test_ecs_task",
desiredCount=0,
)
response["service"]["desiredCount"].should.equal(0)
response["service"]["schedulingStrategy"].should.equal("REPLICA")
# Verify we can pass the ARNs of the cluster and service
response = client.update_service(
cluster=response["service"]["clusterArn"],
service=response["service"]["serviceArn"],
taskDefinition="test_ecs_task",
desiredCount=1,
)
response["service"]["desiredCount"].should.equal(1)
@mock_ecs
def test_update_missing_service():
client = boto3.client("ecs", region_name="us-east-1")
_ = client.create_cluster(clusterName="test_ecs_cluster")
client.update_service.when.called_with(
cluster="test_ecs_cluster",
service="test_ecs_service",
taskDefinition="test_ecs_task",
desiredCount=0,
).should.throw(ClientError)
@mock_ecs
def test_delete_service():
client = boto3.client("ecs", region_name="us-east-1")
_ = client.create_cluster(clusterName="test_ecs_cluster")
_ = client.register_task_definition(
family="test_ecs_task",
containerDefinitions=[
{
"name": "hello_world",
"image": "docker/hello-world:latest",
"cpu": 1024,
"memory": 400,
"essential": True,
"environment": [
{"name": "AWS_ACCESS_KEY_ID", "value": "SOME_ACCESS_KEY"}
],
"logConfiguration": {"logDriver": "json-file"},
}
],
)
_ = client.create_service(
cluster="test_ecs_cluster",
serviceName="test_ecs_service",
taskDefinition="test_ecs_task",
desiredCount=2,
)
_ = client.update_service(
cluster="test_ecs_cluster", service="test_ecs_service", desiredCount=0
)
response = client.delete_service(
cluster="test_ecs_cluster", service="test_ecs_service"
)
response["service"]["clusterArn"].should.equal(
"arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster"
)
response["service"]["desiredCount"].should.equal(0)
len(response["service"]["events"]).should.equal(0)
len(response["service"]["loadBalancers"]).should.equal(0)
response["service"]["pendingCount"].should.equal(0)
response["service"]["runningCount"].should.equal(0)
response["service"]["serviceArn"].should.equal(
"arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service"
)
response["service"]["serviceName"].should.equal("test_ecs_service")
response["service"]["status"].should.equal("ACTIVE")
response["service"]["schedulingStrategy"].should.equal("REPLICA")
response["service"]["taskDefinition"].should.equal(
"arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1"
)
@mock_ecs
def test_delete_service_exceptions():
client = boto3.client("ecs", region_name="us-east-1")
# Raises ClusterNotFoundException because "default" is not a cluster
client.delete_service.when.called_with(service="not_as_service").should.throw(
ClientError, ClusterNotFoundException().message
)
_ = client.create_cluster()
client.delete_service.when.called_with(service="not_as_service").should.throw(
ClientError, ServiceNotFoundException().message
)
_ = client.register_task_definition(
family="test_ecs_task",
containerDefinitions=[
{
"name": "hello_world",
"image": "docker/hello-world:latest",
"cpu": 1024,
"memory": 400,
}
],
)
_ = client.create_service(
serviceName="test_ecs_service", taskDefinition="test_ecs_task", desiredCount=1,
)
client.delete_service.when.called_with(service="test_ecs_service").should.throw(
ClientError,
InvalidParameterException(
"The service cannot be stopped while it is scaled above 0."
).message,
)
@mock_ecs
def test_update_service_exceptions():
client = boto3.client("ecs", region_name="us-east-1")
client.update_service.when.called_with(
service="not_a_service", desiredCount=0
).should.throw(ClientError, ClusterNotFoundException().message)
_ = client.create_cluster()
client.update_service.when.called_with(
service="not_a_service", desiredCount=0
).should.throw(ClientError, ServiceNotFoundException().message)
@mock_ec2
@mock_ecs
def test_register_container_instance():
ecs_client = boto3.client("ecs", region_name="us-east-1")
ec2 = boto3.resource("ec2", region_name="us-east-1")
test_cluster_name = "test_ecs_cluster"
_ = ecs_client.create_cluster(clusterName=test_cluster_name)
test_instance = ec2.create_instances(
ImageId="ami-1234abcd", MinCount=1, MaxCount=1
)[0]
instance_id_document = json.dumps(
ec2_utils.generate_instance_identity_document(test_instance)
)
response = ecs_client.register_container_instance(
cluster=test_cluster_name, instanceIdentityDocument=instance_id_document
)
response["containerInstance"]["ec2InstanceId"].should.equal(test_instance.id)
full_arn = response["containerInstance"]["containerInstanceArn"]
arn_part = full_arn.split("/")
arn_part[0].should.equal("arn:aws:ecs:us-east-1:012345678910:container-instance")
arn_part[1].should.equal(str(UUID(arn_part[1])))
response["containerInstance"]["status"].should.equal("ACTIVE")
len(response["containerInstance"]["registeredResources"]).should.equal(4)
len(response["containerInstance"]["remainingResources"]).should.equal(4)
response["containerInstance"]["agentConnected"].should.equal(True)
response["containerInstance"]["versionInfo"]["agentVersion"].should.equal("1.0.0")
response["containerInstance"]["versionInfo"]["agentHash"].should.equal("4023248")
response["containerInstance"]["versionInfo"]["dockerVersion"].should.equal(
"DockerVersion: 1.5.0"
)
@mock_ec2
@mock_ecs
def test_deregister_container_instance():
ecs_client = boto3.client("ecs", region_name="us-east-1")
ec2 = boto3.resource("ec2", region_name="us-east-1")
test_cluster_name = "test_ecs_cluster"
_ = ecs_client.create_cluster(clusterName=test_cluster_name)
test_instance = ec2.create_instances(
ImageId="ami-1234abcd", MinCount=1, MaxCount=1
)[0]
instance_id_document = json.dumps(
ec2_utils.generate_instance_identity_document(test_instance)
)
response = ecs_client.register_container_instance(
cluster=test_cluster_name, instanceIdentityDocument=instance_id_document
)
container_instance_id = response["containerInstance"]["containerInstanceArn"]
response = ecs_client.deregister_container_instance(
cluster=test_cluster_name, containerInstance=container_instance_id
)
container_instances_response = ecs_client.list_container_instances(
cluster=test_cluster_name
)
len(container_instances_response["containerInstanceArns"]).should.equal(0)
response = ecs_client.register_container_instance(
cluster=test_cluster_name, instanceIdentityDocument=instance_id_document
)
container_instance_id = response["containerInstance"]["containerInstanceArn"]
_ = ecs_client.register_task_definition(
family="test_ecs_task",
containerDefinitions=[
{
"name": "hello_world",
"image": "docker/hello-world:latest",
"cpu": 1024,
"memory": 400,
"essential": True,
"environment": [
{"name": "AWS_ACCESS_KEY_ID", "value": "SOME_ACCESS_KEY"}
],
"logConfiguration": {"logDriver": "json-file"},
}
],
)
response = ecs_client.start_task(
cluster="test_ecs_cluster",
taskDefinition="test_ecs_task",
overrides={},
containerInstances=[container_instance_id],
startedBy="moto",
)
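    # Deregistering must fail while the instance still has a running task; force=True below overrides this.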
    with pytest.raises(Exception):
        ecs_client.deregister_container_instance(
            cluster=test_cluster_name, containerInstance=container_instance_id
        )
container_instances_response = ecs_client.list_container_instances(
cluster=test_cluster_name
)
len(container_instances_response["containerInstanceArns"]).should.equal(1)
ecs_client.deregister_container_instance(
cluster=test_cluster_name, containerInstance=container_instance_id, force=True
)
container_instances_response = ecs_client.list_container_instances(
cluster=test_cluster_name
)
len(container_instances_response["containerInstanceArns"]).should.equal(0)
@mock_ec2
@mock_ecs
def test_list_container_instances():
ecs_client = boto3.client("ecs", region_name="us-east-1")
ec2 = boto3.resource("ec2", region_name="us-east-1")
test_cluster_name = "test_ecs_cluster"
_ = ecs_client.create_cluster(clusterName=test_cluster_name)
instance_to_create = 3
test_instance_arns = []
for i in range(0, instance_to_create):
test_instance = ec2.create_instances(
ImageId="ami-1234abcd", MinCount=1, MaxCount=1
)[0]
instance_id_document = json.dumps(
ec2_utils.generate_instance_identity_document(test_instance)
)
response = ecs_client.register_container_instance(
cluster=test_cluster_name, instanceIdentityDocument=instance_id_document
)
test_instance_arns.append(response["containerInstance"]["containerInstanceArn"])
response = ecs_client.list_container_instances(cluster=test_cluster_name)
len(response["containerInstanceArns"]).should.equal(instance_to_create)
for arn in test_instance_arns:
response["containerInstanceArns"].should.contain(arn)
@mock_ec2
@mock_ecs
def test_describe_container_instances():
ecs_client = boto3.client("ecs", region_name="us-east-1")
ec2 = boto3.resource("ec2", region_name="us-east-1")
test_cluster_name = "test_ecs_cluster"
_ = ecs_client.create_cluster(clusterName=test_cluster_name)
instance_to_create = 3
test_instance_arns = []
for i in range(0, instance_to_create):
test_instance = ec2.create_instances(
ImageId="ami-1234abcd", MinCount=1, MaxCount=1
)[0]
instance_id_document = json.dumps(
ec2_utils.generate_instance_identity_document(test_instance)
)
response = ecs_client.register_container_instance(
cluster=test_cluster_name, instanceIdentityDocument=instance_id_document
)
test_instance_arns.append(response["containerInstance"]["containerInstanceArn"])
    test_instance_ids = [arn.split("/")[1] for arn in test_instance_arns]
response = ecs_client.describe_container_instances(
cluster=test_cluster_name, containerInstances=test_instance_ids
)
len(response["failures"]).should.equal(0)
len(response["containerInstances"]).should.equal(instance_to_create)
response_arns = [
ci["containerInstanceArn"] for ci in response["containerInstances"]
]
for arn in test_instance_arns:
response_arns.should.contain(arn)
for instance in response["containerInstances"]:
instance.keys().should.contain("runningTasksCount")
instance.keys().should.contain("pendingTasksCount")
instance["registeredAt"].should.be.a("datetime.datetime")
    with pytest.raises(ClientError):
ecs_client.describe_container_instances(
cluster=test_cluster_name, containerInstances=[]
)
@mock_ecs
def test_describe_container_instances_exceptions():
client = boto3.client("ecs", region_name="us-east-1")
client.describe_container_instances.when.called_with(
containerInstances=[]
).should.throw(ClientError, ClusterNotFoundException().message)
_ = client.create_cluster()
client.describe_container_instances.when.called_with(
containerInstances=[]
).should.throw(
ClientError,
InvalidParameterException("Container Instances cannot be empty.").message,
)
@mock_ec2
@mock_ecs
def test_update_container_instances_state():
ecs_client = boto3.client("ecs", region_name="us-east-1")
ec2 = boto3.resource("ec2", region_name="us-east-1")
test_cluster_name = "test_ecs_cluster"
_ = ecs_client.create_cluster(clusterName=test_cluster_name)
instance_to_create = 3
test_instance_arns = []
for i in range(0, instance_to_create):
test_instance = ec2.create_instances(
ImageId="ami-1234abcd", MinCount=1, MaxCount=1
)[0]
instance_id_document = json.dumps(
ec2_utils.generate_instance_identity_document(test_instance)
)
response = ecs_client.register_container_instance(
cluster=test_cluster_name, instanceIdentityDocument=instance_id_document
)
test_instance_arns.append(response["containerInstance"]["containerInstanceArn"])
    test_instance_ids = [arn.split("/")[1] for arn in test_instance_arns]
response = ecs_client.update_container_instances_state(
cluster=test_cluster_name,
containerInstances=test_instance_ids,
status="DRAINING",
)
len(response["failures"]).should.equal(0)
len(response["containerInstances"]).should.equal(instance_to_create)
response_statuses = [ci["status"] for ci in response["containerInstances"]]
for status in response_statuses:
status.should.equal("DRAINING")
response = ecs_client.update_container_instances_state(
cluster=test_cluster_name,
containerInstances=test_instance_ids,
status="DRAINING",
)
len(response["failures"]).should.equal(0)
len(response["containerInstances"]).should.equal(instance_to_create)
response_statuses = [ci["status"] for ci in response["containerInstances"]]
for status in response_statuses:
status.should.equal("DRAINING")
response = ecs_client.update_container_instances_state(
cluster=test_cluster_name, containerInstances=test_instance_ids, status="ACTIVE"
)
len(response["failures"]).should.equal(0)
len(response["containerInstances"]).should.equal(instance_to_create)
response_statuses = [ci["status"] for ci in response["containerInstances"]]
for status in response_statuses:
status.should.equal("ACTIVE")
ecs_client.update_container_instances_state.when.called_with(
cluster=test_cluster_name,
containerInstances=test_instance_ids,
status="test_status",
).should.throw(Exception)
@mock_ec2
@mock_ecs
def test_update_container_instances_state_by_arn():
ecs_client = boto3.client("ecs", region_name="us-east-1")
ec2 = boto3.resource("ec2", region_name="us-east-1")
test_cluster_name = "test_ecs_cluster"
_ = ecs_client.create_cluster(clusterName=test_cluster_name)
instance_to_create = 3
test_instance_arns = []
for i in range(0, instance_to_create):
test_instance = ec2.create_instances(
ImageId="ami-1234abcd", MinCount=1, MaxCount=1
)[0]
instance_id_document = json.dumps(
ec2_utils.generate_instance_identity_document(test_instance)
)
response = ecs_client.register_container_instance(
cluster=test_cluster_name, instanceIdentityDocument=instance_id_document
)
test_instance_arns.append(response["containerInstance"]["containerInstanceArn"])
response = ecs_client.update_container_instances_state(
cluster=test_cluster_name,
containerInstances=test_instance_arns,
status="DRAINING",
)
len(response["failures"]).should.equal(0)
len(response["containerInstances"]).should.equal(instance_to_create)
response_statuses = [ci["status"] for ci in response["containerInstances"]]
for status in response_statuses:
status.should.equal("DRAINING")
response = ecs_client.update_container_instances_state(
cluster=test_cluster_name,
containerInstances=test_instance_arns,
status="DRAINING",
)
len(response["failures"]).should.equal(0)
len(response["containerInstances"]).should.equal(instance_to_create)
response_statuses = [ci["status"] for ci in response["containerInstances"]]
for status in response_statuses:
status.should.equal("DRAINING")
response = ecs_client.update_container_instances_state(
cluster=test_cluster_name,
containerInstances=test_instance_arns,
status="ACTIVE",
)
len(response["failures"]).should.equal(0)
len(response["containerInstances"]).should.equal(instance_to_create)
response_statuses = [ci["status"] for ci in response["containerInstances"]]
for status in response_statuses:
status.should.equal("ACTIVE")
ecs_client.update_container_instances_state.when.called_with(
cluster=test_cluster_name,
containerInstances=test_instance_arns,
status="test_status",
).should.throw(Exception)
@mock_ec2
@mock_ecs
def test_run_task():
client = boto3.client("ecs", region_name="us-east-1")
ec2 = boto3.resource("ec2", region_name="us-east-1")
test_cluster_name = "test_ecs_cluster"
_ = client.create_cluster(clusterName=test_cluster_name)
test_instance = ec2.create_instances(
ImageId="ami-1234abcd", MinCount=1, MaxCount=1
)[0]
instance_id_document = json.dumps(
ec2_utils.generate_instance_identity_document(test_instance)
)
response = client.register_container_instance(
cluster=test_cluster_name, instanceIdentityDocument=instance_id_document
)
_ = client.register_task_definition(
family="test_ecs_task",
containerDefinitions=[
{
"name": "hello_world",
"image": "docker/hello-world:latest",
"cpu": 1024,
"memory": 400,
"essential": True,
"environment": [
{"name": "AWS_ACCESS_KEY_ID", "value": "SOME_ACCESS_KEY"}
],
"logConfiguration": {"logDriver": "json-file"},
}
],
)
response = client.run_task(
cluster="test_ecs_cluster",
overrides={},
taskDefinition="test_ecs_task",
count=2,
startedBy="moto",
)
len(response["tasks"]).should.equal(2)
response["tasks"][0]["taskArn"].should.contain(
"arn:aws:ecs:us-east-1:012345678910:task/"
)
response["tasks"][0]["clusterArn"].should.equal(
"arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster"
)
response["tasks"][0]["taskDefinitionArn"].should.equal(
"arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1"
)
response["tasks"][0]["containerInstanceArn"].should.contain(
"arn:aws:ecs:us-east-1:012345678910:container-instance/"
)
response["tasks"][0]["overrides"].should.equal({})
response["tasks"][0]["lastStatus"].should.equal("RUNNING")
response["tasks"][0]["desiredStatus"].should.equal("RUNNING")
response["tasks"][0]["startedBy"].should.equal("moto")
response["tasks"][0]["stoppedReason"].should.equal("")
@mock_ec2
@mock_ecs
def test_run_task_default_cluster():
client = boto3.client("ecs", region_name="us-east-1")
ec2 = boto3.resource("ec2", region_name="us-east-1")
test_cluster_name = "default"
_ = client.create_cluster(clusterName=test_cluster_name)
test_instance = ec2.create_instances(
ImageId="ami-1234abcd", MinCount=1, MaxCount=1
)[0]
instance_id_document = json.dumps(
ec2_utils.generate_instance_identity_document(test_instance)
)
response = client.register_container_instance(
cluster=test_cluster_name, instanceIdentityDocument=instance_id_document
)
_ = client.register_task_definition(
family="test_ecs_task",
containerDefinitions=[
{
"name": "hello_world",
"image": "docker/hello-world:latest",
"cpu": 1024,
"memory": 400,
"essential": True,
"environment": [
{"name": "AWS_ACCESS_KEY_ID", "value": "SOME_ACCESS_KEY"}
],
"logConfiguration": {"logDriver": "json-file"},
}
],
)
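    # No cluster argument is passed, so run_task should fall back to the "default" cluster.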
response = client.run_task(
launchType="FARGATE",
overrides={},
taskDefinition="test_ecs_task",
count=2,
startedBy="moto",
)
len(response["tasks"]).should.equal(2)
response["tasks"][0]["taskArn"].should.contain(
"arn:aws:ecs:us-east-1:012345678910:task/"
)
response["tasks"][0]["clusterArn"].should.equal(
"arn:aws:ecs:us-east-1:012345678910:cluster/default"
)
response["tasks"][0]["taskDefinitionArn"].should.equal(
"arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1"
)
response["tasks"][0]["containerInstanceArn"].should.contain(
"arn:aws:ecs:us-east-1:012345678910:container-instance/"
)
response["tasks"][0]["overrides"].should.equal({})
response["tasks"][0]["lastStatus"].should.equal("RUNNING")
response["tasks"][0]["desiredStatus"].should.equal("RUNNING")
response["tasks"][0]["startedBy"].should.equal("moto")
response["tasks"][0]["stoppedReason"].should.equal("")
@mock_ecs
def test_run_task_exceptions():
client = boto3.client("ecs", region_name="us-east-1")
_ = client.register_task_definition(
family="test_ecs_task",
containerDefinitions=[
{
"name": "hello_world",
"image": "docker/hello-world:latest",
"cpu": 1024,
"memory": 400,
}
],
)
client.run_task.when.called_with(
cluster="not_a_cluster", taskDefinition="test_ecs_task"
).should.throw(ClientError, ClusterNotFoundException().message)
@mock_ec2
@mock_ecs
def test_start_task():
client = boto3.client("ecs", region_name="us-east-1")
ec2 = boto3.resource("ec2", region_name="us-east-1")
test_cluster_name = "test_ecs_cluster"
_ = client.create_cluster(clusterName=test_cluster_name)
test_instance = ec2.create_instances(
ImageId="ami-1234abcd", MinCount=1, MaxCount=1
)[0]
instance_id_document = json.dumps(
ec2_utils.generate_instance_identity_document(test_instance)
)
response = client.register_container_instance(
cluster=test_cluster_name, instanceIdentityDocument=instance_id_document
)
container_instances = client.list_container_instances(cluster=test_cluster_name)
container_instance_id = container_instances["containerInstanceArns"][0].split("/")[
-1
]
_ = client.register_task_definition(
family="test_ecs_task",
containerDefinitions=[
{
"name": "hello_world",
"image": "docker/hello-world:latest",
"cpu": 1024,
"memory": 400,
"essential": True,
"environment": [
{"name": "AWS_ACCESS_KEY_ID", "value": "SOME_ACCESS_KEY"}
],
"logConfiguration": {"logDriver": "json-file"},
}
],
)
response = client.start_task(
cluster="test_ecs_cluster",
taskDefinition="test_ecs_task",
overrides={},
containerInstances=[container_instance_id],
startedBy="moto",
)
len(response["tasks"]).should.equal(1)
response["tasks"][0]["taskArn"].should.contain(
"arn:aws:ecs:us-east-1:012345678910:task/"
)
response["tasks"][0]["clusterArn"].should.equal(
"arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster"
)
response["tasks"][0]["taskDefinitionArn"].should.equal(
"arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1"
)
response["tasks"][0]["containerInstanceArn"].should.equal(
"arn:aws:ecs:us-east-1:012345678910:container-instance/{0}".format(
container_instance_id
)
)
response["tasks"][0]["overrides"].should.equal({})
response["tasks"][0]["lastStatus"].should.equal("RUNNING")
response["tasks"][0]["desiredStatus"].should.equal("RUNNING")
response["tasks"][0]["startedBy"].should.equal("moto")
response["tasks"][0]["stoppedReason"].should.equal("")
@mock_ecs
def test_start_task_exceptions():
client = boto3.client("ecs", region_name="us-east-1")
_ = client.register_task_definition(
family="test_ecs_task",
containerDefinitions=[
{
"name": "hello_world",
"image": "docker/hello-world:latest",
"cpu": 1024,
"memory": 400,
}
],
)
client.start_task.when.called_with(
taskDefinition="test_ecs_task", containerInstances=["not_a_container_instance"]
).should.throw(ClientError, ClusterNotFoundException().message)
_ = client.create_cluster()
client.start_task.when.called_with(
taskDefinition="test_ecs_task", containerInstances=[]
).should.throw(
        ClientError,
        InvalidParameterException("Container Instances cannot be empty.").message,
    )
@mock_ec2
@mock_ecs
def test_list_tasks():
client = boto3.client("ecs", region_name="us-east-1")
ec2 = boto3.resource("ec2", region_name="us-east-1")
_ = client.create_cluster()
test_instance = ec2.create_instances(
ImageId="ami-1234abcd", MinCount=1, MaxCount=1
)[0]
instance_id_document = json.dumps(
ec2_utils.generate_instance_identity_document(test_instance)
)
_ = client.register_container_instance(
instanceIdentityDocument=instance_id_document
)
container_instances = client.list_container_instances()
container_instance_id = container_instances["containerInstanceArns"][0].split("/")[
-1
]
_ = client.register_task_definition(
family="test_ecs_task",
containerDefinitions=[
{
"name": "hello_world",
"image": "docker/hello-world:latest",
"cpu": 1024,
"memory": 400,
"essential": True,
"environment": [
{"name": "AWS_ACCESS_KEY_ID", "value": "SOME_ACCESS_KEY"}
],
"logConfiguration": {"logDriver": "json-file"},
}
],
)
_ = client.start_task(
taskDefinition="test_ecs_task",
overrides={},
containerInstances=[container_instance_id],
startedBy="foo",
)
_ = client.start_task(
taskDefinition="test_ecs_task",
overrides={},
containerInstances=[container_instance_id],
startedBy="bar",
)
    len(client.list_tasks()["taskArns"]).should.equal(2)
    len(client.list_tasks(startedBy="foo")["taskArns"]).should.equal(1)
@mock_ecs
def test_list_tasks_exceptions():
client = boto3.client("ecs", region_name="us-east-1")
client.list_tasks.when.called_with(cluster="not_a_cluster").should.throw(
ClientError, ClusterNotFoundException().message
)
@mock_ec2
@mock_ecs
def test_describe_tasks():
client = boto3.client("ecs", region_name="us-east-1")
ec2 = boto3.resource("ec2", region_name="us-east-1")
test_cluster_name = "test_ecs_cluster"
_ = client.create_cluster(clusterName=test_cluster_name)
test_instance = ec2.create_instances(
ImageId="ami-1234abcd", MinCount=1, MaxCount=1
)[0]
instance_id_document = json.dumps(
ec2_utils.generate_instance_identity_document(test_instance)
)
response = client.register_container_instance(
cluster=test_cluster_name, instanceIdentityDocument=instance_id_document
)
_ = client.register_task_definition(
family="test_ecs_task",
containerDefinitions=[
{
"name": "hello_world",
"image": "docker/hello-world:latest",
"cpu": 1024,
"memory": 400,
"essential": True,
"environment": [
{"name": "AWS_ACCESS_KEY_ID", "value": "SOME_ACCESS_KEY"}
],
"logConfiguration": {"logDriver": "json-file"},
}
],
)
tasks_arns = [
task["taskArn"]
for task in client.run_task(
cluster="test_ecs_cluster",
overrides={},
taskDefinition="test_ecs_task",
count=2,
startedBy="moto",
)["tasks"]
]
response = client.describe_tasks(cluster="test_ecs_cluster", tasks=tasks_arns)
len(response["tasks"]).should.equal(2)
set(
[response["tasks"][0]["taskArn"], response["tasks"][1]["taskArn"]]
).should.equal(set(tasks_arns))
# Test we can pass task ids instead of ARNs
response = client.describe_tasks(
cluster="test_ecs_cluster", tasks=[tasks_arns[0].split("/")[-1]]
)
len(response["tasks"]).should.equal(1)
@mock_ecs
def test_describe_tasks_exceptions():
client = boto3.client("ecs", region_name="us-east-1")
client.describe_tasks.when.called_with(tasks=[]).should.throw(
ClientError, ClusterNotFoundException().message
)
_ = client.create_cluster()
client.describe_tasks.when.called_with(tasks=[]).should.throw(
ClientError, InvalidParameterException("Tasks cannot be empty.").message
)
@mock_ecs
def test_describe_task_definition_by_family():
client = boto3.client("ecs", region_name="us-east-1")
container_definition = {
"name": "hello_world",
"image": "docker/hello-world:latest",
"cpu": 1024,
"memory": 400,
"essential": True,
"environment": [{"name": "AWS_ACCESS_KEY_ID", "value": "SOME_ACCESS_KEY"}],
"logConfiguration": {"logDriver": "json-file"},
}
task_definition = client.register_task_definition(
family="test_ecs_task", containerDefinitions=[container_definition]
)
    family = task_definition["taskDefinition"]["family"]
    task = client.describe_task_definition(taskDefinition=family)["taskDefinition"]
task["containerDefinitions"][0].should.equal(container_definition)
task["taskDefinitionArn"].should.equal(
"arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task2:1"
)
task["volumes"].should.equal([])
@mock_ec2
@mock_ecs
def test_stop_task():
client = boto3.client("ecs", region_name="us-east-1")
ec2 = boto3.resource("ec2", region_name="us-east-1")
test_cluster_name = "test_ecs_cluster"
_ = client.create_cluster(clusterName=test_cluster_name)
test_instance = ec2.create_instances(
ImageId="ami-1234abcd", MinCount=1, MaxCount=1
)[0]
instance_id_document = json.dumps(
ec2_utils.generate_instance_identity_document(test_instance)
)
_ = client.register_container_instance(
cluster=test_cluster_name, instanceIdentityDocument=instance_id_document
)
_ = client.register_task_definition(
family="test_ecs_task",
containerDefinitions=[
{
"name": "hello_world",
"image": "docker/hello-world:latest",
"cpu": 1024,
"memory": 400,
"essential": True,
"environment": [
{"name": "AWS_ACCESS_KEY_ID", "value": "SOME_ACCESS_KEY"}
],
"logConfiguration": {"logDriver": "json-file"},
}
],
)
run_response = client.run_task(
cluster="test_ecs_cluster",
overrides={},
taskDefinition="test_ecs_task",
count=1,
startedBy="moto",
)
stop_response = client.stop_task(
cluster="test_ecs_cluster",
task=run_response["tasks"][0].get("taskArn"),
reason="moto testing",
)
stop_response["task"]["taskArn"].should.equal(
run_response["tasks"][0].get("taskArn")
)
stop_response["task"]["lastStatus"].should.equal("STOPPED")
stop_response["task"]["desiredStatus"].should.equal("STOPPED")
stop_response["task"]["stoppedReason"].should.equal("moto testing")
@mock_ecs
def test_stop_task_exceptions():
client = boto3.client("ecs", region_name="us-east-1")
client.stop_task.when.called_with(task="fake_task").should.throw(
ClientError, ClusterNotFoundException().message
)
@mock_ec2
@mock_ecs
def test_resource_reservation_and_release():
client = boto3.client("ecs", region_name="us-east-1")
ec2 = boto3.resource("ec2", region_name="us-east-1")
test_cluster_name = "test_ecs_cluster"
_ = client.create_cluster(clusterName=test_cluster_name)
test_instance = ec2.create_instances(
ImageId="ami-1234abcd", MinCount=1, MaxCount=1
)[0]
instance_id_document = json.dumps(
ec2_utils.generate_instance_identity_document(test_instance)
)
_ = client.register_container_instance(
cluster=test_cluster_name, instanceIdentityDocument=instance_id_document
)
_ = client.register_task_definition(
family="test_ecs_task",
containerDefinitions=[
{
"name": "hello_world",
"image": "docker/hello-world:latest",
"cpu": 1024,
"memory": 400,
"essential": True,
"environment": [
{"name": "AWS_ACCESS_KEY_ID", "value": "SOME_ACCESS_KEY"}
],
"logConfiguration": {"logDriver": "json-file"},
"portMappings": [{"hostPort": 80, "containerPort": 8080}],
}
],
)
run_response = client.run_task(
cluster="test_ecs_cluster",
overrides={},
taskDefinition="test_ecs_task",
count=1,
startedBy="moto",
)
container_instance_arn = run_response["tasks"][0].get("containerInstanceArn")
container_instance_description = client.describe_container_instances(
cluster="test_ecs_cluster", containerInstances=[container_instance_arn]
)["containerInstances"][0]
remaining_resources, registered_resources = _fetch_container_instance_resources(
container_instance_description
)
remaining_resources["CPU"].should.equal(registered_resources["CPU"] - 1024)
remaining_resources["MEMORY"].should.equal(registered_resources["MEMORY"] - 400)
registered_resources["PORTS"].append("80")
remaining_resources["PORTS"].should.equal(registered_resources["PORTS"])
container_instance_description["runningTasksCount"].should.equal(1)
client.stop_task(
cluster="test_ecs_cluster",
task=run_response["tasks"][0].get("taskArn"),
reason="moto testing",
)
container_instance_description = client.describe_container_instances(
cluster="test_ecs_cluster", containerInstances=[container_instance_arn]
)["containerInstances"][0]
remaining_resources, registered_resources = _fetch_container_instance_resources(
container_instance_description
)
remaining_resources["CPU"].should.equal(registered_resources["CPU"])
remaining_resources["MEMORY"].should.equal(registered_resources["MEMORY"])
remaining_resources["PORTS"].should.equal(registered_resources["PORTS"])
container_instance_description["runningTasksCount"].should.equal(0)
@mock_ec2
@mock_ecs
def test_resource_reservation_and_release_memory_reservation():
client = boto3.client("ecs", region_name="us-east-1")
ec2 = boto3.resource("ec2", region_name="us-east-1")
test_cluster_name = "test_ecs_cluster"
_ = client.create_cluster(clusterName=test_cluster_name)
test_instance = ec2.create_instances(
ImageId="ami-1234abcd", MinCount=1, MaxCount=1
)[0]
instance_id_document = json.dumps(
ec2_utils.generate_instance_identity_document(test_instance)
)
_ = client.register_container_instance(
cluster=test_cluster_name, instanceIdentityDocument=instance_id_document
)
_ = client.register_task_definition(
family="test_ecs_task",
containerDefinitions=[
{
"name": "hello_world",
"image": "docker/hello-world:latest",
"memoryReservation": 400,
"essential": True,
"environment": [
{"name": "AWS_ACCESS_KEY_ID", "value": "SOME_ACCESS_KEY"}
],
"logConfiguration": {"logDriver": "json-file"},
"portMappings": [{"containerPort": 8080}],
}
],
)
run_response = client.run_task(
cluster="test_ecs_cluster",
overrides={},
taskDefinition="test_ecs_task",
count=1,
startedBy="moto",
)
container_instance_arn = run_response["tasks"][0].get("containerInstanceArn")
container_instance_description = client.describe_container_instances(
cluster="test_ecs_cluster", containerInstances=[container_instance_arn]
)["containerInstances"][0]
remaining_resources, registered_resources = _fetch_container_instance_resources(
container_instance_description
)
remaining_resources["CPU"].should.equal(registered_resources["CPU"])
remaining_resources["MEMORY"].should.equal(registered_resources["MEMORY"] - 400)
remaining_resources["PORTS"].should.equal(registered_resources["PORTS"])
container_instance_description["runningTasksCount"].should.equal(1)
client.stop_task(
cluster="test_ecs_cluster",
task=run_response["tasks"][0].get("taskArn"),
reason="moto testing",
)
container_instance_description = client.describe_container_instances(
cluster="test_ecs_cluster", containerInstances=[container_instance_arn]
)["containerInstances"][0]
remaining_resources, registered_resources = _fetch_container_instance_resources(
container_instance_description
)
remaining_resources["CPU"].should.equal(registered_resources["CPU"])
remaining_resources["MEMORY"].should.equal(registered_resources["MEMORY"])
remaining_resources["PORTS"].should.equal(registered_resources["PORTS"])
container_instance_description["runningTasksCount"].should.equal(0)
@mock_ec2
@mock_ecs
def test_task_definitions_unable_to_be_placed():
client = boto3.client("ecs", region_name="us-east-1")
ec2 = boto3.resource("ec2", region_name="us-east-1")
test_cluster_name = "test_ecs_cluster"
_ = client.create_cluster(clusterName=test_cluster_name)
test_instance = ec2.create_instances(
ImageId="ami-1234abcd", MinCount=1, MaxCount=1
)[0]
instance_id_document = json.dumps(
ec2_utils.generate_instance_identity_document(test_instance)
)
response = client.register_container_instance(
cluster=test_cluster_name, instanceIdentityDocument=instance_id_document
)
_ = client.register_task_definition(
family="test_ecs_task",
containerDefinitions=[
{
"name": "hello_world",
"image": "docker/hello-world:latest",
"cpu": 5000,
"memory": 40000,
"essential": True,
"environment": [
{"name": "AWS_ACCESS_KEY_ID", "value": "SOME_ACCESS_KEY"}
],
"logConfiguration": {"logDriver": "json-file"},
}
],
)
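    # The task requests more CPU and memory than the instance registered, so no task can be placed.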
response = client.run_task(
cluster="test_ecs_cluster",
overrides={},
taskDefinition="test_ecs_task",
count=2,
startedBy="moto",
)
len(response["tasks"]).should.equal(0)
@mock_ec2
@mock_ecs
def test_task_definitions_with_port_clash():
client = boto3.client("ecs", region_name="us-east-1")
ec2 = boto3.resource("ec2", region_name="us-east-1")
test_cluster_name = "test_ecs_cluster"
_ = client.create_cluster(clusterName=test_cluster_name)
test_instance = ec2.create_instances(
ImageId="ami-1234abcd", MinCount=1, MaxCount=1
)[0]
instance_id_document = json.dumps(
ec2_utils.generate_instance_identity_document(test_instance)
)
response = client.register_container_instance(
cluster=test_cluster_name, instanceIdentityDocument=instance_id_document
)
_ = client.register_task_definition(
family="test_ecs_task",
containerDefinitions=[
{
"name": "hello_world",
"image": "docker/hello-world:latest",
"cpu": 256,
"memory": 512,
"essential": True,
"environment": [
{"name": "AWS_ACCESS_KEY_ID", "value": "SOME_ACCESS_KEY"}
],
"logConfiguration": {"logDriver": "json-file"},
"portMappings": [{"hostPort": 80, "containerPort": 8080}],
}
],
)
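    # Both requested tasks want hostPort 80 on the same instance, so only one can be placed.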
response = client.run_task(
cluster="test_ecs_cluster",
overrides={},
taskDefinition="test_ecs_task",
count=2,
startedBy="moto",
)
len(response["tasks"]).should.equal(1)
response["tasks"][0]["taskArn"].should.contain(
"arn:aws:ecs:us-east-1:012345678910:task/"
)
response["tasks"][0]["clusterArn"].should.equal(
"arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster"
)
response["tasks"][0]["taskDefinitionArn"].should.equal(
"arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1"
)
response["tasks"][0]["containerInstanceArn"].should.contain(
"arn:aws:ecs:us-east-1:012345678910:container-instance/"
)
response["tasks"][0]["overrides"].should.equal({})
response["tasks"][0]["lastStatus"].should.equal("RUNNING")
response["tasks"][0]["desiredStatus"].should.equal("RUNNING")
response["tasks"][0]["startedBy"].should.equal("moto")
response["tasks"][0]["stoppedReason"].should.equal("")
@mock_ec2
@mock_ecs
def test_attributes():
    # Put, list, and delete attributes are combined into one test because of the amount of setup required
ecs_client = boto3.client("ecs", region_name="us-east-1")
ec2 = boto3.resource("ec2", region_name="us-east-1")
test_cluster_name = "test_ecs_cluster"
_ = ecs_client.create_cluster(clusterName=test_cluster_name)
instances = []
test_instance = ec2.create_instances(
ImageId="ami-1234abcd", MinCount=1, MaxCount=1
)[0]
instances.append(test_instance)
instance_id_document = json.dumps(
ec2_utils.generate_instance_identity_document(test_instance)
)
response = ecs_client.register_container_instance(
cluster=test_cluster_name, instanceIdentityDocument=instance_id_document
)
response["containerInstance"]["ec2InstanceId"].should.equal(test_instance.id)
full_arn1 = response["containerInstance"]["containerInstanceArn"]
test_instance = ec2.create_instances(
ImageId="ami-1234abcd", MinCount=1, MaxCount=1
)[0]
instances.append(test_instance)
instance_id_document = json.dumps(
ec2_utils.generate_instance_identity_document(test_instance)
)
response = ecs_client.register_container_instance(
cluster=test_cluster_name, instanceIdentityDocument=instance_id_document
)
response["containerInstance"]["ec2InstanceId"].should.equal(test_instance.id)
full_arn2 = response["containerInstance"]["containerInstanceArn"]
partial_arn2 = full_arn2.rsplit("/", 1)[-1]
full_arn2.should_not.equal(
full_arn1
    )  # uuid1 isn't unique enough when the machine is fast ;-)
    # Set instance 1 with one attribute, instance 2 with another, and both with a third.
ecs_client.put_attributes(
cluster=test_cluster_name,
attributes=[
{"name": "env", "value": "prod"},
{"name": "attr1", "value": "instance1", "targetId": full_arn1},
{
"name": "attr1",
"value": "instance2",
"targetId": partial_arn2,
"targetType": "container-instance",
},
],
)
resp = ecs_client.list_attributes(
cluster=test_cluster_name, targetType="container-instance"
)
attrs = resp["attributes"]
    NUM_CUSTOM_ATTRIBUTES = 4  # two instance-specific attributes plus one global attribute applied to both instances (2 + 1*2)
NUM_DEFAULT_ATTRIBUTES = 4
len(attrs).should.equal(
NUM_CUSTOM_ATTRIBUTES + (NUM_DEFAULT_ATTRIBUTES * len(instances))
)
# Tests that the attrs have been set properly
len(list(filter(lambda item: item["name"] == "env", attrs))).should.equal(2)
len(
list(
filter(
lambda item: item["name"] == "attr1" and item["value"] == "instance1",
attrs,
)
)
).should.equal(1)
ecs_client.delete_attributes(
cluster=test_cluster_name,
attributes=[
{
"name": "attr1",
"value": "instance2",
"targetId": partial_arn2,
"targetType": "container-instance",
}
],
)
NUM_CUSTOM_ATTRIBUTES -= 1
resp = ecs_client.list_attributes(
cluster=test_cluster_name, targetType="container-instance"
)
attrs = resp["attributes"]
len(attrs).should.equal(
NUM_CUSTOM_ATTRIBUTES + (NUM_DEFAULT_ATTRIBUTES * len(instances))
)
@mock_ecs
def test_poll_endpoint():
    ecs_client = boto3.client("ecs", region_name="us-east-1")
    # The endpoint values are placeholders; this only verifies the call doesn't raise.
resp = ecs_client.discover_poll_endpoint(cluster="blah", containerInstance="blah")
resp.should.contain("endpoint")
resp.should.contain("telemetryEndpoint")
@mock_ecs
def test_list_task_definition_families():
client = boto3.client("ecs", region_name="us-east-1")
client.register_task_definition(
family="test_ecs_task",
containerDefinitions=[
{
"name": "hello_world",
"image": "docker/hello-world:latest",
"cpu": 1024,
"memory": 400,
"essential": True,
"environment": [
{"name": "AWS_ACCESS_KEY_ID", "value": "SOME_ACCESS_KEY"}
],
"logConfiguration": {"logDriver": "json-file"},
}
],
)
client.register_task_definition(
family="alt_test_ecs_task",
containerDefinitions=[
{
"name": "hello_world",
"image": "docker/hello-world:latest",
"cpu": 1024,
"memory": 400,
"essential": True,
"environment": [
{"name": "AWS_ACCESS_KEY_ID", "value": "SOME_ACCESS_KEY"}
],
"logConfiguration": {"logDriver": "json-file"},
}
],
)
resp1 = client.list_task_definition_families()
resp2 = client.list_task_definition_families(familyPrefix="alt")
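    # familyPrefix is a prefix match: "alt" matches only "alt_test_ecs_task".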
len(resp1["families"]).should.equal(2)
len(resp2["families"]).should.equal(1)
@mock_ec2
@mock_ecs
def test_default_container_instance_attributes():
ecs_client = boto3.client("ecs", region_name="us-east-1")
ec2 = boto3.resource("ec2", region_name="us-east-1")
test_cluster_name = "test_ecs_cluster"
# Create cluster and EC2 instance
_ = ecs_client.create_cluster(clusterName=test_cluster_name)
test_instance = ec2.create_instances(
ImageId="ami-1234abcd", MinCount=1, MaxCount=1
)[0]
instance_id_document = json.dumps(
ec2_utils.generate_instance_identity_document(test_instance)
)
# Register container instance
response = ecs_client.register_container_instance(
cluster=test_cluster_name, instanceIdentityDocument=instance_id_document
)
response["containerInstance"]["ec2InstanceId"].should.equal(test_instance.id)
full_arn = response["containerInstance"]["containerInstanceArn"]
container_instance_id = full_arn.rsplit("/", 1)[-1]
default_attributes = response["containerInstance"]["attributes"]
assert len(default_attributes) == 4
expected_result = [
{
"name": "ecs.availability-zone",
"value": test_instance.placement["AvailabilityZone"],
},
{"name": "ecs.ami-id", "value": test_instance.image_id},
{"name": "ecs.instance-type", "value": test_instance.instance_type},
{"name": "ecs.os-type", "value": test_instance.platform or "linux"},
]
assert sorted(default_attributes, key=lambda item: item["name"]) == sorted(
expected_result, key=lambda item: item["name"]
)
@mock_ec2
@mock_ecs
def test_describe_container_instances_with_attributes():
ecs_client = boto3.client("ecs", region_name="us-east-1")
ec2 = boto3.resource("ec2", region_name="us-east-1")
test_cluster_name = "test_ecs_cluster"
# Create cluster and EC2 instance
_ = ecs_client.create_cluster(clusterName=test_cluster_name)
test_instance = ec2.create_instances(
ImageId="ami-1234abcd", MinCount=1, MaxCount=1
)[0]
instance_id_document = json.dumps(
ec2_utils.generate_instance_identity_document(test_instance)
)
# Register container instance
response = ecs_client.register_container_instance(
cluster=test_cluster_name, instanceIdentityDocument=instance_id_document
)
response["containerInstance"]["ec2InstanceId"].should.equal(test_instance.id)
full_arn = response["containerInstance"]["containerInstanceArn"]
container_instance_id = full_arn.rsplit("/", 1)[-1]
default_attributes = response["containerInstance"]["attributes"]
# Set attributes on container instance, one without a value
attributes = [
{"name": "env", "value": "prod"},
{
"name": "attr1",
"value": "instance1",
"targetId": container_instance_id,
"targetType": "container-instance",
},
{"name": "attr_without_value"},
]
ecs_client.put_attributes(cluster=test_cluster_name, attributes=attributes)
# Describe container instance, should have attributes previously set
described_instance = ecs_client.describe_container_instances(
cluster=test_cluster_name, containerInstances=[container_instance_id]
)
assert len(described_instance["containerInstances"]) == 1
assert isinstance(described_instance["containerInstances"][0]["attributes"], list)
# Remove additional info passed to put_attributes
cleaned_attributes = []
for attribute in attributes:
attribute.pop("targetId", None)
attribute.pop("targetType", None)
cleaned_attributes.append(attribute)
described_attributes = sorted(
described_instance["containerInstances"][0]["attributes"],
key=lambda item: item["name"],
)
expected_attributes = sorted(
default_attributes + cleaned_attributes, key=lambda item: item["name"]
)
assert described_attributes == expected_attributes
def _fetch_container_instance_resources(container_instance_description):
remaining_resources = {}
registered_resources = {}
remaining_resources_list = container_instance_description["remainingResources"]
registered_resources_list = container_instance_description["registeredResources"]
remaining_resources["CPU"] = [
x["integerValue"] for x in remaining_resources_list if x["name"] == "CPU"
][0]
remaining_resources["MEMORY"] = [
x["integerValue"] for x in remaining_resources_list if x["name"] == "MEMORY"
][0]
remaining_resources["PORTS"] = [
x["stringSetValue"] for x in remaining_resources_list if x["name"] == "PORTS"
][0]
registered_resources["CPU"] = [
x["integerValue"] for x in registered_resources_list if x["name"] == "CPU"
][0]
registered_resources["MEMORY"] = [
x["integerValue"] for x in registered_resources_list if x["name"] == "MEMORY"
][0]
registered_resources["PORTS"] = [
x["stringSetValue"] for x in registered_resources_list if x["name"] == "PORTS"
][0]
return remaining_resources, registered_resources
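# Usage sketch for the helper above (a hedged example, assuming a registered
# container instance and the `ecs_client`/`test_cluster_name` fixtures used in
# these tests):
#   description = ecs_client.describe_container_instances(
#       cluster=test_cluster_name, containerInstances=[container_instance_id]
#   )["containerInstances"][0]
#   remaining, registered = _fetch_container_instance_resources(description)
#   assert remaining["CPU"] <= registered["CPU"]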
@mock_ecs
def test_create_service_load_balancing():
client = boto3.client("ecs", region_name="us-east-1")
client.create_cluster(clusterName="test_ecs_cluster")
client.register_task_definition(
family="test_ecs_task",
containerDefinitions=[
{
"name": "hello_world",
"image": "docker/hello-world:latest",
"cpu": 1024,
"memory": 400,
"essential": True,
"environment": [
{"name": "AWS_ACCESS_KEY_ID", "value": "SOME_ACCESS_KEY"}
],
"logConfiguration": {"logDriver": "json-file"},
}
],
)
response = client.create_service(
cluster="test_ecs_cluster",
serviceName="test_ecs_service",
taskDefinition="test_ecs_task",
desiredCount=2,
loadBalancers=[
{
"targetGroupArn": "test_target_group_arn",
"loadBalancerName": "test_load_balancer_name",
"containerName": "test_container_name",
"containerPort": 123,
}
],
)
response["service"]["clusterArn"].should.equal(
"arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster"
)
response["service"]["desiredCount"].should.equal(2)
len(response["service"]["events"]).should.equal(0)
len(response["service"]["loadBalancers"]).should.equal(1)
response["service"]["loadBalancers"][0]["targetGroupArn"].should.equal(
"test_target_group_arn"
)
response["service"]["loadBalancers"][0]["loadBalancerName"].should.equal(
"test_load_balancer_name"
)
response["service"]["loadBalancers"][0]["containerName"].should.equal(
"test_container_name"
)
response["service"]["loadBalancers"][0]["containerPort"].should.equal(123)
response["service"]["pendingCount"].should.equal(0)
response["service"]["runningCount"].should.equal(0)
response["service"]["serviceArn"].should.equal(
"arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service"
)
response["service"]["serviceName"].should.equal("test_ecs_service")
response["service"]["status"].should.equal("ACTIVE")
response["service"]["taskDefinition"].should.equal(
"arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1"
)
@mock_ecs
def test_list_tags_for_resource():
client = boto3.client("ecs", region_name="us-east-1")
response = client.register_task_definition(
family="test_ecs_task",
containerDefinitions=[
{
"name": "hello_world",
"image": "docker/hello-world:latest",
"cpu": 1024,
"memory": 400,
"essential": True,
"environment": [
{"name": "AWS_ACCESS_KEY_ID", "value": "SOME_ACCESS_KEY"}
],
"logConfiguration": {"logDriver": "json-file"},
}
],
tags=[
{"key": "createdBy", "value": "moto-unittest"},
{"key": "foo", "value": "bar"},
],
)
type(response["taskDefinition"]).should.be(dict)
response["taskDefinition"]["revision"].should.equal(1)
response["taskDefinition"]["taskDefinitionArn"].should.equal(
"arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1"
)
task_definition_arn = response["taskDefinition"]["taskDefinitionArn"]
response = client.list_tags_for_resource(resourceArn=task_definition_arn)
type(response["tags"]).should.be(list)
response["tags"].should.equal(
[{"key": "createdBy", "value": "moto-unittest"}, {"key": "foo", "value": "bar"}]
)
@mock_ecs
def test_list_tags_exceptions():
client = boto3.client("ecs", region_name="us-east-1")
client.list_tags_for_resource.when.called_with(
resourceArn="arn:aws:ecs:us-east-1:012345678910:service/fake_service:1"
).should.throw(ClientError, ServiceNotFoundException().message)
client.list_tags_for_resource.when.called_with(
resourceArn="arn:aws:ecs:us-east-1:012345678910:task-definition/fake_task:1"
).should.throw(ClientError, TaskDefinitionNotFoundException().message)
@mock_ecs
def test_list_tags_for_resource_ecs_service():
client = boto3.client("ecs", region_name="us-east-1")
_ = client.create_cluster(clusterName="test_ecs_cluster")
_ = client.register_task_definition(
family="test_ecs_task",
containerDefinitions=[
{
"name": "hello_world",
"image": "docker/hello-world:latest",
"cpu": 1024,
"memory": 400,
"essential": True,
"environment": [
{"name": "AWS_ACCESS_KEY_ID", "value": "SOME_ACCESS_KEY"}
],
"logConfiguration": {"logDriver": "json-file"},
}
],
)
response = client.create_service(
cluster="test_ecs_cluster",
serviceName="test_ecs_service",
taskDefinition="test_ecs_task",
desiredCount=2,
tags=[
{"key": "createdBy", "value": "moto-unittest"},
{"key": "foo", "value": "bar"},
],
)
response = client.list_tags_for_resource(
resourceArn=response["service"]["serviceArn"]
)
type(response["tags"]).should.be(list)
response["tags"].should.equal(
[{"key": "createdBy", "value": "moto-unittest"}, {"key": "foo", "value": "bar"}]
)
@mock_ecs
def test_ecs_service_tag_resource():
client = boto3.client("ecs", region_name="us-east-1")
_ = client.create_cluster(clusterName="test_ecs_cluster")
_ = client.register_task_definition(
family="test_ecs_task",
containerDefinitions=[
{
"name": "hello_world",
"image": "docker/hello-world:latest",
"cpu": 1024,
"memory": 400,
"essential": True,
"environment": [
{"name": "AWS_ACCESS_KEY_ID", "value": "SOME_ACCESS_KEY"}
],
"logConfiguration": {"logDriver": "json-file"},
}
],
)
response = client.create_service(
cluster="test_ecs_cluster",
serviceName="test_ecs_service",
taskDefinition="test_ecs_task",
desiredCount=2,
)
client.tag_resource(
resourceArn=response["service"]["serviceArn"],
tags=[
{"key": "createdBy", "value": "moto-unittest"},
{"key": "foo", "value": "bar"},
],
)
response = client.list_tags_for_resource(
resourceArn=response["service"]["serviceArn"]
)
type(response["tags"]).should.be(list)
response["tags"].should.equal(
[{"key": "createdBy", "value": "moto-unittest"}, {"key": "foo", "value": "bar"}]
)
@mock_ecs
def test_ecs_service_tag_resource_overwrites_tag():
client = boto3.client("ecs", region_name="us-east-1")
_ = client.create_cluster(clusterName="test_ecs_cluster")
_ = client.register_task_definition(
family="test_ecs_task",
containerDefinitions=[
{
"name": "hello_world",
"image": "docker/hello-world:latest",
"cpu": 1024,
"memory": 400,
"essential": True,
"environment": [
{"name": "AWS_ACCESS_KEY_ID", "value": "SOME_ACCESS_KEY"}
],
"logConfiguration": {"logDriver": "json-file"},
}
],
)
response = client.create_service(
cluster="test_ecs_cluster",
serviceName="test_ecs_service",
taskDefinition="test_ecs_task",
desiredCount=2,
tags=[{"key": "foo", "value": "bar"}],
)
client.tag_resource(
resourceArn=response["service"]["serviceArn"],
tags=[
{"key": "createdBy", "value": "moto-unittest"},
{"key": "foo", "value": "hello world"},
],
)
response = client.list_tags_for_resource(
resourceArn=response["service"]["serviceArn"]
)
type(response["tags"]).should.be(list)
response["tags"].should.equal(
[
{"key": "createdBy", "value": "moto-unittest"},
{"key": "foo", "value": "hello world"},
]
)
@mock_ecs
def test_ecs_service_untag_resource():
client = boto3.client("ecs", region_name="us-east-1")
_ = client.create_cluster(clusterName="test_ecs_cluster")
_ = client.register_task_definition(
family="test_ecs_task",
containerDefinitions=[
{
"name": "hello_world",
"image": "docker/hello-world:latest",
"cpu": 1024,
"memory": 400,
"essential": True,
"environment": [
{"name": "AWS_ACCESS_KEY_ID", "value": "SOME_ACCESS_KEY"}
],
"logConfiguration": {"logDriver": "json-file"},
}
],
)
response = client.create_service(
cluster="test_ecs_cluster",
serviceName="test_ecs_service",
taskDefinition="test_ecs_task",
desiredCount=2,
tags=[{"key": "foo", "value": "bar"}],
)
client.untag_resource(
resourceArn=response["service"]["serviceArn"], tagKeys=["foo"]
)
response = client.list_tags_for_resource(
resourceArn=response["service"]["serviceArn"]
)
response["tags"].should.equal([])
@mock_ecs
def test_ecs_service_untag_resource_multiple_tags():
client = boto3.client("ecs", region_name="us-east-1")
_ = client.create_cluster(clusterName="test_ecs_cluster")
_ = client.register_task_definition(
family="test_ecs_task",
containerDefinitions=[
{
"name": "hello_world",
"image": "docker/hello-world:latest",
"cpu": 1024,
"memory": 400,
"essential": True,
"environment": [
{"name": "AWS_ACCESS_KEY_ID", "value": "SOME_ACCESS_KEY"}
],
"logConfiguration": {"logDriver": "json-file"},
}
],
)
response = client.create_service(
cluster="test_ecs_cluster",
serviceName="test_ecs_service",
taskDefinition="test_ecs_task",
desiredCount=2,
tags=[
{"key": "foo", "value": "bar"},
{"key": "createdBy", "value": "moto-unittest"},
{"key": "hello", "value": "world"},
],
)
client.untag_resource(
resourceArn=response["service"]["serviceArn"], tagKeys=["foo", "createdBy"]
)
response = client.list_tags_for_resource(
resourceArn=response["service"]["serviceArn"]
)
response["tags"].should.equal([{"key": "hello", "value": "world"}])
@mock_ecs
def test_ecs_task_definition_placement_constraints():
client = boto3.client("ecs", region_name="us-east-1")
response = client.register_task_definition(
family="test_ecs_task",
containerDefinitions=[
{
"name": "hello_world",
"image": "docker/hello-world:latest",
"cpu": 1024,
"memory": 400,
"essential": True,
"environment": [
{"name": "AWS_ACCESS_KEY_ID", "value": "SOME_ACCESS_KEY"}
],
"logConfiguration": {"logDriver": "json-file"},
}
],
networkMode="bridge",
tags=[
{"key": "createdBy", "value": "moto-unittest"},
{"key": "foo", "value": "bar"},
],
placementConstraints=[
{"type": "memberOf", "expression": "attribute:ecs.instance-type =~ t2.*"}
],
)
type(response["taskDefinition"]["placementConstraints"]).should.be(list)
response["taskDefinition"]["placementConstraints"].should.equal(
[{"type": "memberOf", "expression": "attribute:ecs.instance-type =~ t2.*"}]
)
@mock_ecs
def test_create_task_set():
cluster_name = "test_ecs_cluster"
service_name = "test_ecs_service"
task_def_name = "test_ecs_task"
client = boto3.client("ecs", region_name="us-east-1")
_ = client.create_cluster(clusterName=cluster_name)
_ = client.register_task_definition(
family="test_ecs_task",
containerDefinitions=[
{
"name": "hello_world",
"image": "docker/hello-world:latest",
"cpu": 1024,
"memory": 400,
"essential": True,
"environment": [
{"name": "AWS_ACCESS_KEY_ID", "value": "SOME_ACCESS_KEY"}
],
"logConfiguration": {"logDriver": "json-file"},
}
],
)
_ = client.create_service(
cluster=cluster_name,
serviceName=service_name,
taskDefinition=task_def_name,
desiredCount=2,
deploymentController={"type": "EXTERNAL"},
)
load_balancers = [
{
"targetGroupArn": "arn:aws:elasticloadbalancing:us-east-1:01234567890:targetgroup/c26b93c1bc35466ba792d5b08fe6a5bc/ec39113f8831453a",
"containerName": "hello_world",
"containerPort": 8080,
},
]
task_set = client.create_task_set(
cluster=cluster_name,
service=service_name,
taskDefinition=task_def_name,
loadBalancers=load_balancers,
)["taskSet"]
cluster_arn = client.describe_clusters(clusters=[cluster_name])["clusters"][0][
"clusterArn"
]
service_arn = client.describe_services(
cluster=cluster_name, services=[service_name]
)["services"][0]["serviceArn"]
assert task_set["clusterArn"] == cluster_arn
assert task_set["serviceArn"] == service_arn
assert task_set["taskDefinition"].endswith("{0}:1".format(task_def_name))
assert task_set["scale"] == {"value": 100.0, "unit": "PERCENT"}
assert (
task_set["loadBalancers"][0]["targetGroupArn"]
== "arn:aws:elasticloadbalancing:us-east-1:01234567890:targetgroup/c26b93c1bc35466ba792d5b08fe6a5bc/ec39113f8831453a"
)
assert task_set["loadBalancers"][0]["containerPort"] == 8080
assert task_set["loadBalancers"][0]["containerName"] == "hello_world"
@mock_ecs
def test_describe_task_sets():
cluster_name = "test_ecs_cluster"
service_name = "test_ecs_service"
task_def_name = "test_ecs_task"
client = boto3.client("ecs", region_name="us-east-1")
_ = client.create_cluster(clusterName=cluster_name)
_ = client.register_task_definition(
family=task_def_name,
containerDefinitions=[
{
"name": "hello_world",
"image": "docker/hello-world:latest",
"cpu": 1024,
"memory": 400,
"essential": True,
"environment": [
{"name": "AWS_ACCESS_KEY_ID", "value": "SOME_ACCESS_KEY"}
],
"logConfiguration": {"logDriver": "json-file"},
}
],
)
_ = client.create_service(
cluster=cluster_name,
serviceName=service_name,
taskDefinition=task_def_name,
desiredCount=2,
deploymentController={"type": "EXTERNAL"},
)
load_balancers = [
{
"targetGroupArn": "arn:aws:elasticloadbalancing:us-east-1:01234567890:targetgroup/c26b93c1bc35466ba792d5b08fe6a5bc/ec39113f8831453a",
"containerName": "hello_world",
"containerPort": 8080,
}
]
_ = client.create_task_set(
cluster=cluster_name,
service=service_name,
taskDefinition=task_def_name,
loadBalancers=load_balancers,
)
task_sets = client.describe_task_sets(cluster=cluster_name, service=service_name)[
"taskSets"
]
assert "tags" not in task_sets[0]
task_sets = client.describe_task_sets(
cluster=cluster_name, service=service_name, include=["TAGS"],
)["taskSets"]
cluster_arn = client.describe_clusters(clusters=[cluster_name])["clusters"][0][
"clusterArn"
]
service_arn = client.describe_services(
cluster=cluster_name, services=[service_name]
)["services"][0]["serviceArn"]
assert "tags" in task_sets[0]
assert len(task_sets) == 1
assert task_sets[0]["taskDefinition"].endswith("{0}:1".format(task_def_name))
assert task_sets[0]["clusterArn"] == cluster_arn
assert task_sets[0]["serviceArn"] == service_arn
assert task_sets[0]["serviceArn"].endswith(service_name)
assert task_sets[0]["scale"] == {"value": 100.0, "unit": "PERCENT"}
assert task_sets[0]["taskSetArn"].endswith(task_sets[0]["id"])
assert (
task_sets[0]["loadBalancers"][0]["targetGroupArn"]
== "arn:aws:elasticloadbalancing:us-east-1:01234567890:targetgroup/c26b93c1bc35466ba792d5b08fe6a5bc/ec39113f8831453a"
)
assert task_sets[0]["loadBalancers"][0]["containerPort"] == 8080
assert task_sets[0]["loadBalancers"][0]["containerName"] == "hello_world"
@mock_ecs
def test_delete_task_set():
cluster_name = "test_ecs_cluster"
service_name = "test_ecs_service"
task_def_name = "test_ecs_task"
client = boto3.client("ecs", region_name="us-east-1")
_ = client.create_cluster(clusterName=cluster_name)
_ = client.register_task_definition(
family=task_def_name,
containerDefinitions=[
{
"name": "hello_world",
"image": "docker/hello-world:latest",
"cpu": 1024,
"memory": 400,
"essential": True,
"environment": [
{"name": "AWS_ACCESS_KEY_ID", "value": "SOME_ACCESS_KEY"}
],
"logConfiguration": {"logDriver": "json-file"},
}
],
)
_ = client.create_service(
cluster=cluster_name,
serviceName=service_name,
taskDefinition=task_def_name,
desiredCount=2,
deploymentController={"type": "EXTERNAL"},
)
task_set = client.create_task_set(
cluster=cluster_name, service=service_name, taskDefinition=task_def_name,
)["taskSet"]
task_sets = client.describe_task_sets(
cluster=cluster_name, service=service_name, taskSets=[task_set["taskSetArn"]],
)["taskSets"]
assert len(task_sets) == 1
response = client.delete_task_set(
cluster=cluster_name, service=service_name, taskSet=task_set["taskSetArn"],
)
assert response["taskSet"]["taskSetArn"] == task_set["taskSetArn"]
task_sets = client.describe_task_sets(
cluster=cluster_name, service=service_name, taskSets=[task_set["taskSetArn"]],
)["taskSets"]
assert len(task_sets) == 0
with pytest.raises(ClientError):
_ = client.delete_task_set(
cluster=cluster_name, service=service_name, taskSet=task_set["taskSetArn"],
)
@mock_ecs
def test_update_service_primary_task_set():
cluster_name = "test_ecs_cluster"
service_name = "test_ecs_service"
task_def_name = "test_ecs_task"
client = boto3.client("ecs", region_name="us-east-1")
_ = client.create_cluster(clusterName=cluster_name)
_ = client.register_task_definition(
family="test_ecs_task",
containerDefinitions=[
{
"name": "hello_world",
"image": "docker/hello-world:latest",
"cpu": 1024,
"memory": 400,
"essential": True,
"environment": [
{"name": "AWS_ACCESS_KEY_ID", "value": "SOME_ACCESS_KEY"}
],
"logConfiguration": {"logDriver": "json-file"},
}
],
)
_ = client.create_service(
cluster=cluster_name,
serviceName=service_name,
desiredCount=2,
deploymentController={"type": "EXTERNAL"},
)
task_set = client.create_task_set(
cluster=cluster_name, service=service_name, taskDefinition=task_def_name,
)["taskSet"]
service = client.describe_services(cluster=cluster_name, services=[service_name],)[
"services"
][0]
_ = client.update_service_primary_task_set(
cluster=cluster_name,
service=service_name,
primaryTaskSet=task_set["taskSetArn"],
)
service = client.describe_services(cluster=cluster_name, services=[service_name],)[
"services"
][0]
assert service["taskSets"][0]["status"] == "PRIMARY"
assert service["taskDefinition"] == service["taskSets"][0]["taskDefinition"]
another_task_set = client.create_task_set(
cluster=cluster_name, service=service_name, taskDefinition=task_def_name,
)["taskSet"]
service = client.describe_services(cluster=cluster_name, services=[service_name],)[
"services"
][0]
assert service["taskSets"][1]["status"] == "ACTIVE"
_ = client.update_service_primary_task_set(
cluster=cluster_name,
service=service_name,
primaryTaskSet=another_task_set["taskSetArn"],
)
service = client.describe_services(cluster=cluster_name, services=[service_name],)[
"services"
][0]
assert service["taskSets"][0]["status"] == "ACTIVE"
assert service["taskSets"][1]["status"] == "PRIMARY"
assert service["taskDefinition"] == service["taskSets"][1]["taskDefinition"]
@mock_ecs
def test_update_task_set():
cluster_name = "test_ecs_cluster"
service_name = "test_ecs_service"
task_def_name = "test_ecs_task"
client = boto3.client("ecs", region_name="us-east-1")
_ = client.create_cluster(clusterName=cluster_name)
_ = client.register_task_definition(
family=task_def_name,
containerDefinitions=[
{
"name": "hello_world",
"image": "docker/hello-world:latest",
"cpu": 1024,
"memory": 400,
"essential": True,
"environment": [
{"name": "AWS_ACCESS_KEY_ID", "value": "SOME_ACCESS_KEY"}
],
"logConfiguration": {"logDriver": "json-file"},
}
],
)
_ = client.create_service(
cluster=cluster_name,
serviceName=service_name,
desiredCount=2,
deploymentController={"type": "EXTERNAL"},
)
task_set = client.create_task_set(
cluster=cluster_name, service=service_name, taskDefinition=task_def_name,
)["taskSet"]
another_task_set = client.create_task_set(
cluster=cluster_name, service=service_name, taskDefinition=task_def_name,
)["taskSet"]
assert another_task_set["scale"]["unit"] == "PERCENT"
assert another_task_set["scale"]["value"] == 100.0
client.update_task_set(
cluster=cluster_name,
service=service_name,
taskSet=task_set["taskSetArn"],
scale={"value": 25.0, "unit": "PERCENT"},
)
updated_task_set = client.describe_task_sets(
cluster=cluster_name, service=service_name, taskSets=[task_set["taskSetArn"]],
)["taskSets"][0]
assert updated_task_set["scale"]["value"] == 25.0
assert updated_task_set["scale"]["unit"] == "PERCENT"
@mock_ec2
@mock_ecs
def test_list_tasks_with_filters():
ecs = boto3.client("ecs", region_name="us-east-1")
ec2 = boto3.resource("ec2", region_name="us-east-1")
_ = ecs.create_cluster(clusterName="test_cluster_1")
_ = ecs.create_cluster(clusterName="test_cluster_2")
test_instance = ec2.create_instances(
ImageId="ami-1234abcd", MinCount=1, MaxCount=1
)[0]
instance_id_document = json.dumps(
ec2_utils.generate_instance_identity_document(test_instance)
)
_ = ecs.register_container_instance(
cluster="test_cluster_1", instanceIdentityDocument=instance_id_document
)
_ = ecs.register_container_instance(
cluster="test_cluster_2", instanceIdentityDocument=instance_id_document
)
container_instances = ecs.list_container_instances(cluster="test_cluster_1")
container_id_1 = container_instances["containerInstanceArns"][0].split("/")[-1]
container_instances = ecs.list_container_instances(cluster="test_cluster_2")
container_id_2 = container_instances["containerInstanceArns"][0].split("/")[-1]
test_container_def = {
"name": "hello_world",
"image": "docker/hello-world:latest",
"cpu": 1024,
"memory": 400,
"essential": True,
"environment": [{"name": "AWS_ACCESS_KEY_ID", "value": "SOME_ACCESS_KEY"}],
"logConfiguration": {"logDriver": "json-file"},
}
_ = ecs.register_task_definition(
family="test_task_def_1", containerDefinitions=[test_container_def],
)
_ = ecs.register_task_definition(
family="test_task_def_2", containerDefinitions=[test_container_def],
)
_ = ecs.start_task(
cluster="test_cluster_1",
taskDefinition="test_task_def_1",
overrides={},
containerInstances=[container_id_1],
startedBy="foo",
)
resp = ecs.start_task(
cluster="test_cluster_2",
taskDefinition="test_task_def_2",
overrides={},
containerInstances=[container_id_2],
startedBy="foo",
)
task_to_stop = resp["tasks"][0]["taskArn"]
_ = ecs.start_task(
cluster="test_cluster_1",
taskDefinition="test_task_def_1",
overrides={},
containerInstances=[container_id_1],
startedBy="bar",
)
len(ecs.list_tasks(cluster="test_cluster_1")["taskArns"]).should.equal(2)
len(ecs.list_tasks(cluster="test_cluster_2")["taskArns"]).should.equal(1)
len(
ecs.list_tasks(cluster="test_cluster_1", containerInstance="bad-id")["taskArns"]
).should.equal(0)
len(
ecs.list_tasks(cluster="test_cluster_1", containerInstance=container_id_1)[
"taskArns"
]
).should.equal(2)
len(
ecs.list_tasks(cluster="test_cluster_2", containerInstance=container_id_2)[
"taskArns"
]
).should.equal(1)
len(
ecs.list_tasks(cluster="test_cluster_1", family="non-existent-family")[
"taskArns"
]
).should.equal(0)
len(
ecs.list_tasks(cluster="test_cluster_1", family="test_task_def_1")["taskArns"]
).should.equal(2)
len(
ecs.list_tasks(cluster="test_cluster_2", family="test_task_def_2")["taskArns"]
).should.equal(1)
len(
ecs.list_tasks(cluster="test_cluster_1", startedBy="non-existent-entity")[
"taskArns"
]
).should.equal(0)
len(
ecs.list_tasks(cluster="test_cluster_1", startedBy="foo")["taskArns"]
).should.equal(1)
len(
ecs.list_tasks(cluster="test_cluster_1", startedBy="bar")["taskArns"]
).should.equal(1)
len(
ecs.list_tasks(cluster="test_cluster_2", startedBy="foo")["taskArns"]
).should.equal(1)
len(
ecs.list_tasks(cluster="test_cluster_1", desiredStatus="RUNNING")["taskArns"]
).should.equal(2)
len(
ecs.list_tasks(cluster="test_cluster_2", desiredStatus="RUNNING")["taskArns"]
).should.equal(1)
_ = ecs.stop_task(cluster="test_cluster_2", task=task_to_stop, reason="for testing")
len(
ecs.list_tasks(cluster="test_cluster_1", desiredStatus="RUNNING")["taskArns"]
).should.equal(2)
len(
ecs.list_tasks(cluster="test_cluster_2", desiredStatus="STOPPED")["taskArns"]
).should.equal(1)
resp = ecs.list_tasks(cluster="test_cluster_1", startedBy="foo")
len(resp["taskArns"]).should.equal(1)
resp = ecs.list_tasks(
cluster="test_cluster_1", containerInstance=container_id_1, startedBy="bar"
)
len(resp["taskArns"]).should.equal(1)
| 35.145023
| 145
| 0.62836
| 10,909
| 106,630
| 5.869741
| 0.038042
| 0.029407
| 0.015414
| 0.020989
| 0.900707
| 0.876876
| 0.852842
| 0.834523
| 0.819952
| 0.795137
| 0
| 0.027384
| 0.234578
| 106,630
| 3,033
| 146
| 35.156611
| 0.757171
| 0.009322
| 0
| 0.688931
| 0
| 0.007129
| 0.245907
| 0.05181
| 0
| 0
| 0
| 0
| 0.014634
| 1
| 0.02439
| false
| 0
| 0.004503
| 0
| 0.029268
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
1684b66b879dcd743c48b492f504f0e834d072a6
| 1,369
|
py
|
Python
|
lambda_functions/download/features/test_helper.py
|
russbiggs/MapCampaigner
|
05b6c6eb1bdefbe0b60313a9d3cd1a0d2df2a148
|
[
"BSD-3-Clause"
] | 24
|
2018-10-05T06:39:11.000Z
|
2022-02-22T08:54:37.000Z
|
lambda_functions/download/features/test_helper.py
|
russbiggs/MapCampaigner
|
05b6c6eb1bdefbe0b60313a9d3cd1a0d2df2a148
|
[
"BSD-3-Clause"
] | 384
|
2017-05-17T07:50:02.000Z
|
2018-09-20T08:18:56.000Z
|
lambda_functions/download/features/test_helper.py
|
russbiggs/MapCampaigner
|
05b6c6eb1bdefbe0b60313a9d3cd1a0d2df2a148
|
[
"BSD-3-Clause"
] | 16
|
2017-05-11T08:52:19.000Z
|
2018-06-08T06:55:43.000Z
|
SELECTED_FUNCTIONS = {
"function-1": {
"function": "FeatureAttributeCompleteness",
"feature": "amenity=cafe", "attributes": {
"amenity": ["cafe"], "name": [], "cuisine": [], "operator": [],
"opening_hours": []},
"type": "cafe"},
"function-2": {
"function": "CountFeature",
"feature": "amenity=cafe", "attributes": {
"amenity": ["cafe"], "name": [], "cuisine": [], "operator": [],
"opening_hours": []},
"type": "cafe"},
"function-3": {
"function": "MapperEngagement",
"feature": "amenity=cafe", "attributes": {
"amenity": ["cafe"], "name": [], "cuisine": [], "operator": [],
"opening_hours": []},
"type": "cafe"},
"function-4": {
"function": "FeatureAttributeCompleteness",
"feature": "shop=supermarket", "attributes": {
"shop": ["supermarket"], "name": []},
"type": "supermarket"},
"function-5": {
"function": "CountFeature",
"feature": "shop=supermarket", "attributes": {
"shop": ["supermarket"], "name": []},
"type": "supermarket"},
"function-6": {
"function": "MapperEngagement",
"feature": "shop=supermarket", "attributes": {
"shop": ["supermarket"], "name": []},
"type": "supermarket"}
}
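# Example lookup (illustrative only): each entry pairs an analysis function
# with the OSM feature and attributes it inspects, e.g.
#   SELECTED_FUNCTIONS["function-1"]["function"]  # "FeatureAttributeCompleteness"
#   SELECTED_FUNCTIONS["function-4"]["feature"]   # "shop=supermarket"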
| 38.027778
| 75
| 0.487217
| 92
| 1,369
| 7.206522
| 0.26087
| 0.099548
| 0.081448
| 0.126697
| 0.711916
| 0.711916
| 0.711916
| 0.711916
| 0.711916
| 0.612368
| 0
| 0.006079
| 0.279036
| 1,369
| 35
| 76
| 39.114286
| 0.665654
| 0
| 0
| 0.742857
| 0
| 0
| 0.482834
| 0.040906
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
169639817e6084a87871e8952b4d0c024c62bebd
| 43
|
py
|
Python
|
examples/functions/Python/func.py
|
Russtopia/fx
|
37b2733dcf26bfba442141e32d62002f01461acc
|
[
"MIT"
] | null | null | null |
examples/functions/Python/func.py
|
Russtopia/fx
|
37b2733dcf26bfba442141e32d62002f01461acc
|
[
"MIT"
] | null | null | null |
examples/functions/Python/func.py
|
Russtopia/fx
|
37b2733dcf26bfba442141e32d62002f01461acc
|
[
"MIT"
] | 1
|
2019-12-11T18:40:09.000Z
|
2019-12-11T18:40:09.000Z
|
def fx(request):
return "hello world"
| 14.333333
| 24
| 0.674419
| 6
| 43
| 4.833333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.209302
| 43
| 2
| 25
| 21.5
| 0.852941
| 0
| 0
| 0
| 0
| 0
| 0.255814
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
16a6288959b1471633e1bacffec0228bb38521f6
| 151
|
py
|
Python
|
tests/extractor_test.py
|
kkmsun/dataengineeringweekly
|
b17458b82d547302e620acebd199cd4c303cabcb
|
[
"Apache-2.0"
] | 54
|
2021-10-08T22:32:59.000Z
|
2022-03-29T18:17:20.000Z
|
tests/extractor_test.py
|
kkmsun/dataengineeringweekly
|
b17458b82d547302e620acebd199cd4c303cabcb
|
[
"Apache-2.0"
] | 4
|
2021-10-09T14:06:35.000Z
|
2022-03-30T21:12:44.000Z
|
tests/extractor_test.py
|
kkmsun/dataengineeringweekly
|
b17458b82d547302e620acebd199cd4c303cabcb
|
[
"Apache-2.0"
] | 13
|
2021-10-09T01:57:40.000Z
|
2022-03-29T18:49:06.000Z
|
from publisher.publish import bootstrap, publish_edition
def test_bootstrap():
bootstrap()
def test_publish_edition():
publish_edition(69)
| 15.1
| 56
| 0.768212
| 18
| 151
| 6.166667
| 0.5
| 0.378378
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015625
| 0.152318
| 151
| 9
| 57
| 16.777778
| 0.851563
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| true
| 0
| 0.2
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
16aabe7e952abdd078f2d8a95c61dc8d2b0f07be
| 117
|
py
|
Python
|
basename.py
|
linuxlizard/q60
|
dd0cf8a556584beb816b04c6a4913f19cb1142d1
|
[
"Apache-2.0"
] | null | null | null |
basename.py
|
linuxlizard/q60
|
dd0cf8a556584beb816b04c6a4913f19cb1142d1
|
[
"Apache-2.0"
] | null | null | null |
basename.py
|
linuxlizard/q60
|
dd0cf8a556584beb816b04c6a4913f19cb1142d1
|
[
"Apache-2.0"
] | null | null | null |
#!python
import os
def get_basename(filename):
return os.path.splitext(os.path.split(filename)[1])[0]
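# Quick self-check (paths are illustrative): os.path.split drops the directory
# part and os.path.splitext drops the last extension.
if __name__ == "__main__":
assert get_basename("/path/to/report.txt") == "report"
assert get_basename("archive.tar.gz") == "archive.tar"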
| 14.625
| 62
| 0.666667
| 17
| 117
| 4.529412
| 0.764706
| 0.155844
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021053
| 0.188034
| 117
| 7
| 63
| 16.714286
| 0.789474
| 0.059829
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
16b0cf6adbec2e3fbbf73c9375853f5f2c3ea31c
| 13,920
|
py
|
Python
|
auth0/v3/test/management/test_rest.py
|
Sytten/auth0-python
|
59c1942acbd9723adaf587ac4bc94c9583fe38a0
|
[
"MIT"
] | null | null | null |
auth0/v3/test/management/test_rest.py
|
Sytten/auth0-python
|
59c1942acbd9723adaf587ac4bc94c9583fe38a0
|
[
"MIT"
] | null | null | null |
auth0/v3/test/management/test_rest.py
|
Sytten/auth0-python
|
59c1942acbd9723adaf587ac4bc94c9583fe38a0
|
[
"MIT"
] | null | null | null |
import unittest
import sys
import json
import base64
import requests
import mock
from ...management.rest import RestClient
from ...exceptions import Auth0Error
class TestRest(unittest.TestCase):
@mock.patch('requests.get')
def test_get(self, mock_get):
rc = RestClient(jwt='a-token', telemetry=False)
headers = {
'Authorization': 'Bearer a-token',
'Content-Type': 'application/json',
}
mock_get.return_value.text = '["a", "b"]'
mock_get.return_value.status_code = 200
response = rc.get('the-url')
mock_get.assert_called_with('the-url', params=None, headers=headers, timeout=5.0)
self.assertEqual(response, ['a', 'b'])
response = rc.get(url='the/url', params={'A': 'param', 'B': 'param'})
mock_get.assert_called_with('the/url', params={'A': 'param',
'B': 'param'},
headers=headers, timeout=5.0)
self.assertEqual(response, ['a', 'b'])
mock_get.return_value.text = ''
response = rc.get('the/url')
self.assertEqual(response, '')
@mock.patch('requests.get')
def test_get_errors(self, mock_get):
rc = RestClient(jwt='a-token', telemetry=False)
mock_get.return_value.text = '{"statusCode": 999,' \
' "errorCode": "code",' \
' "message": "message"}'
mock_get.return_value.status_code = 999
with self.assertRaises(Auth0Error) as context:
rc.get('the/url')
self.assertEqual(context.exception.status_code, 999)
self.assertEqual(context.exception.error_code, 'code')
self.assertEqual(context.exception.message, 'message')
@mock.patch('requests.post')
def test_post(self, mock_post):
rc = RestClient(jwt='a-token', telemetry=False)
headers = {'Authorization': 'Bearer a-token',
'Content-Type': 'application/json'}
mock_post.return_value.text = '{"a": "b"}'
data = {'some': 'data'}
mock_post.return_value.status_code = 200
response = rc.post('the/url', data=data)
mock_post.assert_called_with('the/url', json=data,
headers=headers, timeout=5.0)
self.assertEqual(response, {'a': 'b'})
@mock.patch('requests.post')
def test_post_errors(self, mock_post):
rc = RestClient(jwt='a-token', telemetry=False)
mock_post.return_value.text = '{"statusCode": 999,' \
' "errorCode": "code",' \
' "message": "message"}'
mock_post.return_value.status_code = 999
with self.assertRaises(Auth0Error) as context:
rc.post('the-url')
self.assertEqual(context.exception.status_code, 999)
self.assertEqual(context.exception.error_code, 'code')
self.assertEqual(context.exception.message, 'message')
@mock.patch('requests.post')
def test_post_errors_with_no_message_property(self, mock_post):
rc = RestClient(jwt='a-token', telemetry=False)
mock_post.return_value.text = json.dumps({
"statusCode": 999,
"errorCode": "code",
"error": "error"
})
mock_post.return_value.status_code = 999
with self.assertRaises(Auth0Error) as context:
rc.post('the-url')
self.assertEqual(context.exception.status_code, 999)
self.assertEqual(context.exception.error_code, 'code')
self.assertEqual(context.exception.message, 'error')
@mock.patch('requests.post')
def test_post_errors_with_no_message_or_error_property(self, mock_post):
rc = RestClient(jwt='a-token', telemetry=False)
mock_post.return_value.text = json.dumps({
"statusCode": 999,
"errorCode": "code"
})
mock_post.return_value.status_code = 999
with self.assertRaises(Auth0Error) as context:
rc.post('the-url')
self.assertEqual(context.exception.status_code, 999)
self.assertEqual(context.exception.error_code, 'code')
self.assertEqual(context.exception.message, '')
@mock.patch('requests.post')
def test_post_errors_with_message_and_error_property(self, mock_post):
rc = RestClient(jwt='a-token', telemetry=False)
mock_post.return_value.text = json.dumps({
"statusCode": 999,
"errorCode": "code",
"error": "error",
"message": "message"
})
mock_post.return_value.status_code = 999
with self.assertRaises(Auth0Error) as context:
rc.post('the-url')
self.assertEqual(context.exception.status_code, 999)
self.assertEqual(context.exception.error_code, 'code')
self.assertEqual(context.exception.message, 'message')
@mock.patch('requests.post')
def test_post_error_with_code_property(self, mock_post):
rc = RestClient(jwt='a-token', telemetry=False)
for error_status in [400, 500, None]:
mock_post.return_value.status_code = error_status
mock_post.return_value.text = '{"errorCode": "e0",' \
'"message": "desc"}'
with self.assertRaises(Auth0Error) as context:
rc.post('the-url')
self.assertEqual(context.exception.status_code, error_status)
self.assertEqual(context.exception.error_code, 'e0')
self.assertEqual(context.exception.message, 'desc')
@mock.patch('requests.post')
def test_post_error_with_no_error_code(self, mock_post):
rc = RestClient(jwt='a-token', telemetry=False)
for error_status in [400, 500, None]:
mock_post.return_value.status_code = error_status
mock_post.return_value.text = '{"message": "desc"}'
with self.assertRaises(Auth0Error) as context:
rc.post('the-url')
self.assertEqual(context.exception.status_code, error_status)
self.assertEqual(context.exception.error_code, 'a0.sdk.internal.unknown')
self.assertEqual(context.exception.message, 'desc')
@mock.patch('requests.post')
def test_post_error_with_text_response(self, mock_post):
rc = RestClient(jwt='a-token', telemetry=False)
for error_status in [400, 500, None]:
mock_post.return_value.status_code = error_status
mock_post.return_value.text = 'there has been a terrible error'
with self.assertRaises(Auth0Error) as context:
rc.post('the-url')
self.assertEqual(context.exception.status_code, error_status)
self.assertEqual(context.exception.error_code, 'a0.sdk.internal.unknown')
self.assertEqual(context.exception.message,
'there has been a terrible error')
@mock.patch('requests.post')
def test_post_error_with_no_response_text(self, mock_post):
rc = RestClient(jwt='a-token', telemetry=False)
for error_status in [400, 500, None]:
mock_post.return_value.status_code = error_status
mock_post.return_value.text = None
with self.assertRaises(Auth0Error) as context:
rc.post('the-url')
self.assertEqual(context.exception.status_code, error_status)
self.assertEqual(context.exception.error_code, 'a0.sdk.internal.unknown')
self.assertEqual(context.exception.message, '')
@mock.patch('requests.post')
def test_file_post_content_type_is_none(self, mock_post):
rc = RestClient(jwt='a-token', telemetry=False)
headers = {'Authorization': 'Bearer a-token'}
mock_post.return_value.status_code = 200
mock_post.return_value.text = 'Success'
data = {'some': 'data'}
files = [mock.Mock()]
rc.file_post('the-url', data=data, files=files)
mock_post.assert_called_once_with('the-url', data=data, files=files, headers=headers, timeout=5.0)
@mock.patch('requests.put')
def test_put(self, mock_put):
rc = RestClient(jwt='a-token', telemetry=False)
headers = {'Authorization': 'Bearer a-token',
'Content-Type': 'application/json'}
mock_put.return_value.text = '["a", "b"]'
mock_put.return_value.status_code = 200
data = {'some': 'data'}
response = rc.put(url='the-url', data=data)
mock_put.assert_called_with('the-url', json=data,
headers=headers, timeout=5.0)
self.assertEqual(response, ['a', 'b'])
@mock.patch('requests.put')
def test_put_errors(self, mock_put):
rc = RestClient(jwt='a-token', telemetry=False)
mock_put.return_value.text = '{"statusCode": 999,' \
' "errorCode": "code",' \
' "message": "message"}'
mock_put.return_value.status_code = 999
with self.assertRaises(Auth0Error) as context:
rc.put(url='the/url')
self.assertEqual(context.exception.status_code, 999)
self.assertEqual(context.exception.error_code, 'code')
self.assertEqual(context.exception.message, 'message')
@mock.patch('requests.patch')
def test_patch(self, mock_patch):
rc = RestClient(jwt='a-token', telemetry=False)
headers = {'Authorization': 'Bearer a-token',
'Content-Type': 'application/json'}
mock_patch.return_value.text = '["a", "b"]'
mock_patch.return_value.status_code = 200
data = {'some': 'data'}
response = rc.patch(url='the-url', data=data)
mock_patch.assert_called_with('the-url', json=data,
headers=headers, timeout=5.0)
self.assertEqual(response, ['a', 'b'])
@mock.patch('requests.patch')
def test_patch_errors(self, mock_patch):
rc = RestClient(jwt='a-token', telemetry=False)
mock_patch.return_value.text = '{"statusCode": 999,' \
' "errorCode": "code",' \
' "message": "message"}'
mock_patch.return_value.status_code = 999
with self.assertRaises(Auth0Error) as context:
rc.patch(url='the/url')
self.assertEqual(context.exception.status_code, 999)
self.assertEqual(context.exception.error_code, 'code')
self.assertEqual(context.exception.message, 'message')
@mock.patch('requests.delete')
def test_delete(self, mock_delete):
rc = RestClient(jwt='a-token', telemetry=False)
headers = {
'Authorization': 'Bearer a-token',
'Content-Type': 'application/json',
}
mock_delete.return_value.text = '["a", "b"]'
mock_delete.return_value.status_code = 200
response = rc.delete(url='the-url/ID')
mock_delete.assert_called_with('the-url/ID', headers=headers, params={}, json=None, timeout=5.0)
self.assertEqual(response, ['a', 'b'])
@mock.patch('requests.delete')
def test_delete_with_body_and_params(self, mock_delete):
rc = RestClient(jwt='a-token', telemetry=False)
headers = {
'Authorization': 'Bearer a-token',
'Content-Type': 'application/json',
}
mock_delete.return_value.text = '["a", "b"]'
mock_delete.return_value.status_code = 200
data = {'some': 'data'}
params = {'A': 'param', 'B': 'param'}
response = rc.delete(url='the-url/ID', params=params, data=data)
mock_delete.assert_called_with('the-url/ID', headers=headers, params=params, json=data, timeout=5.0)
self.assertEqual(response, ['a', 'b'])
@mock.patch('requests.delete')
def test_delete_errors(self, mock_delete):
rc = RestClient(jwt='a-token', telemetry=False)
mock_delete.return_value.text = '{"statusCode": 999,' \
' "errorCode": "code",' \
' "message": "message"}'
mock_delete.return_value.status_code = 999
with self.assertRaises(Auth0Error) as context:
rc.delete(url='the-url')
self.assertEqual(context.exception.status_code, 999)
self.assertEqual(context.exception.error_code, 'code')
self.assertEqual(context.exception.message, 'message')
def test_disabled_telemetry(self):
rc = RestClient(jwt='a-token', telemetry=False)
expected_headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer a-token',
}
self.assertEqual(rc.base_headers, expected_headers)
def test_enabled_telemetry(self):
rc = RestClient(jwt='a-token', telemetry=True)
user_agent = rc.base_headers['User-Agent']
auth0_client_bytes = base64.b64decode(rc.base_headers['Auth0-Client'])
auth0_client_json = auth0_client_bytes.decode('utf-8')
auth0_client = json.loads(auth0_client_json)
content_type = rc.base_headers['Content-Type']
from auth0 import __version__ as auth0_version
python_version = '{}.{}.{}'.format(sys.version_info.major,
sys.version_info.minor,
sys.version_info.micro)
client_info = {
'name': 'auth0-python',
'version': auth0_version,
'env': {
'python': python_version
}
}
self.assertEqual(user_agent, 'Python/{}'.format(python_version))
self.assertEqual(auth0_client, client_info)
self.assertEqual(content_type, 'application/json')
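# Usage sketch of the client exercised above (a minimal example, assuming the
# constructor and HTTP verbs shown in these tests; the URLs are placeholders):
#   rc = RestClient(jwt='a-token', telemetry=False)
#   users = rc.get('https://tenant.auth0.com/api/v2/users')
#   rc.post('https://tenant.auth0.com/api/v2/users', data={'email': 'user@example.com'})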
| 37.621622
| 108
| 0.599784
| 1,571
| 13,920
| 5.133036
| 0.075111
| 0.089286
| 0.098214
| 0.138393
| 0.863963
| 0.841394
| 0.807788
| 0.763269
| 0.736607
| 0.735491
| 0
| 0.016608
| 0.268966
| 13,920
| 369
| 109
| 37.723577
| 0.775845
| 0
| 0
| 0.588448
| 0
| 0
| 0.141882
| 0.004957
| 0
| 0
| 0
| 0
| 0.245487
| 1
| 0.075812
| false
| 0
| 0.032491
| 0
| 0.111913
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
16b4fbe0753c9cdff4539c3d120d34ca5a94cc8f
| 15,912
|
py
|
Python
|
src/labelling_algorithms.py
|
AminKaram/BipolarABASolver
|
3b858d8ea21ad9f39a393afbd5000932060d48a1
|
[
"MIT"
] | 1
|
2021-08-09T10:48:36.000Z
|
2021-08-09T10:48:36.000Z
|
src/labelling_algorithms.py
|
AminKaram/BipolarABASolver
|
3b858d8ea21ad9f39a393afbd5000932060d48a1
|
[
"MIT"
] | 1
|
2020-03-30T03:23:33.000Z
|
2020-03-30T10:23:47.000Z
|
src/labelling_algorithms.py
|
AminKaram/FYP
|
3b858d8ea21ad9f39a393afbd5000932060d48a1
|
[
"MIT"
] | 2
|
2019-10-07T12:14:24.000Z
|
2021-02-26T08:32:28.000Z
|
from enum import Enum
import json
def add_closure_to_label_in(labelling, closure, framework):
for a in closure:
labelling[a] = Label.IN
for attacked in framework.assumptions_directly_attacked_by(a):
for a in framework.get_inverse_closure(attacked):
labelling[a] = Label.OUT
def _is_terminal_labelling(labelling):
'''
:param labelling: A dictionary of Assumption, Label pairs.
:return: True if labelling is terminal in the spirit of [NAD16].
'''
return all(val != Label.BLANK for val in labelling.values())
def _get_next_must_in_assumption(framework, labelling):
'''
:param framework: A BipolarABA object.
:param labelling: A dictionary of Assumption, Label pairs.
:return: A must_in Assumption in framework under labelling in the spirit of [NAD16].
'''
return next(assumption for assumption, label in labelling.items() if label == Label.BLANK
and all(labelling[a] in [Label.OUT, Label.MUST_OUT]
for a in framework.assumptions_which_directly_attack(framework.get_closure(assumption))))
def _propagate_labelling(framework, labelling):
'''
:param framework: A BipolarABA object.
:param labelling: A dictionary of Assumption, Label pairs.
:return: propagate labelling in framework in the spirit of [NAD16].
'''
while True:
try:
must_in_assumption = _get_next_must_in_assumption(framework, labelling)
except StopIteration:
break
closure = framework.get_closure(must_in_assumption)
add_closure_to_label_in(labelling, closure, framework)
def _get_most_influential_assumption(framework, labelling):
'''
:param framework: A BipolarABA object.
:param labelling: A dictionary of Assumption, Label pairs.
:return: The most influential assumption labelled BLANK in the spirit of [NAD16].
'''
def comparison_func(assumption):
closure = framework.get_closure(assumption)
score = 0
for a in closure:
score += len(framework.direct_attacks[a]) + len(framework.direct_supports[a]) + \
len(framework.direct_attacked_by[a]) + len(framework.direct_supported_by[a])
return score
return max((a for a, label in labelling.items() if label == Label.BLANK), key=comparison_func)
def _apply_left_transition_to_labelling(framework, labelling, target_assumption):
'''
:param labelling: A dictionary of Assumption, Label pairs.
:param target_assumption: An Assumption object.
:return: Apply a left-transition of labelling under target_assumption in the spirit of [NAD16].
'''
closure = framework.get_closure(target_assumption)
for a in closure:
labelling[a] = Label.IN
for attacked in framework.assumptions_directly_attacked_by(a):
for assumption in framework.get_inverse_closure(attacked):
labelling[assumption] = Label.OUT
for a in framework.direct_attacks[target_assumption]:
if labelling[a] != Label.OUT:
labelling[a] = Label.MUST_OUT
def _is_admissible_labelling(labelling):
'''
:param labelling: A dictionary of Assumption, Label pairs.
:return: True, if labelling is admissible in the spirit of [NAD16].
'''
return _is_terminal_labelling(labelling) and all(val != Label.MUST_OUT for val in labelling.values())
def _apply_preferred_right_transition_to_labelling(labelling, target_assumption):
'''
:param labelling: A dictionary of Assumption, Label pairs.
:param target_assumption: An Assumption object.
:return: Apply a preferred semantics right-transition of labelling under target_assumption in the spirit of [NAD16].
'''
labelling[target_assumption] = Label.UNDEC
def _is_preferred_hopeless_labelling(framework, labelling):
'''
:param framework: A BipolarABA object.
:param labelling: A dictionary of Assumption, Label pairs.
:return: True, if labelling is hopeless in framework under preferred semantics, in the spirit of [NAD16].
'''
for k in framework.assumptions:
if labelling[k] == Label.MUST_OUT:
if all(labelling[a] in [Label.OUT, Label.MUST_OUT, Label.UNDEC] for a in
framework.assumptions_which_directly_attack(framework.get_closure(k))):
return True
return False
def assign_initial_labelling_for_preferred_semantics(framework):
'''
:return: A dictionary of Assumption, Label objects
containing the initial preferred labelling of assumptions in the spirit of [NAD16].
'''
# grounded_labelling = construct_grounded_labelling(framework)
# labelling = {}
# for a in framework.assumptions:
# closure = framework.get_closure(a)
# if grounded_labelling[a] == Label.IN or grounded_labelling[a] == Label.OUT:
# labelling[a] = grounded_labelling[a]
# elif framework.attacks_own_closure(a):
# for assumption in closure:
# labelling[assumption] = Label.UNDEC
# else:
# labelling[a] = Label.BLANK
labelling = {}
for a in framework.assumptions:
if framework.attacks_own_closure(a):
labelling[a] = Label.UNDEC
else:
labelling[a] = Label.BLANK
return labelling
def enumerate_preferred_extensions(framework, current_labelling, extensions):
'''
:param framework: A bipolar ABA object,
:param current_labelling: A dictionary of Assumption, Label objects.
:param extensions: A set of sets of Assumptions.
:return: extensions will contain all the preferred extensions of framework once execution completed.
'''
_propagate_labelling(framework, current_labelling)
if _is_preferred_hopeless_labelling(framework, current_labelling):
return
while not _is_terminal_labelling(current_labelling):
target_assumption = _get_most_influential_assumption(framework, current_labelling)
left_labelling = current_labelling.copy()
_apply_left_transition_to_labelling(framework, left_labelling, target_assumption)
if not _is_preferred_hopeless_labelling(framework, left_labelling):
enumerate_preferred_extensions(framework, left_labelling, extensions)
_apply_preferred_right_transition_to_labelling(current_labelling, target_assumption)
if _is_preferred_hopeless_labelling(framework, current_labelling):
return
if _is_admissible_labelling(current_labelling):
adm_set = frozenset({a for a, label in current_labelling.items() if label == Label.IN})
if all(not adm_set <= e for e in extensions):
extensions.add(adm_set)
def enumerate_preferred_extensions_with_steps(framework, current_labelling, extensions):
'''
:param framework: A bipolar ABA object,
:param current_labelling: A dictionary of Assumption, Label objects.
:param extensions: A set of sets of Assumptions.
:return: extensions will contain all the preferred extensions of framework once execution completed.
'''
_propagate_labelling(framework, current_labelling)
yield {'status': 'post-propagation', 'labelling': current_labelling.copy()}
if _is_preferred_hopeless_labelling(framework, current_labelling):
yield {'status': 'hopeless labelling', 'labelling': current_labelling.copy()}
return
while not _is_terminal_labelling(current_labelling):
target_assumption = _get_most_influential_assumption(framework, current_labelling)
left_labelling = current_labelling.copy()
_apply_left_transition_to_labelling(framework, left_labelling, target_assumption)
yield {'status': 'post-left-transition', 'labelling': left_labelling.copy()}
if not _is_preferred_hopeless_labelling(framework, left_labelling):
yield from enumerate_preferred_extensions_with_steps(framework, left_labelling, extensions)
else:
yield {'status': 'hopeless labelling', 'labelling': left_labelling.copy()}
_apply_preferred_right_transition_to_labelling(current_labelling, target_assumption)
yield {'status': 'post-right-transition', 'labelling': current_labelling.copy()}
if _is_preferred_hopeless_labelling(framework, current_labelling):
yield {'status': 'hopeless labelling', 'labelling': current_labelling.copy()}
return
if _is_admissible_labelling(current_labelling):
adm_set = frozenset({a for a, label in current_labelling.items() if label == Label.IN})
if all(not adm_set <= e for e in extensions):
extensions.add(adm_set)
print('extension found')
yield {'status': 'extension found', 'labelling': current_labelling.copy()}
def _apply_set_stable_right_transition_to_labelling(labelling, target_assumption):
'''
:param labelling: A dictionary of Assumption, Label pairs.
:param target_assumption: An Assumption object.
:return: Apply a set-stable semantics right-transition of labelling
under target_assumption in the spirit of [NAD16].
'''
labelling[target_assumption] = Label.MUST_OUT
def assign_initial_labelling_for_set_stable_semantics(framework):
'''
:return: A dictionary of Assumption, Label objects
containing the initial set-stable labelling in the spirit of [NAD16].
'''
# grounded_labelling = construct_grounded_labelling(framework)
# labelling = {}
# for a in framework.assumptions:
# closure = framework.get_closure(a)
# if grounded_labelling[a] == Label.IN or grounded_labelling[a] == Label.OUT:
# labelling[a] = grounded_labelling[a]
# elif framework.attacks_own_closure(a):
# for assumption in closure:
# labelling[assumption] = Label.MUST_OUT
# else:
# labelling[a] = Label.BLANK
labelling = {}
for a in framework.assumptions:
if framework.attacks_own_closure(a):
labelling[a] = Label.UNDEC
else:
labelling[a] = Label.BLANK
return labelling
def _is_set_stable_hopeless_labelling(framework, labelling):
'''
:param framework: A BipolarABA object.
:param labelling: A dictionary of Assumption, Label pairs.
:return: True, if labelling is hopeless in framework under set-stable semantics, in the spirit of [NAD16].
'''
for k in framework.assumptions:
if labelling[k] == Label.MUST_OUT:
if all(labelling[a] in [Label.OUT, Label.MUST_OUT] for a in
framework.assumptions_which_directly_attack(framework.get_closure(k))):
return True
return False
def _is_set_stable_labelling(labelling):
'''
:param labelling: A dictionary of Assumption, Label pairs.
:return: True, if labelling is set-stable in the spirit of [NAD16].
'''
return _is_terminal_labelling(labelling) and all(val != Label.MUST_OUT for val in labelling.values())
def enumerate_set_stable_extensions(framework, current_labelling, extensions):
'''
:param framework: A bipolar ABA object,
:param current_labelling: A dictionary of Assumption, Label objects.
:param extensions: A set of sets of Assumptions.
:return: extensions will contain all the set-stable extensions of framework once execution completed.
'''
_propagate_labelling(framework, current_labelling)
if _is_set_stable_hopeless_labelling(framework, current_labelling):
return
while not _is_terminal_labelling(current_labelling):
target_assumption = _get_most_influential_assumption(framework, current_labelling)
left_labelling = current_labelling.copy()
_apply_left_transition_to_labelling(framework, left_labelling, target_assumption)
if not _is_set_stable_hopeless_labelling(framework, left_labelling):
enumerate_set_stable_extensions(framework, left_labelling, extensions)
_apply_set_stable_right_transition_to_labelling(current_labelling, target_assumption)
if _is_set_stable_hopeless_labelling(framework, current_labelling):
return
if _is_set_stable_labelling(current_labelling):
set_stable_set = frozenset({a for a, label in current_labelling.items() if label == Label.IN})
extensions.add(set_stable_set)
def enumerate_set_stable_extensions_with_steps(framework, current_labelling, extensions):
'''
:param framework: A bipolar ABA object,
:param current_labelling: A dictionary of Assumption, Label objects.
:param extensions: A set of sets of Assumptions.
:return: extensions will contain all the set_stable extensions of framework once execution completed.
'''
_propagate_labelling(framework, current_labelling)
yield {'status': 'post-propagation', 'labelling': current_labelling.copy()}
if _is_set_stable_hopeless_labelling(framework, current_labelling):
yield {'status': 'hopeless labelling', 'labelling': current_labelling.copy()}
return
while not _is_terminal_labelling(current_labelling):
target_assumption = _get_most_influential_assumption(framework, current_labelling)
left_labelling = current_labelling.copy()
_apply_left_transition_to_labelling(framework, left_labelling, target_assumption)
yield {'status': 'post-left-transition', 'labelling': left_labelling.copy()}
if not _is_set_stable_hopeless_labelling(framework, left_labelling):
yield from enumerate_set_stable_extensions_with_steps(framework, left_labelling, extensions)
else:
yield {'status': 'hopeless labelling', 'labelling': left_labelling.copy()}
_apply_set_stable_right_transition_to_labelling(current_labelling, target_assumption)
yield {'status': 'post-right-transition', 'labelling': current_labelling.copy()}
if _is_set_stable_hopeless_labelling(framework, current_labelling):
yield {'status': 'hopeless labelling', 'labelling': current_labelling.copy()}
return
if _is_set_stable_labelling(current_labelling):
adm_set = frozenset({a for a, label in current_labelling.items() if label == Label.IN})
if all(not adm_set <= e for e in extensions):
extensions.add(adm_set)
yield {'status': 'extension found', 'labelling': current_labelling.copy()}
def construct_grounded_labelling(framework):
'''
:param framework: A BipolarABA framework.
:return: A grounded labelling of framework, based on the simple algorithm described in Argumentation in AI.
'''
def add_assumption_to_label_in(a, labelling):
closure = framework.get_closure(a)
for assumption in closure:
labelling[assumption] = Label.IN  # label every member of the closure IN, not just a
for attacked in framework.assumptions_directly_attacked_by(assumption):
for cl_attacked in framework.get_inverse_closure(attacked):
labelling[cl_attacked] = Label.OUT
for direct_attacker_gone in framework.assumptions_directly_attacked_by(cl_attacked):
for attacker_gone in framework.get_inverse_closure(direct_attacker_gone):
aggressor_count[attacker_gone] -= 1
labelling = {a: Label.UNDEC for a in framework.assumptions}
aggressor_count = {a: len(framework.assumptions_which_directly_attack(framework.get_closure(a)))
for a in framework.assumptions}
changed = True
while changed:
changed = False
for a in framework.assumptions:
if labelling[a] == Label.UNDEC:
if aggressor_count[a] == 0:
changed = True
add_assumption_to_label_in(a, labelling)
return labelling
class Label(Enum):
IN = 1
OUT = 2
UNDEC = 3
BLANK = 4
MUST_OUT = 5
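A minimal driver sketch for the enumeration above (assumptions: my_framework is a hypothetical BipolarABA instance exposing the methods used in this file, and BLANK is the initial label):
# Hypothetical usage; my_framework is assumed, not defined in this file.
initial_labelling = {a: Label.BLANK for a in my_framework.assumptions}
found_extensions = set()
enumerate_set_stable_extensions(my_framework, initial_labelling, found_extensions)
for extension in found_extensions:
    print(extension)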
| 43.594521
| 120
| 0.704563
| 1,869
| 15,912
| 5.742643
| 0.072766
| 0.077518
| 0.055902
| 0.03643
| 0.871984
| 0.848225
| 0.812727
| 0.778627
| 0.75021
| 0.725054
| 0
| 0.002719
| 0.214178
| 15,912
| 364
| 121
| 43.714286
| 0.855646
| 0.270299
| 0
| 0.567708
| 0
| 0
| 0.043926
| 0.003757
| 0
| 0
| 0
| 0
| 0
| 1
| 0.109375
| false
| 0
| 0.010417
| 0
| 0.260417
| 0.005208
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
16b6b4d74efc13e8b27317e1ff24fb896a9fb03c
| 40
|
py
|
Python
|
nhl/statsapi/__init__.py
|
toddrob99/nhl
|
8ab4150686665f44bf8108a8ff49b778e0357d31
|
[
"MIT"
] | 19
|
2019-03-03T07:34:58.000Z
|
2021-12-04T15:06:36.000Z
|
nhl/statsapi/__init__.py
|
toddrob99/nhl
|
8ab4150686665f44bf8108a8ff49b778e0357d31
|
[
"MIT"
] | 3
|
2020-12-18T03:58:31.000Z
|
2021-11-08T19:10:23.000Z
|
nhl/statsapi/__init__.py
|
toddrob99/nhl
|
8ab4150686665f44bf8108a8ff49b778e0357d31
|
[
"MIT"
] | 5
|
2019-09-24T17:33:02.000Z
|
2021-09-29T23:50:05.000Z
|
from .api import *
from .parse import *
| 13.333333
| 20
| 0.7
| 6
| 40
| 4.666667
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 40
| 2
| 21
| 20
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
16d26a3bd17b2945eb9f901cf17e95572459588f
| 3,004
|
py
|
Python
|
src/playground.py
|
delixfe/valuenet
|
d5ddcb168a2c8086de5b7c781f6dc686ad183d09
|
[
"Apache-2.0"
] | 1
|
2020-11-28T17:44:05.000Z
|
2020-11-28T17:44:05.000Z
|
src/playground.py
|
delixfe/valuenet
|
d5ddcb168a2c8086de5b7c781f6dc686ad183d09
|
[
"Apache-2.0"
] | 4
|
2021-03-31T20:01:24.000Z
|
2021-12-13T20:43:13.000Z
|
src/playground.py
|
delixfe/valuenet
|
d5ddcb168a2c8086de5b7c781f6dc686ad183d09
|
[
"Apache-2.0"
] | 1
|
2021-01-26T17:22:11.000Z
|
2021-01-26T17:22:11.000Z
|
import json
# with open('data/spider/preprocessed_with_values.json', 'r', encoding='utf-8') as json_file:
# data = json.load(json_file)
# for row in data:
# values = row['values']
# if values:
# candidates = row['ner_extracted_values_processed']
# print(f'Values: {values} Candidates: {candidates}')
values = [('F', 'src_ap', 'routes'), ('F', 'dst_ap', 'routes'), ('John F Kennedy International Airport', 'name', 'airports'), ('F', 'airline', 'routes'), ('INTERNACIONAL', 'callsign', 'airlines'), ('Denver International Airport', 'name', 'airports'), ('Kent International Airport', 'name', 'airports'), ('John F Kennedy International Airport', 'name', 'airports'), ('Yap International Airport', 'name', 'airports'), ('MBS International Airport', 'name', 'airports'), ('Gan International Airport', 'name', 'airports'), ('Ufa International Airport', 'name', 'airports'), ('Key West International Airport', 'name', 'airports'), ('Rivne International Airport', 'name', 'airports'), ('Chennai International Airport', 'name', 'airports'), ('Senai International Airport', 'name', 'airports'), ('Juneau International Airport', 'name', 'airports'), ('Benina International Airport', 'name', 'airports'), ('Gander International Airport', 'name', 'airports'), ('Vienna International Airport', 'name', 'airports'), ('Cuneo International Airport', 'name', 'airports'), ('Cassidy International Airport', 'name', 'airports'), ('Conakry International Airport', 'name', 'airports'), ('Kansai International Airport', 'name', 'airports'), ('Laredo International Airport', 'name', 'airports'), ('Nadi International Airport', 'name', 'airports'), ('Jinnah International Airport', 'name', 'airports'), ('Kaunas International Airport', 'name', 'airports'), ('Brunei International Airport', 'name', 'airports'), ('Juanda International Airport', 'name', 'airports'), ('Entebbe International Airport', 'name', 'airports'), ('Kelowna International Airport', 'name', 'airports'), ('Penang International Airport', 'name', 'airports'), ('Valley International Airport', 'name', 'airports'), ('Minsk National Airport', 'name', 'airports'), ('Kempegowda International Airport', 'name', 'airports'), ('International AirLink', 'name', 'airlines'), ('Mati National Airport', 'name', 'airports'), ('Lviv International Airport', 'name', 'airports'), ('Arad International Airport', 'name', 'airports'), ('Rota International Airport', 'name', 'airports'), ('Beja International Airport', 'name', 'airports'), ('Taba International Airport', 'name', 'airports'), ('Ovda International Airport', 'name', 'airports'), ('Malé International Airport', 'name', 'airports'), ('Jeju International Airport', 'name', 'airports'), ('Aden International Airport', 'name', 'airports'), ('Pisa International Airport', 'name', 'airports'), ('Niue International Airport', 'name', 'airports'), ('Muan International Airport', 'name', 'airports')]
for v, _, _ in values:
print(f'"{v}",')
print(len(values))
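A guarded, runnable variant of the commented-out analysis above (assumption: the path and the row keys are exactly those shown in the comment):
import json
import os

path = 'data/spider/preprocessed_with_values.json'
if os.path.exists(path):  # only run when the dataset is present
    with open(path, 'r', encoding='utf-8') as json_file:
        data = json.load(json_file)
    for row in data:
        if row['values']:
            candidates = row['ner_extracted_values_processed']
            print(f"Values: {row['values']} Candidates: {candidates}")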
| 200.266667
| 2,582
| 0.677097
| 302
| 3,004
| 6.698676
| 0.291391
| 0.244686
| 0.42264
| 0.680178
| 0.0435
| 0.0435
| 0.0435
| 0
| 0
| 0
| 0
| 0.000377
| 0.11751
| 3,004
| 15
| 2,583
| 200.266667
| 0.762731
| 0.109521
| 0
| 0
| 0
| 0
| 0.704273
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 0.2
| 0.4
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
bc41f81481cf9c22b85d5bba0360dd0e55bb8173
| 13,808
|
py
|
Python
|
test/algorithms/test_admm.py
|
tamiya-onodera/qiskit-optimization
|
4e6a22bb13a13e504de607a02f5afdfd33abfb7c
|
[
"Apache-2.0"
] | null | null | null |
test/algorithms/test_admm.py
|
tamiya-onodera/qiskit-optimization
|
4e6a22bb13a13e504de607a02f5afdfd33abfb7c
|
[
"Apache-2.0"
] | null | null | null |
test/algorithms/test_admm.py
|
tamiya-onodera/qiskit-optimization
|
4e6a22bb13a13e504de607a02f5afdfd33abfb7c
|
[
"Apache-2.0"
] | null | null | null |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Tests of the ADMM algorithm."""
from test import QiskitOptimizationTestCase
import numpy as np
from docplex.mp.model import Model
from qiskit_optimization.algorithms import CobylaOptimizer
from qiskit_optimization.algorithms.admm_optimizer import ADMMOptimizer, ADMMParameters, \
ADMMOptimizationResult, ADMMState
from qiskit_optimization.problems import QuadraticProgram
class TestADMMOptimizer(QiskitOptimizationTestCase):
"""ADMM Optimizer Tests"""
def test_admm_maximization(self):
"""Tests a simple maximization problem using ADMM optimizer"""
mdl = Model('simple-max')
c = mdl.continuous_var(lb=0, ub=10, name='c')
x = mdl.binary_var(name='x')
mdl.maximize(c + x * x)
op = QuadraticProgram()
op.from_docplex(mdl)
admm_params = ADMMParameters()
solver = ADMMOptimizer(params=admm_params, continuous_optimizer=CobylaOptimizer())
solution = solver.solve(op)
self.assertIsNotNone(solution)
self.assertIsInstance(solution, ADMMOptimizationResult)
self.assertIsNotNone(solution.x)
np.testing.assert_almost_equal([10, 0], solution.x, 3)
self.assertIsNotNone(solution.fval)
np.testing.assert_almost_equal(10, solution.fval, 3)
self.assertIsNotNone(solution.state)
self.assertIsInstance(solution.state, ADMMState)
self.assertEqual(len(solution.samples), 1)
self.assertAlmostEqual(solution.fval, solution.samples[0].fval)
np.testing.assert_almost_equal(solution.x, solution.samples[0].x)
self.assertEqual(solution.status, solution.samples[0].status)
self.assertAlmostEqual(solution.samples[0].probability, 1.0)
def test_admm_ex4(self):
"""Example 4 as a unit test. Example 4 is reported in:
Gambella, C., & Simonetto, A. (2020).
Multi-block ADMM Heuristics for Mixed-Binary Optimization on Classical
and Quantum Computers.
arXiv preprint arXiv:2001.02069."""
mdl = Model('ex4')
v = mdl.binary_var(name='v')
w = mdl.binary_var(name='w')
# pylint:disable=invalid-name
t = mdl.binary_var(name='t')
b = 2
mdl.minimize(v + w + t)
mdl.add_constraint(2 * v + 10 * w + t <= 3, "cons1")
mdl.add_constraint(v + w + t >= b, "cons2")
op = QuadraticProgram()
op.from_docplex(mdl)
admm_params = ADMMParameters(
rho_initial=1001, beta=1000, factor_c=900,
maxiter=100, three_block=False
)
solver = ADMMOptimizer(params=admm_params)
solution = solver.solve(op)
self.assertIsNotNone(solution)
self.assertIsInstance(solution, ADMMOptimizationResult)
self.assertIsNotNone(solution.x)
np.testing.assert_almost_equal([1., 0., 1.], solution.x, 3)
self.assertIsNotNone(solution.fval)
np.testing.assert_almost_equal(2., solution.fval, 3)
self.assertIsNotNone(solution.state)
self.assertIsInstance(solution.state, ADMMState)
def test_admm_ex4_no_bin_var_in_objective(self):
"""Modified Example 4 as a unit test. See the description of the previous test.
This test differs from the previous one in the objective: one binary variable
is omitted from the objective, to test a problem where a binary variable is defined
but used only in constraints.
"""
mdl = Model('ex4')
v = mdl.binary_var(name='v')
w = mdl.binary_var(name='w')
# pylint:disable=invalid-name
t = mdl.binary_var(name='t')
b = 2
mdl.minimize(v + t)
mdl.add_constraint(2 * v + 10 * w + t <= 3, "cons1")
mdl.add_constraint(v + w + t >= b, "cons2")
op = QuadraticProgram()
op.from_docplex(mdl)
admm_params = ADMMParameters(
rho_initial=1001, beta=1000, factor_c=900,
maxiter=100, three_block=False
)
solver = ADMMOptimizer(params=admm_params)
solution = solver.solve(op)
self.assertIsNotNone(solution)
self.assertIsInstance(solution, ADMMOptimizationResult)
self.assertIsNotNone(solution.x)
np.testing.assert_almost_equal([1., 0., 1.], solution.x, 3)
self.assertIsNotNone(solution.fval)
np.testing.assert_almost_equal(2., solution.fval, 3)
self.assertIsNotNone(solution.state)
self.assertIsInstance(solution.state, ADMMState)
def test_admm_ex5(self):
"""Example 5 as a unit test. Example 5 is reported in:
Gambella, C., & Simonetto, A. (2020).
Multi-block ADMM Heuristics for Mixed-Binary Optimization on Classical
and Quantum Computers.
arXiv preprint arXiv:2001.02069."""
mdl = Model('ex5')
# pylint:disable=invalid-name
v = mdl.binary_var(name='v')
w = mdl.binary_var(name='w')
t = mdl.binary_var(name='t')
mdl.minimize(v + w + t)
mdl.add_constraint(2 * v + 2 * w + t <= 3, "cons1")
mdl.add_constraint(v + w + t >= 1, "cons2")
mdl.add_constraint(v + w == 1, "cons3")
op = QuadraticProgram()
op.from_docplex(mdl)
admm_params = ADMMParameters(
rho_initial=1001, beta=1000, factor_c=900,
maxiter=100, three_block=False
)
solver = ADMMOptimizer(params=admm_params)
solution = solver.solve(op)
self.assertIsNotNone(solution)
self.assertIsInstance(solution, ADMMOptimizationResult)
self.assertIsNotNone(solution.x)
np.testing.assert_almost_equal([1., 0., 1.], solution.x, 3)
self.assertIsNotNone(solution.fval)
np.testing.assert_almost_equal(2., solution.fval, 3)
self.assertIsNotNone(solution.state)
self.assertIsInstance(solution.state, ADMMState)
def test_admm_ex5_warm_start(self):
"""Example 5 but with a warm start"""
mdl = Model('ex5')
# pylint:disable=invalid-name
v = mdl.binary_var(name='v')
w = mdl.binary_var(name='w')
t = mdl.binary_var(name='t')
mdl.minimize(v + w + t)
mdl.add_constraint(2 * v + 2 * w + t <= 3, "cons1")
mdl.add_constraint(v + w + t >= 1, "cons2")
mdl.add_constraint(v + w == 1, "cons3")
op = QuadraticProgram()
op.from_docplex(mdl)
admm_params = ADMMParameters(
rho_initial=1001, beta=1000, factor_c=900,
maxiter=100, three_block=False, warm_start=True
)
solver = ADMMOptimizer(params=admm_params)
solution = solver.solve(op)
self.assertIsNotNone(solution)
self.assertIsInstance(solution, ADMMOptimizationResult)
self.assertIsNotNone(solution.x)
np.testing.assert_almost_equal([0., 1., 0.], solution.x, 3)
self.assertIsNotNone(solution.fval)
np.testing.assert_almost_equal(1., solution.fval, 3)
self.assertIsNotNone(solution.state)
self.assertIsInstance(solution.state, ADMMState)
def test_admm_ex6(self):
"""Example 6 as a unit test. Example 6 is reported in:
Gambella, C., & Simonetto, A. (2020).
Multi-block ADMM Heuristics for Mixed-Binary Optimization on Classical
and Quantum Computers.
arXiv preprint arXiv:2001.02069."""
mdl = Model('ex6')
# pylint:disable=invalid-name
v = mdl.binary_var(name='v')
w = mdl.binary_var(name='w')
t = mdl.binary_var(name='t')
u = mdl.continuous_var(name='u')
mdl.minimize(v + w + t + 5 * (u - 2) ** 2)
mdl.add_constraint(v + 2 * w + t + u <= 3, "cons1")
mdl.add_constraint(v + w + t >= 1, "cons2")
mdl.add_constraint(v + w == 1, "cons3")
op = QuadraticProgram()
op.from_docplex(mdl)
admm_params = ADMMParameters(
rho_initial=1001, beta=1000, factor_c=900,
maxiter=100, three_block=True, tol=1.e-6
)
solver = ADMMOptimizer(params=admm_params)
solution = solver.solve(op)
self.assertIsNotNone(solution)
self.assertIsInstance(solution, ADMMOptimizationResult)
self.assertIsNotNone(solution.x)
np.testing.assert_almost_equal([1., 0., 0., 2.], solution.x, 3)
self.assertIsNotNone(solution.fval)
np.testing.assert_almost_equal(1., solution.fval, 3)
self.assertIsNotNone(solution.state)
self.assertIsInstance(solution.state, ADMMState)
def test_admm_ex6_max(self):
"""Example 6 as maximization"""
mdl = Model('ex6-max')
# pylint:disable=invalid-name
v = mdl.binary_var(name='v')
w = mdl.binary_var(name='w')
t = mdl.binary_var(name='t')
u = mdl.continuous_var(name='u')
# mdl.minimize(v + w + t + 5 * (u - 2) ** 2)
mdl.maximize(- v - w - t - 5 * (u - 2) ** 2)
mdl.add_constraint(v + 2 * w + t + u <= 3, "cons1")
mdl.add_constraint(v + w + t >= 1, "cons2")
mdl.add_constraint(v + w == 1, "cons3")
op = QuadraticProgram()
op.from_docplex(mdl)
admm_params = ADMMParameters(
rho_initial=1001, beta=1000, factor_c=900,
maxiter=100, three_block=True, tol=1.e-6
)
solver = ADMMOptimizer(params=admm_params)
solution = solver.solve(op)
self.assertIsNotNone(solution)
self.assertIsInstance(solution, ADMMOptimizationResult)
self.assertIsNotNone(solution.x)
np.testing.assert_almost_equal([1., 0., 0., 2.], solution.x, 3)
self.assertIsNotNone(solution.fval)
np.testing.assert_almost_equal(-1., solution.fval, 3)
self.assertIsNotNone(solution.state)
self.assertIsInstance(solution.state, ADMMState)
def test_equality_constraints_with_continuous_variables(self):
"""Simple example to test equality constraints with continuous variables."""
mdl = Model("eq-constraints-cts-vars")
# pylint:disable=invalid-name
v = mdl.binary_var(name='v')
w = mdl.continuous_var(name='w', lb=0.)
t = mdl.continuous_var(name='t', lb=0.)
mdl.minimize(v + w + t)
mdl.add_constraint(2 * v + w >= 2, "cons1")
mdl.add_constraint(w + t == 1, "cons2")
op = QuadraticProgram()
op.from_docplex(mdl)
admm_params = ADMMParameters(
rho_initial=1001, beta=1000, factor_c=900,
maxiter=100, three_block=True,
)
solver = ADMMOptimizer(params=admm_params)
solution = solver.solve(op)
self.assertIsNotNone(solution)
self.assertIsInstance(solution, ADMMOptimizationResult)
self.assertIsNotNone(solution.x)
np.testing.assert_almost_equal([0., 1., 0.], solution.x, 3)
self.assertIsNotNone(solution.fval)
np.testing.assert_almost_equal(1., solution.fval, 3)
self.assertIsNotNone(solution.state)
self.assertIsInstance(solution.state, ADMMState)
def test_quad_constraints(self):
"""Simple example to test quadratic constraints."""
mdl = Model('quad-constraints')
v = mdl.binary_var(name='v')
w = mdl.continuous_var(name='w', lb=0.)
mdl.minimize(v + w)
mdl.add_constraint(v + w >= 1, "cons2")
mdl.add_constraint(v ** 2 + w ** 2 <= 1, "cons2")
op = QuadraticProgram()
op.from_docplex(mdl)
admm_params = ADMMParameters(
rho_initial=1001, beta=1000, factor_c=900,
maxiter=100, three_block=True,
)
solver = ADMMOptimizer(params=admm_params)
solution = solver.solve(op)
self.assertIsNotNone(solution)
self.assertIsInstance(solution, ADMMOptimizationResult)
self.assertIsNotNone(solution.x)
np.testing.assert_almost_equal([0., 1.], solution.x, 3)
self.assertIsNotNone(solution.fval)
np.testing.assert_almost_equal(1., solution.fval, 3)
self.assertIsNotNone(solution.state)
self.assertIsInstance(solution.state, ADMMState)
def test_admm_setters_getters(self):
"""Tests get/set properties of ADMMOptimizer"""
optimizer = ADMMOptimizer()
self.assertEqual(optimizer.parameters.maxiter, 10)
optimizer.parameters.maxiter = 11
self.assertEqual(optimizer.parameters.maxiter, 11)
params = ADMMParameters(maxiter=12)
optimizer.parameters = params
self.assertEqual(optimizer.parameters.maxiter, 12)
def test_integer_variables(self):
"""Tests ADMM with integer variables."""
mdl = Model('integer-variables')
v = mdl.integer_var(lb=5, ub=20, name='v')
w = mdl.continuous_var(name='w', lb=0.)
mdl.minimize(v + w)
op = QuadraticProgram()
op.from_docplex(mdl)
solver = ADMMOptimizer()
solution = solver.solve(op)
self.assertIsNotNone(solution)
self.assertIsInstance(solution, ADMMOptimizationResult)
self.assertIsNotNone(solution.x)
np.testing.assert_almost_equal([5., 0.], solution.x, 3)
self.assertIsNotNone(solution.fval)
np.testing.assert_almost_equal(5., solution.fval, 3)
self.assertIsNotNone(solution.state)
self.assertIsInstance(solution.state, ADMMState)
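A standalone sketch distilled from the tests above, outside the unittest harness (the tiny model is illustrative; the API calls mirror those used in the tests):
from docplex.mp.model import Model
from qiskit_optimization.algorithms.admm_optimizer import ADMMOptimizer, ADMMParameters
from qiskit_optimization.problems import QuadraticProgram

mdl = Model('sketch')
v = mdl.binary_var(name='v')
w = mdl.binary_var(name='w')
mdl.minimize(v + w)
mdl.add_constraint(v + w >= 1, "cons1")
op = QuadraticProgram()
op.from_docplex(mdl)
solution = ADMMOptimizer(params=ADMMParameters(maxiter=100)).solve(op)
print(solution.fval, solution.x)  # expected: one binary variable set, objective value 1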
| 36.723404
| 97
| 0.635139
| 1,683
| 13,808
| 5.102198
| 0.123589
| 0.088506
| 0.125772
| 0.039129
| 0.775242
| 0.746943
| 0.718761
| 0.718761
| 0.718761
| 0.712007
| 0
| 0.032806
| 0.249421
| 13,808
| 375
| 98
| 36.821333
| 0.795735
| 0.142454
| 0
| 0.737255
| 0
| 0
| 0.018662
| 0.001978
| 0
| 0
| 0
| 0
| 0.345098
| 1
| 0.043137
| false
| 0
| 0.023529
| 0
| 0.070588
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
bc6f9ea8a79ea234397be9787028eb47b1abe7d0
| 47
|
py
|
Python
|
app/workers/sigma/__init__.py
|
d3vzer0/reternal-backend
|
aeeb613c820759212e7aef9150738a66b2882d50
|
[
"MIT"
] | 6
|
2019-01-01T23:38:12.000Z
|
2021-07-27T03:43:11.000Z
|
app/workers/sigma/__init__.py
|
d3vzer0/reternal-backend
|
aeeb613c820759212e7aef9150738a66b2882d50
|
[
"MIT"
] | 1
|
2020-08-02T00:21:41.000Z
|
2020-08-02T00:21:41.000Z
|
app/workers/sigma/__init__.py
|
d3vzer0/reternal-backend
|
aeeb613c820759212e7aef9150738a66b2882d50
|
[
"MIT"
] | 1
|
2021-07-27T03:43:24.000Z
|
2021-07-27T03:43:24.000Z
|
from app.workers.sigma.tasks.packager import *
| 23.5
| 46
| 0.808511
| 7
| 47
| 5.428571
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085106
| 47
| 1
| 47
| 47
| 0.883721
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
bc908a8a7212cc3f64bea2da2e87f853c6b98966
| 4,261
|
py
|
Python
|
ceefax/fonts/size4mono/default.py
|
mscroggs/CEEFAX
|
8e7a075de1809064b77360da24ebbbaa409c3bf2
|
[
"MIT"
] | 1
|
2020-03-28T15:53:22.000Z
|
2020-03-28T15:53:22.000Z
|
ceefax/fonts/size4mono/default.py
|
mscroggs/CEEFAX
|
8e7a075de1809064b77360da24ebbbaa409c3bf2
|
[
"MIT"
] | 1
|
2021-02-05T13:43:52.000Z
|
2021-02-05T13:43:52.000Z
|
ceefax/fonts/size4mono/default.py
|
mscroggs/CEEFAX
|
8e7a075de1809064b77360da24ebbbaa409c3bf2
|
[
"MIT"
] | null | null | null |
from ceefax.fonts.font import Font
letters = {"|": "x\n"
"x\n"
"x\n"
"x",
"@": "',,,, x\n"
" x x x\n"
" x x\n"
",'''''x",
"A": "xx''xxx\n"
"x xx xx\n"
"x ,, xx\n"
"x,xx,xx",
"B": "x'''xxx\n"
"x '',xx\n"
"x xx xx\n"
"x,,,xxx",
"C": "xx''xxx\n"
"x xx,xx\n"
"x xx'xx\n"
"xx,,xxx",
"D": "x'''xxx\n"
"x xx xx\n"
"x xx xx\n"
"x,,,xxx",
"E": "x''''xx\n"
"x ''xxx\n"
"x xxxxx\n"
"x,,,,xx",
"F": "x''''xx\n"
"x ''xxx\n"
"x xxxxx\n"
"x,xxxxx",
"G": "xx''xxx\n"
"x xx,xx\n"
"x x, xx\n"
"xx,,xxx",
"H": "x'xx'xx\n"
"x '' xx\n"
"x xx xx\n"
"x,xx,xx",
"I": "x''''xx\n"
"xx xxx\n"
"xx xxx\n"
"x,,,,xx",
"J": "xxxx'xx\n"
"xxxx xx\n"
"x'xx xx\n"
"xx,,xxx",
"K": "x'xx'xx\n"
"x ',xxx\n"
"x x,'xx\n"
"x,xx,xx",
"L": "x'xxxxx\n"
"x xxxxx\n"
"x xxxxx\n"
"x,,,,xx",
"M": "'xxxx'x\n"
" ,'', x\n"
" xxxx x\n"
",xxxx,x",
"N": "x'xx'xx\n"
"x ,' xx\n"
"x xx xx\n"
"x,xx,xx",
"O": "xx''xxx\n"
"x xx xx\n"
"x xx xx\n"
"xx,,xxx",
"P": "x'''xxx\n"
"x xx xx\n"
"x ,,xxx\n"
"x,xxxxx",
"Q": "xx''xxx\n"
"x xx xx\n"
"x x' xx\n"
"xx,,,xx",
"R": "x'''xxx\n"
"x xx xx\n"
"x ,,'xx\n"
"x,xx,xx",
"S": "xx'''xx\n"
"x,''xxx\n"
"xxxx xx\n"
"x,,,xxx",
"T": "''''''x\n"
"xx xxx\n"
"xx xxx\n"
"xx,,xxx",
"U": "x'xx'xx\n"
"x xx xx\n"
"x xx xx\n"
"xx,,xxx",
"V": "x'xxx'x\n"
"x xxx x\n"
"x x',xx\n"
"xx,xxxx",
"W": "'xxxx'x\n"
" xxxx x\n"
" ',,' x\n"
",xxxx,x",
"X": "x'xx'xx\n"
"x,'',xx\n"
"x',,'xx\n"
"x,xx,xx",
"Y": "x'xx'xx\n"
"x,'' xx\n"
"xxx',xx\n"
"xx,xxxx",
"Z": "x''''xx\n"
"xxx',xx\n"
"x',xxxx\n"
"x,,,,xx",
"1": "xxx'xxx\n"
"xx, xxx\n"
"xxx xxx\n"
"xxx,xxx",
"2": "xx''xxx\n"
"x,x',xx\n"
"x',xxxx\n"
"x,,,,xx",
"3": "xx''xxx\n"
"x,x',xx\n"
"x'xx xx\n"
"xx,,xxx",
"4": "x'xxxxx\n"
"x x'xxx\n"
"x ' 'xx\n"
"xxx,xxx",
"5": "x''''xx\n"
"x ''xxx\n"
"x'xx xx\n"
"xx,,xxx",
"6": "xx''xxx\n"
"x ''xxx\n"
"x xx xx\n"
"xx,,xxx",
"7": "x''''xx\n"
"xxxx xx\n"
"xx',xxx\n"
"xx,xxxx",
"8": "xx''xxx\n"
"x,'',xx\n"
"x xx xx\n"
"xx,,xxx",
"9": "xx''xxx\n"
"x xx xx\n"
"xx,, xx\n"
"xx,,xxx",
"0": "xx''xxx\n"
"x xx xx\n"
"x xx xx\n"
"xx,,xxx"}
size4monofont = Font(letters)
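A small sketch of the glyph layout (the Font class's rendering API is not shown in this file, so this only prints the raw rows):
# Each glyph is four newline-separated rows of seven characters.
for row in letters["A"].split("\n"):
    print(row)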
| 27.140127
| 34
| 0.207228
| 501
| 4,261
| 1.762475
| 0.093812
| 0.1812
| 0.208381
| 0.210646
| 0.82786
| 0.750849
| 0.660249
| 0.503964
| 0.431484
| 0.328426
| 0
| 0.006377
| 0.595165
| 4,261
| 156
| 35
| 27.314103
| 0.505507
| 0
| 0
| 0.571429
| 0
| 0
| 0.306501
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.006494
| 0
| 0.006494
| 0
| 0
| 0
| 1
| null | 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
bcad46e50f0d306dd12a79b326b57bcdf2bbd02a
| 11,836
|
py
|
Python
|
src/mlalgms/pairwisemodel.py
|
sandeepbhojwani/foremast-brain
|
b083ea08c0506517ede8501b9ad44408e89afdc6
|
[
"Apache-2.0"
] | null | null | null |
src/mlalgms/pairwisemodel.py
|
sandeepbhojwani/foremast-brain
|
b083ea08c0506517ede8501b9ad44408e89afdc6
|
[
"Apache-2.0"
] | null | null | null |
src/mlalgms/pairwisemodel.py
|
sandeepbhojwani/foremast-brain
|
b083ea08c0506517ede8501b9ad44408e89afdc6
|
[
"Apache-2.0"
] | null | null | null |
from scipy.stats import mannwhitneyu
#, wilcoxon,kruskal,friedmanchisquare
from metadata.globalconfig import globalconfig
from mlalgms.statsmodel import IS_UPPER_BOUND, IS_LOWER_BOUND  # , IS_UPPER_O_LOWER_BOUND
import warnings
warnings.filterwarnings('ignore')
########################################
#
# This package can be used for
# canary deployment
#
########################################
MANN_WHITE = "mannwhitneyu"
#WILCOXON = "wilcoxon"
#KRUSKAL = "kruskal"
#FRIED_MANCHI_SQUARE = "friedmanchisquare"
#ALL = "all"
ANY = "any"
ERROR = "error"
MANN_WHITE_MIN_DATA_POINT =20
WILCOXON_MIN_DATA_POINTS =20
KRUSKAL_MIN_DATA_POINTS = 5
DEFAULT_PAIRWISE_THRESHOLD = 0.05
config = globalconfig()
def TwoDataSetSameDistribution(dataset1, dataset2, alpha=DEFAULT_PAIRWISE_THRESHOLD, algorithm=ANY, bound= IS_UPPER_BOUND):
size = min(len(dataset1),len(dataset2))
p = 0
try:
if (bound== IS_UPPER_BOUND):
stat, p = mannwhitneyu(dataset1, dataset2,True, 'greater')
elif bound == IS_LOWER_BOUND :
stat, p = mannwhitneyu(dataset1, dataset2,True, 'less')
else:
stat, p = mannwhitneyu(dataset1, dataset2,True, 'two-sided')
if p >= alpha:
return True, p , MANN_WHITE, size>=config.getValueByKey("MIN_MANN_WHITE_DATA_POINTS", MANN_WHITE_MIN_DATA_POINT)
else:
return False, p, MANN_WHITE, size>=config.getValueByKey("MIN_MANN_WHITE_DATA_POINTS", MANN_WHITE_MIN_DATA_POINT)
except Exception as e:
if str(e) == "All numbers are identical in mannwhitneyu" :
return True, 0, MANN_WHITE, size>=config.getValueByKey("MIN_MANN_WHITE_DATA_POINTS", MANN_WHITE_MIN_DATA_POINT)
return True, 0, ERROR, size>=config.getValueByKey("MIN_MANN_WHITE_DATA_POINTS", MANN_WHITE_MIN_DATA_POINT)
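A usage sketch for the active function above (the two sample windows are illustrative, not from the source):
# Compare a baseline metric window against a canary window.
baseline = [10.2, 9.9, 10.1, 10.0, 9.8, 10.3, 10.1, 9.7, 10.0, 10.2,
            9.9, 10.1, 10.0, 10.2, 9.8, 10.1, 10.0, 9.9, 10.3, 10.1]
canary = [10.1, 10.0, 9.9, 10.2, 10.1, 9.8, 10.0, 10.3, 9.9, 10.1,
          10.2, 10.0, 9.9, 10.1, 10.0, 10.2, 9.8, 10.1, 10.0, 9.9]
same, p, algorithm, enough_data = TwoDataSetSameDistribution(baseline, canary)
print(same, p, algorithm, enough_data)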
'''
def TwoDataSetSameDistribution(dataset1, dataset2, alpha=DEFAULT_PAIRWISE_THRESHOLD, algorithm=ANY, bound= IS_UPPER_BOUND):
size = min(len(dataset1),len(dataset2))
p = 0
if algorithm == WILCOXON:
try:
stat, p = wilcoxon(dataset1, dataset2,"pratt", True)
if p >= alpha:
return True, p, WILCOXON ,size>=config.getValueByKey("MIN_WILCOXON_DATA_POINTS", MANN_WHITE_MIN_DATA_POINT)
else:
return False, p, WILCOXON , size>=config.getValueByKey("MIN_WILCOXON_DATA_POINTS", WILCOXON_MIN_DATA_POINTS)
except Exception as e:
try:
if (bound== IS_UPPER_BOUND):
stat, p = mannwhitneyu(dataset1, dataset2,True, 'greater')
elif bound == IS_LOWER_BOUND :
stat, p = mannwhitneyu(dataset1, dataset2,True, 'less')
else:
stat, p = mannwhitneyu(dataset1, dataset2,True, 'two-sided')
if p >= alpha:
return True, p , MANN_WHITE , size>=config.getValueByKey("MIN_MANN_WHITE_DATA_POINTS", MANN_WHITE_MIN_DATA_POINT)
else:
return False, p, MANN_WHITE , size>=config.getValueByKey("MIN_MANN_WHITE_DATA_POINTS", MANN_WHITE_MIN_DATA_POINT)
except Exception as e:
return True, 0, ERROR , size>=config.getValueByKey("MIN_MANN_WHITE_DATA_POINTS", MANN_WHITE_MIN_DATA_POINT)
elif algorithm == KRUSKAL:
try:
stat, p = kruskal(dataset1, dataset2)
if p >= alpha:
return True, p,KRUSKAL , size>=config.getValueByKey("MIN_KRUSKAL_DATA_POINTS", KRUSKAL_MIN_DATA_POINTS)
else:
return False, p,KRUSKAL , size>=config.getValueByKey("MIN_KRUSKAL_DATA_POINTS", KRUSKAL_MIN_DATA_POINTS)
except Exception as e:
try:
if (bound== IS_UPPER_BOUND):
stat, p = mannwhitneyu(dataset1, dataset2,True, 'greater')
elif bound == IS_LOWER_BOUND :
stat, p = mannwhitneyu(dataset1, dataset2,True, 'less')
else:
stat, p = mannwhitneyu(dataset1, dataset2,True, 'two-sided')
if p >= alpha:
return True, p , MANN_WHITE , size>=config.getValueByKey("MIN_MANN_WHITE_DATA_POINTS", MANN_WHITE_MIN_DATA_POINT)
else:
return False, p, MANN_WHITE , size>=config.getValueByKey("MIN_MANN_WHITE_DATA_POINTS", MANN_WHITE_MIN_DATA_POINT)
except Exception as e:
return True, 0, ERROR , size>=config.getValueByKey("MIN_MANN_WHITE_DATA_POINTS", MANN_WHITE_MIN_DATA_POINT)
elif algorithm == ALL:
try:
if (bound== IS_UPPER_BOUND):
stat, p = mannwhitneyu(dataset1, dataset2,True, 'greater')
elif bound == IS_LOWER_BOUND :
stat, p = mannwhitneyu(dataset1, dataset2,True, 'less')
else:
stat, p = mannwhitneyu(dataset1, dataset2,True, 'two-sided')
if p >= alpha:
stat, p = wilcoxon(dataset1, dataset2,"pratt", True)
if p >= alpha:
stat, p = kruskal(dataset1, dataset2)
if p >= alpha:
return True, p, ALL , size>=config.getValueByKey("MIN_KRUSKAL_DATA_POINTS", KRUSKAL_MIN_DATA_POINTS)
else:
return False, p, KRUSKAL , size>=config.getValueByKey("MIN_KRUSKAL_DATA_POINTS", KRUSKAL_MIN_DATA_POINTS)
else:
return False, p, WILCOXON , size>=config.getValueByKey("MIN_WILCOXON_DATA_POINTS", WILCOXON_MIN_DATA_POINTS)
else:
return False, p, MANN_WHITE , size>=config.getValueByKey("MIN_MANN_WHITE_DATA_POINTS", MANN_WHITE_MIN_DATA_POINT)
except Exception as e:
try:
if (bound== IS_UPPER_BOUND):
stat, p = mannwhitneyu(dataset1, dataset2,True, 'greater')
elif bound == IS_LOWER_BOUND :
stat, p = mannwhitneyu(dataset1, dataset2,True, 'less')
else:
stat, p = mannwhitneyu(dataset1, dataset2,True, 'two-sided')
if p >= alpha:
return True, p , MANN_WHITE , size>=config.getValueByKey("MIN_MANN_WHITE_DATA_POINTS", MANN_WHITE_MIN_DATA_POINT)
else:
return False, p, MANN_WHITE, size>=config.getValueByKey("MIN_MANN_WHITE_DATA_POINTS", MANN_WHITE_MIN_DATA_POINT)
except Exception as e:
return True, 0, ERROR , MANN_WHITE, size>=config.getValueByKey("MIN_MANN_WHITE_DATA_POINTS", MANN_WHITE_MIN_DATA_POINT)
elif algorithm == ANY:
try:
if (bound== IS_UPPER_BOUND):
stat, p = mannwhitneyu(dataset1, dataset2,True, 'greater')
elif bound == IS_LOWER_BOUND :
stat, p = mannwhitneyu(dataset1, dataset2,True, 'less')
else:
stat, p = mannwhitneyu(dataset1, dataset2,True, 'two-sided')
if p >= alpha:
return True, p, MANN_WHITE , size>=config.getValueByKey("MIN_MANN_WHITE_DATA_POINTS", MANN_WHITE_MIN_DATA_POINT)
stat, p = wilcoxon(dataset1, dataset2,"pratt", True)
if p >= alpha:
return True, p, WILCOXON, size>=config.getValueByKey("MIN_WILCOXON_DATA_POINTS", WILCOXON_MIN_DATA_POINTS)
stat, p = kruskal(dataset1, dataset2)
if p >= alpha:
return True, p, KRUSKAL, size>=config.getValueByKey("MIN_KRUSKAL_DATA_POINTS", KRUSKAL_MIN_DATA_POINTS)
return False, p, ANY, size >= config.getValueByKey("MIN_KRUSKAL_DATA_POINTS", KRUSKAL_MIN_DATA_POINTS)
except Exception as e:
try:
if (bound== IS_UPPER_BOUND):
stat, p = mannwhitneyu(dataset1, dataset2,True, 'greater')
elif bound == IS_LOWER_BOUND :
stat, p = mannwhitneyu(dataset1, dataset2,True, 'less')
else:
stat, p = mannwhitneyu(dataset1, dataset2,True, 'two-sided')
if p >= alpha:
return True, p , MANN_WHITE , size>=config.getValueByKey("MIN_MANN_WHITE_DATA_POINTS", MANN_WHITE_MIN_DATA_POINT)
else:
return False, p, MANN_WHITE, size>=config.getValueByKey("MIN_MANN_WHITE_DATA_POINTS", MANN_WHITE_MIN_DATA_POINT)
except Exception as e:
return True, 0, ERROR , size>=config.getValueByKey("MIN_MANN_WHITE_DATA_POINTS", MANN_WHITE_MIN_DATA_POINT)
else:
try:
if (bound== IS_UPPER_BOUND):
stat, p = mannwhitneyu(dataset1, dataset2,True, 'greater')
elif bound == IS_LOWER_BOUND :
stat, p = mannwhitneyu(dataset1, dataset2,True, 'less')
else:
stat, p = mannwhitneyu(dataset1, dataset2,True, 'two-sided')
if p >= alpha:
return True, p , MANN_WHITE, size>=config.getValueByKey("MIN_MANN_WHITE_DATA_POINTS", MANN_WHITE_MIN_DATA_POINT)
else:
return False, p, MANN_WHITE, size>=config.getValueByKey("MIN_MANN_WHITE_DATA_POINTS", MANN_WHITE_MIN_DATA_POINT)
except Exception as e:
if str(e) == "All numbers are identical in mannwhitneyu" :
return True, 0, MANN_WHITE, size>=config.getValueByKey("MIN_MANN_WHITE_DATA_POINTS", MANN_WHITE_MIN_DATA_POINT)
return True, 0, ERROR, size>=config.getValueByKey("MIN_MANN_WHITE_DATA_POINTS", MANN_WHITE_MIN_DATA_POINT)
def MultipleDataSetSameDistribution(list, alpha = DEFAULT_PAIRWISE_THRESHOLD, algorithm=KRUSKAL):
stat=0
p=0
length = len(list)
size = 0
if algorithm == KRUSKAL:
if length==3:
size = min(len(list[0]), len(list[1]), len(list[2]))
stat, p = kruskal(list[0], list[1], list[2])
elif length==4:
size = min(len(list[0]), len(list[1]), len(list[2]), len(list[3]))
stat, p = kruskal(list[0], list[1], list[2],list[3])
elif length==5:
size = min(len(list[0]), len(list[1]), len(list[2]), len(list[3]), len(list[4]))
stat, p = kruskal(list[0], list[1], list[2],list[3],list[4])
elif length==6:
size = min(len(list[0]), len(list[1]), len(list[2]), len(list[3]), len(list[4]),len(list[5]))
stat, p = kruskal(list[0], list[1], list[2],list[3],list[4],list[5])
else:
size = minSize(list)
stat, p = kruskal(*list)
if p >= alpha:
return True, p, KRUSKAL, size>=config.getValueByKey("MIN_KRUSKAL_DATA_POINTS")
else:
return False, p, KRUSKAL, size>=config.getValueByKey("MIN_KRUSKAL_DATA_POINTS")
elif algorithm == FRIED_MANCHI_SQUARE:
if length==3:
stat, p =friedmanchisquare(list[0], list[1], list[2])
elif length==4:
stat, p = friedmanchisquare(list[0], list[1], list[2],list[3])
elif length==5:
stat, p = friedmanchisquare(list[0], list[1], list[2],list[3],list[4])
elif length==6:
stat, p = friedmanchisquare(list[0], list[1], list[2],list[3],list[4],list[5])
else:
stat, p = friedmanchisquare(*list)
if p >= alpha:
return True, p,FRIED_MANCHI_SQUARE,True
else:
return False,p,FRIED_MANCHI_SQUARE,True
return True, p, ERROR,True
def minSize(list):
    # Bug fix: starting the running minimum at 0 always returned 0;
    # start from the first dataset's length instead.
    min_size = len(list[0]) if list else 0
    for data in list:
        if min_size > len(data):
            min_size = len(data)
    return min_size
def MultipleDataSetSameDistribution(*dataset,algorithm=KRUSKAL, alpha = DEFAULT_PAIRWISE_THRESHOLD):
if algorithm == FRIED_MANCHI_SQUARE:
stat, p = friedmanchisquare(*dataset)
if p >= alpha:
return True,p
else:
return False,p
else:
stat, p = kruskal(*dataset)
if p >= alpha:
return True,p
else:
return False,p
return False,p
'''
| 46.968254
| 142
| 0.61051
| 1,435
| 11,836
| 4.802091
| 0.062021
| 0.083587
| 0.113481
| 0.128283
| 0.861268
| 0.846466
| 0.846466
| 0.842548
| 0.842548
| 0.830068
| 0
| 0.017747
| 0.27636
| 11,836
| 251
| 143
| 47.155378
| 0.786807
| 0.01656
| 0
| 0.064516
| 0
| 0
| 0.125328
| 0.068241
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032258
| false
| 0
| 0.129032
| 0
| 0.290323
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
bcee98c38935c24b509fbd4444e11d4758ed75ce
| 49
|
py
|
Python
|
Chapter_I/hello.py
|
dragos-constantin-stoica/python_book
|
3b9cab74f7220f872d5c618c6f64fe3a43da3134
|
[
"Apache-2.0"
] | null | null | null |
Chapter_I/hello.py
|
dragos-constantin-stoica/python_book
|
3b9cab74f7220f872d5c618c6f64fe3a43da3134
|
[
"Apache-2.0"
] | null | null | null |
Chapter_I/hello.py
|
dragos-constantin-stoica/python_book
|
3b9cab74f7220f872d5c618c6f64fe3a43da3134
|
[
"Apache-2.0"
] | null | null | null |
print ("Hello world from my 1st Python program!")
| 49
| 49
| 0.755102
| 8
| 49
| 4.625
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.02381
| 0.142857
| 49
| 1
| 49
| 49
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0.78
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
bcfc18f2e97b33cadcba0d20a4d6bc9247df56eb
| 50,723
|
py
|
Python
|
src/main.py
|
Ethan-Francolla/Wordle-Solver
|
85c68530d3d6f8a2aaec7523b411be2d51190a38
|
[
"MIT"
] | 6
|
2022-01-24T13:51:14.000Z
|
2022-03-02T12:39:24.000Z
|
src/main.py
|
Ethan-Francolla/Wordle-Solver
|
85c68530d3d6f8a2aaec7523b411be2d51190a38
|
[
"MIT"
] | 6
|
2022-03-05T18:00:50.000Z
|
2022-03-07T23:33:13.000Z
|
src/main.py
|
Ethan-Francolla/Wordle-Analyzer
|
85c68530d3d6f8a2aaec7523b411be2d51190a38
|
[
"MIT"
] | null | null | null |
'''
Credit to dwyl for providing the database of all English words
https://github.com/dwyl/english-words
'''
# Import necessary libraries and modules
import random as rand # Used for random number generation
import word_list_generation  # Turn the word databases into lists of lists
import welcome  # Module used to welcome the user to the program
import frequency_analysis as fqan # Import a module used for frequency analysis
# Welcome the user to the game, get their mode selection and difficulty, and run the necessary setup functions
welcome.print_instructions()
welcome.mode_selection()
welcome.game_difficulty()
word_list_generation.generate_deduction_lists()
word_list_generation.get_5_letter_words_list_all_inputs()
fqan.generate_highest_net_frequency_word()
from welcome import mode_selection
from welcome import game_difficulty
from word_list_generation import unguessed_letters_list
from word_list_generation import known_letters_list
from word_list_generation import five_letter_words_list_all_inputs
from word_list_generation import possible_char_1_list
from word_list_generation import possible_char_2_list
from word_list_generation import possible_char_3_list
from word_list_generation import possible_char_4_list
from word_list_generation import possible_char_5_list
from frequency_analysis import highest_net_frequency_word
# Load in the proper data depending on which mode the user selected
original_five_letter_words_list = []
five_letter_words_list = []
for word in five_letter_words_list_all_inputs:
original_five_letter_words_list.append(word)
five_letter_words_list.append(word)
# Define a function that processes the first word
def process_first_word(first_word):
# Tell the user which word to enter into Wordle as the first guess
print("Enter the word " + first_word.upper() + " as your first guess.")
print("\nPlease enter the corresponding data for each letter below:")
# Assign variables for each of the characters in the first word and remove them from the unguessed letters list
global word_1_char_1,word_1_char_2,word_1_char_3,word_1_char_4,word_1_char_5
first_word_char_list = list(first_word)
word_1_char_1 = first_word_char_list[0]
word_1_char_2 = first_word_char_list[1]
word_1_char_3 = first_word_char_list[2]
word_1_char_4 = first_word_char_list[3]
word_1_char_5 = first_word_char_list[4]
if word_1_char_1 in unguessed_letters_list:
unguessed_letters_list.remove(word_1_char_1)
if word_1_char_2 in unguessed_letters_list:
unguessed_letters_list.remove(word_1_char_2)
if word_1_char_3 in unguessed_letters_list:
unguessed_letters_list.remove(word_1_char_3)
if word_1_char_4 in unguessed_letters_list:
unguessed_letters_list.remove(word_1_char_4)
if word_1_char_5 in unguessed_letters_list:
unguessed_letters_list.remove(word_1_char_5)
# Get the resulting data for each character of the first word and reject bad inputs ('g' = grey, 'y' = yellow, 'gr' = green)
while True:
word_1_char_1_data = str(input(word_1_char_1.upper() +": "))
if word_1_char_1_data == 'g' or word_1_char_1_data == 'y' or word_1_char_1_data == 'gr':
break
while True:
word_1_char_2_data = str(input(word_1_char_2.upper() + ": "))
if word_1_char_2_data == 'g' or word_1_char_2_data == 'y' or word_1_char_2_data == 'gr':
break
while True:
word_1_char_3_data = str(input(word_1_char_3.upper() + ": "))
if word_1_char_3_data == 'g' or word_1_char_3_data == 'y' or word_1_char_3_data == 'gr':
break
while True:
word_1_char_4_data = str(input(word_1_char_4.upper() + ": "))
if word_1_char_4_data == 'g' or word_1_char_4_data == 'y' or word_1_char_4_data == 'gr':
break
while True:
word_1_char_5_data = str(input(word_1_char_5.upper() + ": "))
if word_1_char_5_data == 'g' or word_1_char_5_data == 'y' or word_1_char_5_data == 'gr':
break
print("processing...")
# Eliminate letter possibilities based on the user's inputted data
if word_1_char_1_data == 'g':
if word_1_char_1 in possible_char_1_list:
possible_char_1_list.remove(word_1_char_1)
if word_1_char_1 in possible_char_2_list:
possible_char_2_list.remove(word_1_char_1)
if word_1_char_1 in possible_char_3_list:
possible_char_3_list.remove(word_1_char_1)
if word_1_char_1 in possible_char_4_list:
possible_char_4_list.remove(word_1_char_1)
if word_1_char_1 in possible_char_5_list:
possible_char_5_list.remove(word_1_char_1)
if word_1_char_1_data == 'y':
if word_1_char_1 in possible_char_1_list:
possible_char_1_list.remove(word_1_char_1)
if word_1_char_1 not in known_letters_list:
known_letters_list.append(word_1_char_1)
if word_1_char_1_data == 'gr':
possible_char_1_list.clear()
possible_char_1_list.append(word_1_char_1)
if word_1_char_1 not in known_letters_list:
known_letters_list.append(word_1_char_1)
if word_1_char_2_data == 'g':
if word_1_char_2 in possible_char_1_list:
possible_char_1_list.remove(word_1_char_2)
if word_1_char_2 in possible_char_2_list:
possible_char_2_list.remove(word_1_char_2)
if word_1_char_2 in possible_char_3_list:
possible_char_3_list.remove(word_1_char_2)
if word_1_char_2 in possible_char_4_list:
possible_char_4_list.remove(word_1_char_2)
if word_1_char_2 in possible_char_5_list:
possible_char_5_list.remove(word_1_char_2)
if word_1_char_2_data == 'y':
if word_1_char_2 in possible_char_2_list:
possible_char_2_list.remove(word_1_char_2)
if word_1_char_2 not in known_letters_list:
known_letters_list.append(word_1_char_2)
if word_1_char_2_data == 'gr':
possible_char_2_list.clear()
possible_char_2_list.append(word_1_char_2)
if word_1_char_2 not in known_letters_list:
known_letters_list.append(word_1_char_2)
if word_1_char_3_data == 'g':
if word_1_char_3 in possible_char_1_list:
possible_char_1_list.remove(word_1_char_3)
if word_1_char_3 in possible_char_2_list:
possible_char_2_list.remove(word_1_char_3)
if word_1_char_3 in possible_char_3_list:
possible_char_3_list.remove(word_1_char_3)
if word_1_char_3 in possible_char_4_list:
possible_char_4_list.remove(word_1_char_3)
if word_1_char_3 in possible_char_5_list:
possible_char_5_list.remove(word_1_char_3)
if word_1_char_3_data == 'y':
if word_1_char_3 in possible_char_3_list:
possible_char_3_list.remove(word_1_char_3)
if word_1_char_3 not in known_letters_list:
known_letters_list.append(word_1_char_3)
if word_1_char_3_data == 'gr':
possible_char_3_list.clear()
possible_char_3_list.append(word_1_char_3)
if word_1_char_3 not in known_letters_list:
known_letters_list.append(word_1_char_3)
if word_1_char_4_data == 'g':
if word_1_char_4 in possible_char_1_list:
possible_char_1_list.remove(word_1_char_4)
if word_1_char_4 in possible_char_2_list:
possible_char_2_list.remove(word_1_char_4)
if word_1_char_4 in possible_char_3_list:
possible_char_3_list.remove(word_1_char_4)
if word_1_char_4 in possible_char_4_list:
possible_char_4_list.remove(word_1_char_4)
if word_1_char_4 in possible_char_5_list:
possible_char_5_list.remove(word_1_char_4)
if word_1_char_4_data == 'y':
if word_1_char_4 in possible_char_4_list:
possible_char_4_list.remove(word_1_char_4)
if word_1_char_4 not in known_letters_list:
known_letters_list.append(word_1_char_4)
if word_1_char_4_data == 'gr':
possible_char_4_list.clear()
possible_char_4_list.append(word_1_char_4)
if word_1_char_4 not in known_letters_list:
known_letters_list.append(word_1_char_4)
if word_1_char_5_data == 'g':
if word_1_char_5 in possible_char_1_list:
possible_char_1_list.remove(word_1_char_5)
if word_1_char_5 in possible_char_2_list:
possible_char_2_list.remove(word_1_char_5)
if word_1_char_5 in possible_char_3_list:
possible_char_3_list.remove(word_1_char_5)
if word_1_char_5 in possible_char_4_list:
possible_char_4_list.remove(word_1_char_5)
if word_1_char_5 in possible_char_5_list:
possible_char_5_list.remove(word_1_char_5)
if word_1_char_5_data == 'y':
if word_1_char_5 in possible_char_5_list:
possible_char_5_list.remove(word_1_char_5)
if word_1_char_5 not in known_letters_list:
known_letters_list.append(word_1_char_5)
if word_1_char_5_data == 'gr':
possible_char_5_list.clear()
possible_char_5_list.append(word_1_char_5)
if word_1_char_5 not in known_letters_list:
known_letters_list.append(word_1_char_5)
# Remove words from possibilities based on the user's inputted data
for i in range(20):  # Repeat several passes: list.remove while iterating skips elements, so one pass is not enough
# Remove words with known impossible letters in a specific location
for word in five_letter_words_list:
if word[0] not in possible_char_1_list:
five_letter_words_list.remove(word)
for word in five_letter_words_list:
if word[1] not in possible_char_2_list:
five_letter_words_list.remove(word)
for word in five_letter_words_list:
if word[2] not in possible_char_3_list:
five_letter_words_list.remove(word)
for word in five_letter_words_list:
if word[3] not in possible_char_4_list:
five_letter_words_list.remove(word)
for word in five_letter_words_list:
if word[4] not in possible_char_5_list:
five_letter_words_list.remove(word)
# Remove words without letters that are known to be in the word
for word in five_letter_words_list:
for letter in known_letters_list:
if letter not in word:
if word in five_letter_words_list:
five_letter_words_list.remove(word)
# Select the next word to be guessed
select_next_guess(mode_selection)
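The five per-character blocks above repeat the same three rules; a compact, loop-based sketch of those rules follows (illustrative refactor, not part of the original program; apply_char_data is a hypothetical helper):
possible_lists = [possible_char_1_list, possible_char_2_list, possible_char_3_list,
                  possible_char_4_list, possible_char_5_list]

def apply_char_data(position, char, data):
    # Hypothetical helper mirroring the elimination rules used above.
    if data == 'g':  # grey: the letter appears nowhere in the answer
        for plist in possible_lists:
            if char in plist:
                plist.remove(char)
    elif data == 'y':  # yellow: in the answer, but not at this position
        if char in possible_lists[position]:
            possible_lists[position].remove(char)
        if char not in known_letters_list:
            known_letters_list.append(char)
    elif data == 'gr':  # green: fixed at this position
        possible_lists[position].clear()
        possible_lists[position].append(char)
        if char not in known_letters_list:
            known_letters_list.append(char)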
# Define a function that processes the second word
def process_second_word(next_guess):
# Tell the user which word to enter into Wordle as the second guess
print("Enter the word " + next_guess.upper() + " as your second guess.")
print("\nPlease enter the corresponding data for each letter below:")
# Assign variables for each of the characters in the first word and remove them from the unguessed letters list
global word_2_char_1,word_2_char_2,word_2_char_3,word_2_char_4,word_2_char_5
next_guess_char_list = list(next_guess)
word_2_char_1 = next_guess_char_list[0]
word_2_char_2 = next_guess_char_list[1]
word_2_char_3 = next_guess_char_list[2]
word_2_char_4 = next_guess_char_list[3]
word_2_char_5 = next_guess_char_list[4]
if word_2_char_1 in unguessed_letters_list:
unguessed_letters_list.remove(word_2_char_1)
if word_2_char_2 in unguessed_letters_list:
unguessed_letters_list.remove(word_2_char_2)
if word_2_char_3 in unguessed_letters_list:
unguessed_letters_list.remove(word_2_char_3)
if word_2_char_4 in unguessed_letters_list:
unguessed_letters_list.remove(word_2_char_4)
if word_2_char_5 in unguessed_letters_list:
unguessed_letters_list.remove(word_2_char_5)
# Get the resulting data for each character of the second word and reject bad inputs
while True:
word_2_char_1_data = str(input(word_2_char_1.upper() +": "))
if word_2_char_1_data == 'g' or word_2_char_1_data == 'y' or word_2_char_1_data == 'gr':
break
while True:
word_2_char_2_data = str(input(word_2_char_2.upper() + ": "))
if word_2_char_2_data == 'g' or word_2_char_2_data == 'y' or word_2_char_2_data == 'gr':
break
while True:
word_2_char_3_data = str(input(word_2_char_3.upper() + ": "))
if word_2_char_3_data == 'g' or word_2_char_3_data == 'y' or word_2_char_3_data == 'gr':
break
while True:
word_2_char_4_data = str(input(word_2_char_4.upper() + ": "))
if word_2_char_4_data == 'g' or word_2_char_4_data == 'y' or word_2_char_4_data == 'gr':
break
while True:
word_2_char_5_data = str(input(word_2_char_5.upper() + ": "))
if word_2_char_5_data == 'g' or word_2_char_5_data == 'y' or word_2_char_5_data == 'gr':
break
print("processing...")
# Eliminate letter possibilities based on the user's inputted data
if word_2_char_1_data == 'g':
if word_2_char_1 in possible_char_1_list:
possible_char_1_list.remove(word_2_char_1)
if word_2_char_1 in possible_char_2_list:
possible_char_2_list.remove(word_2_char_1)
if word_2_char_1 in possible_char_3_list:
possible_char_3_list.remove(word_2_char_1)
if word_2_char_1 in possible_char_4_list:
possible_char_4_list.remove(word_2_char_1)
if word_2_char_1 in possible_char_5_list:
possible_char_5_list.remove(word_2_char_1)
if word_2_char_1_data == 'y':
if word_2_char_1 in possible_char_1_list:
possible_char_1_list.remove(word_2_char_1)
if word_2_char_1 not in known_letters_list:
known_letters_list.append(word_2_char_1)
if word_2_char_1_data == 'gr':
possible_char_1_list.clear()
possible_char_1_list.append(word_2_char_1)
if word_2_char_1 not in known_letters_list:
known_letters_list.append(word_2_char_1)
if word_2_char_2_data == 'g':
if word_2_char_2 in possible_char_1_list:
possible_char_1_list.remove(word_2_char_2)
if word_2_char_2 in possible_char_2_list:
possible_char_2_list.remove(word_2_char_2)
if word_2_char_2 in possible_char_3_list:
possible_char_3_list.remove(word_2_char_2)
if word_2_char_2 in possible_char_4_list:
possible_char_4_list.remove(word_2_char_2)
if word_2_char_2 in possible_char_5_list:
possible_char_5_list.remove(word_2_char_2)
if word_2_char_2_data == 'y':
if word_2_char_2 in possible_char_2_list:
possible_char_2_list.remove(word_2_char_2)
if word_2_char_2 not in known_letters_list:
known_letters_list.append(word_2_char_2)
if word_2_char_2_data == 'gr':
possible_char_2_list.clear()
possible_char_2_list.append(word_2_char_2)
if word_2_char_2 not in known_letters_list:
known_letters_list.append(word_2_char_2)
if word_2_char_3_data == 'g':
if word_2_char_3 in possible_char_1_list:
possible_char_1_list.remove(word_2_char_3)
if word_2_char_3 in possible_char_2_list:
possible_char_2_list.remove(word_2_char_3)
if word_2_char_3 in possible_char_3_list:
possible_char_3_list.remove(word_2_char_3)
if word_2_char_3 in possible_char_4_list:
possible_char_4_list.remove(word_2_char_3)
if word_2_char_3 in possible_char_5_list:
possible_char_5_list.remove(word_2_char_3)
if word_2_char_3_data == 'y':
if word_2_char_3 in possible_char_3_list:
possible_char_3_list.remove(word_2_char_3)
if word_2_char_3 not in known_letters_list:
known_letters_list.append(word_2_char_3)
if word_2_char_3_data == 'gr':
possible_char_3_list.clear()
possible_char_3_list.append(word_2_char_3)
if word_2_char_3 not in known_letters_list:
known_letters_list.append(word_2_char_3)
if word_2_char_4_data == 'g':
if word_2_char_4 in possible_char_1_list:
possible_char_1_list.remove(word_2_char_4)
if word_2_char_4 in possible_char_2_list:
possible_char_2_list.remove(word_2_char_4)
if word_2_char_4 in possible_char_3_list:
possible_char_3_list.remove(word_2_char_4)
if word_2_char_4 in possible_char_4_list:
possible_char_4_list.remove(word_2_char_4)
if word_2_char_4 in possible_char_5_list:
possible_char_5_list.remove(word_2_char_4)
if word_2_char_4_data == 'y':
if word_2_char_4 in possible_char_4_list:
possible_char_4_list.remove(word_2_char_4)
if word_2_char_4 not in known_letters_list:
known_letters_list.append(word_2_char_4)
if word_2_char_4_data == 'gr':
possible_char_4_list.clear()
possible_char_4_list.append(word_2_char_4)
if word_2_char_4 not in known_letters_list:
known_letters_list.append(word_2_char_4)
if word_2_char_5_data == 'g':
if word_2_char_5 in possible_char_1_list:
possible_char_1_list.remove(word_2_char_5)
if word_2_char_5 in possible_char_2_list:
possible_char_2_list.remove(word_2_char_5)
if word_2_char_5 in possible_char_3_list:
possible_char_3_list.remove(word_2_char_5)
if word_2_char_5 in possible_char_4_list:
possible_char_4_list.remove(word_2_char_5)
if word_2_char_5 in possible_char_5_list:
possible_char_5_list.remove(word_2_char_5)
if word_2_char_5_data == 'y':
if word_2_char_5 in possible_char_5_list:
possible_char_5_list.remove(word_2_char_5)
if word_2_char_5 not in known_letters_list:
known_letters_list.append(word_2_char_5)
if word_2_char_5_data == 'gr':
possible_char_5_list.clear()
possible_char_5_list.append(word_2_char_5)
if word_2_char_5 not in known_letters_list:
known_letters_list.append(word_2_char_5)
# Remove words from possibilities based on the user's inputted data
for i in range(20):
# Remove words with known impossible letters in a specific location
for word in five_letter_words_list:
if word[0] not in possible_char_1_list:
five_letter_words_list.remove(word)
for word in five_letter_words_list:
if word[1] not in possible_char_2_list:
five_letter_words_list.remove(word)
for word in five_letter_words_list:
if word[2] not in possible_char_3_list:
five_letter_words_list.remove(word)
for word in five_letter_words_list:
if word[3] not in possible_char_4_list:
five_letter_words_list.remove(word)
for word in five_letter_words_list:
if word[4] not in possible_char_5_list:
five_letter_words_list.remove(word)
# Remove words without letters that are known to be in the word
for word in five_letter_words_list:
for letter in known_letters_list:
if letter not in word:
if word in five_letter_words_list:
five_letter_words_list.remove(word)
# Select the next word to be guessed
select_next_guess(mode_selection)
# Define a function that processes the third word
def process_third_word(next_guess):
# Tell the user which word to enter into Wordle as the third guess
print("Enter the word " + next_guess.upper() + " as your third guess.")
print("\nPlease enter the corresponding data for each letter below:")
# Assign variables for each of the characters in the first word and remove them from the unguessed letters list
global word_3_char_1,word_3_char_2,word_3_char_3,word_3_char_4,word_3_char_5
next_guess_char_list = list(next_guess)
word_3_char_1 = next_guess_char_list[0]
word_3_char_2 = next_guess_char_list[1]
word_3_char_3 = next_guess_char_list[2]
word_3_char_4 = next_guess_char_list[3]
word_3_char_5 = next_guess_char_list[4]
if word_3_char_1 in unguessed_letters_list:
unguessed_letters_list.remove(word_3_char_1)
if word_3_char_2 in unguessed_letters_list:
unguessed_letters_list.remove(word_3_char_2)
if word_3_char_3 in unguessed_letters_list:
unguessed_letters_list.remove(word_3_char_3)
if word_3_char_4 in unguessed_letters_list:
unguessed_letters_list.remove(word_3_char_4)
if word_3_char_5 in unguessed_letters_list:
unguessed_letters_list.remove(word_3_char_5)
# Get the resulting data for each character of the third word and reject bad inputs
while True:
word_3_char_1_data = str(input(word_3_char_1.upper() +": "))
if word_3_char_1_data == 'g' or word_3_char_1_data == 'y' or word_3_char_1_data == 'gr':
break
while True:
word_3_char_2_data = str(input(word_3_char_2.upper() + ": "))
if word_3_char_2_data == 'g' or word_3_char_2_data == 'y' or word_3_char_2_data == 'gr':
break
while True:
word_3_char_3_data = str(input(word_3_char_3.upper() + ": "))
if word_3_char_3_data == 'g' or word_3_char_3_data == 'y' or word_3_char_3_data == 'gr':
break
while True:
word_3_char_4_data = str(input(word_3_char_4.upper() + ": "))
if word_3_char_4_data == 'g' or word_3_char_4_data == 'y' or word_3_char_4_data == 'gr':
break
while True:
word_3_char_5_data = str(input(word_3_char_5.upper() + ": "))
if word_3_char_5_data == 'g' or word_3_char_5_data == 'y' or word_3_char_5_data == 'gr':
break
print("processing...")
# Eliminate letter possibilities based on the user's inputted data
if word_3_char_1_data == 'g':
if word_3_char_1 in possible_char_1_list:
possible_char_1_list.remove(word_3_char_1)
if word_3_char_1 in possible_char_2_list:
possible_char_2_list.remove(word_3_char_1)
if word_3_char_1 in possible_char_3_list:
possible_char_3_list.remove(word_3_char_1)
if word_3_char_1 in possible_char_4_list:
possible_char_4_list.remove(word_3_char_1)
if word_3_char_1 in possible_char_5_list:
possible_char_5_list.remove(word_3_char_1)
if word_3_char_1_data == 'y':
if word_3_char_1 in possible_char_1_list:
possible_char_1_list.remove(word_3_char_1)
if word_3_char_1 not in known_letters_list:
known_letters_list.append(word_3_char_1)
if word_3_char_1_data == 'gr':
possible_char_1_list.clear()
possible_char_1_list.append(word_3_char_1)
if word_3_char_1 not in known_letters_list:
known_letters_list.append(word_3_char_1)
if word_3_char_2_data == 'g':
if word_3_char_2 in possible_char_1_list:
possible_char_1_list.remove(word_3_char_2)
if word_3_char_2 in possible_char_2_list:
possible_char_2_list.remove(word_3_char_2)
if word_3_char_2 in possible_char_3_list:
possible_char_3_list.remove(word_3_char_2)
if word_3_char_2 in possible_char_4_list:
possible_char_4_list.remove(word_3_char_2)
if word_3_char_2 in possible_char_5_list:
possible_char_5_list.remove(word_3_char_2)
if word_3_char_2_data == 'y':
if word_3_char_2 in possible_char_2_list:
possible_char_2_list.remove(word_3_char_2)
if word_3_char_2 not in known_letters_list:
known_letters_list.append(word_3_char_2)
if word_3_char_2_data == 'gr':
possible_char_2_list.clear()
possible_char_2_list.append(word_3_char_2)
if word_3_char_2 not in known_letters_list:
known_letters_list.append(word_3_char_2)
if word_3_char_3_data == 'g':
if word_3_char_3 in possible_char_1_list:
possible_char_1_list.remove(word_3_char_3)
if word_3_char_3 in possible_char_2_list:
possible_char_2_list.remove(word_3_char_3)
if word_3_char_3 in possible_char_3_list:
possible_char_3_list.remove(word_3_char_3)
if word_3_char_3 in possible_char_4_list:
possible_char_4_list.remove(word_3_char_3)
if word_3_char_3 in possible_char_5_list:
possible_char_5_list.remove(word_3_char_3)
if word_3_char_3_data == 'y':
if word_3_char_3 in possible_char_3_list:
possible_char_3_list.remove(word_3_char_3)
if word_3_char_3 not in known_letters_list:
known_letters_list.append(word_3_char_3)
if word_3_char_3_data == 'gr':
possible_char_3_list.clear()
possible_char_3_list.append(word_3_char_3)
if word_3_char_3 not in known_letters_list:
known_letters_list.append(word_3_char_3)
if word_3_char_4_data == 'g':
if word_3_char_4 in possible_char_1_list:
possible_char_1_list.remove(word_3_char_4)
if word_3_char_4 in possible_char_2_list:
possible_char_2_list.remove(word_3_char_4)
if word_3_char_4 in possible_char_3_list:
possible_char_3_list.remove(word_3_char_4)
if word_3_char_4 in possible_char_4_list:
possible_char_4_list.remove(word_3_char_4)
if word_3_char_4 in possible_char_5_list:
possible_char_5_list.remove(word_3_char_4)
if word_3_char_4_data == 'y':
if word_3_char_4 in possible_char_4_list:
possible_char_4_list.remove(word_3_char_4)
if word_3_char_4 not in known_letters_list:
known_letters_list.append(word_3_char_4)
if word_3_char_4_data == 'gr':
possible_char_4_list.clear()
possible_char_4_list.append(word_3_char_4)
if word_3_char_4 not in known_letters_list:
known_letters_list.append(word_3_char_4)
if word_3_char_5_data == 'g':
if word_3_char_5 in possible_char_1_list:
possible_char_1_list.remove(word_3_char_5)
if word_3_char_5 in possible_char_2_list:
possible_char_2_list.remove(word_3_char_5)
if word_3_char_5 in possible_char_3_list:
possible_char_3_list.remove(word_3_char_5)
if word_3_char_5 in possible_char_4_list:
possible_char_4_list.remove(word_3_char_5)
if word_3_char_5 in possible_char_5_list:
possible_char_5_list.remove(word_3_char_5)
if word_3_char_5_data == 'y':
if word_3_char_5 in possible_char_5_list:
possible_char_5_list.remove(word_3_char_5)
if word_3_char_5 not in known_letters_list:
known_letters_list.append(word_3_char_5)
if word_3_char_5_data == 'gr':
possible_char_5_list.clear()
possible_char_5_list.append(word_3_char_5)
if word_3_char_5 not in known_letters_list:
known_letters_list.append(word_3_char_5)
    # Remove words from the possibilities based on the user's input.
    # Slice assignment avoids the remove-while-iterating bug that the
    # original worked around by repeating the pass 20 times.
    # Keep only words whose letter at each position is still possible there
    five_letter_words_list[:] = [
        word for word in five_letter_words_list
        if word[0] in possible_char_1_list
        and word[1] in possible_char_2_list
        and word[2] in possible_char_3_list
        and word[3] in possible_char_4_list
        and word[4] in possible_char_5_list
    ]
    # Keep only words that contain every letter known to be in the answer
    five_letter_words_list[:] = [
        word for word in five_letter_words_list
        if all(letter in word for letter in known_letters_list)
    ]
# Select the next word to be guessed
select_next_guess(mode_selection)
# Define a function that processes the fourth word
def process_fourth_word(next_guess):
    # Tell the user which word to enter into Wordle
    print("Enter the word " + next_guess.upper() + " as your fourth guess.")
    print("\nPlease enter the corresponding data for each letter below:")
    # Assign variables for each of the characters in the fourth word and remove them from the unguessed letters list
global word_4_char_1,word_4_char_2,word_4_char_3,word_4_char_4,word_4_char_5
next_guess_char_list = list(next_guess)
word_4_char_1 = next_guess_char_list[0]
word_4_char_2 = next_guess_char_list[1]
word_4_char_3 = next_guess_char_list[2]
word_4_char_4 = next_guess_char_list[3]
word_4_char_5 = next_guess_char_list[4]
if word_4_char_1 in unguessed_letters_list:
unguessed_letters_list.remove(word_4_char_1)
if word_4_char_2 in unguessed_letters_list:
unguessed_letters_list.remove(word_4_char_2)
if word_4_char_3 in unguessed_letters_list:
unguessed_letters_list.remove(word_4_char_3)
if word_4_char_4 in unguessed_letters_list:
unguessed_letters_list.remove(word_4_char_4)
if word_4_char_5 in unguessed_letters_list:
unguessed_letters_list.remove(word_4_char_5)
    # Get the result for each character of the fourth word and reject invalid inputs
while True:
word_4_char_1_data = str(input(word_4_char_1.upper() +": "))
if word_4_char_1_data == 'g' or word_4_char_1_data == 'y' or word_4_char_1_data == 'gr':
break
while True:
word_4_char_2_data = str(input(word_4_char_2.upper() + ": "))
if word_4_char_2_data == 'g' or word_4_char_2_data == 'y' or word_4_char_2_data == 'gr':
break
while True:
word_4_char_3_data = str(input(word_4_char_3.upper() + ": "))
if word_4_char_3_data == 'g' or word_4_char_3_data == 'y' or word_4_char_3_data == 'gr':
break
while True:
word_4_char_4_data = str(input(word_4_char_4.upper() + ": "))
if word_4_char_4_data == 'g' or word_4_char_4_data == 'y' or word_4_char_4_data == 'gr':
break
while True:
word_4_char_5_data = str(input(word_4_char_5.upper() + ": "))
if word_4_char_5_data == 'g' or word_4_char_5_data == 'y' or word_4_char_5_data == 'gr':
break
print("processing...")
    # Eliminate letter possibilities based on the user's input
if word_4_char_1_data == 'g':
if word_4_char_1 in possible_char_1_list:
possible_char_1_list.remove(word_4_char_1)
if word_4_char_1 in possible_char_2_list:
possible_char_2_list.remove(word_4_char_1)
if word_4_char_1 in possible_char_3_list:
possible_char_3_list.remove(word_4_char_1)
if word_4_char_1 in possible_char_4_list:
possible_char_4_list.remove(word_4_char_1)
if word_4_char_1 in possible_char_5_list:
possible_char_5_list.remove(word_4_char_1)
if word_4_char_1_data == 'y':
if word_4_char_1 in possible_char_1_list:
possible_char_1_list.remove(word_4_char_1)
if word_4_char_1 not in known_letters_list:
known_letters_list.append(word_4_char_1)
if word_4_char_1_data == 'gr':
possible_char_1_list.clear()
possible_char_1_list.append(word_4_char_1)
if word_4_char_1 not in known_letters_list:
known_letters_list.append(word_4_char_1)
if word_4_char_2_data == 'g':
if word_4_char_2 in possible_char_1_list:
possible_char_1_list.remove(word_4_char_2)
if word_4_char_2 in possible_char_2_list:
possible_char_2_list.remove(word_4_char_2)
if word_4_char_2 in possible_char_3_list:
possible_char_3_list.remove(word_4_char_2)
if word_4_char_2 in possible_char_4_list:
possible_char_4_list.remove(word_4_char_2)
if word_4_char_2 in possible_char_5_list:
possible_char_5_list.remove(word_4_char_2)
if word_4_char_2_data == 'y':
if word_4_char_2 in possible_char_2_list:
possible_char_2_list.remove(word_4_char_2)
if word_4_char_2 not in known_letters_list:
known_letters_list.append(word_4_char_2)
if word_4_char_2_data == 'gr':
possible_char_2_list.clear()
possible_char_2_list.append(word_4_char_2)
if word_4_char_2 not in known_letters_list:
known_letters_list.append(word_4_char_2)
if word_4_char_3_data == 'g':
if word_4_char_3 in possible_char_1_list:
possible_char_1_list.remove(word_4_char_3)
if word_4_char_3 in possible_char_2_list:
possible_char_2_list.remove(word_4_char_3)
if word_4_char_3 in possible_char_3_list:
possible_char_3_list.remove(word_4_char_3)
if word_4_char_3 in possible_char_4_list:
possible_char_4_list.remove(word_4_char_3)
if word_4_char_3 in possible_char_5_list:
possible_char_5_list.remove(word_4_char_3)
if word_4_char_3_data == 'y':
if word_4_char_3 in possible_char_3_list:
possible_char_3_list.remove(word_4_char_3)
if word_4_char_3 not in known_letters_list:
known_letters_list.append(word_4_char_3)
if word_4_char_3_data == 'gr':
possible_char_3_list.clear()
possible_char_3_list.append(word_4_char_3)
if word_4_char_3 not in known_letters_list:
known_letters_list.append(word_4_char_3)
if word_4_char_4_data == 'g':
if word_4_char_4 in possible_char_1_list:
possible_char_1_list.remove(word_4_char_4)
if word_4_char_4 in possible_char_2_list:
possible_char_2_list.remove(word_4_char_4)
if word_4_char_4 in possible_char_3_list:
possible_char_3_list.remove(word_4_char_4)
if word_4_char_4 in possible_char_4_list:
possible_char_4_list.remove(word_4_char_4)
if word_4_char_4 in possible_char_5_list:
possible_char_5_list.remove(word_4_char_4)
if word_4_char_4_data == 'y':
if word_4_char_4 in possible_char_4_list:
possible_char_4_list.remove(word_4_char_4)
if word_4_char_4 not in known_letters_list:
known_letters_list.append(word_4_char_4)
if word_4_char_4_data == 'gr':
possible_char_4_list.clear()
possible_char_4_list.append(word_4_char_4)
if word_4_char_4 not in known_letters_list:
known_letters_list.append(word_4_char_4)
if word_4_char_5_data == 'g':
if word_4_char_5 in possible_char_1_list:
possible_char_1_list.remove(word_4_char_5)
if word_4_char_5 in possible_char_2_list:
possible_char_2_list.remove(word_4_char_5)
if word_4_char_5 in possible_char_3_list:
possible_char_3_list.remove(word_4_char_5)
if word_4_char_5 in possible_char_4_list:
possible_char_4_list.remove(word_4_char_5)
if word_4_char_5 in possible_char_5_list:
possible_char_5_list.remove(word_4_char_5)
if word_4_char_5_data == 'y':
if word_4_char_5 in possible_char_5_list:
possible_char_5_list.remove(word_4_char_5)
if word_4_char_5 not in known_letters_list:
known_letters_list.append(word_4_char_5)
if word_4_char_5_data == 'gr':
possible_char_5_list.clear()
possible_char_5_list.append(word_4_char_5)
if word_4_char_5 not in known_letters_list:
known_letters_list.append(word_4_char_5)
    # Remove words from the possibilities based on the user's input.
    # Slice assignment avoids the remove-while-iterating bug that the
    # original worked around by repeating the pass 20 times.
    # Keep only words whose letter at each position is still possible there
    five_letter_words_list[:] = [
        word for word in five_letter_words_list
        if word[0] in possible_char_1_list
        and word[1] in possible_char_2_list
        and word[2] in possible_char_3_list
        and word[3] in possible_char_4_list
        and word[4] in possible_char_5_list
    ]
    # Keep only words that contain every letter known to be in the answer
    five_letter_words_list[:] = [
        word for word in five_letter_words_list
        if all(letter in word for letter in known_letters_list)
    ]
# Select the next word to be guessed
select_next_guess(mode_selection)
# Define a function that processes the fifth word
def process_fifth_word(next_guess):
    # Tell the user which word to enter into Wordle
    print("Enter the word " + next_guess.upper() + " as your fifth guess.")
    print("\nPlease enter the corresponding data for each letter below:")
    # Assign variables for each of the characters in the fifth word and remove them from the unguessed letters list
global word_5_char_1,word_5_char_2,word_5_char_3,word_5_char_4,word_5_char_5
next_guess_char_list = list(next_guess)
word_5_char_1 = next_guess_char_list[0]
word_5_char_2 = next_guess_char_list[1]
word_5_char_3 = next_guess_char_list[2]
word_5_char_4 = next_guess_char_list[3]
word_5_char_5 = next_guess_char_list[4]
if word_5_char_1 in unguessed_letters_list:
unguessed_letters_list.remove(word_5_char_1)
if word_5_char_2 in unguessed_letters_list:
unguessed_letters_list.remove(word_5_char_2)
if word_5_char_3 in unguessed_letters_list:
unguessed_letters_list.remove(word_5_char_3)
if word_5_char_4 in unguessed_letters_list:
unguessed_letters_list.remove(word_5_char_4)
if word_5_char_5 in unguessed_letters_list:
unguessed_letters_list.remove(word_5_char_5)
    # Get the result for each character of the fifth word and reject invalid inputs
while True:
word_5_char_1_data = str(input(word_5_char_1.upper() +": "))
if word_5_char_1_data == 'g' or word_5_char_1_data == 'y' or word_5_char_1_data == 'gr':
break
while True:
word_5_char_2_data = str(input(word_5_char_2.upper() + ": "))
if word_5_char_2_data == 'g' or word_5_char_2_data == 'y' or word_5_char_2_data == 'gr':
break
while True:
word_5_char_3_data = str(input(word_5_char_3.upper() + ": "))
if word_5_char_3_data == 'g' or word_5_char_3_data == 'y' or word_5_char_3_data == 'gr':
break
while True:
word_5_char_4_data = str(input(word_5_char_4.upper() + ": "))
if word_5_char_4_data == 'g' or word_5_char_4_data == 'y' or word_5_char_4_data == 'gr':
break
while True:
word_5_char_5_data = str(input(word_5_char_5.upper() + ": "))
if word_5_char_5_data == 'g' or word_5_char_5_data == 'y' or word_5_char_5_data == 'gr':
break
print("processing...")
    # Eliminate letter possibilities based on the user's input
if word_5_char_1_data == 'g':
if word_5_char_1 in possible_char_1_list:
possible_char_1_list.remove(word_5_char_1)
if word_5_char_1 in possible_char_2_list:
possible_char_2_list.remove(word_5_char_1)
if word_5_char_1 in possible_char_3_list:
possible_char_3_list.remove(word_5_char_1)
if word_5_char_1 in possible_char_4_list:
possible_char_4_list.remove(word_5_char_1)
if word_5_char_1 in possible_char_5_list:
possible_char_5_list.remove(word_5_char_1)
if word_5_char_1_data == 'y':
if word_5_char_1 in possible_char_1_list:
possible_char_1_list.remove(word_5_char_1)
if word_5_char_1 not in known_letters_list:
known_letters_list.append(word_5_char_1)
if word_5_char_1_data == 'gr':
possible_char_1_list.clear()
possible_char_1_list.append(word_5_char_1)
if word_5_char_1 not in known_letters_list:
known_letters_list.append(word_5_char_1)
if word_5_char_2_data == 'g':
if word_5_char_2 in possible_char_1_list:
possible_char_1_list.remove(word_5_char_2)
if word_5_char_2 in possible_char_2_list:
possible_char_2_list.remove(word_5_char_2)
if word_5_char_2 in possible_char_3_list:
possible_char_3_list.remove(word_5_char_2)
if word_5_char_2 in possible_char_4_list:
possible_char_4_list.remove(word_5_char_2)
if word_5_char_2 in possible_char_5_list:
possible_char_5_list.remove(word_5_char_2)
if word_5_char_2_data == 'y':
if word_5_char_2 in possible_char_2_list:
possible_char_2_list.remove(word_5_char_2)
if word_5_char_2 not in known_letters_list:
known_letters_list.append(word_5_char_2)
if word_5_char_2_data == 'gr':
possible_char_2_list.clear()
possible_char_2_list.append(word_5_char_2)
if word_5_char_2 not in known_letters_list:
known_letters_list.append(word_5_char_2)
if word_5_char_3_data == 'g':
if word_5_char_3 in possible_char_1_list:
possible_char_1_list.remove(word_5_char_3)
if word_5_char_3 in possible_char_2_list:
possible_char_2_list.remove(word_5_char_3)
if word_5_char_3 in possible_char_3_list:
possible_char_3_list.remove(word_5_char_3)
if word_5_char_3 in possible_char_4_list:
possible_char_4_list.remove(word_5_char_3)
if word_5_char_3 in possible_char_5_list:
possible_char_5_list.remove(word_5_char_3)
if word_5_char_3_data == 'y':
if word_5_char_3 in possible_char_3_list:
possible_char_3_list.remove(word_5_char_3)
if word_5_char_3 not in known_letters_list:
known_letters_list.append(word_5_char_3)
if word_5_char_3_data == 'gr':
possible_char_3_list.clear()
possible_char_3_list.append(word_5_char_3)
if word_5_char_3 not in known_letters_list:
known_letters_list.append(word_5_char_3)
if word_5_char_4_data == 'g':
if word_5_char_4 in possible_char_1_list:
possible_char_1_list.remove(word_5_char_4)
if word_5_char_4 in possible_char_2_list:
possible_char_2_list.remove(word_5_char_4)
if word_5_char_4 in possible_char_3_list:
possible_char_3_list.remove(word_5_char_4)
if word_5_char_4 in possible_char_4_list:
possible_char_4_list.remove(word_5_char_4)
if word_5_char_4 in possible_char_5_list:
possible_char_5_list.remove(word_5_char_4)
if word_5_char_4_data == 'y':
if word_5_char_4 in possible_char_4_list:
possible_char_4_list.remove(word_5_char_4)
if word_5_char_4 not in known_letters_list:
known_letters_list.append(word_5_char_4)
if word_5_char_4_data == 'gr':
possible_char_4_list.clear()
possible_char_4_list.append(word_5_char_4)
if word_5_char_4 not in known_letters_list:
known_letters_list.append(word_5_char_4)
if word_5_char_5_data == 'g':
if word_5_char_5 in possible_char_1_list:
possible_char_1_list.remove(word_5_char_5)
if word_5_char_5 in possible_char_2_list:
possible_char_2_list.remove(word_5_char_5)
if word_5_char_5 in possible_char_3_list:
possible_char_3_list.remove(word_5_char_5)
if word_5_char_5 in possible_char_4_list:
possible_char_4_list.remove(word_5_char_5)
if word_5_char_5 in possible_char_5_list:
possible_char_5_list.remove(word_5_char_5)
if word_5_char_5_data == 'y':
if word_5_char_5 in possible_char_5_list:
possible_char_5_list.remove(word_5_char_5)
if word_5_char_5 not in known_letters_list:
known_letters_list.append(word_5_char_5)
if word_5_char_5_data == 'gr':
possible_char_5_list.clear()
possible_char_5_list.append(word_5_char_5)
if word_5_char_5 not in known_letters_list:
known_letters_list.append(word_5_char_5)
    # Remove words from the possibilities based on the user's input.
    # Slice assignment avoids the remove-while-iterating bug that the
    # original worked around by repeating the pass 20 times.
    # Keep only words whose letter at each position is still possible there
    five_letter_words_list[:] = [
        word for word in five_letter_words_list
        if word[0] in possible_char_1_list
        and word[1] in possible_char_2_list
        and word[2] in possible_char_3_list
        and word[3] in possible_char_4_list
        and word[4] in possible_char_5_list
    ]
    # Keep only words that contain every letter known to be in the answer
    five_letter_words_list[:] = [
        word for word in five_letter_words_list
        if all(letter in word for letter in known_letters_list)
    ]
# Select the next word to be guessed
select_next_guess(mode_selection)
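# --- Editor's illustrative sketch (not part of the original script) ---
# The five process_*_word functions above differ only in which guess they
# handle. A single parameterized helper like the one below could replace
# them; it mirrors the globals the script already uses, but the helper
# itself is an assumption, not the author's code.
def process_guess_sketch(next_guess, ordinal):
    possible_lists = [possible_char_1_list, possible_char_2_list,
                      possible_char_3_list, possible_char_4_list,
                      possible_char_5_list]
    print("Enter the word " + next_guess.upper() + " as your " + ordinal + " guess.")
    print("\nPlease enter the corresponding data for each letter below:")
    for position, char in enumerate(next_guess):
        if char in unguessed_letters_list:
            unguessed_letters_list.remove(char)
        while True:
            data = input(char.upper() + ": ")
            if data in ('g', 'y', 'gr'):
                break
        if data == 'g':  # grey: the letter appears nowhere in the answer
            for possible in possible_lists:
                if char in possible:
                    possible.remove(char)
        elif data == 'y':  # yellow: in the answer, but not at this position
            if char in possible_lists[position]:
                possible_lists[position].remove(char)
            if char not in known_letters_list:
                known_letters_list.append(char)
        else:  # 'gr' green: this position is solved
            possible_lists[position][:] = [char]
            if char not in known_letters_list:
                known_letters_list.append(char)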
# Define a function that runs an algorithm to select the next best possible guess
def select_next_guess(mode_selection):
if len(five_letter_words_list) == 1:
print(five_letter_words_list)
quit()
global next_guess
if mode_selection == 'manual':
while True:
next_guess = str(input("Please enter your word: "))
if next_guess.isalpha():
next_guess = next_guess.lower()
break
if mode_selection == 'auto':
# Define variables to track word sorting
best_word = ['',0]
current_score = 0
        # Score each word by how many of its characters haven't been guessed yet
for word in original_five_letter_words_list:
if word[0] in unguessed_letters_list:
current_score += 1
if len(possible_char_1_list) == 1:
if word[0] in possible_char_1_list:
current_score -= 0.5
if word[1] in unguessed_letters_list:
current_score += 1
if len(possible_char_2_list) == 1:
if word[1] in possible_char_2_list:
current_score -= 0.5
if word[2] in unguessed_letters_list:
current_score += 1
if len(possible_char_3_list) == 1:
if word[2] in possible_char_3_list:
current_score -= 0.5
if word[3] in unguessed_letters_list:
current_score += 1
if len(possible_char_4_list) == 1:
if word[3] in possible_char_4_list:
current_score -= 0.5
if word[4] in unguessed_letters_list:
current_score += 1
if len(possible_char_5_list) == 1:
if word[4] in possible_char_5_list:
current_score -= 0.5
            current_score -= len(word) - len(set(word))  # Subtract the number of repeated letters
# Updates the best word tracker
if current_score >= best_word[1]:
best_word.clear()
best_word.append(word)
best_word.append(current_score)
current_score = 0
        # Extract the best word from the scoring pass
        if len(five_letter_words_list) == 0:
            print("Sorry, there are no possible words left with the data you entered.")
            quit()
        if best_word[1] > 0:
            next_guess = best_word[0]
        else:
            # The original message promised a fallback pick but never made one;
            # fall back to the first remaining candidate
            print("No more unused characters; choosing a word from the remaining possibilities.")
            next_guess = five_letter_words_list[0]
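# --- Editor's worked example of the scoring heuristic above (an assumption,
# kept separate from the solver's own logic): with every letter unguessed and
# no position solved, a word scores its number of distinct letters, so words
# with repeated letters are deprioritized.
def _score_sketch(word, unguessed):
    score = sum(1 for ch in word if ch in unguessed)
    score -= len(word) - len(set(word))  # repeated-letter penalty
    return score
# e.g. _score_sketch("crane", set("abcdefghijklmnopqrstuvwxyz")) returns 5,
# while _score_sketch("geese", set("abcdefghijklmnopqrstuvwxyz")) returns 3.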
# Define a function to run the solver if the user selects manual execution
def manual_execution(game_difficulty):
if game_difficulty == 'easy':
print("Manual selected")
while True:
first_word = str(input("Please enter your first word: "))
if first_word.isalpha():
first_word = first_word.lower()
break
else:
print("Sorry, that was an invalid input, please try again.")
process_first_word(first_word)
process_second_word(next_guess)
process_third_word(next_guess)
process_fourth_word(next_guess)
process_fifth_word(next_guess)
print(five_letter_words_list)
quit()
if game_difficulty == 'hard':
...
# Define a function to run if the user selects automatic execution
def automatic_execution(game_difficulty):
if game_difficulty == 'easy':
print("Auto selected")
first_word = highest_net_frequency_word # Grab the initial word after performing frequency analysis
process_first_word(first_word)
process_second_word(next_guess)
process_third_word(next_guess)
process_fourth_word(next_guess)
process_fifth_word(next_guess)
print(five_letter_words_list)
quit()
if game_difficulty == 'hard':
...
# Manual execution
if mode_selection == 'manual':
manual_execution(game_difficulty)
# Automatic execution
if mode_selection == 'auto':
automatic_execution(game_difficulty)
---- file: tests/test_check.py | repo: clararehmann/gpvolve | license: MIT | 1,511 bytes | 1 star | hexsha bcfda5b907ba7ddc2b9a4283ab86197b27797dd9 ----
import pytest
import gpvolve.check as check
import gpmap
def test_check_gpm():
with pytest.raises(TypeError):
check.gpm_sanity("stupid")
gpm = gpmap.GenotypePhenotypeMap(genotype=["00","10","01","11"],
fitness=[0.1,0.2,0.2,0.3])
gpm._data = "stupid"
with pytest.raises(ValueError):
check.gpm_sanity(gpm)
gpm = gpmap.GenotypePhenotypeMap(genotype=["00","10","01","11"],
fitness=[0.1,0.2,0.2,0.3])
with pytest.raises(ValueError):
check.gpm_sanity(gpm)
gpm._neighbors = "stupid"
with pytest.raises(ValueError):
check.gpm_sanity(gpm)
# Screw up target column
gpm = gpmap.GenotypePhenotypeMap(genotype=["00","10","01","11"],
fitness=[0.1,0.2,0.2,0.3])
gpm.get_neighbors()
gpm.neighbors.drop(labels=["target"],axis=1,inplace=True)
with pytest.raises(ValueError):
check.gpm_sanity(gpm)
# Screw up source column
gpm = gpmap.GenotypePhenotypeMap(genotype=["00","10","01","11"],
fitness=[0.1,0.2,0.2,0.3])
gpm.get_neighbors()
gpm.neighbors.drop(labels=["source"],axis=1,inplace=True)
with pytest.raises(ValueError):
check.gpm_sanity(gpm)
# This should work
gpm = gpmap.GenotypePhenotypeMap(genotype=["00","10","01","11"],
fitness=[0.1,0.2,0.2,0.3])
gpm.get_neighbors()
check.gpm_sanity(gpm)
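# --- Editor's illustrative sketch (not part of the original test module) ---
# The corruption cases above could also be expressed as a single parametrized
# test; the lambda mutators below are an assumption, not the original
# author's code.
@pytest.mark.parametrize("mutate", [
    lambda gpm: setattr(gpm, "_data", "stupid"),
    lambda gpm: setattr(gpm, "_neighbors", "stupid"),
])
def test_check_gpm_corruptions_sketch(mutate):
    gpm = gpmap.GenotypePhenotypeMap(genotype=["00", "10", "01", "11"],
                                     fitness=[0.1, 0.2, 0.2, 0.3])
    mutate(gpm)
    with pytest.raises(ValueError):
        check.gpm_sanity(gpm)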
---- file: pendulum/exceptions.py | repo: seandstewart/pendulum | license: MIT | 100 bytes | 5,049 stars | hexsha 4c6448a6eaa7726d2a8c94f3b72f5bf5660cc928 ----
from .parsing.exceptions import ParserError # noqa
class PendulumException(Exception):
pass
---- file: nitorch/nn/preproc/__init__.py | repo: balbasty/nitorch | license: MIT | 147 bytes | 46 stars | hexsha 4c71045ff95b29a39f2a6eefd0ac4b580093d709 ----
from . import categorical
from . import intensity
from . import spatial
from .categorical import *
from .intensity import *
from .spatial import *
---- file: nipype/interfaces/dipy/__init__.py | repo: sebastientourbier/nipype_lts5 | license: BSD-3-Clause | 36 bytes | 1 star | hexsha 911ccc2a436676c5c5186046edec78aef627928c ----
from .tracks import TrackDensityMap
---- file: Thestral/__init__.py | repo: Fang-Kevin/Thestral | license: MIT | 23 bytes | hexsha 912e3697eca9917132182b5ca67237454d41035e ----
from Thestral import *
---- file: server/game/__init__.py | repo: ldieselUT/DistSystemHW2 | license: MIT | 18 bytes | hexsha e68143d9a2d2d820b2d176190d3d15df42cd51c9 ----
from game import *
---- file: ding/model/template/maqac.py | repo: jayyoung0802/DI-engine | license: Apache-2.0 | 20,794 bytes | 1 star | hexsha e6ddd996ddcd843cc866619e4bb34d97c0b03271 ----
from typing import Union, Dict, Optional
from easydict import EasyDict
import numpy as np
import torch
import torch.nn as nn
from ding.utils import SequenceType, squeeze, MODEL_REGISTRY
from ..common import RegressionHead, ReparameterizationHead, DiscreteHead, MultiHead, \
FCEncoder, ConvEncoder
@MODEL_REGISTRY.register('maqac')
class MAQAC(nn.Module):
r"""
Overview:
The MAQAC model.
Interfaces:
``__init__``, ``forward``, ``compute_actor``, ``compute_critic``
"""
mode = ['compute_actor', 'compute_critic']
def __init__(
self,
agent_obs_shape: Union[int, SequenceType],
global_obs_shape: Union[int, SequenceType],
action_shape: Union[int, SequenceType],
twin_critic: bool = False,
actor_head_hidden_size: int = 64,
actor_head_layer_num: int = 1,
critic_head_hidden_size: int = 64,
critic_head_layer_num: int = 1,
activation: Optional[nn.Module] = nn.ReLU(),
norm_type: Optional[str] = None,
) -> None:
r"""
Overview:
Init the MAQAC Model according to arguments.
Arguments:
- agent_obs_shape (:obj:`Union[int, SequenceType]`): Agent's observation's space.
- global_obs_shape (:obj:`Union[int, SequenceType]`): Global observation's space.
- action_shape (:obj:`Union[int, SequenceType]`): Action's space.
- twin_critic (:obj:`bool`): Whether include twin critic.
- actor_head_hidden_size (:obj:`Optional[int]`): The ``hidden_size`` to pass to actor-nn's ``Head``.
- actor_head_layer_num (:obj:`int`):
The num of layers used in the network to compute Q value output for actor's nn.
- critic_head_hidden_size (:obj:`Optional[int]`): The ``hidden_size`` to pass to critic-nn's ``Head``.
- critic_head_layer_num (:obj:`int`):
The num of layers used in the network to compute Q value output for critic's nn.
            - activation (:obj:`Optional[nn.Module]`):
                The type of activation function to use in ``MLP`` after ``layer_fn``;
                if ``None``, defaults to ``nn.ReLU()``.
- norm_type (:obj:`Optional[str]`):
The type of normalization to use, see ``ding.torch_utils.fc_block`` for more details.
"""
super(MAQAC, self).__init__()
agent_obs_shape: int = squeeze(agent_obs_shape)
action_shape: int = squeeze(action_shape)
self.actor = nn.Sequential(
nn.Linear(agent_obs_shape, actor_head_hidden_size), activation,
DiscreteHead(
actor_head_hidden_size, action_shape, actor_head_layer_num, activation=activation, norm_type=norm_type
)
)
self.twin_critic = twin_critic
if self.twin_critic:
self.critic = nn.ModuleList()
for _ in range(2):
self.critic.append(
nn.Sequential(
nn.Linear(global_obs_shape, critic_head_hidden_size), activation,
DiscreteHead(
critic_head_hidden_size,
action_shape,
critic_head_layer_num,
activation=activation,
norm_type=norm_type
)
)
)
else:
self.critic = nn.Sequential(
nn.Linear(global_obs_shape, critic_head_hidden_size), activation,
DiscreteHead(
critic_head_hidden_size,
action_shape,
critic_head_layer_num,
activation=activation,
norm_type=norm_type
)
)
def forward(self, inputs: Union[torch.Tensor, Dict], mode: str) -> Dict:
r"""
Overview:
Use observation and action tensor to predict output.
Parameter updates with QAC's MLPs forward setup.
Arguments:
Forward with ``'compute_actor'``:
- inputs (:obj:`torch.Tensor`):
The encoded embedding tensor, determined with given ``hidden_size``, i.e. ``(B, N=hidden_size)``.
Whether ``actor_head_hidden_size`` or ``critic_head_hidden_size`` depend on ``mode``.
Forward with ``'compute_critic'``, inputs (`Dict`) Necessary Keys:
- ``obs``, ``action`` encoded tensors.
- mode (:obj:`str`): Name of the forward mode.
Returns:
- outputs (:obj:`Dict`): Outputs of network forward.
Forward with ``'compute_actor'``, Necessary Keys (either):
- action (:obj:`torch.Tensor`): Action tensor with same size as input ``x``.
- logit (:obj:`torch.Tensor`): Action's probabilities.
Forward with ``'compute_critic'``, Necessary Keys:
- q_value (:obj:`torch.Tensor`): Q value tensor with same size as batch size.
Actor Shapes:
- inputs (:obj:`torch.Tensor`): :math:`(B, N0)`, B is batch size and N0 corresponds to ``hidden_size``
- action (:obj:`torch.Tensor`): :math:`(B, N0)`
- q_value (:obj:`torch.FloatTensor`): :math:`(B, )`, where B is batch size.
Critic Shapes:
- obs (:obj:`torch.Tensor`): :math:`(B, N1)`, where B is batch size and N1 is ``global_obs_shape``
- logit (:obj:`torch.FloatTensor`): :math:`(B, N2)`, where B is batch size and N2 is ``action_shape``
"""
        assert mode in self.mode, "unsupported forward mode: {}/{}".format(mode, self.mode)
return getattr(self, mode)(inputs)
def compute_actor(self, inputs: Dict) -> Dict:
r"""
Overview:
Use encoded embedding tensor to predict output.
Execute parameter updates with ``'compute_actor'`` mode
Use encoded embedding tensor to predict output.
Arguments:
- inputs (:obj:`torch.Tensor`):
The encoded embedding tensor, determined with given ``hidden_size``, i.e. ``(B, N=hidden_size)``.
``hidden_size = actor_head_hidden_size``
- mode (:obj:`str`): Name of the forward mode.
Returns:
- outputs (:obj:`Dict`): Outputs of forward pass encoder and head.
ReturnsKeys (either):
- action (:obj:`torch.Tensor`): Continuous action tensor with same size as ``action_shape``.
- logit (:obj:`torch.Tensor`):
Logit tensor encoding ``mu`` and ``sigma``, both with same size as input ``x``.
Shapes:
- inputs (:obj:`torch.Tensor`): :math:`(B, N0)`, B is batch size and N0 corresponds to ``hidden_size``
- action (:obj:`torch.Tensor`): :math:`(B, N0)`
- logit (:obj:`list`): 2 elements, mu and sigma, each is the shape of :math:`(B, N0)`.
- q_value (:obj:`torch.FloatTensor`): :math:`(B, )`, B is batch size.
Examples:
>>> # Regression mode
>>> model = QAC(64, 64, 'regression')
>>> inputs = torch.randn(4, 64)
>>> actor_outputs = model(inputs,'compute_actor')
>>> assert actor_outputs['action'].shape == torch.Size([4, 64])
>>> # Reparameterization Mode
>>> model = QAC(64, 64, 'reparameterization')
>>> inputs = torch.randn(4, 64)
>>> actor_outputs = model(inputs,'compute_actor')
>>> actor_outputs['logit'][0].shape # mu
>>> torch.Size([4, 64])
>>> actor_outputs['logit'][1].shape # sigma
>>> torch.Size([4, 64])
"""
action_mask = inputs['obs']['action_mask']
x = self.actor(inputs['obs']['agent_state'])
return {'logit': x['logit'], 'action_mask': action_mask}
def compute_critic(self, inputs: Dict) -> Dict:
r"""
Overview:
Execute parameter updates with ``'compute_critic'`` mode
Use encoded embedding tensor to predict output.
Arguments:
- ``obs``, ``action`` encoded tensors.
- mode (:obj:`str`): Name of the forward mode.
Returns:
- outputs (:obj:`Dict`): Q-value output.
ReturnKeys:
- q_value (:obj:`torch.Tensor`): Q value tensor with same size as batch size.
Shapes:
- obs (:obj:`torch.Tensor`): :math:`(B, N1)`, where B is batch size and N1 is ``obs_shape``
- action (:obj:`torch.Tensor`): :math:`(B, N2)`, where B is batch size and N2 is ``action_shape``
- q_value (:obj:`torch.FloatTensor`): :math:`(B, )`, where B is batch size.
"""
if self.twin_critic:
x = [m(inputs['obs']['global_state'])['logit'] for m in self.critic]
else:
x = self.critic(inputs['obs']['global_state'])['logit']
return {'q_value': x}
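# --- Editor's illustrative usage sketch (not part of the original file) ---
# The docstring examples above are copied from QAC and do not match this
# class's signature; the shapes and dict keys below are assumptions chosen
# to satisfy what compute_actor / compute_critic actually read.
def _maqac_usage_sketch():
    model = MAQAC(agent_obs_shape=8, global_obs_shape=16, action_shape=4)
    batch = {
        'obs': {
            'agent_state': torch.randn(3, 8),    # per-agent observation
            'global_state': torch.randn(3, 16),  # centralized critic input
            'action_mask': torch.ones(3, 4),     # all actions allowed
        }
    }
    actor_out = model(batch, mode='compute_actor')    # {'logit', 'action_mask'}
    critic_out = model(batch, mode='compute_critic')  # {'q_value'}
    return actor_out, critic_out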
@MODEL_REGISTRY.register('maqac_continuous')
class ContinuousMAQAC(nn.Module):
r"""
Overview:
The Continuous MAQAC model.
Interfaces:
``__init__``, ``forward``, ``compute_actor``, ``compute_critic``
"""
mode = ['compute_actor', 'compute_critic']
def __init__(
self,
agent_obs_shape: Union[int, SequenceType],
global_obs_shape: Union[int, SequenceType],
action_shape: Union[int, SequenceType, EasyDict],
action_space: str,
twin_critic: bool = False,
actor_head_hidden_size: int = 64,
actor_head_layer_num: int = 1,
critic_head_hidden_size: int = 64,
critic_head_layer_num: int = 1,
activation: Optional[nn.Module] = nn.ReLU(),
norm_type: Optional[str] = None,
) -> None:
r"""
Overview:
Init the QAC Model according to arguments.
Arguments:
- obs_shape (:obj:`Union[int, SequenceType]`): Observation's space.
- action_shape (:obj:`Union[int, SequenceType, EasyDict]`): Action's space, such as 4, (3, )
- action_space (:obj:`str`): Whether choose ``regression`` or ``reparameterization``.
- twin_critic (:obj:`bool`): Whether include twin critic.
- actor_head_hidden_size (:obj:`Optional[int]`): The ``hidden_size`` to pass to actor-nn's ``Head``.
- actor_head_layer_num (:obj:`int`):
The num of layers used in the network to compute Q value output for actor's nn.
- critic_head_hidden_size (:obj:`Optional[int]`): The ``hidden_size`` to pass to critic-nn's ``Head``.
- critic_head_layer_num (:obj:`int`):
The num of layers used in the network to compute Q value output for critic's nn.
            - activation (:obj:`Optional[nn.Module]`):
                The type of activation function to use in ``MLP`` after ``layer_fn``;
                if ``None``, defaults to ``nn.ReLU()``.
- norm_type (:obj:`Optional[str]`):
The type of normalization to use, see ``ding.torch_utils.fc_block`` for more details.
"""
super(ContinuousMAQAC, self).__init__()
obs_shape: int = squeeze(agent_obs_shape)
global_obs_shape: int = squeeze(global_obs_shape)
action_shape = squeeze(action_shape)
self.action_shape = action_shape
self.action_space = action_space
assert self.action_space in ['regression', 'reparameterization']
if self.action_space == 'regression': # DDPG, TD3
self.actor = nn.Sequential(
nn.Linear(obs_shape, actor_head_hidden_size), activation,
RegressionHead(
actor_head_hidden_size,
action_shape,
actor_head_layer_num,
final_tanh=True,
activation=activation,
norm_type=norm_type
)
)
else: # SAC
self.actor = nn.Sequential(
nn.Linear(obs_shape, actor_head_hidden_size), activation,
ReparameterizationHead(
actor_head_hidden_size,
action_shape,
actor_head_layer_num,
sigma_type='conditioned',
activation=activation,
norm_type=norm_type
)
)
self.twin_critic = twin_critic
critic_input_size = global_obs_shape + action_shape
if self.twin_critic:
self.critic = nn.ModuleList()
for _ in range(2):
self.critic.append(
nn.Sequential(
nn.Linear(critic_input_size, critic_head_hidden_size), activation,
RegressionHead(
critic_head_hidden_size,
1,
critic_head_layer_num,
final_tanh=False,
activation=activation,
norm_type=norm_type
)
)
)
else:
self.critic = nn.Sequential(
nn.Linear(critic_input_size, critic_head_hidden_size), activation,
RegressionHead(
critic_head_hidden_size,
1,
critic_head_layer_num,
final_tanh=False,
activation=activation,
norm_type=norm_type
)
)
def forward(self, inputs: Union[torch.Tensor, Dict], mode: str) -> Dict:
r"""
Overview:
Use observation and action tensor to predict output.
Parameter updates with QAC's MLPs forward setup.
Arguments:
Forward with ``'compute_actor'``:
- inputs (:obj:`torch.Tensor`):
The encoded embedding tensor, determined with given ``hidden_size``, i.e. ``(B, N=hidden_size)``.
Whether ``actor_head_hidden_size`` or ``critic_head_hidden_size`` depend on ``mode``.
Forward with ``'compute_critic'``, inputs (`Dict`) Necessary Keys:
- ``obs``, ``action`` encoded tensors.
- mode (:obj:`str`): Name of the forward mode.
Returns:
- outputs (:obj:`Dict`): Outputs of network forward.
Forward with ``'compute_actor'``, Necessary Keys (either):
- action (:obj:`torch.Tensor`): Action tensor with same size as input ``x``.
- logit (:obj:`torch.Tensor`):
Logit tensor encoding ``mu`` and ``sigma``, both with same size as input ``x``.
Forward with ``'compute_critic'``, Necessary Keys:
- q_value (:obj:`torch.Tensor`): Q value tensor with same size as batch size.
Actor Shapes:
- inputs (:obj:`torch.Tensor`): :math:`(B, N0)`, B is batch size and N0 corresponds to ``hidden_size``
- action (:obj:`torch.Tensor`): :math:`(B, N0)`
- q_value (:obj:`torch.FloatTensor`): :math:`(B, )`, where B is batch size.
Critic Shapes:
- obs (:obj:`torch.Tensor`): :math:`(B, N1)`, where B is batch size and N1 is ``obs_shape``
            - action (:obj:`torch.Tensor`): :math:`(B, N2)`, where B is batch size and N2 is ``action_shape``
            - logit (:obj:`torch.FloatTensor`): :math:`(B, N2)`, where B is batch size and N2 is ``action_shape``
Actor Examples:
>>> # Regression mode
>>> model = QAC(64, 64, 'regression')
>>> inputs = torch.randn(4, 64)
>>> actor_outputs = model(inputs,'compute_actor')
>>> assert actor_outputs['action'].shape == torch.Size([4, 64])
>>> # Reparameterization Mode
>>> model = QAC(64, 64, 'reparameterization')
>>> inputs = torch.randn(4, 64)
>>> actor_outputs = model(inputs,'compute_actor')
>>> actor_outputs['logit'][0].shape # mu
>>> torch.Size([4, 64])
>>> actor_outputs['logit'][1].shape # sigma
>>> torch.Size([4, 64])
"""
        assert mode in self.mode, "unsupported forward mode: {}/{}".format(mode, self.mode)
return getattr(self, mode)(inputs)
def compute_actor(self, inputs: Dict) -> Dict:
r"""
Overview:
Use encoded embedding tensor to predict output.
Execute parameter updates with ``'compute_actor'`` mode
Use encoded embedding tensor to predict output.
Arguments:
- inputs (:obj:`torch.Tensor`):
The encoded embedding tensor, determined with given ``hidden_size``, i.e. ``(B, N=hidden_size)``.
``hidden_size = actor_head_hidden_size``
- mode (:obj:`str`): Name of the forward mode.
Returns:
- outputs (:obj:`Dict`): Outputs of forward pass encoder and head.
ReturnsKeys (either):
- action (:obj:`torch.Tensor`): Continuous action tensor with same size as ``action_shape``.
- logit (:obj:`torch.Tensor`):
Logit tensor encoding ``mu`` and ``sigma``, both with same size as input ``x``.
- logit + action_args
Shapes:
- inputs (:obj:`torch.Tensor`): :math:`(B, N0)`, B is batch size and N0 corresponds to ``hidden_size``
- action (:obj:`torch.Tensor`): :math:`(B, N0)`
- logit (:obj:`Union[list, torch.Tensor]`):
- case1(continuous space, list): 2 elements, mu and sigma, each is the shape of :math:`(B, N0)`.
- case2(hybrid space, torch.Tensor): :math:`(B, N1)`, where N1 is action_type_shape
- q_value (:obj:`torch.FloatTensor`): :math:`(B, )`, B is batch size.
- action_args (:obj:`torch.FloatTensor`): :math:`(B, N2)`, where N2 is action_args_shape
(action_args are continuous real value)
Examples:
>>> # Regression mode
>>> model = QAC(64, 64, 'regression')
>>> inputs = torch.randn(4, 64)
>>> actor_outputs = model(inputs,'compute_actor')
>>> assert actor_outputs['action'].shape == torch.Size([4, 64])
>>> # Reparameterization Mode
>>> model = QAC(64, 64, 'reparameterization')
>>> inputs = torch.randn(4, 64)
>>> actor_outputs = model(inputs,'compute_actor')
>>> actor_outputs['logit'][0].shape # mu
>>> torch.Size([4, 64])
>>> actor_outputs['logit'][1].shape # sigma
>>> torch.Size([4, 64])
"""
inputs = inputs['agent_state']
if self.action_space == 'regression':
x = self.actor(inputs)
return {'action': x['pred']}
else:
x = self.actor(inputs)
return {'logit': [x['mu'], x['sigma']]}
def compute_critic(self, inputs: Dict) -> Dict:
r"""
Overview:
Execute parameter updates with ``'compute_critic'`` mode
Use encoded embedding tensor to predict output.
Arguments:
            - inputs (:obj:`Dict`): ``obs``, ``action`` and ``logit`` tensors.
- mode (:obj:`str`): Name of the forward mode.
Returns:
- outputs (:obj:`Dict`): Q-value output.
ArgumentsKeys:
- necessary:
- obs: (:obj:`torch.Tensor`): 2-dim vector observation
- action (:obj:`Union[torch.Tensor, Dict]`): action from actor
- optional:
- logit (:obj:`torch.Tensor`): discrete action logit
ReturnKeys:
- q_value (:obj:`torch.Tensor`): Q value tensor with same size as batch size.
Shapes:
- obs (:obj:`torch.Tensor`): :math:`(B, N1)`, where B is batch size and N1 is ``obs_shape``
- action (:obj:`torch.Tensor`): :math:`(B, N2)`, where B is batch size and N2 is ``action_shape``
- q_value (:obj:`torch.FloatTensor`): :math:`(B, )`, where B is batch size.
"""
obs, action = inputs['obs']['global_state'], inputs['action']
if len(action.shape) == 1: # (B, ) -> (B, 1)
action = action.unsqueeze(1)
x = torch.cat([obs, action], dim=-1)
if self.twin_critic:
x = [m(x)['pred'] for m in self.critic]
else:
x = self.critic(x)['pred']
return {'q_value': x}
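# --- Editor's illustrative usage sketch (not part of the original file) ---
# A minimal regression-mode round trip under assumed toy shapes; the dict
# keys mirror what compute_actor / compute_critic read above.
def _continuous_maqac_usage_sketch():
    model = ContinuousMAQAC(agent_obs_shape=8, global_obs_shape=16,
                            action_shape=2, action_space='regression')
    action = model.compute_actor({'agent_state': torch.randn(3, 8)})['action']
    q = model.compute_critic({'obs': {'global_state': torch.randn(3, 16)},
                              'action': action})
    return action, q['q_value']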
---- file: tests/slurm_plugin/slurm_resources/test_slurm_resources.py | repo: awslabs/cfncluster-node | license: Apache-2.0 | 35,907 bytes | 18 stars | hexsha fc079a863453356d206b139007a9e691dcc41468 ----
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
# with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and
# limitations under the License.
import logging
from datetime import datetime
import pytest
from assertpy import assert_that
from slurm_plugin.slurm_resources import DynamicNode, EC2Instance, EC2InstanceHealthState, SlurmPartition, StaticNode
@pytest.mark.parametrize(
"node, expected_output",
[
(DynamicNode("queue-name-st-t2micro-1", "nodeip", "nodehostname", "somestate", "queue-name"), True),
(
DynamicNode("queuename-dy-t2micro-1", "queuename-dy-t2micro-1", "nodehostname", "somestate", "queuename"),
False,
),
],
)
def test_slurm_node_is_nodeaddr_set(node, expected_output):
assert_that(node.is_nodeaddr_set()).is_equal_to(expected_output)
@pytest.mark.parametrize(
"node, expected_output",
[
(StaticNode("queue1-st-c5xlarge-1", "nodeip", "nodehostname", "somestate", "queue1"), False),
(StaticNode("queue1-st-c5xlarge-1", "nodeip", "nodehostname", "MIXED+CLOUD+DRAIN+POWERING_UP", "queue1"), True),
(
StaticNode(
"queue1-st-c5xlarge-1", "nodeip", "nodehostname", "ALLOCATED+CLOUD+DRAIN+NOT_RESPONDING", "queue1"
),
True,
),
(DynamicNode("queue1-dy-c5xlarge-1", "nodeip", "nodehostname", "IDLE+CLOUD", "queue1"), False),
(DynamicNode("queue1-dy-c5xlarge-1", "nodeip", "nodehostname", "DOWN+CLOUD", "queue1"), False),
(DynamicNode("queue1-dy-c5xlarge-1", "nodeip", "nodehostname", "COMPLETING+DRAIN", "queue1"), True),
],
)
def test_slurm_node_has_job(node, expected_output):
assert_that(node.has_job()).is_equal_to(expected_output)
@pytest.mark.parametrize(
"node, expected_output",
[
(StaticNode("queue1-st-c5xlarge-1", "nodeip", "nodehostname", "somestate", "queue1"), False),
(
StaticNode("queue1-st-c5xlarge-1", "nodeip", "nodehostname", "MIXED+CLOUD+DRAIN+POWERING_UP", "queue1"),
False,
),
(
StaticNode(
"queue1-st-c5xlarge-1", "nodeip", "nodehostname", "ALLOCATED+CLOUD+DRAIN+NOT_RESPONDING", "queue1"
),
False,
),
(
StaticNode("queue1-st-c5xlarge-1", "nodeip", "nodehostname", "IDLE+CLOUD+DRAIN+POWER_DOWN", "queue1"),
False,
),
(
StaticNode("queue1-st-c5xlarge-1", "nodeip", "nodehostname", "IDLE+CLOUD+DRAIN+POWERING_DOWN", "queue1"),
True,
),
(
DynamicNode("queue1-dy-c5xlarge-1", "nodeip", "nodehostname", "IDLE+CLOUD+DRAIN+NOT_RESPONDING", "queue1"),
True,
),
(DynamicNode("queue1-dy-c5xlarge-1", "nodeip", "nodehostname", "DOWN+CLOUD+DRAIN", "queue1"), True),
(
DynamicNode("queue1-dy-c5xlarge-1", "nodeip", "nodehostname", "IDLE+CLOUD+DRAIN+POWER_DOWN", "queue1"),
False,
),
(
DynamicNode("queue1-dy-c5xlarge-1", "nodeip", "nodehostname", "IDLE+CLOUD+DRAIN+POWERING_DOWN", "queue1"),
True,
),
],
)
def test_slurm_node_is_drained(node, expected_output):
assert_that(node.is_drained()).is_equal_to(expected_output)
@pytest.mark.parametrize(
"node, expected_output",
[
(StaticNode("queue1-st-c5xlarge-1", "nodeip", "nodehostname", "somestate", "queue1"), False),
(StaticNode("queue1-st-c5xlarge-1", "nodeip", "nodehostname", "MIXED+CLOUD+DOWN+POWERING_UP", "queue1"), True),
(
StaticNode(
"queue1-st-c5xlarge-1", "nodeip", "nodehostname", "ALLOCATED+CLOUD+DRAIN+NOT_RESPONDING", "queue1"
),
False,
),
(DynamicNode("queue1-dy-c5xlarge-1", "nodeip", "nodehostname", "DOWN+CLOUD+NOT_RESPONDING", "queue1"), True),
(DynamicNode("queue1-dy-c5xlarge-1", "nodeip", "nodehostname", "DOWN+CLOUD+POWER", "queue1"), True),
(DynamicNode("queue1-dy-c5xlarge-1", "nodeip", "nodehostname", "IDLE+CLOUD+POWERING_DOWN", "queue1"), False),
(
DynamicNode(
"queue1-dy-c5xlarge-1",
"nodeip",
"nodehostname",
"DOWN+CLOUD+POWER_DOWN+POWERED_DOWN",
"queue1",
),
True,
),
(
DynamicNode(
"queue1-dy-c5xlarge-1",
"nodeip",
"nodehostname",
"DOWN+CLOUD+POWER_DOWN",
"queue1",
),
False,
),
(
DynamicNode(
"queue1-dy-c5xlarge-1",
"nodeip",
"nodehostname",
"DOWN+CLOUD+POWERING_DOWN",
"queue1",
),
False,
),
(
DynamicNode(
"queue1-dy-c5xlarge-1",
"nodeip",
"nodehostname",
"DOWN+CLOUD+POWERING_DOWN+POWERED_DOWN",
"queue1",
),
False,
),
(
DynamicNode(
"queue1-dy-c5xlarge-1",
"nodeip",
"nodehostname",
"DOWN+CLOUD+POWER_DOWN+POWERING_DOWN+POWERED_DOWN",
"queue1",
),
False,
),
],
)
def test_slurm_node_is_down(node, expected_output):
assert_that(node.is_down()).is_equal_to(expected_output)
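# --- Editor's illustrative sketch (not part of the original test module) ---
# The Slurm node states exercised above are '+'-joined flag strings; a small
# helper like this makes the expected outputs easier to verify by hand.
def _has_state_flags(state, *flags):
    parts = set(state.split("+"))
    return all(flag in parts for flag in flags)
def test_state_flag_helper_sketch():
    assert_that(_has_state_flags("IDLE+CLOUD+DRAIN+POWERING_DOWN", "DRAIN", "CLOUD")).is_true()
    assert_that(_has_state_flags("DOWN+CLOUD", "DRAIN")).is_false()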
@pytest.mark.parametrize(
"node, expected_output",
[
(StaticNode("queue1-st-c5xlarge-1", "nodeip", "nodehostname", "IDLE+CLOUD+POWER", "queue1"), True),
(
StaticNode("queue1-st-c5xlarge-1", "nodeip", "nodehostname", "MIXED+CLOUD+DRAIN+POWERING_UP", "queue1"),
False,
),
(
DynamicNode(
"queue1-dy-c5xlarge-1", "nodeip", "nodehostname", "ALLOCATED+CLOUD+DOWN+NOT_RESPONDING", "queue1"
),
False,
),
(DynamicNode("queue1-dy-c5xlarge-1", "nodeip", "nodehostname", "IDLE+CLOUD+POWERING_DOWN", "queue1"), False),
(
DynamicNode(
"queue1-dy-c5xlarge-1", "nodeip", "nodehostname", "IDLE+CLOUD+NOT_RESPONDING+POWERING_UP", "queue1"
),
True,
),
],
)
def test_slurm_node_is_up(node, expected_output):
assert_that(node.is_up()).is_equal_to(expected_output)
@pytest.mark.parametrize(
"node, expected_output",
[
(StaticNode("queue1-st-c5xlarge-1", "nodeip", "nodehostname", "IDLE+CLOUD+POWER", "queue1"), False),
(
DynamicNode(
"queue1-dy-c5xlarge-1", "nodeip", "nodehostname", "MIXED+CLOUD+NOT_RESPONDING+POWERING_UP", "queue1"
),
True,
),
],
)
def test_slurm_node_is_powering_up(node, expected_output):
assert_that(node.is_powering_up()).is_equal_to(expected_output)
@pytest.mark.parametrize(
"node, expected_output",
[
(StaticNode("queue1-st-c5xlarge-1", "nodeip", "nodehostname", "IDLE+CLOUD", "queue1"), True),
(StaticNode("queue1-st-c5xlarge-1", "nodeip", "nodehostname", "MIXED+CLOUD", "queue1"), True),
(StaticNode("queue1-st-c5xlarge-1", "nodeip", "nodehostname", "ALLOCATED+CLOUD", "queue1"), True),
(DynamicNode("queue1-dy-c5xlarge-1", "nodeip", "nodehostname", "COMPLETING+CLOUD", "queue1"), True),
(
DynamicNode(
"queue1-dy-c5xlarge-1", "nodeip", "nodehostname", "MIXED+CLOUD+NOT_RESPONDING+POWERING_UP", "queue1"
),
False,
),
(
DynamicNode("queue1-dy-c5xlarge-1", "nodeip", "nodehostname", "IDLE+CLOUD+DRAIN+NOT_RESPONDING", "queue1"),
False,
),
],
)
def test_slurm_node_is_online(node, expected_output):
assert_that(node.is_online()).is_equal_to(expected_output)
@pytest.mark.parametrize(
"node, expected_output",
[
(
StaticNode(
"queue1-st-c5xlarge-1", "nodeip", "nodehostname", "MIXED+CLOUD+NOT_RESPONDING+POWERING_UP", "queue1"
),
True,
),
(StaticNode("queue1-st-c5xlarge-1", "nodeip", "nodehostname", "IDLE+CLOUD+POWERING_UP", "queue1"), False),
(DynamicNode("queue1-dy-c5xlarge-1", "nodeip", "nodehostname", "MIXED+CLOUD", "queue1"), False),
(DynamicNode("queue1-dy-c5xlarge-1", "nodeip", "nodehostname", "ALLOCATED+CLOUD", "queue1"), False),
(DynamicNode("queue1-dy-c5xlarge-1", "nodeip", "nodehostname", "COMPLETING+CLOUD", "queue1"), False),
],
)
def test_slurm_node_is_configuring_job(node, expected_output):
assert_that(node.is_configuring_job()).is_equal_to(expected_output)
@pytest.mark.parametrize(
"node, expected_output",
[
(
StaticNode(
"queue1-st-c5xlarge-1", "nodeip", "nodehostname", "MIXED+CLOUD+NOT_RESPONDING+POWERING_UP", "queue1"
),
False,
),
(StaticNode("queue1-st-c5xlarge-1", "nodeip", "nodehostname", "MIXED+CLOUD+DOWN+POWERING_UP", "queue1"), False),
(StaticNode("queue1-st-c5xlarge-1", "nodeip", "nodehostname", "IDEL+CLOUD+POWERING_UP", "queue1"), False),
(StaticNode("queue1-st-c5xlarge-1", "nodeip", "nodehostname", "MIXED+CLOUD", "queue1"), True),
(DynamicNode("queue1-dy-c5xlarge-1", "nodeip", "nodehostname", "ALLOCATED+CLOUD", "queue1"), True),
(DynamicNode("queue1-dy-c5xlarge-1", "nodeip", "nodehostname", "COMPLETING+CLOUD", "queue1"), True),
(DynamicNode("queue1-dy-c5xlarge-1", "nodeip", "nodehostname", "MIXED+CLOUD+POWERED_DOWN", "queue1"), False),
(DynamicNode("queue1-dy-c5xlarge-1", "nodeip", "nodehostname", "MIXED+CLOUD+NOT_RESPONDING", "queue1"), True),
],
)
def test_slurm_node_is_running_job(node, expected_output):
assert_that(node.is_running_job()).is_equal_to(expected_output)
@pytest.mark.parametrize(
"node, expected_output",
[
(StaticNode("queue1-st-c5xlarge-1", "nodeip", "nodehostname", "MIXED+CLOUD+POWERED_DOWN", "queue1"), True),
(DynamicNode("queue1-st-c5xlarge-1", "nodeip", "nodehostname", "MIXED+CLOUD", "queue1"), False),
],
)
def test_slurm_node_is_power_with_job(node, expected_output):
assert_that(node.is_power_with_job()).is_equal_to(expected_output)
@pytest.mark.parametrize(
"node, expected_output",
[
(
DynamicNode("queue1-dy-c5xlarge-1", "nodeip", "nodehostname", "somestate", "queue1", "Failed to resume"),
False,
),
(
DynamicNode(
"queue1-dy-c5xlarge-1",
"nodeip",
"nodehostname",
"MIXED+CLOUD+DRAIN+POWERING_UP",
"queue1",
"Some reason",
),
False,
),
(
DynamicNode(
"queue1-dy-c5xlarge-1",
"nodeip",
"nodehostname",
"ALLOCATED+CLOUD+DRAIN+NOT_RESPONDING",
"queue1",
"(Code:RequestLimitExceeded)Failure when resuming nodes",
),
False,
),
(
DynamicNode(
"queue1-dy-c5xlarge-1",
"nodeip",
"nodehostname",
"IDLE+CLOUD",
"queue1",
"(Code:InsufficientInstanceCapacity)Failure when resuming nodes",
),
True,
),
(
DynamicNode(
"queue1-dy-c5xlarge-1",
"nodeip",
"nodehostname",
"DOWN+CLOUD",
"queue1",
"(Code:InsufficientHostCapacity)Failure when resuming nodes",
),
True,
),
(
DynamicNode(
"queue1-dy-c5xlarge-1",
"nodeip",
"nodehostname",
"COMPLETING+DRAIN",
"queue1",
"(Code:InsufficientReservedInstanceCapacity)Failure when resuming nodes",
),
True,
),
(
DynamicNode(
"queue1-dy-c5xlarge-1",
"nodeip",
"nodehostname",
"DOWN+CLOUD",
"queue1",
"(Code:Unsupported)Failure when resuming nodes",
),
True,
),
(
DynamicNode(
"queue1-dy-c5xlarge-1",
"nodeip",
"nodehostname",
"DOWN+CLOUD",
"queue1",
"(Code:SpotMaxPriceTooLow)Failure when resuming nodes",
),
True,
),
],
)
def test_slurm_node_is_ice(node, expected_output):
assert_that(node.is_ice()).is_equal_to(expected_output)
@pytest.mark.parametrize(
"nodes, expected_output",
[
(
[
StaticNode(
"queue1-st-c5xlarge-1", "nodeip", "nodehostname", "MIXED+CLOUD+NOT_RESPONDING+POWERING_UP", "queue1"
),
StaticNode("queue1-st-c5xlarge-1", "nodeip", "nodehostname", "MIXED+CLOUD", "queue1"),
],
True,
),
(
[
DynamicNode("queue1-dy-c5xlarge-1", "nodeip", "nodehostname", "MIXED+CLOUD+POWERED_DOWN", "queue1"),
StaticNode("queue1-st-c5xlarge-1", "nodeip", "nodehostname", "MIXED+CLOUD+DOWN+POWERING_UP", "queue1"),
],
False,
),
([DynamicNode("queue1-dy-c5xlarge-1", "nodeip", "nodehostname", "MIXED+CLOUD+NOT_RESPONDING", "queue1")], True),
],
)
def test_partition_is_inactive(nodes, expected_output):
partition = SlurmPartition("name", "nodenames", "state")
partition.slurm_nodes = nodes
assert_that(partition.has_running_job()).is_equal_to(expected_output)
@pytest.mark.parametrize(
"node, terminate_drain_nodes, terminate_down_nodes, mock_is_node_being_replaced, expected_result",
[
pytest.param(
DynamicNode("queue-dy-c5xlarge-1", "some_ip", "hostname", "MIXED+CLOUD", "queue"),
True,
True,
False,
True,
id="healthy_node",
),
pytest.param(
StaticNode("queue-st-c5xlarge-1", "some_ip", "hostname", "IDLE+CLOUD+DRAIN", "queue"),
True,
True,
False,
False,
id="drained_not_in_replacement",
),
pytest.param(
StaticNode("queue-st-c5xlarge-1", "some_ip", "hostname", "IDLE+CLOUD+DRAIN", "queue"),
True,
True,
True,
True,
id="drained_in_replacement",
),
pytest.param(
DynamicNode("queue-dy-c5xlarge-1", "some_ip", "hostname", "IDLE+CLOUD+DRAIN", "queue"),
False,
True,
False,
True,
id="drain_not_term",
),
pytest.param(
StaticNode("queue-st-c5xlarge-1", "some_ip", "hostname", "DOWN+CLOUD", "queue"),
True,
True,
False,
False,
id="down_not_in_replacement",
),
pytest.param(
StaticNode("queue-st-c5xlarge-1", "some_ip", "hostname", "DOWN+CLOUD", "queue"),
True,
True,
True,
True,
id="down_in_replacement",
),
pytest.param(
DynamicNode("queue-dy-c5xlarge-1", "some_ip", "hostname", "DOWN+CLOUD", "queue"),
True,
False,
False,
True,
id="down_not_term",
),
pytest.param(
StaticNode(
"queue-dy-c5xlarge-1",
"some_ip",
"hostname",
"DOWN+CLOUD+POWER_DOWN+POWERED_DOWN+NOT_RESPONDING",
"queue",
),
True,
True,
False,
False,
id="unhealthy_static_node",
),
pytest.param(
DynamicNode(
"queue-dy-c5xlarge-1",
"some_ip",
"hostname",
"DOWN+CLOUD+POWER_DOWN+POWERED_DOWN+NOT_RESPONDING",
"queue",
),
True,
True,
False,
False,
id="unhealthy_dynamic_node",
),
pytest.param(
StaticNode("queue-dy-c5xlarge-1", "some_ip", "hostname", "IDLE+CLOUD+POWER_DOWN+POWERED_DOWN", "queue"),
True,
True,
False,
True,
id="power_static_node",
),
pytest.param(
DynamicNode("queue-dy-c5xlarge-1", "some_ip", "hostname", "IDLE+CLOUD+POWER_DOWN+POWERED_DOWN", "queue"),
True,
True,
False,
True,
id="power_dynamic_node",
),
],
)
def test_slurm_node_is_state_healthy(
node, mock_is_node_being_replaced, terminate_drain_nodes, terminate_down_nodes, expected_result, mocker
):
node.is_being_replaced = mock_is_node_being_replaced
assert_that(node.is_state_healthy(terminate_drain_nodes, terminate_down_nodes)).is_equal_to(expected_result)
@pytest.mark.parametrize(
"node, instance, is_static_nodes_in_replacement, is_replacement_timeout, bootstrap_failure_messages, "
"is_failing_health_check, is_node_bootstrap_failure",
[
(
StaticNode("queue1-st-c5xlarge-1", "ip-1", "hostname", "DOWN+CLOUD+NOT_RESPONDING", "queue1"),
None,
True,
False,
"Node bootstrap error: Node queue1-st-c5xlarge-1(ip-1) is currently in replacement and no backing instance",
False,
True,
),
(
StaticNode("queue1-st-c5xlarge-1", "ip-1", "hostname", "DOWN+CLOUD+NOT_RESPONDING", "queue1"),
EC2Instance("id-1", "ip-1", "hostname", datetime(2020, 1, 1, 0, 0, 0)),
True,
True,
"Node bootstrap error: Replacement timeout expires for node queue1-st-c5xlarge-1(ip-1) in replacement",
False,
True,
),
(
DynamicNode("queue1-dy-c5xlarge-1", "ip-1", "hostname", "MIXED+CLOUD+NOT_RESPONDING+POWERING_UP", "queue1"),
None,
False,
False,
"Node bootstrap error: Node queue1-dy-c5xlarge-1(ip-1) is in power up state without valid backing instance",
False,
True,
),
(
DynamicNode("queue1-dy-c5xlarge-1", "ip-1", "hostname", "DOWN+CLOUD+POWERED_DOWN+NOT_RESPONDING", "queue1"),
EC2Instance("id-1", "ip-1", "hostname", "launch_time"),
False,
False,
"Node bootstrap error: Resume timeout expires",
False,
True,
),
(
DynamicNode("queue1-dy-c5xlarge-1", "queue1-dy-c5xlarge-1", "hostname", "DOWN+CLOUD+POWERING_UP", "queue1"),
None,
False,
False,
None,
False,
False,
),
(
StaticNode(
"queue1-st-c5xlarge-1", "queue1-dy-c5xlarge-1", "hostname", "DOWN+CLOUD+NOT_RESPONDING", "queue1"
),
None,
False,
False,
None,
False,
False,
),
(
StaticNode("queue1-st-c5xlarge-1", "ip-1", "hostname", "DOWN+CLOUD+NOT_RESPONDING", "queue1"),
EC2Instance("id-1", "ip-1", "hostname", datetime(2020, 1, 1, 0, 0, 0)),
False,
False,
None,
False,
False,
),
(
DynamicNode(
"queue1-dy-c5xlarge-1", "queue1-dy-c5xlarge-1", "hostname", "DOWN+CLOUD+POWERED_DOWN", "queue1"
),
EC2Instance("id-1", "ip-1", "hostname", "launch_time"),
False,
False,
None,
False,
False,
),
(
DynamicNode("queue1-dy-c5xlarge-1", "ip-1", "hostname", "DRAIN+CLOUD", "queue1"),
EC2Instance("id-1", "ip-1", "hostname", "launch_time"),
False,
False,
None,
False,
False,
),
(
DynamicNode("queue1-dy-c5xlarge-1", "ip-1", "hostname", "DOWN+CLOUD+POWERING_DOWN", "queue1"),
EC2Instance("id-1", "ip-1", "hostname", "launch_time"),
False,
False,
None,
False,
False,
),
(
StaticNode("queue1-st-c5xlarge-1", "ip-1", "hostname", "DOWN+CLOUD+POWERING_DOWN", "queue1"),
EC2Instance("id-1", "ip-1", "hostname", "launch_time"),
True,
False,
"failed during bootstrap when performing health check",
True,
True,
),
(
DynamicNode("queue1-dy-c5xlarge-1", "ip-1", "hostname", "MIXED+CLOUD+NOT_RESPONDING+POWERING_UP", "queue1"),
EC2Instance("id-1", "ip-1", "hostname", "launch_time"),
False,
False,
"failed during bootstrap when performing health check",
True,
True,
),
(
DynamicNode(
"queue1-dy-c5xlarge-1",
"queue1-dy-c5xlarge-1",
"hostname",
"DOWN+CLOUD+POWERED_DOWN+NOT_RESPONDING",
"queue1",
),
EC2Instance("id-1", "ip-1", "hostname", "launch_time"),
False,
False,
None,
False,
False,
),
(
DynamicNode("queue1-dy-c5xlarge-1", "ip-1", "hostname", "IDLE+CLOUD+NOT_RESPONDING+POWERING_UP", "queue1"),
None,
False,
False,
"Node bootstrap error: Node queue1-dy-c5xlarge-1(ip-1) is in power up state without valid backing instance",
False,
True,
),
],
ids=[
"static_self_terminate",
"static_timeout",
"dynamic_self_terminate",
"dynamic_timeout",
"dynamic_runinstance",
"static_runinstance",
"static_joined_cluster",
"dynamic_reset_incorrect",
"normal_down_1",
"normal_down_2",
"static_fail_health_check",
"dynamic_fail_health_check",
"dynamic_pcluster_stop",
"idle_powering_up",
],
)
def test_slurm_node_is_bootstrap_failure(
node,
is_static_nodes_in_replacement,
is_replacement_timeout,
bootstrap_failure_messages,
is_node_bootstrap_failure,
instance,
is_failing_health_check,
caplog,
):
node.instance = instance
node.is_static_nodes_in_replacement = is_static_nodes_in_replacement
node._is_replacement_timeout = is_replacement_timeout
node.is_failing_health_check = is_failing_health_check
caplog.set_level(logging.WARNING)
    # Run the check and assert the expected result and log output
assert_that(node.is_bootstrap_failure()).is_equal_to(is_node_bootstrap_failure)
if bootstrap_failure_messages:
assert_that(caplog.text).contains(bootstrap_failure_messages)
@pytest.mark.parametrize(
"node, instance, expected_result",
[
(
DynamicNode("queue-dy-c5xlarge-1", "ip-1", "hostname", "IDLE+CLOUD", "queue"),
EC2Instance("id-1", "ip-1", "hostname", datetime(2020, 1, 1, 0, 0, 0)),
True,
),
(
StaticNode("queue-st-c5xlarge-1", "queue-st-c5xlarge-1", "hostname", "IDLE+CLOUD", "queue"),
EC2Instance("id-1", "ip-1", "hostname", datetime(2020, 1, 1, 0, 0, 0)),
False,
),
(
DynamicNode("queue-dy-c5xlarge-1", "queue-dy-c5xlarge-1", "hostname", "IDLE+CLOUD", "queue"),
None,
True,
),
(
DynamicNode("queue-dy-c5xlarge-1", "ip-3", "hostname", "IDLE+CLOUD", "queue"),
None,
False,
),
(
DynamicNode("queue-st-c5xlarge-1", "ip-2", "hostname", "DOWN+CLOUD", "queue"),
None,
False,
),
        # Powering-down nodes with a backing instance are considered healthy
(
DynamicNode("queue-dy-c5xlarge-1", "ip-2", "hostname", "DOWN+CLOUD+POWERING_DOWN", "queue"),
EC2Instance("id-2", "ip-2", "hostname", datetime(2020, 1, 1, 0, 0, 0)),
True,
),
        # Powering-down nodes without a backing instance are considered unhealthy
(
DynamicNode("queue-dy-c5xlarge-1", "ip-2", "hostname", "DOWN+CLOUD+POWERING_DOWN", "queue"),
None,
False,
),
        # A node in POWER_SAVE that still has an IP set should be considered unhealthy
(
DynamicNode("queue-dy-c5xlarge-1", "ip-2", "hostname", "IDLE+CLOUD+POWER", "queue"),
None,
False,
),
        # A node in POWER_SAVE that is also DOWN should be considered unhealthy
(
DynamicNode("queue-dy-c5xlarge-1", "queue-dy-c5xlarge-1", "hostname", "DOWN+CLOUD+POWER", "queue"),
None,
False,
),
(
DynamicNode(
"queue-dy-c5xlarge-1", "queue-dy-c5xlarge-1", "queue-dy-c5xlarge-1", "IDLE+CLOUD+POWER", "queue"
),
None,
True,
),
],
ids=[
"basic",
"static_nodeaddr_not_set",
"dynamic_nodeaddr_not_set",
"dynamic_unhealthy",
"static_unhealthy",
"powering_down_healthy",
"powering_down_unhealthy",
"power_unhealthy1",
"power_unhealthy2",
"power_healthy",
],
)
def test_slurm_node_is_healthy(node, instance, expected_result):
node.instance = instance
assert_that(node.is_healthy(terminate_drain_nodes=True, terminate_down_nodes=True)).is_equal_to(expected_result)
@pytest.mark.parametrize(
"node, expected_result",
[
(
StaticNode(
"queue1-st-c5xlarge-1", "nodeip", "nodehostname", "MIXED+CLOUD+NOT_RESPONDING+POWERING_UP", "queue1"
),
False,
),
(DynamicNode("queue1-dy-c5xlarge-1", "nodeip", "nodehostname", "MIXED+CLOUD+POWERED_DOWN", "queue1"), False),
(DynamicNode("queue1-dy-c5xlarge-1", "nodeip", "nodehostname", "IDLE+CLOUD+POWERED_DOWN", "queue1"), True),
(
DynamicNode(
"queue1-dy-c5xlarge-1", "queue1-dy-c5xlarge-1", "nodehostname", "IDLE+CLOUD+POWERED_DOWN", "queue1"
),
False,
),
(
DynamicNode(
"queue1-dy-c5xlarge-1", "nodeip", "nodehostname", "IDLE+CLOUD+POWERED_DOWN+POWER_DOWN", "queue1"
),
True,
),
(
DynamicNode(
"queue1-dy-c5xlarge-1",
"queue1-dy-c5xlarge-1",
"nodehostname",
"IDLE+CLOUD+POWERED_DOWN+POWER_DOWN",
"queue1",
),
False,
),
(DynamicNode("queue1-dy-c5xlarge-1", "nodeip", "nodehostname", "POWERING_DOWN", "queue1"), True),
(DynamicNode("queue1-dy-c5xlarge-1", "queue1-dy-c5xlarge-1", "nodehostname", "POWERING_DOWN", "queue1"), False),
(
DynamicNode(
"queue1-dy-c5xlarge-1", "queue1-dy-c5xlarge-1", "nodehostname", "MIXED+CLOUD+NOT_RESPONDING", "queue1"
),
False,
),
(DynamicNode("queue1-dy-c5xlarge-1", "nodeip", "nodehostname", "MIXED+CLOUD+NOT_RESPONDING", "queue1"), False),
(
DynamicNode(
"queue1-dy-c5xlarge-1", "nodeip", "nodehostname", "MIXED+CLOUD+NOT_RESPONDING+POWERING_UP", "queue1"
),
False,
),
(
DynamicNode("queue1-dy-c5xlarge-1", "nodeip", "nodehostname", "MIXED+CLOUD+DRAIN+POWER_DOWN", "queue1"),
False,
),
(DynamicNode("queue1-dy-c5xlarge-1", "nodeip", "nodehostname", "IDLE+CLOUD+DRAIN+POWER_DOWN", "queue1"), False),
(
DynamicNode("queue1-dy-c5xlarge-1", "nodeip", "nodehostname", "IDLE+CLOUD+DRAIN+POWERING_DOWN", "queue1"),
True,
),
(
DynamicNode(
"queue1-dy-c5xlarge-1",
"queue1-dy-c5xlarge-1",
"nodehostname",
"IDLE+CLOUD+DRAIN+POWERING_DOWN",
"queue1",
),
False,
),
],
)
def test_slurm_node_is_powering_down_with_nodeaddr(node, expected_result):
assert_that(node.is_powering_down_with_nodeaddr()).is_equal_to(expected_result)
@pytest.mark.parametrize(
"node, instance, expected_result",
[
(
StaticNode("queue1-st-c5xlarge-1", "ip-1", "hostname", "IDLE+CLOUD", "queue1"),
None,
False,
),
(
DynamicNode("node-dy-c5xlarge-1", "node-dy-c5xlarge-1", "hostname", "IDLE+CLOUD+POWER", "node"),
None,
True,
),
(
DynamicNode("node-dy-c5xlarge-1", "ip-1", "hostname", "IDLE+CLOUD+POWER", "node"),
None,
False,
),
(
StaticNode("queue1-st-c5xlarge-1", "ip-1", "hostname", "IDLE+CLOUD+POWER", "queue1"),
EC2Instance("id-1", "ip-1", "hostname", datetime(2020, 1, 1, 0, 0, 0)),
True,
),
],
ids=["static_no_backing", "dynamic_power_save", "dynamic_no_backing", "static_valid"],
)
def test_slurm_node_is_backing_instance_valid(node, instance, expected_result):
node.instance = instance
assert_that(node.is_backing_instance_valid()).is_equal_to(expected_result)
@pytest.mark.parametrize(
"node, expected_result",
[
(
StaticNode(
"queue1-st-c5xlarge-1", "nodeip", "nodehostname", "MIXED+CLOUD+NOT_RESPONDING+POWERING_UP", "queue1"
),
True,
),
(
StaticNode(
"queue1-st-c5xlarge-1", "queue1-st-c5xlarge-1", "nodehostname", "MIXED+CLOUD+POWERED_DOWN", "queue1"
),
False,
),
(DynamicNode("queue1-dy-c5xlarge-1", "nodeip", "nodehostname", "IDLE+CLOUD+POWER", "queue1"), True),
(DynamicNode("queue1-dy-c5xlarge-1", "queue1-dy-c5xlarge-1", "nodehostname", "POWERING_DOWN", "queue1"), False),
(DynamicNode("queue1-dy-c5xlarge-1", "queue1-dy-c5xlarge-1", "nodehostname", "DOWN+CLOUD", "queue1"), False),
(
DynamicNode(
"queue1-dy-c5xlarge-1", "queue1-dy-c5xlarge-1", "nodehostname", "MIXED+CLOUD+NOT_RESPONDING", "queue1"
),
True,
),
(DynamicNode("queue1-dy-c5xlarge-1", "nodeip", "nodehostname", "MIXED+CLOUD+NOT_RESPONDING", "queue1"), True),
],
)
def test_slurm_node_needs_reset_when_inactive(node, expected_result):
assert_that(node.needs_reset_when_inactive()).is_equal_to(expected_result)
@pytest.mark.parametrize(
"node, expected_result",
[
(StaticNode("queue1-st-c5xlarge-1", "queue1-st-c5xlarge-1", "hostname", "IDLE+CLOUD", "queue1"), False),
(StaticNode("queue1-st-c5xlarge-1", "ip-1", "hostname", "IDLE+CLOUD", "queue1"), True),
],
ids=["static_addr_not_set", "static_valid"],
)
def test_is_static_node_configuration_valid(node, expected_result):
assert_that(node._is_static_node_configuration_valid()).is_equal_to(expected_result)
@pytest.mark.parametrize(
"instance_health_state, current_time, expected_result",
[
(
EC2InstanceHealthState(
"id-12345",
"running",
{"Details": [{}], "Status": "ok"},
{"Details": [{"ImpairedSince": datetime(2020, 1, 1, 0, 0, 0)}], "Status": "ok"},
None,
),
datetime(2020, 1, 1, 0, 0, 30),
False,
),
(
EC2InstanceHealthState(
"id-12345",
"stopped",
{"Details": [{}], "Status": "initializing"},
{"Details": [{"ImpairedSince": datetime(2020, 1, 1, 0, 0, 0)}], "Status": "initializing"},
None,
),
datetime(2020, 1, 1, 0, 0, 30),
False,
),
(
EC2InstanceHealthState(
"id-12345",
"stopped",
{"Details": [{"ImpairedSince": datetime(2020, 1, 1, 0, 0, 0)}], "Status": "not-applicable"},
{"Details": [{"ImpairedSince": datetime(2020, 1, 1, 0, 0, 0)}], "Status": "not-applicable"},
None,
),
datetime(2020, 1, 1, 0, 0, 30),
False,
),
(
EC2InstanceHealthState(
"id-12345",
"stopped",
{"Details": [{"ImpairedSince": datetime(2020, 1, 1, 0, 0, 0)}], "Status": "insufficient-data"},
{"Details": [{"ImpairedSince": datetime(2020, 1, 1, 0, 0, 0)}], "Status": "insufficient-data"},
None,
),
datetime(2020, 1, 1, 0, 0, 30),
False,
),
(
EC2InstanceHealthState(
"id-12345",
"stopped",
{"Details": [{"ImpairedSince": datetime(2020, 1, 1, 0, 0, 15)}], "Status": "initializing"},
{"Details": [{"ImpairedSince": datetime(2020, 1, 1, 0, 0, 0)}], "Status": "impaired"},
None,
),
datetime(2020, 1, 1, 0, 0, 30),
True,
),
(
EC2InstanceHealthState(
"id-12345",
"stopped",
{"Details": [{"ImpairedSince": datetime(2020, 1, 1, 0, 0, 15)}], "Status": "initializing"},
{"Details": [{"ImpairedSince": datetime(2020, 1, 1, 0, 0, 0)}], "Status": "impaired"},
None,
),
datetime(2020, 1, 1, 0, 0, 29),
False,
),
],
ids=["ok", "initializing", "not-applicable", "insufficient-data", "impaired", "timeout"],
)
def test_fail_ec2_health_check(instance_health_state, current_time, expected_result):
assert_that(instance_health_state.fail_ec2_health_check(current_time, health_check_timeout=30)).is_equal_to(
expected_result
)
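# Boundary illustrated by the two "impaired" cases above: with
# health_check_timeout=30, an impaired status fails the check only once
# ImpairedSince lies at least 30s in the past (current time 0:00:30 -> True,
# 0:00:29 -> False).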
@pytest.mark.parametrize(
"instance_health_state, expected_result",
[
(
EC2InstanceHealthState(
"id-12345",
"running",
{"Details": [{}], "Status": "ok"},
{"Details": [{}], "Status": "ok"},
[],
),
False,
),
(
EC2InstanceHealthState(
"id-12345",
"running",
{"Details": [{}], "Status": "ok"},
{"Details": [{}], "Status": "ok"},
[{"InstanceEventId": "someid"}],
),
True,
),
],
ids=["no_event", "has_event"],
)
def test_fail_scheduled_events_health_check(instance_health_state, expected_result):
assert_that(instance_health_state.fail_scheduled_events_check()).is_equal_to(expected_result)
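# ---------------------------------------------------------------------------
# A minimal, self-contained sketch of the reason parsing that the
# test_slurm_node_is_ice cases above exercise. This is NOT the actual
# SlurmNode implementation: the helper name is hypothetical, and the code set
# lists only the insufficient-capacity codes that appear in this test file.
import re

_ICE_CODES = {
    "InsufficientInstanceCapacity",
    "InsufficientHostCapacity",
    "InsufficientReservedInstanceCapacity",
    "Unsupported",
    "SpotMaxPriceTooLow",
}


def _reason_is_ice(reason):
    """Return True when a node reason carries an insufficient-capacity code."""
    match = re.match(r"\(Code:(?P<code>[^)]+)\)Failure when resuming nodes", reason or "")
    return bool(match) and match.group("code") in _ICE_CODES


# _reason_is_ice("(Code:SpotMaxPriceTooLow)Failure when resuming nodes")   -> True
# _reason_is_ice("(Code:RequestLimitExceeded)Failure when resuming nodes") -> False
# ---------------------------------------------------------------------------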
======================================== record ========================================
hexsha: fc2be215cd39d2920b388bb851b41fdc514e2701 | size: 38 | ext: py | lang: Python
path: tests/functions/bitxor.py
stars: 50 via treeform/pystorm @ 3a2224bcdaccc5a2abf6a820c0bcf7afa3e6fed4 [MIT] (2015-03-24 to 2022-02-20)
issues: 2 via dusty-phillips/pyjaco @ 066895ae38d1828498e529c1875cb88df6cbc54d [MIT] (2017-02-26 to 2017-03-06)
forks: 12 via Slater-Victoroff/pyjaco @ 89c4e3c46399c5023b0e160005d855a01241c58a [MIT] (2016-03-07 to 2021-09-05)
=========================================================================================
x = 32213
y = 98743
z = x ^ y
print(z)
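# XOR of the two literals above: 32213 ^ 98743 == 130146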
======================================== record ========================================
hexsha: fc6fe6367d0c8416a0666687bfee124b968f70a0 | size: 38,146 | ext: py | lang: Python
path: models/Translator.py
repo: ybCliff/VideoCaptioning @ 93fc3b095c970e51e1e24909163a827df98d6ef3 [MIT]
stars: 3 (2020-05-16 to 2021-06-14) | issues: n/a | forks: 3 (2020-05-17 to 2020-07-28)
=========================================================================================
''' This module will handle the text generation with beam search. '''
import torch
import torch.nn as nn
import torch.nn.functional as F
from models.Beam import Beam
import os, json
import models.Constants as Constants
class Translator(object):
''' Load with trained model and handle the beam search '''
def __init__(self, model, opt, device=torch.device('cuda'), teacher_model=None, dict_mapping={}):
self.model = model
self.model.eval()
self.opt = opt
self.device = device
self.teacher_model = teacher_model
self.dict_mapping = dict_mapping
self.length_bias = opt.get('length_bias', 0)
def get_inst_idx_to_tensor_position_map(self, inst_idx_list):
''' Indicate the position of an instance in a tensor. '''
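        # e.g. inst_idx_list=[3, 7, 9] -> {3: 0, 7: 1, 9: 2}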
return {inst_idx: tensor_position for tensor_position, inst_idx in enumerate(inst_idx_list)}
def collect_active_part(self, beamed_tensor, curr_active_inst_idx, n_prev_active_inst, n_bm):
''' Collect tensor parts associated to active instances. '''
_, *d_hs = beamed_tensor.size()
n_curr_active_inst = len(curr_active_inst_idx)
new_shape = (n_curr_active_inst * n_bm, *d_hs)
#print('n_prev_active:', n_prev_active_inst)
#print('n_curr_active:', curr_active_inst_idx)
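        # Illustrative shapes: with n_prev_active_inst=3, n_bm=2 and a (6, d)
        # beamed_tensor, view -> (3, 2*d); index_select rows [0, 2] -> (2, 2*d);
        # final view -> (4, d): the beams of the two still-active instances.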
beamed_tensor = beamed_tensor.view(n_prev_active_inst, -1)
beamed_tensor = beamed_tensor.index_select(0, curr_active_inst_idx)
beamed_tensor = beamed_tensor.view(*new_shape)
return beamed_tensor
def collate_active_info(self, enc_output, inst_idx_to_position_map, active_inst_idx_list, category, n_bm, enc_hidden=None, tag=None):
# Sentences which are still active are collected,
# so the decoder will not run on completed sentences.
n_prev_active_inst = len(inst_idx_to_position_map)
active_inst_idx = [inst_idx_to_position_map[k] for k in active_inst_idx_list]
active_inst_idx = torch.LongTensor(active_inst_idx).to(self.device)
if isinstance(enc_output, list):
active_src_enc = []
for item in enc_output:
active_src_enc.append(self.collect_active_part(item, active_inst_idx, n_prev_active_inst, n_bm))
else:
active_src_enc = self.collect_active_part(enc_output, active_inst_idx, n_prev_active_inst, n_bm)
active_category = self.collect_active_part(category, active_inst_idx, n_prev_active_inst, n_bm)
if enc_hidden is not None:
if isinstance(enc_hidden, list):
active_hidden = []
for i in range(len(enc_hidden)):
assert isinstance(enc_hidden[i], tuple)
tmp1 = self.collect_active_part(enc_hidden[i][0], active_inst_idx, n_prev_active_inst, n_bm)
tmp2 = self.collect_active_part(enc_hidden[i][1], active_inst_idx, n_prev_active_inst, n_bm)
active_hidden.append((tmp1, tmp2))
else:
assert isinstance(enc_hidden, tuple)
tmp1 = self.collect_active_part(enc_hidden[0], active_inst_idx, n_prev_active_inst, n_bm)
tmp2 = self.collect_active_part(enc_hidden[1], active_inst_idx, n_prev_active_inst, n_bm)
active_hidden = (tmp1, tmp2)
active_tag = None
if tag is not None:
active_tag = self.collect_active_part(tag, active_inst_idx, n_prev_active_inst, n_bm)
active_inst_idx_to_position_map = self.get_inst_idx_to_tensor_position_map(active_inst_idx_list)
if enc_hidden is None:
#if tag is not None:
return active_src_enc, active_category, active_inst_idx_to_position_map, active_tag
#return active_src_enc, active_category, active_inst_idx_to_position_map
return active_src_enc, active_hidden, active_category, active_inst_idx_to_position_map, active_tag
def collect_active_inst_idx_list(self, inst_beams, word_prob, inst_idx_to_position_map):
active_inst_idx_list = []
for inst_idx, inst_position in inst_idx_to_position_map.items():
is_inst_complete = inst_beams[inst_idx].advance(word_prob[inst_position])
if not is_inst_complete:
active_inst_idx_list += [inst_idx]
return active_inst_idx_list
def collect_hypothesis_and_scores(self, inst_dec_beams, n_best):
all_hyp, all_scores = [], []
for inst_idx in range(len(inst_dec_beams)):
scores, tk = inst_dec_beams[inst_idx].sort_finished(self.opt.get('beam_alpha', 1.0))
n_best = min(n_best, len(scores))
all_scores += [scores[:n_best]]
hyps = [inst_dec_beams[inst_idx].get_hypothesis_from_tk(t, k) for t, k in tk[:n_best]]
all_hyp += [hyps]
return all_hyp, all_scores
def collect_hypothesis_and_scores_bd(self, inst_dec_beams, n_best, enc_output, category):
max_len = self.opt['max_len']
all_hyp, all_scores = [], []
for inst_idx in range(len(inst_dec_beams)):
scores, tk = inst_dec_beams[inst_idx].sort_finished(self.opt.get('beam_alpha', 1.0))
_n_best = len(scores)
all_scores += [scores[:_n_best]]
hyps = [inst_dec_beams[inst_idx].get_hypothesis_from_tk(t, k) for t, k in tk[:_n_best]]
hyps = [[Constants.BOS] + item[:-1] + [Constants.PAD] * (max_len - len(item)) for item in hyps]
all_hyp += [hyps]
input_data = torch.LongTensor(all_hyp).to(self.device)
bsz, beam_size, _ = input_data.shape
input_data = input_data.view(bsz*beam_size, -1)
        # print(category.shape)
logit, *_ = self.model.beam_decoder(
tgt_seq=input_data,
category=category,
enc_output=enc_output
)
logit = logit.view(bsz, beam_size, 2)[:, :, 1]
ind = logit.max(1)[1]
tmp = input_data.view(bsz, beam_size, max_len).gather(1, ind.unsqueeze(1).unsqueeze(2).repeat(1, 1, max_len))[:, :, 1:]
return tmp.tolist(), []
def translate_batch_ARFormer(self, encoder_outputs, category):
''' Translation work in one batch '''
def beam_decode_step(
inst_dec_beams, len_dec_seq, enc_output, inst_idx_to_position_map, n_bm, category, attribute):
''' Decode and update beam status, and then return active beam idx '''
def prepare_beam_dec_seq(inst_dec_beams, len_dec_seq):
dec_partial_seq = [b.get_current_state() for b in inst_dec_beams if not b.done]
dec_partial_seq = torch.stack(dec_partial_seq).to(self.device)
dec_partial_seq = dec_partial_seq.view(-1, len_dec_seq)
#print(dec_partial_seq)
return dec_partial_seq
def predict_word(dec_seq, enc_output, n_active_inst, n_bm, category, attribute):
dec_output, *_ = self.model.decoder(dec_seq, enc_output, category, tags=attribute)
if isinstance(dec_output, list):
dec_output = dec_output[-1]
dec_output = dec_output[:, -1, :] # Pick the last step: (bh * bm) * d_h
word_prob = F.log_softmax(self.model.tgt_word_prj(dec_output), dim=1)
word_prob = word_prob.view(n_active_inst, n_bm, -1)
return word_prob
n_active_inst = len(inst_idx_to_position_map)
dec_seq = prepare_beam_dec_seq(inst_dec_beams, len_dec_seq)
word_prob = predict_word(dec_seq, enc_output, n_active_inst, n_bm, category, attribute)
# Update the beam with predicted word prob information and collect incomplete instances
active_inst_idx_list = self.collect_active_inst_idx_list(
inst_dec_beams, word_prob, inst_idx_to_position_map)
return active_inst_idx_list
'''
def collect_hypothesis_and_scores(inst_dec_beams, n_best):
all_hyp, all_scores = [], []
for inst_idx in range(len(inst_dec_beams)):
scores, tail_idxs = inst_dec_beams[inst_idx].sort_scores()
print(113, scores, tail_idxs)
all_scores += [scores[:n_best]]
hyps = [inst_dec_beams[inst_idx].get_hypothesis(i) for i in tail_idxs[:n_best]]
all_hyp += [hyps]
return all_hyp, all_scores
'''
with torch.no_grad():
enc_output = encoder_outputs['enc_output']
if isinstance(enc_output, list):
assert len(enc_output) == 1
enc_output = enc_output[0]
#-- Repeat data for beam search
n_bm = self.opt["beam_size"]
n_inst, len_s, d_h = enc_output.size()
enc_output = enc_output.repeat(1, n_bm, 1).view(n_inst * n_bm, len_s, d_h)
category = category.repeat(1, n_bm).view(n_inst * n_bm, 1)
            e = enc_output.clone()
            c = category.clone()
attribute = encoder_outputs.get(Constants.mapping['attr'][0], None)
            if attribute is not None:
                attribute = attribute.unsqueeze(1).repeat(1, n_bm, 1).view(n_inst * n_bm, -1)
#-- Prepare beams
inst_dec_beams = [Beam(n_bm, self.opt["max_len"], device=self.device, specific_nums_of_sents=self.opt.get('topk', 1)) for _ in range(n_inst)]
#-- Bookkeeping for active or not
active_inst_idx_list = list(range(n_inst))
inst_idx_to_position_map = self.get_inst_idx_to_tensor_position_map(active_inst_idx_list)
#-- Decode
for len_dec_seq in range(1, self.opt["max_len"]):
active_inst_idx_list = beam_decode_step(
inst_dec_beams, len_dec_seq, enc_output, inst_idx_to_position_map, n_bm, category, attribute)
if not active_inst_idx_list:
break # all instances have finished their path to <EOS>
enc_output, category, inst_idx_to_position_map, attribute = self.collate_active_info(
enc_output, inst_idx_to_position_map, active_inst_idx_list, category, n_bm, tag=attribute)
if self.opt.get('use_beam_decoder', False):
batch_hyp, batch_scores = self.collect_hypothesis_and_scores_bd(inst_dec_beams, self.opt.get("topk", 1), e, c)
else:
batch_hyp, batch_scores = self.collect_hypothesis_and_scores(inst_dec_beams, self.opt.get("topk", 1))
return batch_hyp, batch_scores
def translate_batch_LSTM(self, encoder_outputs, category):
''' Translation work in one batch '''
def beam_decode_step(
inst_dec_beams, enc_output, enc_hidden, inst_idx_to_position_map, n_bm, category, tag):
''' Decode and update beam status, and then return active beam idx '''
def prepare_beam_dec_seq(inst_dec_beams):
dec_partial_seq = [b.get_lastest_state() for b in inst_dec_beams if not b.done]
dec_partial_seq = torch.stack(dec_partial_seq).to(self.device)
dec_partial_seq = dec_partial_seq.view(-1)
#print(dec_partial_seq)
return dec_partial_seq
def predict_word(dec_seq, enc_output, enc_hidden, n_active_inst, n_bm, category, tag):
res = self.model.decoder(
it=dec_seq,
encoder_outputs=enc_output,
category=category,
decoder_hidden=enc_hidden,
tag=tag
)
dec_output, enc_hidden, tag = res['dec_outputs'], res['dec_hidden'], res.get('pred_tag', None)
word_prob = F.log_softmax(self.model.tgt_word_prj(dec_output), dim=1)
word_prob = word_prob.view(n_active_inst, n_bm, -1)
return word_prob, enc_hidden, tag.argmax(1) if tag is not None else None
def collect_active_hidden_single(inst_beams, inst_idx_to_position_map, enc_hidden, n_bm):
if isinstance(enc_hidden, tuple):
tmp1, tmp2 = enc_hidden
_, *d_hs = tmp1.size()
n_curr_active_inst = len(inst_idx_to_position_map)
new_shape = (n_curr_active_inst * n_bm, *d_hs)
tmp1 = tmp1.view(n_curr_active_inst, n_bm, -1)
tmp2 = tmp2.view(n_curr_active_inst, n_bm, -1)
#print('hidden:', tmp1)
for inst_idx, inst_position in inst_idx_to_position_map.items():
_prev_ks = inst_beams[inst_idx].get_current_origin()
tmp1[inst_position] = tmp1[inst_position].index_select(0, _prev_ks)
tmp2[inst_position] = tmp2[inst_position].index_select(0, _prev_ks)
#print("PREV_KS:", _prev_ks)
#print('after h:', tmp1)
tmp1 = tmp1.view(*new_shape)
tmp2 = tmp2.view(*new_shape)
enc_hidden = (tmp1, tmp2)
else:
_, *d_hs = enc_hidden.size()
n_curr_active_inst = len(inst_idx_to_position_map)
new_shape = (n_curr_active_inst * n_bm, *d_hs)
enc_hidden = enc_hidden.view(n_curr_active_inst, n_bm, -1)
for inst_idx, inst_position in inst_idx_to_position_map.items():
_prev_ks = inst_beams[inst_idx].get_current_origin()
enc_hidden[inst_position] = enc_hidden[inst_position].index_select(0, _prev_ks)
enc_hidden = enc_hidden.view(*new_shape)
return enc_hidden
def collect_active_hidden(inst_beams, inst_idx_to_position_map, enc_hidden, n_bm):
if enc_hidden is None:
return None
if isinstance(enc_hidden, list):
hidden = []
for item in enc_hidden:
hidden.append(collect_active_hidden_single(inst_beams, inst_idx_to_position_map, item, n_bm))
else:
hidden = collect_active_hidden_single(inst_beams, inst_idx_to_position_map, enc_hidden, n_bm)
return hidden
'''
_, *d_hs = beamed_tensor.size()
n_curr_active_inst = len(curr_active_inst_idx)
new_shape = (n_curr_active_inst * n_bm, *d_hs)
print('n_prev_active:', n_prev_active_inst)
print('n_curr_active:', curr_active_inst_idx)
beamed_tensor = beamed_tensor.view(n_prev_active_inst, -1)
beamed_tensor = beamed_tensor.index_select(0, curr_active_inst_idx)
beamed_tensor = beamed_tensor.view(*new_shape)
return beamed_tensor
'''
n_active_inst = len(inst_idx_to_position_map)
dec_seq = prepare_beam_dec_seq(inst_dec_beams)
#print(dec_seq)
#print('before:', enc_hidden[0])
word_prob, enc_hidden, tag = predict_word(dec_seq, enc_output, enc_hidden, n_active_inst, n_bm, category, tag)
#print('after:', enc_hidden[0])
# Update the beam with predicted word prob information and collect incomplete instances
active_inst_idx_list = self.collect_active_inst_idx_list(
inst_dec_beams, word_prob, inst_idx_to_position_map)
enc_hidden = collect_active_hidden(inst_dec_beams, inst_idx_to_position_map, enc_hidden, n_bm)
tag = collect_active_hidden(inst_dec_beams, inst_idx_to_position_map, tag, n_bm)
return active_inst_idx_list, enc_hidden, tag
with torch.no_grad():
enc_output, enc_hidden = encoder_outputs['enc_output'], encoder_outputs['enc_hidden']
if not isinstance(enc_output, list):
enc_output = [enc_output]
n_bm = self.opt["beam_size"]
n_inst, len_s, _ = enc_output[0].shape
#-- Repeat data for beam search
enc_output = [item.repeat(1, n_bm, 1).view(n_inst * n_bm, len_s, -1) for item in enc_output]
if isinstance(enc_hidden, tuple):
n_inst, d_h = enc_hidden[0].size()
enc_hidden = (enc_hidden[0].unsqueeze(1).repeat(1, n_bm, 1).view(n_inst * n_bm, d_h), enc_hidden[1].unsqueeze(1).repeat(1, n_bm, 1).view(n_inst * n_bm, d_h))
elif isinstance(enc_hidden, list):
n_inst, d_h = enc_hidden[0].size()
enc_hidden = [item.unsqueeze(1).repeat(1, n_bm, 1).view(n_inst * n_bm, d_h) for item in enc_hidden]
else:
n_inst, d_h = enc_hidden.size()
enc_hidden = enc_hidden.unsqueeze(1).repeat(1, n_bm, 1).view(n_inst * n_bm, d_h)
enc_hidden = self.model.decoder.init_hidden(enc_hidden)
if encoder_outputs.get('obj_emb', None) is not None:
if self.opt['with_category']:
category = torch.cat([category, encoder_outputs['obj_emb']], dim=1)
else:
category = encoder_outputs['obj_emb']
category = category.unsqueeze(1).repeat(1, n_bm, 1).view(n_inst * n_bm, -1)
#-- Prepare beams
inst_dec_beams = [Beam(n_bm, self.opt["max_len"], device=self.device) for _ in range(n_inst)]
if self.opt['use_tag']:
tag = category.new(n_inst, n_bm).fill_(Constants.BOS).view(n_inst * n_bm).long()
else:
tag = None
#-- Bookkeeping for active or not
active_inst_idx_list = list(range(n_inst))
inst_idx_to_position_map = self.get_inst_idx_to_tensor_position_map(active_inst_idx_list)
#-- Decode
for t in range(1, self.opt["max_len"]):
active_inst_idx_list, enc_hidden, tag = beam_decode_step(
inst_dec_beams, enc_output, enc_hidden, inst_idx_to_position_map, n_bm, category, tag)
if not active_inst_idx_list:
break # all instances have finished their path to <EOS>
enc_output, enc_hidden, category, inst_idx_to_position_map, tag = self.collate_active_info(
enc_output, inst_idx_to_position_map, active_inst_idx_list, category, n_bm, enc_hidden=enc_hidden, tag=tag)
batch_hyp, batch_scores = self.collect_hypothesis_and_scores(inst_dec_beams, self.opt.get("topk", 1))
return batch_hyp, batch_scores
def translate_batch_ENSEMBLE(self, encoder_outputs, category):
''' Translation work in one batch '''
def beam_decode_step(
inst_dec_beams, enc_output, enc_hidden, inst_idx_to_position_map, n_bm, category):
''' Decode and update beam status, and then return active beam idx '''
def prepare_beam_dec_seq(inst_dec_beams):
dec_partial_seq = [b.get_lastest_state() for b in inst_dec_beams if not b.done]
dec_partial_seq = torch.stack(dec_partial_seq).to(self.device)
dec_partial_seq = dec_partial_seq.view(-1)
#print(dec_partial_seq)
return dec_partial_seq
def predict_word(dec_seq, enc_output, enc_hidden, n_active_inst, n_bm, category):
dec_output, enc_hidden, *_ = self.model.decoder(dec_seq, enc_output, category, enc_hidden)
assert isinstance(dec_output, list)
word_prob = []
for i in range(len(dec_output)):
tmp = F.log_softmax(self.model.tgt_word_prj(dec_output[i]), dim=1)
tmp = tmp.view(n_active_inst, n_bm, -1)
word_prob.append(tmp)
word_prob = torch.stack(word_prob, dim=0).mean(0)
return word_prob, enc_hidden
def collect_active_hidden(inst_beams, inst_idx_to_position_map, enc_hidden, n_bm):
assert isinstance(enc_hidden, list)
n_curr_active_inst = len(inst_idx_to_position_map)
for i in range(len(enc_hidden)):
tmp1, tmp2 = enc_hidden[i]
_, *d_hs = tmp1.size()
new_shape = (n_curr_active_inst * n_bm, *d_hs)
tmp1 = tmp1.view(n_curr_active_inst, n_bm, -1)
tmp2 = tmp2.view(n_curr_active_inst, n_bm, -1)
#print('hidden:', tmp1)
for inst_idx, inst_position in inst_idx_to_position_map.items():
_prev_ks = inst_beams[inst_idx].get_current_origin()
tmp1[inst_position] = tmp1[inst_position].index_select(0, _prev_ks)
tmp2[inst_position] = tmp2[inst_position].index_select(0, _prev_ks)
#print("PREV_KS:", _prev_ks)
#print('after h:', tmp1)
tmp1 = tmp1.view(*new_shape)
tmp2 = tmp2.view(*new_shape)
enc_hidden[i] = (tmp1, tmp2)
return enc_hidden
n_active_inst = len(inst_idx_to_position_map)
dec_seq = prepare_beam_dec_seq(inst_dec_beams)
#print(dec_seq)
#print('before:', enc_hidden[0])
word_prob, enc_hidden = predict_word(dec_seq, enc_output, enc_hidden, n_active_inst, n_bm, category)
#print('after:', enc_hidden[0])
# Update the beam with predicted word prob information and collect incomplete instances
active_inst_idx_list = self.collect_active_inst_idx_list(
inst_dec_beams, word_prob, inst_idx_to_position_map)
enc_hidden = collect_active_hidden(inst_dec_beams, inst_idx_to_position_map, enc_hidden, n_bm)
return active_inst_idx_list, enc_hidden
with torch.no_grad():
enc_output, enc_hidden = encoder_outputs['enc_output'], encoder_outputs['enc_hidden']
if not isinstance(enc_output, list):
enc_output = [enc_output]
n_bm = self.opt["beam_size"]
n_inst, len_s, d_h = enc_output[0].size()
#-- Repeat data for beam search
enc_output = [item.repeat(1, n_bm, 1).view(n_inst * n_bm, len_s, d_h) for item in enc_output]
assert isinstance(enc_hidden, list)
for i in range(len(enc_hidden)):
if isinstance(enc_hidden[i], tuple):
enc_hidden[i] = (enc_hidden[i][0].unsqueeze(1).repeat(1, n_bm, 1).view(n_inst * n_bm, d_h),
enc_hidden[i][1].unsqueeze(1).repeat(1, n_bm, 1).view(n_inst * n_bm, d_h))
else:
enc_hidden[i] = enc_hidden[i].unsqueeze(1).repeat(1, n_bm, 1).view(n_inst * n_bm, d_h)
enc_hidden = self.model.decoder.init_hidden(enc_hidden)
category = category.unsqueeze(1).repeat(1, n_bm, 1).view(n_inst * n_bm, self.opt['num_category'])
#-- Prepare beams
inst_dec_beams = [Beam(n_bm, self.opt["max_len"], device=self.device) for _ in range(n_inst)]
#-- Bookkeeping for active or not
active_inst_idx_list = list(range(n_inst))
inst_idx_to_position_map = self.get_inst_idx_to_tensor_position_map(active_inst_idx_list)
#-- Decode
for t in range(1, self.opt["max_len"]):
active_inst_idx_list, enc_hidden = beam_decode_step(
inst_dec_beams, enc_output, enc_hidden, inst_idx_to_position_map, n_bm, category)
if not active_inst_idx_list:
break # all instances have finished their path to <EOS>
enc_output, enc_hidden, category, inst_idx_to_position_map = self.collate_active_info(
enc_output, inst_idx_to_position_map, active_inst_idx_list, category, n_bm, enc_hidden=enc_hidden)
batch_hyp, batch_scores = self.collect_hypothesis_and_scores(inst_dec_beams, self.opt.get("topk", 1))
return batch_hyp, batch_scores
def translate_batch_NARFormer(self, encoder_outputs, category, tgt_tokens, tgt_vocab, teacher_encoder_outputs, tags):
from decoding.mask_predict import generate
with torch.no_grad():
return generate(
model=self.model,
teacher_model=self.teacher_model,
encoder_outputs=encoder_outputs,
teacher_encoder_outputs=teacher_encoder_outputs,
category=category,
tgt_tokens=tgt_tokens,
tgt_vocab=tgt_vocab,
opt=self.opt,
dict_mapping=self.dict_mapping,
length_bias=self.length_bias,
                tags=tags  # encoder_outputs.get(Constants.mapping['attr'][0], None) # tags
)
def translate_batch(self, encoder_outputs, category, tgt_tokens, tgt_vocab, teacher_encoder_outputs=None, tags=None):
if self.opt['decoder_type'] == 'NARFormer':
return self.translate_batch_NARFormer(encoder_outputs, category, tgt_tokens, tgt_vocab, teacher_encoder_outputs, tags=tags)
func_mapping = {
'LSTM': self.translate_batch_LSTM,
'ARFormer': self.translate_batch_ARFormer,
'ENSEMBLE': self.translate_batch_ENSEMBLE
}
return func_mapping[self.opt['decoder_type']](encoder_outputs, category)
class Translator_ensemble(object):
''' Load with trained model and handle the beam search '''
def __init__(self, model, opt, device=torch.device('cuda')):
self.model = model
assert isinstance(model, list)
for m in self.model:
m.eval()
self.opt = opt
self.device = device
def get_inst_idx_to_tensor_position_map(self, inst_idx_list):
''' Indicate the position of an instance in a tensor. '''
return {inst_idx: tensor_position for tensor_position, inst_idx in enumerate(inst_idx_list)}
def collect_active_part(self, beamed_tensor, curr_active_inst_idx, n_prev_active_inst, n_bm):
''' Collect tensor parts associated to active instances. '''
_, *d_hs = beamed_tensor.size()
n_curr_active_inst = len(curr_active_inst_idx)
new_shape = (n_curr_active_inst * n_bm, *d_hs)
#print('n_prev_active:', n_prev_active_inst)
#print('n_curr_active:', curr_active_inst_idx)
beamed_tensor = beamed_tensor.view(n_prev_active_inst, -1)
beamed_tensor = beamed_tensor.index_select(0, curr_active_inst_idx)
beamed_tensor = beamed_tensor.view(*new_shape)
return beamed_tensor
def collate_active_info(self, enc_output, inst_idx_to_position_map, active_inst_idx_list, category, n_bm, enc_hidden=None):
# Sentences which are still active are collected,
# so the decoder will not run on completed sentences.
n_prev_active_inst = len(inst_idx_to_position_map)
active_inst_idx = [inst_idx_to_position_map[k] for k in active_inst_idx_list]
active_inst_idx = torch.LongTensor(active_inst_idx).to(self.device)
if isinstance(enc_output, list):
active_src_enc = []
for item in enc_output:
tmp = []
for i in item:
tmp.append(self.collect_active_part(i, active_inst_idx, n_prev_active_inst, n_bm))
active_src_enc.append(tmp)
else:
active_src_enc = self.collect_active_part(enc_output, active_inst_idx, n_prev_active_inst, n_bm)
active_category = self.collect_active_part(category, active_inst_idx, n_prev_active_inst, n_bm)
if enc_hidden is not None:
if isinstance(enc_hidden, list):
active_hidden = []
for i in range(len(enc_hidden)):
if isinstance(enc_hidden[i], list):
ah = []
for j in range(len(enc_hidden[i])):
assert isinstance(enc_hidden[i][j], tuple)
tmp1 = self.collect_active_part(enc_hidden[i][j][0], active_inst_idx, n_prev_active_inst, n_bm)
tmp2 = self.collect_active_part(enc_hidden[i][j][1], active_inst_idx, n_prev_active_inst, n_bm)
ah.append((tmp1, tmp2))
active_hidden.append(ah)
else:
assert isinstance(enc_hidden[i], tuple)
tmp1 = self.collect_active_part(enc_hidden[i][0], active_inst_idx, n_prev_active_inst, n_bm)
tmp2 = self.collect_active_part(enc_hidden[i][1], active_inst_idx, n_prev_active_inst, n_bm)
active_hidden.append((tmp1, tmp2))
else:
assert isinstance(enc_hidden, tuple)
tmp1 = self.collect_active_part(enc_hidden[0], active_inst_idx, n_prev_active_inst, n_bm)
tmp2 = self.collect_active_part(enc_hidden[1], active_inst_idx, n_prev_active_inst, n_bm)
active_hidden = (tmp1, tmp2)
active_inst_idx_to_position_map = self.get_inst_idx_to_tensor_position_map(active_inst_idx_list)
if enc_hidden is None:
return active_src_enc, active_category, active_inst_idx_to_position_map
return active_src_enc, active_hidden, active_category, active_inst_idx_to_position_map
def collect_active_inst_idx_list(self, inst_beams, word_prob, inst_idx_to_position_map):
active_inst_idx_list = []
for inst_idx, inst_position in inst_idx_to_position_map.items():
is_inst_complete = inst_beams[inst_idx].advance(word_prob[inst_position])
if not is_inst_complete:
active_inst_idx_list += [inst_idx]
return active_inst_idx_list
def collect_hypothesis_and_scores(self, inst_dec_beams, n_best):
all_hyp, all_scores = [], []
for inst_idx in range(len(inst_dec_beams)):
scores, tk = inst_dec_beams[inst_idx].sort_finished(self.opt.get('beam_alpha', 1.0))
n_best = min(n_best, len(scores))
all_scores += [scores[:n_best]]
hyps = [inst_dec_beams[inst_idx].get_hypothesis_from_tk(t, k) for t, k in tk[:n_best]]
all_hyp += [hyps]
return all_hyp, all_scores
def translate_batch_ENSEMBLE(self, enc_output, enc_hidden, category):
''' Translation work in one batch '''
def beam_decode_step(
inst_dec_beams, enc_output, enc_hidden, inst_idx_to_position_map, n_bm, category):
''' Decode and update beam status, and then return active beam idx '''
def prepare_beam_dec_seq(inst_dec_beams):
dec_partial_seq = [b.get_lastest_state() for b in inst_dec_beams if not b.done]
dec_partial_seq = torch.stack(dec_partial_seq).to(self.device)
dec_partial_seq = dec_partial_seq.view(-1)
#print(dec_partial_seq)
return dec_partial_seq
def predict_word(dec_seq, enc_output, enc_hidden, n_active_inst, n_bm, category):
word_prob = []
for i in range(len(enc_output)):
res = self.model[i].decoder(
it=dec_seq,
encoder_outputs=enc_output[i],
category=category,
decoder_hidden=enc_hidden[i]
)
dec_output, enc_hidden[i] = res['dec_outputs'], res['dec_hidden']
tmp = F.log_softmax(self.model[i].tgt_word_prj(dec_output), dim=1)
tmp = tmp.view(n_active_inst, n_bm, -1)
word_prob.append(tmp)
word_prob = torch.stack(word_prob, dim=0).mean(0)
return word_prob, enc_hidden
def collect_active_hidden_single(inst_beams, inst_idx_to_position_map, enc_hidden, n_bm):
if isinstance(enc_hidden, tuple):
tmp1, tmp2 = enc_hidden
_, *d_hs = tmp1.size()
n_curr_active_inst = len(inst_idx_to_position_map)
new_shape = (n_curr_active_inst * n_bm, *d_hs)
tmp1 = tmp1.view(n_curr_active_inst, n_bm, -1)
tmp2 = tmp2.view(n_curr_active_inst, n_bm, -1)
#print('hidden:', tmp1)
for inst_idx, inst_position in inst_idx_to_position_map.items():
_prev_ks = inst_beams[inst_idx].get_current_origin()
tmp1[inst_position] = tmp1[inst_position].index_select(0, _prev_ks)
tmp2[inst_position] = tmp2[inst_position].index_select(0, _prev_ks)
#print("PREV_KS:", _prev_ks)
#print('after h:', tmp1)
tmp1 = tmp1.view(*new_shape)
tmp2 = tmp2.view(*new_shape)
enc_hidden = (tmp1, tmp2)
else:
_, *d_hs = enc_hidden.size()
n_curr_active_inst = len(inst_idx_to_position_map)
new_shape = (n_curr_active_inst * n_bm, *d_hs)
enc_hidden = enc_hidden.view(n_curr_active_inst, n_bm, -1)
for inst_idx, inst_position in inst_idx_to_position_map.items():
_prev_ks = inst_beams[inst_idx].get_current_origin()
enc_hidden[inst_position] = enc_hidden[inst_position].index_select(0, _prev_ks)
enc_hidden = enc_hidden.view(*new_shape)
return enc_hidden
def collect_active_hidden(inst_beams, inst_idx_to_position_map, enc_hidden, n_bm):
if enc_hidden is None:
return None
if isinstance(enc_hidden, list):
hidden = []
for item in enc_hidden:
hidden.append(collect_active_hidden_single(inst_beams, inst_idx_to_position_map, item, n_bm))
else:
hidden = collect_active_hidden_single(inst_beams, inst_idx_to_position_map, enc_hidden, n_bm)
return hidden
n_active_inst = len(inst_idx_to_position_map)
dec_seq = prepare_beam_dec_seq(inst_dec_beams)
#print(dec_seq)
#print('before:', enc_hidden[0])
word_prob, enc_hidden = predict_word(dec_seq, enc_output, enc_hidden, n_active_inst, n_bm, category)
#print('after:', enc_hidden[0])
# Update the beam with predicted word prob information and collect incomplete instances
active_inst_idx_list = self.collect_active_inst_idx_list(
inst_dec_beams, word_prob, inst_idx_to_position_map)
#print(type(enc_hidden))
#print(type(enc_hidden[0]))
#print(type(enc_hidden[0][0]))
#print(type(enc_hidden[0][0][0]))
enc_hidden = [collect_active_hidden(inst_dec_beams, inst_idx_to_position_map, item, n_bm) for item in enc_hidden]
return active_inst_idx_list, enc_hidden
with torch.no_grad():
assert isinstance(enc_output, list)
assert isinstance(enc_hidden, list)
assert len(enc_output) == len(self.model)
assert len(enc_output) == len(enc_hidden)
for i in range(len(enc_output)):
if not isinstance(enc_output[i], list):
enc_output[i] = [enc_output[i]]
n_bm = self.opt["beam_size"]
n_inst, len_s, d_h = enc_output[0][0].size()
#-- Repeat data for beam search
category = category.unsqueeze(1).repeat(1, n_bm, 1).view(n_inst * n_bm, self.opt['num_category'])
enc_output = [[tmp.repeat(1, n_bm, 1).view(n_inst * n_bm, len_s, d_h) for tmp in item] for item in enc_output]
for i in range(len(enc_hidden)):
if isinstance(enc_hidden[i], tuple):
enc_hidden[i] = (enc_hidden[i][0].unsqueeze(1).repeat(1, n_bm, 1).view(n_inst * n_bm, d_h),
enc_hidden[i][1].unsqueeze(1).repeat(1, n_bm, 1).view(n_inst * n_bm, d_h))
else:
enc_hidden[i] = enc_hidden[i].unsqueeze(1).repeat(1, n_bm, 1).view(n_inst * n_bm, d_h)
#-- initialize hidden state
for i in range(len(enc_output)):
enc_hidden[i] = self.model[i].decoder.init_hidden(enc_hidden[i])
#-- Prepare beams
inst_dec_beams = [Beam(n_bm, self.opt["max_len"], device=self.device) for _ in range(n_inst)]
#-- Bookkeeping for active or not
active_inst_idx_list = list(range(n_inst))
inst_idx_to_position_map = self.get_inst_idx_to_tensor_position_map(active_inst_idx_list)
#-- Decode
for t in range(1, self.opt["max_len"]):
active_inst_idx_list, enc_hidden = beam_decode_step(
inst_dec_beams, enc_output, enc_hidden, inst_idx_to_position_map, n_bm, category)
if not active_inst_idx_list:
break # all instances have finished their path to <EOS>
enc_output, enc_hidden, category, inst_idx_to_position_map = self.collate_active_info(
enc_output, inst_idx_to_position_map, active_inst_idx_list, category, n_bm, enc_hidden=enc_hidden)
batch_hyp, batch_scores = self.collect_hypothesis_and_scores(inst_dec_beams, self.opt.get("topk", 1))
return batch_hyp, batch_scores
def translate_batch(self, enc_output, enc_hidden, category):
return self.translate_batch_ENSEMBLE(enc_output, enc_hidden, category)
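# ---------------------------------------------------------------------------
# A minimal driving sketch for the classes above (assumptions: `model`,
# `encoder_outputs` and `category` come from the surrounding VideoCaptioning
# project; only the opt keys the ARFormer path reads are shown, with
# illustrative values):
#
#   opt = {'decoder_type': 'ARFormer', 'beam_size': 5, 'max_len': 30,
#          'topk': 1, 'beam_alpha': 1.0}
#   translator = Translator(model, opt, device=torch.device('cuda'))
#   # encoder_outputs: {'enc_output': Tensor of shape (batch, seq_len, d_model)}
#   # category:        LongTensor of shape (batch, 1)
#   batch_hyp, batch_scores = translator.translate_batch(
#       encoder_outputs, category, tgt_tokens=None, tgt_vocab=None)
# ---------------------------------------------------------------------------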
======================================== record ========================================
hexsha: fc80cd9b1168d91476c56530f47badf2d21f3739 | size: 4,112 | ext: py | lang: Python
path: openstack/plugins/healthmonitor.py
repo: MilkBotttle/systex_rally @ 38da1fd019bf64af6ea391ec58417b36cbd2ed19 [Apache-2.0]
stars: n/a | issues: n/a | forks: n/a
=========================================================================================
from rally.task import validation
from rally_openstack import consts
from rally_openstack import scenario
from rally_openstack.scenarios.octavia import utils as octavia_utils
@validation.add("required_services", services=[consts.Service.OCTAVIA])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["octavia"]},
name="Octavia.healthmonitors_list",
platform="openstack")
class HealthMonitorsList(octavia_utils.OctaviaBase):
def run(self):
self.octavia.health_monitor_list()
@validation.add("required_services", services=[consts.Service.OCTAVIA])
@validation.add("required_platform", platform="openstack", users=True)
@validation.add("required_contexts", contexts=["network"])
@scenario.configure(context={"cleanup@openstack": ["octavia"]},
name="Octavia.create_and_delete_healthmonitors",
platform="openstack")
class CreateAndDeleteHealthMonitors(octavia_utils.OctaviaBase):
def run(self, protocol="HTTP", monitor_type="PING", lb_algorithm="ROUND_ROBIN"):
subnets = []
loadbalancers = []
pools = []
networks = self.context.get("tenant", {}).get("networks", [])
for network in networks:
subnets.extend(network.get("subnets", []))
for subnet_id in subnets:
lb = self.octavia.load_balancer_create(subnet_id)
loadbalancers.append(lb)
for loadbalancer in loadbalancers:
self.octavia.wait_for_loadbalancer_prov_status(loadbalancer)
pool = self.octavia.pool_create(
lb_id=loadbalancer["id"],
protocol=protocol, lb_algorithm="ROUND_ROBIN")
pools.append(pool)
for p in pools:
monitor_args = {
"pool_id" : p["id"],
"delay": 1,
"timeout": 30,
"type": monitor_type.upper(),
"max_retries": 10
}
hm = self.octavia.health_monitor_create(json={"healthmonitor": monitor_args})
self.octavia.wait_for_loadbalancer_prov_status(loadbalancer)
self.octavia.health_monitor_delete(hm["healthmonitor"]["id"])
@validation.add("required_services", services=[consts.Service.OCTAVIA])
@validation.add("required_platform", platform="openstack", users=True)
@validation.add("required_contexts", contexts=["network"])
@scenario.configure(context={"cleanup@openstack": ["octavia"]},
name="Octavia.create_and_update_healthmonitors",
platform="openstack")
class CreateAndUpdateHealthMonitors(octavia_utils.OctaviaBase):
def run(self, protocol="HTTP", monitor_type="PING", lb_algorithm="ROUND_ROBIN"):
subnets = []
loadbalancers = []
pools = []
networks = self.context.get("tenant", {}).get("networks", [])
for network in networks:
subnets.extend(network.get("subnets", []))
for subnet_id in subnets:
lb = self.octavia.load_balancer_create(subnet_id)
loadbalancers.append(lb)
for loadbalancer in loadbalancers:
self.octavia.wait_for_loadbalancer_prov_status(loadbalancer)
pool = self.octavia.pool_create(
lb_id=loadbalancer["id"],
protocol=protocol, lb_algorithm="ROUND_ROBIN")
pools.append(pool)
for p in pools:
monitor_args = {
"pool_id" : p["id"],
"delay": 1,
"timeout": 30,
"type": monitor_type.upper(),
"max_retries": 10
}
hm = self.octavia.health_monitor_create(json={"healthmonitor": monitor_args})
self.octavia.wait_for_loadbalancer_prov_status(loadbalancer)
set_args = {
"delay": 10,
"timeout": 60
}
self.octavia.health_monitor_set(health_monitor_id=hm["healthmonitor"]["id"],
json={"healthmonitor": set_args})
======================================== record ========================================
hexsha: 5d8db93bac86e826ed810233823c76e4d3190204 | size: 245 | ext: py | lang: Python
path: dvd/backend/dvd/core/signals.py
repo: migue559/djangoVueDocker @ c05e64e3699895ef691eb73c435cd815efbd0529 [MIT]
stars: n/a | issues: n/a | forks: n/a
=========================================================================================
from django.db.models.signals import post_save
from graphene_subscriptions.signals import post_save_subscription
from .models import User
post_save.connect(post_save_subscription, sender=User, dispatch_uid="User_post_save")
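# A hedged sketch of the consumer side: a subscription that would receive the
# events published by the connect() call above, following the pattern
# documented by graphene-subscriptions. `UserType` is a hypothetical graphene
# type for the User model, so the sketch is left commented out:
#
#   import graphene
#   from graphene_subscriptions.events import CREATED
#
#   class Subscription(graphene.ObjectType):
#       user_created = graphene.Field(UserType)
#
#       def resolve_user_created(root, info):
#           return root.filter(
#               lambda event: event.operation == CREATED
#               and isinstance(event.instance, User)
#           ).map(lambda event: event.instance)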
======================================== record ========================================
hexsha: 5db0d22ff170109333375f1275bb95ef515a88d0 | size: 690 | ext: py | lang: Python
path: src/utils/cheatengine_communication.py
repo: ike753z/BLRE-Server-Info-Discord-Bot @ 38af42bfaa7424ce6e2b5d7b3feed7b0f03a0a07 [MIT]
stars: n/a | issues: n/a | forks: n/a
=========================================================================================
import struct


def _send_lua(lua):
    """Pack a Lua call into the pipe protocol and send it to the game.

    Wire format "<bi{n}sq": a leading byte set to 1, the payload length,
    the UTF-8 Lua string itself, and a trailing 8-byte zero.
    """
    # dev pipe: r'\\.\pipe\blreviveDev'
    lua_bytes = bytes(lua, 'utf-8')
    csz = len(lua_bytes)
    tosend = struct.pack("<bi" + str(csz) + "sq", 1, csz, lua_bytes, 0)
    with open(r'\\.\pipe\blrevive', 'wb') as f:
        f.write(tosend)


def scan_players(totalPlayers):
    _send_lua("Update30Seconds({})".format(totalPlayers))


def update_loadouts():
    _send_lua("Update5Seconds()")
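# Example calls (sketch; values are illustrative):
#   scan_players(12)    # push the 30-second update for a 12-player server
#   update_loadouts()   # trigger the 5-second loadout refresh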
======================================== record ========================================
hexsha: 5dbea84112e800b56c3ffe1e3aed4c25c79a167c | size: 143 | ext: py | lang: Python
path: code/np.py
repo: pinzhongchuangxin/opencv @ 4b7a406cff32f0e3bff4dcd6b353df6a61861b42 [MIT]
stars: n/a | issues: n/a | forks: n/a
=========================================================================================
from th_test import tt
import time
tt.test_2()
while True:
# print(tt.arr)
print(tt.arr[0])
time.sleep(1)
    # print(tt.arr)
======================================== record ========================================
hexsha: b90783695e46e638324d6c1e8a2a014e35dfefce | size: 45 | ext: py | lang: Python
path: profile_generator/log/__init__.py
repo: nethy/profile-generator @ 9bc54bed36b84b45902d75a273739480b4ff2204 [MIT]
stars: n/a | issues: n/a | forks: n/a
=========================================================================================
from .config import get_console_logger, init
======================================== record ========================================
hexsha: f8d58f0cff17bbf9301d43598ba2ca7dd481d227 | size: 53,026 | ext: py | lang: Python
path: bot/telebot.py
repo: fossabot/devopshelper_bot @ f0ed63a428416c3b1e4318025cbc97f69c4be249 [BSD-3-Clause]
stars: n/a | issues: n/a | forks: n/a
=========================================================================================
import logging
import sys
import datetime
import configparser
import argparse
import inspect
import re
import shlex
from mwt import MWT
from dbhelper import DBHelper
from telegram import InlineKeyboardMarkup, InlineKeyboardButton, ReplyKeyboardRemove, ChatPermissions
from telegram.ext import MessageHandler, Filters, CommandHandler, Updater, CallbackQueryHandler
from telegram.ext.dispatcher import run_async
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
logger = logging.getLogger(__name__)
db = DBHelper()
parser = argparse.ArgumentParser(description='Bot for helping in administration in DevOps groups in TG')
parser.add_argument("-b","--bottoken", dest="bottoken", type=str,default="1231423",help="Bot token for TG API")
parser.add_argument("-e","--environment", dest="environment", type=str,default="config.ini",help="Environment for bot")
args = parser.parse_args()
bottoken = args.bottoken
environment = args.environment
config = configparser.ConfigParser()
# config.read('config.ini')
config.read(environment)
# bottoken= str(sys.argv[1])
updater = Updater(token=bottoken, use_context=True)
dispatcher = updater.dispatcher
# Get admins list
@MWT(timeout=60*60)
def get_admin_ids(context, chat_id):
"""Returns a list of admin IDs for a given chat. Results are cached for 1 hour."""
return [admin.user.id for admin in context.bot.get_chat_administrators(chat_id)]
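# MWT here appears to be the classic "memoize with timeout" decorator recipe
# used in the python-telegram-bot snippets: results are cached per argument
# tuple, so admin checks in busy chats do not call get_chat_administrators
# for every message, and the 60*60 second timeout refreshes the list hourly.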
# User commands
## Send user test tasks
def tasks(update, context):
section = str(update.message.chat.id)
try:
username = re.sub("[_]", "\_", update.message.from_user.username)
except TypeError:
username = None
in_section = section in config.sections()
command_name = inspect.currentframe().f_code.co_name
feature_flag = config.get(section, command_name) == 'on'
github_url = config.get(section, 'github_url')
user_id = update.message.from_user.id
first_name = re.sub("[_]", "\_", update.message.from_user.first_name)
first_name = re.sub("[*]", "\*", first_name)
first_name = re.sub("[`]", "\`", first_name)
first_name = re.sub("[[]", "\[", first_name)
if in_section and feature_flag:
try:
context.bot.send_message(chat_id=update.message.chat_id, text="@" + username + \
" here is your " + "[DevOps tasks]" + "(" + github_url + ")" + "." \
,parse_mode='Markdown', disable_web_page_preview=True)
context.bot.deleteMessage(chat_id=update.message.chat.id, message_id=update.message.message_id)
except TypeError:
context.bot.send_message(chat_id=update.message.chat_id, text="[" + first_name + "](tg://user?id=" + str(user_id) + ")" + \
" here is your " + "[DevOps tasks]" + "(" + github_url + ")" + "." \
, parse_mode='Markdown', disable_web_page_preview=True)
context.bot.deleteMessage(chat_id=update.message.chat.id, message_id=update.message.message_id)
tasks_handler = CommandHandler('tasks', tasks, run_async=True)
dispatcher.add_handler(tasks_handler)
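# The username/first_name re.sub chains above are repeated almost verbatim in
# every handler below. A hedged refactor sketch (escape_md is a name invented
# here, not part of this bot): the four substitutions collapse to
#     def escape_md(text):
#         return re.sub(r"([_*`\[])", r"\\\1", text)
# Note the replacement literals such as "\_" would also be cleaner as raw
# strings (r"\_"); as plain strings they trigger invalid-escape warnings on
# newer Python versions.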
## Send user starter kit
def starter(update, context):
section = str(update.message.chat.id)
try:
username = re.sub("[_]", "\_", update.message.from_user.username)
except TypeError:
username = None
in_section = section in config.sections()
command_name = inspect.currentframe().f_code.co_name
feature_flag = config.get(section, command_name) == 'on'
root_url = config.get('shared','root_url')
channel_specify = config.get(section, 'study')
starter = config.get(section, 'starter_filename')
url = root_url + channel_specify + "/" + starter
user_id = update.message.from_user.id
first_name = re.sub("[_]", "\_", update.message.from_user.first_name)
first_name = re.sub("[*]", "\*", first_name)
first_name = re.sub("[`]", "\`", first_name)
first_name = re.sub("[[]", "\[", first_name)
if in_section and feature_flag:
if update.message.reply_to_message is not None:
context.bot.send_message(chat_id=update.message.chat_id,text= "We have " + "[starter kit for newbies]" + "(" + url + ")" + \
". Also we have test tasks for SRE and list of various courses. You can find them in the same repository or via bot (see man for commands).", \
parse_mode='Markdown', reply_to_message_id=update.message.reply_to_message.message_id)
context.bot.deleteMessage(chat_id=update.message.chat.id, message_id=update.message.message_id)
else:
try:
context.bot.send_message(chat_id=update.message.chat_id, text="@" + username + \
" here is your " + "[starter kit]" + "(" + url + ")" + "." \
,parse_mode='Markdown', disable_web_page_preview=True)
context.bot.deleteMessage(chat_id=update.message.chat.id, message_id=update.message.message_id)
except TypeError:
context.bot.send_message(chat_id=update.message.chat_id, text="[" + first_name + "](tg://user?id=" + str(user_id) + ")" + \
" here is your " + "[starter kit]" + "(" + url + ")" + "." \
, parse_mode='Markdown', disable_web_page_preview=True)
context.bot.deleteMessage(chat_id=update.message.chat.id, message_id=update.message.message_id)
starter_handler = CommandHandler('starter', starter, run_async=True)
dispatcher.add_handler(starter_handler)
## Send user middle kit
def middle(update, context):
section = str(update.message.chat.id)
try:
username = re.sub("[_]", "\_", update.message.from_user.username)
except TypeError:
username = None
in_section = section in config.sections()
command_name = inspect.currentframe().f_code.co_name
feature_flag = config.get(section, command_name) == 'on'
root_url = config.get('shared','root_url')
channel_specify = config.get(section, 'study')
middle = config.get(section, 'middle_filename')
url = root_url + channel_specify + "/" + middle
user_id = update.message.from_user.id
first_name = re.sub("[_]", "\_", update.message.from_user.first_name)
first_name = re.sub("[*]", "\*", first_name)
first_name = re.sub("[`]", "\`", first_name)
first_name = re.sub("[[]", "\[", first_name)
if in_section and feature_flag:
try:
context.bot.send_message(chat_id=update.message.chat_id, text="@" + username + \
" here is your " + "[middle kit]" + "(" + url + ")" + "." \
, parse_mode='Markdown', disable_web_page_preview=True)
context.bot.deleteMessage(chat_id=update.message.chat.id, message_id=update.message.message_id)
except TypeError:
context.bot.send_message(chat_id=update.message.chat_id, text="[" + first_name + "](tg://user?id=" + str(user_id) + ")" + \
" here is your " + "[middle kit]" + "(" + url + ")" + "." \
, parse_mode='Markdown', disable_web_page_preview=True)
context.bot.deleteMessage(chat_id=update.message.chat.id, message_id=update.message.message_id)
middle_handler = CommandHandler('middle', middle, run_async=True)
dispatcher.add_handler(middle_handler)
## Send user HR man
def hrman(update, context):
section = str(update.message.chat.id)
try:
username = re.sub("[_]", "\_", update.message.from_user.username)
except TypeError:
username = None
in_section = section in config.sections()
command_name = inspect.currentframe().f_code.co_name
feature_flag = config.get(section, command_name) == 'on'
root_url = config.get('shared','root_url')
hrman = config.get('shared', 'hrman_filename')
url = root_url + hrman
user_id = update.message.from_user.id
first_name = re.sub("[_]", "\_", update.message.from_user.first_name)
first_name = re.sub("[*]", "\*", first_name)
first_name = re.sub("[`]", "\`", first_name)
first_name = re.sub("[[]", "\[", first_name)
if in_section and feature_flag:
if update.message.reply_to_message is not None:
context.bot.send_message(chat_id=update.message.chat_id,text= "We have " + "[HR man]" + "(" + url + ")" + \
". Please read it carefully.", \
parse_mode='Markdown', reply_to_message_id=update.message.reply_to_message.message_id, disable_web_page_preview=True)
context.bot.deleteMessage(chat_id=update.message.chat.id, message_id=update.message.message_id)
else:
try:
context.bot.send_message(chat_id=update.message.chat_id, text="@" + username + \
" here is your " + "[HR man]" + "(" + url + ")" + "." \
, parse_mode='Markdown', disable_web_page_preview=True)
context.bot.deleteMessage(chat_id=update.message.chat.id, message_id=update.message.message_id)
except TypeError:
context.bot.send_message(chat_id=update.message.chat_id, text="[" + first_name + "](tg://user?id=" + str(user_id) + ")" + \
" here is your " + "[HR man]" + "(" + url + ")" + "." \
, parse_mode='Markdown', disable_web_page_preview=True)
context.bot.deleteMessage(chat_id=update.message.chat.id, message_id=update.message.message_id)
hrman_handler = CommandHandler('hrman', hrman, run_async=True)
dispatcher.add_handler(hrman_handler)
## Send user tips for certifications
def cert(update, context):
section = str(update.message.chat.id)
try:
username = re.sub("[_]", "\_", update.message.from_user.username)
except TypeError:
username = None
in_section = section in config.sections()
command_name = inspect.currentframe().f_code.co_name
feature_flag = config.get(section, command_name) == 'on'
root_url = config.get('shared','root_url')
channel_specify = config.get(section, 'study')
certification = config.get(section, 'certification_filename')
url = root_url + channel_specify + "/" + certification
user_id = update.message.from_user.id
first_name = re.sub("[_]", "\_", update.message.from_user.first_name)
first_name = re.sub("[*]", "\*", first_name)
first_name = re.sub("[`]", "\`", first_name)
first_name = re.sub("[[]", "\[", first_name)
if in_section and feature_flag:
try:
context.bot.send_message(chat_id=update.message.chat_id, text="@" + username + \
" here is your " + "[certification tips]" + "(" + url + ")" + "." \
, parse_mode='Markdown', disable_web_page_preview=True)
context.bot.deleteMessage(chat_id=update.message.chat.id, message_id=update.message.message_id)
except TypeError:
context.bot.send_message(chat_id=update.message.chat_id, text="[" + first_name + "](tg://user?id=" + str(user_id) + ")" + \
" here is your " + "[certification tips]" + "(" + url + ")" + "." \
, parse_mode='Markdown', disable_web_page_preview=True)
context.bot.deleteMessage(chat_id=update.message.chat.id, message_id=update.message.message_id)
cert_handler = CommandHandler('cert', cert, run_async=True)
dispatcher.add_handler(cert_handler)
## Send user list of various courses
def course(update, context):
section = str(update.message.chat.id)
try:
username = re.sub("[_]", "\_", update.message.from_user.username)
except TypeError:
username = None
in_section = section in config.sections()
command_name = inspect.currentframe().f_code.co_name
feature_flag = config.get(section, command_name) == 'on'
root_url = config.get('shared','root_url')
channel_specify = config.get('shared', 'course_folder')
certification = config.get('shared', 'course_filename')
url = root_url + channel_specify + "/" + certification
user_id = update.message.from_user.id
first_name = re.sub("[_]", "\_", update.message.from_user.first_name)
first_name = re.sub("[*]", "\*", first_name)
first_name = re.sub("[`]", "\`", first_name)
first_name = re.sub("[[]", "\[", first_name)
if in_section and feature_flag:
try:
context.bot.send_message(chat_id=update.message.chat_id, text="@" + username + \
" here is your " + "[courses list]" + "(" + url + ")" + "." \
, parse_mode='Markdown', disable_web_page_preview=True)
context.bot.deleteMessage(chat_id=update.message.chat.id, message_id=update.message.message_id)
except TypeError:
context.bot.send_message(chat_id=update.message.chat_id, text="[" + first_name + "](tg://user?id=" + str(user_id) + ")" + \
" here is your " + "[courses list]" + "(" + url + ")" + "." \
, parse_mode='Markdown', disable_web_page_preview=True)
context.bot.deleteMessage(chat_id=update.message.chat.id, message_id=update.message.message_id)
course_handler = CommandHandler('course', course, run_async=True)
dispatcher.add_handler(course_handler)
## Send user list of various relocate chats
def relocate(update, context):
section = str(update.message.chat.id)
try:
username = re.sub("[_]", "\_", update.message.from_user.username)
except TypeError:
username = None
in_section = section in config.sections()
command_name = inspect.currentframe().f_code.co_name
feature_flag = config.get(section, command_name) == 'on'
root_url = config.get('shared','root_url')
relocate = config.get('shared', 'relocate_filename')
url = root_url + relocate
user_id = update.message.from_user.id
first_name = re.sub("[_]", "_", update.message.from_user.first_name)
first_name = re.sub("[*]", "\*", first_name)
first_name = re.sub("[`]", "\`", first_name)
first_name = re.sub("[[]", "\[", first_name)
if in_section and feature_flag:
try:
context.bot.send_message(chat_id=update.message.chat_id, text="@" + username + \
" here is your " + "[relocate chats and channels]" + "(" + url + ")" + "." \
, parse_mode='Markdown', disable_web_page_preview=True)
context.bot.deleteMessage(chat_id=update.message.chat.id, message_id=update.message.message_id)
except TypeError:
context.bot.send_message(chat_id=update.message.chat_id, text="[" + first_name + "](tg://user?id=" + str(user_id) + ")" + \
" here is your " + "[relocate chats and channels]" + "(" + url + ")" + "." \
, parse_mode='Markdown', disable_web_page_preview=True)
context.bot.deleteMessage(chat_id=update.message.chat.id, message_id=update.message.message_id)
relocate_handler = CommandHandler('relocate', relocate, run_async=True)
dispatcher.add_handler(relocate_handler)
## Send user Code of Conduct
def coc(update, context):
section = str(update.message.chat.id)
try:
username = re.sub("[_]", "\_", update.message.from_user.username)
except TypeError:
username = None
in_section = section in config.sections()
command_name = inspect.currentframe().f_code.co_name
feature_flag = config.get(section, command_name) == 'on'
root_url = config.get('shared','root_url')
coc = config.get('shared', 'coc_filename')
url = root_url + coc
user_id = update.message.from_user.id
first_name = re.sub("[_]", "\_", update.message.from_user.first_name)
first_name = re.sub("[*]", "\*", first_name)
first_name = re.sub("[`]", "\`", first_name)
first_name = re.sub("[[]", "\[", first_name)
if in_section and feature_flag:
try:
context.bot.send_message(chat_id=update.message.chat_id, text="@" + username + \
" here is your " + "[code of conduct]" + "(" + url + ")" + "." \
, parse_mode='Markdown', disable_web_page_preview=True)
context.bot.deleteMessage(chat_id=update.message.chat.id, message_id=update.message.message_id)
except TypeError:
context.bot.send_message(chat_id=update.message.chat_id, text="[" + first_name + "](tg://user?id=" + str(user_id) + ")" + \
" here is your " + "[code of conduct]" + "(" + url + ")" + "." \
, parse_mode='Markdown', disable_web_page_preview=True)
context.bot.deleteMessage(chat_id=update.message.chat.id, message_id=update.message.message_id)
coc_handler = CommandHandler('coc', coc, run_async=True)
dispatcher.add_handler(coc_handler)
## Send user job opportunity and cv publish rules
def work(update, context):
section = str(update.message.chat.id)
try:
username = re.sub("[_]", "\_", update.message.from_user.username)
except TypeError:
username = None
in_section = section in config.sections()
command_name = inspect.currentframe().f_code.co_name
feature_flag = config.get(section, command_name) == 'on'
root_url = config.get('shared','root_url')
coc = config.get('shared', 'jobs_filename')
url = root_url + coc
user_id = update.message.from_user.id
first_name = re.sub("[_]", "\_", update.message.from_user.first_name)
first_name = re.sub("[*]", "\*", first_name)
first_name = re.sub("[`]", "\`", first_name)
first_name = re.sub("[[]", "\[", first_name)
if in_section and feature_flag:
if update.message.reply_to_message is not None:
context.bot.send_message(chat_id=update.message.chat_id,text= "We have " + "[job opportunities and cv publish rules]" + "(" + url + ")" + \
". Please read them carefully and follow them.", \
parse_mode='Markdown', reply_to_message_id=update.message.reply_to_message.message_id, disable_web_page_preview=True)
context.bot.deleteMessage(chat_id=update.message.chat.id, message_id=update.message.message_id)
else:
try:
context.bot.send_message(chat_id=update.message.chat_id, text="@" + username + \
" here is your " + "[job opportunities and cv publish rules]" + "(" + url + ")" + "." \
, parse_mode='Markdown', disable_web_page_preview=True)
context.bot.deleteMessage(chat_id=update.message.chat.id, message_id=update.message.message_id)
except TypeError:
context.bot.send_message(chat_id=update.message.chat_id, text="[" + first_name + "](tg://user?id=" + str(user_id) + ")" + \
" here is your " + "[job opportunities and cv publish rules]" \
+ "(" + url + ")" + ".", parse_mode='Markdown', disable_web_page_preview=True)
context.bot.deleteMessage(chat_id=update.message.chat.id, message_id=update.message.message_id)
work_handler = CommandHandler('work', work, run_async=True)
dispatcher.add_handler(work_handler)
## Send user advertising rules
def ad(update, context):
section = str(update.message.chat.id)
try:
username = re.sub("[_]", "\_", update.message.from_user.username)
except TypeError:
username = None
in_section = section in config.sections()
command_name = inspect.currentframe().f_code.co_name
feature_flag = config.get(section, command_name) == 'on'
root_url = config.get('shared','root_url')
coc = config.get('shared', 'advertising_filename')
url = root_url + coc
user_id = update.message.from_user.id
first_name = re.sub("[_]", "\_", update.message.from_user.first_name)
first_name = re.sub("[*]", "\*", first_name)
first_name = re.sub("[`]", "\`", first_name)
first_name = re.sub("[[]", "\[", first_name)
if in_section and feature_flag:
try:
context.bot.send_message(chat_id=update.message.chat_id, text="@" + username + \
" here is your " + "[advertising publish rules]" + "(" + url + ")" + "." \
, parse_mode='Markdown', disable_web_page_preview=True)
context.bot.deleteMessage(chat_id=update.message.chat.id, message_id=update.message.message_id)
except TypeError:
context.bot.send_message(chat_id=update.message.chat_id, text="[" + first_name + "](tg://user?id=" + str(user_id) + ")" + \
" here is your " + "[advertising publish rules]" \
+ "(" + url + ")" + ".", parse_mode='Markdown', disable_web_page_preview=True)
context.bot.deleteMessage(chat_id=update.message.chat.id, message_id=update.message.message_id)
ad_handler = CommandHandler('ad', ad, run_async=True)
dispatcher.add_handler(ad_handler)
## Send user list of friendly chats
def chats(update, context):
section = str(update.message.chat.id)
try:
username = re.sub("[_]", "\_", update.message.from_user.username)
except TypeError:
username = None
in_section = section in config.sections()
command_name = inspect.currentframe().f_code.co_name
feature_flag = config.get(section, command_name) == 'on'
root_url = config.get('shared','root_url')
coc = config.get('shared', 'othet_chats')
url = root_url + coc
user_id = update.message.from_user.id
first_name = re.sub("[_]", "\_", update.message.from_user.first_name)
first_name = re.sub("[*]", "\*", first_name)
first_name = re.sub("[`]", "\`", first_name)
first_name = re.sub("[[]", "\[", first_name)
if in_section and feature_flag:
try:
context.bot.send_message(chat_id=update.message.chat_id, text="@" + username + \
" here is your " + "[friendly chats list]" + "(" + url + ")" + "." \
, parse_mode='Markdown', disable_web_page_preview=True)
context.bot.deleteMessage(chat_id=update.message.chat.id, message_id=update.message.message_id)
except TypeError:
context.bot.send_message(chat_id=update.message.chat_id, text="[" + first_name + "](tg://user?id=" + str(user_id) + ")" + \
" here is your " + "[friendly chats list]" \
+ "(" + url + ")" + ".", parse_mode='Markdown', disable_web_page_preview=True)
context.bot.deleteMessage(chat_id=update.message.chat.id, message_id=update.message.message_id)
chats_handler = CommandHandler('chats', chats, run_async=True)
dispatcher.add_handler(chats_handler)
## Send user events rules
def events(update, context):
section = str(update.message.chat.id)
try:
username = re.sub("[_]", "\_", update.message.from_user.username)
except TypeError:
username = None
in_section = section in config.sections()
command_name = inspect.currentframe().f_code.co_name
feature_flag = config.get(section, command_name) == 'on'
root_url = config.get('shared','root_url')
coc = config.get('shared', 'events_list')
url = root_url + coc
user_id = update.message.from_user.id
first_name = re.sub("[_]", "\_", update.message.from_user.first_name)
first_name = re.sub("[*]", "\*", first_name)
first_name = re.sub("[`]", "\`", first_name)
first_name = re.sub("[[]", "\[", first_name)
if in_section and feature_flag:
try:
context.bot.send_message(chat_id=update.message.chat_id, text="@" + username + \
" here is your " + "[events list]" + "(" + url + ")" + "." \
, parse_mode='Markdown', disable_web_page_preview=True)
context.bot.deleteMessage(chat_id=update.message.chat.id, message_id=update.message.message_id)
except TypeError:
context.bot.send_message(chat_id=update.message.chat_id, text="[" + first_name + "](tg://user?id=" + str(user_id) + ")" + \
" here is your " + "[events list]" \
+ "(" + url + ")" + ".", parse_mode='Markdown', disable_web_page_preview=True)
context.bot.deleteMessage(chat_id=update.message.chat.id, message_id=update.message.message_id)
events_handler = CommandHandler('events', events, run_async=True)
dispatcher.add_handler(events_handler)
## User sends a report message to admins
def report(update, context):
section = str(update.message.chat.id)
in_section = section in config.sections()
command_name = inspect.currentframe().f_code.co_name
feature_flag = config.get(section, command_name) == 'on'
chat_id = str(update.message.chat_id)
user_id = str(update.message.reply_to_message.from_user.id)
message_id = str(update.message.reply_to_message.message_id)
keyboard = [
[InlineKeyboardButton('burn spam', callback_data='spam ' + chat_id + ' ' + user_id + ' ' + message_id)]
]
reply_markup = InlineKeyboardMarkup(keyboard)
if in_section and feature_flag:
try:
context.bot.send_message(chat_id=update.message.chat_id, text="Report on [spam message]("+ "https://t.me/" + str(update.message.chat.username) + "/" \
+ str(update.message.reply_to_message.message_id) + ")" + " was send to admins. Please be patient.", \
disable_web_page_preview=True, parse_mode='Markdown')
context.bot.deleteMessage(chat_id=update.message.chat.id, message_id=update.message.message_id)
context.bot.forward_message(chat_id=config.get(section, 'admin_chat'), from_chat_id=update.message.chat_id, message_id=update.message.reply_to_message.message_id)
context.bot.send_message(chat_id=config.get(section, 'admin_chat'), text="User think it's spam: " + "https://t.me/"+ str(update.message.chat.username)+"/" \
+ str(update.message.reply_to_message.message_id), disable_web_page_preview=True, reply_markup=reply_markup)
except AttributeError:
context.bot.deleteMessage(chat_id=update.message.chat.id, message_id=update.message.message_id)
context.bot.send_message(chat_id=update.message.chat_id, text="Report command works on replied messages only.")
report_handler = CommandHandler('report', report)
dispatcher.add_handler(report_handler)
## User summons HRs in a thread
def summon(update, context):
section = str(update.message.chat.id)
in_section = section in config.sections()
command_name = inspect.currentframe().f_code.co_name
feature_flag = config.get(section, command_name) == 'on'
if in_section and feature_flag:
try:
context.bot.send_message(chat_id=update.message.chat_id, text= "I'm DevOps Bot and i summon you HR for answer: " + \
config.get(section, 'summon_list') + " and the question is: " + str(update.message.reply_to_message.text), \
reply_to_message_id=update.message.message_id)
context.bot.deleteMessage(chat_id=update.message.chat.id, message_id=update.message.message_id)
except AttributeError:
context.bot.deleteMessage(chat_id=update.message.chat.id, message_id=update.message.message_id)
context.bot.send_message(chat_id=update.message.chat_id, text="Summon command works on replied messages only.")
summon_handler = CommandHandler('summon', summon, run_async=True)
dispatcher.add_handler(summon_handler)
## Send user bots man
def man(update, context):
section = str(update.message.chat.id)
try:
username = re.sub("[_]", "\_", update.message.from_user.username)
except TypeError:
username = None
in_section = section in config.sections()
command_name = inspect.currentframe().f_code.co_name
feature_flag = config.get(section, command_name) == 'on'
user_id = update.message.from_user.id
first_name = re.sub("[_]", "\_", update.message.from_user.first_name)
first_name = re.sub("[*]", "\*", first_name)
first_name = re.sub("[`]", "\`", first_name)
first_name = re.sub("[[]", "\[", first_name)
if in_section and feature_flag:
try:
f = open("helps/" + config.get(section, 'commands_list') + "/commands.txt", "r")
context.bot.send_message(chat_id=update.message.chat_id, text="@" + username + " here it is. " \
+ "\n" + f.read(), parse_mode='Markdown', disable_web_page_preview=True)
context.bot.deleteMessage(chat_id=update.message.chat.id, message_id=update.message.message_id)
except TypeError:
f = open("helps/" + config.get(section, 'commands_list') + "/commands.txt", "r")
context.bot.send_message(chat_id=update.message.chat_id, text="[" + first_name + "](tg://user?id=" + str(user_id) + ")" \
+ "\n" + f.read(), parse_mode='Markdown', disable_web_page_preview=True)
context.bot.deleteMessage(chat_id=update.message.chat.id, message_id=update.message.message_id)
man_handler = CommandHandler('man', man, run_async=True)
dispatcher.add_handler(man_handler)
# Administrator commands
## Mute some user
def mute(update, context):
section = str(update.message.chat.id)
in_section = section in config.sections()
command_name = inspect.currentframe().f_code.co_name
feature_flag = config.get(section, command_name) == 'on'
admins = update.message.from_user.id in get_admin_ids(context, update.message.chat_id)
admin_chat = config.get(section, 'admin_chat')
user_id = update.message.reply_to_message.from_user.id
first_name = re.sub("[_]", "\_", update.message.reply_to_message.from_user.first_name)
first_name = re.sub("[*]", "\*", first_name)
first_name = re.sub("[`]", "\`", first_name)
first_name = re.sub("[[]", "\[", first_name)
try:
last_name = re.sub("[_]", "\_", update.message.reply_to_message.from_user.last_name)
last_name = re.sub("[*]", "\*", last_name)
last_name = re.sub("[`]", "\`", last_name)
last_name = re.sub("[[]", "\[", last_name)
full_name = first_name + " " + last_name
except TypeError:
full_name = first_name
message_text=update.message.text+" "
hour,day = mute_parse_date(message_text)
duration = mute_gen_duration_message(hour, day)
comment = mute_parse_comment_message(message_text)
restrict = ChatPermissions(can_send_messages=False, can_send_media_messages=False, can_send_other_messages=False, can_add_web_page_previews=False)
if in_section and feature_flag and admins:
try:
context.bot.restrict_chat_member(chat_id=update.message.chat_id, user_id=update.message.reply_to_message.from_user.id, \
until_date=datetime.datetime.now() + datetime.timedelta(days=day, hours=hour), permissions = restrict)
context.bot.send_message(chat_id=admin_chat, text="User " + "[" + full_name + "](tg://user?id=" + str(user_id) + ") " \
+ "(@" + update.message.reply_to_message.from_user.username + ") was muted by [admin](tg://user?id=" + str(update.message.from_user.id) + ") " \
+ "in chat " + update.message.chat.title + "\nDuration: " + duration + "\nComment: " + comment, parse_mode='Markdown')
context.bot.send_message(chat_id=update.message.chat_id, text="User " + "@" + str(update.message.reply_to_message.from_user.username) + " muted.", \
reply_to_message_id=update.message.message_id)
context.bot.deleteMessage(chat_id=update.message.chat.id, message_id=update.message.message_id)
except TypeError:
context.bot.restrict_chat_member(chat_id=update.message.chat_id, user_id=update.message.reply_to_message.from_user.id, \
until_date=datetime.datetime.now() + datetime.timedelta(days=day, hours=hour), permissions = restrict)
context.bot.send_message(chat_id=admin_chat, text="User " + "[" + full_name + "](tg://user?id=" + str(user_id) + ") " \
+ "was muted by [admin](tg://user?id=" + str(update.message.from_user.id) + ") " + "in chat " + update.message.chat.title \
+ "\nDuration: " + duration + "\nComment: " + comment, parse_mode='Markdown')
context.bot.send_message(chat_id=update.message.chat_id, text="User " + "[" + first_name + "](tg://user?id=" + str(user_id) + ")" + " muted", \
reply_to_message_id=update.message.message_id, parse_mode='Markdown')
context.bot.deleteMessage(chat_id=update.message.chat.id, message_id=update.message.message_id)
mute_handler = CommandHandler('mute', mute, run_async=True)
dispatcher.add_handler(mute_handler)
##mute support func
def mute_parse_date(message_text):
hour=int()
if any(re.findall(" [0-9]{1,3}h ", message_text)):
temp = re.findall(" [0-9]{1,2}h ", message_text)
hour = int(re.sub("h", "", temp[0]))
day=int()
if any(re.findall(" [0-9]{1,3}d ", message_text)):
temp = re.findall(" [0-9]{1,2}d ", message_text)
day = int(re.sub("d", "", temp[0]))
if any(re.findall(" [0-9]{1,3}w ", message_text)):
temp = re.findall(" [0-9]{1,2}w ", message_text)
day = day + int(re.sub("w", "", temp[0])) * 7
if any(re.findall(" inf ", message_text)):
day = 367
return hour, day
def mute_gen_duration_message(hour, day):
duration=str()
if hour>0:
if hour==1:
duration = str(hour) + " hour"
else:
duration = str(hour) + " hours"
if day>0:
if hour>0:
duration = duration + " "
if day==1:
duration = duration + str(day) + " day"
else:
duration = duration + str(day) + " days"
if hour==0 and day==0 or day>366:
duration = "forever"
return duration
def mute_parse_comment_message(message_text):
comment = message_text.replace("/mute ", "")
if any(re.findall(" [0-9]{1,3}h ", message_text)):
comment = re.sub("[0-9]{1,3}h ", "", comment)
if any(re.findall(" [0-9]{1,3}d ", message_text)):
comment = re.sub("[0-9]{1,3}d ", "", comment)
if any(re.findall(" [0-9]{1,3}w ", message_text)):
comment = re.sub("[0-9]{1,3}w ", "", comment)
if any(re.findall(" inf ", message_text)):
comment = re.sub("inf ", "", comment)
if any(re.findall("[a-zA-Z0-9]", comment))==0:
comment = "not found"
return comment
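# Worked example for the three /mute helpers above, for the message
# "/mute 2h 1d flooding" (a trailing space is appended before parsing):
#   mute_parse_date("/mute 2h 1d flooding ")            -> (2, 1)
#   mute_gen_duration_message(2, 1)                     -> "2 hours 1 day"
#   mute_parse_comment_message("/mute 2h 1d flooding ") -> "flooding "
# A duration of zero (or more than 366 days) is reported as "forever".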
## Warn some user
def warn(update, context):
user_id = update.message.reply_to_message.from_user.id
user_username = update.message.reply_to_message.from_user.username
admin_username = re.sub("[_]", "\_", update.message.from_user.username)
warn = int(0)
section = str(update.message.chat.id)
in_section = section in config.sections()
command_name = inspect.currentframe().f_code.co_name
feature_flag = config.get(section, command_name) == 'on'
admins = update.message.from_user.id in get_admin_ids(context, update.message.chat_id)
if in_section and feature_flag and admins:
db.add_user(user_id, user_username, warn)
db.add_warn(user_id, user_username, warn)
warn_text = db.count_warn(user_id)
reason = ' '.join(context.args)
context.bot.send_message(chat_id=update.message.chat_id, text="@"+ str(user_username) + \
" your warn count: " + str(warn_text) + str("/3.") + "If you get 3 warns you will be banned for 3 days." + "\n" +
"Admin: @" + admin_username + "\n" + "Reason: " + reason, reply_to_message_id=update.message.message_id)
context.bot.deleteMessage(chat_id=update.message.chat.id, message_id=update.message.message_id)
if warn_text >= 3:
db.delete_warn(user_id, user_username, warn)
restrict = ChatPermissions(can_send_messages=False, can_send_media_messages=False, can_send_other_messages=False, can_add_web_page_previews=False)
context.bot.restrict_chat_member(chat_id=update.message.chat_id, user_id=update.message.reply_to_message.from_user.id, \
permissions=restrict, until_date=datetime.datetime.now() + datetime.timedelta(days=3))
warn_handler = CommandHandler('warn', warn, run_async=True)
dispatcher.add_handler(warn_handler)
## Unwarn user
def unwarn(update, context):
user_id = update.message.reply_to_message.from_user.id
user_username = update.message.reply_to_message.from_user.username
warn = int(0)
section = str(update.message.chat.id)
in_section = section in config.sections()
command_name = inspect.currentframe().f_code.co_name
feature_flag = config.get(section, command_name) == 'on'
admins = update.message.from_user.id in get_admin_ids(context, update.message.chat_id)
if in_section and feature_flag and admins:
db.unwarn(user_id, user_username, warn)
warn_text = db.count_warn(user_id)
context.bot.send_message(chat_id=update.message.chat_id, text="@"+ str(update.message.reply_to_message.from_user.username) + \
" your warn count: " + str(warn_text) + str("/3.") + "If you get 3 warns you will be banned for 3 days.", \
reply_to_message_id=update.message.message_id)
context.bot.deleteMessage(chat_id=update.message.chat.id, message_id=update.message.message_id)
unwarn_handler = CommandHandler('unwarn', unwarn, run_async=True)
dispatcher.add_handler(unwarn_handler)
## Move message from one chat to another
def move(update, context):
section = str(update.message.chat.id)
in_section = section in config.sections()
command_name = inspect.currentframe().f_code.co_name
feature_flag = config.get(section, command_name) == 'on'
admins = update.message.from_user.id in get_admin_ids(context, update.message.chat_id)
if in_section and feature_flag and admins:
if update.message.reply_to_message.photo:
try:
context.bot.send_message(chat_id=config.get(section, 'move_from_id'), text="Your photo " + "@" + str(update.message.reply_to_message.from_user.username) \
+ " was moved to " + config.get(section, 'move_to_name'))
context.bot.forward_message(chat_id=config.get(section, 'move_to_id'), from_chat_id=update.message.chat_id, message_id=update.message.reply_to_message.message_id)
context.bot.deleteMessage(chat_id=update.message.chat.id, message_id=update.message.message_id)
context.bot.deleteMessage(chat_id=update.message.chat.id, message_id=update.message.reply_to_message.message_id)
except TypeError:
context.bot.send_message(chat_id=config.get(section, 'move_from_id'), text="Your photo " + \
" was moved to " + config.get(section, 'move_to_name'))
context.bot.forward_message(chat_id=config.get(section, 'move_to_id'), from_chat_id=update.message.chat_id, message_id=update.message.reply_to_message.message_id)
context.bot.deleteMessage(chat_id=update.message.chat.id, message_id=update.message.message_id)
context.bot.deleteMessage(chat_id=update.message.chat.id, message_id=update.message.reply_to_message.message_id)
else:
try:
context.bot.send_message(chat_id=config.get(section, 'move_from_id'), text="Your post " + "@" + str(update.message.reply_to_message.from_user.username) \
+ " was moved to " + config.get(section, 'move_to_name'))
context.bot.send_message(chat_id=config.get(section, 'move_to_id'), text="Message was forwarded from: " + "@" + str(update.message.chat.username) + "\n" + \
"Author: " + "@" + str(update.message.reply_to_message.from_user.username) + "\n" + \
"Message: " + str(update.message.reply_to_message.text))
context.bot.deleteMessage(chat_id=update.message.chat.id, message_id=update.message.message_id)
context.bot.deleteMessage(chat_id=update.message.chat.id, message_id=update.message.reply_to_message.message_id)
except TypeError:
context.bot.send_message(chat_id=config.get(section, 'move_from_id'), text="Your post " + \
" was moved to " + config.get(section, 'move_to_name'))
context.bot.send_message(chat_id=config.get(section, 'move_to_id'), text="Message was forwarded from: " + "@" + str(update.message.chat.username) + "\n" + \
"Author: " + str(update.message.reply_to_message.from_user.first_name) + "\n" + \
"Message: " + str(update.message.reply_to_message.text))
context.bot.deleteMessage(chat_id=update.message.chat.id, message_id=update.message.message_id)
context.bot.deleteMessage(chat_id=update.message.chat.id, message_id=update.message.reply_to_message.message_id)
move_handler = CommandHandler('move', move, run_async=True)
dispatcher.add_handler(move_handler)
## Post job opportunity into channel
def job(update, context):
section = str(update.message.chat.id)
in_section = section in config.sections()
command_name = inspect.currentframe().f_code.co_name
feature_flag = config.get(section, command_name) == 'on'
admins = update.message.from_user.id in get_admin_ids(context, update.message.chat_id)
if in_section and feature_flag and admins:
context.bot.forward_message(chat_id=config.get(section, 'post_to_name'), from_chat_id=update.message.chat_id, \
message_id=update.message.reply_to_message.message_id, disable_notification=False)
context.bot.send_message(chat_id=config.get(section, 'post_from_id'), reply_to_message_id=update.message.reply_to_message.message_id, \
text="Your post was posted in " + config.get(section, 'post_to_name'))
context.bot.deleteMessage(chat_id=update.message.chat.id, message_id=update.message.message_id)
job_handler = CommandHandler('job', job, run_async=True)
dispatcher.add_handler(job_handler)
## New Post job opportunity into channel
def jobs(update, context):
section = str(update.message.chat.id)
in_section = section in config.sections()
command_name = inspect.currentframe().f_code.co_name
feature_flag = config.get(section, command_name) == 'on'
admins = update.message.from_user.id in get_admin_ids(context, update.message.chat_id)
user_id = update.message.from_user.id
first_name = re.sub("[_]", "\_", update.message.from_user.first_name)
first_name = re.sub("[*]", "\*", first_name)
first_name = re.sub("[`]", "\`", first_name)
first_name = re.sub("[[]", "\[", first_name)
job_rss = config.get(section, 'job_rss')
job_channels = shlex.split(job_rss)
match_job = re.compile("#вакансия", re.IGNORECASE)
match_work = re.compile("#резюме", re.IGNORECASE)
matches_job = match_job.search(update.message.reply_to_message.text)
matches_work = match_work.search(update.message.reply_to_message.text)
rss_link = re.sub("@", "https://t.me/", job_channels[0])
if matches_work is not None:
text_to_publish = "[Резюме]" + "https://t.me/"+ str(update.message.chat.username)+"/"+ str(update.message.reply_to_message.message_id) \
+ " было опубилковано в " + "[RSS канале]" + "(" + rss_link + ")"
elif matches_job is not None:
text_to_publish = "[Вакансия]" "https://t.me/"+ str(update.message.chat.username)+"/"+ str(update.message.reply_to_message.message_id) \
+ " была опубилкована в " + "[RSS канале]" + "(" + rss_link + ")"
if in_section and feature_flag and admins:
if matches_job is None and matches_work is None:
context.bot.send_message(chat_id=update.message.chat.id, reply_to_message_id=update.message.reply_to_message.message_id, \
text="В Вашем посте отсуствует тег #вакансия или #резюме. Невозможно определить тип поста.")
context.bot.deleteMessage(chat_id=update.message.chat.id, message_id=update.message.message_id)
else:
if str(update.message.reply_to_message.from_user.username) != "None":
for i in job_channels:
context.bot.send_message(chat_id=i, text="Publisher: " + "@" + str(update.message.reply_to_message.from_user.username) + "\n" + \
str(update.message.reply_to_message.text), parse_mode='Markdown', disable_web_page_preview=True)
context.bot.send_message(chat_id=update.message.chat_id, reply_to_message_id=update.message.reply_to_message.message_id, \
text=text_to_publish, parse_mode='Markdown', disable_web_page_preview=True)
context.bot.deleteMessage(chat_id=update.message.chat.id, message_id=update.message.message_id)
elif str(update.message.reply_to_message.from_user.username) == "None":
for i in job_channels:
context.bot.send_message(chat_id=i, text="Publisher: " + "[" + first_name + "](tg://user?id=" + str(user_id) + ")" + "\n" + \
str(update.message.reply_to_message.text), parse_mode='Markdown', disable_web_page_preview=True)
context.bot.send_message(chat_id=update.message.chat_id, reply_to_message_id=update.message.reply_to_message.message_id, \
text=text_to_publish, parse_mode='Markdown', disable_web_page_preview=True)
context.bot.deleteMessage(chat_id=update.message.chat.id, message_id=update.message.message_id)
jobs_handler = CommandHandler('jobs', jobs, run_async=True)
dispatcher.add_handler(jobs_handler)
## Get chat number
def idnumber(update, context):
admins = update.message.from_user.id in get_admin_ids(context, update.message.chat_id)
if admins:
context.bot.send_message(chat_id=update.message.chat_id, text=update.message.chat_id)
context.bot.deleteMessage(chat_id=update.message.chat.id, message_id=update.message.message_id)
idnumber_handler = CommandHandler('idnumber', idnumber, run_async=True)
dispatcher.add_handler(idnumber_handler)
## Delete service messages
def delete_service_message(update, context):
section = str(update.message.chat.id)
in_section = section in config.sections()
command_name = inspect.currentframe().f_code.co_name
feature_flag = config.get(section, command_name) == 'on'
msg = update.effective_message
if in_section and feature_flag:
context.bot.delete_message(chat_id=msg.chat.id,message_id=msg.message_id)
dispatcher.add_handler(MessageHandler(Filters.status_update.new_chat_members, delete_service_message, run_async=True))
dispatcher.add_handler(MessageHandler(Filters.status_update.left_chat_member, delete_service_message, run_async=True))
## Delete spam message and Ban spamer with admin button
def delete_ban_button(update, context):
query = update.callback_query
query.answer()
callback_data = query.message.reply_markup.inline_keyboard[0][0].callback_data
chat_id = int(callback_data.split()[1])
user_id = int(callback_data.split()[2])
message_id = int(callback_data.split()[3])
admin_username = query.from_user.username
### i see no reason to double check
# section = str(chat_id)
# in_section = section in config.sections()
# command_name = inspect.currentframe().f_code.co_name
# feature_flag = config.get(section, command_name) == 'on'
# if in_section and feature_flag:
context.bot.delete_message(chat_id=chat_id, message_id=message_id)
context.bot.kick_chat_member(chat_id=chat_id, user_id=user_id)
query.edit_message_text(text="Approved by @"+admin_username)
button_spam_handler = CallbackQueryHandler(delete_ban_button, pattern=r'spam[\s\S]+', run_async=True)
dispatcher.add_handler(button_spam_handler)
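# The button round trip: report() packs "spam <chat_id> <user_id> <message_id>"
# into callback_data, and delete_ban_button() splits it back out, e.g.
#   "spam -100123 456 789".split() -> ["spam", "-100123", "456", "789"]
# which is why the handler reads fields 1..3 after the "spam" tag that the
# CallbackQueryHandler pattern matched on.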
def main():
db.setup()
if __name__ == '__main__':
main()
updater.start_polling()
| 61.729919
| 194
| 0.594935
| 6,258
| 53,026
| 4.793225
| 0.055769
| 0.12655
| 0.081478
| 0.081711
| 0.82231
| 0.812075
| 0.770536
| 0.759935
| 0.752434
| 0.732231
| 0
| 0.002527
| 0.276016
| 53,026
| 858
| 195
| 61.801865
| 0.778823
| 0.021763
| 0
| 0.637701
| 0
| 0.001337
| 0.08924
| 0.001235
| 0.022727
| 0
| 0
| 0
| 0
| 1
| 0.03877
| false
| 0.002674
| 0.01738
| 0
| 0.061497
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
5d4f57c12803f7bd5014ae82fde9fa61eeebf0e7
| 722
|
py
|
Python
|
terrascript/opsgenie/r.py
|
amlodzianowski/python-terrascript
|
1111affe6cd30d9b8b7bc74ae4e27590f7d4dc49
|
[
"BSD-2-Clause"
] | null | null | null |
terrascript/opsgenie/r.py
|
amlodzianowski/python-terrascript
|
1111affe6cd30d9b8b7bc74ae4e27590f7d4dc49
|
[
"BSD-2-Clause"
] | null | null | null |
terrascript/opsgenie/r.py
|
amlodzianowski/python-terrascript
|
1111affe6cd30d9b8b7bc74ae4e27590f7d4dc49
|
[
"BSD-2-Clause"
] | null | null | null |
# terrascript/opsgenie/r.py
import terrascript
class opsgenie_team(terrascript.Resource):
pass
class opsgenie_team_routing_rule(terrascript.Resource):
pass
class opsgenie_user(terrascript.Resource):
pass
class opsgenie_user_contact(terrascript.Resource):
pass
class opsgenie_escalation(terrascript.Resource):
pass
class opsgenie_api_integration(terrascript.Resource):
pass
class opsgenie_email_integration(terrascript.Resource):
pass
class opsgenie_schedule(terrascript.Resource):
pass
class opsgenie_schedule_rotation(terrascript.Resource):
pass
class opsgenie_maintenance(terrascript.Resource):
pass
class opsgenie_heartbeat(terrascript.Resource):
pass
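# Usage sketch, assuming the terrascript >= 0.8 style API these generated
# Resource subclasses are built for (the resource label and attributes below
# are invented for illustration):
#   import terrascript
#   from terrascript.opsgenie.r import opsgenie_team
#   config = terrascript.Terrascript()
#   config += opsgenie_team('devops', name='devops')
#   print(str(config))  # renders the resource as Terraform JSON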
| 15.041667
| 55
| 0.789474
| 78
| 722
| 7.089744
| 0.269231
| 0.25859
| 0.457505
| 0.506329
| 0.734177
| 0.40868
| 0
| 0
| 0
| 0
| 0
| 0
| 0.144044
| 722
| 47
| 56
| 15.361702
| 0.894822
| 0.034626
| 0
| 0.478261
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.478261
| 0.043478
| 0
| 0.521739
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
|
0
| 6
|
5d719da2cea5f49c79d3c22f345c08af34125d3e
| 124
|
py
|
Python
|
dist/Basilisk/fswAlgorithms/mrpRotation/__init__.py
|
ian-cooke/basilisk_mag
|
a8b1e37c31c1287549d6fd4d71fcaa35b6fc3f14
|
[
"0BSD"
] | null | null | null |
dist/Basilisk/fswAlgorithms/mrpRotation/__init__.py
|
ian-cooke/basilisk_mag
|
a8b1e37c31c1287549d6fd4d71fcaa35b6fc3f14
|
[
"0BSD"
] | 1
|
2019-03-13T20:52:22.000Z
|
2019-03-13T20:52:22.000Z
|
dist/Basilisk/fswAlgorithms/mrpRotation/__init__.py
|
ian-cooke/basilisk_mag
|
a8b1e37c31c1287549d6fd4d71fcaa35b6fc3f14
|
[
"0BSD"
] | null | null | null |
# This __init__.py file for the mrpRotation package is automatically generated by the build system
from mrpRotation import *
| 62
| 98
| 0.830645
| 18
| 124
| 5.5
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.145161
| 124
| 2
| 99
| 62
| 0.933962
| 0.774194
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
537be0dd389dcc688defe1da3ebc0d43c7d4d3ce
| 43
|
py
|
Python
|
tcedata/__init__.py
|
jiqicn/tcedata
|
1640c5f1767efd6e958467f39b7cab42340ebb6e
|
[
"MIT"
] | null | null | null |
tcedata/__init__.py
|
jiqicn/tcedata
|
1640c5f1767efd6e958467f39b7cab42340ebb6e
|
[
"MIT"
] | 2
|
2021-11-01T10:10:17.000Z
|
2021-12-09T15:21:27.000Z
|
tcedata/__init__.py
|
jiqicn/tcedata
|
1640c5f1767efd6e958467f39b7cab42340ebb6e
|
[
"MIT"
] | null | null | null |
from .minio import *
from .tcedata import *
| 21.5
| 22
| 0.744186
| 6
| 43
| 5.333333
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.162791
| 43
| 2
| 22
| 21.5
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5390439ba6c39c31ba4c90c3427c225d8387ddd4
| 3,316
|
py
|
Python
|
tests/unit/scalar_fields/test_xyz_scalar_fields.py
|
bernssolg/pyntcloud-master
|
84cf000b7a7f69a2c1b36f9624f05f65160bf992
|
[
"MIT"
] | 1,142
|
2016-10-10T08:55:30.000Z
|
2022-03-30T04:46:16.000Z
|
tests/unit/scalar_fields/test_xyz_scalar_fields.py
|
bernssolg/pyntcloud-master
|
84cf000b7a7f69a2c1b36f9624f05f65160bf992
|
[
"MIT"
] | 195
|
2016-10-10T08:30:37.000Z
|
2022-02-17T12:51:17.000Z
|
tests/unit/scalar_fields/test_xyz_scalar_fields.py
|
bernssolg/pyntcloud-master
|
84cf000b7a7f69a2c1b36f9624f05f65160bf992
|
[
"MIT"
] | 215
|
2017-02-28T00:50:29.000Z
|
2022-03-22T17:01:31.000Z
|
import pytest
import numpy as np
from pyntcloud.scalar_fields.xyz import (
PlaneFit,
SphereFit,
SphericalCoordinates,
CylindricalCoordinates
)
@pytest.mark.usefixtures("plane_pyntcloud")
def test_PlaneFit_max_dist(plane_pyntcloud):
scalar_field = PlaneFit(
pyntcloud=plane_pyntcloud)
scalar_field.extract_info()
with np.errstate(divide='ignore', invalid='ignore'):
scalar_field.compute()
assert sum(scalar_field.to_be_added["is_plane"]) == 4
scalar_field = PlaneFit(
pyntcloud=plane_pyntcloud,
max_dist=0.4)
scalar_field.extract_info()
with np.errstate(divide='ignore', invalid='ignore'):
scalar_field.compute()
assert sum(scalar_field.to_be_added["is_plane"]) == 5
@pytest.mark.usefixtures("sphere_pyntcloud")
def test_SphereFit_max_dist(sphere_pyntcloud):
scalar_field = SphereFit(
pyntcloud=sphere_pyntcloud)
scalar_field.extract_info()
with np.errstate(divide='ignore', invalid='ignore'):
scalar_field.compute()
assert sum(scalar_field.to_be_added["is_sphere"]) == 4
scalar_field = SphereFit(
pyntcloud=sphere_pyntcloud,
max_dist=0.25)
scalar_field.extract_info()
with np.errstate(divide='ignore', invalid='ignore'):
scalar_field.compute()
assert sum(scalar_field.to_be_added["is_sphere"]) == 5
@pytest.mark.usefixtures("pyntcloud_with_rgb_and_normals")
def test_SphericalCoordinates_bounds(pyntcloud_with_rgb_and_normals):
scalar_field = SphericalCoordinates(
pyntcloud=pyntcloud_with_rgb_and_normals)
scalar_field.extract_info()
with np.errstate(divide='ignore', invalid='ignore'):
scalar_field.compute()
assert all(scalar_field.to_be_added["polar"] >= 0)
assert all(scalar_field.to_be_added["polar"] <= 180)
assert all(scalar_field.to_be_added["azimuthal"] >= -180)
assert all(scalar_field.to_be_added["azimuthal"] <= 180)
scalar_field = SphericalCoordinates(
pyntcloud=pyntcloud_with_rgb_and_normals,
degrees=False)
scalar_field.extract_info()
with np.errstate(divide='ignore', invalid='ignore'):
scalar_field.compute()
assert all(scalar_field.to_be_added["polar"] >= 0)
assert all(scalar_field.to_be_added["polar"] <= np.pi)
assert all(scalar_field.to_be_added["azimuthal"] >= -np.pi)
assert all(scalar_field.to_be_added["azimuthal"] <= np.pi)
@pytest.mark.usefixtures("pyntcloud_with_rgb_and_normals")
def test_CylindricalCoordinates_bounds(pyntcloud_with_rgb_and_normals):
scalar_field = CylindricalCoordinates(
pyntcloud=pyntcloud_with_rgb_and_normals)
scalar_field.extract_info()
with np.errstate(divide='ignore', invalid='ignore'):
scalar_field.compute()
assert all(scalar_field.to_be_added["angular_cylindrical"] >= -90)
assert all(scalar_field.to_be_added["angular_cylindrical"] <= 270)
scalar_field = CylindricalCoordinates(
pyntcloud=pyntcloud_with_rgb_and_normals,
degrees=False)
scalar_field.extract_info()
with np.errstate(divide='ignore', invalid='ignore'):
scalar_field.compute()
assert all(scalar_field.to_be_added["angular_cylindrical"] >= - (np.pi / 2))
assert all(scalar_field.to_be_added["angular_cylindrical"] <= (np.pi * 1.5))
| 33.16
| 80
| 0.723764
| 418
| 3,316
| 5.404306
| 0.138756
| 0.194776
| 0.092076
| 0.106242
| 0.850376
| 0.850376
| 0.782205
| 0.782205
| 0.753873
| 0.676405
| 0
| 0.010054
| 0.160133
| 3,316
| 99
| 81
| 33.494949
| 0.801077
| 0
| 0
| 0.552632
| 0
| 0
| 0.106486
| 0.0181
| 0
| 0
| 0
| 0
| 0.210526
| 1
| 0.052632
| false
| 0
| 0.039474
| 0
| 0.092105
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
5396b9d94582726c0834d7ff4388009cbdeb4590
| 33
|
py
|
Python
|
clinica/engine/__init__.py
|
alexandreroutier/clinica
|
66625c65e74962db7d5cea267d1a0e51d774bf91
|
[
"MIT"
] | 135
|
2019-05-17T14:16:40.000Z
|
2022-03-19T03:08:05.000Z
|
clinica/engine/__init__.py
|
alexandreroutier/clinica
|
66625c65e74962db7d5cea267d1a0e51d774bf91
|
[
"MIT"
] | 391
|
2019-06-03T09:32:17.000Z
|
2022-03-31T15:10:26.000Z
|
clinica/engine/__init__.py
|
alexandreroutier/clinica
|
66625c65e74962db7d5cea267d1a0e51d774bf91
|
[
"MIT"
] | 57
|
2019-05-20T08:38:01.000Z
|
2022-02-11T12:14:32.000Z
|
from .cmdparser import CmdParser
| 16.5
| 32
| 0.848485
| 4
| 33
| 7
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121212
| 33
| 1
| 33
| 33
| 0.965517
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
53b0f164c7063aa80408b1f8d15ec5e3f054bbd7
| 100
|
py
|
Python
|
NasUnet/util/__init__.py
|
mlvc-lab/Segmentation-NAS
|
a9387a1546dacfa2dc6ee1f70366542a1552e541
|
[
"MIT"
] | 4
|
2020-03-26T11:05:08.000Z
|
2020-12-22T08:37:20.000Z
|
NasUnet/util/__init__.py
|
mlvc-lab/Segmentation-NAS
|
a9387a1546dacfa2dc6ee1f70366542a1552e541
|
[
"MIT"
] | null | null | null |
NasUnet/util/__init__.py
|
mlvc-lab/Segmentation-NAS
|
a9387a1546dacfa2dc6ee1f70366542a1552e541
|
[
"MIT"
] | 3
|
2020-03-26T11:05:09.000Z
|
2022-01-28T11:29:00.000Z
|
#from .visualize import *
from .metrics import *
from .prim_ops_set import *
from .utils import *
| 25
| 28
| 0.73
| 14
| 100
| 5.071429
| 0.571429
| 0.422535
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.18
| 100
| 4
| 29
| 25
| 0.865854
| 0.24
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
53f734e8a1fd9b0c286564f992eb8cd04f4ebdde
| 155
|
py
|
Python
|
setup.py
|
j0912345/pvz2_obb_tools
|
1cb2780a36850534f56ae207dfa92f4c23b6117a
|
[
"MIT"
] | null | null | null |
setup.py
|
j0912345/pvz2_obb_tools
|
1cb2780a36850534f56ae207dfa92f4c23b6117a
|
[
"MIT"
] | 1
|
2021-12-02T00:29:12.000Z
|
2021-12-02T00:29:12.000Z
|
setup.py
|
j0912345/pvz2_obb_tools
|
1cb2780a36850534f56ae207dfa92f4c23b6117a
|
[
"MIT"
] | null | null | null |
from distutils.core import setup
import py2exe
setup(console=['extract_obb.py', 'extract_file_names.py', 'convert_data_from_file_to_32_bit_uint.py'])
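# Typical build invocation for a py2exe console setup like the one above:
#   python setup.py py2exe
# Importing py2exe registers it as a distutils command, and the executables
# for the three listed scripts land in a dist/ directory.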
| 31
| 103
| 0.8
| 25
| 155
| 4.56
| 0.72
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021277
| 0.090323
| 155
| 4
| 104
| 38.75
| 0.787234
| 0
| 0
| 0
| 0
| 0
| 0.496689
| 0.403974
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
54f20bd03a023c684924a40c000e7c92191b4b88
| 23
|
py
|
Python
|
tsmpy/dcel/__init__.py
|
rawfh/Topology-Shape-Metrics-algorithm
|
5f306fd12ce0833279b86baa66655eeec508e03a
|
[
"MIT"
] | 2
|
2021-05-26T16:45:14.000Z
|
2021-05-30T10:15:01.000Z
|
tsmpy/dcel/__init__.py
|
rawfh/Topology-Shape-Metrics-algorithm
|
5f306fd12ce0833279b86baa66655eeec508e03a
|
[
"MIT"
] | 3
|
2021-06-12T17:02:14.000Z
|
2021-06-14T16:52:02.000Z
|
tsmpy/dcel/__init__.py
|
rawfh/Topology-Shape-Metrics-algorithm
|
5f306fd12ce0833279b86baa66655eeec508e03a
|
[
"MIT"
] | 2
|
2020-06-11T11:00:06.000Z
|
2021-03-18T11:46:12.000Z
|
from .dcel import Dcel
| 11.5
| 22
| 0.782609
| 4
| 23
| 4.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173913
| 23
| 1
| 23
| 23
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
072ced417bb775f7de8bf208b95b311c606ecfdb
| 505
|
py
|
Python
|
main/colour.py
|
myujjawal/Terminal_Chat_Console
|
27ed40923189ead9d211c5a42e3ccbe8bd083652
|
[
"Apache-2.0"
] | 3
|
2021-01-01T17:58:05.000Z
|
2021-12-30T08:45:12.000Z
|
main/colour.py
|
myujjawal/Terminal_Chat_Console
|
27ed40923189ead9d211c5a42e3ccbe8bd083652
|
[
"Apache-2.0"
] | null | null | null |
main/colour.py
|
myujjawal/Terminal_Chat_Console
|
27ed40923189ead9d211c5a42e3ccbe8bd083652
|
[
"Apache-2.0"
] | 3
|
2020-05-20T06:10:14.000Z
|
2020-07-02T10:17:58.000Z
|
def prRed(skk): print("\033[91m {}\033[00m" .format(skk))
def prGreen(skk): print("\033[92m {}\033[00m" .format(skk))
def prYellow(skk): print("\033[93m {}\033[00m" .format(skk))
def prLightPurple(skk): print("\033[94m {}\033[00m" .format(skk))
def prPurple(skk): print("\033[95m {}\033[00m" .format(skk))
def prCyan(skk): print("\033[96m {}\033[00m" .format(skk))
def prLightGray(skk): print("\033[97m {}\033[00m" .format(skk))
def prBlack(skk): print("\033[98m {}\033[00m" .format(skk))
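# The eight printers differ only in their ANSI colour code, so they could be
# generated from one factory. A hedged, behaviour-preserving sketch
# (_make_printer is a name invented here, not part of the original file):
def _make_printer(code):
    def _pr(skk):
        print("\033[{}m {}\033[00m".format(code, skk))
    return _pr

prRed2 = _make_printer(91)  # prRed2("hello") prints hello in red, like prRed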
| 56.111111
| 67
| 0.635644
| 80
| 505
| 4.0125
| 0.275
| 0.199377
| 0.274143
| 0.373832
| 0.392523
| 0
| 0
| 0
| 0
| 0
| 0
| 0.178174
| 0.110891
| 505
| 8
| 68
| 63.125
| 0.536748
| 0
| 0
| 0
| 0
| 0
| 0.305835
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| false
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
|
0
| 6
|
4ad28f8567c7cbef39db512ef86b7cfd9466b989
| 7,524
|
py
|
Python
|
tests/util/test_percentage.py
|
tbarbette/core
|
8e58c3aa7bc8d2c2b09b6bd329daa1c092d52d3c
|
[
"Apache-2.0"
] | 4
|
2016-06-22T12:00:41.000Z
|
2018-06-11T20:31:25.000Z
|
tests/util/test_percentage.py
|
tbarbette/core
|
8e58c3aa7bc8d2c2b09b6bd329daa1c092d52d3c
|
[
"Apache-2.0"
] | 58
|
2020-08-03T07:33:02.000Z
|
2022-03-31T06:02:05.000Z
|
tests/util/test_percentage.py
|
tbarbette/core
|
8e58c3aa7bc8d2c2b09b6bd329daa1c092d52d3c
|
[
"Apache-2.0"
] | 6
|
2019-07-06T00:43:13.000Z
|
2021-01-16T13:27:06.000Z
|
"""Test Home Assistant percentage conversions."""
import math
import pytest
from homeassistant.util.percentage import (
ordered_list_item_to_percentage,
percentage_to_ordered_list_item,
percentage_to_ranged_value,
ranged_value_to_percentage,
)
SPEED_LOW = "low"
SPEED_MEDIUM = "medium"
SPEED_HIGH = "high"
SPEED_1 = SPEED_LOW
SPEED_2 = SPEED_MEDIUM
SPEED_3 = SPEED_HIGH
SPEED_4 = "very_high"
SPEED_5 = "storm"
SPEED_6 = "hurricane"
SPEED_7 = "solar_wind"
LEGACY_ORDERED_LIST = [SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH]
SMALL_ORDERED_LIST = [SPEED_1, SPEED_2, SPEED_3, SPEED_4]
LARGE_ORDERED_LIST = [SPEED_1, SPEED_2, SPEED_3, SPEED_4, SPEED_5, SPEED_6, SPEED_7]
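# The expected values in the asserts below follow the apparent mapping
# (inferred from the tests themselves, not from Home Assistant docs):
#   ordered_list_item_to_percentage(lst, item) == int((index + 1) * 100 / len(lst))
# e.g. for the 7-item LARGE_ORDERED_LIST, SPEED_4 has index 3, so
# int(4 * 100 / 7) == 57, matching the assert further down.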
async def test_ordered_list_percentage_round_trip():
"""Test we can round trip."""
    for ordered_list in (SMALL_ORDERED_LIST, LARGE_ORDERED_LIST):
        for i in range(1, 100):
            # The original bare comparison discarded its result; assert that an
            # item round-trips through its own percentage.
            item = percentage_to_ordered_list_item(ordered_list, i)
            assert (
                percentage_to_ordered_list_item(
                    ordered_list, ordered_list_item_to_percentage(ordered_list, item)
                )
                == item
            )
async def test_ordered_list_item_to_percentage():
"""Test percentage of an item in an ordered list."""
assert ordered_list_item_to_percentage(LEGACY_ORDERED_LIST, SPEED_LOW) == 33
assert ordered_list_item_to_percentage(LEGACY_ORDERED_LIST, SPEED_MEDIUM) == 66
assert ordered_list_item_to_percentage(LEGACY_ORDERED_LIST, SPEED_HIGH) == 100
assert ordered_list_item_to_percentage(SMALL_ORDERED_LIST, SPEED_1) == 25
assert ordered_list_item_to_percentage(SMALL_ORDERED_LIST, SPEED_2) == 50
assert ordered_list_item_to_percentage(SMALL_ORDERED_LIST, SPEED_3) == 75
assert ordered_list_item_to_percentage(SMALL_ORDERED_LIST, SPEED_4) == 100
assert ordered_list_item_to_percentage(LARGE_ORDERED_LIST, SPEED_1) == 14
assert ordered_list_item_to_percentage(LARGE_ORDERED_LIST, SPEED_2) == 28
assert ordered_list_item_to_percentage(LARGE_ORDERED_LIST, SPEED_3) == 42
assert ordered_list_item_to_percentage(LARGE_ORDERED_LIST, SPEED_4) == 57
assert ordered_list_item_to_percentage(LARGE_ORDERED_LIST, SPEED_5) == 71
assert ordered_list_item_to_percentage(LARGE_ORDERED_LIST, SPEED_6) == 85
assert ordered_list_item_to_percentage(LARGE_ORDERED_LIST, SPEED_7) == 100
with pytest.raises(ValueError):
assert ordered_list_item_to_percentage([], SPEED_1)
async def test_percentage_to_ordered_list_item():
"""Test item that most closely matches the percentage in an ordered list."""
assert percentage_to_ordered_list_item(SMALL_ORDERED_LIST, 1) == SPEED_1
assert percentage_to_ordered_list_item(SMALL_ORDERED_LIST, 25) == SPEED_1
assert percentage_to_ordered_list_item(SMALL_ORDERED_LIST, 26) == SPEED_2
assert percentage_to_ordered_list_item(SMALL_ORDERED_LIST, 50) == SPEED_2
assert percentage_to_ordered_list_item(SMALL_ORDERED_LIST, 51) == SPEED_3
assert percentage_to_ordered_list_item(SMALL_ORDERED_LIST, 75) == SPEED_3
assert percentage_to_ordered_list_item(SMALL_ORDERED_LIST, 76) == SPEED_4
assert percentage_to_ordered_list_item(SMALL_ORDERED_LIST, 100) == SPEED_4
assert percentage_to_ordered_list_item(LEGACY_ORDERED_LIST, 17) == SPEED_LOW
assert percentage_to_ordered_list_item(LEGACY_ORDERED_LIST, 33) == SPEED_LOW
assert percentage_to_ordered_list_item(LEGACY_ORDERED_LIST, 50) == SPEED_MEDIUM
assert percentage_to_ordered_list_item(LEGACY_ORDERED_LIST, 66) == SPEED_MEDIUM
assert percentage_to_ordered_list_item(LEGACY_ORDERED_LIST, 84) == SPEED_HIGH
assert percentage_to_ordered_list_item(LEGACY_ORDERED_LIST, 100) == SPEED_HIGH
assert percentage_to_ordered_list_item(LARGE_ORDERED_LIST, 1) == SPEED_1
assert percentage_to_ordered_list_item(LARGE_ORDERED_LIST, 14) == SPEED_1
assert percentage_to_ordered_list_item(LARGE_ORDERED_LIST, 25) == SPEED_2
assert percentage_to_ordered_list_item(LARGE_ORDERED_LIST, 26) == SPEED_2
assert percentage_to_ordered_list_item(LARGE_ORDERED_LIST, 28) == SPEED_2
assert percentage_to_ordered_list_item(LARGE_ORDERED_LIST, 29) == SPEED_3
assert percentage_to_ordered_list_item(LARGE_ORDERED_LIST, 41) == SPEED_3
assert percentage_to_ordered_list_item(LARGE_ORDERED_LIST, 42) == SPEED_3
assert percentage_to_ordered_list_item(LARGE_ORDERED_LIST, 43) == SPEED_4
assert percentage_to_ordered_list_item(LARGE_ORDERED_LIST, 56) == SPEED_4
assert percentage_to_ordered_list_item(LARGE_ORDERED_LIST, 50) == SPEED_4
assert percentage_to_ordered_list_item(LARGE_ORDERED_LIST, 51) == SPEED_4
assert percentage_to_ordered_list_item(LARGE_ORDERED_LIST, 75) == SPEED_6
assert percentage_to_ordered_list_item(LARGE_ORDERED_LIST, 76) == SPEED_6
assert percentage_to_ordered_list_item(LARGE_ORDERED_LIST, 100) == SPEED_7
assert percentage_to_ordered_list_item(LARGE_ORDERED_LIST, 1) == SPEED_1
assert percentage_to_ordered_list_item(LARGE_ORDERED_LIST, 25) == SPEED_2
assert percentage_to_ordered_list_item(LARGE_ORDERED_LIST, 26) == SPEED_2
assert percentage_to_ordered_list_item(LARGE_ORDERED_LIST, 50) == SPEED_4
assert percentage_to_ordered_list_item(LARGE_ORDERED_LIST, 51) == SPEED_4
assert percentage_to_ordered_list_item(LARGE_ORDERED_LIST, 75) == SPEED_6
assert percentage_to_ordered_list_item(LARGE_ORDERED_LIST, 76) == SPEED_6
assert percentage_to_ordered_list_item(LARGE_ORDERED_LIST, 100) == SPEED_7
assert percentage_to_ordered_list_item(LARGE_ORDERED_LIST, 100.1) == SPEED_7
with pytest.raises(ValueError):
assert percentage_to_ordered_list_item([], 100)
async def test_ranged_value_to_percentage_large():
"""Test a large range of low and high values convert a single value to a percentage."""
range = (1, 255)
assert ranged_value_to_percentage(range, 255) == 100
assert ranged_value_to_percentage(range, 127) == 49
assert ranged_value_to_percentage(range, 10) == 3
assert ranged_value_to_percentage(range, 1) == 0
async def test_percentage_to_ranged_value_large():
"""Test a large range of low and high values convert a percentage to a single value."""
range = (1, 255)
assert percentage_to_ranged_value(range, 100) == 255
assert percentage_to_ranged_value(range, 50) == 127.5
assert percentage_to_ranged_value(range, 4) == 10.2
assert math.ceil(percentage_to_ranged_value(range, 100)) == 255
assert math.ceil(percentage_to_ranged_value(range, 50)) == 128
assert math.ceil(percentage_to_ranged_value(range, 4)) == 11
async def test_ranged_value_to_percentage_small():
"""Test a small range of low and high values convert a single value to a percentage."""
range = (1, 6)
assert ranged_value_to_percentage(range, 1) == 16
assert ranged_value_to_percentage(range, 2) == 33
assert ranged_value_to_percentage(range, 3) == 50
assert ranged_value_to_percentage(range, 4) == 66
assert ranged_value_to_percentage(range, 5) == 83
assert ranged_value_to_percentage(range, 6) == 100
async def test_percentage_to_ranged_value_small():
"""Test a small range of low and high values convert a percentage to a single value."""
range = (1, 6)
assert math.ceil(percentage_to_ranged_value(range, 16)) == 1
assert math.ceil(percentage_to_ranged_value(range, 33)) == 2
assert math.ceil(percentage_to_ranged_value(range, 50)) == 3
assert math.ceil(percentage_to_ranged_value(range, 66)) == 4
assert math.ceil(percentage_to_ranged_value(range, 83)) == 5
assert math.ceil(percentage_to_ranged_value(range, 100)) == 6
| 47.320755
| 91
| 0.782297
| 1,138
| 7,524
| 4.699473
| 0.081722
| 0.252992
| 0.168287
| 0.180628
| 0.873972
| 0.812079
| 0.741773
| 0.694465
| 0.641548
| 0.572924
| 0
| 0.042537
| 0.14075
| 7,524
| 158
| 92
| 47.620253
| 0.784687
| 0.005715
| 0
| 0.191304
| 0
| 0
| 0.0066
| 0
| 0
| 0
| 0
| 0
| 0.66087
| 1
| 0
| false
| 0
| 0.026087
| 0
| 0.026087
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
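The assertions in the test_percentage.py record above pin down the arithmetic exactly. A sketch of implementations consistent with every asserted value (these are illustrative reconstructions, not necessarily the exact homeassistant.util.percentage source):

import math

def ordered_list_item_to_percentage(ordered_list, item):
    # The i-th of n items maps to i * 100 // n; matches SPEED_LOW -> 33, SPEED_HIGH -> 100.
    if item not in ordered_list:
        raise ValueError(f"{item} is not in {ordered_list}")
    return (ordered_list.index(item) + 1) * 100 // len(ordered_list)

def percentage_to_ordered_list_item(ordered_list, percentage):
    # Inverse bucketing: percentage p selects item ceil(p * n / 100), clamped so
    # that 100.1 still returns the last item; matches 26 -> SPEED_2 for the 4-list.
    if not ordered_list:
        raise ValueError("The ordered list is empty")
    index = max(0, math.ceil(percentage * len(ordered_list) / 100) - 1)
    return ordered_list[min(index, len(ordered_list) - 1)]

def ranged_value_to_percentage(low_high_range, value):
    # (value - low + 1) out of (high - low + 1) states; (1, 255) with 127 -> 49.
    low, high = low_high_range
    return int((value - low + 1) * 100 // (high - low + 1))

def percentage_to_ranged_value(low_high_range, percentage):
    # Scales the state count by the percentage; (1, 255) with 50 -> 127.5.
    low, high = low_high_range
    return (high - low + 1) * percentage / 100 + (low - 1)

# Spot checks against values asserted in the record:
assert ordered_list_item_to_percentage(["low", "medium", "high"], "medium") == 66
assert percentage_to_ranged_value((1, 255), 50) == 127.5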
|
4af7dab9caf53b6556913fbaf39e051af04a105a
| 5,323
|
py
|
Python
|
internal/core/src/index/thirdparty/faiss/tests/test_binary_io.py
|
mfkiwl/milvus
|
c440b9e8a0dcb18bee52d58da303c37a97d20f94
|
[
"Apache-2.0"
] | 10,504
|
2019-09-16T12:20:11.000Z
|
2022-03-31T15:07:56.000Z
|
internal/core/src/index/thirdparty/faiss/tests/test_binary_io.py
|
mfkiwl/milvus
|
c440b9e8a0dcb18bee52d58da303c37a97d20f94
|
[
"Apache-2.0"
] | 13,389
|
2019-09-16T06:49:53.000Z
|
2022-03-31T18:01:24.000Z
|
internal/core/src/index/thirdparty/faiss/tests/test_binary_io.py
|
mfkiwl/milvus
|
c440b9e8a0dcb18bee52d58da303c37a97d20f94
|
[
"Apache-2.0"
] | 1,792
|
2019-09-18T04:27:42.000Z
|
2022-03-31T14:37:20.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Binary indexes (de)serialization"""
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import unittest
import faiss
import os
import tempfile
def make_binary_dataset(d, nb, nt, nq):
assert d % 8 == 0
x = np.random.randint(256, size=(nb + nq + nt, int(d / 8))).astype('uint8')
return x[:nt], x[nt:-nq], x[-nq:]
class TestBinaryFlat(unittest.TestCase):
def __init__(self, *args, **kwargs):
unittest.TestCase.__init__(self, *args, **kwargs)
d = 32
nt = 0
nb = 1500
nq = 500
(_, self.xb, self.xq) = make_binary_dataset(d, nb, nt, nq)
def test_flat(self):
d = self.xq.shape[1] * 8
index = faiss.IndexBinaryFlat(d)
index.add(self.xb)
D, I = index.search(self.xq, 3)
_, tmpnam = tempfile.mkstemp()
try:
faiss.write_index_binary(index, tmpnam)
index2 = faiss.read_index_binary(tmpnam)
D2, I2 = index2.search(self.xq, 3)
assert (I2 == I).all()
assert (D2 == D).all()
finally:
os.remove(tmpnam)
class TestBinaryIVF(unittest.TestCase):
def __init__(self, *args, **kwargs):
unittest.TestCase.__init__(self, *args, **kwargs)
d = 32
nt = 200
nb = 1500
nq = 500
(self.xt, self.xb, self.xq) = make_binary_dataset(d, nb, nt, nq)
def test_ivf_flat(self):
d = self.xq.shape[1] * 8
quantizer = faiss.IndexBinaryFlat(d)
index = faiss.IndexBinaryIVF(quantizer, d, 8)
index.cp.min_points_per_centroid = 5 # quiet warning
index.nprobe = 4
index.train(self.xt)
index.add(self.xb)
D, I = index.search(self.xq, 3)
_, tmpnam = tempfile.mkstemp()
try:
faiss.write_index_binary(index, tmpnam)
index2 = faiss.read_index_binary(tmpnam)
D2, I2 = index2.search(self.xq, 3)
assert (I2 == I).all()
assert (D2 == D).all()
finally:
os.remove(tmpnam)
class TestObjectOwnership(unittest.TestCase):
def __init__(self, *args, **kwargs):
unittest.TestCase.__init__(self, *args, **kwargs)
d = 32
nt = 200
nb = 1500
nq = 500
(self.xt, self.xb, self.xq) = make_binary_dataset(d, nb, nt, nq)
def test_read_index_ownership(self):
d = self.xq.shape[1] * 8
index = faiss.IndexBinaryFlat(d)
index.add(self.xb)
_, tmpnam = tempfile.mkstemp()
try:
faiss.write_index_binary(index, tmpnam)
index2 = faiss.read_index_binary(tmpnam)
assert index2.thisown
finally:
os.remove(tmpnam)
class TestBinaryFromFloat(unittest.TestCase):
def __init__(self, *args, **kwargs):
unittest.TestCase.__init__(self, *args, **kwargs)
d = 32
nt = 200
nb = 1500
nq = 500
(self.xt, self.xb, self.xq) = make_binary_dataset(d, nb, nt, nq)
def test_binary_from_float(self):
d = self.xq.shape[1] * 8
float_index = faiss.IndexHNSWFlat(d, 16)
index = faiss.IndexBinaryFromFloat(float_index)
index.add(self.xb)
D, I = index.search(self.xq, 3)
_, tmpnam = tempfile.mkstemp()
try:
faiss.write_index_binary(index, tmpnam)
index2 = faiss.read_index_binary(tmpnam)
D2, I2 = index2.search(self.xq, 3)
assert (I2 == I).all()
assert (D2 == D).all()
finally:
os.remove(tmpnam)
class TestBinaryHNSW(unittest.TestCase):
def __init__(self, *args, **kwargs):
unittest.TestCase.__init__(self, *args, **kwargs)
d = 32
nt = 200
nb = 1500
nq = 500
(self.xt, self.xb, self.xq) = make_binary_dataset(d, nb, nt, nq)
def test_hnsw(self):
d = self.xq.shape[1] * 8
index = faiss.IndexBinaryHNSW(d)
index.add(self.xb)
D, I = index.search(self.xq, 3)
_, tmpnam = tempfile.mkstemp()
try:
faiss.write_index_binary(index, tmpnam)
index2 = faiss.read_index_binary(tmpnam)
D2, I2 = index2.search(self.xq, 3)
assert (I2 == I).all()
assert (D2 == D).all()
finally:
os.remove(tmpnam)
def test_ivf_hnsw(self):
d = self.xq.shape[1] * 8
quantizer = faiss.IndexBinaryHNSW(d)
index = faiss.IndexBinaryIVF(quantizer, d, 8)
index.cp.min_points_per_centroid = 5 # quiet warning
index.nprobe = 4
index.train(self.xt)
index.add(self.xb)
D, I = index.search(self.xq, 3)
_, tmpnam = tempfile.mkstemp()
try:
faiss.write_index_binary(index, tmpnam)
index2 = faiss.read_index_binary(tmpnam)
D2, I2 = index2.search(self.xq, 3)
assert (I2 == I).all()
assert (D2 == D).all()
finally:
os.remove(tmpnam)
if __name__ == '__main__':
unittest.main()
| 24.417431
| 82
| 0.562841
| 670
| 5,323
| 4.295522
| 0.18806
| 0.04378
| 0.041696
| 0.062543
| 0.773454
| 0.760598
| 0.760598
| 0.746004
| 0.740445
| 0.708478
| 0
| 0.034852
| 0.315424
| 5,323
| 217
| 83
| 24.529954
| 0.75494
| 0.043209
| 0
| 0.78169
| 0
| 0
| 0.002558
| 0
| 0
| 0
| 0
| 0
| 0.084507
| 1
| 0.084507
| false
| 0
| 0.042254
| 0
| 0.169014
| 0.007042
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
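Every test in the test_binary_io.py record above exercises the same write/read round trip. A condensed standalone sketch using the same faiss index type and I/O calls (requires the faiss and numpy packages):

import os
import tempfile

import numpy as np
import faiss

d = 32                                                   # dimension in bits; must be a multiple of 8
xb = np.random.randint(256, size=(1500, d // 8)).astype('uint8')
xq = np.random.randint(256, size=(10, d // 8)).astype('uint8')

index = faiss.IndexBinaryFlat(d)
index.add(xb)
D, I = index.search(xq, 3)

_, tmpnam = tempfile.mkstemp()
try:
    faiss.write_index_binary(index, tmpnam)              # serialize to disk
    index2 = faiss.read_index_binary(tmpnam)             # deserialize
    D2, I2 = index2.search(xq, 3)
    assert (I2 == I).all() and (D2 == D).all()           # identical results after the round trip
finally:
    os.remove(tmpnam)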
|
ab3908d0554cc7de124110afe281d80d6f18c52f
| 822
|
py
|
Python
|
heart_words.py
|
eptyee/heart_words
|
e694a287d50c4aab92de4eb1504fdd1f72cd1f2e
|
[
"MIT"
] | null | null | null |
heart_words.py
|
eptyee/heart_words
|
e694a287d50c4aab92de4eb1504fdd1f72cd1f2e
|
[
"MIT"
] | null | null | null |
heart_words.py
|
eptyee/heart_words
|
e694a287d50c4aab92de4eb1504fdd1f72cd1f2e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Mon May 21 15:59:37 2018
@author: skyligo
"""
import time
words = input('plz input the words:')
for item in words.split():
print('\n'.join([''.join([(item[(x-y)%len(item)] if ((((x*0.05)**2+(y*0.1)**2-1)**3)-((x*0.05)**2)*(y*0.1)**3) <= 0 else ' ')for x in range(-30,30)]) for y in range(15,-15,-1)]))
time.sleep(1)
for item in words.split():
print('\n'.join([''.join([(item[(x-y)%len(item)] if ((((x*0.05)**2+(y*0.1)**2-1)**31)-((x*0.05)**2)*(y*0.1)**31) <= 0 else ' ')for x in range(-30,30)]) for y in range(15,-15,-1)]))
time.sleep(1)
for item in words.split():
print('\n'.join([''.join([(item[(x-y)%len(item)] if ((((x*0.05)**2+(y*0.1)**2-1)**51)-((x*0.05)**2)*(y*0.1)**51) <= 0 else ' ')for x in range(-30,30)]) for y in range(15,-15,-1)]))
time.sleep(1)
| 48.352941
| 184
| 0.525547
| 175
| 822
| 2.468571
| 0.24
| 0.027778
| 0.055556
| 0.069444
| 0.777778
| 0.777778
| 0.777778
| 0.722222
| 0.722222
| 0.722222
| 0
| 0.137063
| 0.13017
| 822
| 17
| 185
| 48.352941
| 0.467133
| 0.092457
| 0
| 0.545455
| 0
| 0
| 0.039242
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.090909
| 0
| 0.090909
| 0.272727
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
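The dense comprehensions in heart_words.py all evaluate the same implicit heart curve. Unpacked for readability, with hypothetical helper names, this sketch is equivalent to the first loop body:

def inside_heart(x, y, power=3):
    # (xs^2 + ys^2 - 1)^power - xs^2 * ys^power <= 0 is the classic implicit heart
    # curve; heart_words.py samples it on a 60x30 character grid with x scaled by
    # 0.05 and y by 0.1, raising `power` (3, 31, 51) to shrink the heart per frame.
    xs, ys = x * 0.05, y * 0.1
    return (xs ** 2 + ys ** 2 - 1) ** power - (xs ** 2) * (ys ** power) <= 0

def render_heart(word, power=3):
    # Tile the word's letters along grid cells that fall inside the curve.
    return '\n'.join(
        ''.join(
            word[(x - y) % len(word)] if inside_heart(x, y, power) else ' '
            for x in range(-30, 30)
        )
        for y in range(15, -15, -1)
    )

print(render_heart('love'))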
|
db952550a12b34853556fa42bba04c823bc7cbe4
| 21
|
py
|
Python
|
PaddleCV/video/models/stnet/__init__.py
|
suytingwan/models
|
ccdbfe77d071cc19b55fb9f4b738912e35d982ef
|
[
"Apache-2.0"
] | 7
|
2020-03-03T03:17:54.000Z
|
2021-11-11T15:53:45.000Z
|
PaddleCV/video/models/stnet/__init__.py
|
suytingwan/models
|
ccdbfe77d071cc19b55fb9f4b738912e35d982ef
|
[
"Apache-2.0"
] | 2
|
2019-06-26T03:21:49.000Z
|
2019-09-19T09:43:42.000Z
|
PaddleCV/video/models/stnet/__init__.py
|
suytingwan/models
|
ccdbfe77d071cc19b55fb9f4b738912e35d982ef
|
[
"Apache-2.0"
] | 6
|
2021-01-06T03:55:54.000Z
|
2021-11-25T03:24:20.000Z
|
from .stnet import *
| 10.5
| 20
| 0.714286
| 3
| 21
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.190476
| 21
| 1
| 21
| 21
| 0.882353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
dbb81e826135a516f99fc503f34ebe32da7ea470
| 29
|
py
|
Python
|
pycq/expando/__init__.py
|
janusko/pycq
|
d58d5d3b3fe1af93f5922abd237e1cdd00cde4af
|
[
"MIT"
] | null | null | null |
pycq/expando/__init__.py
|
janusko/pycq
|
d58d5d3b3fe1af93f5922abd237e1cdd00cde4af
|
[
"MIT"
] | null | null | null |
pycq/expando/__init__.py
|
janusko/pycq
|
d58d5d3b3fe1af93f5922abd237e1cdd00cde4af
|
[
"MIT"
] | null | null | null |
from .expando import Expando
| 14.5
| 28
| 0.827586
| 4
| 29
| 6
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137931
| 29
| 1
| 29
| 29
| 0.96
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
dbd69ef10051bbcac8450a89035236029b6093d0
| 187
|
py
|
Python
|
src/graph_transpiler/webdnn/backend/webgl/kernels/transpose.py
|
steerapi/webdnn
|
1df51cc094e5a528cfd3452c264905708eadb491
|
[
"MIT"
] | 1
|
2018-07-26T13:52:21.000Z
|
2018-07-26T13:52:21.000Z
|
src/graph_transpiler/webdnn/backend/webgl/kernels/transpose.py
|
steerapi/webdnn
|
1df51cc094e5a528cfd3452c264905708eadb491
|
[
"MIT"
] | null | null | null |
src/graph_transpiler/webdnn/backend/webgl/kernels/transpose.py
|
steerapi/webdnn
|
1df51cc094e5a528cfd3452c264905708eadb491
|
[
"MIT"
] | null | null | null |
from webdnn.backend.webgl.kernels.elementwise import register_elementwise_kernel
from webdnn.graph.operators.transpose import Transpose
register_elementwise_kernel(Transpose, "y = x0;")
| 37.4
| 80
| 0.855615
| 23
| 187
| 6.782609
| 0.608696
| 0.128205
| 0.320513
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005747
| 0.069519
| 187
| 4
| 81
| 46.75
| 0.890805
| 0
| 0
| 0
| 0
| 0
| 0.037433
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
91a64d8794395629d35cd4c496a3caa89ab402b9
| 8,081
|
py
|
Python
|
gitlabform/gitlabform/test/test_services.py
|
rbartuzel/gitlabform
|
4027ef4d6bbbef7313ed6fcf07cef8fd1ad76d18
|
[
"MIT"
] | null | null | null |
gitlabform/gitlabform/test/test_services.py
|
rbartuzel/gitlabform
|
4027ef4d6bbbef7313ed6fcf07cef8fd1ad76d18
|
[
"MIT"
] | null | null | null |
gitlabform/gitlabform/test/test_services.py
|
rbartuzel/gitlabform
|
4027ef4d6bbbef7313ed6fcf07cef8fd1ad76d18
|
[
"MIT"
] | null | null | null |
import pytest
from gitlabform.gitlab import GitLab
from gitlabform.gitlabform import GitLabForm
from gitlabform.gitlabform.test import create_group, create_project_in_group, get_gitlab, GROUP_NAME
PROJECT_NAME = 'services_project'
GROUP_AND_PROJECT_NAME = GROUP_NAME + '/' + PROJECT_NAME
@pytest.fixture(scope="function")
def gitlab(request):
create_group(GROUP_NAME)
create_project_in_group(GROUP_NAME, PROJECT_NAME)
gl = get_gitlab()
def fin():
# disable test integrations
for service in ['asana', 'hipchat', 'redmine', 'jira', 'mattermost']:
gl.delete_service(GROUP_AND_PROJECT_NAME, service)
request.addfinalizer(fin)
return gl # provide fixture value
config_service_push_events_true = """
gitlab:
api_version: 4
project_settings:
gitlabform_tests_group/services_project:
services:
asana:
api_key: foo
push_events: true
hipchat:
token: foobar
push_events: true
redmine:
new_issue_url: http://foo.bar.com
project_url: http://foo.bar.com
issues_url: http://foo.bar.com
push_events: true
"""
config_service_push_events_false = """
gitlab:
api_version: 4
project_settings:
gitlabform_tests_group/services_project:
services:
asana:
api_key: foo
push_events: false # changed
hipchat:
token: foobar
push_events: false # changed
redmine:
new_issue_url: http://foo.bar.com
project_url: http://foo.bar.com
issues_url: http://foo.bar.com
push_events: false # changed
"""
config_service_jira_commit_events_true = """
gitlab:
api_version: 4
project_settings:
gitlabform_tests_group/services_project:
services:
jira:
url: http://foo.bar.com
username: foo
password: bar
active: true
commit_events: true
"""
config_service_jira_commit_events_false = """
gitlab:
api_version: 4
project_settings:
gitlabform_tests_group/services_project:
services:
jira:
url: http://foo.bar.com
username: foo
password: bar
active: true
commit_events: false
"""
config_service_jira_delete = """
gitlab:
api_version: 4
project_settings:
gitlabform_tests_group/services_project:
services:
jira:
delete: true
"""
config_service_mattermost_confidential_issues_events = """
gitlab:
api_version: 4
project_settings:
gitlabform_tests_group/services_project:
services:
mattermost:
active: true
webhook: https://mattermost.com/hooks/xxx
username: gitlab
merge_requests_events: true
merge_request_channel: "merge-requests"
push_events: false
issues_events: false
confidential_issues_events: false # this was not supposed to work according to #70
tag_push_events: false
note_events: false
confidential_note_events: false
pipeline_events: false
wiki_page_events: false
branches_to_be_notified: "all"
"""
class TestServices:
def test__if_they_are_not_set_by_default(self, gitlab):
services = []
for service_name in ['asana', 'hipchat', 'redmine']:
service = gitlab.get_service(GROUP_AND_PROJECT_NAME, service_name)
services.append(service)
assert not any([service['active'] for service in services]) is True
def test__if_push_events_true_works(self, gitlab: GitLab):
gf = GitLabForm(config_string=config_service_push_events_true, project_or_group=GROUP_AND_PROJECT_NAME)
gf.main()
services = []
for service_name in ['asana', 'hipchat', 'redmine']:
service = gitlab.get_service(GROUP_AND_PROJECT_NAME, service_name)
services.append(service)
assert all([service['active'] for service in services]) is True
assert all([service['push_events'] for service in services]) is True
def test__if_push_events_false_works(self, gitlab: GitLab):
gf = GitLabForm(config_string=config_service_push_events_false, project_or_group=GROUP_AND_PROJECT_NAME)
gf.main()
services = []
for service_name in ['asana', 'hipchat', 'redmine']:
service = gitlab.get_service(GROUP_AND_PROJECT_NAME, service_name)
services.append(service)
assert all([service['active'] for service in services]) is True
assert all([service['push_events'] for service in services]) is False
def test__if_push_events_change_works(self, gitlab: GitLab):
gf = GitLabForm(config_string=config_service_push_events_true, project_or_group=GROUP_AND_PROJECT_NAME)
gf.main()
services = []
for service_name in ['asana', 'hipchat', 'redmine']:
service = gitlab.get_service(GROUP_AND_PROJECT_NAME, service_name)
services.append(service)
assert all([service['active'] for service in services]) is True
assert all([service['push_events'] for service in services]) is True
gf = GitLabForm(config_string=config_service_push_events_false, project_or_group=GROUP_AND_PROJECT_NAME)
gf.main()
services = []
for service_name in ['asana', 'hipchat', 'redmine']:
service = gitlab.get_service(GROUP_AND_PROJECT_NAME, service_name)
services.append(service)
assert all([service['active'] for service in services]) is True
assert all([service['push_events'] for service in services]) is False
def test__if_jira_is_not_active_by_default(self, gitlab):
service = gitlab.get_service(GROUP_AND_PROJECT_NAME, 'jira')
assert service['active'] is False
def test__if_jira_commit_events_true_works(self, gitlab: GitLab):
gf = GitLabForm(config_string=config_service_jira_commit_events_true, project_or_group=GROUP_AND_PROJECT_NAME)
gf.main()
service = gitlab.get_service(GROUP_AND_PROJECT_NAME, 'jira')
assert service['active'] is True
assert service['commit_events'] is True
def test__if_jira_commit_events_false_works(self, gitlab: GitLab):
gf = GitLabForm(config_string=config_service_jira_commit_events_false, project_or_group=GROUP_AND_PROJECT_NAME)
gf.main()
service = gitlab.get_service(GROUP_AND_PROJECT_NAME, 'jira')
assert service['active'] is True
assert service['commit_events'] is False
def test__if_change_works(self, gitlab: GitLab):
gf = GitLabForm(config_string=config_service_jira_commit_events_true, project_or_group=GROUP_AND_PROJECT_NAME)
gf.main()
service = gitlab.get_service(GROUP_AND_PROJECT_NAME, 'jira')
assert service['active'] is True
assert service['commit_events'] is True
gf = GitLabForm(config_string=config_service_jira_commit_events_false, project_or_group=GROUP_AND_PROJECT_NAME)
gf.main()
service = gitlab.get_service(GROUP_AND_PROJECT_NAME, 'jira')
assert service['active'] is True
assert service['commit_events'] is False
def test__if_delete_works(self, gitlab: GitLab):
gf = GitLabForm(config_string=config_service_jira_commit_events_true, project_or_group=GROUP_AND_PROJECT_NAME)
gf.main()
service = gitlab.get_service(GROUP_AND_PROJECT_NAME, 'jira')
assert service['active'] is True
assert service['commit_events'] is True
gf = GitLabForm(config_string=config_service_jira_delete, project_or_group=GROUP_AND_PROJECT_NAME)
gf.main()
service = gitlab.get_service(GROUP_AND_PROJECT_NAME, 'jira')
assert service['active'] is False
def test__mattermost_confidential_issues_events(self, gitlab: GitLab):
gf = GitLabForm(config_string=config_service_mattermost_confidential_issues_events,
project_or_group=GROUP_AND_PROJECT_NAME)
gf.main()
service = gitlab.get_service(GROUP_AND_PROJECT_NAME, 'mattermost')
assert service['confidential_issues_events'] is False
| 33.392562
| 119
| 0.696696
| 1,012
| 8,081
| 5.200593
| 0.103755
| 0.060612
| 0.074102
| 0.093863
| 0.790804
| 0.771803
| 0.734752
| 0.734752
| 0.732282
| 0.722212
| 0
| 0.001267
| 0.218414
| 8,081
| 241
| 120
| 33.53112
| 0.832014
| 0.005816
| 0
| 0.709845
| 0
| 0
| 0.306438
| 0.048064
| 0
| 0
| 0
| 0
| 0.11399
| 1
| 0.062176
| false
| 0.010363
| 0.020725
| 0
| 0.093264
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
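The gitlab fixture in the test_services.py record above tears down with the older request.addfinalizer style. An equivalent yield-style pytest fixture, sketched with the record's own helpers and constants (assuming they are importable exactly as in the original file):

import pytest
from gitlabform.gitlabform.test import create_group, create_project_in_group, get_gitlab, GROUP_NAME

PROJECT_NAME = 'services_project'
GROUP_AND_PROJECT_NAME = GROUP_NAME + '/' + PROJECT_NAME

@pytest.fixture(scope="function")
def gitlab():
    create_group(GROUP_NAME)
    create_project_in_group(GROUP_NAME, PROJECT_NAME)
    gl = get_gitlab()
    yield gl  # provide fixture value
    # teardown: code after the yield replaces the fin() finalizer
    for service in ['asana', 'hipchat', 'redmine', 'jira', 'mattermost']:
        gl.delete_service(GROUP_AND_PROJECT_NAME, service)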
|
91b57ca2501c28afea35dffe3ac8bd4df2b2d83e
| 6,013
|
py
|
Python
|
profootballref/Tools/Rushhash.py
|
gVkWY8NJAa/ProFootballRef
|
29c4333d278f8f9dc2c5f13c08387b5b9374037f
|
[
"MIT"
] | 1
|
2020-06-09T16:31:45.000Z
|
2020-06-09T16:31:45.000Z
|
profootballref/Tools/Rushhash.py
|
gVkWY8NJAa/ProFootballRef
|
29c4333d278f8f9dc2c5f13c08387b5b9374037f
|
[
"MIT"
] | 19
|
2018-04-15T06:01:29.000Z
|
2020-01-07T00:20:40.000Z
|
profootballref/Tools/Rushhash.py
|
gVkWY8NJAa/ProFootballRef
|
29c4333d278f8f9dc2c5f13c08387b5b9374037f
|
[
"MIT"
] | null | null | null |
import pandas as pd
class RushHash:
def __init__(self):
        # Combinations of header labels
self.base = ['Rk', 'Date', 'G#', 'Age', 'Tm', 'Home', 'Opp', 'Result', 'GS']
self.receiving = ['Rec_Tgt', 'Rec_Rec', 'Rec_Yds', 'Rec_Y/R', 'Rec_TD', 'Rec_Ctch%', 'Rec_Y/Tgt']
self.rushing = ['rush_att', 'rush_yds', 'rush_Y/A', 'rush_TD']
self.passing = ['pass_cmp', 'pass_att', 'Cmp%', 'pass_yds', 'pass_td', 'Int', 'Rate', 'Sk', 'Sk-Yds',
'pass_Y/A', 'AY/A']
self.rush_sk = ['rush_sk', 'tkl', 'Ast']
self.scoring2p = ['2pt']
self.scoring = ['Any_TD', 'Any_Pts']
self.punting = ['Pnt', 'Pnt_Yds', 'Y/P', 'Blck']
self.kick_rt = ['Kick_Rt', 'Kick_RtYds', 'Y/Rt', 'Kick_TD']
self.punt_rt = ['Pnt_rt', 'Pnt_Yds', 'Y/Pnt', 'Pnt_TD']
def md5c3695be2dd2fa9307301dccf047b4e86(self, df):
# Rename columns
df.columns = self.base + self.rushing + self.receiving + self.kick_rt + self.scoring2p + self.scoring
# add missing cols
df = pd.concat([df, pd.DataFrame(columns=self.punt_rt)], axis=1)
# set all the new columns to zero
df.loc[:, self.punt_rt] = 0
return df
def md57f97f3885d50fcf9b92797810856a89f(self, df):
# Rename columns
df.columns = self.base + self.rushing + self.receiving + self.scoring
# add missing cols
df = pd.concat([df, pd.DataFrame(columns=self.kick_rt + self.punt_rt + self.scoring2p)], axis=1)
# set all the new columns to zero
df.loc[:, self.kick_rt + self.punt_rt + self.scoring2p] = 0
return df
def md5aa321161d6f3f5230259dbc4ae67299a(self,df):
# Rename columns
df.columns = self.base + self.rushing + self.receiving + self.scoring + self.rush_sk
# add missing cols
df = pd.concat([df, pd.DataFrame(columns=self.kick_rt + self.punt_rt + self.scoring2p)], axis=1)
# set all the new columns to zero
df.loc[:, self.kick_rt + self.punt_rt + self.scoring2p] = 0
return df
def md59c11c15180efbf7aec4300fc190cd3a5(self, df):
# Rename columns
df.columns = self.base + self.rushing + self.receiving + self.passing + self.scoring2p + self.scoring
# add missing cols
df = pd.concat([df, pd.DataFrame(columns=self.kick_rt + self.punt_rt)], axis=1)
# set all the new columns to zero
df.loc[:, self.kick_rt + self.punt_rt] = 0
return df
def md5ad9a12e06546e3019128fec57cdc9d0e(self,df):
# Rename columns
df.columns = self.base + self.rushing + self.receiving + self.scoring2p + self.scoring + self.rush_sk
# add missing cols
df = pd.concat([df, pd.DataFrame(columns=self.kick_rt + self.punt_rt)], axis=1)
# set all the new columns to zero
df.loc[:, self.kick_rt + self.punt_rt] = 0
return df
def md500f83a7c4b3e891e3c448db700cc9ada(self,df):
# Rename columns
df.columns = self.base + self.rushing + self.receiving + self.scoring2p + self.scoring
# add missing cols
df = pd.concat([df, pd.DataFrame(columns=self.kick_rt + self.punt_rt)], axis=1)
# set all the new columns to zero
df.loc[:, self.kick_rt + self.punt_rt] = 0
return df
def md55980508dab2f61013bd07809c5ca0e41(self,df):
# Rename columns
df.columns = self.base + self.rushing + self.receiving
# add missing cols
df = pd.concat([df, pd.DataFrame(columns=self.scoring2p + self.scoring + self.kick_rt + self.punt_rt)], axis=1)
# set all the new columns to zero
df.loc[:, self.scoring2p + self.scoring + self.kick_rt + self.punt_rt] = 0
return df
def md5c35b37a5f0f696bfd1576753faffe81c(self,df):
# Rename columns
df.columns = self.base + self.rushing + self.receiving + self.passing + self.scoring + self.rush_sk
# add missing cols
df = pd.concat([df, pd.DataFrame(columns=self.scoring2p + self.kick_rt + self.punt_rt)], axis=1)
# set all the new columns to zero
df.loc[:, self.scoring2p + self.kick_rt + self.punt_rt] = 0
return df
def md5aed81e3e77b9842532b5efa73458a259(self,df):
# Rename columns
df.columns = self.base + self.rushing + self.receiving + self.kick_rt + self.scoring + self.rush_sk
# add missing cols
df = pd.concat([df, pd.DataFrame(columns=self.scoring2p + self.punt_rt)], axis=1)
# set all the new columns to zero
df.loc[:, self.scoring2p + self.punt_rt] = 0
return df
def md57d21a9a4ab9adde626d633fbd62db5c0(self,df):
# Rename columns
df.columns = self.base + self.rushing + self.receiving + self.passing + self.scoring2p + self.scoring + self.rush_sk
# add missing cols
df = pd.concat([df, pd.DataFrame(columns=self.punt_rt + self.kick_rt)], axis=1)
# set all the new columns to zero
df.loc[:, self.punt_rt + self.kick_rt] = 0
return df
def md591138c3c08c339b71b8323e2bac3aac7(self,df):
# Rename columns
df.columns = self.base + self.rushing + self.scoring
# add missing cols
df = pd.concat([df, pd.DataFrame(columns=self.punt_rt + self.receiving + self.kick_rt + self.scoring2p)],
axis=1)
# set all the new columns to zero
df.loc[:, self.punt_rt + self.kick_rt + self.receiving + self.scoring2p] = 0
return df
def md5ddcb0610869ff21799f008209ac6d229(self, df):
# Rename columns
df.columns = self.base + self.rushing + self.receiving + self.scoring
# add missing cols
df = pd.concat([df, pd.DataFrame(columns=self.punt_rt + self.kick_rt + self.scoring2p)],
axis=1)
# set all the new columns to zero
df.loc[:, self.punt_rt + self.kick_rt + self.scoring2p] = 0
return df
| 36.442424
| 124
| 0.614502
| 805
| 6,013
| 4.485714
| 0.109317
| 0.049848
| 0.069233
| 0.077541
| 0.760454
| 0.757131
| 0.747715
| 0.736915
| 0.734423
| 0.734423
| 0
| 0.066276
| 0.262265
| 6,013
| 164
| 125
| 36.664634
| 0.747746
| 0.137535
| 0
| 0.342105
| 0
| 0
| 0.052652
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.171053
| false
| 0.065789
| 0.013158
| 0
| 0.355263
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
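The md5-prefixed method names in the Rushhash.py record above suggest dispatch on a fingerprint of the scraped header row. A hedged sketch of how such a lookup might work (the hashing scheme and dispatch mechanics here are assumptions for illustration, not confirmed by the record):

import hashlib
import pandas as pd

def normalize(df: pd.DataFrame, rush_hash: "RushHash") -> pd.DataFrame:
    # Hypothetical dispatch: fingerprint the raw column labels, then call the
    # RushHash method named "md5<digest>" that knows that header layout.
    digest = hashlib.md5(''.join(map(str, df.columns)).encode()).hexdigest()
    handler = getattr(rush_hash, 'md5' + digest, None)
    if handler is None:
        raise KeyError(f"no column mapping registered for header hash {digest}")
    return handler(df)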
|
91c605d7aee8f3c91a8c42e1a4aebddf8da9e09a
| 23
|
py
|
Python
|
main_train.py
|
yyu1/SurfaceNet
|
e59cf56d55d1be7295322d5a0f4a2aa244316d86
|
[
"MIT"
] | 117
|
2017-08-08T07:25:16.000Z
|
2022-01-30T02:41:11.000Z
|
main_train.py
|
yyu1/SurfaceNet
|
e59cf56d55d1be7295322d5a0f4a2aa244316d86
|
[
"MIT"
] | 8
|
2017-10-24T11:48:30.000Z
|
2020-10-31T10:45:39.000Z
|
main_train.py
|
yyu1/SurfaceNet
|
e59cf56d55d1be7295322d5a0f4a2aa244316d86
|
[
"MIT"
] | 35
|
2017-08-08T10:44:21.000Z
|
2022-02-13T13:18:35.000Z
|
def train():
pass
| 5.75
| 12
| 0.521739
| 3
| 23
| 4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.347826
| 23
| 3
| 13
| 7.666667
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
37d630195dee03a88620e33d957309ad6abd5b6d
| 38,171
|
py
|
Python
|
nemo/scripts/decoders/swig_decoders.py
|
ducnguyenhuynh/Sen-VoiceAssistant
|
c9eba093cf350629550356a5ec7d3d521bfaa5f2
|
[
"MIT"
] | 6
|
2020-05-18T22:38:58.000Z
|
2021-05-24T18:09:27.000Z
|
nemo/scripts/decoders/swig_decoders.py
|
ducnguyenhuynh/Sen-VoiceAssistant
|
c9eba093cf350629550356a5ec7d3d521bfaa5f2
|
[
"MIT"
] | null | null | null |
nemo/scripts/decoders/swig_decoders.py
|
ducnguyenhuynh/Sen-VoiceAssistant
|
c9eba093cf350629550356a5ec7d3d521bfaa5f2
|
[
"MIT"
] | 1
|
2020-07-15T09:34:01.000Z
|
2020-07-15T09:34:01.000Z
|
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 4.0.1
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info as _swig_python_version_info
if _swig_python_version_info < (2, 7, 0):
raise RuntimeError("Python 2.7 or later required")
# Import the low-level C/C++ module
if __package__ or "." in __name__:
from . import _swig_decoders
else:
import _swig_decoders
try:
import builtins as __builtin__
except ImportError:
import __builtin__
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except __builtin__.Exception:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
def _swig_setattr_nondynamic_instance_variable(set):
def set_instance_attr(self, name, value):
if name == "thisown":
self.this.own(value)
elif name == "this":
set(self, name, value)
elif hasattr(self, name) and isinstance(getattr(type(self), name), property):
set(self, name, value)
else:
raise AttributeError("You cannot add instance attributes to %s" % self)
return set_instance_attr
def _swig_setattr_nondynamic_class_variable(set):
def set_class_attr(cls, name, value):
if hasattr(cls, name) and not isinstance(getattr(cls, name), property):
set(cls, name, value)
else:
raise AttributeError("You cannot add class attributes to %s" % cls)
return set_class_attr
def _swig_add_metaclass(metaclass):
"""Class decorator for adding a metaclass to a SWIG wrapped class - a slimmed down version of six.add_metaclass"""
def wrapper(cls):
return metaclass(cls.__name__, cls.__bases__, cls.__dict__.copy())
return wrapper
class _SwigNonDynamicMeta(type):
"""Meta class to enforce nondynamic attributes (no new attributes) for a class"""
__setattr__ = _swig_setattr_nondynamic_class_variable(type.__setattr__)
class SwigPyIterator(object):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined - class is abstract")
__repr__ = _swig_repr
__swig_destroy__ = _swig_decoders.delete_SwigPyIterator
def value(self):
return _swig_decoders.SwigPyIterator_value(self)
def incr(self, n=1):
return _swig_decoders.SwigPyIterator_incr(self, n)
def decr(self, n=1):
return _swig_decoders.SwigPyIterator_decr(self, n)
def distance(self, x):
return _swig_decoders.SwigPyIterator_distance(self, x)
def equal(self, x):
return _swig_decoders.SwigPyIterator_equal(self, x)
def copy(self):
return _swig_decoders.SwigPyIterator_copy(self)
def next(self):
return _swig_decoders.SwigPyIterator_next(self)
def __next__(self):
return _swig_decoders.SwigPyIterator___next__(self)
def previous(self):
return _swig_decoders.SwigPyIterator_previous(self)
def advance(self, n):
return _swig_decoders.SwigPyIterator_advance(self, n)
def __eq__(self, x):
return _swig_decoders.SwigPyIterator___eq__(self, x)
def __ne__(self, x):
return _swig_decoders.SwigPyIterator___ne__(self, x)
def __iadd__(self, n):
return _swig_decoders.SwigPyIterator___iadd__(self, n)
def __isub__(self, n):
return _swig_decoders.SwigPyIterator___isub__(self, n)
def __add__(self, n):
return _swig_decoders.SwigPyIterator___add__(self, n)
def __sub__(self, *args):
return _swig_decoders.SwigPyIterator___sub__(self, *args)
def __iter__(self):
return self
# Register SwigPyIterator in _swig_decoders:
_swig_decoders.SwigPyIterator_swigregister(SwigPyIterator)
class DoubleVector(object):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def iterator(self):
return _swig_decoders.DoubleVector_iterator(self)
def __iter__(self):
return self.iterator()
def __nonzero__(self):
return _swig_decoders.DoubleVector___nonzero__(self)
def __bool__(self):
return _swig_decoders.DoubleVector___bool__(self)
def __len__(self):
return _swig_decoders.DoubleVector___len__(self)
def __getslice__(self, i, j):
return _swig_decoders.DoubleVector___getslice__(self, i, j)
def __setslice__(self, *args):
return _swig_decoders.DoubleVector___setslice__(self, *args)
def __delslice__(self, i, j):
return _swig_decoders.DoubleVector___delslice__(self, i, j)
def __delitem__(self, *args):
return _swig_decoders.DoubleVector___delitem__(self, *args)
def __getitem__(self, *args):
return _swig_decoders.DoubleVector___getitem__(self, *args)
def __setitem__(self, *args):
return _swig_decoders.DoubleVector___setitem__(self, *args)
def pop(self):
return _swig_decoders.DoubleVector_pop(self)
def append(self, x):
return _swig_decoders.DoubleVector_append(self, x)
def empty(self):
return _swig_decoders.DoubleVector_empty(self)
def size(self):
return _swig_decoders.DoubleVector_size(self)
def swap(self, v):
return _swig_decoders.DoubleVector_swap(self, v)
def begin(self):
return _swig_decoders.DoubleVector_begin(self)
def end(self):
return _swig_decoders.DoubleVector_end(self)
def rbegin(self):
return _swig_decoders.DoubleVector_rbegin(self)
def rend(self):
return _swig_decoders.DoubleVector_rend(self)
def clear(self):
return _swig_decoders.DoubleVector_clear(self)
def get_allocator(self):
return _swig_decoders.DoubleVector_get_allocator(self)
def pop_back(self):
return _swig_decoders.DoubleVector_pop_back(self)
def erase(self, *args):
return _swig_decoders.DoubleVector_erase(self, *args)
def __init__(self, *args):
_swig_decoders.DoubleVector_swiginit(self, _swig_decoders.new_DoubleVector(*args))
def push_back(self, x):
return _swig_decoders.DoubleVector_push_back(self, x)
def front(self):
return _swig_decoders.DoubleVector_front(self)
def back(self):
return _swig_decoders.DoubleVector_back(self)
def assign(self, n, x):
return _swig_decoders.DoubleVector_assign(self, n, x)
def resize(self, *args):
return _swig_decoders.DoubleVector_resize(self, *args)
def insert(self, *args):
return _swig_decoders.DoubleVector_insert(self, *args)
def reserve(self, n):
return _swig_decoders.DoubleVector_reserve(self, n)
def capacity(self):
return _swig_decoders.DoubleVector_capacity(self)
__swig_destroy__ = _swig_decoders.delete_DoubleVector
# Register DoubleVector in _swig_decoders:
_swig_decoders.DoubleVector_swigregister(DoubleVector)
class IntVector(object):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def iterator(self):
return _swig_decoders.IntVector_iterator(self)
def __iter__(self):
return self.iterator()
def __nonzero__(self):
return _swig_decoders.IntVector___nonzero__(self)
def __bool__(self):
return _swig_decoders.IntVector___bool__(self)
def __len__(self):
return _swig_decoders.IntVector___len__(self)
def __getslice__(self, i, j):
return _swig_decoders.IntVector___getslice__(self, i, j)
def __setslice__(self, *args):
return _swig_decoders.IntVector___setslice__(self, *args)
def __delslice__(self, i, j):
return _swig_decoders.IntVector___delslice__(self, i, j)
def __delitem__(self, *args):
return _swig_decoders.IntVector___delitem__(self, *args)
def __getitem__(self, *args):
return _swig_decoders.IntVector___getitem__(self, *args)
def __setitem__(self, *args):
return _swig_decoders.IntVector___setitem__(self, *args)
def pop(self):
return _swig_decoders.IntVector_pop(self)
def append(self, x):
return _swig_decoders.IntVector_append(self, x)
def empty(self):
return _swig_decoders.IntVector_empty(self)
def size(self):
return _swig_decoders.IntVector_size(self)
def swap(self, v):
return _swig_decoders.IntVector_swap(self, v)
def begin(self):
return _swig_decoders.IntVector_begin(self)
def end(self):
return _swig_decoders.IntVector_end(self)
def rbegin(self):
return _swig_decoders.IntVector_rbegin(self)
def rend(self):
return _swig_decoders.IntVector_rend(self)
def clear(self):
return _swig_decoders.IntVector_clear(self)
def get_allocator(self):
return _swig_decoders.IntVector_get_allocator(self)
def pop_back(self):
return _swig_decoders.IntVector_pop_back(self)
def erase(self, *args):
return _swig_decoders.IntVector_erase(self, *args)
def __init__(self, *args):
_swig_decoders.IntVector_swiginit(self, _swig_decoders.new_IntVector(*args))
def push_back(self, x):
return _swig_decoders.IntVector_push_back(self, x)
def front(self):
return _swig_decoders.IntVector_front(self)
def back(self):
return _swig_decoders.IntVector_back(self)
def assign(self, n, x):
return _swig_decoders.IntVector_assign(self, n, x)
def resize(self, *args):
return _swig_decoders.IntVector_resize(self, *args)
def insert(self, *args):
return _swig_decoders.IntVector_insert(self, *args)
def reserve(self, n):
return _swig_decoders.IntVector_reserve(self, n)
def capacity(self):
return _swig_decoders.IntVector_capacity(self)
__swig_destroy__ = _swig_decoders.delete_IntVector
# Register IntVector in _swig_decoders:
_swig_decoders.IntVector_swigregister(IntVector)
class StringVector(object):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def iterator(self):
return _swig_decoders.StringVector_iterator(self)
def __iter__(self):
return self.iterator()
def __nonzero__(self):
return _swig_decoders.StringVector___nonzero__(self)
def __bool__(self):
return _swig_decoders.StringVector___bool__(self)
def __len__(self):
return _swig_decoders.StringVector___len__(self)
def __getslice__(self, i, j):
return _swig_decoders.StringVector___getslice__(self, i, j)
def __setslice__(self, *args):
return _swig_decoders.StringVector___setslice__(self, *args)
def __delslice__(self, i, j):
return _swig_decoders.StringVector___delslice__(self, i, j)
def __delitem__(self, *args):
return _swig_decoders.StringVector___delitem__(self, *args)
def __getitem__(self, *args):
return _swig_decoders.StringVector___getitem__(self, *args)
def __setitem__(self, *args):
return _swig_decoders.StringVector___setitem__(self, *args)
def pop(self):
return _swig_decoders.StringVector_pop(self)
def append(self, x):
return _swig_decoders.StringVector_append(self, x)
def empty(self):
return _swig_decoders.StringVector_empty(self)
def size(self):
return _swig_decoders.StringVector_size(self)
def swap(self, v):
return _swig_decoders.StringVector_swap(self, v)
def begin(self):
return _swig_decoders.StringVector_begin(self)
def end(self):
return _swig_decoders.StringVector_end(self)
def rbegin(self):
return _swig_decoders.StringVector_rbegin(self)
def rend(self):
return _swig_decoders.StringVector_rend(self)
def clear(self):
return _swig_decoders.StringVector_clear(self)
def get_allocator(self):
return _swig_decoders.StringVector_get_allocator(self)
def pop_back(self):
return _swig_decoders.StringVector_pop_back(self)
def erase(self, *args):
return _swig_decoders.StringVector_erase(self, *args)
def __init__(self, *args):
_swig_decoders.StringVector_swiginit(self, _swig_decoders.new_StringVector(*args))
def push_back(self, x):
return _swig_decoders.StringVector_push_back(self, x)
def front(self):
return _swig_decoders.StringVector_front(self)
def back(self):
return _swig_decoders.StringVector_back(self)
def assign(self, n, x):
return _swig_decoders.StringVector_assign(self, n, x)
def resize(self, *args):
return _swig_decoders.StringVector_resize(self, *args)
def insert(self, *args):
return _swig_decoders.StringVector_insert(self, *args)
def reserve(self, n):
return _swig_decoders.StringVector_reserve(self, n)
def capacity(self):
return _swig_decoders.StringVector_capacity(self)
__swig_destroy__ = _swig_decoders.delete_StringVector
# Register StringVector in _swig_decoders:
_swig_decoders.StringVector_swigregister(StringVector)
class VectorOfStructVector(object):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def iterator(self):
return _swig_decoders.VectorOfStructVector_iterator(self)
def __iter__(self):
return self.iterator()
def __nonzero__(self):
return _swig_decoders.VectorOfStructVector___nonzero__(self)
def __bool__(self):
return _swig_decoders.VectorOfStructVector___bool__(self)
def __len__(self):
return _swig_decoders.VectorOfStructVector___len__(self)
def __getslice__(self, i, j):
return _swig_decoders.VectorOfStructVector___getslice__(self, i, j)
def __setslice__(self, *args):
return _swig_decoders.VectorOfStructVector___setslice__(self, *args)
def __delslice__(self, i, j):
return _swig_decoders.VectorOfStructVector___delslice__(self, i, j)
def __delitem__(self, *args):
return _swig_decoders.VectorOfStructVector___delitem__(self, *args)
def __getitem__(self, *args):
return _swig_decoders.VectorOfStructVector___getitem__(self, *args)
def __setitem__(self, *args):
return _swig_decoders.VectorOfStructVector___setitem__(self, *args)
def pop(self):
return _swig_decoders.VectorOfStructVector_pop(self)
def append(self, x):
return _swig_decoders.VectorOfStructVector_append(self, x)
def empty(self):
return _swig_decoders.VectorOfStructVector_empty(self)
def size(self):
return _swig_decoders.VectorOfStructVector_size(self)
def swap(self, v):
return _swig_decoders.VectorOfStructVector_swap(self, v)
def begin(self):
return _swig_decoders.VectorOfStructVector_begin(self)
def end(self):
return _swig_decoders.VectorOfStructVector_end(self)
def rbegin(self):
return _swig_decoders.VectorOfStructVector_rbegin(self)
def rend(self):
return _swig_decoders.VectorOfStructVector_rend(self)
def clear(self):
return _swig_decoders.VectorOfStructVector_clear(self)
def get_allocator(self):
return _swig_decoders.VectorOfStructVector_get_allocator(self)
def pop_back(self):
return _swig_decoders.VectorOfStructVector_pop_back(self)
def erase(self, *args):
return _swig_decoders.VectorOfStructVector_erase(self, *args)
def __init__(self, *args):
_swig_decoders.VectorOfStructVector_swiginit(self, _swig_decoders.new_VectorOfStructVector(*args))
def push_back(self, x):
return _swig_decoders.VectorOfStructVector_push_back(self, x)
def front(self):
return _swig_decoders.VectorOfStructVector_front(self)
def back(self):
return _swig_decoders.VectorOfStructVector_back(self)
def assign(self, n, x):
return _swig_decoders.VectorOfStructVector_assign(self, n, x)
def resize(self, *args):
return _swig_decoders.VectorOfStructVector_resize(self, *args)
def insert(self, *args):
return _swig_decoders.VectorOfStructVector_insert(self, *args)
def reserve(self, n):
return _swig_decoders.VectorOfStructVector_reserve(self, n)
def capacity(self):
return _swig_decoders.VectorOfStructVector_capacity(self)
__swig_destroy__ = _swig_decoders.delete_VectorOfStructVector
# Register VectorOfStructVector in _swig_decoders:
_swig_decoders.VectorOfStructVector_swigregister(VectorOfStructVector)
class FloatVector(object):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def iterator(self):
return _swig_decoders.FloatVector_iterator(self)
def __iter__(self):
return self.iterator()
def __nonzero__(self):
return _swig_decoders.FloatVector___nonzero__(self)
def __bool__(self):
return _swig_decoders.FloatVector___bool__(self)
def __len__(self):
return _swig_decoders.FloatVector___len__(self)
def __getslice__(self, i, j):
return _swig_decoders.FloatVector___getslice__(self, i, j)
def __setslice__(self, *args):
return _swig_decoders.FloatVector___setslice__(self, *args)
def __delslice__(self, i, j):
return _swig_decoders.FloatVector___delslice__(self, i, j)
def __delitem__(self, *args):
return _swig_decoders.FloatVector___delitem__(self, *args)
def __getitem__(self, *args):
return _swig_decoders.FloatVector___getitem__(self, *args)
def __setitem__(self, *args):
return _swig_decoders.FloatVector___setitem__(self, *args)
def pop(self):
return _swig_decoders.FloatVector_pop(self)
def append(self, x):
return _swig_decoders.FloatVector_append(self, x)
def empty(self):
return _swig_decoders.FloatVector_empty(self)
def size(self):
return _swig_decoders.FloatVector_size(self)
def swap(self, v):
return _swig_decoders.FloatVector_swap(self, v)
def begin(self):
return _swig_decoders.FloatVector_begin(self)
def end(self):
return _swig_decoders.FloatVector_end(self)
def rbegin(self):
return _swig_decoders.FloatVector_rbegin(self)
def rend(self):
return _swig_decoders.FloatVector_rend(self)
def clear(self):
return _swig_decoders.FloatVector_clear(self)
def get_allocator(self):
return _swig_decoders.FloatVector_get_allocator(self)
def pop_back(self):
return _swig_decoders.FloatVector_pop_back(self)
def erase(self, *args):
return _swig_decoders.FloatVector_erase(self, *args)
def __init__(self, *args):
_swig_decoders.FloatVector_swiginit(self, _swig_decoders.new_FloatVector(*args))
def push_back(self, x):
return _swig_decoders.FloatVector_push_back(self, x)
def front(self):
return _swig_decoders.FloatVector_front(self)
def back(self):
return _swig_decoders.FloatVector_back(self)
def assign(self, n, x):
return _swig_decoders.FloatVector_assign(self, n, x)
def resize(self, *args):
return _swig_decoders.FloatVector_resize(self, *args)
def insert(self, *args):
return _swig_decoders.FloatVector_insert(self, *args)
def reserve(self, n):
return _swig_decoders.FloatVector_reserve(self, n)
def capacity(self):
return _swig_decoders.FloatVector_capacity(self)
__swig_destroy__ = _swig_decoders.delete_FloatVector
# Register FloatVector in _swig_decoders:
_swig_decoders.FloatVector_swigregister(FloatVector)
class Pair(object):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, *args):
_swig_decoders.Pair_swiginit(self, _swig_decoders.new_Pair(*args))
first = property(_swig_decoders.Pair_first_get, _swig_decoders.Pair_first_set)
second = property(_swig_decoders.Pair_second_get, _swig_decoders.Pair_second_set)
def __len__(self):
return 2
def __repr__(self):
return str((self.first, self.second))
def __getitem__(self, index):
if not (index % 2):
return self.first
else:
return self.second
def __setitem__(self, index, val):
if not (index % 2):
self.first = val
else:
self.second = val
__swig_destroy__ = _swig_decoders.delete_Pair
# Register Pair in _swig_decoders:
_swig_decoders.Pair_swigregister(Pair)
class PairFloatStringVector(object):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def iterator(self):
return _swig_decoders.PairFloatStringVector_iterator(self)
def __iter__(self):
return self.iterator()
def __nonzero__(self):
return _swig_decoders.PairFloatStringVector___nonzero__(self)
def __bool__(self):
return _swig_decoders.PairFloatStringVector___bool__(self)
def __len__(self):
return _swig_decoders.PairFloatStringVector___len__(self)
def __getslice__(self, i, j):
return _swig_decoders.PairFloatStringVector___getslice__(self, i, j)
def __setslice__(self, *args):
return _swig_decoders.PairFloatStringVector___setslice__(self, *args)
def __delslice__(self, i, j):
return _swig_decoders.PairFloatStringVector___delslice__(self, i, j)
def __delitem__(self, *args):
return _swig_decoders.PairFloatStringVector___delitem__(self, *args)
def __getitem__(self, *args):
return _swig_decoders.PairFloatStringVector___getitem__(self, *args)
def __setitem__(self, *args):
return _swig_decoders.PairFloatStringVector___setitem__(self, *args)
def pop(self):
return _swig_decoders.PairFloatStringVector_pop(self)
def append(self, x):
return _swig_decoders.PairFloatStringVector_append(self, x)
def empty(self):
return _swig_decoders.PairFloatStringVector_empty(self)
def size(self):
return _swig_decoders.PairFloatStringVector_size(self)
def swap(self, v):
return _swig_decoders.PairFloatStringVector_swap(self, v)
def begin(self):
return _swig_decoders.PairFloatStringVector_begin(self)
def end(self):
return _swig_decoders.PairFloatStringVector_end(self)
def rbegin(self):
return _swig_decoders.PairFloatStringVector_rbegin(self)
def rend(self):
return _swig_decoders.PairFloatStringVector_rend(self)
def clear(self):
return _swig_decoders.PairFloatStringVector_clear(self)
def get_allocator(self):
return _swig_decoders.PairFloatStringVector_get_allocator(self)
def pop_back(self):
return _swig_decoders.PairFloatStringVector_pop_back(self)
def erase(self, *args):
return _swig_decoders.PairFloatStringVector_erase(self, *args)
def __init__(self, *args):
_swig_decoders.PairFloatStringVector_swiginit(self, _swig_decoders.new_PairFloatStringVector(*args))
def push_back(self, x):
return _swig_decoders.PairFloatStringVector_push_back(self, x)
def front(self):
return _swig_decoders.PairFloatStringVector_front(self)
def back(self):
return _swig_decoders.PairFloatStringVector_back(self)
def assign(self, n, x):
return _swig_decoders.PairFloatStringVector_assign(self, n, x)
def resize(self, *args):
return _swig_decoders.PairFloatStringVector_resize(self, *args)
def insert(self, *args):
return _swig_decoders.PairFloatStringVector_insert(self, *args)
def reserve(self, n):
return _swig_decoders.PairFloatStringVector_reserve(self, n)
def capacity(self):
return _swig_decoders.PairFloatStringVector_capacity(self)
__swig_destroy__ = _swig_decoders.delete_PairFloatStringVector
# Register PairFloatStringVector in _swig_decoders:
_swig_decoders.PairFloatStringVector_swigregister(PairFloatStringVector)
class PairDoubleStringVector(object):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def iterator(self):
return _swig_decoders.PairDoubleStringVector_iterator(self)
def __iter__(self):
return self.iterator()
def __nonzero__(self):
return _swig_decoders.PairDoubleStringVector___nonzero__(self)
def __bool__(self):
return _swig_decoders.PairDoubleStringVector___bool__(self)
def __len__(self):
return _swig_decoders.PairDoubleStringVector___len__(self)
def __getslice__(self, i, j):
return _swig_decoders.PairDoubleStringVector___getslice__(self, i, j)
def __setslice__(self, *args):
return _swig_decoders.PairDoubleStringVector___setslice__(self, *args)
def __delslice__(self, i, j):
return _swig_decoders.PairDoubleStringVector___delslice__(self, i, j)
def __delitem__(self, *args):
return _swig_decoders.PairDoubleStringVector___delitem__(self, *args)
def __getitem__(self, *args):
return _swig_decoders.PairDoubleStringVector___getitem__(self, *args)
def __setitem__(self, *args):
return _swig_decoders.PairDoubleStringVector___setitem__(self, *args)
def pop(self):
return _swig_decoders.PairDoubleStringVector_pop(self)
def append(self, x):
return _swig_decoders.PairDoubleStringVector_append(self, x)
def empty(self):
return _swig_decoders.PairDoubleStringVector_empty(self)
def size(self):
return _swig_decoders.PairDoubleStringVector_size(self)
def swap(self, v):
return _swig_decoders.PairDoubleStringVector_swap(self, v)
def begin(self):
return _swig_decoders.PairDoubleStringVector_begin(self)
def end(self):
return _swig_decoders.PairDoubleStringVector_end(self)
def rbegin(self):
return _swig_decoders.PairDoubleStringVector_rbegin(self)
def rend(self):
return _swig_decoders.PairDoubleStringVector_rend(self)
def clear(self):
return _swig_decoders.PairDoubleStringVector_clear(self)
def get_allocator(self):
return _swig_decoders.PairDoubleStringVector_get_allocator(self)
def pop_back(self):
return _swig_decoders.PairDoubleStringVector_pop_back(self)
def erase(self, *args):
return _swig_decoders.PairDoubleStringVector_erase(self, *args)
def __init__(self, *args):
_swig_decoders.PairDoubleStringVector_swiginit(self, _swig_decoders.new_PairDoubleStringVector(*args))
def push_back(self, x):
return _swig_decoders.PairDoubleStringVector_push_back(self, x)
def front(self):
return _swig_decoders.PairDoubleStringVector_front(self)
def back(self):
return _swig_decoders.PairDoubleStringVector_back(self)
def assign(self, n, x):
return _swig_decoders.PairDoubleStringVector_assign(self, n, x)
def resize(self, *args):
return _swig_decoders.PairDoubleStringVector_resize(self, *args)
def insert(self, *args):
return _swig_decoders.PairDoubleStringVector_insert(self, *args)
def reserve(self, n):
return _swig_decoders.PairDoubleStringVector_reserve(self, n)
def capacity(self):
return _swig_decoders.PairDoubleStringVector_capacity(self)
__swig_destroy__ = _swig_decoders.delete_PairDoubleStringVector
# Register PairDoubleStringVector in _swig_decoders:
_swig_decoders.PairDoubleStringVector_swigregister(PairDoubleStringVector)
class PairDoubleStringVector2(object):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def iterator(self):
return _swig_decoders.PairDoubleStringVector2_iterator(self)
def __iter__(self):
return self.iterator()
def __nonzero__(self):
return _swig_decoders.PairDoubleStringVector2___nonzero__(self)
def __bool__(self):
return _swig_decoders.PairDoubleStringVector2___bool__(self)
def __len__(self):
return _swig_decoders.PairDoubleStringVector2___len__(self)
def __getslice__(self, i, j):
return _swig_decoders.PairDoubleStringVector2___getslice__(self, i, j)
def __setslice__(self, *args):
return _swig_decoders.PairDoubleStringVector2___setslice__(self, *args)
def __delslice__(self, i, j):
return _swig_decoders.PairDoubleStringVector2___delslice__(self, i, j)
def __delitem__(self, *args):
return _swig_decoders.PairDoubleStringVector2___delitem__(self, *args)
def __getitem__(self, *args):
return _swig_decoders.PairDoubleStringVector2___getitem__(self, *args)
def __setitem__(self, *args):
return _swig_decoders.PairDoubleStringVector2___setitem__(self, *args)
def pop(self):
return _swig_decoders.PairDoubleStringVector2_pop(self)
def append(self, x):
return _swig_decoders.PairDoubleStringVector2_append(self, x)
def empty(self):
return _swig_decoders.PairDoubleStringVector2_empty(self)
def size(self):
return _swig_decoders.PairDoubleStringVector2_size(self)
def swap(self, v):
return _swig_decoders.PairDoubleStringVector2_swap(self, v)
def begin(self):
return _swig_decoders.PairDoubleStringVector2_begin(self)
def end(self):
return _swig_decoders.PairDoubleStringVector2_end(self)
def rbegin(self):
return _swig_decoders.PairDoubleStringVector2_rbegin(self)
def rend(self):
return _swig_decoders.PairDoubleStringVector2_rend(self)
def clear(self):
return _swig_decoders.PairDoubleStringVector2_clear(self)
def get_allocator(self):
return _swig_decoders.PairDoubleStringVector2_get_allocator(self)
def pop_back(self):
return _swig_decoders.PairDoubleStringVector2_pop_back(self)
def erase(self, *args):
return _swig_decoders.PairDoubleStringVector2_erase(self, *args)
def __init__(self, *args):
_swig_decoders.PairDoubleStringVector2_swiginit(self, _swig_decoders.new_PairDoubleStringVector2(*args))
def push_back(self, x):
return _swig_decoders.PairDoubleStringVector2_push_back(self, x)
def front(self):
return _swig_decoders.PairDoubleStringVector2_front(self)
def back(self):
return _swig_decoders.PairDoubleStringVector2_back(self)
def assign(self, n, x):
return _swig_decoders.PairDoubleStringVector2_assign(self, n, x)
def resize(self, *args):
return _swig_decoders.PairDoubleStringVector2_resize(self, *args)
def insert(self, *args):
return _swig_decoders.PairDoubleStringVector2_insert(self, *args)
def reserve(self, n):
return _swig_decoders.PairDoubleStringVector2_reserve(self, n)
def capacity(self):
return _swig_decoders.PairDoubleStringVector2_capacity(self)
__swig_destroy__ = _swig_decoders.delete_PairDoubleStringVector2
# Register PairDoubleStringVector2 in _swig_decoders:
_swig_decoders.PairDoubleStringVector2_swigregister(PairDoubleStringVector2)
class DoubleVector3(object):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def iterator(self):
return _swig_decoders.DoubleVector3_iterator(self)
def __iter__(self):
return self.iterator()
def __nonzero__(self):
return _swig_decoders.DoubleVector3___nonzero__(self)
def __bool__(self):
return _swig_decoders.DoubleVector3___bool__(self)
def __len__(self):
return _swig_decoders.DoubleVector3___len__(self)
def __getslice__(self, i, j):
return _swig_decoders.DoubleVector3___getslice__(self, i, j)
def __setslice__(self, *args):
return _swig_decoders.DoubleVector3___setslice__(self, *args)
def __delslice__(self, i, j):
return _swig_decoders.DoubleVector3___delslice__(self, i, j)
def __delitem__(self, *args):
return _swig_decoders.DoubleVector3___delitem__(self, *args)
def __getitem__(self, *args):
return _swig_decoders.DoubleVector3___getitem__(self, *args)
def __setitem__(self, *args):
return _swig_decoders.DoubleVector3___setitem__(self, *args)
def pop(self):
return _swig_decoders.DoubleVector3_pop(self)
def append(self, x):
return _swig_decoders.DoubleVector3_append(self, x)
def empty(self):
return _swig_decoders.DoubleVector3_empty(self)
def size(self):
return _swig_decoders.DoubleVector3_size(self)
def swap(self, v):
return _swig_decoders.DoubleVector3_swap(self, v)
def begin(self):
return _swig_decoders.DoubleVector3_begin(self)
def end(self):
return _swig_decoders.DoubleVector3_end(self)
def rbegin(self):
return _swig_decoders.DoubleVector3_rbegin(self)
def rend(self):
return _swig_decoders.DoubleVector3_rend(self)
def clear(self):
return _swig_decoders.DoubleVector3_clear(self)
def get_allocator(self):
return _swig_decoders.DoubleVector3_get_allocator(self)
def pop_back(self):
return _swig_decoders.DoubleVector3_pop_back(self)
def erase(self, *args):
return _swig_decoders.DoubleVector3_erase(self, *args)
def __init__(self, *args):
_swig_decoders.DoubleVector3_swiginit(self, _swig_decoders.new_DoubleVector3(*args))
def push_back(self, x):
return _swig_decoders.DoubleVector3_push_back(self, x)
def front(self):
return _swig_decoders.DoubleVector3_front(self)
def back(self):
return _swig_decoders.DoubleVector3_back(self)
def assign(self, n, x):
return _swig_decoders.DoubleVector3_assign(self, n, x)
def resize(self, *args):
return _swig_decoders.DoubleVector3_resize(self, *args)
def insert(self, *args):
return _swig_decoders.DoubleVector3_insert(self, *args)
def reserve(self, n):
return _swig_decoders.DoubleVector3_reserve(self, n)
def capacity(self):
return _swig_decoders.DoubleVector3_capacity(self)
__swig_destroy__ = _swig_decoders.delete_DoubleVector3
# Register DoubleVector3 in _swig_decoders:
_swig_decoders.DoubleVector3_swigregister(DoubleVector3)
def IntDoublePairCompSecondRev(a, b):
return _swig_decoders.IntDoublePairCompSecondRev(a, b)
def StringDoublePairCompSecondRev(a, b):
return _swig_decoders.StringDoublePairCompSecondRev(a, b)
def DoubleStringPairCompFirstRev(a, b):
return _swig_decoders.DoubleStringPairCompFirstRev(a, b)
class RetriveStrEnumerateVocab(object):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self):
_swig_decoders.RetriveStrEnumerateVocab_swiginit(self, _swig_decoders.new_RetriveStrEnumerateVocab())
def Add(self, index, str):
return _swig_decoders.RetriveStrEnumerateVocab_Add(self, index, str)
vocabulary = property(_swig_decoders.RetriveStrEnumerateVocab_vocabulary_get, _swig_decoders.RetriveStrEnumerateVocab_vocabulary_set)
__swig_destroy__ = _swig_decoders.delete_RetriveStrEnumerateVocab
# Register RetriveStrEnumerateVocab in _swig_decoders:
_swig_decoders.RetriveStrEnumerateVocab_swigregister(RetriveStrEnumerateVocab)
cvar = _swig_decoders.cvar
OOV_SCORE = cvar.OOV_SCORE
START_TOKEN = cvar.START_TOKEN
UNK_TOKEN = cvar.UNK_TOKEN
END_TOKEN = cvar.END_TOKEN
class Scorer(object):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, alpha, beta, lm_path, vocabulary):
_swig_decoders.Scorer_swiginit(self, _swig_decoders.new_Scorer(alpha, beta, lm_path, vocabulary))
__swig_destroy__ = _swig_decoders.delete_Scorer
def get_log_cond_prob(self, words):
return _swig_decoders.Scorer_get_log_cond_prob(self, words)
def get_sent_log_prob(self, words):
return _swig_decoders.Scorer_get_sent_log_prob(self, words)
def get_max_order(self):
return _swig_decoders.Scorer_get_max_order(self)
def get_dict_size(self):
return _swig_decoders.Scorer_get_dict_size(self)
def is_character_based(self):
return _swig_decoders.Scorer_is_character_based(self)
def reset_params(self, alpha, beta):
return _swig_decoders.Scorer_reset_params(self, alpha, beta)
def make_ngram(self, prefix):
return _swig_decoders.Scorer_make_ngram(self, prefix)
def split_labels(self, labels):
return _swig_decoders.Scorer_split_labels(self, labels)
alpha = property(_swig_decoders.Scorer_alpha_get, _swig_decoders.Scorer_alpha_set)
beta = property(_swig_decoders.Scorer_beta_get, _swig_decoders.Scorer_beta_set)
dictionary = property(_swig_decoders.Scorer_dictionary_get, _swig_decoders.Scorer_dictionary_set)
# Register Scorer in _swig_decoders:
_swig_decoders.Scorer_swigregister(Scorer)
def ctc_greedy_decoder(probs_seq, vocabulary):
return _swig_decoders.ctc_greedy_decoder(probs_seq, vocabulary)
def ctc_beam_search_decoder(probs_seq, vocabulary, beam_size, cutoff_prob=1.0, cutoff_top_n=40, ext_scorer=None):
return _swig_decoders.ctc_beam_search_decoder(probs_seq, vocabulary, beam_size, cutoff_prob, cutoff_top_n, ext_scorer)
def ctc_beam_search_decoder_batch(probs_split, vocabulary, beam_size, num_processes, cutoff_prob=1.0, cutoff_top_n=40, ext_scorer=None):
return _swig_decoders.ctc_beam_search_decoder_batch(probs_split, vocabulary, beam_size, num_processes, cutoff_prob, cutoff_top_n, ext_scorer)
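# --- Hedged usage sketch; not part of the SWIG-generated wrapper above. ---
# Shapes and values are illustrative assumptions: probs_seq is expected to be a
# (time_steps x len(vocabulary)) matrix of per-frame probabilities, and passing
# plain Python lists relies on the typemaps compiled into _swig_decoders.
if __name__ == "__main__":
    vocabulary = ["a", "b", "c", " "]
    probs_seq = [[0.1, 0.2, 0.3, 0.4] for _ in range(50)]  # one row per frame
    print(ctc_greedy_decoder(probs_seq, vocabulary))
    # Beam search without an external LM scorer (ext_scorer defaults to None):
    print(ctc_beam_search_decoder(probs_seq, vocabulary, beam_size=20))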
| 32.23902
| 145
| 0.726206
| 4,484
| 38,171
| 5.658341
| 0.052632
| 0.183509
| 0.219927
| 0.139603
| 0.813968
| 0.684022
| 0.594317
| 0.564008
| 0.449551
| 0.318658
| 0
| 0.00313
| 0.18818
| 38,171
| 1,183
| 146
| 32.266272
| 0.815638
| 0.025648
| 0
| 0.434128
| 1
| 0
| 0.011491
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.431619
| false
| 0
| 0.007528
| 0.40527
| 0.927227
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
37ebe1d5cb9f5e7a8ef610ed5c8ef994d63c1909
| 44,181
|
py
|
Python
|
lego/apps/events/tests/test_registrations.py
|
HoboKristian/lego
|
2729dcef770ad1105f53e087c07ece3f9e9dbc67
|
[
"MIT"
] | null | null | null |
lego/apps/events/tests/test_registrations.py
|
HoboKristian/lego
|
2729dcef770ad1105f53e087c07ece3f9e9dbc67
|
[
"MIT"
] | null | null | null |
lego/apps/events/tests/test_registrations.py
|
HoboKristian/lego
|
2729dcef770ad1105f53e087c07ece3f9e9dbc67
|
[
"MIT"
] | null | null | null |
from datetime import timedelta
from django.utils import timezone
from lego.apps.events.exceptions import EventNotReady
from lego.apps.events.models import Event, Pool, Registration
from lego.apps.followers.models import FollowEvent
from lego.apps.users.models import AbakusGroup, User
from lego.utils.test_utils import BaseTestCase
from .utils import get_dummy_users
class RegistrationMethodTest(BaseTestCase):
fixtures = [
"test_abakus_groups.yaml",
"test_users.yaml",
"test_companies.yaml",
"test_events.yaml",
]
def setUp(self):
Event.objects.all().update(start_time=timezone.now() + timedelta(hours=3))
self.event = Event.objects.get(title="POOLS_AND_PRICED")
self.users = get_dummy_users(2)
AbakusGroup.objects.get(name="Abakus").add_user(self.users[0])
self.registration = Registration.objects.get_or_create(
event=self.event, user=self.users[0]
)[0]
def test_str(self):
d = {"user": self.registration.user, "pool": self.registration.pool}
self.assertEqual(str(self.registration), str(d))
def test_member_cost(self):
self.registration = self.event.register(self.registration)
self.assertEqual(self.event.get_price(self.registration.user), 10000)
def test_user_cost(self):
registration = Registration.objects.get_or_create(
event=self.event, user=self.users[1]
)[0]
self.event.register(registration)
self.assertEqual(self.event.get_price(registration.user), 15000)
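def _register_user(event, user):
    # Hedged helper sketch (hypothetical, unused by the tests below): the
    # get_or_create-then-register pattern repeats throughout this module and
    # could be factored out like this.
    registration = Registration.objects.get_or_create(event=event, user=user)[0]
    return event.register(registration)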
class RegistrationTestCase(BaseTestCase):
fixtures = [
"test_abakus_groups.yaml",
"test_users.yaml",
"test_companies.yaml",
"test_events.yaml",
]
def setUp(self):
Event.objects.all().update(
start_time=timezone.now() + timedelta(hours=3),
merge_time=timezone.now() + timedelta(hours=12),
heed_penalties=True,
)
def test_can_register_single_unlimited_pool(self):
"""Test registering user to event with a single unlimited pool"""
user = get_dummy_users(1)[0]
event = Event.objects.get(title="POOLS_NO_REGISTRATIONS")
pool = event.pools.first()
pool.capacity = 0
pool.save()
AbakusGroup.objects.get(name="Abakus").add_user(user)
registration = Registration.objects.get_or_create(event=event, user=user)[0]
event.register(registration)
self.assertIsNotNone(registration.pool)
self.assertEqual(pool.registrations.count(), 1)
def test_can_register_single_pool(self):
"""Test registering user to event with only a single pool"""
user = get_dummy_users(1)[0]
event = Event.objects.get(title="POOLS_NO_REGISTRATIONS")
pool = event.pools.first()
AbakusGroup.objects.get(name="Abakus").add_user(user)
registration = Registration.objects.get_or_create(event=event, user=user)[0]
event.register(registration)
self.assertEqual(pool.registrations.count(), event.number_of_registrations)
def test_can_register_to_single_open_pool(self):
"""Test registering user to event with only one pool with spots left"""
users = get_dummy_users(10)
abakus_users = users[:6]
webkom_users = users[6:]
event = Event.objects.get(title="POOLS_NO_REGISTRATIONS")
pool_one = event.pools.get(name="Abakusmember")
pool_two = event.pools.get(name="Webkom")
for user in abakus_users:
AbakusGroup.objects.get(name="Abakus").add_user(user)
for user in webkom_users:
AbakusGroup.objects.get(name="Webkom").add_user(user)
for user in webkom_users:
registration = Registration.objects.get_or_create(event=event, user=user)[0]
event.register(registration)
self.assertEqual(pool_one.registrations.count(), 2)
self.assertEqual(pool_two.registrations.count(), 2)
self.assertEqual(event.number_of_registrations, 4)
def test_can_register_with_automatic_pool_selection(self):
"""Test that registrating user selects correct pool and that user follows the event"""
user = get_dummy_users(1)[0]
event = Event.objects.get(title="POOLS_NO_REGISTRATIONS")
pool = event.pools.get(name="Abakusmember")
pool_2 = event.pools.get(name="Webkom")
AbakusGroup.objects.get(name="Abakus").add_user(user)
registration = Registration.objects.get_or_create(event=event, user=user)[0]
event.register(registration)
self.assertEqual(pool.registrations.count(), 1)
self.assertEqual(pool_2.registrations.count(), 0)
event_follow_exists = FollowEvent.objects.filter(
follower=user, target=event
).exists()
self.assertEqual(event_follow_exists, True)
def test_registrations_picks_correct_pool(self):
"""Test that multiple registrations selects correct pools"""
users = get_dummy_users(15)
abakus_users = users[:10]
webkom_users = users[10:]
event = Event.objects.get(title="POOLS_NO_REGISTRATIONS")
pool = event.pools.get(name="Abakusmember")
pool_2 = event.pools.get(name="Webkom")
for user in abakus_users:
AbakusGroup.objects.get(name="Abakus").add_user(user)
for user in webkom_users:
AbakusGroup.objects.get(name="Webkom").add_user(user)
registration_webkom_1 = Registration.objects.get_or_create(
event=event, user=webkom_users[0]
)[0]
event.register(registration_webkom_1)
self.assertEqual(pool.registrations.count(), 0)
self.assertEqual(pool_2.registrations.count(), 1)
registration_webkom_2 = Registration.objects.get_or_create(
event=event, user=webkom_users[1]
)[0]
registration_abakus = Registration.objects.get_or_create(
event=event, user=abakus_users[0]
)[0]
event.register(registration_webkom_2)
event.register(registration_abakus)
self.assertEqual(pool.registrations.count(), 1)
self.assertEqual(pool_2.registrations.count(), 2)
def test_no_duplicate_registrations(self):
"""Test that a user are not able register multiple times"""
users = get_dummy_users(2)
user_1, user_2 = users[0], users[1]
event = Event.objects.get(title="POOLS_NO_REGISTRATIONS")
pool = event.pools.get(id=4)
AbakusGroup.objects.get(name="Webkom").add_user(user_1)
AbakusGroup.objects.get(name="Abakus").add_user(user_2)
self.assertEqual(pool.registrations.count(), 0)
registration_one = Registration.objects.get_or_create(event=event, user=user_1)[
0
]
event.register(registration_one)
pool_two = Pool.objects.create(
name="test",
capacity=1,
event=event,
activation_date=(timezone.now() - timedelta(hours=24)),
)
with self.assertRaises(ValueError):
event.register(registration_one)
self.assertEqual(event.number_of_registrations, 1)
self.assertEqual(pool.registrations.count(), 1)
self.assertEqual(pool_two.registrations.count(), 0)
def test_can_not_register_pre_activation(self):
"""Test that user can not register before pool is activated"""
user = get_dummy_users(1)[0]
event = Event.objects.get(title="NO_POOLS_WEBKOM")
permission_groups = [AbakusGroup.objects.get(name="Webkom")]
permission_groups[0].add_user(user)
Pool.objects.create(
name="Webkom",
capacity=1,
event=event,
activation_date=(timezone.now() + timedelta(hours=24)),
)
with self.assertRaises(ValueError):
registration = Registration.objects.get_or_create(event=event, user=user)[0]
event.register(registration)
self.assertEqual(event.number_of_registrations, 0)
self.assertEqual(event.waiting_registrations.count(), 0)
def test_waiting_list_if_full(self):
"""Test that user is put in waiting list if pools are full"""
event = Event.objects.get(title="POOLS_NO_REGISTRATIONS")
pool = event.pools.get(id=3)
people_2_place_in_waiting_list = 3
users = get_dummy_users(pool.capacity + people_2_place_in_waiting_list)
for user in users:
AbakusGroup.objects.get(name="Abakus").add_user(user)
registration = Registration.objects.get_or_create(event=event, user=user)[0]
event.register(registration)
self.assertEqual(
event.waiting_registrations.count(), people_2_place_in_waiting_list
)
self.assertEqual(pool.registrations.count(), pool.capacity)
self.assertEqual(event.number_of_registrations, pool.registrations.count())
def test_can_register_pre_merge(self):
"""Test that user can register before the pools are merged"""
event = Event.objects.get(title="POOLS_NO_REGISTRATIONS")
pool_one = event.pools.get(name="Abakusmember")
pool_two = event.pools.get(name="Webkom")
users = get_dummy_users(2)
user_one, user_two = users[0], users[1]
AbakusGroup.objects.get(name="Abakus").add_user(user_one)
AbakusGroup.objects.get(name="Webkom").add_user(user_two)
registration_one = Registration.objects.get_or_create(
event=event, user=user_one
)[0]
event.register(registration_one)
n_registrants = pool_one.registrations.count()
self.assertEqual(pool_one.registrations.count(), event.number_of_registrations)
registration_two = Registration.objects.get_or_create(
event=event, user=user_two
)[0]
event.register(registration_two)
n_registrants += pool_two.registrations.count()
self.assertEqual(n_registrants, event.number_of_registrations)
def test_can_register_post_merge(self):
"""Test that users can register after the pools are merged"""
event = Event.objects.get(title="NO_POOLS_ABAKUS")
event.merge_time = timezone.now() - timedelta(hours=12)
permission_groups_one = [AbakusGroup.objects.get(name="Abakus")]
permission_groups_two = [AbakusGroup.objects.get(name="Webkom")]
pool_one = Pool.objects.create(
name="Abakus",
capacity=1,
event=event,
activation_date=(timezone.now() - timedelta(hours=24)),
)
pool_one.permission_groups.add(permission_groups_one[0])
pool_two = Pool.objects.create(
name="Webkom",
capacity=2,
event=event,
activation_date=(timezone.now() - timedelta(hours=24)),
)
pool_two.permission_groups.add(permission_groups_two[0])
users = get_dummy_users(3)
for user in users:
permission_groups_one[0].add_user(user)
registration = Registration.objects.get_or_create(event=event, user=user)[0]
event.register(registration)
self.assertEqual(pool_one.registrations.count(), 3)
self.assertEqual(pool_one.registrations.count(), event.number_of_registrations)
def test_can_only_register_with_correct_permission_group(self):
"""Test that user only can register having correct permission group"""
event = Event.objects.get(title="NO_POOLS_ABAKUS")
event.merge_time = timezone.now() - timedelta(hours=12)
permission_groups_one = [AbakusGroup.objects.get(name="Abakus")]
permission_groups_two = [AbakusGroup.objects.get(name="Webkom")]
pool = Pool.objects.create(
name="Webkom",
capacity=1,
event=event,
activation_date=(timezone.now() - timedelta(hours=24)),
)
pool.permission_groups.set(permission_groups_two)
user = get_dummy_users(1)[0]
permission_groups_one[0].add_user(user)
with self.assertRaises(ValueError):
registration = Registration.objects.get_or_create(event=event, user=user)[0]
event.register(registration)
self.assertEqual(pool.registrations.count(), 0)
def test_placed_in_waiting_list_post_merge(self):
"""Test waiting list after pools are merged"""
event = Event.objects.get(title="NO_POOLS_WEBKOM")
permission_groups = [AbakusGroup.objects.get(name="Webkom")]
pool = Pool.objects.create(
name="Webkom",
capacity=2,
event=event,
activation_date=(timezone.now() - timedelta(hours=24)),
)
pool.permission_groups.set(permission_groups)
event.merge_time = timezone.now() - timedelta(hours=12)
users = get_dummy_users(pool.capacity + 1)
expected_users_in_waiting_list = 1
for user in users:
permission_groups[0].add_user(user)
registration = Registration.objects.get_or_create(event=event, user=user)[0]
event.register(registration)
self.assertEqual(
event.waiting_registrations.count(), expected_users_in_waiting_list
)
def test_bump(self):
"""Test that waiting registration is bumped on unregistration"""
event = Event.objects.get(title="POOLS_NO_REGISTRATIONS")
pool = event.pools.first()
users = get_dummy_users(pool.capacity + 2)
for user in users:
AbakusGroup.objects.get(name="Abakus").add_user(user)
registration = Registration.objects.get_or_create(event=event, user=user)[0]
event.register(registration)
waiting_list_before = event.waiting_registrations.count()
regs_before = event.number_of_registrations
pool_before = pool.registrations.count()
event.bump(to_pool=pool)
self.assertEqual(event.number_of_registrations, regs_before + 1)
self.assertEqual(event.waiting_registrations.count(), waiting_list_before - 1)
self.assertEqual(event.waiting_registrations.first().user, users[4])
self.assertEqual(pool.registrations.count(), pool_before + 1)
def test_unregistering_from_event(self):
"""Test that user can unregister from event"""
event = Event.objects.get(title="POOLS_NO_REGISTRATIONS")
pool = event.pools.get(name="Webkom")
users = get_dummy_users(5)
for user in users:
AbakusGroup.objects.get(name="Abakus").add_user(user)
AbakusGroup.objects.get(name="Webkom").add_user(users[0])
registration = Registration.objects.get_or_create(event=event, user=users[0])[0]
event.register(registration)
registrations_before = event.number_of_registrations
pool_registrations_before = pool.registrations.count()
event.unregister(registration)
event_follow_exists = FollowEvent.objects.filter(
follower=registration.user, target=event
).exists()
self.assertEqual(event_follow_exists, False)
self.assertEqual(event.number_of_registrations, registrations_before - 1)
self.assertEqual(pool.registrations.count(), pool_registrations_before - 1)
def test_unable_to_unregister_after_started(self):
"""Test that user cannot unregister after start_time"""
event = Event.objects.get(title="POOLS_WITH_REGISTRATIONS")
event.start_time = timezone.now() - timedelta(days=1)
event.save()
user = User.objects.get(pk=1)
AbakusGroup.objects.get(name="Abakus").add_user(user)
registrations_before = event.number_of_registrations
registration = Registration.objects.get(event=event, user=user)
with self.assertRaises(ValueError):
event.unregister(registration)
self.assertEqual(event.number_of_registrations, registrations_before)
def test_register_after_unregister(self):
"""Test that user can re-register after having unregistered"""
event = Event.objects.get(title="POOLS_WITH_REGISTRATIONS")
user = User.objects.get(pk=1)
AbakusGroup.objects.get(name="Abakus").add_user(user)
registrations_before = event.number_of_registrations
registration = Registration.objects.get(event=event, user=user)
event.unregister(registration)
self.assertEqual(event.number_of_registrations, registrations_before - 1)
event.register(registration)
self.assertEqual(event.number_of_registrations, registrations_before)
def test_register_to_waiting_list_after_unregister(self):
"""Test that user can re-register into waiting list after having unregistered"""
event = Event.objects.get(title="POOLS_WITH_REGISTRATIONS")
user = get_dummy_users(1)[0]
AbakusGroup.objects.get(name="Abakus").add_user(user)
registration = Registration.objects.get_or_create(event=event, user=user)[0]
event.register(registration)
self.assertEqual(event.waiting_registrations.count(), 1)
event.unregister(registration)
self.assertEqual(event.waiting_registrations.count(), 0)
event.register(registration)
self.assertEqual(event.waiting_registrations.count(), 1)
def test_unregistering_non_existing_user(self):
"""Test that non existing user trying to unregister raises error"""
event = Event.objects.get(title="POOLS_NO_REGISTRATIONS")
user = get_dummy_users(1)[0]
with self.assertRaises(Registration.DoesNotExist):
registration = Registration.objects.get(event=event, user=user)
event.unregister(registration)
def test_popping_from_waiting_list_pre_merge(self):
"""Test popping of first user in waiting list before merge"""
event = Event.objects.get(title="NO_POOLS_WEBKOM")
permission_groups = [AbakusGroup.objects.get(name="Webkom")]
pool = Pool.objects.create(
name="Webkom",
capacity=1,
event=event,
activation_date=(timezone.now() - timedelta(hours=24)),
)
pool.permission_groups.set(permission_groups)
users = get_dummy_users(pool.capacity + 10)
for user in users:
permission_groups[0].add_user(user)
registration = Registration.objects.get_or_create(event=event, user=user)[0]
event.register(registration)
self.assertNotEqual(event.waiting_registrations.count(), 0)
prev = event.pop_from_waiting_list()
for top in event.waiting_registrations:
self.assertLessEqual(prev.registration_date, top.registration_date)
prev = top
def test_popping_from_waiting_list_post_merge(self):
"""Test popping of first user in waiting list after merge"""
event = Event.objects.get(title="NO_POOLS_WEBKOM")
event.merge_time = timezone.now() - timedelta(hours=12)
event.save()
permission_groups = [AbakusGroup.objects.get(name="Webkom")]
pool = Pool.objects.create(
name="Webkom",
capacity=1,
event=event,
activation_date=(timezone.now() - timedelta(hours=24)),
)
pool.permission_groups.set(permission_groups)
users = get_dummy_users(pool.capacity + 10)
for user in users:
permission_groups[0].add_user(user)
registration = Registration.objects.get_or_create(event=event, user=user)[0]
event.register(registration)
self.assertNotEqual(event.waiting_registrations.count(), 0)
prev = event.pop_from_waiting_list()
for registration in event.waiting_registrations:
self.assertLessEqual(prev.registration_date, registration.registration_date)
prev = registration
def test_popping_from_waiting_list_with_to_pool(self):
"""Test popping of first user in waiting list after merge"""
event = Event.objects.get(title="NO_POOLS_WEBKOM")
permission_groups = [AbakusGroup.objects.get(name="Webkom")]
pool = Pool.objects.create(
name="Webkom",
capacity=1,
event=event,
activation_date=(timezone.now() - timedelta(hours=24)),
)
pool.permission_groups.set(permission_groups)
users = get_dummy_users(2)
for user in users:
permission_groups[0].add_user(user)
registration = Registration.objects.get_or_create(event=event, user=user)[0]
event.register(registration)
top = event.pop_from_waiting_list(pool)
self.assertIsNotNone(top)
def test_popping_from_waiting_list_with_to_pool_without_heed_penalties(self):
"""Test popping of first user in waiting list after merge"""
event = Event.objects.get(title="NO_POOLS_WEBKOM")
event.heed_penalties = False
event.save()
permission_groups = [AbakusGroup.objects.get(name="Webkom")]
pool = Pool.objects.create(
name="Webkom",
capacity=1,
event=event,
activation_date=(timezone.now() - timedelta(hours=24)),
)
pool.permission_groups.set(permission_groups)
users = get_dummy_users(2)
for user in users:
permission_groups[0].add_user(user)
registration = Registration.objects.get_or_create(event=event, user=user)[0]
event.register(registration)
top = event.pop_from_waiting_list(pool)
self.assertIsNotNone(top)
def test_unregistering_from_waiting_list(self):
"""Test that user can unregister from waiting list"""
event = Event.objects.get(title="POOLS_NO_REGISTRATIONS")
pool = event.pools.first()
users = get_dummy_users(pool.capacity + 10)
for user in users:
AbakusGroup.objects.get(name="Abakus").add_user(user)
registration = Registration.objects.get_or_create(event=event, user=user)[0]
event.register(registration)
event_size_before = event.number_of_registrations
pool_size_before = pool.registrations.count()
waiting_list_before = event.waiting_registrations.count()
registration_last = Registration.objects.get(event=event, user=users[-1])
event.unregister(registration_last)
self.assertEqual(event.number_of_registrations, event_size_before)
self.assertEqual(pool.registrations.count(), pool_size_before)
self.assertEqual(event.waiting_registrations.count(), waiting_list_before - 1)
self.assertLessEqual(event.number_of_registrations, event.active_capacity)
def test_unregistering_and_bumping_pre_merge(self):
"""Test unregistration and that waiting list is bumped accordingly before merge"""
event = Event.objects.get(title="POOLS_NO_REGISTRATIONS")
pool = event.pools.first()
users = get_dummy_users(pool.capacity + 10)
for user in users:
AbakusGroup.objects.get(name="Abakus").add_user(user)
registration = Registration.objects.get_or_create(event=event, user=user)[0]
event.register(registration)
waiting_list_before = event.waiting_registrations.count()
event_size_before = event.number_of_registrations
pool_size_before = pool.registrations.count()
user_to_unregister = event.registrations.first().user
registration_to_unregister = Registration.objects.get(
event=event, user=user_to_unregister
)
event.unregister(registration_to_unregister)
pool.refresh_from_db()
self.assertEqual(pool.counter, pool.registrations.count())
self.assertEqual(pool.registrations.count(), pool_size_before)
self.assertEqual(event.number_of_registrations, event_size_before)
self.assertEqual(event.waiting_registrations.count(), waiting_list_before - 1)
self.assertLessEqual(event.number_of_registrations, event.active_capacity)
def test_unregistering_and_bumping_post_merge(self):
"""Test unregistration and that waiting list is bumped accordingly after merge"""
event = Event.objects.get(title="POOLS_NO_REGISTRATIONS")
event.merge_time = timezone.now() - timedelta(hours=24)
event.save()
pool_one = event.pools.get(name="Abakusmember")
pool_two = event.pools.get(name="Webkom")
users = get_dummy_users(6)
abakus_users = users[:3]
webkom_users = users[3:5]
for user in abakus_users:
AbakusGroup.objects.get(name="Abakus").add_user(user)
event.admin_register(user, pool=pool_one, admin_registration_reason="test")
event_follow_exists = FollowEvent.objects.filter(
follower=user, target=event
).exists()
self.assertEqual(event_follow_exists, True)
for user in webkom_users:
AbakusGroup.objects.get(name="Webkom").add_user(user)
event.admin_register(user, pool=pool_two, admin_registration_reason="test")
event_follow_exists = FollowEvent.objects.filter(
follower=user, target=event
).exists()
self.assertEqual(event_follow_exists, True)
AbakusGroup.objects.get(name="Abakus").add_user(users[5])
registration = Registration.objects.get_or_create(event=event, user=users[5])[0]
event.register(registration)
waiting_list_before = event.waiting_registrations.count()
event_size_before = event.number_of_registrations
pool_one_size_before = pool_one.registrations.count()
pool_two_size_before = pool_two.registrations.count()
user_to_unregister = pool_two.registrations.first().user
registration_to_unregister = Registration.objects.get(
event=event, user=user_to_unregister
)
event.unregister(registration_to_unregister)
event_follow_exists = FollowEvent.objects.filter(
follower=user_to_unregister, target=event
).exists()
self.assertEqual(event_follow_exists, False)
self.assertEqual(event.number_of_registrations, event_size_before)
self.assertEqual(event.waiting_registrations.count(), waiting_list_before - 1)
self.assertEqual(pool_one.registrations.count(), pool_one_size_before + 1)
self.assertGreater(pool_one.registrations.count(), pool_one.capacity)
self.assertEqual(pool_two.registrations.count(), pool_two_size_before - 1)
self.assertLessEqual(event.number_of_registrations, event.active_capacity)
def test_bumping_when_bumped_has_several_pools_available(self):
"""Test that user is bumped when user can join multiple pools"""
event = Event.objects.get(title="POOLS_WITH_REGISTRATIONS")
users = get_dummy_users(4)
user_0 = users[0]
pre_reg = event.registrations.first()
pool = event.pools.get(name="Webkom")
pool_registrations_before = pool.registrations.count()
waiting_list_before = event.waiting_registrations.count()
number_of_registered_before = event.number_of_registrations
for user in users:
AbakusGroup.objects.get(name="Webkom").add_user(user)
registration = Registration.objects.get_or_create(event=event, user=user)[0]
event.register(registration)
self.assertEqual(pool.registrations.count(), pool_registrations_before + 3)
self.assertEqual(event.waiting_registrations.count(), waiting_list_before + 1)
self.assertEqual(event.number_of_registrations, number_of_registered_before + 3)
event.unregister(pre_reg)
self.assertEqual(pool.registrations.count(), pool_registrations_before + 3)
self.assertEqual(event.waiting_registrations.count(), waiting_list_before)
self.assertEqual(event.number_of_registrations, number_of_registered_before + 3)
registration_to_unregister = Registration.objects.get(event=event, user=user_0)
event.unregister(registration_to_unregister)
self.assertEqual(event.number_of_registrations, number_of_registered_before + 2)
def test_unregistration_date_is_set_at_unregistration(self):
"""Test that unregistration date gets set when unregistering"""
event = Event.objects.get(title="POOLS_NO_REGISTRATIONS")
user = get_dummy_users(1)[0]
AbakusGroup.objects.get(name="Webkom").add_user(user)
registration = Registration.objects.get_or_create(event=event, user=user)[0]
event.register(registration)
registration = event.registrations.first()
self.assertIsNone(registration.unregistration_date)
event.unregister(registration)
registration = event.registrations.first()
self.assertIsNotNone(registration.unregistration_date)
def test_bump_after_rebalance(self):
"""Test bumping after pool rebalancing when user unregistrates"""
event = Event.objects.get(title="POOLS_NO_REGISTRATIONS")
pool_one = event.pools.get(name="Abakusmember")
pool_two = event.pools.get(name="Webkom")
users = get_dummy_users(6)
abakus_users = users[0:3]
webkom_users = users[3:6]
for user in abakus_users:
AbakusGroup.objects.get(name="Abakus").add_user(user)
for user in webkom_users:
AbakusGroup.objects.get(name="Webkom").add_user(user)
for user in webkom_users:
registration = Registration.objects.get_or_create(event=event, user=user)[0]
event.register(registration)
for user in abakus_users:
registration = Registration.objects.get_or_create(event=event, user=user)[0]
event.register(registration)
pool_one_before = pool_one.registrations.count()
pool_two_before = pool_two.registrations.count()
waiting_before = event.waiting_registrations.count()
registration_to_unregister = Registration.objects.get(
event=event, user=webkom_users[0]
)
event.unregister(registration_to_unregister)
self.assertEqual(pool_one.registrations.count(), pool_one_before)
self.assertEqual(pool_two.registrations.count(), pool_two_before)
self.assertEqual(event.waiting_registrations.count(), waiting_before - 1)
def test_user_is_moved_after_rebalance(self):
"""Test that user's pool has changed after being rebalanced"""
event = Event.objects.get(title="POOLS_NO_REGISTRATIONS")
pool_one = event.pools.get(name="Abakusmember")
pool_two = event.pools.get(name="Webkom")
users = get_dummy_users(6)
abakus_users = users[0:3]
webkom_users = users[3:6]
for user in abakus_users:
AbakusGroup.objects.get(name="Abakus").add_user(user)
for user in webkom_users:
AbakusGroup.objects.get(name="Webkom").add_user(user)
for user in webkom_users:
registration = Registration.objects.get_or_create(event=event, user=user)[0]
event.register(registration)
for user in abakus_users:
registration = Registration.objects.get_or_create(event=event, user=user)[0]
event.register(registration)
moved_user_registration = event.registrations.get(user=webkom_users[2])
self.assertEqual(moved_user_registration.pool, pool_one)
registration_to_unregister = Registration.objects.get(
event=event, user=webkom_users[0]
)
event.unregister(registration_to_unregister)
moved_user_registration = event.registrations.get(user=webkom_users[2])
self.assertEqual(moved_user_registration.pool, pool_two)
def test_correct_user_is_bumped_after_rebalance(self):
"""Test that the first user available for the rebalanced pool is bumped"""
event = Event.objects.get(title="POOLS_NO_REGISTRATIONS")
pool_one = event.pools.get(name="Abakusmember")
users = get_dummy_users(7)
abakus_users = users[0:4]
webkom_users = users[4:7]
for user in abakus_users:
AbakusGroup.objects.get(name="Abakus").add_user(user)
for user in webkom_users:
AbakusGroup.objects.get(name="Webkom").add_user(user)
for user in webkom_users:
registration = Registration.objects.get_or_create(event=event, user=user)[0]
event.register(registration)
for user in abakus_users:
registration = Registration.objects.get_or_create(event=event, user=user)[0]
event.register(registration)
waiting_before = event.waiting_registrations.count()
user_to_be_bumped = event.waiting_registrations.get(user=abakus_users[2]).user
user_not_to_be_bumped = event.waiting_registrations.get(
user=abakus_users[3]
).user
registration_to_unregister = Registration.objects.get(
event=event, user=webkom_users[0]
)
event.unregister(registration_to_unregister)
self.assertEqual(event.registrations.get(user=user_to_be_bumped).pool, pool_one)
self.assertIsNone(event.registrations.get(user=user_not_to_be_bumped).pool)
self.assertEqual(event.waiting_registrations.count(), waiting_before - 1)
def test_rebalance_pool_method(self):
"""Test rebalancing method by moving registered user's pool to fit waiting list user"""
event = Event.objects.get(title="POOLS_NO_REGISTRATIONS")
abakus_pool = event.pools.get(name="Abakusmember")
webkom_pool = event.pools.get(name="Webkom")
users = get_dummy_users(4)
abakus_user = users[0]
webkom_users = users[1:]
AbakusGroup.objects.get(name="Abakus").add_user(abakus_user)
for user in webkom_users:
AbakusGroup.objects.get(name="Webkom").add_user(user)
registration_abakus = Registration.objects.get_or_create(
event=event, user=abakus_user
)[0]
event.register(registration_abakus)
for user in webkom_users[:2]:
registration = Registration.objects.get_or_create(event=event, user=user)[0]
event.register(registration)
self.assertFalse(event.rebalance_pool(abakus_pool, webkom_pool))
self.assertEqual(abakus_pool.registrations.count(), 1)
self.assertEqual(webkom_pool.registrations.count(), 2)
registration_webkom = Registration.objects.get_or_create(
event=event, user=webkom_users[2]
)[0]
event.register(registration_webkom)
registration_to_unregister = Registration.objects.get(
event=event, user=webkom_users[0]
)
event.unregister(registration_to_unregister)
self.assertEqual(abakus_pool.registrations.count(), 2)
self.assertEqual(webkom_pool.registrations.count(), 1)
self.assertTrue(event.rebalance_pool(abakus_pool, webkom_pool))
self.assertEqual(abakus_pool.registrations.count(), 1)
self.assertEqual(webkom_pool.registrations.count(), 2)
def test_rebalance_pool_method_should_not_overflow(self):
"""Test rebalancing method by moving registered user's pool to fit waiting list user"""
event = Event.objects.get(title="POOLS_NO_REGISTRATIONS")
abakus_pool = event.pools.get(name="Abakusmember")
webkom_pool = event.pools.get(name="Webkom")
users = get_dummy_users(6)
for user in users:
AbakusGroup.objects.get(name="Abakus").add_user(user)
webkom_users = users[:3]
abakus_users = users[3:]
for user in webkom_users:
registration = Registration.objects.get_or_create(event=event, user=user)[0]
event.register(registration)
self.assertEqual(abakus_pool.registrations.count(), 3)
self.assertEqual(webkom_pool.registrations.count(), 0)
for user in abakus_users:
registration = Registration.objects.get_or_create(event=event, user=user)[0]
event.register(registration)
self.assertEqual(abakus_pool.registrations.count(), 3)
self.assertEqual(webkom_pool.registrations.count(), 0)
for user in webkom_users:
AbakusGroup.objects.get(name="Webkom").add_user(user)
event.bump_on_pool_creation_or_expansion()
self.assertEqual(abakus_pool.registrations.count(), 3) # Abakus-pool has size 3
self.assertEqual(webkom_pool.registrations.count(), 2) # Webkom-pool has size 2
def test_cant_register_after_event_has_started(self):
"""Test that a user cannot register after the event has started."""
event = Event.objects.get(title="POOLS_NO_REGISTRATIONS")
current_time = timezone.now()
event.start_time = current_time - timedelta(hours=3)
event.save()
user = get_dummy_users(1)[0]
AbakusGroup.objects.get(name="Abakus").add_user(user)
registration = Registration.objects.get_or_create(event=event, user=user)[0]
with self.assertRaises(ValueError):
event.register(registration)
self.assertEqual(event.number_of_registrations, 0)
def test_cant_register_after_event_has_closed(self):
"""Test that a user cannot register after the event has started."""
event = Event.objects.get(title="POOLS_NO_REGISTRATIONS")
current_time = timezone.now()
event.start_time = current_time + timedelta(hours=1)
event.save()
user = get_dummy_users(1)[0]
AbakusGroup.objects.get(name="Abakus").add_user(user)
registration = Registration.objects.get_or_create(event=event, user=user)[0]
with self.assertRaises(ValueError):
event.register(registration)
self.assertEqual(event.number_of_registrations, 0)
def test_presence_method_raises_error_with_illegal_value(self):
"""Test that presence raises error when given an illegal presence choice"""
event = Event.objects.get(title="POOLS_WITH_REGISTRATIONS")
registration = event.registrations.first()
with self.assertRaises(ValueError):
registration.set_presence("ripvalue")
def test_consent_method_raises_error_with_illegal_value(self):
"""Test that consent raises error when given an illegal consent choice"""
event = Event.objects.get(title="POOLS_WITH_REGISTRATIONS")
registration = event.registrations.first()
with self.assertRaises(ValueError):
registration.set_photo_consent("ripvalue")
def test_bump_on_pool_update(self):
"""Test that waiting registrations are bumped when a pool is expanded"""
event = Event.objects.get(title="POOLS_NO_REGISTRATIONS")
pool = event.pools.first()
users = get_dummy_users(6)
for user in users:
AbakusGroup.objects.get(name="Abakus").add_user(user)
registration = Registration.objects.get_or_create(event=event, user=user)[0]
event.register(registration)
no_of_waiting_registrations_before = event.waiting_registrations.count()
self.assertEqual(no_of_waiting_registrations_before, 3)
pool.capacity = 5
pool.save()
event.bump_on_pool_creation_or_expansion()
no_of_waiting_registrations_after = event.waiting_registrations.count()
self.assertEqual(no_of_waiting_registrations_after, 1)
def test_no_bump_to_illegal_pool_on_expansion(self):
"""Test that waiting regs aren't bumped if they don't have perm to join the expanded pool"""
event = Event.objects.get(title="POOLS_NO_REGISTRATIONS")
pool = event.pools.get(name="Webkom")
users = get_dummy_users(6)
for user in users:
AbakusGroup.objects.get(name="Abakus").add_user(user)
registration = Registration.objects.get_or_create(event=event, user=user)[0]
event.register(registration)
no_of_waiting_registrations_before = event.waiting_registrations.count()
self.assertEqual(no_of_waiting_registrations_before, 3)
pool.capacity = 5
pool.save()
event.bump_on_pool_creation_or_expansion()
no_of_waiting_registrations_after = event.waiting_registrations.count()
self.assertEqual(no_of_waiting_registrations_after, 3)
def test_bump_on_pool_creation(self):
"""Test that waiting registrations are bumped when a new pool is created"""
event = Event.objects.get(title="POOLS_NO_REGISTRATIONS")
users = get_dummy_users(6)
for user in users:
AbakusGroup.objects.get(name="Abakus").add_user(user)
registration = Registration.objects.get_or_create(event=event, user=user)[0]
event.register(registration)
no_of_waiting_registrations_before = event.waiting_registrations.count()
self.assertEqual(no_of_waiting_registrations_before, 3)
new_pool = Pool.objects.create(
name="test",
capacity=3,
event=event,
activation_date=(timezone.now() - timedelta(hours=24)),
)
new_pool.permission_groups.set([AbakusGroup.objects.get(name="Abakus")])
event.bump_on_pool_creation_or_expansion()
no_of_waiting_registrations_after = event.waiting_registrations.count()
self.assertEqual(no_of_waiting_registrations_after, 0)
self.assertEqual(new_pool.registrations.count(), 3)
def test_bump_on_several_pools_updated(self):
"""Test that waiting regs are bumped to several pools when several pools are updated"""
event = Event.objects.get(title="POOLS_NO_REGISTRATIONS")
pool_one = event.pools.get(name="Abakusmember")
pool_two = event.pools.get(name="Webkom")
users = get_dummy_users(7)
for user in users:
AbakusGroup.objects.get(name="Webkom").add_user(user)
registration = Registration.objects.get_or_create(event=event, user=user)[0]
event.register(registration)
no_of_waiting_registrations_before = event.waiting_registrations.count()
self.assertEqual(no_of_waiting_registrations_before, 2)
pool_one.capacity = pool_one.capacity + 1
pool_two.capacity = pool_two.capacity + 1
pool_one.save()
pool_two.save()
event.bump_on_pool_creation_or_expansion()
no_of_waiting_registrations_after = event.waiting_registrations.count()
self.assertEqual(no_of_waiting_registrations_after, 0)
def test_register_when_unregister_when_event_is_full(self):
"""Test that counter works when registering after an event is full"""
event = Event.objects.get(title="POOLS_NO_REGISTRATIONS")
users = get_dummy_users(6)
user_one = users[0]
user_two = users[-1]
for user in users[:5]:
AbakusGroup.objects.get(name="Webkom").add_user(user)
registration = Registration.objects.get_or_create(event=event, user=user)[0]
event.register(registration)
AbakusGroup.objects.get(name="Webkom").add_user(user_two)
registration_one = Registration.objects.get(event=event, user=user_one)
registration_two = Registration.objects.create(event=event, user=user_two)
event.unregister(registration_one)
event.register(registration_two)
self.assertTrue(registration_two.is_admitted)
def test_that_is_ready_flag_disables_new_registrations(self):
"""Test that users are not able to register when is_ready is False"""
event = Event.objects.get(title="POOLS_WITH_REGISTRATIONS")
event.is_ready = False
event.save()
user = get_dummy_users(1)[0]
AbakusGroup.objects.get(name="Abakus").add_user(user)
registration = Registration.objects.get_or_create(event=event, user=user)[0]
with self.assertRaises(EventNotReady):
event.register(registration)
| 44.00498
| 100
| 0.682148
| 5,269
| 44,181
| 5.485481
| 0.046498
| 0.055704
| 0.04567
| 0.048438
| 0.855517
| 0.818012
| 0.764972
| 0.72944
| 0.710999
| 0.668823
| 0
| 0.009401
| 0.217514
| 44,181
| 1,003
| 101
| 44.048853
| 0.826647
| 0.060592
| 0
| 0.678887
| 0
| 0
| 0.040783
| 0.019568
| 0
| 0
| 0
| 0
| 0.152971
| 1
| 0.059418
| false
| 0
| 0.010114
| 0
| 0.074589
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
37f325c092da2a0ec00cc85dc3e6b70463c3ae4f
| 10
|
py
|
Python
|
my.py
|
qiuhui1991/test1
|
fc8e9088a7a8082d2c4429b76eece56dfb99a0b7
|
[
"MIT"
] | 1
|
2018-12-21T09:21:33.000Z
|
2018-12-21T09:21:33.000Z
|
my.py
|
qiuhui1991/test1
|
fc8e9088a7a8082d2c4429b76eece56dfb99a0b7
|
[
"MIT"
] | null | null | null |
my.py
|
qiuhui1991/test1
|
fc8e9088a7a8082d2c4429b76eece56dfb99a0b7
|
[
"MIT"
] | null | null | null |
a = 1
b = 2
| 3.333333
| 4
| 0.4
| 4
| 10
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 0.4
| 10
| 2
| 5
| 5
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
5322191d268b650af0fa7642b2c35a6b9db84855
| 118
|
py
|
Python
|
tests/tests/admin.py
|
darkslab/django-optimistic-lock
|
9438f12fdaeef0bff3f3ccc74ab73bf54acdc9a0
|
[
"BSD-2-Clause"
] | 81
|
2015-02-02T10:21:02.000Z
|
2022-01-06T05:32:05.000Z
|
tests/tests/admin.py
|
darkslab/django-optimistic-lock
|
9438f12fdaeef0bff3f3ccc74ab73bf54acdc9a0
|
[
"BSD-2-Clause"
] | 8
|
2015-01-14T17:32:11.000Z
|
2020-12-08T09:40:17.000Z
|
tests/tests/admin.py
|
darkslab/django-optimistic-lock
|
9438f12fdaeef0bff3f3ccc74ab73bf54acdc9a0
|
[
"BSD-2-Clause"
] | 19
|
2015-02-02T10:22:09.000Z
|
2021-11-15T10:15:29.000Z
|
from django.contrib import admin
from .models import SimpleModel
admin.site.register(SimpleModel, admin.ModelAdmin)
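# Hedged sketch of a customized registration (hypothetical alternative to the
# default ModelAdmin used above; "id" is the only field assumed to exist on
# SimpleModel). Registering the same model twice raises AlreadyRegistered, so
# this subclass would replace, not accompany, the call above:
class SimpleModelAdmin(admin.ModelAdmin):
    list_display = ("id",)
# admin.site.register(SimpleModel, SimpleModelAdmin)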
| 19.666667
| 50
| 0.830508
| 15
| 118
| 6.533333
| 0.666667
| 0.326531
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.101695
| 118
| 5
| 51
| 23.6
| 0.924528
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
532b6585b8c4a26ac5a1826a3c26a78fbf6a06c7
| 186
|
py
|
Python
|
losses.py
|
dcastf01/self-supervised-cars
|
3859801d4c6bb5ff29c0198ae85bc02973c9910b
|
[
"MIT"
] | null | null | null |
losses.py
|
dcastf01/self-supervised-cars
|
3859801d4c6bb5ff29c0198ae85bc02973c9910b
|
[
"MIT"
] | null | null | null |
losses.py
|
dcastf01/self-supervised-cars
|
3859801d4c6bb5ff29c0198ae85bc02973c9910b
|
[
"MIT"
] | null | null | null |
"""
Author: Yonglong Tian (yonglong@mit.edu)
Date: May 07, 2020
"""
from __future__ import print_function
from lightly.loss import SymNegCosineSimilarityLoss, BarlowTwinsLoss, NTXentLoss
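# Hedged usage sketch: instantiating the imported criteria. Parameter names
# follow the lightly API at the time of writing and are assumptions here.
if __name__ == "__main__":
    ntxent = NTXentLoss(temperature=0.5)         # InfoNCE for SimCLR-style training
    barlow = BarlowTwinsLoss(lambda_param=5e-3)  # redundancy-reduction loss
    simsiam = SymNegCosineSimilarityLoss()       # symmetric SimSiam loss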
| 23.25
| 78
| 0.811828
| 22
| 186
| 6.636364
| 0.863636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.035928
| 0.102151
| 186
| 7
| 79
| 26.571429
| 0.838323
| 0.317204
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
|
0
| 6
|
5331d08ae8f2f05ea2f665a16199e2068855acc7
| 49
|
py
|
Python
|
crosswoz-dst/submission1/__init__.py
|
adamlin120/ConvLab-2
|
5da1803a923329565c8c75a8f18c9d3fc8b45c2d
|
[
"Apache-2.0"
] | null | null | null |
crosswoz-dst/submission1/__init__.py
|
adamlin120/ConvLab-2
|
5da1803a923329565c8c75a8f18c9d3fc8b45c2d
|
[
"Apache-2.0"
] | null | null | null |
crosswoz-dst/submission1/__init__.py
|
adamlin120/ConvLab-2
|
5da1803a923329565c8c75a8f18c9d3fc8b45c2d
|
[
"Apache-2.0"
] | null | null | null |
from .submission1 import GPT2DSTEnglish as Model
| 24.5
| 48
| 0.857143
| 6
| 49
| 7
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.046512
| 0.122449
| 49
| 1
| 49
| 49
| 0.930233
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
726009f237c2d8c0b6dfa58443b7896b9c225678
| 208
|
py
|
Python
|
Hiven/Objects/member_exit.py
|
NexInfinite/hivenpy
|
b20c37f68f2526a3455bbcfa1432f430ac1258cb
|
[
"MIT"
] | 9
|
2020-07-30T09:31:28.000Z
|
2021-02-17T13:23:43.000Z
|
Hiven/Objects/member_exit.py
|
NexInfinite/hivenpy
|
b20c37f68f2526a3455bbcfa1432f430ac1258cb
|
[
"MIT"
] | null | null | null |
Hiven/Objects/member_exit.py
|
NexInfinite/hivenpy
|
b20c37f68f2526a3455bbcfa1432f430ac1258cb
|
[
"MIT"
] | 4
|
2020-07-30T18:17:50.000Z
|
2020-08-09T23:49:01.000Z
|
class member_exit_obj:
    """Wraps the payload received when a member leaves a house."""
    def __init__(self, ctx):
        self.user = self.User(ctx)  # nested wrapper around the leaving user
        self.house_id = ctx['house_id']
    class User:
        """Minimal view of the user contained in the exit payload."""
        def __init__(self, ctx):
            self.id = ctx['id']
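# Hedged usage sketch: constructing the wrapper from a hypothetical payload;
# the field names are taken from the class above.
if __name__ == "__main__":
    ctx = {"house_id": 1234, "id": 5678}
    exit_event = member_exit_obj(ctx)
    print(exit_event.house_id, exit_event.user.id)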
| 26
| 39
| 0.567308
| 29
| 208
| 3.655172
| 0.37931
| 0.198113
| 0.207547
| 0.264151
| 0.339623
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.302885
| 208
| 8
| 40
| 26
| 0.731034
| 0
| 0
| 0.285714
| 0
| 0
| 0.047847
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0
| 0
| 0
| 0.571429
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
72a37a48ce6d5e52875addc00b9373dd5dadbb91
| 150
|
py
|
Python
|
coffelli/dashboard/__init__.py
|
coffeestudio/django-coffelli
|
cd0e6fbcfc169d8335351072c5816fb1d1639429
|
[
"BSD-3-Clause"
] | null | null | null |
coffelli/dashboard/__init__.py
|
coffeestudio/django-coffelli
|
cd0e6fbcfc169d8335351072c5816fb1d1639429
|
[
"BSD-3-Clause"
] | null | null | null |
coffelli/dashboard/__init__.py
|
coffeestudio/django-coffelli
|
cd0e6fbcfc169d8335351072c5816fb1d1639429
|
[
"BSD-3-Clause"
] | null | null | null |
from coffelli.dashboard.dashboards import *
from coffelli.dashboard.registry import *
default_app_config = "coffelli.dashboard.apps.DashboardConfig"
| 30
| 62
| 0.84
| 17
| 150
| 7.294118
| 0.647059
| 0.41129
| 0.33871
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.08
| 150
| 4
| 63
| 37.5
| 0.898551
| 0
| 0
| 0
| 0
| 0
| 0.26
| 0.26
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
72b52ef7d86ae092fa87120c47e4da17ba0a0ac0
| 595
|
py
|
Python
|
code/BGI/__init__.py
|
claudiavr/AIS
|
5a9b9db8377efbfba3e8bfc8bf126845ef6e9aea
|
[
"MIT"
] | null | null | null |
code/BGI/__init__.py
|
claudiavr/AIS
|
5a9b9db8377efbfba3e8bfc8bf126845ef6e9aea
|
[
"MIT"
] | null | null | null |
code/BGI/__init__.py
|
claudiavr/AIS
|
5a9b9db8377efbfba3e8bfc8bf126845ef6e9aea
|
[
"MIT"
] | null | null | null |
"""Background Image Generator Package.
This package calculates a background image that simulates a bias image of
the SPARC4 cameras.
"""
from .BGI import Background_Image
from FC import Flux_Calculation
from Telescope_SR import Telescope_Spectral_Response
from Atmosphere_SR import Atmosphere_Spectral_Response
from S4_SR import (Abstract_SPARC4_Spectral_Response,
Concrete_SPARC4_Spectral_Response_1,
Concrete_SPARC4_Spectral_Response_2,
Concrete_SPARC4_Spectral_Response_3,
Concrete_SPARC4_Spectral_Response_4)
| 33.055556
| 76
| 0.766387
| 72
| 595
| 5.944444
| 0.486111
| 0.261682
| 0.257009
| 0.280374
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.023404
| 0.210084
| 595
| 17
| 77
| 35
| 0.887234
| 0.223529
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.555556
| 0
| 0.555556
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
72c01fd77922a7d4ad7119cf7a8891c897c9ea61
| 8,983
|
py
|
Python
|
pandapipes/test/stanet_comparison/test_water_stanet.py
|
lschmelting/pandapipes
|
d7ec5e77c113ae7a6670e66a90802ba2c84b8f86
|
[
"BSD-3-Clause"
] | null | null | null |
pandapipes/test/stanet_comparison/test_water_stanet.py
|
lschmelting/pandapipes
|
d7ec5e77c113ae7a6670e66a90802ba2c84b8f86
|
[
"BSD-3-Clause"
] | null | null | null |
pandapipes/test/stanet_comparison/test_water_stanet.py
|
lschmelting/pandapipes
|
d7ec5e77c113ae7a6670e66a90802ba2c84b8f86
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2020-2021 by Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel, and University of Kassel. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
import numpy as np
import pandapipes.networks.simple_water_networks as nw
import pytest
from pandapipes.pipeflow import logger as pf_logger
from pandapipes.test.stanet_comparison.pipeflow_stanet_comparison import pipeflow_stanet_comparison
try:
import pandaplan.core.pplog as logging
except ImportError:
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
pf_logger.setLevel(logging.WARNING)
# ---------- TEST AREA: combined networks ----------
# district_N
def test_case_district_grid_n(log_results=False):
"""
:param log_results:
:type log_results:
:return:
:rtype:
"""
net = nw.water_district_grid(method="n")
p_diff, v_diff_abs = pipeflow_stanet_comparison(net, log_results)
assert np.all(p_diff < 0.002)
assert np.all(v_diff_abs < 0.03)
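# Hedged usage note: besides running under pytest, each case here can be
# invoked directly with logging enabled (assumes the bundled simple water
# networks are available), e.g.:
#   test_case_district_grid_n(log_results=True)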
# district_PC
def test_case_district_grid_pc(log_results=False):
"""
:param log_results:
:type log_results:
:return:
:rtype:
"""
net = nw.water_district_grid(method="pc")
p_diff, v_diff_abs = pipeflow_stanet_comparison(net, log_results, friction_model="colebrook")
assert np.all(p_diff < 0.03)
assert np.all(v_diff_abs < 0.03)
# ---------- TEST AREA: meshed networks ----------
# pumps_N
def test_case_pumps_n(log_results=False):
"""
:param log_results:
:type log_results:
:return:
:rtype:
"""
net = nw.water_meshed_pumps(results_from="stanet")
p_diff, v_diff_abs = pipeflow_stanet_comparison(net, log_results)
assert np.all(p_diff < 0.002)
assert np.all(v_diff_abs < 0.03)
# delta_N
def test_case_delta_n(log_results=False):
"""
:param log_results:
:type log_results:
:return:
:rtype:
"""
net = nw.water_meshed_delta(results_from="stanet")
p_diff, v_diff_abs = pipeflow_stanet_comparison(net, log_results)
assert np.all(p_diff < 0.002)
assert np.all(v_diff_abs < 0.03)
# two_valves_N
def test_case_meshed_2valves_n(log_results=False):
net = nw.water_meshed_2valves(method="n", results_from="stanet")
p_diff, v_diff_abs = pipeflow_stanet_comparison(net, log_results)
assert np.all(p_diff < 0.001)
assert np.all(v_diff_abs < 0.001)
# two_valves_PC
def test_case_meshed_2valves_pc(log_results=False):
net = nw.water_meshed_2valves(method="pc", results_from="stanet")
p_diff, v_diff_abs = pipeflow_stanet_comparison(net, log_results, friction_model="colebrook")
assert np.all(p_diff < 0.001)
assert np.all(v_diff_abs < 0.001)
# ---------- TEST AREA: one pipe ----------
# pipe_1_N
def test_case_one_pipe1_n(log_results=False):
"""
:param log_results:
:type log_results:
:return:
:rtype:
"""
net = nw.water_one_pipe1(method="n", results_from="stanet")
p_diff, v_diff_abs = pipeflow_stanet_comparison(net, log_results)
assert np.all(p_diff < 0.002)
assert np.all(v_diff_abs < 0.03)
# pipe_1_PC
def test_case_one_pipe1_pc(log_results=False):
"""
:param log_results:
:type log_results:
:return:
:rtype:
"""
net = nw.water_one_pipe1(method="pc", results_from="stanet")
p_diff, v_diff_abs = pipeflow_stanet_comparison(net, log_results, friction_model="colebrook")
assert np.all(p_diff < 0.002)
assert np.all(v_diff_abs < 0.03)
# pipe_2_N
def test_case_one_pipe2_n(log_results=False):
"""
:param log_results:
:type log_results:
:return:
:rtype:
"""
net = nw.water_one_pipe2(method="n", results_from="stanet")
p_diff, v_diff_abs = pipeflow_stanet_comparison(net, log_results)
assert np.all(p_diff < 0.002)
assert np.all(v_diff_abs < 0.03)
# pipe_2_PC
def test_case_one_pipe2_pc(log_results=False):
"""
:param log_results:
:type log_results:
:return:
:rtype:
"""
net = nw.water_one_pipe2(method="pc", results_from="stanet")
p_diff, v_diff_abs = pipeflow_stanet_comparison(net, log_results, friction_model="colebrook")
assert np.all(p_diff < 0.002)
assert np.all(v_diff_abs < 0.03)
# pipe_3_N
def test_case_one_pipe3_n(log_results=False):
"""
:param log_results:
:type log_results:
:return:
:rtype:
"""
net = nw.water_one_pipe3(method="n", results_from="stanet")
p_diff, v_diff_abs = pipeflow_stanet_comparison(net, log_results)
assert np.all(p_diff < 0.002)
assert np.all(v_diff_abs < 0.03)
# pipe_3_PC
def test_case_one_pipe3_pc(log_results=False):
"""
:param log_results:
:type log_results:
:return:
:rtype:
"""
net = nw.water_one_pipe3(method="pc", results_from="stanet")
p_diff, v_diff_abs = pipeflow_stanet_comparison(net, log_results, friction_model="colebrook")
assert np.all(p_diff < 0.002)
assert np.all(v_diff_abs < 0.03)
# ---------- TEST AREA: strand net ----------
# strand_net_N
def test_case_simple_strand_net_n(log_results=False):
"""
:param log_results:
:type log_results:
:return:
:rtype:
"""
net = nw.water_simple_strand_net(method="n", results_from="stanet")
p_diff, v_diff_abs = pipeflow_stanet_comparison(net, log_results)
assert np.all(p_diff < 0.002)
assert np.all(v_diff_abs < 0.03)
# strand_net_PC
def test_case_simple_strand_net_pc(log_results=False):
"""
:param log_results:
:type log_results:
:return:
:rtype:
"""
net = nw.water_simple_strand_net(method="pc", results_from="stanet")
p_diff, v_diff_abs = pipeflow_stanet_comparison(net, log_results, friction_model="colebrook")
assert np.all(p_diff < 0.01)
assert np.all(v_diff_abs < 0.03)
# two_pipes_N
def test_case_two_pipes_n(log_results=False):
"""
:param log_results:
:type log_results:
:return:
:rtype:
"""
net = nw.water_strand_2pipes(method="n", results_from="stanet")
p_diff, v_diff_abs = pipeflow_stanet_comparison(net, log_results)
assert np.all(p_diff < 0.002)
assert np.all(v_diff_abs < 0.03)
# two_pipes_PC
def test_case_two_pipes_pc(log_results=False):
"""
:param log_results:
:type log_results:
:return:
:rtype:
"""
net = nw.water_strand_2pipes(method="pc", results_from="stanet")
p_diff, v_diff_abs = pipeflow_stanet_comparison(net, log_results, friction_model="colebrook")
assert np.all(p_diff < 0.002)
assert np.all(v_diff_abs < 0.03)
# cross_PC
def test_case_cross_pc(log_results=False):
"""
:param log_results:
:type log_results:
:return:
:rtype:
"""
net = nw.water_strand_cross(results_from="stanet")
p_diff, v_diff_abs = pipeflow_stanet_comparison(net, log_results, friction_model="colebrook")
assert np.all(p_diff < 0.002)
assert np.all(v_diff_abs < 0.03)
# pump_N
def test_case_pump_n(log_results=False):
"""
:param log_results:
:type log_results:
:return:
:rtype:
"""
net = nw.water_strand_pump()
p_diff, v_diff_abs = pipeflow_stanet_comparison(net, log_results)
assert np.all(p_diff < 0.002)
assert np.all(v_diff_abs < 0.03)
# ---------- TEST AREA: t_cross ----------
# t-cross_N
def test_case_tcross_n(log_results=False):
"""
:param log_results:
:type log_results:
:return:
:rtype:
"""
net = nw.water_tcross(method="n", results_from="stanet")
p_diff, v_diff_abs = pipeflow_stanet_comparison(net, log_results)
assert np.all(p_diff < 0.002)
assert np.all(v_diff_abs < 0.03)
# t-cross_PC
def test_case_tcross_pc(log_results=False):
"""
:param log_results:
:type log_results:
:return:
:rtype:
"""
net = nw.water_tcross(method="pc", results_from="stanet")
p_diff, v_diff_abs = pipeflow_stanet_comparison(net, log_results, friction_model="colebrook")
assert np.all(p_diff < 0.002)
assert np.all(v_diff_abs < 0.03)
# ---------- TEST AREA: two pressure junctions ----------
# two_pipes_N
def test_case_2eg_two_pipes_n(log_results=False):
"""
:param log_results:
:type log_results:
:return:
:rtype:
"""
net = nw.water_2eg_two_pipes(method="n", results_from="stanet")
p_diff, v_diff_abs = pipeflow_stanet_comparison(net, log_results)
assert np.all(p_diff < 0.002)
assert np.all(v_diff_abs < 0.03)
# two_pipes_PC
def test_case_2eg_two_pipes_pc(log_results=False):
"""
:param log_results:
:type log_results:
:return:
:rtype:
"""
net = nw.water_2eg_two_pipes(method="pc", results_from="stanet")
p_diff, v_diff_abs = pipeflow_stanet_comparison(net, log_results, friction_model="colebrook")
assert np.all(p_diff < 0.002)
assert np.all(v_diff_abs < 0.03)
if __name__ == "__main__":
pytest.main([r'pandapipes/test/stanet_comparison/test_water_stanet.py'])
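# To run a single comparison instead of the whole module, pytest's -k filter
# can be used (a sketch; assumes pytest is installed and the path is valid):
#   pytest.main(['-k', 'test_case_district_grid_n',
#                r'pandapipes/test/stanet_comparison/test_water_stanet.py'])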
| 26.037681
| 99
| 0.685517
| 1,363
| 8,983
| 4.164343
| 0.090976
| 0.147992
| 0.062016
| 0.03876
| 0.840028
| 0.795455
| 0.779598
| 0.779598
| 0.779598
| 0.766209
| 0
| 0.02596
| 0.185239
| 8,983
| 344
| 100
| 26.113372
| 0.749556
| 0.211288
| 0
| 0.516129
| 0
| 0
| 0.045119
| 0.008315
| 0
| 0
| 0
| 0
| 0.354839
| 1
| 0.177419
| false
| 0
| 0.064516
| 0
| 0.241935
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f4237a852413b8f635f90de8fc1babb119f0aa47
| 230
|
py
|
Python
|
frontstage/views/register/check_email.py
|
ONSdigital/ras-frontstage
|
e8ff1931b49cb3ab47b421aed6780e9e944dceea
|
[
"MIT"
] | 8
|
2017-06-30T12:32:02.000Z
|
2022-02-25T09:07:28.000Z
|
frontstage/views/register/check_email.py
|
ONSdigital/ras-frontstage
|
e8ff1931b49cb3ab47b421aed6780e9e944dceea
|
[
"MIT"
] | 256
|
2017-05-16T09:38:09.000Z
|
2022-03-28T13:38:42.000Z
|
frontstage/views/register/check_email.py
|
ONSdigital/ras-frontstage
|
e8ff1931b49cb3ab47b421aed6780e9e944dceea
|
[
"MIT"
] | 4
|
2017-09-29T08:58:36.000Z
|
2021-04-11T07:44:27.000Z
|
from flask import render_template
from frontstage.views.register import register_bp
@register_bp.route("/create-account/check-email")
def register_almost_done():
return render_template("register/register.almost-done.html")
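# A minimal sketch of exercising this route in a test (hypothetical; assumes
# an `app` object with register_bp already registered):
#   client = app.test_client()
#   response = client.get("/create-account/check-email")
#   assert response.status_code == 200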
| 25.555556
| 64
| 0.813043
| 31
| 230
| 5.83871
| 0.612903
| 0.154696
| 0.198895
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.086957
| 230
| 8
| 65
| 28.75
| 0.861905
| 0
| 0
| 0
| 0
| 0
| 0.265217
| 0.265217
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| true
| 0
| 0.4
| 0.2
| 0.8
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
f44547955c77a8fb44501253719b0394a14fca91
| 72,645
|
py
|
Python
|
teospy/liqiceair4.py
|
jarethholt/teospy
|
3bb23e67bbb765c0842aa8d4a73c1d55ea395d2f
|
[
"MIT"
] | null | null | null |
teospy/liqiceair4.py
|
jarethholt/teospy
|
3bb23e67bbb765c0842aa8d4a73c1d55ea395d2f
|
[
"MIT"
] | null | null | null |
teospy/liqiceair4.py
|
jarethholt/teospy
|
3bb23e67bbb765c0842aa8d4a73c1d55ea395d2f
|
[
"MIT"
] | null | null | null |
"""Liquid water-ice-humid air equilibrium functions.
This module provides functions to get the values of primary variables
for humid air in equilibrium with both liquid water and ice (wet-icy
air). This requires temperatures very close to the triple point
(273.16 K). The primary variables can be either the mass fractions of
dry air, liquid water, and ice; or the dry air mass fraction, total
entropy, and wet fraction of the condensates.
:Examples:
>>> pressure(airf=.99)
38338.9622424
>>> temperature(airf=.99)
273.157198087
>>> airfraction(temp=273.155)
0.994366063923
>>> pressure(temp=273.155)
67931.60108
>>> airfraction(pres=1e4)
0.961024307544
>>> temperature(pres=1e4)
273.159302793
>>> pressure(wair=.1,wliq=.2,wice=.3)
706.817425301
>>> temperature(wair=.1,wliq=.2,wice=.3)
273.159992933
>>> density(.1,wliq=.2,wice=.3)
0.0121364037568
>>> enthalpy(.1,wliq=.2,wice=.3)
900361.135280
>>> entropy(.1,wliq=.2,wice=.3)
3496.16306903
>>> airfraction(wair=.99,entr=0.,wetf=.5)
0.996583352944
>>> pressure(wair=.99,entr=0.,wetf=.5)
112016.075795
>>> temperature(wair=.99,entr=0.,wetf=.5)
273.151724970
>>> density(.99,entr=0.,wetf=.5)
1.43611528680
>>> enthalpy(.99,entr=0.,wetf=.5)
7356.12943724
>>> liquidfraction(.99,entr=0.,wetf=.5)
3.30296152581e-3
>>> solidfraction(.99,entr=0.,wetf=.5)
3.30296152581e-3
>>> vapourfraction(.99,entr=0.,wetf=.5)
3.39407694837e-3
>>> iml(.99,100.)
81605.5557729
>>> ifl(.99,100.)
83234.7314358
:Functions:
* :func:`eq_atp`: Calculate wet-icy air equilibrium properties from any
of the humid air dry fraction, temperature, or pressure.
* :func:`eq_wefli`: Calculate wet-icy air equilibrium properties from
either the mass fractions of dry air, liquid water, and ice; or from
the dry air fraction, entropy, and wet fraction of the condensates.
* :func:`eq_all`: Calculate wet-icy air equilibrium properties. This
function is just a common wrapper for `eq_atp` and `eq_wefli`.
* :func:`airfraction`: Humid air dry fraction at equilibrium.
* :func:`pressure`: Pressure at equilibrium.
* :func:`temperature`: Temperature at equilibrium.
* :func:`density`: Total wet-icy air density.
* :func:`dryairfraction`: Total dry fraction in wet-icy air.
* :func:`enthalpy`: Specific enthalpy of wet-icy air.
* :func:`entropy`: Specific entropy of wet-icy air.
* :func:`liquidfraction`: Mass fraction of liquid water in wet-icy air.
* :func:`solidfraction`: Mass fraction of ice in wet-icy air.
* :func:`vapourfraction`: Mass fraction of water vapour in wet-icy air.
* :func:`iml`: Isentropic melting level of wet-icy air.
* :func:`ifl`: Isentropic freezing level of wet-icy air.
"""
__all__ = ['eq_atp','eq_wefli','eq_all',
'airfraction','pressure','temperature',
'density','dryairfraction','enthalpy','entropy','liquidfraction',
'solidfraction','vapourfraction',
'iml','ifl']
import warnings
import numpy
from teospy import constants0
from teospy import flu1
from teospy import ice1
from teospy import air2
from teospy import flu2
from teospy import ice2
from teospy import maths3
from teospy import flu3a
from teospy import iceliq4
_CHKTOL = constants0.CHKTOL
_RWAT = constants0.RWAT
_RDRY = constants0.RDRY
_PATM = constants0.PATM
_TCELS = constants0.TCELS
_TTP = constants0.TTP
_PTPE = constants0.PTPE
_LLVTP = constants0.LLVTP
_LILTP = -constants0.LILTP
_DLTP = constants0.DLTP
_DITP = constants0.DITP
_CDRY = constants0.CDRY
_CVAP = constants0.CVAP
_CLIQ = constants0.CLIQ
_CICE = constants0.CICE
_EPSW = _RDRY/_RWAT
_AVL = _LLVTP / (_RWAT*_TTP)
_ALI = _LILTP/(_DITP**(-1) - _DLTP**(-1))/_PTPE
_chkflubnds = constants0.chkflubnds
_chkhumbnds = constants0.chkhumbnds
_chkicebnds = constants0.chkicebnds
_flu_f = flu1.flu_f
_ice_g = ice1.ice_g
_air_f = air2.air_f
_air_eq_pressure = air2.eq_pressure
_air_eq_vappot = air2.eq_vappot
_flu_eq_chempot = flu2.eq_chempot
_flu_eq_pressure = flu2.eq_pressure
_newton = maths3.newton
_dliq_default = flu3a._dliq_default
### Equilibrium functions
def _approx_a(airf):
"""Approximate TPDhDl at A.
Approximate the temperature, pressure, humid air density, and liquid
water density of wet-icy air at the given humid air dry fraction.
:arg float airf: Humid air dry fraction in kg/kg.
:returns: Temperature, pressure, humid air density, and liquid water
density (all in SI units).
"""
temp = _TTP * (1 - _EPSW*airf/(1-airf)/_ALI)
pres = _PTPE*(_EPSW*airf + 1-airf)/(1-airf)
dhum = pres/(_RDRY*temp) / (airf + (1-airf)/_EPSW)
dliq = _dliq_default(temp,pres)
return temp, pres, dhum, dliq
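# Note on the approximations above: with the vapour partial pressure pinned at
# the triple point, Dalton's law for an ideal mixture gives pres = _PTPE/x_vap
# with mole fraction x_vap = (1-airf)/(airf*_EPSW + 1-airf), i.e.
# pres - _PTPE = _PTPE*_EPSW*airf/(1-airf). Substituting that overpressure into
# the linearized melting curve pres = _PTPE*(1 + _ALI*(1 - temp/_TTP)) yields
# the temp expression, and dhum follows from the ideal-gas law with the
# mixture's specific gas constant _RDRY*(airf + (1-airf)/_EPSW).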
def _approx_t(temp):
"""Approximate APDhDl at T.
Approximate the humid air dry fraction, pressure, humid air density,
and liquid water density of wet-icy air at the given temperature.
:arg float temp: Temperature in K.
:returns: Humid air dry fraction, pressure, humid air density, and
liquid water density (all in SI units).
"""
#pres = _PTPE*(1 + _ALI*(1-temp/_TTP))
#dliq = _dliq_default(temp,pres)
pres, dliq = iceliq4._approx_t(temp)
pvap = _PTPE * (1 - _AVL*(1 - temp/_TTP))
airf = (pres-pvap)/(pres-pvap + _EPSW*pvap)
dhum = pres/(_RDRY*temp) / (airf + (1-airf)/_EPSW)
return airf, pres, dhum, dliq
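# Note on the approximations above: pvap is the linearized Clausius-Clapeyron
# vapour pressure along the vaporization curve, and inverting the ideal-mixture
# relation pvap/pres = (1-airf)/(airf*_EPSW + 1-airf) for the dry fraction
# gives airf = (pres-pvap)/(pres-pvap + _EPSW*pvap).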
def _approx_p(pres):
"""Approximate ATDhDl at P.
Approximate the humid air dry fraction, temperature, humid air
density, and liquid water density of wet-icy air at the given
pressure.
:arg float pres: Pressure in Pa.
:returns: Humid air dry fraction, temperature, humid air density,
and liquid water density (all in SI units).
"""
#temp = min(_TTP*(1 - (pres/_PTPE-1)/_ALI), _TTP-_CHKTOL)
#dliq = _dliq_default(temp,pres)
temp, dliq = iceliq4._approx_p(pres)
pvap = _PTPE * (1 - _AVL*(1 - temp/_TTP))
airf = (pres-pvap)/(pres-pvap + _EPSW*pvap)
dhum = pres/(_RDRY*temp) / (airf + (1-airf)/_EPSW)
return airf, temp, dhum, dliq
def _approx_wef(wair,entr,wetf):
"""Approximate ATPDhDl at WEF.
Approximate the humid air dry fraction, temperature, pressure, humid
air density, and liquid water density of wet-icy air at the given
total dry fraction, specific entropy, and wet fraction of
condensate.
:arg float wair: Total dry fraction in kg/kg.
:arg float entr: Entropy in J/kg/K.
:arg float wetf: Wet fraction in kg/kg.
:returns: Humid air dry fraction, temperature, pressure, humid air
density, and liquid water density (all in SI units).
"""
earg = ((entr + (1-wair)*(1-wetf)*_LILTP/_TTP
- wair*_RDRY*numpy.log(_PATM/_PTPE)) / (wair*_RDRY))
coeff = (_LLVTP + (1-wetf)*_LILTP)/(_RWAT*_TTP)
w = coeff * numpy.exp(earg)
z = w - w**2 + 1.5*w**3
pres = _PTPE + coeff*_PTPE/z
airf, temp, dhum, dliq = _approx_p(pres)
return airf, temp, pres, dhum, dliq
def _diff_a(t,p,dh,dl,airf):
"""Calculate wet-icy air disequilibrium at A.
Calculate both sides of the equations
given pressure = pressure in humid air
given pressure = pressure of liquid water
chemical potential of liquid water = potential of ice
chemical potential of liquid water = potential of water vapour
and their Jacobians with respect to temperature, pressure, humid air
density, and liquid water density. Solving these equations gives
equilibrium values at the given humid air dry fraction.
:arg float t: Temperature in K.
:arg float p: Pressure in Pa.
:arg float dh: Humid air density in kg/m3.
:arg float dl: Liquid water density in kg/m3.
:arg float airf: Humid air dry fraction in kg/kg.
:returns: Left-hand side of the equation, right-hand side,
Jacobian of LHS, and Jacobian of RHS.
:rtype: tuple(array(float))
"""
ph = _air_eq_pressure(0,0,0,airf,t,dh)
pl = _flu_eq_pressure(0,0,t,dl)
gl = _flu_eq_chempot(0,0,t,dl)
gi = _ice_g(0,0,t,p)
gv = _air_eq_vappot(0,0,0,airf,t,dh)
lhs = numpy.array([p, p, gl, gl])
rhs = numpy.array([ph, pl, gi, gv])
ph_t = _air_eq_pressure(0,1,0,airf,t,dh)
ph_d = _air_eq_pressure(0,0,1,airf,t,dh)
pl_t = _flu_eq_pressure(1,0,t,dl)
pl_d = _flu_eq_pressure(0,1,t,dl)
gl_t = _flu_eq_chempot(1,0,t,dl)
gl_d = _flu_eq_chempot(0,1,t,dl)
gi_t = _ice_g(1,0,t,p)
gi_p = _ice_g(0,1,t,p)
gv_t = _air_eq_vappot(0,1,0,airf,t,dh)
gv_d = _air_eq_vappot(0,0,1,airf,t,dh)
dlhs = numpy.array([[0.,1.,0.,0.], [0.,1.,0.,0.], [gl_t,0.,0.,gl_d],
[gl_t,0.,0.,gl_d]])
drhs = numpy.array([[ph_t,0.,ph_d,0.], [pl_t,0.,0.,pl_d],
[gi_t,gi_p,0.,0.], [gv_t,0.,gv_d,0.]])
return lhs, rhs, dlhs, drhs
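# Note: the lhs/rhs split mirrors what maths3.newton presumably expects: it
# iterates on the residual F = lhs - rhs with Jacobian dlhs - drhs, which is
# why each _diff_* function returns both sides and both Jacobians separately.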
def _diff_t(a,p,dh,dl,temp):
"""Calculate wet-icy air disequilibrium at T.
Calculate both sides of the equations
given pressure = pressure in humid air
given pressure = pressure of liquid water
chemical potential of liquid water = potential of ice
chemical potential of liquid water = potential of water vapour
and their Jacobians with respect to humid air dry fraction,
pressure, humid air density, and liquid water density. Solving these
equations gives equilibrium values at the given temperature.
:arg float a: Humid air dry fraction in kg/kg.
:arg float p: Pressure in Pa.
:arg float dh: Humid air density in kg/m3.
:arg float dl: Liquid water density in kg/m3.
:arg float temp: Temperature in K.
:returns: Left-hand side of the equation, right-hand side,
Jacobian of LHS, and Jacobian of RHS.
:rtype: tuple(array(float))
"""
ph = _air_eq_pressure(0,0,0,a,temp,dh)
pl = _flu_eq_pressure(0,0,temp,dl)
gl = _flu_eq_chempot(0,0,temp,dl)
gi = _ice_g(0,0,temp,p)
gv = _air_eq_vappot(0,0,0,a,temp,dh)
lhs = numpy.array([p, p, gl, gl])
rhs = numpy.array([ph, pl, gi, gv])
ph_a = _air_eq_pressure(1,0,0,a,temp,dh)
ph_d = _air_eq_pressure(0,0,1,a,temp,dh)
pl_d = _flu_eq_pressure(0,1,temp,dl)
gl_d = _flu_eq_chempot(0,1,temp,dl)
gi_p = _ice_g(0,1,temp,p)
gv_a = _air_eq_vappot(1,0,0,a,temp,dh)
gv_d = _air_eq_vappot(0,0,1,a,temp,dh)
dlhs = numpy.array([[0.,1.,0.,0.], [0.,1.,0.,0.], [0.,0.,0.,gl_d],
[0.,0.,0.,gl_d]])
drhs = numpy.array([[ph_a,0.,ph_d,0.], [0.,0.,0.,pl_d], [0.,gi_p,0.,0.],
[gv_a,0.,gv_d,0.]])
return lhs, rhs, dlhs, drhs
def _diff_p(a,t,dh,dl,pres):
"""Calculate wet-icy air disequilibrium at pressure.
Calculate both sides of the equations
given pressure = pressure in humid air
given pressure = pressure of liquid water
chemical potential of liquid water = potential of ice
chemical potential of liquid water = potential of water vapour
and their Jacobians with respect to dry air mass fraction in humid
air, temperature, humid air density, and liquid water density.
Solving these equations gives equilibrium values at the given
pressure.
:arg float a: Humid air dry fraction in kg/kg.
:arg float t: Temperature in K.
:arg float dh: Humid air density in kg/m3.
:arg float dl: Liquid water density in kg/m3.
:arg float pres: Pressure in Pa.
:returns: Left-hand side of the equation, right-hand side,
Jacobian of LHS, and Jacobian of RHS.
:rtype: tuple(array(float))
"""
ph = _air_eq_pressure(0,0,0,a,t,dh)
pl = _flu_eq_pressure(0,0,t,dl)
gl = _flu_eq_chempot(0,0,t,dl)
gi = _ice_g(0,0,t,pres)
gv = _air_eq_vappot(0,0,0,a,t,dh)
lhs = numpy.array([pres, pres, gl, gl])
rhs = numpy.array([ph, pl, gi, gv])
ph_a = _air_eq_pressure(1,0,0,a,t,dh)
ph_t = _air_eq_pressure(0,1,0,a,t,dh)
ph_d = _air_eq_pressure(0,0,1,a,t,dh)
pl_t = _flu_eq_pressure(1,0,t,dl)
pl_d = _flu_eq_pressure(0,1,t,dl)
gl_t = _flu_eq_chempot(1,0,t,dl)
gl_d = _flu_eq_chempot(0,1,t,dl)
gi_t = _ice_g(1,0,t,pres)
gv_a = _air_eq_vappot(1,0,0,a,t,dh)
gv_t = _air_eq_vappot(0,1,0,a,t,dh)
gv_d = _air_eq_vappot(0,0,1,a,t,dh)
dlhs = numpy.array([[0.,0.,0.,0.], [0.,0.,0.,0.], [0.,gl_t,0.,gl_d],
[0.,gl_t,0.,gl_d]])
drhs = numpy.array([[ph_a,ph_t,ph_d,0.], [0.,pl_t,0.,pl_d], [0.,gi_t,0.,0.],
[gv_a,gv_t,gv_d,0.]])
return lhs, rhs, dlhs, drhs
def _diff_wef(a,t,p,dh,dl,wair,entr,wetf):
"""Calculate wet-icy air disequilibrium at WEF.
Calculate both sides of the equations
given pressure = pressure in humid air
given pressure = pressure of liquid water
chemical potential of liquid water = potential of ice
chemical potential of liquid water = potential of water vapour
given entropy = entropy of wet-icy air
and their Jacobians with respect to humid air dry fraction,
temperature, pressure, humid air density, and liquid water density.
Solving these equations gives equilibrium values at the given total
dry fraction, specific entropy, and wet fraction of condensate.
:arg float a: Humid air dry fraction in kg/kg.
:arg float t: Temperature in K.
:arg float p: Pressure in Pa.
:arg float dh: Humid air density in kg/m3.
:arg float dl: Liquid water density in kg/m3.
:arg float wair: Total dry fraction in kg/kg.
:arg float entr: Entropy in J/kg/K.
:arg float wetf: Wet fraction of condensate in kg/kg.
:returns: Left-hand side of the equation, right-hand side,
Jacobian of LHS, and Jacobian of RHS.
:rtype: tuple(array(float))
"""
ph = _air_eq_pressure(0,0,0,a,t,dh)
pl = _flu_eq_pressure(0,0,t,dl)
gl = _flu_eq_chempot(0,0,t,dl)
gi = _ice_g(0,0,t,p)
gv = _air_eq_vappot(0,0,0,a,t,dh)
sh = -_air_f(0,1,0,a,t,dh)
sl = -_flu_f(1,0,t,dl)
si = -_ice_g(1,0,t,p)
s = wair/a*sh + wetf*(1-wair/a)*sl + (1-wetf)*(1-wair/a)*si
lhs = numpy.array([p, p, gl, gl, entr])
rhs = numpy.array([ph, pl, gi, gv, s])
ph_a = _air_eq_pressure(1,0,0,a,t,dh)
ph_t = _air_eq_pressure(0,1,0,a,t,dh)
ph_d = _air_eq_pressure(0,0,1,a,t,dh)
pl_t = _flu_eq_pressure(1,0,t,dl)
pl_d = _flu_eq_pressure(0,1,t,dl)
gl_t = _flu_eq_chempot(1,0,t,dl)
gl_d = _flu_eq_chempot(0,1,t,dl)
gi_t = _ice_g(1,0,t,p)
gi_p = _ice_g(0,1,t,p)
gv_a = _air_eq_vappot(1,0,0,a,t,dh)
gv_t = _air_eq_vappot(0,1,0,a,t,dh)
gv_d = _air_eq_vappot(0,0,1,a,t,dh)
sh_a = -_air_f(1,1,0,a,t,dh)
sh_t = -_air_f(0,2,0,a,t,dh)
sh_d = -_air_f(0,1,1,a,t,dh)
sl_t = -_flu_f(2,0,t,dl)
sl_d = -_flu_f(1,1,t,dl)
si_t = -_ice_g(2,0,t,p)
si_p = -_ice_g(1,1,t,p)
s_a = -wair/a**2*(sh - a*sh_a - wetf*sl - (1-wetf)*si)
s_t = wair/a*sh_t + wetf*(1-wair/a)*sl_t + (1-wetf)*(1-wair/a)*si_t
s_p = (1-wetf)*(1-wair/a)*si_p
s_dh = wair/a*sh_d
s_dl = wetf*(1-wair/a)*sl_d
dlhs = numpy.array([[0.,0.,1.,0.,0.], [0.,0.,1.,0.,0.],
[0.,gl_t,0.,0.,gl_d], [0.,gl_t,0.,0.,gl_d], [0.,0.,0.,0.,0.]])
drhs = numpy.array([[ph_a,ph_t,0.,ph_d,0.], [0.,pl_t,0.,0.,pl_d],
[0.,gi_t,gi_p,0.,0.], [gv_a,gv_t,0.,gv_d,0.], [s_a,s_t,s_p,s_dh,s_dl]])
return lhs, rhs, dlhs, drhs
def eq_atp(airf=None,temp=None,pres=None,dhum=None,dliq=None,
chkvals=False,chktol=_CHKTOL,airf0=None,temp0=None,pres0=None,
dhum0=None,dliq0=None,chkbnd=False,mathargs=None):
"""Get primary wet-icy air variables at ATP.
Get the values of all primary variables for wet-icy air at any of
the humid air dry fraction, temperature, or pressure. At least one
of these values must be provided.
If the calculation has already been done, the results can be passed
to avoid unnecessary repeat calculations. If enough values are
passed, they will be checked for consistency if chkvals is True.
:arg airf: Humid air dry fraction in kg/kg. If unknown, pass None
(default) and it will be calculated.
:type airf: float or None
:arg temp: Temperature in K. If unknown, pass None (default) and it
will be calculated.
:type temp: float or None
:arg pres: Pressure in Pa. If unknown, pass None (default) and it
will be calculated.
:type pres: float or None
:arg dhum: Humid air density in kg/m3. If unknown, pass None
(default) and it will be calculated.
:type dhum: float or None
:arg dliq: Liquid water density in kg/m3. If unknown, pass None
(default) and it will be calculated.
:type dliq: float or None
:arg bool chkvals: If True (default False) and all values are given,
this function will calculate the disequilibrium and raise a
warning if the results are not within a given tolerance.
:arg float chktol: Tolerance to use when checking values (default
_CHKTOL).
:arg airf0: Initial guess for the humid air dry fraction in kg/kg.
If None (default) then the appropriate `_approx_*` is used.
:type airf0: float or None
:arg temp0: Initial guess for the temperature in K. If None
(default) then the appropriate `_approx_*` is used.
:type temp0: float or None
:arg pres0: Initial guess for the pressure in Pa. If None (default)
then the appropriate `_approx_*` is used.
:type pres0: float or None
:arg dhum0: Initial guess for the humid air density in kg/m3. If
None (default) then the appropriate `_approx_*` is used.
:type dhum0: float or None
:arg dliq0: Initial guess for the liquid water density in kg/m3. If
None (default) then the appropriate `_approx_*` is used.
:type dliq0: float or None
:arg bool chkbnd: If True then warnings are raised when the given
values are valid but outside the recommended bounds (default
False).
:arg mathargs: Keyword arguments to the root-finder
:func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None
(default) then no arguments are passed and default parameters
will be used.
:returns: Humid air dry fraction, temperature, pressure, humid air
density, and liquid water density (all in SI units).
:raises ValueError: If no values are provided.
:raises RuntimeWarning: If the relative disequilibrium is more than
chktol, if chkvals is True and all values are given.
"""
if all(val is None for val in (airf,temp,pres)):
errmsg = 'Must provide one of airf, temp, or pres'
raise ValueError(errmsg)
if mathargs is None:
mathargs = dict()
if airf is not None:
if any(val is None for val in (temp,pres,dhum,dliq)):
x0 = (temp0,pres0,dhum0,dliq0)
fargs = (airf,)
x1 = _newton(_diff_a,x0,_approx_a,fargs=fargs,**mathargs)
temp, pres, dhum, dliq = x1
elif temp is not None:
if any(val is None for val in (airf,pres,dhum,dliq)):
x0 = (airf0,pres0,dhum0,dliq0)
fargs = (temp,)
x1 = _newton(_diff_t,x0,_approx_t,fargs=fargs,**mathargs)
airf, pres, dhum, dliq = x1
else:
if any(val is None for val in (airf,temp,dhum,dliq)):
x0 = (airf0,temp0,dhum0,dliq0)
fargs = (pres,)
x1 = _newton(_diff_p,x0,_approx_p,fargs=fargs,**mathargs)
airf, temp, dhum, dliq = x1
_chkflubnds(temp,dliq,chkbnd=chkbnd)
_chkhumbnds(airf,temp,dhum,chkbnd=chkbnd)
_chkicebnds(temp,pres,chkbnd=chkbnd)
if not chkvals:
return airf, temp, pres, dhum, dliq
lhs, rhs, __, __ = _diff_a(temp,pres,dhum,dliq,airf)
errs = list()
for (l,r) in zip(lhs,rhs):
if abs(r) >= chktol:
errs.append(abs(l/r-1))
else:
errs.append(abs(l-r))
if max(errs) > chktol:
warnmsg = ('Given values {0} and solutions {1} disagree to more than '
'the tolerance {2}').format(lhs,rhs,chktol)
warnings.warn(warnmsg,RuntimeWarning)
return airf, temp, pres, dhum, dliq
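# Example (values from the module doctests): specifying only the humid air
# dry fraction solves for the remaining primary variables:
#   airf, temp, pres, dhum, dliq = eq_atp(airf=0.99)
#   # temp ~= 273.157198087 K, pres ~= 38338.9622424 Pa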
def eq_wefli(wair,entr=None,wetf=None,wliq=None,wice=None,airf=None,
temp=None,pres=None,dhum=None,dliq=None,chkvals=False,
chktol=_CHKTOL,airf0=None,temp0=None,pres0=None,dhum0=None,
dliq0=None,chkbnd=False,mathargs=None):
"""Get equilibrium values at WEF or WLI.
Get the values of all primary variables for wet-icy air with the
given properties. The properties can be either the total dry
fraction, specific entropy, and wet fraction of condensate; or the
total mass fractions of dry air, liquid water, and ice.
If the calculation has already been done, the results can be passed
to avoid unnecessary repeat calculations. If enough values are
passed, they will be checked for consistency if chkvals is True.
:arg float wair: Total dry fraction in kg/kg.
:arg entr: Entropy in J/kg/K.
:type entr: float or None
:arg wetf: Wet fraction of condensate in kg/kg.
:type wetf: float or None
:arg wliq: Mass fraction of liquid water in kg/kg.
:type wliq: float or None
:arg wice: Mass fraction of ice in kg/kg.
:type wice: float or None
:arg airf: Humid air dry fraction in kg/kg. If unknown, pass None
(default) and it will be calculated.
:type airf: float or None
:arg temp: Temperature in K. If unknown, pass None (default) and it
will be calculated.
:type temp: float or None
:arg pres: Pressure in Pa. If unknown, pass None (default) and it
will be calculated.
:type pres: float or None
:arg dhum: Humid air density in kg/m3. If unknown, pass None
(default) and it will be calculated.
:type dhum: float or None
:arg dliq: Liquid water density in kg/m3. If unknown, pass None
(default) and it will be calculated.
:type dliq: float or None
:arg bool chkvals: If True (default False) and all values are given,
this function will calculate the disequilibrium and raise a
warning if the results are not within a given tolerance.
:arg float chktol: Tolerance to use when checking values (default
_CHKTOL).
:arg airf0: Initial guess for the humid air dry fraction in kg/kg.
If None (default) then the appropriate `_approx_*` is used.
:type airf0: float or None
:arg temp0: Initial guess for the temperature in K. If None
(default) then the appropriate `_approx_*` is used.
:type temp0: float or None
:arg pres0: Initial guess for the pressure in Pa. If None (default)
then the appropriate `_approx_*` is used.
:type pres0: float or None
:arg dhum0: Initial guess for the humid air density in kg/m3. If
None (default) then the appropriate `_approx_*` is used.
:type dhum0: float or None
:arg dliq0: Initial guess for the liquid water density in kg/m3. If
None (default) then the appropriate `_approx_*` is used.
:type dliq0: float or None
:arg bool chkbnd: If True then warnings are raised when the given
values are valid but outside the recommended bounds (default
False).
:arg mathargs: Keyword arguments to the root-finder
:func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None
(default) then no arguments are passed and default parameters
will be used.
:returns: Humid air dry fraction, temperature, pressure, humid air
density, and liquid water density (all in SI units).
:raises ValueError: If not enough values are provided.
:raises ValueError: If all mass fractions are provided but their sum
is larger than 1.
:raises RuntimeWarning: If the relative disequilibrium is more than
chktol, if chkvals is True and all values are given.
"""
cond1 = (entr is None or wetf is None)
cond2 = (wliq is None or wice is None)
if cond1 and cond2:
errmsg = ('Not enough values were provided. Please provide wair and '
'either (entr,wetf) or (wliq,wice).')
raise ValueError(errmsg)
if mathargs is None:
mathargs = dict()
if not cond1:
if any(val is None for val in (airf,temp,pres,dhum,dliq)):
x0 = (airf0,temp0,pres0,dhum0,dliq0)
fargs = (wair,entr,wetf)
x1 = _newton(_diff_wef,x0,_approx_wef,fargs=fargs,**mathargs)
airf, temp, pres, dhum, dliq = x1
else:
wtot = wair + wliq + wice
if wtot >= 1:
errmsg = ('The mass fractions {0} sum to more than '
'1').format((wair,wliq,wice))
raise ValueError(errmsg)
if airf is None:
airf = wair / (1-wliq-wice)
if any(val is None for val in (temp,pres,dhum,dliq)):
x0 = (temp0,pres0,dhum0,dliq0)
fargs = (airf,)
x1 = _newton(_diff_a,x0,_approx_a,fargs=fargs,**mathargs)
temp, pres, dhum, dliq = x1
_chkflubnds(temp,dliq,chkbnd=chkbnd)
_chkhumbnds(airf,temp,dhum,chkbnd=chkbnd)
_chkicebnds(temp,pres,chkbnd=chkbnd)
if not chkvals:
return airf, temp, pres, dhum, dliq
if entr is None:
sh = -_air_f(0,1,0,airf,temp,dhum)
sl = -_flu_f(1,0,temp,dliq)
si = -_ice_g(1,0,temp,pres)
entr = wair/airf*sh + wliq*sl + wice*si
if wetf is None:
wetf = wliq/(wliq + wice)
lhs, rhs, __, __ = _diff_wef(airf,temp,pres,dhum,dliq,wair,entr,wetf)
errs = list()
for (l,r) in zip(lhs,rhs):
if abs(r) >= chktol:
errs.append(abs(l/r-1))
else:
errs.append(abs(l-r))
if max(errs) > chktol:
warnmsg = ('Given values {0} and solutions {1} disagree to more than '
'the tolerance {2}').format(lhs,rhs,chktol)
warnings.warn(warnmsg,RuntimeWarning)
return airf, temp, pres, dhum, dliq
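# Example (values from the module doctests): the three total mass fractions
# can be given instead of (entr, wetf):
#   airf, temp, pres, dhum, dliq = eq_wefli(0.1, wliq=0.2, wice=0.3)
#   # temp ~= 273.159992933 K, pres ~= 706.817425301 Pa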
def eq_all(wair=None,entr=None,wetf=None,wliq=None,wice=None,airf=None,
temp=None,pres=None,dhum=None,dliq=None,chkvals=False,
chktol=_CHKTOL,airf0=None,temp0=None,pres0=None,dhum0=None,
dliq0=None,chkbnd=False,mathargs=None):
"""Get equilibrium values at ATP, WEF, or WLI.
Get the values of all primary variables for wet-icy air with the
given properties. The properties can be: any of the humid air dry
fraction, temperature or pressure; or the total dry fraction,
specific entropy, and wet fraction of condensate; or the total mass
fractions of dry air, liquid water, and ice. This function only
serves as a common wrapper for :func:`eq_atp` and :func:`eq_wefli`,
which handle these cases separately.
If the calculation has already been done, the results can be passed
to avoid unnecessary repeat calculations. If enough values are
passed, they will be checked for consistency if chkvals is True.
:arg wair: Total dry fraction in kg/kg.
:type wair: float or None
:arg entr: Entropy in J/kg/K.
:type entr: float or None
:arg wetf: Wet fraction of condensate in kg/kg.
:type wetf: float or None
:arg wliq: Mass fraction of liquid water in kg/kg.
:type wliq: float or None
:arg wice: Mass fraction of ice in kg/kg.
:type wice: float or None
:arg airf: Humid air dry fraction in kg/kg. If unknown, pass None
(default) and it will be calculated.
:type airf: float or None
:arg temp: Temperature in K. If unknown, pass None (default) and it
will be calculated.
:type temp: float or None
:arg pres: Pressure in Pa. If unknown, pass None (default) and it
will be calculated.
:type pres: float or None
:arg dhum: Humid air density in kg/m3. If unknown, pass None
(default) and it will be calculated.
:type dhum: float or None
:arg dliq: Liquid water density in kg/m3. If unknown, pass None
(default) and it will be calculated.
:type dliq: float or None
:arg bool chkvals: If True (default False) and all values are given,
this function will calculate the disequilibrium and raise a
warning if the results are not within a given tolerance.
:arg float chktol: Tolerance to use when checking values (default
_CHKTOL).
:arg airf0: Initial guess for the humid air dry fraction in kg/kg.
If None (default) then the appropriate `_approx_*` is used.
:type airf0: float or None
:arg temp0: Initial guess for the temperature in K. If None
(default) then the appropriate `_approx_*` is used.
:type temp0: float or None
:arg pres0: Initial guess for the pressure in Pa. If None (default)
then the appropriate `_approx_*` is used.
:type pres0: float or None
:arg dhum0: Initial guess for the humid air density in kg/m3. If
None (default) then the appropriate `_approx_*` is used.
:type dhum0: float or None
:arg dliq0: Initial guess for the liquid water density in kg/m3. If
None (default) then the appropriate `_approx_*` is used.
:type dliq0: float or None
:arg bool chkbnd: If True then warnings are raised when the given
values are valid but outside the recommended bounds (default
False).
:arg mathargs: Keyword arguments to the root-finder
:func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None
(default) then no arguments are passed and default parameters
will be used.
:returns: Humid air dry fraction, temperature, pressure, humid air
density, and liquid water density (all in SI units).
:raises ValueError: If not enough values are provided.
:raises ValueError: If all mass fractions are provided but their sum
is larger than 1.
:raises RuntimeWarning: If the relative disequilibrium is more than
chktol, if chkvals is True and all values are given.
"""
if all(val is None for val in (airf,temp,pres)):
res = eq_wefli(wair,entr=entr,wetf=wetf,wliq=wliq,wice=wice,dhum=dhum,
dliq=dliq,chkvals=chkvals,chktol=chktol,airf0=airf0,temp0=temp0,
pres0=pres0,dhum0=dhum0,dliq0=dliq0,chkbnd=chkbnd,mathargs=mathargs)
else:
res = eq_atp(airf=airf,temp=temp,pres=pres,dhum=dhum,dliq=dliq,
chkvals=chkvals,chktol=chktol,airf0=airf0,temp0=temp0,pres0=pres0,
dhum0=dhum0,dliq0=dliq0,chkbnd=chkbnd,mathargs=mathargs)
return res
## Thermodynamic properties not needing mass fractions
def airfraction(wair=None,entr=None,wetf=None,wliq=None,wice=None,
airf=None,temp=None,pres=None,dhum=None,dliq=None,chkvals=False,
chktol=_CHKTOL,airf0=None,temp0=None,pres0=None,dhum0=None,
dliq0=None,chkbnd=False,mathargs=None):
"""Calculate wet-icy air humid air dry fraction.
Calculate the mass fraction of dry air in humid air for wet-icy air.
:arg wair: Total dry fraction in kg/kg.
:type wair: float or None
:arg entr: Entropy in J/kg/K.
:type entr: float or None
:arg wetf: Wet fraction of condensate in kg/kg.
:type wetf: float or None
:arg wliq: Mass fraction of liquid water in kg/kg.
:type wliq: float or None
:arg wice: Mass fraction of ice in kg/kg.
:type wice: float or None
:arg airf: Humid air dry fraction in kg/kg. If unknown, pass None
(default) and it will be calculated.
:type airf: float or None
:arg temp: Temperature in K. If unknown, pass None (default) and it
will be calculated.
:type temp: float or None
:arg pres: Pressure in Pa. If unknown, pass None (default) and it
will be calculated.
:type pres: float or None
:arg dhum: Humid air density in kg/m3. If unknown, pass None
(default) and it will be calculated.
:type dhum: float or None
:arg dliq: Liquid water density in kg/m3. If unknown, pass None
(default) and it will be calculated.
:type dliq: float or None
:arg bool chkvals: If True (default False) and all values are given,
this function will calculate the disequilibrium and raise a
warning if the results are not within a given tolerance.
:arg float chktol: Tolerance to use when checking values (default
_CHKTOL).
:arg airf0: Initial guess for the humid air dry fraction in kg/kg.
If None (default) then the appropriate `_approx_*` is used.
:type airf0: float or None
:arg temp0: Initial guess for the temperature in K. If None
(default) then the appropriate `_approx_*` is used.
:type temp0: float or None
:arg pres0: Initial guess for the pressure in Pa. If None (default)
then the appropriate `_approx_*` is used.
:type pres0: float or None
:arg dhum0: Initial guess for the humid air density in kg/m3. If
None (default) then the appropriate `_approx_*` is used.
:type dhum0: float or None
:arg dliq0: Initial guess for the liquid water density in kg/m3. If
None (default) then the appropriate `_approx_*` is used.
:type dliq0: float or None
:arg bool chkbnd: If True then warnings are raised when the given
values are valid but outside the recommended bounds (default
False).
:arg mathargs: Keyword arguments to the root-finder
:func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None
(default) then no arguments are passed and default parameters
will be used.
:returns: Humid air dry fraction in kg/kg.
:raises ValueError: If not enough values are provided.
:raises ValueError: If all mass fractions are provided but their sum
is larger than 1.
:raises RuntimeWarning: If the relative disequilibrium is more than
chktol, if chkvals is True and all values are given.
:Examples:
>>> airfraction(pres=1e4)
0.961024307544
>>> airfraction(temp=273.155)
0.994366063923
>>> airfraction(wair=.99,entr=0.,wetf=.5)
0.996583352944
"""
airf, temp, pres, dhum, dliq = eq_all(wair=wair,entr=entr,wetf=wetf,
wliq=wliq,wice=wice,airf=airf,temp=temp,pres=pres,dhum=dhum,dliq=dliq,
chkvals=chkvals,chktol=chktol,airf0=airf0,temp0=temp0,pres0=pres0,
dhum0=dhum0,dliq0=dliq0,chkbnd=chkbnd,mathargs=mathargs)
return airf
def pressure(wair=None,entr=None,wetf=None,wliq=None,wice=None,
airf=None,temp=None,pres=None,dhum=None,dliq=None,chkvals=False,
chktol=_CHKTOL,airf0=None,temp0=None,pres0=None,dhum0=None,
dliq0=None,chkbnd=False,mathargs=None):
"""Calculate wet-icy air pressure.
Calculate the pressure of wet-icy air.
:arg wair: Total dry fraction in kg/kg.
:type wair: float or None
:arg entr: Entropy in J/kg/K.
:type entr: float or None
:arg wetf: Wet fraction of condensate in kg/kg.
:type wetf: float or None
:arg wliq: Mass fraction of liquid water in kg/kg.
:type wliq: float or None
:arg wice: Mass fraction of ice in kg/kg.
:type wice: float or None
:arg airf: Humid air dry fraction in kg/kg. If unknown, pass None
(default) and it will be calculated.
:type airf: float or None
:arg temp: Temperature in K. If unknown, pass None (default) and it
will be calculated.
:type temp: float or None
:arg pres: Pressure in Pa. If unknown, pass None (default) and it
will be calculated.
:type pres: float or None
:arg dhum: Humid air density in kg/m3. If unknown, pass None
(default) and it will be calculated.
:type dhum: float or None
:arg dliq: Liquid water density in kg/m3. If unknown, pass None
(default) and it will be calculated.
:type dliq: float or None
:arg bool chkvals: If True (default False) and all values are given,
this function will calculate the disequilibrium and raise a
warning if the results are not within a given tolerance.
:arg float chktol: Tolerance to use when checking values (default
_CHKTOL).
:arg airf0: Initial guess for the humid air dry fraction in kg/kg.
If None (default) then the appropriate `_approx_*` is used.
:type airf0: float or None
:arg temp0: Initial guess for the temperature in K. If None
(default) then the appropriate `_approx_*` is used.
:type temp0: float or None
:arg pres0: Initial guess for the pressure in Pa. If None (default)
then the appropriate `_approx_*` is used.
:type pres0: float or None
:arg dhum0: Initial guess for the humid air density in kg/m3. If
None (default) then the appropriate `_approx_*` is used.
:type dhum0: float or None
:arg dliq0: Initial guess for the liquid water density in kg/m3. If
None (default) then the appropriate `_approx_*` is used.
:type dliq0: float or None
:arg bool chkbnd: If True then warnings are raised when the given
values are valid but outside the recommended bounds (default
False).
:arg mathargs: Keyword arguments to the root-finder
:func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None
(default) then no arguments are passed and default parameters
will be used.
:returns: Pressure in Pa.
:raises ValueError: If not enough values are provided.
:raises ValueError: If all mass fractions are provided but their sum
is larger than 1.
:raises RuntimeWarning: If the relative disequilibrium is more than
chktol, if chkvals is True and all values are given.
:Examples:
>>> pressure(airf=.99)
38338.9622424
>>> pressure(temp=273.155)
67931.60108
>>> pressure(wair=.99,entr=0.,wetf=.5)
112016.075795
>>> pressure(wair=.1,wliq=.2,wice=.3)
706.817425301
"""
airf, temp, pres, dhum, dliq = eq_all(wair=wair,entr=entr,wetf=wetf,
wliq=wliq,wice=wice,airf=airf,temp=temp,pres=pres,dhum=dhum,dliq=dliq,
chkvals=chkvals,chktol=chktol,airf0=airf0,temp0=temp0,pres0=pres0,
dhum0=dhum0,dliq0=dliq0,chkbnd=chkbnd,mathargs=mathargs)
return pres
def temperature(wair=None,entr=None,wetf=None,wliq=None,wice=None,
airf=None,temp=None,pres=None,dhum=None,dliq=None,chkvals=False,
chktol=_CHKTOL,airf0=None,temp0=None,pres0=None,dhum0=None,
dliq0=None,chkbnd=False,mathargs=None):
"""Calculate wet-icy air temperature.
Calculate the temperature of wet-icy air.
:arg wair: Total dry fraction in kg/kg.
:type wair: float or None
:arg entr: Entropy in J/kg/K.
:type entr: float or None
:arg wetf: Wet fraction of condensate in kg/kg.
:type wetf: float or None
:arg wliq: Mass fraction of liquid water in kg/kg.
:type wliq: float or None
:arg wice: Mass fraction of ice in kg/kg.
:type wice: float or None
:arg airf: Humid air dry fraction in kg/kg. If unknown, pass None
(default) and it will be calculated.
:type airf: float or None
:arg temp: Temperature in K. If unknown, pass None (default) and it
will be calculated.
:type temp: float or None
:arg pres: Pressure in Pa. If unknown, pass None (default) and it
will be calculated.
:type pres: float or None
:arg dhum: Humid air density in kg/m3. If unknown, pass None
(default) and it will be calculated.
:type dhum: float or None
:arg dliq: Liquid water density in kg/m3. If unknown, pass None
(default) and it will be calculated.
:type dliq: float or None
:arg bool chkvals: If True (default False) and all values are given,
this function will calculate the disequilibrium and raise a
warning if the results are not within a given tolerance.
:arg float chktol: Tolerance to use when checking values (default
_CHKTOL).
:arg airf0: Initial guess for the humid air dry fraction in kg/kg.
If None (default) then the appropriate `_approx_*` is used.
:type airf0: float or None
:arg temp0: Initial guess for the temperature in K. If None
(default) then the appropriate `_approx_*` is used.
:type temp0: float or None
:arg pres0: Initial guess for the pressure in Pa. If None (default)
then the appropriate `_approx_*` is used.
:type pres0: float or None
:arg dhum0: Initial guess for the humid air density in kg/m3. If
None (default) then the appropriate `_approx_*` is used.
:type dhum0: float or None
:arg dliq0: Initial guess for the liquid water density in kg/m3. If
None (default) then the appropriate `_approx_*` is used.
:type dliq0: float or None
:arg bool chkbnd: If True then warnings are raised when the given
values are valid but outside the recommended bounds (default
False).
:arg mathargs: Keyword arguments to the root-finder
:func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None
(default) then no arguments are passed and default parameters
will be used.
:returns: Temperature in K.
:raises ValueError: If not enough values are provided.
:raises ValueError: If all mass fractions are provided but their sum
is larger than 1.
:raises RuntimeWarning: If the relative disequilibrium is more than
chktol, if chkvals is True and all values are given.
:Examples:
>>> temperature(airf=.99)
273.157198087
>>> temperature(pres=1e4)
273.159302793
>>> temperature(wair=.99,entr=0.,wetf=.5)
273.151724970
>>> temperature(wair=.1,wliq=.2,wice=.3)
273.159992933
"""
airf, temp, pres, dhum, dliq = eq_all(wair=wair,entr=entr,wetf=wetf,
wliq=wliq,wice=wice,airf=airf,temp=temp,pres=pres,dhum=dhum,dliq=dliq,
chkvals=chkvals,chktol=chktol,airf0=airf0,temp0=temp0,pres0=pres0,
dhum0=dhum0,dliq0=dliq0,chkbnd=chkbnd,mathargs=mathargs)
return temp
## Equilibrium properties requiring total mass fractions
def density(wair,entr=None,wetf=None,wliq=None,wice=None,airf=None,
temp=None,pres=None,dhum=None,dliq=None,chkvals=False,
chktol=_CHKTOL,airf0=None,temp0=None,pres0=None,dhum0=None,
dliq0=None,chkbnd=False,mathargs=None):
"""Calculate wet-icy air density.
Calculate the density of wet-icy air.
:arg float wair: Total dry fraction in kg/kg.
:arg entr: Entropy in J/kg/K.
:type entr: float or None
:arg wetf: Wet fraction of condensate in kg/kg.
:type wetf: float or None
:arg wliq: Mass fraction of liquid water in kg/kg.
:type wliq: float or None
:arg wice: Mass fraction of ice in kg/kg.
:type wice: float or None
:arg airf: Humid air dry fraction in kg/kg. If unknown, pass None
(default) and it will be calculated.
:type airf: float or None
:arg temp: Temperature in K. If unknown, pass None (default) and it
will be calculated.
:type temp: float or None
:arg pres: Pressure in Pa. If unknown, pass None (default) and it
will be calculated.
:type pres: float or None
:arg dhum: Humid air density in kg/m3. If unknown, pass None
(default) and it will be calculated.
:type dhum: float or None
:arg dliq: Liquid water density in kg/m3. If unknown, pass None
(default) and it will be calculated.
:type dliq: float or None
:arg bool chkvals: If True (default False) and all values are given,
this function will calculate the disequilibrium and raise a
warning if the results are not within a given tolerance.
:arg float chktol: Tolerance to use when checking values (default
_CHKTOL).
:arg airf0: Initial guess for the humid air dry fraction in kg/kg.
If None (default) then the appropriate `_approx_*` is used.
:type airf0: float or None
:arg temp0: Initial guess for the temperature in K. If None
(default) then the appropriate `_approx_*` is used.
:type temp0: float or None
:arg pres0: Initial guess for the pressure in Pa. If None (default)
then the appropriate `_approx_*` is used.
:type pres0: float or None
:arg dhum0: Initial guess for the humid air density in kg/m3. If
None (default) then the appropriate `_approx_*` is used.
:type dhum0: float or None
:arg dliq0: Initial guess for the liquid water density in kg/m3. If
None (default) then the appropriate `_approx_*` is used.
:type dliq0: float or None
:arg bool chkbnd: If True then warnings are raised when the given
values are valid but outside the recommended bounds (default
False).
:arg mathargs: Keyword arguments to the root-finder
:func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None
(default) then no arguments are passed and default parameters
will be used.
:returns: Density in kg/m3.
:raises ValueError: If not enough values are provided.
:raises ValueError: If all mass fractions are provided but their sum
is larger than 1.
:raises RuntimeWarning: If the relative disequilibrium is more than
chktol, if chkvals is True and all values are given.
:Examples:
>>> density(0.99,entr=0.,wetf=.5)
1.43611528680
>>> density(.1,wliq=.2,wice=.3)
0.0121364037568
"""
airf, temp, pres, dhum, dliq = eq_wefli(wair,entr=entr,wetf=wetf,wliq=wliq,
wice=wice,airf=airf,temp=temp,pres=pres,dhum=dhum,dliq=dliq,
chkvals=chkvals,chktol=chktol,airf0=airf0,temp0=temp0,pres0=pres0,
dhum0=dhum0,dliq0=dliq0,chkbnd=chkbnd,mathargs=mathargs)
if wliq is None or wice is None:
sh = -_air_f(0,1,0,airf,temp,dhum)
si = -_ice_g(1,0,temp,pres)
if wetf == 0:
wliq = 0.
wice = (entr - wair/airf*sh)/si
else:
sl = -_flu_f(1,0,temp,dliq)
wliq = (entr - wair/airf*sh)/(sl + (1-wetf)/wetf*si)
wice = wliq * (1-wetf)/wetf
whum = wair/airf
vi = _ice_g(0,1,temp,pres,chkbnd=chkbnd)
vtot = whum/dhum + wliq/dliq + wice*vi
rho = vtot**(-1)
return rho
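# Note on the condensate recovery above: with whum = wair/airf, the entropy
# balance entr = whum*sh + wliq*sl + wice*si combined with the wet-fraction
# definition wice = wliq*(1-wetf)/wetf solves to
#   wliq = (entr - whum*sh)/(sl + (1-wetf)/wetf*si),
# with the wetf == 0 branch handling the all-ice condensate limit.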
def dryairfraction(wair,entr=None,wetf=None,wliq=None,wice=None,
airf=None,temp=None,pres=None,dhum=None,dliq=None,chkvals=False,
chktol=_CHKTOL,airf0=None,temp0=None,pres0=None,dhum0=None,
dliq0=None,chkbnd=False,mathargs=None):
"""Calculate wet-icy air total dry fraction.
Calculate the total mass fraction of dry air in wet-icy air.
:arg float wair: Total dry fraction in kg/kg.
:arg entr: Entropy in J/kg/K.
:type entr: float or None
:arg wetf: Wet fraction of condensate in kg/kg.
:type wetf: float or None
:arg wliq: Mass fraction of liquid water in kg/kg.
:type wliq: float or None
:arg wice: Mass fraction of ice in kg/kg.
:type wice: float or None
:arg airf: Humid air dry fraction in kg/kg. If unknown, pass None
(default) and it will be calculated.
:type airf: float or None
:arg temp: Temperature in K. If unknown, pass None (default) and it
will be calculated.
:type temp: float or None
:arg pres: Pressure in Pa. If unknown, pass None (default) and it
will be calculated.
:type pres: float or None
:arg dhum: Humid air density in kg/m3. If unknown, pass None
(default) and it will be calculated.
:type dhum: float or None
:arg dliq: Liquid water density in kg/m3. If unknown, pass None
(default) and it will be calculated.
:type dliq: float or None
:arg bool chkvals: If True (default False) and all values are given,
this function will calculate the disequilibrium and raise a
warning if the results are not within a given tolerance.
:arg float chktol: Tolerance to use when checking values (default
_CHKTOL).
:arg airf0: Initial guess for the humid air dry fraction in kg/kg.
If None (default) then the appropriate `_approx_*` is used.
:type airf0: float or None
:arg temp0: Initial guess for the temperature in K. If None
(default) then the appropriate `_approx_*` is used.
:type temp0: float or None
:arg pres0: Initial guess for the pressure in Pa. If None (default)
then the appropriate `_approx_*` is used.
:type pres0: float or None
:arg dhum0: Initial guess for the humid air density in kg/m3. If
None (default) then the appropriate `_approx_*` is used.
:type dhum0: float or None
:arg dliq0: Initial guess for the liquid water density in kg/m3. If
None (default) then the appropriate `_approx_*` is used.
:type dliq0: float or None
:arg bool chkbnd: If True then warnings are raised when the given
values are valid but outside the recommended bounds (default
False).
:arg mathargs: Keyword arguments to the root-finder
:func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None
(default) then no arguments are passed and default parameters
will be used.
:returns: Total dry fraction in kg/kg.
:raises ValueError: If not enough values are provided.
:raises ValueError: If all mass fractions are provided but their sum
is larger than 1.
:raises RuntimeWarning: If the relative disequilibrium is more than
chktol, if chkvals is True and all values are given.
"""
return wair
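# Note: dryairfraction returns its input unchanged; it presumably exists so
# that every thermodynamic property of wet-icy air is exposed through the same
# uniform keyword interface.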
def enthalpy(wair,entr=None,wetf=None,wliq=None,wice=None,airf=None,
temp=None,pres=None,dhum=None,dliq=None,chkvals=False,
chktol=_CHKTOL,airf0=None,temp0=None,pres0=None,dhum0=None,
dliq0=None,chkbnd=False,mathargs=None):
"""Calculate wet-icy air enthalpy.
Calculate the specific enthalpy of wet-icy air.
:arg float wair: Total dry fraction in kg/kg.
:arg entr: Entropy in J/kg/K.
:type entr: float or None
:arg wetf: Wet fraction of condensate in kg/kg.
:type wetf: float or None
:arg wliq: Mass fraction of liquid water in kg/kg.
:type wliq: float or None
:arg wice: Mass fraction of ice in kg/kg.
:type wice: float or None
:arg airf: Humid air dry fraction in kg/kg. If unknown, pass None
(default) and it will be calculated.
:type airf: float or None
:arg temp: Temperature in K. If unknown, pass None (default) and it
will be calculated.
:type temp: float or None
:arg pres: Pressure in Pa. If unknown, pass None (default) and it
will be calculated.
:type pres: float or None
:arg dhum: Humid air density in kg/m3. If unknown, pass None
(default) and it will be calculated.
:type dhum: float or None
:arg dliq: Liquid water density in kg/m3. If unknown, pass None
(default) and it will be calculated.
:type dliq: float or None
:arg bool chkvals: If True (default False) and all values are given,
this function will calculate the disequilibrium and raise a
warning if the results are not within a given tolerance.
:arg float chktol: Tolerance to use when checking values (default
_CHKTOL).
:arg airf0: Initial guess for the humid air dry fraction in kg/kg.
If None (default) then the appropriate `_approx_*` is used.
:type airf0: float or None
:arg temp0: Initial guess for the temperature in K. If None
(default) then the appropriate `_approx_*` is used.
:type temp0: float or None
:arg pres0: Initial guess for the pressure in Pa. If None (default)
then the appropriate `_approx_*` is used.
:type pres0: float or None
:arg dhum0: Initial guess for the humid air density in kg/m3. If
None (default) then the appropriate `_approx_*` is used.
:type dhum0: float or None
:arg dliq0: Initial guess for the liquid water density in kg/m3. If
None (default) then the appropriate `_approx_*` is used.
:type dliq0: float or None
:arg bool chkbnd: If True then warnings are raised when the given
values are valid but outside the recommended bounds (default
False).
:arg mathargs: Keyword arguments to the root-finder
:func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None
(default) then no arguments are passed and default parameters
will be used.
:returns: Enthalpy in J/kg.
:raises ValueError: If not enough values are provided.
:raises ValueError: If all mass fractions are provided but their sum
is larger than 1.
:raises RuntimeWarning: If the relative disequilibrium is more than
chktol, if chkvals is True and all values are given.
:Examples:
>>> enthalpy(.99,entr=0.,wetf=.5)
7356.12943724
>>> enthalpy(.1,wliq=.2,wice=.3)
900361.135280
"""
airf, temp, pres, dhum, dliq = eq_wefli(wair,entr=entr,wetf=wetf,wliq=wliq,
wice=wice,airf=airf,temp=temp,pres=pres,dhum=dhum,dliq=dliq,
chkvals=chkvals,chktol=chktol,airf0=airf0,temp0=temp0,pres0=pres0,
dhum0=dhum0,dliq0=dliq0,chkbnd=chkbnd,mathargs=mathargs)
if wliq is None or wice is None:
sh = -_air_f(0,1,0,airf,temp,dhum)
si = -_ice_g(1,0,temp,pres)
if wetf == 0:
wliq = 0.
wice = (entr - wair/airf*sh)/si
else:
sl = -_flu_f(1,0,temp,dliq)
wliq = (entr - wair/airf*sh)/(sl + (1-wetf)/wetf*si)
wice = wliq * (1-wetf)/wetf
whum = wair/airf
hh = air2.enthalpy(airf,temp,dhum)
hl = flu2.enthalpy(temp,dliq)
hi = ice2.enthalpy(temp,pres)
h = whum*hh + wliq*hl + wice*hi
return h
def entropy(wair,entr=None,wetf=None,wliq=None,wice=None,airf=None,
temp=None,pres=None,dhum=None,dliq=None,chkvals=False,
chktol=_CHKTOL,airf0=None,temp0=None,pres0=None,dhum0=None,
dliq0=None,chkbnd=False,mathargs=None):
"""Calculate wet-icy air entropy.
Calculate the specific entropy of wet-icy air.
:arg float wair: Total dry fraction in kg/kg.
:arg entr: Entropy in J/kg/K.
:type entr: float or None
:arg wetf: Wet fraction of condensate in kg/kg.
:type wetf: float or None
:arg wliq: Mass fraction of liquid water in kg/kg.
:type wliq: float or None
:arg wice: Mass fraction of ice in kg/kg.
:type wice: float or None
:arg airf: Humid air dry fraction in kg/kg. If unknown, pass None
(default) and it will be calculated.
:type airf: float or None
:arg temp: Temperature in K. If unknown, pass None (default) and it
will be calculated.
:type temp: float or None
:arg pres: Pressure in Pa. If unknown, pass None (default) and it
will be calculated.
:type pres: float or None
:arg dhum: Humid air density in kg/m3. If unknown, pass None
(default) and it will be calculated.
:type dhum: float or None
:arg dliq: Liquid water density in kg/m3. If unknown, pass None
(default) and it will be calculated.
:type dliq: float or None
:arg bool chkvals: If True (default False) and all values are given,
this function will calculate the disequilibrium and raise a
warning if the results are not within a given tolerance.
:arg float chktol: Tolerance to use when checking values (default
_CHKTOL).
:arg airf0: Initial guess for the humid air dry fraction in kg/kg.
If None (default) then the appropriate `_approx_*` is used.
:type airf0: float or None
:arg temp0: Initial guess for the temperature in K. If None
(default) then the appropriate `_approx_*` is used.
:type temp0: float or None
:arg pres0: Initial guess for the pressure in Pa. If None (default)
then the appropriate `_approx_*` is used.
:type pres0: float or None
:arg dhum0: Initial guess for the humid air density in kg/m3. If
None (default) then the appropriate `_approx_*` is used.
:type dhum0: float or None
:arg dliq0: Initial guess for the liquid water density in kg/m3. If
None (default) then the appropriate `_approx_*` is used.
:type dliq0: float or None
:arg bool chkbnd: If True then warnings are raised when the given
values are valid but outside the recommended bounds (default
False).
:arg mathargs: Keyword arguments to the root-finder
:func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None
(default) then no arguments are passed and default parameters
will be used.
:returns: Entropy in J/kg/K.
:raises ValueError: If not enough values are provided.
:raises ValueError: If all mass fractions are provided but their sum
is larger than 1.
:raises RuntimeWarning: If the relative disequilibrium is more than
        chktol, when chkvals is True and all values are given.
:Examples:
>>> entropy(.1,wliq=.2,wice=.3)
3496.16306903
"""
if entr is not None:
return entr
airf, temp, pres, dhum, dliq = eq_wefli(wair,wliq=wliq,wice=wice,airf=airf,
temp=temp,pres=pres,dhum=dhum,dliq=dliq,chkvals=chkvals,chktol=chktol,
airf0=airf0,temp0=temp0,pres0=pres0,dhum0=dhum0,dliq0=dliq0,
chkbnd=chkbnd,mathargs=mathargs)
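    # entr was not given, so wliq and wice must have been provided; otherwise
    # eq_wefli raises ValueError for not having enough values.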
whum = wair/airf
sh = -_air_f(0,1,0,airf,temp,dhum)
sl = -_flu_f(1,0,temp,dliq)
si = -_ice_g(1,0,temp,pres)
s = whum*sh + wliq*sl + wice*si
return s
def liquidfraction(wair,entr=None,wetf=None,wliq=None,wice=None,
airf=None,temp=None,pres=None,dhum=None,dliq=None,chkvals=False,
chktol=_CHKTOL,airf0=None,temp0=None,pres0=None,dhum0=None,
dliq0=None,chkbnd=False,mathargs=None):
"""Calculate wet-icy air liquid water fraction.
Calculate the mass fraction of liquid water in wet-icy air.
:arg float wair: Total dry fraction in kg/kg.
:arg entr: Entropy in J/kg/K.
:type entr: float or None
:arg wetf: Wet fraction of condensate in kg/kg.
:type wetf: float or None
:arg wliq: Mass fraction of liquid water in kg/kg.
:type wliq: float or None
:arg wice: Mass fraction of ice in kg/kg.
:type wice: float or None
:arg airf: Humid air dry fraction in kg/kg. If unknown, pass None
(default) and it will be calculated.
:type airf: float or None
:arg temp: Temperature in K. If unknown, pass None (default) and it
will be calculated.
:type temp: float or None
:arg pres: Pressure in Pa. If unknown, pass None (default) and it
will be calculated.
:type pres: float or None
:arg dhum: Humid air density in kg/m3. If unknown, pass None
(default) and it will be calculated.
:type dhum: float or None
:arg dliq: Liquid water density in kg/m3. If unknown, pass None
(default) and it will be calculated.
:type dliq: float or None
:arg bool chkvals: If True (default False) and all values are given,
this function will calculate the disequilibrium and raise a
warning if the results are not within a given tolerance.
:arg float chktol: Tolerance to use when checking values (default
_CHKTOL).
:arg airf0: Initial guess for the humid air dry fraction in kg/kg.
If None (default) then the appropriate `_approx_*` is used.
:type airf0: float or None
:arg temp0: Initial guess for the temperature in K. If None
(default) then the appropriate `_approx_*` is used.
:type temp0: float or None
:arg pres0: Initial guess for the pressure in Pa. If None (default)
then the appropriate `_approx_*` is used.
:type pres0: float or None
:arg dhum0: Initial guess for the humid air density in kg/m3. If
None (default) then the appropriate `_approx_*` is used.
:type dhum0: float or None
:arg dliq0: Initial guess for the liquid water density in kg/m3. If
None (default) then the appropriate `_approx_*` is used.
:type dliq0: float or None
:arg bool chkbnd: If True then warnings are raised when the given
values are valid but outside the recommended bounds (default
False).
:arg mathargs: Keyword arguments to the root-finder
:func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None
(default) then no arguments are passed and default parameters
will be used.
:returns: Liquid water fraction in kg/kg.
:raises ValueError: If not enough values are provided.
:raises ValueError: If all mass fractions are provided but their sum
is larger than 1.
:raises RuntimeWarning: If the relative disequilibrium is more than
        chktol, when chkvals is True and all values are given.
:Examples:
>>> liquidfraction(.99,entr=0.,wetf=.5)
3.30296152581e-3
"""
if wliq is not None:
return wliq
airf, temp, pres, dhum, dliq = eq_wefli(wair,entr=entr,wetf=wetf,wliq=wliq,
wice=wice,airf=airf,temp=temp,pres=pres,dhum=dhum,dliq=dliq,
chkvals=chkvals,chktol=chktol,airf0=airf0,temp0=temp0,pres0=pres0,
dhum0=dhum0,dliq0=dliq0,chkbnd=chkbnd,mathargs=mathargs)
if wetf == 0:
wliq = 0.
else:
sh = -_air_f(0,1,0,airf,temp,dhum)
si = -_ice_g(1,0,temp,pres)
sl = -_flu_f(1,0,temp,dliq)
wliq = (entr - wair/airf*sh)/(sl + (1-wetf)/wetf*si)
return wliq
def solidfraction(wair,entr=None,wetf=None,wliq=None,wice=None,
airf=None,temp=None,pres=None,dhum=None,dliq=None,chkvals=False,
chktol=_CHKTOL,airf0=None,temp0=None,pres0=None,dhum0=None,
dliq0=None,chkbnd=False,mathargs=None):
"""Calculate wet-icy air ice mass fraction.
Calculate the mass fraction of ice in wet-icy air.
:arg float wair: Total dry fraction in kg/kg.
:arg entr: Entropy in J/kg/K.
:type entr: float or None
:arg wetf: Wet fraction of condensate in kg/kg.
:type wetf: float or None
:arg wliq: Mass fraction of liquid water in kg/kg.
:type wliq: float or None
:arg wice: Mass fraction of ice in kg/kg.
:type wice: float or None
:arg airf: Humid air dry fraction in kg/kg. If unknown, pass None
(default) and it will be calculated.
:type airf: float or None
:arg temp: Temperature in K. If unknown, pass None (default) and it
will be calculated.
:type temp: float or None
:arg pres: Pressure in Pa. If unknown, pass None (default) and it
will be calculated.
:type pres: float or None
:arg dhum: Humid air density in kg/m3. If unknown, pass None
(default) and it will be calculated.
:type dhum: float or None
:arg dliq: Liquid water density in kg/m3. If unknown, pass None
(default) and it will be calculated.
:type dliq: float or None
:arg bool chkvals: If True (default False) and all values are given,
this function will calculate the disequilibrium and raise a
warning if the results are not within a given tolerance.
:arg float chktol: Tolerance to use when checking values (default
_CHKTOL).
:arg airf0: Initial guess for the humid air dry fraction in kg/kg.
If None (default) then the appropriate `_approx_*` is used.
:type airf0: float or None
:arg temp0: Initial guess for the temperature in K. If None
(default) then the appropriate `_approx_*` is used.
:type temp0: float or None
:arg pres0: Initial guess for the pressure in Pa. If None (default)
then the appropriate `_approx_*` is used.
:type pres0: float or None
:arg dhum0: Initial guess for the humid air density in kg/m3. If
None (default) then the appropriate `_approx_*` is used.
:type dhum0: float or None
:arg dliq0: Initial guess for the liquid water density in kg/m3. If
None (default) then the appropriate `_approx_*` is used.
:type dliq0: float or None
:arg bool chkbnd: If True then warnings are raised when the given
values are valid but outside the recommended bounds (default
False).
:arg mathargs: Keyword arguments to the root-finder
:func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None
(default) then no arguments are passed and default parameters
will be used.
:returns: Ice fraction in kg/kg.
:raises ValueError: If not enough values are provided.
:raises ValueError: If all mass fractions are provided but their sum
is larger than 1.
:raises RuntimeWarning: If the relative disequilibrium is more than
        chktol, when chkvals is True and all values are given.
:Examples:
>>> solidfraction(.99,entr=0.,wetf=.5)
3.30296152581e-3
"""
if wice is not None:
return wice
airf, temp, pres, dhum, dliq = eq_wefli(wair,entr=entr,wetf=wetf,wliq=wliq,
wice=wice,airf=airf,temp=temp,pres=pres,dhum=dhum,dliq=dliq,
chkvals=chkvals,chktol=chktol,airf0=airf0,temp0=temp0,pres0=pres0,
dhum0=dhum0,dliq0=dliq0,chkbnd=chkbnd,mathargs=mathargs)
sh = -_air_f(0,1,0,airf,temp,dhum)
si = -_ice_g(1,0,temp,pres)
if wetf == 0:
wliq = 0.
wice = (entr - wair/airf*sh)/si
else:
sl = -_flu_f(1,0,temp,dliq)
wliq = (entr - wair/airf*sh)/(sl + (1-wetf)/wetf*si)
wice = wliq * (1-wetf)/wetf
return wice
def vapourfraction(wair,entr=None,wetf=None,wliq=None,wice=None,
airf=None,temp=None,pres=None,dhum=None,dliq=None,chkvals=False,
chktol=_CHKTOL,airf0=None,temp0=None,pres0=None,dhum0=None,
dliq0=None,chkbnd=False,mathargs=None):
"""Calculate wet-icy air water vapour fraction.
Calculate the mass fraction of water vapour in wet-icy air.
:arg float wair: Total dry fraction in kg/kg.
:arg entr: Entropy in J/kg/K.
:type entr: float or None
:arg wetf: Wet fraction of condensate in kg/kg.
:type wetf: float or None
:arg wliq: Mass fraction of liquid water in kg/kg.
:type wliq: float or None
:arg wice: Mass fraction of ice in kg/kg.
:type wice: float or None
:arg airf: Humid air dry fraction in kg/kg. If unknown, pass None
(default) and it will be calculated.
:type airf: float or None
:arg temp: Temperature in K. If unknown, pass None (default) and it
will be calculated.
:type temp: float or None
:arg pres: Pressure in Pa. If unknown, pass None (default) and it
will be calculated.
:type pres: float or None
:arg dhum: Humid air density in kg/m3. If unknown, pass None
(default) and it will be calculated.
:type dhum: float or None
:arg dliq: Liquid water density in kg/m3. If unknown, pass None
(default) and it will be calculated.
:type dliq: float or None
:arg bool chkvals: If True (default False) and all values are given,
this function will calculate the disequilibrium and raise a
warning if the results are not within a given tolerance.
:arg float chktol: Tolerance to use when checking values (default
_CHKTOL).
:arg airf0: Initial guess for the humid air dry fraction in kg/kg.
If None (default) then the appropriate `_approx_*` is used.
:type airf0: float or None
:arg temp0: Initial guess for the temperature in K. If None
(default) then the appropriate `_approx_*` is used.
:type temp0: float or None
:arg pres0: Initial guess for the pressure in Pa. If None (default)
then the appropriate `_approx_*` is used.
:type pres0: float or None
:arg dhum0: Initial guess for the humid air density in kg/m3. If
None (default) then the appropriate `_approx_*` is used.
:type dhum0: float or None
:arg dliq0: Initial guess for the liquid water density in kg/m3. If
None (default) then the appropriate `_approx_*` is used.
:type dliq0: float or None
:arg bool chkbnd: If True then warnings are raised when the given
values are valid but outside the recommended bounds (default
False).
:arg mathargs: Keyword arguments to the root-finder
:func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None
(default) then no arguments are passed and default parameters
will be used.
:returns: Water vapour fraction in kg/kg.
:raises ValueError: If not enough values are provided.
:raises ValueError: If all mass fractions are provided but their sum
is larger than 1.
:raises RuntimeWarning: If the relative disequilibrium is more than
        chktol, when chkvals is True and all values are given.
:Examples:
>>> vapourfraction(.99,entr=0.,wetf=.5)
3.39407694837e-3
"""
if wliq is not None and wice is not None:
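        # The mass fractions of dry air, vapour, liquid and ice sum to 1.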
wvap = 1 - wair - wliq - wice
return wvap
airf, temp, pres, dhum, dliq = eq_wefli(wair,entr=entr,wetf=wetf,wliq=wliq,
wice=wice,airf=airf,temp=temp,pres=pres,dhum=dhum,dliq=dliq,
chkvals=chkvals,chktol=chktol,airf0=airf0,temp0=temp0,pres0=pres0,
dhum0=dhum0,dliq0=dliq0,chkbnd=chkbnd,mathargs=mathargs)
sh = -_air_f(0,1,0,airf,temp,dhum)
si = -_ice_g(1,0,temp,pres)
if wetf == 0:
wliq = 0.
wice = (entr - wair/airf*sh)/si
else:
sl = -_flu_f(1,0,temp,dliq)
wliq = (entr - wair/airf*sh)/(sl + (1-wetf)/wetf*si)
wice = wliq * (1-wetf)/wetf
wvap = 1 - wair - wliq - wice
return wvap
## Isentropic melting and freezing levels
def iml(wair,entr,airf0=None,temp0=None,pres0=None,dhum0=None,
dliq0=None,chkbnd=False,mathargs=None):
"""Calculate isentropic melting level.
Calculate the isentropic melting level of wet-icy air, the pressure
below which all condensed water will be ice at equilibrium.
:arg float wair: Total dry fraction in kg/kg.
:arg float entr: Entropy in J/kg/K.
:arg airf0: Initial guess for the humid air dry fraction in kg/kg.
If None (default) then the appropriate `_approx_*` is used.
:type airf0: float or None
:arg temp0: Initial guess for the temperature in K. If None
(default) then the appropriate `_approx_*` is used.
:type temp0: float or None
:arg pres0: Initial guess for the pressure in Pa. If None (default)
then the appropriate `_approx_*` is used.
:type pres0: float or None
:arg dhum0: Initial guess for the humid air density in kg/m3. If
None (default) then the appropriate `_approx_*` is used.
:type dhum0: float or None
:arg dliq0: Initial guess for the liquid water density in kg/m3. If
None (default) then the appropriate `_approx_*` is used.
:type dliq0: float or None
:arg bool chkbnd: If True then warnings are raised when the given
values are valid but outside the recommended bounds (default
False).
:arg mathargs: Keyword arguments to the root-finder
:func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None
(default) then no arguments are passed and default parameters
will be used.
:returns: Pressure at the isentropic melting level in Pa.
:Examples:
>>> iml(.99,100.)
81605.5557729
"""
wetf = 0.
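    # wetf = 0: all condensate is ice, the limiting case at the melting level.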
airf, temp, pres, dhum, dliq = eq_wefli(wair,entr=entr,wetf=wetf,
airf0=airf0,temp0=temp0,pres0=pres0,dhum0=dhum0,dliq0=dliq0,
chkbnd=chkbnd,mathargs=mathargs)
return pres
def ifl(wair,entr,airf0=None,temp0=None,pres0=None,dhum0=None,
dliq0=None,chkbnd=False,mathargs=None):
"""Calculate isentropic freezing level.
Calculate the isentropic freezing level of wet-icy air, the pressure
above which all condensed water will be liquid water at equilibrium.
:arg float wair: Total dry fraction in kg/kg.
:arg float entr: Entropy in J/kg/K.
:arg airf0: Initial guess for the humid air dry fraction in kg/kg.
If None (default) then the appropriate `_approx_*` is used.
:type airf0: float or None
:arg temp0: Initial guess for the temperature in K. If None
(default) then the appropriate `_approx_*` is used.
:type temp0: float or None
:arg pres0: Initial guess for the pressure in Pa. If None (default)
then the appropriate `_approx_*` is used.
:type pres0: float or None
:arg dhum0: Initial guess for the humid air density in kg/m3. If
None (default) then the appropriate `_approx_*` is used.
:type dhum0: float or None
:arg dliq0: Initial guess for the liquid water density in kg/m3. If
None (default) then the appropriate `_approx_*` is used.
:type dliq0: float or None
:arg bool chkbnd: If True then warnings are raised when the given
values are valid but outside the recommended bounds (default
False).
:arg mathargs: Keyword arguments to the root-finder
:func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None
(default) then no arguments are passed and default parameters
will be used.
:returns: Pressure at the isentropic freezing level in Pa.
:Examples:
>>> ifl(.99,100.)
83234.7314358
"""
wetf = 1.
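    # wetf = 1: all condensate is liquid, the limiting case at the freezing level.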
airf, temp, pres, dhum, dliq = eq_wefli(wair,entr=entr,wetf=wetf,
airf0=airf0,temp0=temp0,pres0=pres0,dhum0=dhum0,dliq0=dliq0,
chkbnd=chkbnd,mathargs=mathargs)
return pres
[dataset row metadata: tests/app/main/test_frameworks.py from alphagov-mirror/digitalmarketplace-supplier-frontend @ 349af2ec867f784c524a6a1c42b069f6d302e513 (Python, 233,293 bytes, MIT license, content hexsha be4760a2f55d0f90bc5eb3f92fe63439f54a616b)]
# -*- coding: utf-8 -*-
import mock
from collections import OrderedDict
from datetime import datetime
from io import BytesIO
from itertools import chain
from urllib.parse import urljoin
from freezegun import freeze_time
from lxml import html
import pytest
from werkzeug.datastructures import MultiDict
from dmapiclient import (
APIError,
HTTPError
)
from dmapiclient.audit import AuditTypes
from dmcontent.errors import ContentNotFoundError
from dmtestutils.api_model_stubs import FrameworkStub, SupplierStub
from dmtestutils.fixtures import valid_pdf_bytes
from dmutils.email.exceptions import EmailError
from dmutils.s3 import S3ResponseError
from app.main.forms.frameworks import ReuseDeclarationForm
from ..helpers import (
BaseApplicationTest,
MockEnsureApplicationCompanyDetailsHaveBeenConfirmedMixin,
FULL_G7_SUBMISSION,
valid_g9_declaration_base,
assert_args_and_raise,
assert_args_and_return,
)
def _return_fake_s3_file_dict(directory, filename, ext, last_modified=None, size=None):
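    # Mimic a single file entry of the kind returned by the mocked S3 list().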
return {
'path': '{}{}.{}'.format(directory, filename, ext),
'filename': filename,
'ext': ext,
'last_modified': last_modified or '2015-08-17T14:00:00.000Z',
'size': size if size is not None else 1
}
def get_g_cloud_8():
return BaseApplicationTest.framework(
status='standstill',
name='G-Cloud 8',
slug='g-cloud-8',
framework_agreement_version='v1.0'
)
def _extract_guidance_links(doc):
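    # Build an OrderedDict mapping each guidance section heading to a tuple of
    # (link text, href, displayed time, datetime attribute) for every linked
    # item in that section, with None standing in for any missing value.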
return OrderedDict(
(
section_li.xpath("normalize-space(string(.//h2))"),
tuple(
(
item_li.xpath("normalize-space(string(.//a))") or None,
item_li.xpath("string(.//a/@href)") or None,
item_li.xpath(
(
"normalize-space(string(.//time"
" | "
"./following-sibling::p[@class='dm-attachment__metadata']//time))"
)
) or None,
item_li.xpath(
(
"string(.//time/@datetime"
" | "
"./following-sibling::p[@class='dm-attachment__metadata']//time/@datetime)"
)
) or None,
)
for item_li in section_li.xpath(".//p[.//a] | .//h3[.//a]")
),
)
for section_li in doc.xpath("//main//*[./h2][.//p//a | .//section[@class='dm-attachment']//a]")
)
@mock.patch('dmutils.s3.S3')
class TestFrameworksDashboard(BaseApplicationTest):
def setup_method(self, method):
super().setup_method(method)
self.data_api_client_patch = mock.patch('app.main.views.frameworks.data_api_client', autospec=True)
self.data_api_client = self.data_api_client_patch.start()
def teardown_method(self, method):
self.data_api_client_patch.stop()
super().teardown_method(method)
def test_framework_dashboard_shows_for_pending_if_declaration_exists(self, s3):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='pending')
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework()
res = self.client.get("/suppliers/frameworks/g-cloud-7")
assert res.status_code == 200
doc = html.fromstring(res.get_data(as_text=True))
assert len(doc.xpath("//h1[normalize-space(string())=$b]", b="Your G-Cloud 7 application")) == 1
def test_framework_dashboard_shows_for_live_if_declaration_exists(self, s3):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='live')
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework()
res = self.client.get("/suppliers/frameworks/g-cloud-7")
assert res.status_code == 200
doc = html.fromstring(res.get_data(as_text=True))
assert len(doc.xpath("//h1[normalize-space(string())=$b]", b="G-Cloud 7 documents")) == 1
def test_does_not_show_for_live_if_no_declaration(self, s3):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='live')
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(declaration=None)
res = self.client.get("/suppliers/frameworks/g-cloud-7")
assert res.status_code == 404
@mock.patch('app.main.views.frameworks.DMNotifyClient', autospec=True)
def test_email_sent_when_interest_registered_in_framework(self, mock_dmnotifyclient_class, s3):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='open')
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework()
self.data_api_client.find_users_iter.return_value = [
{'emailAddress': 'email1', 'active': True},
{'emailAddress': 'email2', 'active': True},
{'emailAddress': 'email3', 'active': False}
]
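        # Three users are returned but only two are active; only active users
        # should be emailed, hence the expected call count of 2 below.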
mock_dmnotifyclient_instance = mock_dmnotifyclient_class.return_value
mock_dmnotifyclient_instance.templates = {'framework-application-started': '123456789'}
res = self.client.post("/suppliers/frameworks/g-cloud-7")
self.data_api_client.register_framework_interest.assert_called_once_with(
1234,
"g-cloud-7",
"email@email.com"
)
assert res.status_code == 200
assert mock_dmnotifyclient_instance.send_email.call_count == 2
assert mock_dmnotifyclient_instance.send_email.call_args[1].get('template_name_or_id') == '123456789'
def test_interest_not_registered_in_framework_on_get(self, s3):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='pending')
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework()
res = self.client.get("/suppliers/frameworks/digital-outcomes-and-specialists")
assert res.status_code == 200
assert self.data_api_client.register_framework_interest.called is False
def test_interest_set_but_no_declaration(self, s3):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='pending')
self.data_api_client.get_framework_interest.return_value = {'frameworks': ['g-cloud-7']}
self.data_api_client.find_draft_services_iter.return_value = [
{'serviceName': 'A service', 'status': 'submitted', 'lotSlug': 'iaas'}
]
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(declaration=None)
res = self.client.get("/suppliers/frameworks/g-cloud-7")
assert res.status_code == 200
def test_shows_closed_message_if_pending_and_no_application_done(self, s3):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='pending')
self.data_api_client.get_framework_interest.return_value = {'frameworks': ['g-cloud-7']}
self.data_api_client.find_draft_services_iter.return_value = [
{'serviceName': 'A service', 'status': 'not-submitted'}
]
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework()
res = self.client.get("/suppliers/frameworks/g-cloud-7")
assert res.status_code == 200
doc = html.fromstring(res.get_data(as_text=True))
heading = doc.xpath('//div[@class="summary-item-lede"]//h2[@class="summary-item-heading"]')
assert len(heading) > 0
assert "G-Cloud 7 is closed for applications" in heading[0].xpath('text()')[0]
assert "You didn't submit an application." in heading[0].xpath('../p[1]/text()')[0]
def test_shows_closed_message_if_pending_and_application(self, s3):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='pending')
self.data_api_client.get_framework_interest.return_value = {'frameworks': ['g-cloud-7']}
self.data_api_client.find_draft_services_iter.return_value = [
{'serviceName': 'A service', 'status': 'submitted', 'lotSlug': 'iaas'}
]
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework()
res = self.client.get("/suppliers/frameworks/g-cloud-7")
assert res.status_code == 200
doc = html.fromstring(res.get_data(as_text=True))
heading = doc.xpath('//div[@class="summary-item-lede"]//h2[@class="summary-item-heading"]')
assert len(heading) > 0
assert "G-Cloud 7 is closed for applications" in heading[0].xpath('text()')[0]
lede = doc.xpath('//div[@class="summary-item-lede"]')
expected_string = "You made your supplier declaration and submitted 1 service for consideration."
assert (expected_string in lede[0].xpath('./p[1]/text()')[0])
assert "We’ll let you know the result of your application by " in lede[0].xpath('./p[2]/text()')[0]
@mock.patch('dmutils.s3.S3')
class TestFrameworksDashboardOpenApplications(BaseApplicationTest):
def setup_method(self, method):
super().setup_method(method)
self.data_api_client_patch = mock.patch('app.main.views.frameworks.data_api_client', autospec=True)
self.data_api_client = self.data_api_client_patch.start()
def teardown_method(self, method):
self.data_api_client_patch.stop()
super().teardown_method(method)
def test_declaration_status_when_complete_for_open_framework(self, s3):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='open')
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework()
res = self.client.get("/suppliers/frameworks/g-cloud-7")
assert res.status_code == 200
doc = html.fromstring(res.get_data(as_text=True))
assert len(doc.xpath('//main//strong[@id="dm-declaration-done"][contains(text(), "Done")]')) == 1
def test_declaration_status_when_started_for_open_framework(self, s3):
self.login()
submission = FULL_G7_SUBMISSION.copy()
# User has not yet submitted page 3 of the declaration
del submission['SQ2-1abcd']
del submission['SQ2-1e']
del submission['SQ2-1f']
del submission['SQ2-1ghijklmn']
self.data_api_client.get_framework.return_value = self.framework(status='open')
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
declaration=submission, status='started')
res = self.client.get("/suppliers/frameworks/g-cloud-7")
assert res.status_code == 200
doc = html.fromstring(res.get_data(as_text=True))
assert len(doc.xpath('//main//strong[@id="dm-declaration-inprogress"][contains(text(), "In progress")]')) == 1
def test_declaration_status_when_company_details_not_complete_for_open_framework(self, s3):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='open')
self.data_api_client.get_supplier_framework_info.side_effect = APIError(mock.Mock(status_code=404))
self.data_api_client.get_supplier.return_value = SupplierStub().single_result_response()
res = self.client.get("/suppliers/frameworks/g-cloud-7")
assert res.status_code == 200
doc = html.fromstring(res.get_data(as_text=True))
assert len(doc.xpath('//main//strong[@id="dm-declaration-cantstart"]')) == 1
def test_downloads_shown_for_open_framework(self, s3):
files = [
('updates/communications/', 'file 1', 'odt', '2015-01-01T14:00:00.000Z'),
('updates/clarifications/', 'file 2', 'odt', '2015-02-02T14:00:00.000Z'),
('', 'g-cloud-7-proposed-call-off', 'pdf', '2016-05-01T14:00:00.000Z'),
('', 'g-cloud-7-invitation', 'pdf', '2016-05-01T14:00:00.000Z'),
('', 'g-cloud-7-proposed-framework-agreement', 'pdf', '2016-06-01T14:00:00.000Z'),
('', 'g-cloud-7-reporting-template', 'xls', '2016-06-06T14:00:00.000Z'),
# superfluous file that shouldn't be shown
('', 'g-cloud-7-supplier-pack', 'zip', '2015-01-01T14:00:00.000Z'),
]
s3.return_value.list.return_value = [
_return_fake_s3_file_dict(
'g-cloud-7/communications/{}'.format(section), filename, ext, last_modified=last_modified
) for section, filename, ext, last_modified in files
]
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='open')
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework()
res = self.client.get("/suppliers/frameworks/g-cloud-7")
assert res.status_code == 200
doc = html.fromstring(res.get_data(as_text=True))
extracted_guidance_links = _extract_guidance_links(doc)
assert extracted_guidance_links == OrderedDict((
("Guidance", (
(
"Download the invitation to apply",
"/suppliers/frameworks/g-cloud-7/files/g-cloud-7-invitation.pdf",
None,
None,
),
(
"Read about how to apply",
"https://www.gov.uk/guidance/g-cloud-suppliers-guide#how-to-apply",
None,
None,
),
)),
("Legal documents", (
(
"Download the proposed framework agreement",
"/suppliers/frameworks/g-cloud-7/files/g-cloud-7-proposed-framework-agreement.pdf",
"Wednesday 1 June 2016",
"2016-06-01T14:00:00.000Z",
),
(
"Download the proposed \u2018call-off\u2019 contract",
"/suppliers/frameworks/g-cloud-7/files/g-cloud-7-proposed-call-off.pdf",
"Sunday 1 May 2016",
"2016-05-01T14:00:00.000Z",
),
)),
("Communications", (
(
"View communications and ask clarification questions",
"/suppliers/frameworks/g-cloud-7/updates",
"Monday 2 February 2015",
"2015-02-02T14:00:00.000Z",
),
)),
("Reporting", (
(
"Download the reporting template",
"/suppliers/frameworks/g-cloud-7/files/g-cloud-7-reporting-template.xls",
None,
None,
),
)),
))
assert not any(
doc.xpath("//main//a[contains(@href, $href_part)]", href_part=href_part)
for href_part in (
"g-cloud-7-final-framework-agreement.pdf",
"g-cloud-7-supplier-pack.zip",
)
)
assert len(doc.xpath(
"//main//p[contains(normalize-space(string()), $a)]",
a="until 5pm BST, Tuesday 22 September 2015",
)) == 1
assert not doc.xpath(
"//main//table[normalize-space(string(./caption))=$b]",
b="Agreement details",
)
assert s3.return_value.list.call_args_list == [
mock.call("g-cloud-7/communications", load_timestamps=True)
]
def test_downloads_shown_open_framework_clarification_questions_closed(self, s3):
files = [
('updates/communications/', 'file 1', 'odt', '2015-01-01T14:00:00.000Z'),
('updates/clarifications/', 'file 2', 'odt', '2015-02-02T14:00:00.000Z'),
('', 'g-cloud-7-proposed-call-off', 'pdf', '2016-05-01T14:00:00.000Z'),
('', 'g-cloud-7-invitation', 'pdf', '2016-05-01T14:00:00.000Z'),
('', 'g-cloud-7-proposed-framework-agreement', 'pdf', '2016-06-01T14:00:00.000Z'),
('', 'g-cloud-7-reporting-template', 'xls', '2016-06-06T14:00:00.000Z'),
# superfluous file that shouldn't be shown
('', 'g-cloud-7-supplier-pack', 'zip', '2015-01-01T14:00:00.000Z'),
]
s3.return_value.list.return_value = [
_return_fake_s3_file_dict(
'g-cloud-7/communications/{}'.format(section), filename, ext, last_modified=last_modified
) for section, filename, ext, last_modified in files
]
self.login()
self.data_api_client.get_framework.return_value = self.framework(
status="open", clarification_questions_open=False
)
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework()
res = self.client.get("/suppliers/frameworks/g-cloud-7")
assert res.status_code == 200
doc = html.fromstring(res.get_data(as_text=True))
extracted_guidance_links = _extract_guidance_links(doc)
assert extracted_guidance_links == OrderedDict((
("Guidance", (
(
"Download the invitation to apply",
"/suppliers/frameworks/g-cloud-7/files/g-cloud-7-invitation.pdf",
None,
None,
),
(
"Read about how to apply",
"https://www.gov.uk/guidance/g-cloud-suppliers-guide#how-to-apply",
None,
None,
),
)),
("Legal documents", (
(
"Download the proposed framework agreement",
"/suppliers/frameworks/g-cloud-7/files/g-cloud-7-proposed-framework-agreement.pdf",
"Wednesday 1 June 2016",
"2016-06-01T14:00:00.000Z",
),
(
"Download the proposed \u2018call-off\u2019 contract",
"/suppliers/frameworks/g-cloud-7/files/g-cloud-7-proposed-call-off.pdf",
"Sunday 1 May 2016",
"2016-05-01T14:00:00.000Z",
),
)),
("Communications", (
(
"View communications and clarification questions",
"/suppliers/frameworks/g-cloud-7/updates",
"Monday 2 February 2015",
"2015-02-02T14:00:00.000Z",
),
)),
("Reporting", (
(
"Download the reporting template",
"/suppliers/frameworks/g-cloud-7/files/g-cloud-7-reporting-template.xls",
None,
None,
),
)),
))
assert not any(
doc.xpath("//main//a[contains(@href, $href_part)]", href_part=href_part)
for href_part
in ("g-cloud-7-final-framework-agreement.pdf", "g-cloud-7-supplier-pack.zip")
)
assert not doc.xpath("//main[contains(normalize-space(string()), $a)]",
a="until 5pm BST, Tuesday 22 September 2015")
assert not doc.xpath("//main//table[normalize-space(string(./caption))=$b]", b="Agreement details")
assert s3.return_value.list.call_args_list == [
mock.call("g-cloud-7/communications", load_timestamps=True)
]
def test_final_agreement_download_shown_open_framework(self, s3):
files = [
('updates/communications/', 'file 1', 'odt', '2015-01-01T14:00:00.000Z'),
('updates/clarifications/', 'file 2', 'odt', '2015-02-02T14:00:00.000Z'),
('', 'g-cloud-7-proposed-call-off', 'pdf', '2016-05-01T14:00:00.000Z'),
('', 'g-cloud-7-invitation', 'pdf', '2016-05-01T14:00:00.000Z'),
('', 'g-cloud-7-reporting-template', 'xls', '2016-06-06T14:00:00.000Z'),
('', 'g-cloud-7-final-framework-agreement', 'pdf', '2016-06-02T14:00:00.000Z'),
# present but should be overridden by final agreement file
('', 'g-cloud-7-proposed-framework-agreement', 'pdf', '2016-06-11T14:00:00.000Z'),
]
s3.return_value.list.return_value = [
_return_fake_s3_file_dict(
'g-cloud-7/communications/{}'.format(section), filename, ext, last_modified=last_modified
) for section, filename, ext, last_modified in files
]
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='open')
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework()
res = self.client.get("/suppliers/frameworks/g-cloud-7")
assert res.status_code == 200
doc = html.fromstring(res.get_data(as_text=True))
extracted_guidance_links = _extract_guidance_links(doc)
assert extracted_guidance_links == OrderedDict((
("Guidance", (
(
"Download the invitation to apply",
"/suppliers/frameworks/g-cloud-7/files/g-cloud-7-invitation.pdf",
None,
None,
),
(
"Read about how to apply",
"https://www.gov.uk/guidance/g-cloud-suppliers-guide#how-to-apply",
None,
None,
),
)),
("Legal documents", (
(
"Download the framework agreement",
"/suppliers/frameworks/g-cloud-7/files/g-cloud-7-final-framework-agreement.pdf",
"Thursday 2 June 2016",
"2016-06-02T14:00:00.000Z",
),
(
"Download the proposed \u2018call-off\u2019 contract",
"/suppliers/frameworks/g-cloud-7/files/g-cloud-7-proposed-call-off.pdf",
"Sunday 1 May 2016",
"2016-05-01T14:00:00.000Z",
),
)),
("Communications", (
(
"View communications and ask clarification questions",
"/suppliers/frameworks/g-cloud-7/updates",
"Monday 2 February 2015",
"2015-02-02T14:00:00.000Z",
),
)),
("Reporting", (
(
"Download the reporting template",
"/suppliers/frameworks/g-cloud-7/files/g-cloud-7-reporting-template.xls",
None,
None,
),
)),
))
assert not any(
doc.xpath("//main//a[contains(@href, $href_part)]", href_part=href_part)
for href_part
in ("g-cloud-7-proposed-framework-agreement.pdf", "g-cloud-7-supplier-pack.zip")
)
assert len(
doc.xpath("//main//p[contains(normalize-space(string()), $a)]",
a="until 5pm BST, Tuesday 22 September 2015")
) == 1
assert not doc.xpath("//main//table[normalize-space(string(./caption))=$b]", b="Agreement details")
def test_no_updates_open_framework(self, s3):
files = [
('', 'g-cloud-7-call-off', 'pdf', '2016-05-01T14:00:00.000Z'),
('', 'g-cloud-7-invitation', 'pdf', '2016-05-01T14:00:00.000Z'),
('', 'g-cloud-7-proposed-framework-agreement', 'pdf', '2016-06-01T14:00:00.000Z'),
('', 'g-cloud-7-reporting-template', 'xls', '2016-06-06T14:00:00.000Z'),
]
s3.return_value.list.return_value = [
_return_fake_s3_file_dict(
'g-cloud-7/communications/{}'.format(section), filename, ext, last_modified=last_modified
) for section, filename, ext, last_modified in files
]
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='open')
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework()
res = self.client.get("/suppliers/frameworks/g-cloud-7")
assert res.status_code == 200
doc = html.fromstring(res.get_data(as_text=True))
extracted_guidance_links = _extract_guidance_links(doc)
assert (
"View communications and ask clarification questions",
"/suppliers/frameworks/g-cloud-7/updates",
None,
None,
) in extracted_guidance_links["Communications"]
assert len(
doc.xpath("//main//p[contains(normalize-space(string()), $a)]",
a="until 5pm BST, Tuesday 22 September 2015")
) == 1
assert not doc.xpath("//main//table[normalize-space(string(./caption))=$b]", b="Agreement details")
def test_no_files_exist_open_framework(self, s3):
s3.return_value.list.return_value = []
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='open')
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework()
res = self.client.get("/suppliers/frameworks/g-cloud-7")
assert res.status_code == 200
doc = html.fromstring(res.get_data(as_text=True))
extracted_guidance_links = _extract_guidance_links(doc)
assert extracted_guidance_links == OrderedDict((
("Guidance", (
(
"Read about how to apply",
"https://www.gov.uk/guidance/g-cloud-suppliers-guide#how-to-apply",
None,
None,
),
)),
("Communications", (
(
"View communications and ask clarification questions",
"/suppliers/frameworks/g-cloud-7/updates",
None,
None,
),
)),
))
assert not any(
doc.xpath(
"//a[contains(@href, $href_part) or normalize-space(string())=$label]",
href_part=href_part,
label=label,
) for href_part, label in (
(
"g-cloud-7-invitation.pdf",
"Download the invitation to apply",
),
(
"g-cloud-7-proposed-framework-agreement.pdf",
"Download the proposed framework agreement",
),
(
"g-cloud-7-call-off.pdf",
"Download the proposed \u2018call-off\u2019 contract",
),
(
"g-cloud-7-reporting-template.xls",
"Download the reporting template",
),
(
"result-letter.pdf",
"Download your application result letter",
),
)
)
assert len(
doc.xpath("//main//p[contains(normalize-space(string()), $a)]",
a="until 5pm BST, Tuesday 22 September 2015")
) == 1
assert not doc.xpath("//main//table[normalize-space(string(./caption))=$b]", b="Agreement details")
def test_returns_404_if_framework_does_not_exist(self, s3):
self.login()
self.data_api_client.get_framework.side_effect = APIError(mock.Mock(status_code=404))
res = self.client.get('/suppliers/frameworks/does-not-exist')
assert res.status_code == 404
def test_visit_to_framework_dashboard_saved_in_session_if_framework_open(self, s3):
self.login()
self.data_api_client.get_framework.return_value = self.framework(slug="g-cloud-9", status="open")
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework()
response = self.client.get("/suppliers/frameworks/g-cloud-9")
assert response.status_code == 200
with self.client.session_transaction() as session:
assert session["currently_applying_to"] == "g-cloud-9"
@pytest.mark.parametrize(
"framework_status",
["coming", "pending", "standstill", "live", "expired"]
)
def test_visit_to_framework_dashboard_not_saved_in_session_if_framework_not_open(self, s3, framework_status):
self.login()
self.data_api_client.get_framework.return_value = self.framework(slug="g-cloud-9", status=framework_status)
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework()
self.client.get("/suppliers/frameworks/g-cloud-9")
with self.client.session_transaction() as session:
assert "currently_applying_to" not in session
@mock.patch('dmutils.s3.S3')
class TestFrameworksDashboardSuccessBanner(BaseApplicationTest):
"""Tests for the confidence banner on the declaration page."""
def setup_method(self, method):
super().setup_method(method)
self.data_api_client_patch = mock.patch('app.main.views.frameworks.data_api_client', autospec=True)
self.data_api_client = self.data_api_client_patch.start()
self.data_api_client.get_framework.return_value = self.framework(status='open')
def teardown_method(self, method):
self.data_api_client_patch.stop()
super().teardown_method(method)
def test_success_banner_on_page_for_open_framework(self, _):
self.data_api_client.find_draft_services_iter.return_value = [
{'serviceName': 'A service', 'status': 'submitted', 'lotSlug': 'foo'}
]
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
status='complete',
application_company_details_confirmed=True,
)
self.data_api_client.get_supplier.return_value = SupplierStub(
company_details_confirmed=True).single_result_response()
self.login()
res = self.client.get("/suppliers/frameworks/g-cloud-8")
assert res.status_code == 200
document = html.fromstring(res.get_data(as_text=True))
alert_banner = document.xpath('//div[@class="dm-alert dm-alert--success"]')
assert len(alert_banner) == 1
assert alert_banner[0].xpath(
"//h2[contains(normalize-space(string()), $t)]",
t="Your application is complete and will be submitted automatically.",
)
assert alert_banner[0].xpath(
"//div[contains(normalize-space(string()), $t)]",
t="You can change it at any time before the deadline."
)
# Check GA custom dimension values
assert len(document.xpath("//meta[@data-id='29' and @data-value='application_confirmed']")) == 1
def test_success_banner_with_unsubmitted_drafts_shows_different_message(self, _):
self.data_api_client.find_draft_services_iter.return_value = [
{'serviceName': 'A service', 'status': 'submitted', 'lotSlug': 'foo'},
{'serviceName': 'A service', 'status': 'not-submitted', 'lotSlug': 'foo'}
]
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
status='complete',
application_company_details_confirmed=True,
)
self.data_api_client.get_supplier.return_value = SupplierStub(
company_details_confirmed=True).single_result_response()
self.login()
res = self.client.get("/suppliers/frameworks/g-cloud-8")
assert res.status_code == 200
document = html.fromstring(res.get_data(as_text=True))
alert_banner = document.xpath('//div[@class="dm-alert dm-alert--success"]')
assert len(alert_banner) == 1
assert alert_banner[0].xpath(
"//h2[contains(normalize-space(string()), $t)]",
t="Your application is complete and will be submitted automatically.",
)
assert alert_banner[0].xpath(
"//div[contains(normalize-space(string()), $t)]",
t="You still have 1 unsubmitted draft service. "
"You can edit or remove draft services at any time before the deadline.",
)
# Check GA custom dimension values
assert len(document.xpath("//meta[@data-id='29' and @data-value='application_confirmed']")) == 1
@pytest.mark.parametrize(
('declaration_status', 'draft_service_status', 'details_confirmed', 'ga_value'),
(
('started', 'submitted', True, 'services_confirmed'),
('complete', 'not-submitted', True, 'declaration_confirmed'),
('unstarted', 'not-submitted', True, 'company_details_confirmed'),
('unstarted', 'not-submitted', False, 'application_started'),
)
)
def test_success_banner_not_on_page_if_sections_incomplete(
self, _, declaration_status, draft_service_status, details_confirmed, ga_value
):
"""Change value and assert that confidence banner is not displayed."""
supplier_data = SupplierStub(company_details_confirmed=details_confirmed).single_result_response()
self.data_api_client.find_draft_services_iter.return_value = [
{'serviceName': 'A service', 'status': draft_service_status, 'lotSlug': 'foo'}
]
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
status=declaration_status,
declaration={'status': declaration_status},
application_company_details_confirmed=supplier_data['suppliers']['companyDetailsConfirmed'],
)
self.data_api_client.get_supplier.return_value = supplier_data
self.login()
res = self.client.get("/suppliers/frameworks/g-cloud-8")
assert res.status_code == 200
document = html.fromstring(res.get_data(as_text=True))
# Alert banner should not be shown
alert_banner = document.xpath('//div[@class="dm-alert dm-alert--success"]')
assert len(alert_banner) == 0
assert 'Your application is complete and will be submitted automatically.' not in res.get_data(as_text=True)
# Check GA custom dimension values
doc = html.fromstring(res.get_data(as_text=True))
assert len(doc.xpath("//meta[@data-id='29' and @data-value='{}']".format(ga_value))) == 1
@mock.patch('dmutils.s3.S3')
class TestFrameworksDashboardPendingStandstill(BaseApplicationTest):
def setup_method(self, method):
super().setup_method(method)
self.data_api_client_patch = mock.patch('app.main.views.frameworks.data_api_client', autospec=True)
self.data_api_client = self.data_api_client_patch.start()
def teardown_method(self, method):
self.data_api_client_patch.stop()
super().teardown_method(method)
@staticmethod
def _extract_signing_details_table_rows(doc):
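        # Extract each row of the "Agreement details" table (or its
        # govuk-summary-list equivalent) as a tuple of cell text strings.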
return tuple(
tuple(
td_th_dt_dd_elem.xpath("normalize-space(string())")
for td_th_dt_dd_elem in tr_elem.xpath("td|th|dt|dd")
)
for tr_elem in doc.xpath(
("//main//table[normalize-space(string(./caption))=$b]/tbody/tr"
"|"
"//main//dl/div[@class='govuk-summary-list__row']"),
b="Agreement details",
)
)
@property
def _boring_agreement_details(self):
# property so we always get a clean copy
return {
'frameworkAgreementVersion': 'v1.0',
'signerName': 'Martin Cunningham',
'signerRole': 'Foreman',
'uploaderUserId': 123,
'uploaderUserName': 'User',
'uploaderUserEmail': 'email@email.com',
}
_boring_agreement_returned_at = "2016-07-10T21:20:00.000000Z"
@property
def _boring_agreement_details_expected_table_results(self):
# property so we always get a clean copy
return (
(
'Person who signed',
'Martin Cunningham Foreman'
),
(
'Submitted by',
'User email@email.com Sunday 10 July 2016 at 10:20pm BST'
),
(
'Countersignature',
'Waiting for CCS to countersign'
),
)
def test_dashboard_pending_before_award_company_details_not_confirmed(self, s3):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='pending')
self.data_api_client.find_draft_services_iter.return_value = []
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
declaration={}, application_company_details_confirmed=False
)
res = self.client.get("/suppliers/frameworks/g-cloud-7")
assert res.status_code == 200
doc = html.fromstring(res.get_data(as_text=True))
assert doc.xpath(
"//main//p[contains(normalize-space(string()), $details_text)]",
details_text="You did not confirm your company details.",
)
assert doc.xpath(
"//main//p[contains(normalize-space(string()), $declaration_text)]",
declaration_text="You did not make a supplier declaration.",
)
assert doc.xpath(
"//main//p[contains(normalize-space(string()), $drafts_text)]",
drafts_text="You did not create any services.",
)
def test_dashboard_pending_before_award_services_but_no_declaration(self, s3):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='pending')
self.data_api_client.find_draft_services_iter.return_value = [
{'serviceName': 'A service', 'status': 'submitted', 'lotSlug': 'iaas'}
]
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
declaration={}
)
res = self.client.get("/suppliers/frameworks/g-cloud-7")
assert res.status_code == 200
doc = html.fromstring(res.get_data(as_text=True))
assert doc.xpath(
"//main//p[contains(normalize-space(string()), $declaration_text)]",
declaration_text="You did not make a supplier declaration",
)
assert doc.xpath(
"//main//a[@href=$href or normalize-space(string())=$label]",
href="/frameworks/g-cloud-7/submissions",
label="View draft services",
)
@pytest.mark.parametrize('declaration_status', ('started', 'complete'))
def test_dashboard_pending_before_award_with_services_and_declaration(self, s3, declaration_status):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='pending')
self.data_api_client.find_draft_services_iter.return_value = [
{'serviceName': 'A service', 'status': 'submitted', 'lotSlug': 'iaas'}
]
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
declaration={'status': declaration_status}
)
res = self.client.get("/suppliers/frameworks/g-cloud-7")
doc = html.fromstring(res.get_data(as_text=True))
assert doc.xpath(
"//main//a[@href=$href or normalize-space(string())=$label]",
href="/frameworks/g-cloud-7/declaration",
label="View your declaration",
)
if declaration_status == 'complete':
assert doc.xpath(
"//main//p[contains(normalize-space(string()), $declaration_text)]",
declaration_text="You made your supplier declaration",
)
assert doc.xpath(
"//main//a[@href=$href or normalize-space(string())=$label]",
href="/frameworks/g-cloud-7/submissions",
label="View submitted services",
)
@pytest.mark.parametrize('declaration_status', ('started', 'complete'))
def test_dashboard_pending_before_award_with_declaration_incomplete_services(self, s3, declaration_status):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='pending')
self.data_api_client.find_draft_services_iter.return_value = [
{'serviceName': 'A service', 'status': 'not-submitted', 'lotSlug': 'iaas'}
]
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
declaration={'status': declaration_status}
)
res = self.client.get("/suppliers/frameworks/g-cloud-7")
doc = html.fromstring(res.get_data(as_text=True))
assert doc.xpath(
"//main//a[@href=$href or normalize-space(string())=$label]",
href="/frameworks/g-cloud-7/declaration",
label="View your declaration",
)
assert doc.xpath(
"//main//a[@href=$href or normalize-space(string())=$label]",
href="/frameworks/g-cloud-7/submissions",
label="View draft services",
)
@pytest.mark.parametrize('declaration_status', ('started', 'complete'))
def test_dashboard_pending_before_award_with_declaration_no_services(self, s3, declaration_status):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='pending')
self.data_api_client.find_draft_services_iter.return_value = []
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
declaration={'status': declaration_status}
)
res = self.client.get("/suppliers/frameworks/g-cloud-7")
doc = html.fromstring(res.get_data(as_text=True))
assert doc.xpath(
"//main//a[@href=$href or normalize-space(string())=$label]",
href="/frameworks/g-cloud-7/declaration",
label="View your declaration",
)
assert doc.xpath(
"//main//p[contains(normalize-space(string()), $drafts_text)]",
drafts_text="You did not create any services.",
)
def test_result_letter_is_shown_when_is_in_standstill(self, s3):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='standstill')
self.data_api_client.find_draft_services_iter.return_value = [
{'serviceName': 'A service', 'status': 'submitted', 'lotSlug': 'iaas'}
]
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework()
res = self.client.get("/suppliers/frameworks/g-cloud-7")
data = res.get_data(as_text=True)
assert u'Download your application result letter' in data
def test_result_letter_is_not_shown_when_not_in_standstill(self, s3):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='pending')
self.data_api_client.find_draft_services_iter.return_value = [
{'serviceName': 'A service', 'status': 'submitted', 'lotSlug': 'iaas'}
]
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework()
res = self.client.get("/suppliers/frameworks/g-cloud-7")
data = res.get_data(as_text=True)
assert u'Download your application result letter' not in data
def test_result_letter_is_not_shown_when_no_application(self, s3):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='standstill')
self.data_api_client.find_draft_services_iter.return_value = [
{'serviceName': 'A service', 'status': 'not-submitted'}
]
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework()
res = self.client.get("/suppliers/frameworks/g-cloud-7")
data = res.get_data(as_text=True)
assert u'Download your application result letter' not in data
def test_link_to_unsigned_framework_agreement_is_shown_if_supplier_is_on_framework(self, s3):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='standstill')
self.data_api_client.find_draft_services_iter.return_value = [
{'serviceName': 'A service', 'status': 'submitted', 'lotSlug': 'iaas'}
]
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
on_framework=True)
res = self.client.get("/suppliers/frameworks/g-cloud-7")
data = res.get_data(as_text=True)
assert u'Sign and return your framework agreement' in data
assert u'Download your countersigned framework agreement' not in data
def test_pending_success_message_is_explicit_if_supplier_is_on_framework(self, s3):
self.login()
self.data_api_client.get_framework.return_value = self.framework(
status='standstill', framework_agreement_version=None
)
self.data_api_client.find_draft_services.return_value = [
{'serviceName': 'A service', 'status': 'submitted', 'lotSlug': 'iaas'}
]
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(on_framework=True)
res = self.client.get("/suppliers/frameworks/g-cloud-7")
assert res.status_code == 200
data = res.get_data(as_text=True)
assert (
'Your application was successful.'
) in data
assert 'Download your application award letter (.pdf)' in data
assert 'This letter is a record of your successful G-Cloud 7 application.' in data
assert 'You made your supplier declaration and submitted 1 service.' not in data
assert 'Download your application result letter (.pdf)' not in data
assert 'This letter informs you if your G-Cloud 7 application has been successful.' not in data
def test_link_to_framework_agreement_is_not_shown_if_supplier_is_not_on_framework(self, s3):
self.login()
self.data_api_client.find_draft_services_iter.return_value = [
{'serviceName': 'A service', 'status': 'submitted', 'lotSlug': 'iaas'}
]
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(on_framework=False)
res = self.client.get("/suppliers/frameworks/g-cloud-7")
data = res.get_data(as_text=True)
assert u'Sign and return your framework agreement' not in data
def test_pending_success_message_is_equivocal_if_supplier_is_not_on_framework(self, s3):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='standstill')
self.data_api_client.find_draft_services_iter.return_value = [
{'serviceName': 'A service', 'status': 'submitted', 'lotSlug': 'iaas'}
]
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(on_framework=False)
res = self.client.get("/suppliers/frameworks/g-cloud-7")
assert res.status_code == 200
data = res.get_data(as_text=True)
assert (
'Your application was successful. You\'ll be able to sell services when the G-Cloud 7 framework is live'
) not in data
assert 'Download your application award letter (.pdf)' not in data
assert 'This letter is a record of your successful G-Cloud 7 application.' not in data
assert 'You made your supplier declaration and submitted 1 service.' in data
assert 'Download your application result letter (.pdf)' in data
assert 'This letter informs you if your G-Cloud 7 application has been successful.' in data
def test_countersigned_framework_agreement_non_fav_framework(self, s3):
# "fav" being "frameworkAgreementVersion"
files = [
('', 'g-cloud-7-final-call-off', 'pdf', '2016-05-01T14:00:00.000Z'),
('', 'g-cloud-7-invitation', 'pdf', '2016-05-01T14:00:00.000Z'),
('', 'g-cloud-7-final-framework-agreement', 'pdf', '2016-06-01T14:00:00.000Z'),
('', 'g-cloud-7-reporting-template', 'xls', '2016-06-06T14:00:00.000Z'),
]
s3.return_value.list.return_value = [
_return_fake_s3_file_dict(
'g-cloud-7/communications/{}'.format(section), filename, ext, last_modified=last_modified
) for section, filename, ext, last_modified in files
]
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='standstill')
self.data_api_client.find_draft_services_iter.return_value = [
{'serviceName': 'A service', 'status': 'submitted', 'lotSlug': 'iaas'}
]
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
on_framework=True,
agreement_returned=True,
agreement_details=self._boring_agreement_details,
agreement_path='pathy/mc/path.face',
countersigned=True,
countersigned_path='g-cloud-7/agreements/1234/1234-countersigned-agreement.pdf',
)
res = self.client.get("/suppliers/frameworks/g-cloud-7")
assert res.status_code == 200
data = res.get_data(as_text=True)
doc = html.fromstring(data)
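# note: $href, $label etc. are lxml XPath variables, bound via the keyword arguments passed to xpath()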
assert not doc.xpath(
"//main//a[@href=$href or normalize-space(string())=$label]",
href="/frameworks/g-cloud-7/agreement",
label="Sign and return your framework agreement",
)
extracted_guidance_links = _extract_guidance_links(doc)
assert extracted_guidance_links == OrderedDict((
("You submitted:", (
(
'View submitted services',
'/suppliers/frameworks/g-cloud-7/submissions',
None,
None,
),
(
"View your declaration",
"/suppliers/frameworks/g-cloud-7/declaration",
None,
None,
),
)),
("Legal documents", (
(
'Download the standard framework agreement',
'/suppliers/frameworks/g-cloud-7/files/g-cloud-7-final-framework-agreement.pdf',
None,
None,
),
(
"Download your signed framework agreement",
"/suppliers/frameworks/g-cloud-7/agreements/pathy/mc/path.face",
None,
None,
),
(
"Download your countersigned framework agreement",
"/suppliers/frameworks/g-cloud-7/agreements/countersigned-agreement.pdf",
None,
None,
),
(
'Download your application result letter',
'/suppliers/frameworks/g-cloud-7/agreements/result-letter.pdf',
None,
None,
),
(
'Download the call-off contract template',
'/suppliers/frameworks/g-cloud-7/files/g-cloud-7-final-call-off.pdf',
None,
None,
),
)),
("Guidance", (
(
'Download the invitation to apply',
'/suppliers/frameworks/g-cloud-7/files/g-cloud-7-invitation.pdf',
None,
None,
),
(
"Read about how to sell your services",
"https://www.gov.uk/guidance/g-cloud-suppliers-guide#how-to-apply",
None,
None,
),
)),
("Communications", (
(
"View communications and clarification questions",
"/suppliers/frameworks/g-cloud-7/updates",
None,
None,
),
)),
('Reporting', (
(
'Download the reporting template',
'/suppliers/frameworks/g-cloud-7/files/g-cloud-7-reporting-template.xls',
None,
None,
),
)),
))
assert not doc.xpath(
"//main//table[normalize-space(string(./caption))=$b]",
b="Agreement details",
)
assert not doc.xpath(
"//main//p[contains(normalize-space(string()), $b)]",
b="You can start selling your",
)
# neither of these should exist because it's a pre-frameworkAgreementVersion framework
assert not doc.xpath(
"//main//p[contains(normalize-space(string()), $b)]",
b="Your original and counterpart signature pages",
)
assert not doc.xpath(
"//main//p[contains(normalize-space(string()), $b)]",
b="Your framework agreement signature page has been sent to the Crown Commercial Service",
)
def test_countersigned_framework_agreement_fav_framework(self, s3):
# "fav" being "frameworkAgreementVersion"
files = [
('', 'g-cloud-8-final-call-off', 'pdf', '2016-05-01T14:00:00.000Z'),
('', 'g-cloud-8-invitation', 'pdf', '2016-05-01T14:00:00.000Z'),
('', 'g-cloud-8-final-framework-agreement', 'pdf', '2016-06-01T14:00:00.000Z'),
('', 'g-cloud-8-reporting-template', 'xls', '2016-06-06T14:00:00.000Z'),
]
s3.return_value.list.return_value = [
_return_fake_s3_file_dict(
'g-cloud-8/communications/{}'.format(section), filename, ext, last_modified=last_modified
) for section, filename, ext, last_modified in files
]
self.login()
self.data_api_client.get_framework.return_value = get_g_cloud_8()
self.data_api_client.find_draft_services_iter.return_value = [
{'serviceName': 'A service', 'status': 'submitted', 'lotSlug': 'iaas'}
]
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
on_framework=True,
agreement_returned=True,
agreement_details=self._boring_agreement_details,
agreement_path='pathy/mc/path.face',
agreement_returned_at=self._boring_agreement_returned_at,
countersigned=True,
countersigned_path='g-cloud-8/agreements/1234/1234-countersigned-agreement.pdf',
)
res = self.client.get("/suppliers/frameworks/g-cloud-8")
assert res.status_code == 200
data = res.get_data(as_text=True)
doc = html.fromstring(data)
assert not doc.xpath(
"//main//a[@href=$href or normalize-space(string())=$label]",
href="/frameworks/g-cloud-8/agreement",
label="Sign and return your framework agreement",
)
assert not doc.xpath(
"//main//a[@href=$href or normalize-space(string())=$label]",
href="/suppliers/frameworks/g-cloud-7/agreements/result-letter.pdf",
label="Download your application result letter",
)
extracted_guidance_links = _extract_guidance_links(doc)
assert extracted_guidance_links == OrderedDict((
("You submitted:", (
(
'View submitted services',
'/suppliers/frameworks/g-cloud-8/submissions',
None,
None,
),
(
"View your declaration",
"/suppliers/frameworks/g-cloud-8/declaration",
None,
None,
),
)),
("Legal documents", (
(
'Read the standard framework agreement',
'https://www.gov.uk/government/publications/g-cloud-8-framework-agreement',
None,
None,
),
(
"Download your \u2018original\u2019 framework agreement signature page",
"/suppliers/frameworks/g-cloud-8/agreements/pathy/mc/path.face",
None,
None,
),
(
"Download your \u2018counterpart\u2019 framework agreement signature page",
"/suppliers/frameworks/g-cloud-8/agreements/countersigned-agreement.pdf",
None,
None,
),
(
'Download the call-off contract template',
'/suppliers/frameworks/g-cloud-8/files/g-cloud-8-final-call-off.pdf',
None,
None,
),
)),
("Guidance", (
(
'Download the invitation to apply',
'/suppliers/frameworks/g-cloud-8/files/g-cloud-8-invitation.pdf',
None,
None,
),
(
"Read about how to sell your services",
"https://www.gov.uk/guidance/g-cloud-suppliers-guide#how-to-apply",
None,
None,
),
)),
("Communications", (
(
"View communications and clarification questions",
"/suppliers/frameworks/g-cloud-8/updates",
None,
None,
),
)),
('Reporting', (
(
'Download the reporting template',
'/suppliers/frameworks/g-cloud-8/files/g-cloud-8-reporting-template.xls',
None,
None,
),
)),
))
assert not doc.xpath("//main//table[normalize-space(string(./caption))=$b]", b="Agreement details")
assert not doc.xpath("//main//p[contains(normalize-space(string()), $b)]", b="You can start selling your")
assert doc.xpath(
"//main//p[contains(normalize-space(string()), $b)]",
b="Your original and counterpart signature pages"
)
assert not doc.xpath(
"//main//p[contains(normalize-space(string()), $b)]",
b="Your framework agreement signature page has been sent to the Crown Commercial Service"
)
def test_shows_returned_agreement_details(self, s3):
self.login()
self.data_api_client.get_framework.return_value = get_g_cloud_8()
self.data_api_client.find_draft_services_iter.return_value = [
{'serviceName': 'A service', 'status': 'submitted', 'lotSlug': 'iaas'}
]
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
on_framework=True,
agreement_returned=True,
agreement_details=self._boring_agreement_details,
agreement_path='g-cloud-8/agreements/123-framework-agreement.pdf',
agreement_returned_at=self._boring_agreement_returned_at
)
res = self.client.get("/suppliers/frameworks/g-cloud-8")
assert res.status_code == 200
data = res.get_data(as_text=True)
doc = html.fromstring(data)
assert not doc.xpath(
"//main//a[@href=$href or normalize-space(string())=$label]",
href="/frameworks/g-cloud-8/agreement",
label="Sign and return your framework agreement",
)
assert not doc.xpath(
"//main//a[@href=$href or normalize-space(string())=$label]",
href="/suppliers/frameworks/g-cloud-8/agreements/result-letter.pdf",
label="Download your application result letter",
)
extracted_guidance_links = _extract_guidance_links(doc)
assert extracted_guidance_links == OrderedDict((
("You submitted:", (
(
'View submitted services',
'/suppliers/frameworks/g-cloud-8/submissions',
None,
None,
),
(
"View your declaration",
"/suppliers/frameworks/g-cloud-8/declaration",
None,
None,
),
)),
('Legal documents', (
(
'Read the standard framework agreement',
'https://www.gov.uk/government/publications/g-cloud-8-framework-agreement',
None,
None,
),
(
u'Download your \u2018original\u2019 framework agreement signature page',
'/suppliers/frameworks/g-cloud-8/agreements/framework-agreement.pdf',
None,
None,
),
)),
('Guidance', (
(
'Read about how to sell your services',
'https://www.gov.uk/guidance/g-cloud-suppliers-guide#how-to-apply',
None,
None,
),
)),
('Communications', (
(
'View communications and clarification questions',
'/suppliers/frameworks/g-cloud-8/updates',
None,
None,
),
)),
))
extracted_signing_details_table_rows = self._extract_signing_details_table_rows(doc)
assert extracted_signing_details_table_rows == \
self._boring_agreement_details_expected_table_results
assert len(doc.xpath(
"//main//h1[normalize-space(string())=$b]",
b="Your G-Cloud 8 application",
)) == 1
assert doc.xpath("//main//p[contains(normalize-space(string()), $b)]", b="You can start selling your")
assert not doc.xpath(
"//main//p[contains(normalize-space(string()), $b)]",
b="Your original and counterpart signature pages",
)
assert doc.xpath(
"//main//p[contains(normalize-space(string()), $b)]",
b="Your framework agreement signature page has been sent to the Crown Commercial Service",
)
def test_countersigned_but_no_countersigned_path(self, s3):
self.login()
self.data_api_client.get_framework.return_value = get_g_cloud_8()
self.data_api_client.find_draft_services_iter.return_value = [
{'serviceName': 'A service', 'status': 'submitted', 'lotSlug': 'iaas'}
]
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
on_framework=True,
agreement_returned=True,
agreement_details=self._boring_agreement_details,
agreement_path='g-cloud-8/agreements/123-framework-agreement.pdf',
agreement_returned_at=self._boring_agreement_returned_at,
countersigned=True,
# note `countersigned_path` is not set: we're testing that the view behaves as though not countersigned
# i.e. is not depending on the `countersigned` property
)
res = self.client.get("/suppliers/frameworks/g-cloud-8")
assert res.status_code == 200
data = res.get_data(as_text=True)
doc = html.fromstring(data)
assert not doc.xpath(
"//main//a[@href=$href or normalize-space(string())=$label]",
href="/frameworks/g-cloud-8/agreement",
label="Sign and return your framework agreement",
)
extracted_guidance_links = _extract_guidance_links(doc)
assert extracted_guidance_links == OrderedDict((
("You submitted:", (
(
'View submitted services',
'/suppliers/frameworks/g-cloud-8/submissions',
None,
None,
),
(
"View your declaration",
"/suppliers/frameworks/g-cloud-8/declaration",
None,
None,
),
)),
('Legal documents', (
(
'Read the standard framework agreement',
'https://www.gov.uk/government/publications/g-cloud-8-framework-agreement',
None,
None,
),
(
u'Download your \u2018original\u2019 framework agreement signature page',
'/suppliers/frameworks/g-cloud-8/agreements/framework-agreement.pdf',
None,
None,
),
)),
('Guidance', (
(
'Read about how to sell your services',
'https://www.gov.uk/guidance/g-cloud-suppliers-guide#how-to-apply',
None,
None,
),
)),
('Communications', (
(
'View communications and clarification questions',
'/suppliers/frameworks/g-cloud-8/updates',
None,
None,
),
)),
))
extracted_signing_details_table_rows = self._extract_signing_details_table_rows(doc)
assert extracted_signing_details_table_rows == \
self._boring_agreement_details_expected_table_results
assert len(doc.xpath("//main//h1[normalize-space(string())=$b]", b="Your G-Cloud 8 application")) == 1
assert doc.xpath("//main//p[contains(normalize-space(string()), $b)]", b="You can start selling your")
assert not doc.xpath(
"//main//p[contains(normalize-space(string()), $b)]",
b="Your original and counterpart signature pages",
)
assert doc.xpath(
"//main//p[contains(normalize-space(string()), $b)]",
b="Your framework agreement signature page has been sent to the Crown Commercial Service",
)
def test_shows_contract_variation_link_after_agreement_returned(self, s3):
self.login()
g8_with_variation = get_g_cloud_8()
g8_with_variation['frameworks']['variations'] = {"1": {"createdAt": "2018-08-16"}}
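# inject a single proposed variation so the dashboard should surface a link to it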
self.data_api_client.get_framework.return_value = g8_with_variation
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
on_framework=True,
agreement_returned=True,
agreement_details=self._boring_agreement_details,
agreement_path='g-cloud-8/agreements/123-framework-agreement.pdf',
agreement_returned_at=self._boring_agreement_returned_at,
)
res = self.client.get("/suppliers/frameworks/g-cloud-8")
assert res.status_code == 200
data = res.get_data(as_text=True)
doc = html.fromstring(data)
assert not doc.xpath(
"//main//a[@href=$href or normalize-space(string())=$label]",
href="/frameworks/g-cloud-8/agreement",
label="Sign and return your framework agreement",
)
extracted_guidance_links = _extract_guidance_links(doc)
assert extracted_guidance_links == OrderedDict((
("You submitted:", (
(
'View submitted services',
'/suppliers/frameworks/g-cloud-8/submissions',
None,
None,
),
(
"View your declaration",
"/suppliers/frameworks/g-cloud-8/declaration",
None,
None,
),
)),
('Legal documents', (
(
'Read the standard framework agreement',
'https://www.gov.uk/government/publications/g-cloud-8-framework-agreement',
None,
None,
),
(
u'Download your \u2018original\u2019 framework agreement signature page',
'/suppliers/frameworks/g-cloud-8/agreements/framework-agreement.pdf',
None,
None,
),
(
'Read the proposed contract variation',
'/suppliers/frameworks/g-cloud-8/contract-variation/1',
None,
None,
),
)),
('Guidance', (
(
'Read about how to sell your services',
'https://www.gov.uk/guidance/g-cloud-suppliers-guide#how-to-apply',
None,
None,
),
)),
('Communications', (
(
'View communications and clarification questions',
'/suppliers/frameworks/g-cloud-8/updates',
None,
None,
),
)),
))
extracted_signing_details_table_rows = self._extract_signing_details_table_rows(doc)
assert extracted_signing_details_table_rows == \
self._boring_agreement_details_expected_table_results
assert doc.xpath(
"//main//p[contains(normalize-space(string()), $b)]", b="You can start selling your")
assert not doc.xpath(
"//main//p[contains(normalize-space(string()), $b)]",
b="Your original and counterpart signature pages",
)
assert doc.xpath(
"//main//p[contains(normalize-space(string()), $b)]",
b="Your framework agreement signature page has been sent to the Crown Commercial Service",
)
def test_does_not_show_contract_variation_link_if_no_variation(self, s3):
self.login()
self.data_api_client.get_framework.return_value = get_g_cloud_8()
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
on_framework=True,
agreement_returned=True,
agreement_details=self._boring_agreement_details,
agreement_path='g-cloud-8/agreements/123-framework-agreement.pdf',
agreement_returned_at=self._boring_agreement_returned_at,
)
res = self.client.get("/suppliers/frameworks/g-cloud-8")
assert res.status_code == 200
data = res.get_data(as_text=True)
doc = html.fromstring(data)
assert not doc.xpath(
"//main//a[@href=$href or normalize-space(string())=$label]",
href="/frameworks/g-cloud-7/agreement",
label="Sign and return your framework agreement",
)
assert not doc.xpath(
"//main//a[normalize-space(string())=$label]",
label="Read the proposed contract variation",
)
extracted_signing_details_table_rows = self._extract_signing_details_table_rows(doc)
assert extracted_signing_details_table_rows == \
self._boring_agreement_details_expected_table_results
assert doc.xpath("//main//p[contains(normalize-space(string()), $b)]", b="You can start selling your")
assert not doc.xpath(
"//main//p[contains(normalize-space(string()), $b)]",
b="Your original and counterpart signature pages",
)
assert doc.xpath(
"//main//p[contains(normalize-space(string()), $b)]",
b="Your framework agreement signature page has been sent to the Crown Commercial Service",
)
def test_does_not_show_contract_variation_link_if_agreement_not_returned(self, s3):
self.login()
g8_with_variation = get_g_cloud_8()
g8_with_variation['frameworks']['variations'] = {"1": {"createdAt": "2018-08-16"}}
self.data_api_client.get_framework.return_value = g8_with_variation
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework()
res = self.client.get("/suppliers/frameworks/g-cloud-8")
assert res.status_code == 200
data = res.get_data(as_text=True)
doc = html.fromstring(data)
assert not doc.xpath(
"//main//a[@href=$href or normalize-space(string())=$label]",
href="/frameworks/g-cloud-7/agreement",
label="Sign and return your framework agreement",
)
assert not doc.xpath(
"//main//a[contains(@href, $href_part) or normalize-space(string())=$label]",
href_part="contract-variation/1",
label="Read the proposed contract variation",
)
assert not doc.xpath(
"//main//table[normalize-space(string(./caption))=$b]",
b="Agreement details",
)
assert not doc.xpath("//main//p[contains(normalize-space(string()), $b)]", b="You can start selling your")
assert not doc.xpath(
"//main//p[contains(normalize-space(string()), $b)]",
b="Your original and counterpart signature pages",
)
assert not doc.xpath(
"//main//p[contains(normalize-space(string()), $b)]",
b="Your framework agreement signature page has been sent to the Crown Commercial Service",
)
def test_shows_contract_variation_alternate_link_text_after_agreed_by_ccs(self, s3):
self.login()
g8_with_variation = get_g_cloud_8()
g8_with_variation['frameworks']['variations'] = {
"1": {
"createdAt": "2018-08-16",
"countersignedAt": "2018-10-01",
"countersignerName": "A.N. Other",
"countersignerRole": "Head honcho",
},
}
self.data_api_client.get_framework.return_value = g8_with_variation
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
on_framework=True,
agreement_returned=True,
agreement_details=self._boring_agreement_details,
agreement_returned_at=self._boring_agreement_returned_at,
agreement_path='g-cloud-8/agreements/1234/1234-signed-agreement.pdf',
agreed_variations={
"1": {
"agreedAt": "2016-08-19T15:47:08.116613Z",
"agreedUserId": 1,
"agreedUserEmail": "agreed@email.com",
"agreedUserName": "William Drăyton",
},
},
)
res = self.client.get("/suppliers/frameworks/g-cloud-8")
assert res.status_code == 200
data = res.get_data(as_text=True)
doc = html.fromstring(data)
assert not doc.xpath(
"//main//a[@href=$href or normalize-space(string())=$label]",
href="/frameworks/g-cloud-8/agreement",
label="Sign and return your framework agreement",
)
extracted_guidance_links = _extract_guidance_links(doc)
assert extracted_guidance_links == OrderedDict((
("You submitted:", (
(
'View submitted services',
'/suppliers/frameworks/g-cloud-8/submissions',
None,
None,
),
(
"View your declaration",
"/suppliers/frameworks/g-cloud-8/declaration",
None,
None,
),
)),
('Legal documents', (
(
'Read the standard framework agreement',
'https://www.gov.uk/government/publications/g-cloud-8-framework-agreement',
None,
None,
),
(
u'Download your \u2018original\u2019 framework agreement signature page',
'/suppliers/frameworks/g-cloud-8/agreements/signed-agreement.pdf',
None,
None,
),
(
'View the signed contract variation',
'/suppliers/frameworks/g-cloud-8/contract-variation/1',
None,
None,
),
)),
('Guidance', (
(
'Read about how to sell your services',
'https://www.gov.uk/guidance/g-cloud-suppliers-guide#how-to-apply',
None,
None,
),
)),
('Communications', (
(
'View communications and clarification questions',
'/suppliers/frameworks/g-cloud-8/updates',
None,
None,
),
)),
))
assert not doc.xpath(
"//main//a[normalize-space(string())=$label]",
label="Read the proposed contract variation",
)
assert doc.xpath("//main//p[contains(normalize-space(string()), $b)]", b="You can start selling your")
assert not doc.xpath(
"//main//p[contains(normalize-space(string()), $b)]",
b="Your original and counterpart signature pages",
)
assert doc.xpath(
"//main//p[contains(normalize-space(string()), $b)]",
b="Your framework agreement signature page has been sent to the Crown Commercial Service",
)
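# with no saved declaration the link should point at the "start" page; otherwise at the declaration overview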
@pytest.mark.parametrize(
'supplier_framework_kwargs,link_href',
(
({'declaration': None}, '/suppliers/frameworks/g-cloud-7/declaration/start'),
({}, '/suppliers/frameworks/g-cloud-7/declaration')
)
)
def test_make_supplier_declaration_links_to_correct_page(
self, s3, supplier_framework_kwargs, link_href
):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='open')
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
application_company_details_confirmed=True,
**supplier_framework_kwargs,
)
response = self.client.get('/suppliers/frameworks/g-cloud-7')
document = html.fromstring(response.get_data(as_text=True))
assert (
document.xpath(
"//a[contains(normalize-space(string()), $link_label)]/@href",
link_label="Make your supplier declaration"
)[0]
) == link_href
@mock.patch('dmutils.s3.S3')
class TestFrameworkAgreementDocumentDownload(BaseApplicationTest):
def setup_method(self, method):
super().setup_method(method)
self.data_api_client_patch = mock.patch('app.main.views.frameworks.data_api_client', autospec=True)
self.data_api_client = self.data_api_client_patch.start()
def teardown_method(self, method):
self.data_api_client_patch.stop()
super().teardown_method(method)
def test_download_document_fails_if_no_supplier_framework(self, S3):
self.data_api_client.get_supplier_framework_info.side_effect = APIError(mock.Mock(status_code=404))
self.login()
res = self.client.get('/suppliers/frameworks/g-cloud-7/agreements/example.pdf')
assert res.status_code == 404
def test_download_document_fails_if_no_supplier_declaration(self, S3):
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(declaration=None)
self.login()
res = self.client.get('/suppliers/frameworks/g-cloud-7/agreements/example.pdf')
assert res.status_code == 404
def test_download_document(self, S3):
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework()
uploader = mock.Mock()
S3.return_value = uploader
uploader.get_signed_url.return_value = 'http://url/path?param=value'
self.login()
res = self.client.get('/suppliers/frameworks/g-cloud-7/agreements/example.pdf')
assert res.status_code == 302
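# the signed URL's host should be rewritten to the app's assets host (see
# test_download_document_with_asset_url below, which overrides DM_ASSETS_URL)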
assert res.location == 'http://asset-host/path?param=value'
uploader.get_signed_url.assert_called_with('g-cloud-7/agreements/1234/1234-example.pdf')
def test_download_document_with_asset_url(self, S3):
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework()
uploader = mock.Mock()
S3.return_value = uploader
uploader.get_signed_url.return_value = 'http://url/path?param=value'
self.app.config['DM_ASSETS_URL'] = 'https://example'
self.login()
res = self.client.get('/suppliers/frameworks/g-cloud-7/agreements/example.pdf')
assert res.status_code == 302
assert res.location == 'https://example/path?param=value'
uploader.get_signed_url.assert_called_with('g-cloud-7/agreements/1234/1234-example.pdf')
@mock.patch('dmutils.s3.S3')
class TestFrameworkDocumentDownload(BaseApplicationTest):
def test_download_document(self, S3):
uploader = mock.Mock()
S3.return_value = uploader
uploader.get_signed_url.return_value = 'http://url/path?param=value'
self.login()
res = self.client.get('/suppliers/frameworks/g-cloud-7/files/example.pdf')
assert res.status_code == 302
assert res.location == 'http://asset-host/path?param=value'
uploader.get_signed_url.assert_called_with('g-cloud-7/communications/example.pdf')
def test_download_document_returns_404_if_url_is_None(self, S3):
uploader = mock.Mock()
S3.return_value = uploader
uploader.get_signed_url.return_value = None
self.login()
res = self.client.get('/suppliers/frameworks/g-cloud-7/files/example.pdf')
assert res.status_code == 404
@mock.patch('dmutils.s3.S3')
class TestDownloadDeclarationDocument(BaseApplicationTest, MockEnsureApplicationCompanyDetailsHaveBeenConfirmedMixin):
def setup_method(self, method):
super().setup_method(method)
self.login()
self.data_api_client_patch = mock.patch('app.main.views.services.data_api_client', autospec=True)
self.data_api_client = self.data_api_client_patch.start()
def teardown_method(self, method):
self.data_api_client_patch.stop()
super().teardown_method(method)
def test_document_url(self, s3):
s3.return_value.get_signed_url.return_value = 'http://example.com/modern-slavery-statement.pdf'
res = self.client.get(
'/suppliers/frameworks/g-cloud-11/documents/1234/modern-slavery-statement.pdf'
)
assert res.status_code == 302
assert res.headers['Location'] == 'http://asset-host/modern-slavery-statement.pdf'
def test_missing_document_url(self, s3):
s3.return_value.get_signed_url.return_value = None
res = self.client.get(
'/suppliers/frameworks/g-cloud-11/documents/1234/modern-slavery-statement.pdf'
)
assert res.status_code == 404
def test_document_url_not_matching_user_supplier(self, s3):
res = self.client.get(
'/suppliers/frameworks/g-cloud-11/documents/999/modern-slavery-statement.pdf'
)
assert res.status_code == 404
class TestStartSupplierDeclaration(BaseApplicationTest, MockEnsureApplicationCompanyDetailsHaveBeenConfirmedMixin):
def setup_method(self, method):
super().setup_method(method)
self.data_api_client_patch = mock.patch('app.main.views.frameworks.data_api_client', autospec=True)
self.data_api_client = self.data_api_client_patch.start()
def teardown_method(self, method):
self.data_api_client_patch.stop()
super().teardown_method(method)
def test_start_declaration_goes_to_declaration_overview_page(self):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='open')
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework()
response = self.client.get('/suppliers/frameworks/g-cloud-7/declaration/start')
document = html.fromstring(response.get_data(as_text=True))
assert (
document.xpath("//a[normalize-space(string(.))='Start your declaration']/@href")[0]
== '/suppliers/frameworks/g-cloud-7/declaration/reuse'
)
assert document.xpath(
"//p[contains(normalize-space(string()), $t)]",
t="change your answers before the application deadline at "
"5pm\u00a0BST,\u00a0Tuesday\u00a06\u00a0October\u00a02015.",
)
@pytest.mark.parametrize('method', ('get', 'post'))
class TestDeclarationOverviewSubmit(BaseApplicationTest, MockEnsureApplicationCompanyDetailsHaveBeenConfirmedMixin):
"""Behaviour common to both GET and POST views on path /suppliers/frameworks/g-cloud-7/declaration."""
def setup_method(self, method):
super().setup_method(method)
self.data_api_client_patch = mock.patch('app.main.views.frameworks.data_api_client', autospec=True)
self.data_api_client = self.data_api_client_patch.start()
def teardown_method(self, method):
self.data_api_client_patch.stop()
super().teardown_method(method)
def test_supplier_not_interested(self, method):
self.login()
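# assert_args_and_return/assert_args_and_raise act as side_effects that also verify the mock was
# called with exactly the given arguments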
self.data_api_client.get_framework.side_effect = assert_args_and_return(
self.framework(status="open"), "g-cloud-7"
)
self.data_api_client.get_supplier_framework_info.side_effect = assert_args_and_raise(
APIError(mock.Mock(status_code=404)),
1234,
"g-cloud-7",
)
self.data_api_client.set_supplier_declaration.side_effect = AssertionError("This shouldn't be called")
response = getattr(self.client, method)("/suppliers/frameworks/g-cloud-7/declaration")
assert response.status_code == 404
def test_framework_coming(self, method):
self.login()
self.data_api_client.get_framework.side_effect = assert_args_and_return(
self.framework(status="coming"),
"g-cloud-7",
)
self.data_api_client.get_supplier_framework_info.side_effect = assert_args_and_return(
self.supplier_framework(framework_slug="g-cloud-7"),
1234,
"g-cloud-7",
)
self.data_api_client.set_supplier_declaration.side_effect = AssertionError("This shouldn't be called")
response = getattr(self.client, method)("/suppliers/frameworks/g-cloud-7/declaration")
assert response.status_code == 404
def test_framework_unknown(self, method):
self.login()
self.data_api_client.get_framework.side_effect = assert_args_and_raise(
APIError(mock.Mock(status_code=404)),
"muttoning-clouds",
)
self.data_api_client.get_supplier_framework_info.side_effect = assert_args_and_raise(
APIError(mock.Mock(status_code=404)),
1234,
"muttoning-clouds",
)
self.data_api_client.set_supplier_declaration.side_effect = AssertionError("This shouldn't be called")
response = getattr(self.client, method)("/suppliers/frameworks/muttoning-clouds/declaration")
assert response.status_code == 404
class TestDeclarationOverview(BaseApplicationTest, MockEnsureApplicationCompanyDetailsHaveBeenConfirmedMixin):
def setup_method(self, method):
super().setup_method(method)
self.data_api_client_patch = mock.patch('app.main.views.frameworks.data_api_client', autospec=True)
self.data_api_client = self.data_api_client_patch.start()
def teardown_method(self, method):
self.data_api_client_patch.stop()
super().teardown_method(method)
@staticmethod
def _extract_section_information(doc, section_title, expect_edit_link=True):
"""
given a section (full text) name, returns that section's relevant information in a tuple (format described
in comments)
"""
tables = doc.xpath(
"//table[preceding::h2[1][normalize-space(string())=$section_title]]",
section_title=section_title,
)
assert len(tables) == 1
table = tables[0]
edit_as = doc.xpath(
"//a[@class='summary-change-link'][preceding::h2[1][normalize-space(string())=$section_title]]",
section_title=section_title,
)
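# exactly one "Edit" link should accompany the section's heading iff expect_edit_link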
assert ([a.xpath("normalize-space(string())") for a in edit_as] == ["Edit"]) is expect_edit_link
return (
# table caption text
table.xpath("normalize-space(string(./caption))"),
# "Edit" link href
edit_as[0].xpath("@href")[0] if expect_edit_link else None,
tuple(
(
# contents of row heading
row.xpath("normalize-space(string(./td[@class='summary-item-field-first']))"),
# full text contents of row "value"
row.xpath("normalize-space(string(./td[@class='summary-item-field']))"),
# full text contents of each a element in row value
tuple(a.xpath("normalize-space(string())") for a in row.xpath(
"./td[@class='summary-item-field']//a"
)),
# href of each a element in row value
tuple(row.xpath("./td[@class='summary-item-field']//a/@href")),
# full text contents of each li element in row value
tuple(li.xpath("normalize-space(string())") for li in row.xpath(
"./td[@class='summary-item-field']//li"
)),
) for row in table.xpath(".//tr[contains(@class,'summary-item-row')]")
)
)
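# Illustrative shape of _extract_section_information's return value (invented example values):
#     ("Grounds for mandatory exclusion",                      # table caption
#      "/suppliers/frameworks/g-cloud-9/declaration/edit/...", # "Edit" href, or None
#      (("Fraud convictions", "Yes", (), (), ()),))            # one 5-tuple per summary row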
@staticmethod
def _section_information_strip_edit_href(section_information):
caption, edit_href, rows = section_information
return caption, None, rows
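# _setup_data_api_client wires up the mocked read calls for a single framework/declaration scenario and
# makes any write attempt (set_supplier_declaration) fail loudly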
def _setup_data_api_client(self, framework_status, framework_slug, declaration, prefill_fw_slug):
self.data_api_client.get_framework.side_effect = assert_args_and_return(
self.framework(slug=framework_slug, name="F-Cumulus 0", status=framework_status),
framework_slug,
)
self.data_api_client.get_supplier_framework_info.side_effect = assert_args_and_return(
self.supplier_framework(
framework_slug=framework_slug,
declaration=declaration,
prefill_declaration_from_framework_slug=prefill_fw_slug,
),
1234,
framework_slug,
)
self.data_api_client.set_supplier_declaration.side_effect = AssertionError("This shouldn't be called")
# corresponds to the parametrization args:
# "framework_slug,declaration,decl_valid,prefill_fw_slug,expected_sections"
_common_parametrization = tuple(
chain.from_iterable(chain(
(( # noqa
"g-cloud-9",
empty_declaration,
False,
prefill_fw_slug,
(
( # expected result for "Providing suitable services" section as returned by
# _extract_section_information
"Providing suitable services",
"/suppliers/frameworks/g-cloud-9/declaration/edit/providing-suitable-services",
(
(
"Services are cloud-related",
"Answer question",
("Answer question",),
("/suppliers/frameworks/g-cloud-9/declaration/edit/providing-suitable-services",),
(),
),
(
"Services in scope for G-Cloud",
"Answer question",
("Answer question",),
("/suppliers/frameworks/g-cloud-9/declaration/edit/providing-suitable-"
"services#servicesDoNotInclude",),
(),
),
(
"Buyers pay for what they use",
"Answer question",
("Answer question",),
(
"/suppliers/frameworks/g-cloud-9/declaration/edit/providing-suitable-services"
"#payForWhatUse",
),
(),
),
(
"What your team will deliver",
"Answer question",
("Answer question",),
(
"/suppliers/frameworks/g-cloud-9/declaration/edit/providing-suitable-"
"services#offerServicesYourselves",
),
(),
),
(
"Contractual responsibility and accountability",
"Answer question",
("Answer question",),
(
"/suppliers/frameworks/g-cloud-9/declaration/edit/providing-suitable-"
"services#fullAccountability",
),
(),
),
),
),
( # expected result for "Grounds for mandatory exclusion" section as returned by
# _extract_section_information
"Grounds for mandatory exclusion",
"/suppliers/frameworks/g-cloud-9/declaration/edit/grounds-for-mandatory-exclusion",
(
(
"Organised crime or conspiracy convictions",
q_link_text_prefillable_section,
(q_link_text_prefillable_section,),
("/suppliers/frameworks/g-cloud-9/declaration/edit/grounds-for-mandatory-exclusion",),
(),
),
(
"Bribery or corruption convictions",
q_link_text_prefillable_section,
(q_link_text_prefillable_section,),
(
"/suppliers/frameworks/g-cloud-9/declaration/edit/grounds-for-mandatory-"
"exclusion#corruptionBribery",
),
(),
),
(
"Fraud convictions",
q_link_text_prefillable_section,
(q_link_text_prefillable_section,),
(
"/suppliers/frameworks/g-cloud-9/declaration/edit/grounds-for-mandatory-"
"exclusion#fraudAndTheft",
),
(),
),
(
"Terrorism convictions",
q_link_text_prefillable_section,
(q_link_text_prefillable_section,),
(
"/suppliers/frameworks/g-cloud-9/declaration/edit/grounds-for-mandatory-"
"exclusion#terrorism",
),
(),
),
(
"Organised crime convictions",
q_link_text_prefillable_section,
(q_link_text_prefillable_section,),
(
"/suppliers/frameworks/g-cloud-9/declaration/edit/grounds-for-mandatory-"
"exclusion#organisedCrime",
),
(),
),
),
),
( # expected result for "How you’ll deliver your services" section as returned by
# _extract_section_information
"How you’ll deliver your services",
"/suppliers/frameworks/g-cloud-9/declaration/edit/how-youll-deliver-your-services",
(
(
"Subcontractors or consortia",
q_link_text_prefillable_section,
(q_link_text_prefillable_section,),
(
"/suppliers/frameworks/g-cloud-9/declaration/edit/how-youll-deliver-your-"
"services",
),
(),
),
),
),
),
) for empty_declaration in (None, {})),  # two possible ways of specifying an "empty" declaration - test both
(( # noqa
"g-cloud-9",
{
"status": "started",
"conspiracy": True,
"corruptionBribery": False,
"fraudAndTheft": True,
"terrorism": False,
"organisedCrime": True,
"subcontracting": [
"yourself without the use of third parties (subcontractors)",
"as a prime contractor, using third parties (subcontractors) to provide all services",
],
},
False,
prefill_fw_slug,
(
( # expected result for "Providing suitable services" section as returned by
# _extract_section_information
"Providing suitable services",
"/suppliers/frameworks/g-cloud-9/declaration/edit/providing-suitable-services",
(
(
"Services are cloud-related",
"Answer question",
("Answer question",),
("/suppliers/frameworks/g-cloud-9/declaration/edit/providing-suitable-services",),
(),
),
(
"Services in scope for G-Cloud",
"Answer question",
("Answer question",),
(
"/suppliers/frameworks/g-cloud-9/declaration/edit/providing-suitable-"
"services#servicesDoNotInclude",
),
(),
),
(
"Buyers pay for what they use",
"Answer question",
("Answer question",),
(
"/suppliers/frameworks/g-cloud-9/declaration/edit/providing-suitable-"
"services#payForWhatUse",
),
(),
),
(
"What your team will deliver",
"Answer question",
("Answer question",),
(
"/suppliers/frameworks/g-cloud-9/declaration/edit/providing-suitable-"
"services#offerServicesYourselves",
),
(),
),
(
"Contractual responsibility and accountability",
"Answer question",
("Answer question",),
(
"/suppliers/frameworks/g-cloud-9/declaration/edit/providing-suitable-"
"services#fullAccountability",
),
(),
),
),
),
( # expected result for "Grounds for mandatory exclusion" section as returned by
# _extract_section_information
"Grounds for mandatory exclusion",
"/suppliers/frameworks/g-cloud-9/declaration/edit/grounds-for-mandatory-exclusion",
(
(
"Organised crime or conspiracy convictions",
"Yes",
(),
(),
(),
),
(
"Bribery or corruption convictions",
"No",
(),
(),
(),
),
(
"Fraud convictions",
"Yes",
(),
(),
(),
),
(
"Terrorism convictions",
"No",
(),
(),
(),
),
(
"Organised crime convictions",
"Yes",
(),
(),
(),
),
),
),
( # expected result for "How you’ll deliver your services" section as returned by
# _extract_section_information
"How you’ll deliver your services",
"/suppliers/frameworks/g-cloud-9/declaration/edit/how-youll-deliver-your-services",
(
(
"Subcontractors or consortia",
(
"yourself without the use of third parties (subcontractors) as a prime contractor, "
"using third parties (subcontractors) to provide all services"
),
(),
(),
(
"yourself without the use of third parties (subcontractors)",
"as a prime contractor, using third parties (subcontractors) to provide all services",
),
),
),
),
),
),),
(( # noqa
"g-cloud-9",
dict(status=declaration_status, **(valid_g9_declaration_base())),
True,
prefill_fw_slug,
(
( # expected result for "Providing suitable services" section as returned by
# _extract_section_information
"Providing suitable services",
"/suppliers/frameworks/g-cloud-9/declaration/edit/providing-suitable-services",
(
(
"Services are cloud-related",
"Yes",
(),
(),
(),
),
(
"Services in scope for G-Cloud",
"Yes",
(),
(),
(),
),
(
"Buyers pay for what they use",
"Yes",
(),
(),
(),
),
(
"What your team will deliver",
"No",
(),
(),
(),
),
(
"Contractual responsibility and accountability",
"Yes",
(),
(),
(),
),
),
),
( # expected result for "Grounds for mandatory exclusion" section as returned by
# _extract_section_information
"Grounds for mandatory exclusion",
"/suppliers/frameworks/g-cloud-9/declaration/edit/grounds-for-mandatory-exclusion",
(
(
"Organised crime or conspiracy convictions",
"No",
(),
(),
(),
),
(
"Bribery or corruption convictions",
"Yes",
(),
(),
(),
),
(
"Fraud convictions",
"No",
(),
(),
(),
),
(
"Terrorism convictions",
"Yes",
(),
(),
(),
),
(
"Organised crime convictions",
"No",
(),
(),
(),
),
),
),
( # expected result for "How you’ll deliver your services" section as returned by
# _extract_section_information
"How you’ll deliver your services",
"/suppliers/frameworks/g-cloud-9/declaration/edit/how-youll-deliver-your-services",
(
(
"Subcontractors or consortia",
"yourself without the use of third parties (subcontractors)",
(),
(),
(),
),
),
),
),
) for declaration_status in ("started", "complete",)),
) for prefill_fw_slug, q_link_text_prefillable_section in (
# test all of the previous combinations with two possible values of prefill_fw_slug
(None, "Answer question",),
("some-previous-framework", "Review answer",),
)))
# this parametrization is more straightforward than _common_parametrization because it doesn't assert any
# section contents. G7 doesn't (yet?) have any "short names" for its questions, so the overview lists the
# answers against their full verbose question texts; asserting the content of any G7 section would therefore
# require a reference copy of all of those question texts kept here. we don't want to do that, so for now we
# don't assert any G7 sections...
_g7_parametrization = (
("g-cloud-7", dict(FULL_G7_SUBMISSION, status="started"), True, None, ()),
("g-cloud-7", dict(FULL_G7_SUBMISSION, status="complete"), True, None, ()),
("g-cloud-7", None, False, None, ()),
("g-cloud-7", {}, False, None, ()),
)
@pytest.mark.parametrize(
"framework_slug,declaration,decl_valid,prefill_fw_slug,expected_sections",
_g7_parametrization
)
def test_display_open(self, framework_slug, declaration, decl_valid, prefill_fw_slug, expected_sections):
self._setup_data_api_client("open", framework_slug, declaration, prefill_fw_slug)
self.login()
response = self.client.get("/suppliers/frameworks/{}/declaration".format(framework_slug))
assert response.status_code == 200
doc = html.fromstring(response.get_data(as_text=True))
breadcrumbs = doc.xpath("//div[@class='govuk-breadcrumbs']/ol/li")
assert tuple(li.xpath("normalize-space(string())") for li in breadcrumbs) == (
"Digital Marketplace",
"Your account",
"Apply to F-Cumulus 0",
"Your declaration overview",
)
assert tuple(li.xpath(".//a/@href") for li in breadcrumbs) == (
['/'],
['/suppliers'],
[f'/suppliers/frameworks/{framework_slug}'],
[],
)
assert bool(doc.xpath(
"//p[contains(normalize-space(string()), $t)][contains(normalize-space(string()), $f)]",
t="You must answer all questions and make your declaration before",
f="F-Cumulus 0",
)) is not decl_valid
assert bool(doc.xpath(
"//p[contains(normalize-space(string()), $t)][contains(normalize-space(string()), $f)]",
t="You must make your declaration before",
f="F-Cumulus 0",
)) is (decl_valid and declaration.get("status") != "complete")
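# the edit hint is expected exactly twice (assumption: it is rendered both above and below the answer tables)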
assert len(doc.xpath(
"//p[contains(normalize-space(string()), $t)]",
t="You can come back and edit your answers at any time before the deadline.",
)) == (2 if decl_valid and declaration.get("status") != "complete" else 0)
assert len(doc.xpath(
"//p[contains(normalize-space(string()), $t)][not(contains(normalize-space(string()), $d))]",
t="You can come back and edit your answers at any time",
d="deadline",
)) == (2 if decl_valid and declaration.get("status") == "complete" else 0)
if prefill_fw_slug is None:
assert not doc.xpath("//a[normalize-space(string())=$t]", t="Review answer")
assert bool(doc.xpath(
"//a[normalize-space(string())=$a or normalize-space(string())=$b]",
a="Answer question",
b="Review answer",
)) is not decl_valid
if not decl_valid:
# assert that all links with the label "Answer question" or "Review answer" link to some subpage (by
# asserting that there are none that don't, having previously determined that such-labelled links exist)
assert not doc.xpath(
# we want the href to *contain* $u but not *be* $u
"//a[normalize-space(string())=$a or normalize-space(string())=$b]"
"[not(starts-with(@href, $u)) or @href=$u]",
a="Answer question",
b="Review answer",
u="/suppliers/frameworks/{}/declaration/".format(framework_slug),
)
if decl_valid and declaration.get("status") != "complete":
mdf_actions = doc.xpath(
"//form[@method='POST'][.//button[normalize-space(string())=$t]]"
"[.//input[@name='csrf_token']]/@action",
t="Make declaration",
)
assert len(mdf_actions) == 2
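# normalise each form action with urljoin so relative and absolute actions are compared alike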
assert all(
urljoin("/suppliers/frameworks/{}/declaration".format(framework_slug), action) ==
"/suppliers/frameworks/{}/declaration".format(framework_slug)
for action in mdf_actions
)
else:
assert not doc.xpath("//button[normalize-space(string())=$t]", t="Make declaration")
assert doc.xpath(
"//a[normalize-space(string())=$t][@href=$u]",
t="Return to application",
u="/suppliers/frameworks/{}".format(framework_slug),
)
for expected_section in expected_sections:
assert self._extract_section_information(doc, expected_section[0]) == expected_section
@pytest.mark.parametrize(
"framework_slug,declaration,decl_valid,prefill_fw_slug,expected_sections",
tuple(
(
framework_slug,
declaration,
decl_valid,
prefill_fw_slug,
expected_sections,
)
for framework_slug, declaration, decl_valid, prefill_fw_slug, expected_sections
in chain(_common_parametrization, _g7_parametrization)
if (declaration or {}).get("status") == "complete"
)
)
@pytest.mark.parametrize("framework_status", ("pending", "standstill", "live", "expired",))
def test_display_closed(
self,
framework_status,
framework_slug,
declaration,
decl_valid,
prefill_fw_slug,
expected_sections,
):
self._setup_data_api_client(framework_status, framework_slug, declaration, prefill_fw_slug)
self.login()
response = self.client.get("/suppliers/frameworks/{}/declaration".format(framework_slug))
assert response.status_code == 200
doc = html.fromstring(response.get_data(as_text=True))
breadcrumbs = doc.xpath("//div[@class='govuk-breadcrumbs']/ol/li")
assert tuple(li.xpath("normalize-space(string())") for li in breadcrumbs) == (
"Digital Marketplace",
"Your account",
"Your F-Cumulus 0 application",
"Your declaration overview",
)
assert tuple(li.xpath(".//a/@href") for li in breadcrumbs) == (
['/'],
['/suppliers'],
[f'/suppliers/frameworks/{framework_slug}'],
[],
)
# there shouldn't be any links to the "edit" page
assert not any(
urljoin("/suppliers/frameworks/{}/declaration".format(framework_slug), a.attrib["href"]).startswith(
"/suppliers/frameworks/{}/declaration/edit/".format(framework_slug)
)
for a in doc.xpath("//a[@href]")
)
# no submittable forms should be pointing at ourselves
assert not any(
urljoin(
"/suppliers/frameworks/{}/declaration".format(framework_slug),
form.attrib["action"],
) == "/suppliers/frameworks/{}/declaration".format(framework_slug)
for form in doc.xpath("//form[.//input[@type='submit'] or .//button]")
)
assert not doc.xpath("//a[@href][normalize-space(string())=$label]", label="Answer question")
assert not doc.xpath("//a[@href][normalize-space(string())=$label]", label="Review answer")
assert not doc.xpath("//p[contains(normalize-space(string()), $t)]", t="make your declaration")
assert not doc.xpath("//p[contains(normalize-space(string()), $t)]", t="edit your answers")
for expected_section in expected_sections:
assert self._extract_section_information(
doc,
expected_section[0],
expect_edit_link=False,
) == self._section_information_strip_edit_href(expected_section)
@pytest.mark.parametrize(
"framework_slug,declaration,decl_valid,prefill_fw_slug,expected_sections",
tuple(
(
framework_slug,
declaration,
decl_valid,
prefill_fw_slug,
expected_sections,
)
for framework_slug, declaration, decl_valid, prefill_fw_slug, expected_sections
in chain(_common_parametrization, _g7_parametrization)
if (declaration or {}).get("status") != "complete"
)
)
@pytest.mark.parametrize("framework_status", ("pending", "standstill", "live", "expired",))
def test_error_closed(
self,
framework_status,
framework_slug,
declaration,
decl_valid,
prefill_fw_slug,
expected_sections,
):
self._setup_data_api_client(framework_status, framework_slug, declaration, prefill_fw_slug)
self.login()
response = self.client.get("/suppliers/frameworks/{}/declaration".format(framework_slug))
assert response.status_code == 410
@pytest.mark.parametrize("framework_status", ("coming", "open", "pending", "standstill", "live", "expired",))
def test_error_nonexistent_framework(self, framework_status):
self._setup_data_api_client(framework_status, "g-cloud-31415", {"status": "complete"}, None)
self.login()
response = self.client.get("/suppliers/frameworks/g-cloud-31415/declaration")
assert response.status_code == 404
class TestDeclarationSubmit(BaseApplicationTest, MockEnsureApplicationCompanyDetailsHaveBeenConfirmedMixin):
def setup_method(self, method):
super().setup_method(method)
self.data_api_client_patch = mock.patch('app.main.views.frameworks.data_api_client', autospec=True)
self.data_api_client = self.data_api_client_patch.start()
def teardown_method(self, method):
self.data_api_client_patch.stop()
super().teardown_method(method)
@pytest.mark.parametrize("prefill_fw_slug", (None, "some-previous-framework",))
@pytest.mark.parametrize("invalid_declaration", (
None,
{},
{
# not actually complete - only first section is
"status": "complete",
"unfairCompetition": False,
"skillsAndResources": False,
"offerServicesYourselves": False,
"fullAccountability": True,
},
))
def test_invalid_declaration(self, invalid_declaration, prefill_fw_slug):
self.login()
self.data_api_client.get_framework.side_effect = assert_args_and_return(
self.framework(slug="g-cloud-9", name="G-Cloud 9", status="open"),
"g-cloud-9",
)
self.data_api_client.get_supplier_framework_info.side_effect = assert_args_and_return(
self.supplier_framework(
framework_slug="g-cloud-9",
declaration=invalid_declaration,
prefill_declaration_from_framework_slug=prefill_fw_slug, # should have zero effect
),
1234,
"g-cloud-9",
)
self.data_api_client.set_supplier_declaration.side_effect = AssertionError("This shouldn't be called")
response = self.client.post("/suppliers/frameworks/g-cloud-9/declaration")
assert response.status_code == 400
@pytest.mark.parametrize("prefill_fw_slug", (None, "some-previous-framework",))
@pytest.mark.parametrize("declaration_status", ("started", "complete",))
@mock.patch("dmutils.s3.S3") # needed by the framework dashboard which our request gets redirected to
def test_valid_declaration(self, s3, prefill_fw_slug, declaration_status):
self.login()
self.data_api_client.get_framework.side_effect = assert_args_and_return(
self.framework(slug="g-cloud-9", name="G-Cloud 9", status="open"),
"g-cloud-9",
)
self.data_api_client.get_supplier_framework_info.side_effect = assert_args_and_return(
self.supplier_framework(
framework_slug="g-cloud-9",
declaration=dict(status=declaration_status, **(valid_g9_declaration_base())),
prefill_declaration_from_framework_slug=prefill_fw_slug, # should have zero effect
),
1234,
"g-cloud-9",
)
self.data_api_client.set_supplier_declaration.side_effect = assert_args_and_return(
dict(status="complete", **(valid_g9_declaration_base())),
1234,
"g-cloud-9",
dict(status="complete", **(valid_g9_declaration_base())),
"email@email.com",
)
response = self.client.post("/suppliers/frameworks/g-cloud-9/declaration", follow_redirects=True)
# args of call are asserted by mock's side_effect
assert self.data_api_client.set_supplier_declaration.called is True
# this will be the response from the redirected-to view
assert response.status_code == 200
@pytest.mark.parametrize("framework_status", ("standstill", "pending", "live", "expired",))
def test_closed_framework_state(self, framework_status):
self.login()
self.data_api_client.get_framework.side_effect = assert_args_and_return(
self.framework(status=framework_status),
"g-cloud-7",
)
self.data_api_client.get_supplier_framework_info.side_effect = assert_args_and_return(
self.supplier_framework(framework_slug="g-cloud-7"),
1234,
"g-cloud-7",
)
self.data_api_client.set_supplier_declaration.side_effect = AssertionError("This shouldn't be called")
response = self.client.post("/suppliers/frameworks/g-cloud-7/declaration")
assert response.status_code == 404
class TestSupplierDeclaration(BaseApplicationTest, MockEnsureApplicationCompanyDetailsHaveBeenConfirmedMixin):
def setup_method(self, method):
super().setup_method(method)
self.data_api_client_patch = mock.patch('app.main.views.frameworks.data_api_client', autospec=True)
self.data_api_client = self.data_api_client_patch.start()
def teardown_method(self, method):
self.data_api_client_patch.stop()
super().teardown_method(method)
@pytest.mark.parametrize("empty_declaration", ({}, None,))
def test_get_with_no_previous_answers(self, empty_declaration):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='open')
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
framework_slug="g-cloud-7",
declaration=empty_declaration,
)
self.data_api_client.get_supplier_declaration.side_effect = APIError(mock.Mock(status_code=404))
res = self.client.get('/suppliers/frameworks/g-cloud-7/declaration/edit/g-cloud-7-essentials')
assert res.status_code == 200
doc = html.fromstring(res.get_data(as_text=True))
assert doc.xpath('//input[@id="PR-1-yes"]/@checked') == []
assert doc.xpath('//input[@id="PR-1-no"]/@checked') == []
def test_get_with_with_previous_answers(self):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='open')
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
framework_slug="g-cloud-7",
declaration={"status": "started", "PR1": False}
)
res = self.client.get('/suppliers/frameworks/g-cloud-7/declaration/edit/g-cloud-7-essentials')
assert res.status_code == 200
doc = html.fromstring(res.get_data(as_text=True))
assert len(doc.xpath('//input[@id="input-PR1-2"]/@checked')) == 1
def test_get_with_with_prefilled_answers(self):
self.login()
# Handle calls for both the current framework and for the framework to pre-fill from
self.data_api_client.get_framework.side_effect = lambda framework_slug: {
"g-cloud-9": self.framework(slug='g-cloud-9', name='G-Cloud 9', status='open'),
"digital-outcomes-and-specialists-2": self.framework(
slug='digital-outcomes-and-specialists-2',
name='Digital Stuff 2', status='live'
)
}[framework_slug]
# Current framework application information
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
framework_slug="g-cloud-9",
declaration={"status": "started"},
prefill_declaration_from_framework_slug="digital-outcomes-and-specialists-2"
)
# The previous declaration to prefill from
self.data_api_client.get_supplier_declaration.return_value = {
'declaration': self.supplier_framework(
framework_slug="digital-outcomes-and-specialists-2",
declaration={
"status": "complete",
"conspiracy": True,
"corruptionBribery": False,
"fraudAndTheft": True,
"terrorism": False,
"organisedCrime": False,
}
)["frameworkInterest"]["declaration"]
}
# The grounds-for-mandatory-exclusion section has "prefill: True" in the declaration manifest
res = self.client.get(
'/suppliers/frameworks/g-cloud-9/declaration/edit/grounds-for-mandatory-exclusion'
)
assert res.status_code == 200
self.data_api_client.get_supplier_declaration.assert_called_once_with(
1234, "digital-outcomes-and-specialists-2"
)
doc = html.fromstring(res.get_data(as_text=True))
# Radio buttons have been pre-filled with the correct answers
assert len(doc.xpath('//input[@id="input-conspiracy-1"][@value="True"]/@checked')) == 1
assert len(doc.xpath('//input[@id="input-corruptionBribery-2"][@value="False"]/@checked')) == 1
assert len(doc.xpath('//input[@id="input-fraudAndTheft-1"][@value="True"]/@checked')) == 1
assert len(doc.xpath('//input[@id="input-terrorism-2"][@value="False"]/@checked')) == 1
assert len(doc.xpath('//input[@id="input-organisedCrime-2"][@value="False"]/@checked')) == 1
# Blue banner message is shown at top of page
assert doc.xpath('normalize-space(string(//div[@class="banner-information-without-action"]))') == \
"Answers on this page are from an earlier declaration and need review."
# Blue information messages are shown next to each question
info_messages = doc.xpath('//div[@class="message-wrapper"]//span[@class="message-content"]')
assert len(info_messages) == 5
for message in info_messages:
assert self.strip_all_whitespace(message.text) == self.strip_all_whitespace(
"This answer is from your Digital Stuff 2 declaration"
)
def test_get_with_with_partially_prefilled_answers(self):
self.login()
# Handle calls for both the current framework and for the framework to pre-fill from
self.data_api_client.get_framework.side_effect = lambda framework_slug: {
"g-cloud-9": self.framework(slug='g-cloud-9', name='G-Cloud 9', status='open'),
"digital-outcomes-and-specialists-2": self.framework(
slug='digital-outcomes-and-specialists-2',
name='Digital Stuff 2', status='live'
)
}[framework_slug]
# Current framework application information
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
framework_slug="g-cloud-9",
declaration={"status": "started"},
prefill_declaration_from_framework_slug="digital-outcomes-and-specialists-2"
)
# The previous declaration to prefill from - missing "corruptionBribery" and "terrorism" keys
self.data_api_client.get_supplier_declaration.return_value = {
'declaration': self.supplier_framework(
framework_slug="digital-outcomes-and-specialists-2",
declaration={
"status": "complete",
"conspiracy": True,
"fraudAndTheft": True,
"organisedCrime": False
}
)["frameworkInterest"]["declaration"]
}
# The grounds-for-mandatory-exclusion section has "prefill: True" in the declaration manifest
res = self.client.get('/suppliers/frameworks/g-cloud-9/declaration/edit/grounds-for-mandatory-exclusion')
assert res.status_code == 200
self.data_api_client.get_supplier_declaration.assert_called_once_with(
1234, "digital-outcomes-and-specialists-2"
)
doc = html.fromstring(res.get_data(as_text=True))
# Radio buttons have been pre-filled with the correct answers
assert len(doc.xpath('//input[@id="input-conspiracy-1"][@value="True"]/@checked')) == 1
assert len(doc.xpath('//input[@id="input-fraudAndTheft-1"][@value="True"]/@checked')) == 1
assert len(doc.xpath('//input[@id="input-organisedCrime-2"][@value="False"]/@checked')) == 1
# Radio buttons for missing keys exist but have not been pre-filled
assert len(doc.xpath('//input[@id="input-corruptionBribery-1"]')) == 1
assert len(doc.xpath('//input[@id="input-corruptionBribery-2"]')) == 1
assert len(doc.xpath('//input[@id="input-corruptionBribery-1"]/@checked')) == 0
assert len(doc.xpath('//input[@id="input-corruptionBribery-2"]/@checked')) == 0
assert len(doc.xpath('//input[@id="input-terrorism-1"]')) == 1
assert len(doc.xpath('//input[@id="input-terrorism-2"]')) == 1
assert len(doc.xpath('//input[@id="input-terrorism-1"]/@checked')) == 0
assert len(doc.xpath('//input[@id="input-terrorism-2"]/@checked')) == 0
# Blue banner message is shown at top of page
assert doc.xpath('normalize-space(string(//div[@class="banner-information-without-action"]))') == \
"Answers on this page are from an earlier declaration and need review."
# Blue information messages are shown next to pre-filled questions only
info_messages = doc.xpath('//div[@class="message-wrapper"]//span[@class="message-content"]')
assert len(info_messages) == 3
for message in info_messages:
assert self.strip_all_whitespace(message.text) == self.strip_all_whitespace(
"This answer is from your Digital Stuff 2 declaration"
)
def test_answers_not_prefilled_if_section_has_already_been_saved(self):
self.login()
# Handle calls for both the current framework and for the framework to pre-fill from
self.data_api_client.get_framework.side_effect = lambda framework_slug: {
"g-cloud-9": self.framework(slug='g-cloud-9', name='G-Cloud 9', status='open'),
"digital-outcomes-and-specialists-2": self.framework(
slug='digital-outcomes-and-specialists-2',
name='Digital Stuff 2', status='live'
)
}[framework_slug]
# Current framework application information with the grounds-for-mandatory-exclusion section complete
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
framework_slug="g-cloud-9",
declaration={
"status": "started",
"conspiracy": False,
"corruptionBribery": True,
"fraudAndTheft": False,
"terrorism": True,
"organisedCrime": False
},
prefill_declaration_from_framework_slug="digital-outcomes-and-specialists-2"
)
# The previous declaration to prefill from - has relevant answers but should never be fetched
self.data_api_client.get_supplier_declaration.return_value = {
'declaration': self.supplier_framework(
framework_slug="digital-outcomes-and-specialists-2",
declaration={
"status": "complete",
"conspiracy": True,
"corruptionBribery": False,
"fraudAndTheft": True,
"terrorism": False,
"organisedCrime": False
}
)["frameworkInterest"]["declaration"]
}
# The grounds-for-mandatory-exclusion section has "prefill: True" in the declaration manifest
res = self.client.get(
'/suppliers/frameworks/g-cloud-9/declaration/edit/grounds-for-mandatory-exclusion'
)
assert res.status_code == 200
doc = html.fromstring(res.get_data(as_text=True))
# Previous framework and declaration have not been fetched
assert self.data_api_client.get_framework.call_args_list == [
mock.call('g-cloud-9'),
mock.call('g-cloud-9')
]
assert self.data_api_client.get_supplier_declaration.called is False
# Radio buttons have been filled with the current answers; not those from previous declaration
assert len(doc.xpath('//input[@id="input-conspiracy-2"][@value="False"]/@checked')) == 1
assert len(doc.xpath('//input[@id="input-corruptionBribery-1"][@value="True"]/@checked')) == 1
assert len(doc.xpath('//input[@id="input-fraudAndTheft-2"][@value="False"]/@checked')) == 1
assert len(doc.xpath('//input[@id="input-terrorism-1"][@value="True"]/@checked')) == 1
assert len(doc.xpath('//input[@id="input-organisedCrime-2"][@value="False"]/@checked')) == 1
# No blue banner message is shown at top of page
assert len(doc.xpath('//div[@class="banner-information-without-action"]')) == 0
# No blue information messages are shown next to each question
info_messages = doc.xpath('//div[@class="message-wrapper"]//span[@class="message-content"]')
assert len(info_messages) == 0
def test_answers_not_prefilled_if_section_marked_as_prefill_false(self):
self.login()
# Handle calls for both the current framework and for the framework to pre-fill from
self.data_api_client.get_framework.side_effect = lambda framework_slug: {
"g-cloud-9": self.framework(slug='g-cloud-9', name='G-Cloud 9', status='open'),
"digital-outcomes-and-specialists-2": self.framework(
slug='digital-outcomes-and-specialists-2',
name='Digital Stuff 2', status='live'
)
}[framework_slug]
# Current framework application information
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
framework_slug="g-cloud-9",
declaration={"status": "started"},
prefill_declaration_from_framework_slug="digital-outcomes-and-specialists-2"
)
# The previous declaration to prefill from - has relevant answers but should never be fetched
self.data_api_client.get_supplier_declaration.return_value = {
'declaration': self.supplier_framework(
framework_slug="digital-outcomes-and-specialists-2",
declaration={
"status": "complete",
"readUnderstoodGuidance": True,
"understandTool": True,
"understandHowToAskQuestions": False
}
)["frameworkInterest"]["declaration"]
}
# The how-you-apply section has "prefill: False" in the declaration manifest
res = self.client.get(
'/suppliers/frameworks/g-cloud-9/declaration/edit/how-you-apply'
)
assert res.status_code == 200
doc = html.fromstring(res.get_data(as_text=True))
# Previous framework and declaration have not been fetched
assert self.data_api_client.get_framework.call_args_list == [
mock.call('g-cloud-9'),
mock.call('g-cloud-9'),
]
assert self.data_api_client.get_supplier_declaration.called is False
# Radio buttons exist on page but have not been populated at all
assert len(doc.xpath('//input[@id="input-readUnderstoodGuidance-1"]')) == 1
assert len(doc.xpath('//input[@id="input-readUnderstoodGuidance-2"]')) == 1
assert len(doc.xpath('//input[@id="input-readUnderstoodGuidance-1"]/@checked')) == 0
assert len(doc.xpath('//input[@id="input-readUnderstoodGuidance-2"]/@checked')) == 0
assert len(doc.xpath('//input[@id="input-understandTool-1"]')) == 1
assert len(doc.xpath('//input[@id="input-understandTool-2"]')) == 1
assert len(doc.xpath('//input[@id="input-understandTool-1"]/@checked')) == 0
assert len(doc.xpath('//input[@id="input-understandTool-2"]/@checked')) == 0
assert len(doc.xpath('//input[@id="input-understandHowToAskQuestions-1"]')) == 1
assert len(doc.xpath('//input[@id="input-understandHowToAskQuestions-2"]')) == 1
assert len(doc.xpath('//input[@id="input-understandHowToAskQuestions-1"]/@checked')) == 0
assert len(doc.xpath('//input[@id="input-understandHowToAskQuestions-2"]/@checked')) == 0
# No blue banner message is shown at top of page
assert len(doc.xpath('//div[@class="banner-information-without-action"]')) == 0
# No blue information messages are shown next to each question
info_messages = doc.xpath('//div[@class="message-wrapper"]//span[@class="message-content"]')
assert len(info_messages) == 0
def test_post_valid_data(self):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='open')
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
framework_slug="g-cloud-7",
declaration={"status": "started"}
)
res = self.client.post(
'/suppliers/frameworks/g-cloud-7/declaration/edit/g-cloud-7-essentials',
data=FULL_G7_SUBMISSION
)
assert res.status_code == 302
assert self.data_api_client.set_supplier_declaration.called is True
@mock.patch('dmutils.s3.S3')
def test_post_valid_data_with_document_upload(self, s3):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='open', slug="g-cloud-11")
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
framework_slug="g-cloud-11",
declaration={"status": "started"}
)
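# Time is frozen so the timestamp embedded in the uploaded document's S3 filename is predictable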
with freeze_time('2017-11-12 13:14:15'):
res = self.client.post(
'/suppliers/frameworks/g-cloud-11/declaration/edit/modern-slavery',
data={
'modernSlaveryTurnover': False,
'modernSlaveryReportingRequirements': None,
'mitigatingFactors3': None,
'modernSlaveryStatement': None,
'modernSlaveryStatementOptional': (BytesIO(valid_pdf_bytes), 'document.pdf')
}
)
assert res.status_code == 302
assert self.data_api_client.set_supplier_declaration.call_args_list == [
mock.call(
1234,
"g-cloud-11",
{
'status': 'started',
'modernSlaveryTurnover': False,
'modernSlaveryReportingRequirements': None,
'mitigatingFactors3': None,
'modernSlaveryStatement': None,
'modernSlaveryStatementOptional': 'http://localhost/suppliers/assets/g-cloud-11/documents/1234/modern-slavery-statement-2017-11-12-1314.pdf' # noqa
},
"email@email.com"
)
]
s3.return_value.save.assert_called_once_with(
'g-cloud-11/documents/1234/modern-slavery-statement-2017-11-12-1314.pdf',
mock.ANY, acl='public-read'
)
def test_post_valid_data_to_complete_declaration(self):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='open')
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
framework_slug="g-cloud-7",
declaration=FULL_G7_SUBMISSION
)
res = self.client.post(
'/suppliers/frameworks/g-cloud-7/declaration/edit/grounds-for-discretionary-exclusion',
data=FULL_G7_SUBMISSION
)
assert res.status_code == 302
assert res.location == 'http://localhost/suppliers/frameworks/g-cloud-7/declaration'
assert self.data_api_client.set_supplier_declaration.called is True
assert self.data_api_client.set_supplier_declaration.call_args[0][2]['status'] == 'complete'
def test_post_valid_data_with_api_failure(self):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='open')
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
framework_slug="g-cloud-7",
declaration={"status": "started"}
)
self.data_api_client.set_supplier_declaration.side_effect = APIError(mock.Mock(status_code=400))
res = self.client.post(
'/suppliers/frameworks/g-cloud-7/declaration/edit/g-cloud-7-essentials',
data=FULL_G7_SUBMISSION
)
assert res.status_code == 400
@mock.patch('app.main.helpers.validation.G7Validator.get_error_messages_for_page')
def test_post_with_validation_errors(self, get_error_messages_for_page):
"""Test that answers are not saved if there are errors
For unit tests of the validation see :mod:`tests.app.main.helpers.test_frameworks`
"""
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='open')
get_error_messages_for_page.return_value = {'PR1': {'input_name': 'PR1', 'message': 'this is invalid'}}
res = self.client.post(
'/suppliers/frameworks/g-cloud-7/declaration/edit/g-cloud-7-essentials',
data=FULL_G7_SUBMISSION
)
assert res.status_code == 400
assert self.data_api_client.set_supplier_declaration.called is False
doc = html.fromstring(res.get_data(as_text=True))
elems = doc.cssselect('#input-PR1-1')
assert elems[0].value == 'True'
def test_post_invalidating_previously_valid_page(self):
self.login()
self.data_api_client.get_framework.return_value = self.framework(slug='g-cloud-9', status='open')
mock_supplier_framework = self.supplier_framework(
framework_slug="g-cloud-9",
declaration={
"status": "started",
"establishedInTheUK": False,
"appropriateTradeRegisters": True,
"appropriateTradeRegistersNumber": "242#353",
"licenceOrMemberRequired": "licensed",
"licenceOrMemberRequiredDetails": "Foo Bar"
}
)
self.data_api_client.get_supplier_framework_info.return_value = mock_supplier_framework
self.data_api_client.get_supplier_declaration.return_value = {
"declaration": mock_supplier_framework["frameworkInterest"]["declaration"]
}
res = self.client.post(
'/suppliers/frameworks/g-cloud-9/declaration/edit/established-outside-the-uk',
data={
"establishedInTheUK": "False",
"appropriateTradeRegisters": "True",
"appropriateTradeRegistersNumber": "242#353",
"licenceOrMemberRequired": "licensed",
# deliberately missing:
"licenceOrMemberRequiredDetails": "",
},
)
assert res.status_code == 400
assert self.data_api_client.set_supplier_declaration.called is False
def test_cannot_post_data_if_not_open(self):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='pending')
self.data_api_client.get_supplier_declaration.return_value = {
"declaration": {"status": "started"}
}
res = self.client.post(
'/suppliers/frameworks/g-cloud-7/declaration/edit/g-cloud-7-essentials',
data=FULL_G7_SUBMISSION
)
assert res.status_code == 404
assert self.data_api_client.set_supplier_declaration.called is False
@mock.patch('dmutils.s3.S3')
def test_post_declaration_answer_with_document_upload_errors(self, s3):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='open', slug="g-cloud-11")
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
framework_slug="g-cloud-11",
declaration={"status": "started"}
)
with freeze_time('2017-11-12 13:14:15'):
res = self.client.post(
'/suppliers/frameworks/g-cloud-11/declaration/edit/modern-slavery',
data={
'modernSlaveryTurnover': False,
'modernSlaveryReportingRequirements': None,
'mitigatingFactors3': None,
'modernSlaveryStatement': None,
'modernSlaveryStatementOptional': (BytesIO(b"doc"), 'document.doc')
}
)
assert res.status_code == 400
doc = html.fromstring(res.get_data(as_text=True))
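# lxml binds the $text XPath variable from the keyword argument passed to xpath() below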
assert len(doc.xpath(
"//*[contains(@class,'validation-message')][contains(normalize-space(string()), $text)]",
text="Your document is not in an open format.",
)) == 1
assert self.data_api_client.set_supplier_declaration.called is False
assert s3.return_value.save.called is False
@mock.patch('dmutils.s3.S3')
def test_post_declaration_answer_with_existing_document(self, s3):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='open', slug="g-cloud-11")
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
framework_slug="g-cloud-11",
declaration={"status": "started", "modernSlaveryStatement": "path/to/existing/upload"}
)
with freeze_time('2017-11-12 13:14:15'):
res = self.client.post(
'/suppliers/frameworks/g-cloud-11/declaration/edit/modern-slavery',
data={
'modernSlaveryTurnover': True,
'modernSlaveryReportingRequirements': True,
'mitigatingFactors3': None,
}
)
assert res.status_code == 302
assert self.data_api_client.set_supplier_declaration.called
assert s3.return_value.save.called is False
def test_has_session_timeout_warning(self):
self.data_api_client.get_framework.return_value = self.framework(status='open', slug="g-cloud-11")
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
framework_slug="g-cloud-11",
declaration={"status": "started"}
)
with freeze_time("2019-11-12 13:14:15"):
self.login() # need to login after freezing time
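# Login happens at the frozen 13:14 UTC; the warning should show the session expiry an hour later, i.e. 2:14pm GMT (assuming the app's one-hour session lifetime)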
doc = html.fromstring(
self.client.get(f"/suppliers/frameworks/g-cloud-11/declaration/edit/contact-details").data
)
assert "2:14pm GMT" in doc.xpath("string(.//div[@id='session-timeout-warning'])")
@mock.patch('dmutils.s3.S3')
class TestFrameworkUpdatesPage(BaseApplicationTest):
def setup_method(self, method):
super().setup_method(method)
self.data_api_client_patch = mock.patch('app.main.views.frameworks.data_api_client', autospec=True)
self.data_api_client = self.data_api_client_patch.start()
def teardown_method(self, method):
self.data_api_client_patch.stop()
super().teardown_method(method)
def _assert_page_title_and_table_headings(self, doc, check_for_tables=True):
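# Comparisons below are whitespace-stripped, hence run-together expected strings like 'Clarificationquestionsandanswers'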
assert self.strip_all_whitespace('G-Cloud 7 updates') in self.strip_all_whitespace(doc.xpath('//h1')[0].text)
headers = doc.xpath('//div[@class="govuk-grid-column-full"]//h2 | //table//caption//span')
assert len(headers) == 2
assert self.strip_all_whitespace(headers[0].text) == 'Communications'
assert self.strip_all_whitespace(headers[1].text) == 'Clarificationquestionsandanswers'
if check_for_tables:
table_captions = doc.xpath('//div/table/caption/span')
assert len(table_captions) == 2
assert self.strip_all_whitespace(table_captions[0].text) == 'Communications'
assert self.strip_all_whitespace(table_captions[1].text) == 'Clarificationquestionsandanswers'
def test_should_be_a_503_if_connecting_to_amazon_fails(self, s3):
self.data_api_client.get_framework.return_value = self.framework('open')
# if s3 throws a 500-level error
s3.side_effect = S3ResponseError(
{'Error': {'Code': 500, 'Message': 'Amazon has collapsed. The internet is over.'}},
'test_should_be_a_503_if_connecting_to_amazon_fails'
)
self.login()
response = self.client.get('/suppliers/frameworks/g-cloud-7/updates')
assert response.status_code == 503
doc = html.fromstring(response.get_data(as_text=True))
assert doc.xpath('//h1/text()')[0] == "Sorry, we’re experiencing technical difficulties"
def test_empty_messages_exist_if_no_files_returned(self, s3):
self.data_api_client.get_framework.return_value = self.framework('open')
self.login()
response = self.client.get('/suppliers/frameworks/g-cloud-7/updates')
assert response.status_code == 200
doc = html.fromstring(response.get_data(as_text=True))
self._assert_page_title_and_table_headings(doc, check_for_tables=False)
response_text = self.strip_all_whitespace(response.get_data(as_text=True))
assert (
self.strip_all_whitespace('<p class="govuk-body">No communications have been sent out.</p>')
in response_text
)
assert (
self.strip_all_whitespace(
'<p class="govuk-body">No clarification questions and answers have been posted yet.</p>'
)
in response_text
)
def test_dates_for_open_framework_closed_for_questions(self, s3):
self.data_api_client.get_framework.return_value = self.framework('open', clarification_questions_open=False)
self.login()
response = self.client.get('/suppliers/frameworks/g-cloud-7/updates')
data = response.get_data(as_text=True)
assert response.status_code == 200
assert 'All clarification questions and answers will be published ' \
'by 5pm BST, Tuesday 29 September 2015.' in data
assert "You can ask clarification questions until " not in data
def test_dates_for_open_framework_open_for_questions(self, s3):
self.data_api_client.get_framework.return_value = self.framework('open', clarification_questions_open=True)
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework()
self.login()
response = self.client.get('/suppliers/frameworks/g-cloud-7/updates')
data = response.get_data(as_text=True)
assert response.status_code == 200
assert 'All clarification questions and answers will be published ' \
'by 5pm BST, Tuesday 29 September 2015.' not in data
assert 'You can ask clarification questions until 5pm BST, Tuesday 22 September 2015.' in data
def test_the_tables_should_be_displayed_correctly(self, s3):
self.data_api_client.get_framework.return_value = self.framework('open')
files = [
('updates/communications/', 'file 1', 'odt'),
('updates/communications/', 'file 2', 'odt'),
('updates/clarifications/', 'file 3', 'odt'),
('updates/clarifications/', 'file 4', 'odt'),
]
# the communications table is always before the clarifications table
s3.return_value.list.return_value = [
_return_fake_s3_file_dict("g-cloud-7/communications/{}".format(section), filename, ext)
for section, filename, ext
in files
]
self.login()
response = self.client.get('/suppliers/frameworks/g-cloud-7/updates')
doc = html.fromstring(response.get_data(as_text=True))
self._assert_page_title_and_table_headings(doc)
tables = doc.xpath('//div[contains(@class, "updates-document-tables")]/table')
# test that for each table, we have the right number of rows
for table in tables:
item_rows = table.findall('.//tr[@class="summary-item-row"]')
assert len(item_rows) == 2
# test that the file names and urls are right
for row in item_rows:
section, filename, ext = files.pop(0)
filename_link = row.find('.//a[@class="document-link-with-icon"]')
assert filename in filename_link.text_content()
assert filename_link.get('href') == '/suppliers/frameworks/g-cloud-7/files/{}{}.{}'.format(
section,
filename.replace(' ', '%20'),
ext,
)
def test_names_with_the_section_name_in_them_will_display_correctly(self, s3):
self.data_api_client.get_framework.return_value = self.framework('open')
# for example: 'g-cloud-7-updates/clarifications/communications%20file.odf'
files = [
('updates/communications/', 'clarifications file', 'odt'),
('updates/clarifications/', 'communications file', 'odt')
]
s3.return_value.list.return_value = [
_return_fake_s3_file_dict("g-cloud-7/communications/{}".format(section), filename, ext)
for section, filename, ext
in files
]
self.login()
response = self.client.get('/suppliers/frameworks/g-cloud-7/updates')
doc = html.fromstring(response.get_data(as_text=True))
self._assert_page_title_and_table_headings(doc)
tables = doc.xpath('//div[contains(@class, "updates-document-tables")]/table')
# test that for each table, we have the right number of rows
for table in tables:
item_rows = table.findall('.//tr[@class="summary-item-row"]')
assert len(item_rows) == 1
# test that the file names and urls are right
for row in item_rows:
section, filename, ext = files.pop(0)
filename_link = row.find('.//a[@class="document-link-with-icon"]')
assert filename in filename_link.text_content()
assert filename_link.get('href') == '/suppliers/frameworks/g-cloud-7/files/{}{}.{}'.format(
section,
filename.replace(' ', '%20'),
ext,
)
@pytest.mark.parametrize('countersigned_path, contact_link_shown', [("path", False), (None, True)])
def test_contact_link_only_shown_if_countersigned_agreement_is_not_yet_returned(
self, s3, countersigned_path, contact_link_shown
):
self.data_api_client.get_framework.return_value = self.framework('live', clarification_questions_open=False)
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
countersigned_path=countersigned_path
)
self.login()
response = self.client.get('/suppliers/frameworks/g-cloud-7/updates')
data = response.get_data(as_text=True)
assert response.status_code == 200
assert ('Contact the support team' in data) == contact_link_shown
@mock.patch('app.main.views.frameworks.DMNotifyClient.send_email', autospec=True)
class TestSendClarificationQuestionEmail(BaseApplicationTest):
def setup_method(self, method):
super().setup_method(method)
self.data_api_client_patch = mock.patch('app.main.views.frameworks.data_api_client', autospec=True)
self.data_api_client = self.data_api_client_patch.start()
self.data_api_client.get_supplier.return_value = SupplierStub().single_result_response()
def teardown_method(self, method):
self.data_api_client_patch.stop()
super().teardown_method(method)
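# Helper to post a clarification question; S3 is patched so rendering the updates page does not hit real storage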
@mock.patch('dmutils.s3.S3')
def _send_email(self, s3, message):
self.login()
return self.client.post(
"/suppliers/frameworks/g-cloud-7/updates",
data={'clarification_question': message}
)
def test_should_call_send_email_with_correct_params_if_clarification_questions_open(self, notify_send_email):
self.data_api_client.get_framework.return_value = self.framework(
'open', name='Test Framework', clarification_questions_open=True
)
clarification_question = 'This is a clarification question.'
with freeze_time('2019-07-02 01:02:03'):
res = self._send_email(message=clarification_question)
# Assert Notify email 1 is sent (clarification question)
# Assert Notify email 2 is sent (receipt)
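# (The reference values are precomputed deterministic hashes - stable for the frozen time and fixture data - which let Notify deduplicate resends)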
notify_send_email.assert_has_calls(
[
mock.call(
mock.ANY,
to_email_address="clarification-questions@example.gov.uk",
template_name_or_id='framework-clarification-question',
personalisation={
"framework_name": "Test Framework",
"supplier_id": 1234,
"supplier_name": "My Little Company",
"supplier_reference": "2019-07-02-JRX8IN",
"clarification_question": clarification_question,
},
reference=(
"fw-clarification-question-"
"42c1W5KnFy1IaDtDEnNsOChYYluckBo_mzTuRxQawFo=-"
"9B7i7y6lXFmVCHXyU7sP0nkdNK6l8B98xRimoHMzpAw="
),
allow_resend=True,
),
mock.call(
mock.ANY, # DMNotifyClient
to_email_address="email@email.com",
template_name_or_id='confirmation_of_clarification_question',
personalisation={
'user_name': 'Năme',
'framework_name': 'Test Framework',
"supplier_reference": "2019-07-02-JRX8IN",
'clarification_question_text': clarification_question,
},
reference=(
"fw-clarification-question-confirm-"
"42c1W5KnFy1IaDtDEnNsOChYYluckBo_mzTuRxQawFo=-"
"8yc90Y2VvBnVHT5jVuSmeebxOCRJcnKicOe7VAsKu50="
),
reply_to_address_id='24908180-b64e-513d-ab48-fdca677cec52',
)
]
)
# Assert audit event
self.data_api_client.create_audit_event.assert_called_with(
audit_type=AuditTypes.send_clarification_question,
user="email@email.com",
object_type="suppliers",
object_id=1234,
data={"question": clarification_question, 'framework': 'g-cloud-7'}
)
assert res.status_code == 200
# Assert flash message
doc = html.fromstring(res.get_data(as_text=True))
flash_message = doc.cssselect(".dm-alert")[0]
assert (
flash_message.cssselect(".dm-alert__title")[0].text.strip()
==
"Your clarification question has been sent. Answers to all "
"clarification questions will be published on this page."
)
def test_email_not_sent_if_clarification_questions_closed(self, notify_send_email):
self.data_api_client.get_framework.return_value = self.framework(
'open', name='Test Framework', clarification_questions_open=False
)
response = self._send_email(message='I have missed the clarification question deadline!')
assert response.status_code == 400
assert notify_send_email.called is False
assert self.data_api_client.create_audit_event.called is False
@pytest.mark.parametrize(
'invalid_clarification_question',
(
# Empty question
{'question': '', 'error_message': 'Add text if you want to ask a question.'},
# Whitespace only question
{'question': '\t \n\n\n', 'error_message': 'Add text if you want to ask a question.'},
# Question length > 5000 characters
{'question': ('ten__chars' * 500) + '1', 'error_message': 'Question cannot be longer than 5000 characters'}
)
)
def test_should_not_send_email_if_invalid_clarification_question(
self,
notify_send_email,
invalid_clarification_question,
):
self.data_api_client.get_framework.return_value = self.framework('open')
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework()
response = self._send_email(message=invalid_clarification_question['question'])
# Assert no audit
assert self.data_api_client.create_audit_event.call_count == 0
# Assert no emails sent
assert notify_send_email.call_count == 0
assert response.status_code == 400
# Assert error message shown
assert (
self.strip_all_whitespace('There was a problem with your submitted question')
in self.strip_all_whitespace(response.get_data(as_text=True))
)
assert (
self.strip_all_whitespace(invalid_clarification_question['error_message'])
in self.strip_all_whitespace(response.get_data(as_text=True))
)
def test_should_be_a_503_if_email_fails(self, notify_send_email):
self.data_api_client.get_framework.return_value = self.framework('open', name='Test Framework')
notify_send_email.side_effect = EmailError("Arrrgh")
clarification_question = 'This is a clarification question.'
with freeze_time('2019-07-02 01:02:03'):
response = self._send_email(message=clarification_question)
# Assert send_email is called only once
notify_send_email.assert_called_once_with(
mock.ANY,
to_email_address="clarification-questions@example.gov.uk",
template_name_or_id='framework-clarification-question',
personalisation={
"framework_name": "Test Framework",
"supplier_id": 1234,
"supplier_name": "My Little Company",
"supplier_reference": "2019-07-02-JRX8IN",
"clarification_question": clarification_question,
},
reference=(
"fw-clarification-question-"
"42c1W5KnFy1IaDtDEnNsOChYYluckBo_mzTuRxQawFo=-"
"9B7i7y6lXFmVCHXyU7sP0nkdNK6l8B98xRimoHMzpAw="
),
allow_resend=True,
)
# Assert no audit
assert self.data_api_client.create_audit_event.call_count == 0
assert response.status_code == 503
def test_should_fail_silently_if_receipt_email_fails(self, notify_send_email):
notify_send_email.side_effect = [None, EmailError("Arrrgh")]
self.data_api_client.get_framework.return_value = self.framework('open', name='Test Framework',
clarification_questions_open=True)
clarification_question = 'This is a clarification question.'
with freeze_time('2019-07-02 01:02:03'):
response = self._send_email(message=clarification_question)
# first email sends, second email fails
notify_send_email.assert_has_calls(
[
mock.call(
mock.ANY,
to_email_address="clarification-questions@example.gov.uk",
template_name_or_id="framework-clarification-question",
personalisation={
"framework_name": "Test Framework",
"supplier_id": 1234,
"supplier_name": "My Little Company",
"supplier_reference": "2019-07-02-JRX8IN",
"clarification_question": clarification_question,
},
reference=(
"fw-clarification-question-"
"42c1W5KnFy1IaDtDEnNsOChYYluckBo_mzTuRxQawFo=-"
"9B7i7y6lXFmVCHXyU7sP0nkdNK6l8B98xRimoHMzpAw="
),
allow_resend=True,
),
mock.call(
mock.ANY, # DMNotifyClient
to_email_address="email@email.com",
template_name_or_id='confirmation_of_clarification_question',
personalisation={
'user_name': 'Năme',
'framework_name': 'Test Framework',
"supplier_reference": "2019-07-02-JRX8IN",
'clarification_question_text': clarification_question,
},
reference=(
"fw-clarification-question-confirm-"
"42c1W5KnFy1IaDtDEnNsOChYYluckBo_mzTuRxQawFo=-"
"8yc90Y2VvBnVHT5jVuSmeebxOCRJcnKicOe7VAsKu50="
),
reply_to_address_id='24908180-b64e-513d-ab48-fdca677cec52',
)
]
)
# assert reached end of view and redirected
assert response.status_code == 200
@mock.patch('app.main.views.frameworks.count_unanswered_questions')
class TestFrameworkSubmissionLots(BaseApplicationTest, MockEnsureApplicationCompanyDetailsHaveBeenConfirmedMixin):
def setup_method(self, method):
super().setup_method(method)
self.get_metadata_patch = mock.patch('app.main.views.frameworks.content_loader.get_metadata')
self.get_metadata = self.get_metadata_patch.start()
self.get_metadata.return_value = 'g-cloud-6'
self.data_api_client_patch = mock.patch('app.main.views.frameworks.data_api_client', autospec=True)
self.data_api_client = self.data_api_client_patch.start()
def teardown_method(self, method):
self.data_api_client_patch.stop()
super().teardown_method(method)
self.get_metadata_patch.stop()
def test_drafts_list_progress_count(self, count_unanswered):
self.login()
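# count_unanswered_questions returns a (required, optional) tuple of unanswered question counts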
count_unanswered.return_value = 3, 1
self.data_api_client.get_framework.return_value = self.framework(status='open')
self.data_api_client.find_draft_services_iter.return_value = [
{'serviceName': 'draft', 'lotSlug': 'scs', 'status': 'not-submitted'}
]
submissions = self.client.get('/suppliers/frameworks/g-cloud-7/submissions')
assert '1 draft service' in submissions.get_data(as_text=True)
assert 'complete service' not in submissions.get_data(as_text=True)
@pytest.mark.parametrize('framework_slug, show_service_data', (
('digital-outcomes-and-specialists-2', 0),
('g-cloud-9', 1),
))
def test_submission_lots_page_shows_use_of_service_data_if_g_cloud_family(
self, count_unanswered, framework_slug, show_service_data
):
self.login()
self.data_api_client.get_framework.return_value = self.framework(slug=framework_slug, status="open")
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
framework_slug=framework_slug
)
res = self.client.get(f"/suppliers/frameworks/{framework_slug}/submissions")
assert res.status_code == 200
doc = html.fromstring(res.get_data(as_text=True))
use_of_data = doc.xpath('//div[contains(@class, "use-of-service-data")]')
assert len(use_of_data) == show_service_data
if show_service_data:
assert 'The service information you provide here:' in use_of_data[0].text_content()
@pytest.mark.parametrize(
'declaration, should_show_declaration_link, declaration_link_url',
(
({'declaration': {}}, True, '/suppliers/frameworks/g-cloud-7/declaration/start'),
({'declaration': {'status': 'started'}}, True, '/suppliers/frameworks/g-cloud-7/declaration'),
({'declaration': {'status': 'complete'}}, False, None),
)
)
def test_banner_on_submission_lot_page_shows_link_to_declaration(
self, count_unanswered, declaration, should_show_declaration_link, declaration_link_url
):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='open')
self.data_api_client.get_supplier.return_value = SupplierStub().single_result_response()
self.data_api_client.get_supplier_declaration.return_value = declaration
self.data_api_client.find_draft_services_iter.return_value = [
{'serviceName': 'draft', 'lotSlug': 'scs', 'status': 'submitted'}
]
submissions = self.client.get('/suppliers/frameworks/g-cloud-7/submissions')
if should_show_declaration_link:
doc = html.fromstring(submissions.get_data(as_text=True))
assert doc.xpath('//*[@class="banner-information-without-action"]')
decl_element = doc.xpath(
"//*[contains(@class,'banner-content')][contains(normalize-space(string()), $text)]",
text="make your supplier declaration",
)
assert decl_element[0].xpath('.//a[@href=$url]', url=declaration_link_url)
else:
# Application is done - don't show warning banner
assert "Your application is not complete" not in submissions.get_data(as_text=True)
@pytest.mark.parametrize(
"incomplete_declaration,expected_url",
(
({}, "/suppliers/frameworks/g-cloud-7/declaration/start"),
({"status": "started"}, "/suppliers/frameworks/g-cloud-7/declaration")
)
)
def test_drafts_list_completed(self, count_unanswered, incomplete_declaration, expected_url):
self.login()
count_unanswered.return_value = 0, 1
self.data_api_client.get_framework.return_value = self.framework(status='open')
self.data_api_client.get_supplier_declaration.return_value = {'declaration': incomplete_declaration}
self.data_api_client.find_draft_services_iter.return_value = [
{'serviceName': 'draft', 'lotSlug': 'scs', 'status': 'submitted'}
]
self.data_api_client.get_supplier.return_value = SupplierStub(
company_details_confirmed=False
).single_result_response()
submissions = self.client.get('/suppliers/frameworks/g-cloud-7/submissions')
submissions_html = submissions.get_data(as_text=True)
assert u'1 service marked as complete' in submissions_html
assert u'draft service' not in submissions_html
assert "Your application is not complete" in submissions_html
doc = html.fromstring(submissions_html)
assert doc.xpath('//*[@class="banner-information-without-action"]')
decl_element = doc.xpath(
"//*[contains(@class,'banner-content')][contains(normalize-space(string()), $text)]",
text="make your supplier declaration",
)
assert decl_element[0].xpath('.//a[@href=$url]', url=expected_url)
def test_drafts_list_completed_with_declaration_status(self, count_unanswered):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='open')
self.data_api_client.get_supplier_declaration.return_value = {'declaration': {'status': 'complete'}}
self.data_api_client.find_draft_services_iter.return_value = [
{'serviceName': 'draft', 'lotSlug': 'scs', 'status': 'submitted'}
]
self.data_api_client.get_supplier.return_value = SupplierStub(
company_details_confirmed=False
).single_result_response()
submissions = self.client.get('/suppliers/frameworks/g-cloud-7/submissions')
submissions_html = submissions.get_data(as_text=True)
assert '1 service will be submitted' in submissions_html
assert '1 complete service was submitted' not in submissions_html
assert 'browse-list-item-status-happy' in submissions_html
assert "Your application is not complete" not in submissions_html
def test_drafts_list_services_were_submitted(self, count_unanswered):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='standstill')
self.data_api_client.get_supplier_declaration.return_value = {'declaration': {'status': 'complete'}}
self.data_api_client.find_draft_services_iter.return_value = [
{'serviceName': 'draft', 'lotSlug': 'scs', 'status': 'not-submitted'},
{'serviceName': 'draft', 'lotSlug': 'scs', 'status': 'submitted'},
]
submissions = self.client.get('/suppliers/frameworks/g-cloud-7/submissions')
assert '1 complete service was submitted' in submissions.get_data(as_text=True)
def test_dos_drafts_list_with_open_framework(self, count_unanswered):
self.login()
self.data_api_client.get_framework.return_value = self.framework(
slug='digital-outcomes-and-specialists',
status='open'
)
self.data_api_client.get_supplier_declaration.return_value = {'declaration': {'status': 'complete'}}
self.data_api_client.get_supplier.return_value = SupplierStub().single_result_response()
self.data_api_client.find_draft_services_iter.return_value = [
{'serviceName': 'draft', 'lotSlug': 'digital-specialists', 'status': 'submitted'}
]
submissions = self.client.get('/suppliers/frameworks/digital-outcomes-and-specialists/submissions')
assert 'This will be submitted' in submissions.get_data(as_text=True)
assert 'browse-list-item-status-happy' in submissions.get_data(as_text=True)
assert 'Apply to provide' in submissions.get_data(as_text=True)
assert "Your application is not complete" not in submissions.get_data(as_text=True)
def test_dos_drafts_list_with_closed_framework(self, count_unanswered):
self.login()
self.data_api_client.get_framework.return_value = self.framework(
slug="digital-outcomes-and-specialists",
status='pending'
)
self.data_api_client.get_supplier_declaration.return_value = {'declaration': {'status': 'complete'}}
self.data_api_client.find_draft_services_iter.return_value = [
{'serviceName': 'draft', 'lotSlug': 'digital-specialists', 'status': 'not-submitted'},
{'serviceName': 'draft', 'lotSlug': 'digital-specialists', 'status': 'submitted'},
]
submissions = self.client.get('/suppliers/frameworks/digital-outcomes-and-specialists/submissions')
assert submissions.status_code == 200
assert 'Submitted' in submissions.get_data(as_text=True)
assert 'Apply to provide' not in submissions.get_data(as_text=True)
@mock.patch('app.main.views.frameworks.count_unanswered_questions')
class TestG12RecoveryDraftServices(BaseApplicationTest, MockEnsureApplicationCompanyDetailsHaveBeenConfirmedMixin):
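"""Tests for the G-Cloud 12 recovery draft-services page, only shown to allow-listed recovery suppliers (e.g. 577184) once the framework is live."""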
def setup_method(self, method):
super().setup_method(method)
self.get_metadata_patch = mock.patch('app.main.views.frameworks.content_loader.get_metadata')
self.get_metadata = self.get_metadata_patch.start()
self.get_metadata.return_value = 'g-cloud-12'
self.data_api_client_patch = mock.patch('app.main.views.frameworks.data_api_client', autospec=True)
self.data_api_client = self.data_api_client_patch.start()
def teardown_method(self, method):
self.data_api_client_patch.stop()
super().teardown_method(method)
self.get_metadata_patch.stop()
@pytest.mark.parametrize('status, response_code', [('open', 404), ('live', 200)])
def test_page_exists(self, count_unanswered, status, response_code):
self.login(supplier_id=577184)
self.data_api_client.get_framework.return_value = self.framework(slug='g-cloud-12', status=status)
with self.app.app_context():
response = self.client.get('/suppliers/frameworks/g-cloud-12/draft-services')
assert response.status_code == response_code
def test_page_exists_only_for_g_cloud_12(self, count_unanswered):
self.login(supplier_id=577184)
with self.app.app_context():
response = self.client.get('/suppliers/frameworks/g-cloud-11/draft-services')
assert response.status_code == 404
@pytest.mark.parametrize('supplier_id, response_code', [(1, 404), (577184, 200)])
def test_page_exists_for_recovery_suppliers_only(self, count_unanswered, supplier_id, response_code):
self.login(supplier_id=supplier_id)
self.data_api_client.get_framework.return_value = self.framework(slug='g-cloud-12', status='live')
with self.app.app_context():
response = self.client.get('/suppliers/frameworks/g-cloud-12/draft-services')
assert response.status_code == response_code
def test_page_renders(self, count_unanswered):
self.login(supplier_id=577184)
self.data_api_client.get_framework.return_value = FrameworkStub(slug="g-cloud-12", status="live")\
.single_result_response()
with self.app.app_context():
response = self.client.get('/suppliers/frameworks/g-cloud-12/draft-services')
doc = html.fromstring(response.get_data(as_text=True))
assert doc.cssselect("h1:contains('Your G-Cloud 12 services')")
assert [el.text for el in doc.cssselect(".browse-list a")] == [
"Cloud hosting", "Cloud software", "Cloud support"
]
def test_lot_status_includes_number_of_draft_and_completed_services(self, count_unanswered):
self.login(supplier_id=577184)
self.data_api_client.get_framework.return_value = FrameworkStub(slug="g-cloud-12", status="live")\
.single_result_response()
self.data_api_client.get_supplier_declaration.return_value = {'declaration': {'status': 'complete'}}
self.data_api_client.find_draft_services_iter.return_value = [
{'serviceName': 'draft', 'lotSlug': 'cloud-hosting', 'status': 'not-submitted'},
{'serviceName': 'completed', 'lotSlug': 'cloud-hosting', 'status': 'submitted'},
]
self.data_api_client.get_supplier.return_value = SupplierStub(
company_details_confirmed=False
).single_result_response()
with self.app.app_context():
response = self.client.get('/suppliers/frameworks/g-cloud-12/draft-services')
raw_html = response.get_data(as_text=True)
assert "1 draft service" in raw_html
assert "1 service will be submitted" in raw_html
assert "1 complete service was submitted" not in raw_html
assert 'browse-list-item-status-happy' in raw_html
assert "Your application is not complete" not in raw_html
@mock.patch('app.main.views.frameworks.count_unanswered_questions')
class TestFrameworkSubmissionServices(BaseApplicationTest, MockEnsureApplicationCompanyDetailsHaveBeenConfirmedMixin):
def setup_method(self, method):
super().setup_method(method)
self.get_metadata_patch = mock.patch('app.main.views.frameworks.content_loader.get_metadata')
self.get_metadata = self.get_metadata_patch.start()
self.get_metadata.return_value = 'g-cloud-6'
self.data_api_client_patch = mock.patch('app.main.views.frameworks.data_api_client', autospec=True)
self.data_api_client = self.data_api_client_patch.start()
def teardown_method(self, method):
self.data_api_client_patch.stop()
super().teardown_method(method)
self.get_metadata_patch.stop()
def _assert_incomplete_application_banner_not_visible(self, response_html):
assert "Your application is not complete" not in response_html
def _assert_incomplete_application_banner(self, response_html, decl_item_href=None):
doc = html.fromstring(response_html)
assert "Your application is not complete" in response_html
assert doc.xpath('//*[@class="banner-information-without-action"]')
decl_element = doc.xpath(
"//*[contains(@class,'banner-content')][contains(normalize-space(string()), $text)]",
text="make your supplier declaration",
)
assert decl_element
if decl_item_href:
assert decl_element[0].xpath('.//a[@href=$url]', url=decl_item_href)
@pytest.mark.parametrize(
'framework_status, msg',
[
('open', 'Add a service'),
('pending', 'You didn’t mark any services as complete.')
]
)
def test_services_list_open_or_pending_no_complete_services(self, count_unanswered, framework_status, msg):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status=framework_status)
self.data_api_client.find_draft_services_iter.return_value = []
count_unanswered.return_value = 0
response = self.client.get('/suppliers/frameworks/g-cloud-7/submissions/iaas')
assert response.status_code == 200
assert msg in response.get_data(as_text=True)
@pytest.mark.parametrize('framework_status', ['open', 'pending'])
def test_services_list_open_or_pending_and_no_declaration(self, count_unanswered, framework_status):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status=framework_status)
self.data_api_client.get_supplier_declaration.return_value = {
"declaration": {"status": "started"}
}
response = self.client.get('/suppliers/frameworks/g-cloud-7/submissions/iaas')
assert response.status_code == 200
assert 'You made your supplier declaration' not in response.get_data(as_text=True)
def test_services_list_shows_g7_message_if_pending_and_application_made(self, count_unanswered):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='pending')
self.data_api_client.get_supplier_declaration.return_value = self.supplier_framework()['frameworkInterest']
self.data_api_client.get_supplier.return_value = SupplierStub().single_result_response()
self.data_api_client.find_draft_services_iter.return_value = [
{'serviceName': 'draft', 'lotSlug': 'scs', 'status': 'submitted'}
]
count_unanswered.return_value = 0, 1
response = self.client.get('/suppliers/frameworks/g-cloud-7/submissions/scs')
doc = html.fromstring(response.get_data(as_text=True))
assert response.status_code == 200
heading = doc.xpath('//div[@class="summary-item-lede"]//h2[@class="summary-item-heading"]')
assert len(heading) > 0
assert "G-Cloud 7 is closed for applications" in heading[0].xpath('text()')[0]
assert "You made your supplier declaration and submitted 1 complete service." in \
heading[0].xpath('../p[1]/text()')[0]
self._assert_incomplete_application_banner_not_visible(response.get_data(as_text=True))
def test_shows_g7_message_if_pending_and_services_not_submitted(self, count_unanswered):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='pending')
self.data_api_client.get_supplier_declaration.return_value = self.supplier_framework()['frameworkInterest']
self.data_api_client.get_supplier.return_value = SupplierStub().single_result_response()
self.data_api_client.find_draft_services_iter.return_value = [
{'serviceName': 'draft', 'lotSlug': 'scs', 'status': 'not-submitted'}
]
count_unanswered.return_value = 0, 1
response = self.client.get('/suppliers/frameworks/g-cloud-7/submissions/scs')
doc = html.fromstring(response.get_data(as_text=True))
assert response.status_code == 200
heading = doc.xpath('//div[@class="summary-item-lede"]//h2[@class="summary-item-heading"]')
assert len(heading) > 0
assert "G-Cloud 7 is closed for applications" in heading[0].xpath('text()')[0]
assert "You made your supplier declaration and submitted 0 complete services." in \
heading[0].xpath('../p[1]/text()')[0]
assert "These services were not completed" in doc.xpath('//main//p[@class="hint"]')[0].xpath('text()')[0]
self._assert_incomplete_application_banner_not_visible(response.get_data(as_text=True))
def test_drafts_list_progress_count(self, count_unanswered):
self.login()
count_unanswered.return_value = 3, 1
self.data_api_client.get_framework.return_value = self.framework(status='open')
self.data_api_client.find_draft_services_iter.return_value = [
{'serviceName': 'draft', 'lotSlug': 'scs', 'status': 'not-submitted'}
]
lot_page = self.client.get('/suppliers/frameworks/g-cloud-7/submissions/scs')
assert 'Service can be moved to complete' not in lot_page.get_data(as_text=True)
assert '4 unanswered questions' in lot_page.get_data(as_text=True)
def test_drafts_list_can_be_completed(self, count_unanswered):
self.login()
count_unanswered.return_value = 0, 1
self.data_api_client.get_framework.return_value = self.framework(status='open')
self.data_api_client.find_draft_services_iter.return_value = [
{'serviceName': 'draft', 'lotSlug': 'scs', 'status': 'not-submitted'}
]
res = self.client.get('/suppliers/frameworks/g-cloud-7/submissions/scs')
assert 'Service can be marked as complete' in res.get_data(as_text=True)
assert '1 optional question unanswered' in res.get_data(as_text=True)
@pytest.mark.parametrize(
"incomplete_declaration,expected_url",
(
({}, "/suppliers/frameworks/g-cloud-7/declaration/start"),
({"status": "started"}, "/suppliers/frameworks/g-cloud-7/declaration")
)
)
def test_drafts_list_completed(self, count_unanswered, incomplete_declaration, expected_url):
self.login()
count_unanswered.return_value = 0, 1
self.data_api_client.get_framework.return_value = self.framework(status='open')
self.data_api_client.get_supplier_declaration.return_value = {'declaration': incomplete_declaration}
self.data_api_client.find_draft_services_iter.return_value = [
{'serviceName': 'draft', 'lotSlug': 'scs', 'status': 'submitted'}
]
self.data_api_client.get_supplier.return_value = SupplierStub(
company_details_confirmed=False
).single_result_response()
lot_page = self.client.get('/suppliers/frameworks/g-cloud-7/submissions/scs')
lot_page_html = lot_page.get_data(as_text=True)
assert 'Service can be moved to complete' not in lot_page_html
self._assert_incomplete_application_banner(lot_page_html, decl_item_href=expected_url)
@pytest.mark.parametrize(
('copied', 'link_shown'),
(
((False, False, False), True),
((True, False, True), True),
((True, True, True), False),
)
)
def test_drafts_list_has_link_to_add_published_services_if_any_services_not_yet_copied(
self, count_unanswered, copied, link_shown
):
self.data_api_client.find_services.return_value = {
'services': [
{'question1': 'answer1', 'copiedToFollowingFramework': copied[0]},
{'question2': 'answer2', 'copiedToFollowingFramework': copied[1]},
{'question2': 'answer2', 'copiedToFollowingFramework': copied[2]},
],
}
self.data_api_client.get_framework.return_value = self.framework(status='open')
self.login()
res = self.client.get('/suppliers/frameworks/g-cloud-7/submissions/scs')
doc = html.fromstring(res.get_data(as_text=True))
link = doc.xpath(
"//*[@id='main-content']/p[1]/a[normalize-space(string())='View and add your services from G-Cloud\xa07']"
)
assert self.data_api_client.find_services.call_args_list == [
mock.call(
supplier_id=1234,
framework='g-cloud-6',
lot='scs',
status='published',
)
]
if link_shown:
assert link
assert '/suppliers/frameworks/g-cloud-7/submissions/scs/previous-services' in link[0].values()
else:
assert not link
def test_link_to_add_previous_services_not_shown_if_no_defined_previous_framework(self, count_unanswered):
self.get_metadata.side_effect = ContentNotFoundError('Not found')
self.login()
res = self.client.get('/suppliers/frameworks/g-cloud-7/submissions/scs')
doc = html.fromstring(res.get_data(as_text=True))
assert not doc.xpath("//a[normalize-space(string())='View and add your services from G-Cloud\xa07']")
def test_redirect_to_previous_services_for_lot_with_one_service_limit_and_no_drafts_and_previous_service_to_copy(
self, count_unanswered
):
self.data_api_client.get_framework.return_value = self.framework(slug='digital-outcomes-and-specialists-3')
self.data_api_client.find_draft_services_iter.return_value = []
self.get_metadata.return_value = 'digital-outcomes-and-specialists-2'
self.data_api_client.find_services.return_value = {"services": [{"copiedToFollowingFramework": False}]}
self.login()
res = self.client.get('/suppliers/frameworks/digital-outcomes-and-specialists-3/submissions/digital-outcomes')
assert res.status_code == 302
assert '/digital-outcomes-and-specialists-3/submissions/digital-outcomes/previous-services' in res.location
def test_500s_if_previous_framework_not_found(self, count_unanswered):
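# First get_framework call (the current framework) succeeds; the follow-up lookup of the previous framework 404s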
self.data_api_client.get_framework.side_effect = [
self.framework(slug='g-cloud-10'),
HTTPError(mock.Mock(status_code=404)),
]
self.data_api_client.find_draft_services_iter.return_value = []
self.login()
res = self.client.get('/suppliers/frameworks/g-cloud-10/submissions/cloud-hosting')
assert res.status_code == 500
class TestContractVariation(BaseApplicationTest):
def setup_method(self, method):
super().setup_method(method)
self.data_api_client_patch = mock.patch('app.main.views.frameworks.data_api_client', autospec=True)
self.data_api_client = self.data_api_client_patch.start()
self.good_supplier_framework = self.supplier_framework(
declaration={'nameOfOrganisation': 'A.N. Supplier',
'primaryContactEmail': 'bigboss@email.com'},
on_framework=True,
agreement_returned=True,
agreement_details={}
)
self.g8_framework = self.framework(
name='G-Cloud 8',
slug='g-cloud-8',
status='live',
framework_agreement_version='3.1'
)
self.g8_framework['frameworks']['variations'] = {"1": {"createdAt": "2018-08-16"}}
self.g9_framework = self.framework(
name='G-Cloud 9',
slug='g-cloud-9',
status='live',
framework_agreement_version='3.1'
)
self.g9_framework['frameworks']['variations'] = {"1": {"createdAt": "2018-08-16"}}
self.login()
def teardown_method(self, method):
self.data_api_client_patch.stop()
super().teardown_method(method)
def test_get_page_renders_if_all_ok(self):
self.data_api_client.get_framework.return_value = self.g8_framework
self.data_api_client.get_supplier_framework_info.return_value = self.good_supplier_framework
res = self.client.get("/suppliers/frameworks/g-cloud-8/contract-variation/1")
doc = html.fromstring(res.get_data(as_text=True))
assert res.status_code == 200
assert len(doc.xpath('//h1[contains(text(), "Accept the contract variation for G-Cloud 8")]')) == 1
def test_supplier_must_be_on_framework(self):
supplier_not_on_framework = self.good_supplier_framework.copy()
supplier_not_on_framework['frameworkInterest']['onFramework'] = False
self.data_api_client.get_framework.return_value = self.g8_framework
self.data_api_client.get_supplier_framework_info.return_value = supplier_not_on_framework
res = self.client.get("/suppliers/frameworks/g-cloud-8/contract-variation/1")
assert res.status_code == 404
def test_variation_must_exist(self):
self.data_api_client.get_framework.return_value = self.g8_framework
self.data_api_client.get_supplier_framework_info.return_value = self.good_supplier_framework
# There is no variation number 2
res = self.client.get("/suppliers/frameworks/g-cloud-8/contract-variation/2")
assert res.status_code == 404
def test_agreement_must_be_returned_already(self):
agreement_not_returned = self.good_supplier_framework.copy()
agreement_not_returned['frameworkInterest']['agreementReturned'] = False
self.data_api_client.get_framework.return_value = self.g8_framework
self.data_api_client.get_supplier_framework_info.return_value = agreement_not_returned
res = self.client.get("/suppliers/frameworks/g-cloud-8/contract-variation/1")
assert res.status_code == 404
def test_shows_form_if_not_yet_agreed(self):
self.data_api_client.get_framework.return_value = self.g8_framework
self.data_api_client.get_supplier_framework_info.return_value = self.good_supplier_framework
res = self.client.get("/suppliers/frameworks/g-cloud-8/contract-variation/1")
doc = html.fromstring(res.get_data(as_text=True))
assert res.status_code == 200
assert len(doc.xpath('//label[contains(text(), "I accept these changes")]')) == 1
assert len(doc.xpath('//button[normalize-space(string())=$t]', t="I accept")) == 1
def test_shows_signer_details_and_no_form_if_already_agreed(self):
already_agreed = self.good_supplier_framework.copy()
already_agreed['frameworkInterest']['agreedVariations'] = {
"1": {
"agreedAt": "2016-08-19T15:47:08.116613Z",
"agreedUserId": 1,
"agreedUserEmail": "agreed@email.com",
"agreedUserName": "William Drăyton",
}}
self.data_api_client.get_framework.return_value = self.g8_framework
self.data_api_client.get_supplier_framework_info.return_value = already_agreed
res = self.client.get("/suppliers/frameworks/g-cloud-8/contract-variation/1")
page_text = res.get_data(as_text=True)
doc = html.fromstring(page_text)
assert res.status_code == 200
assert len(doc.xpath('//h2[contains(text(), "Contract variation status")]')) == 1
assert (
"<span>William Drăyton<br />agreed@email.com<br />Friday 19 August 2016 at 4:47pm BST</span>" in page_text
)
assert "<span>Waiting for CCS to countersign</span>" in page_text
assert len(doc.xpath('//label[contains(text(), "I accept these proposed changes")]')) == 0
assert len(doc.xpath('//input[@value="I accept"]')) == 0
def test_shows_signer_details_and_different_text_if_already_agreed_but_no_countersign(self):
already_agreed = self.good_supplier_framework.copy()
already_agreed['frameworkInterest']['agreedVariations'] = {
"1": {
"agreedAt": "2016-08-19T15:47:08.116613Z",
"agreedUserId": 1,
"agreedUserEmail": "agreed@email.com",
"agreedUserName": "William Drăyton",
}}
self.data_api_client.get_framework.return_value = self.g9_framework
self.data_api_client.get_supplier_framework_info.return_value = already_agreed
res = self.client.get("/suppliers/frameworks/g-cloud-9/contract-variation/1")
page_text = res.get_data(as_text=True)
doc = html.fromstring(page_text)
assert res.status_code == 200
assert len(doc.xpath('//h1[contains(text(), "The contract variation for G-Cloud 9")]')) == 1
assert len(doc.xpath('//h2[contains(text(), "Contract variation status")]')) == 1
assert (
"<span>William Drăyton<br />agreed@email.com<br />Friday 19 August 2016 at 4:47pm BST</span>" in page_text
)
assert "<span>Waiting for CCS to countersign</span>" in page_text
assert "You have accepted the Crown Commercial Service’s changes to the framework agreement" in page_text
assert "They will come into effect when CCS has countersigned them." in page_text
assert len(doc.xpath('//label[contains(text(), "I accept these proposed changes")]')) == 0
assert len(doc.xpath('//input[@value="I accept"]')) == 0
def test_shows_updated_heading_and_countersigner_details_but_no_form_if_countersigned(self):
already_agreed = self.good_supplier_framework.copy()
already_agreed['frameworkInterest']['agreedVariations'] = {
"1": {
"agreedAt": "2016-08-19T15:47:08.116613Z",
"agreedUserId": 1,
"agreedUserEmail": "agreed@email.com",
"agreedUserName": "William Drăyton",
}}
g8_with_countersigned_variation = self.framework(status='live', name='G-Cloud 8')
g8_with_countersigned_variation['frameworks']['variations'] = {"1": {
"createdAt": "2016-08-01T12:30:00.000000Z",
"countersignedAt": "2016-10-01T02:00:00.000000Z",
"countersignerName": "A.N. Other",
"countersignerRole": "Head honcho",
}
}
self.data_api_client.get_framework.return_value = g8_with_countersigned_variation
self.data_api_client.get_supplier_framework_info.return_value = already_agreed
res = self.client.get("/suppliers/frameworks/g-cloud-8/contract-variation/1")
page_text = res.get_data(as_text=True)
doc = html.fromstring(page_text)
assert res.status_code == 200
assert len(doc.xpath('//h1[contains(text(), "The contract variation for G-Cloud 8")]')) == 1
assert len(doc.xpath('//h2[contains(text(), "Contract variation status")]')) == 1
assert "<span>A.N. Other<br />Head honcho<br />Saturday 1 October 2016</span>" in page_text
assert len(doc.xpath('//label[contains(text(), "I accept these proposed changes")]')) == 0
assert len(doc.xpath('//input[@value="I accept"]')) == 0
@mock.patch('app.main.views.frameworks.DMNotifyClient', autospec=True, create=True)
def test_email_is_sent_to_correct_users(self, mocked_notify_class):
mocked_notify_client = mocked_notify_class.return_value
mocked_notify_client.templates = {'g-cloud-8_variation_1_agreed': 123456789}
self.data_api_client.get_framework.return_value = self.g8_framework
self.data_api_client.get_supplier_framework_info.return_value = self.good_supplier_framework
res = self.client.post(
"/suppliers/frameworks/g-cloud-8/contract-variation/1",
data={"accept_changes": "Yes"}
)
assert res.status_code == 302
assert res.location == "http://localhost/suppliers/frameworks/g-cloud-8/contract-variation/1"
self.data_api_client.agree_framework_variation.assert_called_once_with(
1234, 'g-cloud-8', '1', 123, 'email@email.com'
)
boss_email = mock.call(
'bigboss@email.com', template_name_or_id=123456789, personalisation={'framework_name': 'g-cloud-8'},
reference="contract-variation-agreed-confirmation-ouj_ZOpWHvitNdb7O7DDQGEB-lstuMfj9oEl5oWU4C0="
)
regular_email = mock.call(
'email@email.com', template_name_or_id=123456789, personalisation={'framework_name': 'g-cloud-8'},
reference="contract-variation-agreed-confirmation-8yc90Y2VvBnVHT5jVuSmeebxOCRJcnKicOe7VAsKu50="
)
mocked_notify_client.send_email.assert_has_calls([boss_email, regular_email], any_order=False)
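# The reference strings asserted above are snapshot values. A plausible reading
# (an assumption, not verified against DMNotifyClient itself) is that each
# reference is a deterministic hash of the recipient and notification context,
# so repeated sends of the same notification reuse the same reference.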
@mock.patch('app.main.views.frameworks.DMNotifyClient', autospec=True)
def test_only_one_email_sent_if_user_is_framework_contact(self, mocked_notify_class):
same_email_as_current_user = self.good_supplier_framework.copy()
same_email_as_current_user['frameworkInterest']['declaration']['primaryContactEmail'] = 'email@email.com'
self.data_api_client.get_framework.return_value = self.g8_framework
self.data_api_client.get_supplier_framework_info.return_value = same_email_as_current_user
mocked_notify_client = mocked_notify_class.return_value
mocked_notify_client.templates = {'g-cloud-8_variation_1_agreed': 123456789}
self.client.post(
"/suppliers/frameworks/g-cloud-8/contract-variation/1",
data={"accept_changes": "Yes"}
)
mocked_notify_client.send_email.assert_called_once_with(
to_email_address='email@email.com',
personalisation={'framework_name': 'g-cloud-8'},
template_name_or_id=123456789,
reference='contract-variation-agreed-confirmation-8yc90Y2VvBnVHT5jVuSmeebxOCRJcnKicOe7VAsKu50='
)
@mock.patch('app.main.views.frameworks.DMNotifyClient', autospec=True)
def test_success_message_is_displayed_on_success(self, mocked_notify_class):
mocked_notify_client = mocked_notify_class.return_value
mocked_notify_client.templates = {'g-cloud-8_variation_1_agreed': 123456789}
self.data_api_client.get_framework.return_value = self.g8_framework
self.data_api_client.get_supplier_framework_info.return_value = self.good_supplier_framework
res = self.client.post(
"/suppliers/frameworks/g-cloud-8/contract-variation/1",
data={"accept_changes": "Yes"},
follow_redirects=True
)
doc = html.fromstring(res.get_data(as_text=True))
assert mocked_notify_client.send_email.called
assert res.status_code == 200
assert len(
doc.cssselect(".dm-alert:contains('You have accepted the proposed changes.')")
) == 1
@mock.patch('app.main.views.frameworks.DMNotifyClient', autospec=True)
def test_api_is_not_called_and_no_email_sent_for_subsequent_posts(self, mocked_notify_class):
mocked_notify_client = mocked_notify_class.return_value
already_agreed = self.good_supplier_framework.copy()
already_agreed['frameworkInterest']['agreedVariations'] = {
"1": {
"agreedAt": "2016-08-19T15:47:08.116613Z",
"agreedUserId": 1,
"agreedUserEmail": "agreed@email.com",
"agreedUserName": "William Drayton",
}
}
self.data_api_client.get_framework.return_value = self.g8_framework
self.data_api_client.get_supplier_framework_info.return_value = already_agreed
res = self.client.post(
"/suppliers/frameworks/g-cloud-8/contract-variation/1",
data={"accept_changes": "Yes"}
)
assert res.status_code == 200
assert self.data_api_client.agree_framework_variation.called is False
assert mocked_notify_client.send_email.called is False
def test_error_if_box_not_ticked(self):
self.data_api_client.get_framework.return_value = self.g8_framework
self.data_api_client.get_supplier_framework_info.return_value = self.good_supplier_framework
res = self.client.post("/suppliers/frameworks/g-cloud-8/contract-variation/1", data={})
doc = html.fromstring(res.get_data(as_text=True))
assert res.status_code == 400
validation_message = "You need to accept these changes to continue."
assert len(
doc.xpath('//span[@class="validation-message"][contains(text(), "{}")]'.format(validation_message))
) == 1
class TestReuseFrameworkSupplierDeclaration(BaseApplicationTest,
MockEnsureApplicationCompanyDetailsHaveBeenConfirmedMixin):
"""Tests for frameworks/<framework_slug>/declaration/reuse view."""
def setup_method(self, method):
super(TestReuseFrameworkSupplierDeclaration, self).setup_method(method)
self.login()
self.data_api_client_patch = mock.patch('app.main.views.frameworks.data_api_client', autospec=True)
self.data_api_client = self.data_api_client_patch.start()
self.framework_stub = FrameworkStub(
name='g-cloud-8',
slug='g-cloud-8',
allow_declaration_reuse=True,
applications_close_at=datetime(2009, 12, 3, 1, 1, 1)
).single_result_response()
def teardown_method(self, method):
self.data_api_client_patch.stop()
super().teardown_method(method)
def test_reusable_declaration_framework_slug_param(self):
"""Ensure that when using the param to specify declaration we collect the correct declaration."""
self.data_api_client.get_framework.return_value = self.framework_stub
self.data_api_client.get_supplier_framework_info.return_value = {
'frameworkInterest': {'declaration': {'status': 'complete'}, 'onFramework': True}
}
resp = self.client.get(
'/suppliers/frameworks/g-cloud-9/declaration/reuse?reusable_declaration_framework_slug=g-cloud-8'
)
assert resp.status_code == 200
self.data_api_client.get_framework.assert_has_calls([mock.call('g-cloud-9'), mock.call('g-cloud-8')])
self.data_api_client.get_supplier_framework_info.assert_called_once_with(1234, 'g-cloud-8')
def test_404_when_specified_declaration_not_found(self):
"""Fail on a 404 if declaration is specified but not found."""
self.data_api_client.get_framework.return_value = {'frameworks': {'status': 'open'}}
self.data_api_client.get_supplier_framework_info.side_effect = APIError(mock.Mock(status_code=404))
resp = self.client.get(
'/suppliers/frameworks/g-cloud-9/declaration/reuse?reusable_declaration_framework_slug=g-cloud-8'
)
assert resp.status_code == 404
assert self.data_api_client.get_framework.call_args_list == [
mock.call('g-cloud-9'),
mock.call('g-cloud-9'),
]
self.data_api_client.get_supplier_framework_info.assert_called_once_with(1234, 'g-cloud-8')
def test_redirect_when_declaration_not_found(self):
"""Redirect if a reusable declaration is not found."""
self.data_api_client.get_framework.return_value = self.framework_stub
frameworks = [
FrameworkStub(
name='ben-cloud-2',
allow_declaration_reuse=True,
applications_close_at=datetime(2009, 3, 3, 1, 1, 1)
).response()
]
supplier_declarations = []
self.data_api_client.find_frameworks.return_value = {'frameworks': frameworks}
self.data_api_client.find_supplier_declarations.return_value = dict(
frameworkInterest=supplier_declarations
)
resp = self.client.get(
'/suppliers/frameworks/g-cloud-9/declaration/reuse',
)
assert resp.location.endswith('/suppliers/frameworks/g-cloud-9/declaration')
assert self.data_api_client.get_framework.call_args_list == [
mock.call('g-cloud-9'),
mock.call('g-cloud-9'),
]
self.data_api_client.find_supplier_declarations.assert_called_once_with(1234)
def test_success_reuse_g_cloud_7_for_8(self):
"""Test success path."""
t09 = datetime(2009, 3, 3, 1, 1, 1)
t10 = datetime(2010, 3, 3, 1, 1, 1)
t11 = datetime(2011, 3, 3, 1, 1, 1)
t12 = datetime(2012, 3, 3, 1, 1, 1)
frameworks_response = [
FrameworkStub(slug='g-cloud-8', allow_declaration_reuse=True, applications_close_at=t12).response(),
FrameworkStub(slug='g-cloud-7', allow_declaration_reuse=True, applications_close_at=t11).response(),
FrameworkStub(
slug='digital-outcomes-and-specialists', allow_declaration_reuse=True, applications_close_at=t10
).response(),
FrameworkStub(slug='g-cloud-6', allow_declaration_reuse=True, applications_close_at=t09).response(),
]
framework_response = FrameworkStub(
slug='g-cloud-8', allow_declaration_reuse=True, applications_close_at=t09).response()
supplier_declarations_response = [
{'x': 'foo', 'frameworkSlug': 'g-cloud-6', 'declaration': {'status': 'complete'}, 'onFramework': True},
{'x': 'foo', 'frameworkSlug': 'g-cloud-7', 'declaration': {'status': 'complete'}, 'onFramework': True},
{'x': 'foo', 'frameworkSlug': 'dos', 'declaration': {'status': 'complete'}, 'onFramework': True}
]
self.data_api_client.find_frameworks.return_value = {'frameworks': frameworks_response}
self.data_api_client.get_framework.return_value = {'frameworks': framework_response}
self.data_api_client.find_supplier_declarations.return_value = {
'frameworkInterest': supplier_declarations_response
}
resp = self.client.get(
'/suppliers/frameworks/g-cloud-8/declaration/reuse',
)
assert resp.status_code == 200
expected = 'In March 2011, your organisation completed a declaration for G-Cloud 7.'
assert expected in str(resp.data)
assert self.data_api_client.get_framework.call_args_list == [
mock.call('g-cloud-8'),
mock.call('g-cloud-8'),
]
self.data_api_client.find_supplier_declarations.assert_called_once_with(1234)
class TestReuseFrameworkSupplierDeclarationPost(BaseApplicationTest,
MockEnsureApplicationCompanyDetailsHaveBeenConfirmedMixin):
"""Tests for frameworks/<framework_slug>/declaration/reuse POST view."""
def setup_method(self, method):
super().setup_method(method)
self.data_api_client_patch = mock.patch('app.main.views.frameworks.data_api_client', autospec=True)
self.data_api_client = self.data_api_client_patch.start()
self.login()
def teardown_method(self, method):
self.data_api_client_patch.stop()
super().teardown_method(method)
def test_reuse_false(self):
"""Assert that the redirect happens and the client sets the prefill pref to None."""
self.data_api_client.get_framework.return_value = self.framework()
data = {'reuse': 'False', 'old_framework_slug': 'should-not-be-used'}
resp = self.client.post('/suppliers/frameworks/g-cloud-9/declaration/reuse', data=data)
assert resp.location.endswith('/suppliers/frameworks/g-cloud-9/declaration')
self.data_api_client.set_supplier_framework_prefill_declaration.assert_called_once_with(
1234,
'g-cloud-9',
None,
'email@email.com'
)
def test_reuse_true(self):
"""Assert that the redirect happens and the client sets the prefill pref to the desired framework slug."""
data = {'reuse': True, 'old_framework_slug': 'digital-outcomes-and-specialists-2'}
self.data_api_client.get_supplier_framework_info.return_value = {
'frameworkInterest': {
'x_field': 'foo',
'frameworkSlug': 'digital-outcomes-and-specialists-2',
'declaration': {'status': 'complete'},
'onFramework': True
}
}
framework_response = {'frameworks': {'status': 'open', 'x_field': 'foo', 'allowDeclarationReuse': True}}
self.data_api_client.get_framework.return_value = framework_response
resp = self.client.post('/suppliers/frameworks/g-cloud-9/declaration/reuse', data=data)
assert resp.location.endswith('/suppliers/frameworks/g-cloud-9/declaration')
assert self.data_api_client.get_framework.call_args_list == [
mock.call('g-cloud-9'),
mock.call('digital-outcomes-and-specialists-2'),
]
self.data_api_client.get_supplier_framework_info.assert_called_once_with(
1234,
'digital-outcomes-and-specialists-2'
)
self.data_api_client.set_supplier_framework_prefill_declaration.assert_called_once_with(
1234,
'g-cloud-9',
'digital-outcomes-and-specialists-2',
'email@email.com'
)
def test_reuse_invalid_framework_post(self):
"""Assert 404 for non reusable framework."""
data = {'reuse': 'true', 'old_framework_slug': 'digital-outcomes-and-specialists'}
# A framework with allowDeclarationReuse as False
self.data_api_client.get_framework.return_value = {
'frameworks': {'status': 'open', 'x_field': 'foo', 'allowDeclarationReuse': False}
}
resp = self.client.post('/suppliers/frameworks/g-cloud-9/declaration/reuse', data=data)
assert self.data_api_client.get_framework.call_args_list == [
mock.call('g-cloud-9'),
mock.call('digital-outcomes-and-specialists'),
]
assert not self.data_api_client.get_supplier_framework_info.called
assert resp.status_code == 404
def test_reuse_non_existent_framework_post(self):
"""Assert 404 for non existent framework."""
data = {'reuse': 'true', 'old_framework_slug': 'digital-outcomes-and-specialists-1000000'}
# Attach does not exist.
self.data_api_client.get_framework.side_effect = [self.framework(), HTTPError()]
resp = self.client.post('/suppliers/frameworks/g-cloud-9/declaration/reuse', data=data)
assert resp.status_code == 404
assert self.data_api_client.get_framework.call_args_list == [
mock.call('g-cloud-9'),
mock.call('digital-outcomes-and-specialists-1000000')
]
# Should not do the declaration call if the framework is invalid.
assert not self.data_api_client.get_supplier_framework_info.called
def test_reuse_non_existent_declaration_post(self):
"""Assert 404 for non existent declaration."""
data = {'reuse': 'true', 'old_framework_slug': 'digital-outcomes-and-specialists-2'}
framework_response = {'frameworks': {'status': 'open', 'x_field': 'foo', 'allowDeclarationReuse': True}}
self.data_api_client.get_framework.return_value = framework_response
self.data_api_client.get_supplier_framework_info.side_effect = HTTPError()
# Do the post.
resp = self.client.post('/suppliers/frameworks/g-cloud-9/declaration/reuse', data=data)
assert resp.status_code == 404
# Should get the framework
assert self.data_api_client.get_framework.call_args_list == [
mock.call('g-cloud-9'),
mock.call('digital-outcomes-and-specialists-2'),
]
# Should error getting declaration.
self.data_api_client.get_supplier_framework_info.assert_called_once_with(
1234, 'digital-outcomes-and-specialists-2'
)
class TestReuseFrameworkSupplierDeclarationForm(BaseApplicationTest):
"""Tests for app.main.forms.frameworks.ReuseDeclarationForm form."""
@pytest.mark.parametrize('falsey_value', ('False', '', 'false'))
def test_false_values(self, falsey_value):
with self.app.test_request_context():
data = MultiDict({'framework_slug': 'digital-outcomes-and-specialists', 'reuse': falsey_value})
form = ReuseDeclarationForm(data)
assert form.reuse.data is False
class TestSignatureLegalAuthority(BaseApplicationTest):
"""Tests for app.main.views.frameworks.legal_authority."""
def setup_method(self, method):
super().setup_method(method)
self.data_api_client_patch = mock.patch('app.main.views.frameworks.data_api_client', autospec=True)
self.data_api_client = self.data_api_client_patch.start()
def teardown_method(self, method):
self.data_api_client_patch.stop()
super().teardown_method(method)
@pytest.mark.parametrize(
('framework_status', 'status_code'),
(
('coming', 404),
('open', 404),
('pending', 404),
('standstill', 200),
('live', 200),
('expired', 404),
)
)
def test_only_works_for_live_and_standstill_frameworks(self, framework_status, status_code):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status=framework_status,
slug='g-cloud-12',
is_e_signature_supported=True)
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
on_framework=True)
res = self.client.get("/suppliers/frameworks/g-cloud-12/start-framework-agreement-signing")
assert res.status_code == status_code
@pytest.mark.parametrize(
('is_e_signature_supported', 'on_framework', 'status_code'),
(
(False, True, 404),
(True, True, 200),
(True, False, 400),
)
)
def test_only_works_for_supported_frameworks(self, is_e_signature_supported, on_framework, status_code):
self.login()
self.data_api_client.get_framework.return_value = self.framework(
status='standstill',
slug='g-cloud-12',
is_e_signature_supported=is_e_signature_supported)
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
on_framework=on_framework)
res = self.client.get("/suppliers/frameworks/g-cloud-12/start-framework-agreement-signing")
assert res.status_code == status_code
def test_post_yes_redirects_to_signing_page(self):
framework_slug = 'g-cloud-12'
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='standstill',
slug=framework_slug,
framework_agreement_version="1",
is_e_signature_supported=True)
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
on_framework=True)
res = self.client.post(f"/suppliers/frameworks/{framework_slug}/start-framework-agreement-signing",
data={'legal_authority': 'yes'})
assert res.status_code == 302
assert res.location == 'http://localhost/suppliers/frameworks/g-cloud-12/sign-framework-agreement'
def test_post_no_shows_info(self):
framework_slug = 'g-cloud-12'
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='standstill',
slug=framework_slug,
framework_agreement_version="1",
is_e_signature_supported=True)
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
on_framework=True)
res = self.client.post(f"/suppliers/frameworks/{framework_slug}/start-framework-agreement-signing",
data={'legal_authority': 'no'})
assert res.status_code == 200
assert "You cannot sign the Framework Agreement" in res.get_data(as_text=True)
def test_post_no_response_shows_error(self):
framework_slug = 'g-cloud-12'
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='standstill',
slug=framework_slug,
framework_agreement_version="1",
is_e_signature_supported=True)
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
on_framework=True)
res = self.client.post(f"/suppliers/frameworks/{framework_slug}/start-framework-agreement-signing",
data={})
assert res.status_code == 400
assert "Select yes if you have the legal authority to sign on behalf of your company" in res.get_data(
as_text=True)
class TestSignFrameworkAgreement(BaseApplicationTest):
"""Tests for app.main.views.frameworks.sign_framework_agreement"""
def setup_method(self, method):
super().setup_method(method)
self.data_api_client_patch = mock.patch('app.main.views.frameworks.data_api_client', autospec=True)
self.data_api_client = self.data_api_client_patch.start()
def teardown_method(self, method):
self.data_api_client_patch.stop()
super().teardown_method(method)
@pytest.mark.parametrize(
('is_e_signature_supported', 'on_framework', 'status_code'),
(
(False, True, 404),
(True, True, 200),
(True, False, 400),
)
)
def test_only_works_for_supported_frameworks(self, is_e_signature_supported, on_framework, status_code):
self.login()
self.data_api_client.get_framework.return_value = self.framework(
status='standstill',
slug='g-cloud-12',
framework_agreement_version="1",
is_e_signature_supported=is_e_signature_supported)
self.data_api_client.find_draft_services_by_framework.return_value = {
'meta': {'total': 1}
}
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
on_framework=on_framework)
res = self.client.get("/suppliers/frameworks/g-cloud-12/sign-framework-agreement")
assert res.status_code == status_code
@pytest.mark.parametrize(
('framework_status', 'status_code'),
(
('coming', 404),
('open', 404),
('pending', 404),
('standstill', 200),
('live', 200),
('expired', 404),
)
)
def test_only_works_for_live_and_standstill_frameworks(self, framework_status, status_code):
self.data_api_client.get_framework.return_value = self.framework(status=framework_status,
slug='g-cloud-12',
framework_agreement_version="1",
is_e_signature_supported=True)
self.data_api_client.find_draft_services_by_framework.return_value = {
'meta': {'total': 1}
}
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
on_framework=True)
self.login()
res = self.client.get("/suppliers/frameworks/g-cloud-12/sign-framework-agreement")
assert res.status_code == status_code
def test_shows_error_messages(self):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='standstill',
slug='g-cloud-12',
framework_agreement_version="1",
is_e_signature_supported=True)
self.data_api_client.find_draft_services_by_framework.return_value = {
'meta': {'total': 1}
}
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
on_framework=True)
res = self.client.post("/suppliers/frameworks/g-cloud-12/sign-framework-agreement", data={})
assert res.status_code == 400
text = res.get_data(as_text=True)
assert 'Enter your full name.' in text
assert 'Enter your role in the company.' in text
assert 'Accept the terms and conditions of the Framework Agreement.' in text
def test_post_signs_agreement(self):
self.data_api_client.create_framework_agreement.return_value = {"agreement": {"id": 789}}
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
on_framework=True)
self.data_api_client.find_draft_services_by_framework.return_value = {
'meta': {'total': 1}
}
self.data_api_client.get_framework.return_value = self.framework(status='standstill',
slug='g-cloud-12',
framework_agreement_version="1",
is_e_signature_supported=True)
self.login()
res = self.client.get("/suppliers/frameworks/g-cloud-12/sign-framework-agreement")
assert res.status_code == 200
res = self.client.post("/suppliers/frameworks/g-cloud-12/sign-framework-agreement",
data={"signerName": "Jane Doe",
"signerRole": "Director",
"signer_terms_and_conditions": "True"})
self.data_api_client.create_framework_agreement.assert_called_once_with(1234, 'g-cloud-12', 'email@email.com')
self.data_api_client.update_framework_agreement.assert_called_once_with(789, {
"signedAgreementDetails": {"signerName": "Jane Doe",
"signerRole": "Director"}},
"email@email.com")
self.data_api_client.sign_framework_agreement.assert_called_once_with(
789,
'email@email.com',
{'uploaderUserId': 123}
)
assert res.status_code == 200
doc = html.fromstring(res.get_data(as_text=True))
assert doc.xpath("//h1")[0].text_content().strip() == "You’ve signed the G-Cloud 12 Framework Agreement"
@mock.patch('app.main.views.frameworks.DMNotifyClient', autospec=True)
def test_sign_framework_agreement_sends_notify_emails(self, mock_dmnotifyclient_class):
mock_dmnotifyclient_instance = mock_dmnotifyclient_class.return_value
self.data_api_client.find_users_iter.return_value = [
{'emailAddress': 'email1', 'active': True},
{'emailAddress': 'email2', 'active': True},
{'emailAddress': 'email3', 'active': False}
]
self.data_api_client.create_framework_agreement.return_value = {"agreement": {"id": 789}}
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
on_framework=True)
self.data_api_client.find_draft_services_by_framework.return_value = {
'meta': {'total': 1}
}
self.data_api_client.get_framework.return_value = self.framework(status='standstill',
slug='g-cloud-12',
framework_agreement_version="1",
is_e_signature_supported=True)
self.login()
self.client.post("/suppliers/frameworks/g-cloud-12/sign-framework-agreement",
data={"signerName": "Jane Doe",
"signerRole": "Director",
"signer_terms_and_conditions": "True"})
assert mock_dmnotifyclient_instance.send_email.call_count == 2
assert (mock_dmnotifyclient_instance.send_email.call_args[1].get('template_name_or_id') ==
'sign_framework_agreement_confirmation')
def test_agreement_text_contains_supplier_details(self):
self.data_api_client.get_framework.return_value = self.framework(status='standstill',
slug='g-cloud-12',
framework_agreement_version="1",
is_e_signature_supported=True)
self.data_api_client.find_draft_services_by_framework.return_value = {
'meta': {'total': 1}
}
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(on_framework=True)
self.data_api_client.get_supplier.return_value = {'suppliers': {'registeredName': 'Acme Company',
'companiesHouseNumber': '87654321',
'contactInformation':
[{'address1': '10 Downing Street',
'city': 'London',
'postcode': 'SW1A 2AA'
}]}}
self.login()
res = self.client.get("/suppliers/frameworks/g-cloud-12/sign-framework-agreement")
text = res.get_data(as_text=True)
assert "Lot 1: Cloud hosting, Lot 2: Cloud software, Lot 3: Cloud support" in text
assert "Acme Company" in text
assert "87654321" in text
assert "10 Downing Street, London, SW1A 2AA" in text
[stats: avg_line_length=45.098202 | max_line_length=168 | alphanum_fraction=0.598616]
be71d9522e0251f03de47e3b9ceb1940c6b88c43 | 88 | py | Python | day3/tupleandlist.py | nikhilsamninan/python-files | 15198459081097058a939b40b5e8ef754e578fe0 | ["Apache-2.0"] | stars=null | issues=null | forks=null
# Unpack each 3-tuple into x, y, z as the list is iterated.
k = [('x', 'y', 'z'), ('m', 'c', 1), (1, 's', 3)]
for x, y, z in k:
    print(x, y, z)
    print("\n")
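# A small aside, not part of the original snippet: star-unpacking handles
# tuples of uneven length by collecting the tail into a list.
mixed = [('x', 'y', 'z', 'w'), ('m', 1)]
for first, *rest in mixed:
    print(first, rest)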
[stats: avg_line_length=22 | max_line_length=39 | alphanum_fraction=0.375]
beb23069cd4fe4feb099c510eab4927dc4a7c969 | 7,034 | py | Python | skyportal/tests/frontend/test_sources.py | jialin-wu-02/skyportal | 29d606ad8567b2230fb0553b18dd3cb9d3ab2d84 | ["BSD-3-Clause"] | stars=null | issues=156 (2019-10-17T19:35:22Z to 2021-08-01T13:23:47Z) | forks=null
import os
from os.path import join as pjoin
import time
import uuid
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
from baselayer.app.config import load_config
cfg = load_config()
def test_public_source_page(driver, user, public_source):
driver.get(f"/become_user/{user.id}") # TODO decorator/context manager?
driver.get(f"/source/{public_source.id}")
driver.wait_for_xpath(f'//div[text()="{public_source.id}"]')
driver.wait_for_xpath('//label[contains(text(), "band")]') # TODO how to check plot?
driver.wait_for_xpath('//label[contains(text(), "Fe III")]')
def test_comments(driver, user, public_source):
driver.get(f"/become_user/{user.id}") # TODO decorator/context manager?
driver.get(f"/source/{public_source.id}")
driver.wait_for_xpath(f'//div[text()="{public_source.id}"]')
comment_box = driver.find_element_by_css_selector('[name=comment]')
comment_text = str(uuid.uuid4())
comment_box.send_keys(comment_text)
driver.scroll_to_element_and_click(driver.find_element_by_css_selector('[type=submit]'))
driver.wait_for_xpath(f'//div[text()="{comment_text}"]')
driver.wait_for_xpath('//span[contains(@class,"commentTime")]')
timestamp_text = driver.find_element(By.XPATH,
'//span[contains(@class,"commentTime")]').text
assert timestamp_text == 'a few seconds ago'
def test_upload_comment_attachment(driver, user, public_source):
driver.get(f"/become_user/{user.id}") # TODO decorator/context manager?
driver.get(f"/source/{public_source.id}")
driver.wait_for_xpath(f'//div[text()="{public_source.id}"]')
comment_box = driver.find_element_by_css_selector('[name=comment]')
comment_text = str(uuid.uuid4())
comment_box.send_keys(comment_text)
attachment_file = driver.find_element_by_css_selector('input[type=file]')
attachment_file.send_keys(pjoin(os.path.dirname(os.path.dirname(__file__)),
'data', 'spec.csv'))
driver.scroll_to_element_and_click(driver.find_element_by_css_selector('[type=submit]'))
driver.wait_for_xpath(f'//div[text()="{comment_text}"]')
driver.wait_for_xpath('//a[text()="spec.csv"]')
def test_download_comment_attachment(driver, user, public_source):
driver.get(f"/become_user/{user.id}") # TODO decorator/context manager?
driver.get(f"/source/{public_source.id}")
driver.wait_for_xpath(f'//div[text()="{public_source.id}"]')
comment_box = driver.find_element_by_css_selector('[name=comment]')
comment_text = str(uuid.uuid4())
comment_box.send_keys(comment_text)
attachment_file = driver.find_element_by_css_selector('input[type=file]')
attachment_file.send_keys(pjoin(os.path.dirname(os.path.dirname(__file__)),
'data', 'spec.csv'))
driver.scroll_to_element_and_click(driver.find_element_by_css_selector('[type=submit]'))
comment_text_div = driver.wait_for_xpath(f'//div[text()="{comment_text}"]')
comment_div = comment_text_div.find_element_by_xpath("..")
driver.execute_script("arguments[0].scrollIntoView();", comment_div)
ActionChains(driver).move_to_element(comment_div).perform()
time.sleep(0.1)
driver.wait_for_xpath('//a[text()="spec.csv"]').click()
time.sleep(0.5)
fpath = str(os.path.abspath(pjoin(cfg['paths.downloads_folder'], 'spec.csv')))
assert os.path.exists(fpath)
try:
with open(fpath) as f:
contents = f.read()
assert contents.split('\n')[0] == 'wavelength,flux,instrument_id'
finally:
os.remove(fpath)
def test_view_only_user_cannot_comment(driver, view_only_user, public_source):
driver.get(f"/become_user/{view_only_user.id}")
driver.get(f"/source/{public_source.id}")
driver.wait_for_xpath(f'//div[text()="{public_source.id}"]')
driver.wait_for_xpath_to_disappear('//input[@name="comment"]')
def test_delete_comment(driver, user, public_source):
driver.get(f"/become_user/{user.id}")
driver.get(f"/source/{public_source.id}")
driver.wait_for_xpath(f'//div[text()="{public_source.id}"]')
comment_box = driver.find_element_by_css_selector('[name=comment]')
comment_text = str(uuid.uuid4())
comment_box.send_keys(comment_text)
driver.scroll_to_element_and_click(driver.find_element_by_css_selector('[type=submit]'))
comment_text_div = driver.wait_for_xpath(f'//div[text()="{comment_text}"]')
comment_div = comment_text_div.find_element_by_xpath("..")
ActionChains(driver).move_to_element(comment_div).perform()
time.sleep(0.1)
delete_button = comment_div.find_element_by_tag_name("button")
delete_button.click()
driver.wait_for_xpath_to_disappear(f'//div[text()="{comment_text}"]')
def test_regular_user_cannot_delete_unowned_comment(driver, super_admin_user,
user, public_source):
driver.get(f"/become_user/{super_admin_user.id}")
driver.get(f"/source/{public_source.id}")
driver.wait_for_xpath(f'//div[text()="{public_source.id}"]')
comment_box = driver.find_element_by_css_selector('[name=comment]')
comment_text = str(uuid.uuid4())
comment_box.send_keys(comment_text)
submit_button = driver.find_element_by_css_selector('[type=submit]')
driver.scroll_to_element_and_click(submit_button)
comment_text_div = driver.wait_for_xpath(f'//div[text()="{comment_text}"]')
driver.get(f"/become_user/{user.id}")
driver.get(f"/source/{public_source.id}")
comment_text_div = driver.wait_for_xpath(f'//div[text()="{comment_text}"]')
comment_div = comment_text_div.find_element_by_xpath("..")
ActionChains(driver).move_to_element(comment_div).perform()
time.sleep(0.1)
delete_button = comment_div.find_element_by_tag_name("button")
assert not delete_button.is_displayed()
def test_super_user_can_delete_unowned_comment(driver, super_admin_user,
user, public_source):
driver.get(f"/become_user/{user.id}")
driver.get(f"/source/{public_source.id}")
driver.wait_for_xpath(f'//div[text()="{public_source.id}"]')
comment_box = driver.find_element_by_css_selector('[name=comment]')
comment_text = str(uuid.uuid4())
comment_box.send_keys(comment_text)
driver.scroll_to_element_and_click(driver.find_element_by_css_selector('[type=submit]'))
comment_text_div = driver.wait_for_xpath(f'//div[text()="{comment_text}"]')
driver.get(f"/become_user/{super_admin_user.id}")
driver.get(f"/source/{public_source.id}")
comment_text_div = driver.wait_for_xpath(f'//div[text()="{comment_text}"]')
comment_div = comment_text_div.find_element_by_xpath("..")
driver.execute_script("arguments[0].scrollIntoView();", comment_div)
ActionChains(driver).move_to_element(comment_div).perform()
time.sleep(0.1)
delete_button = comment_div.find_element_by_tag_name("button")
assert delete_button.is_displayed()
[stats: avg_line_length=48.847222 | max_line_length=92 | alphanum_fraction=0.711402]
bec29f14339cc111686867b078032298e3d7a8dd | 22 | py | Python | planer_zoo/super_resolution/__init__.py | BioinfoTongLI/planer-zoo | a22ed4ecc980488ad3d37a352340344c355261b0 | ["BSD-3-Clause"] | stars=1 (2021-05-03T13:43:51Z) | issues=null | forks=1 (Image-Py/planer-store @ 64b22a99a90e3a43d86cd95749e8e8936540199d, 2022-03-03T10:54:41Z)
from .esrgan import *
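# This star import re-exports the esrgan submodule's public names (its __all__,
# if one is defined, otherwise all non-underscore names) at package level, so
# callers can use planer_zoo.super_resolution without naming the submodule.
# The contents of the esrgan module itself are assumed, not shown here.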
[stats: avg_line_length=11 | max_line_length=21 | alphanum_fraction=0.727273]
bec626d506a4e340b719b86966bec7fdc5d7fdd9 | 43 | py | Python | jupyterlabpymolpysnips/Programming/printAtomNames.py | MooersLab/pymolpysnips | 50a89c85adf8006d85c1d6cd3f8aad7e440a0b92 | ["MIT"] | stars=null | issues=null | forks=null
cmd.do('iterate (resi 101), print(name);')
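# cmd.do() routes the string through PyMOL's command parser; the equivalent
# direct API call (a sketch, assuming pymol.cmd is in scope as cmd) avoids the
# nested quoting of the command string:
cmd.iterate('(resi 101)', 'print(name)')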
[stats: avg_line_length=21.5 | max_line_length=42 | alphanum_fraction=0.651163]
fe371d73ab7160788fa1fa3abef30dcce8d04222 | 148 | py | Python | reasoning/__init__.py | pietrobarbiero/constraint-learning | 178f6c4029dbf4120cc63e81f389309b44753e92 | ["Apache-2.0"] | stars=3 (2020-11-04T09:13:46Z to 2021-08-22T05:07:20Z) | issues=null | forks=null
from ._fol_extractor import generate_fol_explanations
from ._version import __version__

__all__ = [
    'generate_fol_explanations',
    '__version__',
]
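# Usage, for orientation (the call signature of generate_fol_explanations lives
# in ._fol_extractor and is not shown in this file):
#
#   from reasoning import generate_fol_explanations, __version__
#   print(__version__)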
[stats: avg_line_length=21.142857 | max_line_length=53 | alphanum_fraction=0.817568]
fe397ae2e6457281be8754f2beee5dc9fecff772 | 4,728 | py | Python | mayan/apps/documents/setting_migrations.py | nattangwiwat/Mayan-EDMS-recitation | fcf16afb56eae812fb99144d65ae1ae6749de0b7 | ["Apache-2.0"] | stars=4 (2021-09-02T00:16:30Z to 2021-09-09T22:25:15Z) | issues=86 (2021-09-01T23:53:02Z to 2021-09-20T02:25:10Z) | forks=70 (2021-09-01T12:54:51Z to 2022-02-16T00:53:18Z)
from mayan.apps.smart_settings.classes import SettingNamespaceMigration, Setting
from mayan.apps.smart_settings.utils import smart_yaml_load
from .literals import (
DEFAULT_DOCUMENTS_STORAGE_BACKEND,
DEFAULT_DOCUMENTS_STORAGE_BACKEND_ARGUMENTS
)
class DocumentsSettingMigration(SettingNamespaceMigration):
"""
0001 to 0002: Backend arguments are no longer quoted but YAML valid
too. Changed in version 3.3.
0002 to 0003: Setting DOCUMENTS_RECENT_ACCESS_COUNT renamed to
DOCUMENTS_RECENTLY_ACCESSED_COUNT,
DOCUMENTS_RECENT_ADDED_COUNT renamed to
DOCUMENTS_RECENTLY_CREATED_COUNT. Changed in version 4.0.
0003 to 0004: New settings for document file storage, file page image
cache and version page image cache added and made to take
their initial settings from existing
DOCUMENTS_CACHE_STORAGE_BACKEND,
DOCUMENTS_CACHE_STORAGE_BACKEND_ARGUMENTS,
DOCUMENTS_STORAGE_BACKEND, and
DOCUMENTS_STORAGE_BACKEND_ARGUMENTS settings.
"""
def documents_cache_storage_backend_arguments_0001(self, value):
return smart_yaml_load(value=value)
def documents_storage_backend_arguments_0001(self, value):
return smart_yaml_load(value=value)
def documents_file_page_image_cache_storage_backend_0003(self, value):
# Get the setting by its new global name
setting = Setting.get(
global_name='DOCUMENTS_FILE_PAGE_IMAGE_CACHE_STORAGE_BACKEND'
)
# Load the value from the setting's old global name
setting.cache_value(global_name='DOCUMENTS_CACHE_STORAGE_BACKEND')
return setting.value
def documents_file_page_image_cache_storage_backend_arguments_0003(self, value):
# Get the setting by its new global name
setting = Setting.get(
global_name='DOCUMENTS_FILE_PAGE_IMAGE_CACHE_STORAGE_BACKEND_ARGUMENTS'
)
# Load the value from the setting's old global name
setting.cache_value(
global_name='DOCUMENTS_CACHE_STORAGE_BACKEND_ARGUMENTS'
)
return setting.value
def documents_file_storage_backend_0003(self, value):
# Get the setting by its new global name
setting = Setting.get(global_name='DOCUMENTS_FILE_STORAGE_BACKEND')
# Load the value from the setting's old global name
setting.cache_value(
global_name='DOCUMENTS_STORAGE_BACKEND',
default_override=DEFAULT_DOCUMENTS_STORAGE_BACKEND
)
return setting.value
def documents_file_storage_backend_arguments_0003(self, value):
# Get the setting by its new global name
setting = Setting.get(
global_name='DOCUMENTS_FILE_STORAGE_BACKEND_ARGUMENTS'
)
# Load the value from the setting's old global name
setting.cache_value(
global_name='DOCUMENTS_STORAGE_BACKEND_ARGUMENTS',
default_override=DEFAULT_DOCUMENTS_STORAGE_BACKEND_ARGUMENTS
)
return setting.value
def documents_recently_accessed_count_0002(self, value):
# Get the setting by its new global name
setting = Setting.get(
global_name='DOCUMENTS_RECENTLY_ACCESSED_COUNT'
)
# Load the value from the setting's old global name
setting.cache_value(global_name='DOCUMENTS_RECENT_ACCESS_COUNT')
return setting.value
def documents_recently_created_count_0002(self, value):
# Get the setting by its new global name
setting = Setting.get(global_name='DOCUMENTS_RECENTLY_CREATED_COUNT')
# Load the value from the setting's old global name
setting.cache_value(global_name='DOCUMENTS_RECENT_ADDED_COUNT')
return setting.value
def documents_version_page_image_cache_storage_backend_0003(self, value):
# Get the setting by its new global name
setting = Setting.get(
global_name='DOCUMENTS_VERSION_PAGE_IMAGE_CACHE_STORAGE_BACKEND'
)
# Load the value from the setting's old global name
setting.cache_value(global_name='DOCUMENTS_CACHE_STORAGE_BACKEND')
return setting.value
def documents_version_page_image_cache_storage_backend_arguments_0003(self, value):
# Get the setting by its new global name
setting = Setting.get(
global_name='DOCUMENTS_VERSION_PAGE_IMAGE_CACHE_STORAGE_BACKEND_ARGUMENTS'
)
# Load the value from the setting's old global name
setting.cache_value(
global_name='DOCUMENTS_CACHE_STORAGE_BACKEND_ARGUMENTS'
)
return setting.value
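# The migration pattern above, reduced to a sketch: a method named for a
# setting fetches it under its *new* global name, then re-reads the value that
# was persisted under the *old* name. A hypothetical standalone illustration
# (RENAMES and get_stored_value are invented names, not Mayan API):
RENAMES = {
    'DOCUMENTS_RECENTLY_CREATED_COUNT': 'DOCUMENTS_RECENT_ADDED_COUNT',
}

def migrate_value(new_name, get_stored_value):
    # Read the value saved under the old name when a rename applies.
    old_name = RENAMES.get(new_name)
    return get_stored_value(old_name or new_name)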
[stats: avg_line_length=43.376147 | max_line_length=87 | alphanum_fraction=0.707276]
fe5c6537b87850166f3b3595028f2b584f8d703c | 29,276 | py | Python | BERT/models/rgb_resneXt3D.py | deepcam-cn/3D-CNN-BERT-COVID19 | 58adc7d570a016b48c48829e26b1f326bba91d4a | ["MIT"] | stars=1 (2021-07-15T17:53:41Z) | issues=null | forks=null
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 5 11:45:05 2019
@author: esat
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import math
from functools import partial
from .NLB.NLBlockND import NLBlockND
from .BERT.bert import BERT5
__all__ = [ 'rgb_resneXt3D64f101', 'flow_resneXt3D64f101',
'rgb_resneXt3D64f101_bert10_FRAB', 'flow_resneXt3D64f101_bert10_FRAB',
'rgb_resneXt3D64f101_bert10_FRMB', 'flow_resneXt3D64f101_bert10_FRMB',
'rgb_resneXt3D64f101_FRMB_adamw', 'rgb_resneXt3D64f101_adamw',
'rgb_resneXt3D64f101_FRMB_NLB_concatenation', 'rgb_resneXt3D64f101_FRMB_lstm',
'rgb_resneXt3D64f101_concatenation']
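# Naming note (an inference from the BERT-for-video literature, not stated in
# this file): FRAB appears to be "Feature Reduction with Additional Block",
# where the channel-reducing ResNeXt bottleneck runs after the backbone
# (self.mapper in forward), while FRMB, "Feature Reduction with Modified
# Block", swaps it in for the backbone's final block (self.features[7][2]).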
class rgb_resneXt3D64f101(nn.Module):
def __init__(self, num_classes , length, modelPath=''):
super(rgb_resneXt3D64f101, self).__init__()
self.num_classes=num_classes
self.dp = nn.Dropout(p=0.8)
self.features=nn.Sequential(*list(_trained_resnext101(model_path=modelPath, sample_size=112, sample_duration=64).children())[:-1])
self.fc_action = nn.Linear(2048, num_classes)
for param in self.features.parameters():
param.requires_grad = True
torch.nn.init.xavier_uniform_(self.fc_action.weight)
self.fc_action.bias.data.zero_()
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.dp(x)
x = self.fc_action(x)
return x
class flow_resneXt3D64f101(nn.Module):
def __init__(self, num_classes , length, modelPath=''):
super(flow_resneXt3D64f101, self).__init__()
self.num_classes=num_classes
self.dp = nn.Dropout(p=0.7)
self.features=nn.Sequential(*list(_trained_resnext101_flow(model_path_flow=modelPath, \
sample_size=112, sample_duration=64).children())[:-1])
self.fc_action = nn.Linear(2048, num_classes)
for param in self.features.parameters():
param.requires_grad = True
torch.nn.init.xavier_uniform_(self.fc_action.weight)
self.fc_action.bias.data.zero_()
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.dp(x)
x = self.fc_action(x)
return x
def mars_forward(self, x):
features = self.features(x)
features = features.view(features.size(0), -1)
return features
#rgb_resneXt3D64f101_bert10XY2
class rgb_resneXt3D64f101_bert10_FRAB(nn.Module):
def __init__(self, num_classes , length, modelPath=''):
super(rgb_resneXt3D64f101_bert10_FRAB, self).__init__()
self.hidden_size=512
self.n_layers=1
self.attn_heads=8
self.num_classes=num_classes
self.length=length
self.dp = nn.Dropout(p=0.8)
self.avgpool = nn.AvgPool3d((1, 4, 4), stride=1)
self.features=nn.Sequential(*list(_trained_resnext101(model_path=modelPath, sample_size=112, sample_duration=64).children())[:-2])
downsample = nn.Sequential(
nn.Conv3d(
2048,
512,
kernel_size=1,
stride=1,
bias=False), nn.BatchNorm3d(512))
self.mapper = ResNeXtBottleneck(2048, 256, cardinality = 32, stride = 1, downsample = downsample)
for m in self.mapper.modules():
if isinstance(m, nn.Conv3d):
nn.init.kaiming_normal_(m.weight)
#m.weight = nn.init.kaiming_normal(m.weight, mode='fan_out')
elif isinstance(m, nn.BatchNorm3d):
m.weight.data.fill_(1)
m.bias.data.zero_()
self.bert = BERT5(self.hidden_size, 4 , hidden=self.hidden_size, n_layers=self.n_layers, attn_heads=self.attn_heads)
print(sum(p.numel() for p in self.bert.parameters() if p.requires_grad))
self.fc_action = nn.Linear(self.hidden_size, num_classes)
for param in self.features.parameters():
param.requires_grad = True
torch.nn.init.xavier_uniform_(self.fc_action.weight)
self.fc_action.bias.data.zero_()
def forward(self, x):
x = self.features(x)
x = self.mapper(x)
x = self.avgpool(x)
x = x.view(x.size(0), self.hidden_size, 4)
x = x.transpose(1,2)
input_vectors=x
norm = input_vectors.norm(p=2, dim = -1, keepdim=True)
input_vectors = input_vectors.div(norm)
output , maskSample = self.bert(x)
classificationOut = output[:,0,:]
sequenceOut=output[:,1:,:]
norm = sequenceOut.norm(p=2, dim = -1, keepdim=True)
sequenceOut = sequenceOut.div(norm)
output=self.dp(classificationOut)
x = self.fc_action(output)
return x, input_vectors, sequenceOut, maskSample, classificationOut
#flow_resneXt3D64f101_bert10S
class flow_resneXt3D64f101_bert10_FRAB(nn.Module):
def __init__(self, num_classes , length, modelPath=''):
super(flow_resneXt3D64f101_bert10_FRAB, self).__init__()
self.hidden_size=512
self.n_layers=1
self.attn_heads=8
self.num_classes=num_classes
self.length=length
self.dp = nn.Dropout(p=0.8)
self.avgpool = nn.AvgPool3d((1, 4, 4), stride=1)
self.features=nn.Sequential(*list(_trained_resnext101_flow(model_path_flow=modelPath, \
sample_size=112, sample_duration=64).children())[:-2])
downsample = nn.Sequential(
nn.Conv3d(
2048,
512,
kernel_size=1,
stride=1,
bias=False), nn.BatchNorm3d(512))
self.mapper = ResNeXtBottleneck(2048, 256, cardinality = 32, stride = 1, downsample = downsample)
for m in self.mapper.modules():
if isinstance(m, nn.Conv3d):
nn.init.kaiming_normal_(m.weight)
#m.weight = nn.init.kaiming_normal(m.weight, mode='fan_out')
elif isinstance(m, nn.BatchNorm3d):
m.weight.data.fill_(1)
m.bias.data.zero_()
self.bert = BERT5(self.hidden_size, 4 , hidden=self.hidden_size, n_layers=self.n_layers, attn_heads=self.attn_heads)
self.fc_action = nn.Linear(self.hidden_size, num_classes)
for param in self.features.parameters():
param.requires_grad = True
torch.nn.init.xavier_uniform_(self.fc_action.weight)
self.fc_action.bias.data.zero_()
def forward(self, x):
x = self.features(x)
x = self.mapper(x)
x = self.avgpool(x)
x = x.view(x.size(0), self.hidden_size, 4)
x = x.transpose(1,2)
input_vectors=x
norm = input_vectors.norm(p=2, dim = -1, keepdim=True)
input_vectors = input_vectors.div(norm)
output , maskSample = self.bert(x)
classificationOut = output[:,0,:]
sequenceOut=output[:,1:,:]
norm = sequenceOut.norm(p=2, dim = -1, keepdim=True)
sequenceOut = sequenceOut.div(norm)
output=self.dp(classificationOut)
x = self.fc_action(output)
return x, input_vectors, sequenceOut, maskSample, classificationOut
#rgb_resneXt3D64f101_bert10S
class rgb_resneXt3D64f101_bert10_FRMB(nn.Module):
def __init__(self, num_classes , length, modelPath=''):
super(rgb_resneXt3D64f101_bert10_FRMB, self).__init__()
self.hidden_size=512
self.n_layers=1
self.attn_heads=8
self.num_classes=num_classes
self.length=length
self.dp = nn.Dropout(p=0.8)
self.avgpool = nn.AvgPool3d((1, 4, 4), stride=1)
self.features=nn.Sequential(*list(_trained_resnext101(model_path=modelPath, sample_size=112, sample_duration=64).children())[:-2])
downsample = nn.Sequential(
nn.Conv3d(
2048,
512,
kernel_size=1,
stride=1,
bias=False), nn.BatchNorm3d(512))
mapper = ResNeXtBottleneck(2048, 256, cardinality = 32, stride = 1, downsample = downsample)
for m in mapper.modules():
if isinstance(m, nn.Conv3d):
nn.init.kaiming_normal_(m.weight)
#m.weight = nn.init.kaiming_normal(m.weight, mode='fan_out')
elif isinstance(m, nn.BatchNorm3d):
m.weight.data.fill_(1)
m.bias.data.zero_()
self.features[7][2] = mapper
self.bert = BERT5(self.hidden_size, 4 , hidden=self.hidden_size, n_layers=self.n_layers, attn_heads=self.attn_heads)
print(sum(p.numel() for p in self.bert.parameters() if p.requires_grad))
self.fc_action = nn.Linear(self.hidden_size, num_classes)
for param in self.features.parameters():
param.requires_grad = True
torch.nn.init.xavier_uniform_(self.fc_action.weight)
self.fc_action.bias.data.zero_()
def forward(self, x):
x = self.features(x)
x = self.avgpool(x)
x = x.view(x.size(0), self.hidden_size, 4)
x = x.transpose(1,2)
input_vectors=x
norm = input_vectors.norm(p=2, dim = -1, keepdim=True)
input_vectors = input_vectors.div(norm)
output , maskSample = self.bert(x)
classificationOut = output[:,0,:]
sequenceOut=output[:,1:,:]
norm = sequenceOut.norm(p=2, dim = -1, keepdim=True)
sequenceOut = sequenceOut.div(norm)
output=self.dp(classificationOut)
x = self.fc_action(output)
return x, input_vectors, sequenceOut, maskSample, classificationOut
#flow_resneXt3D64f101_bert10SS
class flow_resneXt3D64f101_bert10_FRMB(nn.Module):
def __init__(self, num_classes , length, modelPath=''):
super(flow_resneXt3D64f101_bert10_FRMB, self).__init__()
self.hidden_size=512
self.n_layers=1
self.attn_heads=8
self.num_classes=num_classes
self.length=length
self.dp = nn.Dropout(p=0.8)
self.avgpool = nn.AvgPool3d((1, 4, 4), stride=1)
self.features=nn.Sequential(*list(_trained_resnext101_flow(model_path_flow=modelPath, \
sample_size=112, sample_duration=64).children())[:-2])
downsample = nn.Sequential(
nn.Conv3d(
2048,
512,
kernel_size=1,
stride=1,
bias=False), nn.BatchNorm3d(512))
mapper = ResNeXtBottleneck(2048, 256, cardinality = 32, stride = 1, downsample = downsample)
for m in mapper.modules():
if isinstance(m, nn.Conv3d):
nn.init.kaiming_normal_(m.weight)
#m.weight = nn.init.kaiming_normal(m.weight, mode='fan_out')
elif isinstance(m, nn.BatchNorm3d):
m.weight.data.fill_(1)
m.bias.data.zero_()
self.features[7][2] = mapper
self.bert = BERT5(self.hidden_size, 4 , hidden=self.hidden_size, n_layers=self.n_layers, attn_heads=self.attn_heads)
print(sum(p.numel() for p in self.bert.parameters() if p.requires_grad))
self.fc_action = nn.Linear(self.hidden_size, num_classes)
for param in self.features.parameters():
param.requires_grad = True
torch.nn.init.xavier_uniform_(self.fc_action.weight)
self.fc_action.bias.data.zero_()
def forward(self, x):
x = self.features(x)
x = self.avgpool(x)
x = x.view(x.size(0), self.hidden_size, 4)
x = x.transpose(1,2)
input_vectors=x
norm = input_vectors.norm(p=2, dim = -1, keepdim=True)
input_vectors = input_vectors.div(norm)
output , maskSample = self.bert(x)
classificationOut = output[:,0,:]
sequenceOut=output[:,1:,:]
norm = sequenceOut.norm(p=2, dim = -1, keepdim=True)
sequenceOut = sequenceOut.div(norm)
output=self.dp(classificationOut)
x = self.fc_action(output)
return x, input_vectors, sequenceOut, maskSample, classificationOut
class rgb_resneXt3D64f101_concatenation(nn.Module):
def __init__(self, num_classes , length, modelPath=''):
super(rgb_resneXt3D64f101_concatenation, self).__init__()
self.hidden_size=512
self.n_layers=1
self.attn_heads=8
self.num_classes=num_classes
self.length=length
self.dp = nn.Dropout(p=0.8)
self.avgpool = nn.AvgPool3d((1, 4, 4), stride=1)
self.features=nn.Sequential(*list(_trained_resnext101(model_path=modelPath, sample_size=112, sample_duration=64).children())[:-2])
downsample = nn.Sequential(
nn.Conv3d(
2048,
512,
kernel_size=1,
stride=1,
bias=False), nn.BatchNorm3d(512))
mapper = ResNeXtBottleneck(2048, 256, cardinality = 32, stride = 1, downsample = downsample)
for m in mapper.modules():
if isinstance(m, nn.Conv3d):
nn.init.kaiming_normal_(m.weight)
#m.weight = nn.init.kaiming_normal(m.weight, mode='fan_out')
elif isinstance(m, nn.BatchNorm3d):
m.weight.data.fill_(1)
m.bias.data.zero_()
self.features[7][2] = mapper
self.linear = nn.Linear(2048, 1550)
self.fc_action = nn.Linear(1550, num_classes)
for param in self.features.parameters():
param.requires_grad = True
torch.nn.init.xavier_uniform_(self.fc_action.weight)
self.fc_action.bias.data.zero_()
def forward(self, x):
x = self.features(x)
x = self.avgpool(x)
x = x.view(x.size(0), self.hidden_size * 4)
input_vectors = x
sequenceOut = x
maskSample = x
x = self.dp(x)
output = self.linear(x)
x = self.fc_action(output)
return x, input_vectors, sequenceOut, maskSample, maskSample
class rgb_resneXt3D64f101_FRMB_NLB_concatenation(nn.Module):
def __init__(self, num_classes , length, modelPath=''):
super(rgb_resneXt3D64f101_FRMB_NLB_concatenation, self).__init__()
self.hidden_size=512
self.linear_size = 1000
self.n_layers=1
self.attn_heads=8
self.num_classes=num_classes
self.length=length
self.dp = nn.Dropout(p=0.8)
self.avgpool = nn.AvgPool3d((1, 4, 4), stride=1)
self.features=nn.Sequential(*list(_trained_resnext101(model_path=modelPath, sample_size=112, sample_duration=64).children())[:-2])
downsample = nn.Sequential(
nn.Conv3d(
2048,
self.hidden_size,
kernel_size=1,
stride=1,
bias=False), nn.BatchNorm3d(self.hidden_size))
mapper = ResNeXtBottleneck(2048, int(self.hidden_size / 2), cardinality = 32, stride = 1, downsample = downsample)
for m in mapper.modules():
if isinstance(m, nn.Conv3d):
nn.init.kaiming_normal_(m.weight)
#m.weight = nn.init.kaiming_normal(m.weight, mode='fan_out')
elif isinstance(m, nn.BatchNorm3d):
m.weight.data.fill_(1)
m.bias.data.zero_()
self.features[7][2] = mapper
self.NLB = NLBlockND(in_channels = self.hidden_size, inter_channels = self.hidden_size)
self.linear = nn.Linear(self.hidden_size * 4, self.linear_size)
self.fc_action = nn.Linear(self.linear_size, num_classes)
for param in self.features.parameters():
param.requires_grad = True
torch.nn.init.xavier_uniform_(self.fc_action.weight)
self.fc_action.bias.data.zero_()
def forward(self, x):
x = self.features(x)
x = self.NLB(x)
x = self.avgpool(x)
x = x.view(-1,self.hidden_size * 4)
input_vectors = x
sequenceOut = x
maskSample = x
x = self.dp(x)
x = self.linear(x)
x = self.fc_action(x)
return x, input_vectors, sequenceOut, maskSample, maskSample
#rgb_resneXt3D64f101_lstm2
class rgb_resneXt3D64f101_FRMB_lstm(nn.Module):
def __init__(self, num_classes , length, modelPath=''):
super(rgb_resneXt3D64f101_FRMB_lstm, self).__init__()
self.hidden_size = 450
self.input_size = 512
self.n_layers=1
self.attn_heads=8
self.num_classes=num_classes
self.length=length
self.dp = nn.Dropout(p=0.8)
self.avgpool = nn.AvgPool3d((1, 4, 4), stride=1)
self.features=nn.Sequential(*list(_trained_resnext101(model_path=modelPath, sample_size=112, sample_duration=64).children())[:-2])
downsample = nn.Sequential(
nn.Conv3d(
2048,
self.input_size,
kernel_size=1,
stride=1,
bias=False), nn.BatchNorm3d(self.input_size))
mapper = ResNeXtBottleneck(2048, int(self.input_size / 2), cardinality = 32, stride = 1, downsample = downsample)
for m in mapper.modules():
if isinstance(m, nn.Conv3d):
nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm3d):
m.weight.data.fill_(1)
m.bias.data.zero_()
self.features[7][2] = mapper
self.lstm=nn.LSTM(input_size=self.input_size, hidden_size=self.hidden_size, num_layers=2, batch_first=True,bidirectional=False)
self.fc_action = nn.Linear(self.hidden_size, num_classes)
for param in self.features.parameters():
param.requires_grad = True
torch.nn.init.xavier_uniform_(self.fc_action.weight)
self.fc_action.bias.data.zero_()
def forward(self, x):
x = self.features(x)
x = self.avgpool(x)
x = x.view(x.size(0), self.input_size, 4)
x = x.transpose(1,2)
output,_=self.lstm(x)
x= output[:,-1,:]
input_vectors = x
sequenceOut = x
maskSample = x
x = self.dp(x)
x = self.fc_action(x)
return x, input_vectors, sequenceOut, maskSample, sequenceOut
class rgb_resneXt3D64f101_adamw(nn.Module):
def __init__(self, num_classes , length, modelPath=''):
super(rgb_resneXt3D64f101_adamw, self).__init__()
self.num_classes=num_classes
self.dp = nn.Dropout(p=0.8)
self.features=nn.Sequential(*list(_trained_resnext101(model_path=modelPath, sample_size=112, sample_duration=64).children())[:-1])
self.fc_action = nn.Linear(2048, num_classes)
for param in self.features.parameters():
param.requires_grad = True
torch.nn.init.xavier_uniform_(self.fc_action.weight)
self.fc_action.bias.data.zero_()
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
y = x
x = self.dp(x)
x = self.fc_action(x)
return x, x, x, x, y
#rgb_resneXt3D64f101_adamw_modified
class rgb_resneXt3D64f101_FRMB_adamw(nn.Module):
def __init__(self, num_classes , length, modelPath=''):
super(rgb_resneXt3D64f101_FRMB_adamw, self).__init__()
self.num_classes=num_classes
self.dp = nn.Dropout(p=0.8)
self.hidden_size = 512
self.features=nn.Sequential(*list(_trained_resnext101(model_path=modelPath, sample_size=112, sample_duration=64).children())[:-1])
downsample = nn.Sequential(
nn.Conv3d(
2048,
self.hidden_size,
kernel_size=1,
stride=1,
bias=False), nn.BatchNorm3d(self.hidden_size))
mapper = ResNeXtBottleneck(2048, int(self.hidden_size / 2), cardinality = 32, stride = 1, downsample = downsample)
for m in mapper.modules():
if isinstance(m, nn.Conv3d):
nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm3d):
m.weight.data.fill_(1)
m.bias.data.zero_()
self.features[7][2] = mapper
self.fc_action = nn.Linear(self.hidden_size, num_classes)
for param in self.features.parameters():
param.requires_grad = True
torch.nn.init.xavier_uniform_(self.fc_action.weight)
self.fc_action.bias.data.zero_()
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
y = x
x = self.dp(x)
x = self.fc_action(x)
return x, x, x, x, y
def conv3x3x3(in_planes, out_planes, stride=1):
# 3x3x3 convolution with padding
return nn.Conv3d(
in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=1,
bias=False)
def downsample_basic_block(x, planes, stride):
    # Shortcut type 'A': average-pool to the target resolution, then zero-pad
    # the channel dimension up to `planes` (parameter-free downsampling).
    out = F.avg_pool3d(x, kernel_size=1, stride=stride)
zero_pads = torch.Tensor(
out.size(0), planes - out.size(1), out.size(2), out.size(3),
out.size(4)).zero_()
if isinstance(out.data, torch.cuda.FloatTensor):
zero_pads = zero_pads.cuda()
out = Variable(torch.cat([out.data, zero_pads], dim=1))
return out
class ResNeXtBottleneck(nn.Module):
expansion = 2
def __init__(self, inplanes, planes, cardinality, stride=1,
downsample=None):
super(ResNeXtBottleneck, self).__init__()
mid_planes = cardinality * int(planes / 32)
self.conv1 = nn.Conv3d(inplanes, mid_planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm3d(mid_planes)
self.conv2 = nn.Conv3d(
mid_planes,
mid_planes,
kernel_size=3,
stride=stride,
padding=1,
groups=cardinality,
bias=False)
self.bn2 = nn.BatchNorm3d(mid_planes)
self.conv3 = nn.Conv3d(
mid_planes, planes * self.expansion, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm3d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNeXt(nn.Module):
def __init__(self,
block,
layers,
sample_size,
sample_duration,
shortcut_type='B',
cardinality=32,
num_classes=400):
self.inplanes = 64
super(ResNeXt, self).__init__()
self.conv1 = nn.Conv3d(
3,
64,
kernel_size=7,
stride=(1, 2, 2),
padding=(3, 3, 3),
bias=False)
self.bn1 = nn.BatchNorm3d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool3d(kernel_size=(3, 3, 3), stride=2, padding=1)
self.layer1 = self._make_layer(block, 128, layers[0], shortcut_type,
cardinality)
self.layer2 = self._make_layer(
block, 256, layers[1], shortcut_type, cardinality, stride=2)
self.layer3 = self._make_layer(
block, 512, layers[2], shortcut_type, cardinality, stride=2)
self.layer4 = self._make_layer(
block, 1024, layers[3], shortcut_type, cardinality, stride=2)
last_duration = int(math.ceil(sample_duration / 16))
last_size = int(math.ceil(sample_size / 32))
self.avgpool = nn.AvgPool3d(
(last_duration, last_size, last_size), stride=1)
self.fc = nn.Linear(cardinality * 32 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv3d):
nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm3d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self,
block,
planes,
blocks,
shortcut_type,
cardinality,
stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
if shortcut_type == 'A':
downsample = partial(
downsample_basic_block,
planes=planes * block.expansion,
stride=stride)
else:
downsample = nn.Sequential(
nn.Conv3d(
self.inplanes,
planes * block.expansion,
kernel_size=1,
stride=stride,
bias=False), nn.BatchNorm3d(planes * block.expansion))
layers = []
layers.append(
block(self.inplanes, planes, cardinality, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, cardinality))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def get_fine_tuning_parameters(model, ft_begin_index):
if ft_begin_index == 0:
return model.parameters()
ft_module_names = []
for i in range(ft_begin_index, 5):
ft_module_names.append('layer{}'.format(i))
ft_module_names.append('fc')
parameters = []
for k, v in model.named_parameters():
for ft_module in ft_module_names:
if ft_module in k:
parameters.append({'params': v})
break
else:
parameters.append({'params': v, 'lr': 0.0})
return parameters
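def _example_finetune_setup():
    # Usage sketch (hypothetical helper, not in the original source): with
    # ft_begin_index=3, layer1/layer2 parameters get lr 0.0 while layer3,
    # layer4 and fc train at the optimizer's base rate.
    import torch.optim as optim
    model = resnext3D101(sample_size=112, sample_duration=64, num_classes=400)
    param_groups = get_fine_tuning_parameters(model, ft_begin_index=3)
    return optim.SGD(param_groups, lr=0.01, momentum=0.9)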
def resnext3D50(**kwargs):
    """Constructs a 3D ResNeXt-50 model.
    """
    model = ResNeXt(ResNeXtBottleneck, [3, 4, 6, 3], **kwargs)
    return model
def resnext3D101(**kwargs):
    """Constructs a 3D ResNeXt-101 model.
    """
    model = ResNeXt(ResNeXtBottleneck, [3, 4, 23, 3], **kwargs)
    return model
def rgb_mars_resnext3D64f101(num_classes, length, modelPath=''):
    """Constructs a 3D ResNeXt-101 model for 64-frame RGB clips.
    Note: num_classes is ignored; the classifier is hard-coded to 51 classes.
    """
    model = ResNeXt(ResNeXtBottleneck, [3, 4, 23, 3], sample_size=112, sample_duration=64, num_classes=51)
    return model
def flow_mars_resnext3D64f101(num_classes, length, modelPath=''):
    """Constructs a 3D ResNeXt-101 model for 64-frame optical-flow clips.
    Note: num_classes is ignored; the classifier is hard-coded to 51 classes.
    """
    model = ResNeXt(ResNeXtBottleneck, [3, 4, 23, 3], sample_size=112, sample_duration=64, num_classes=51)
    # Replace the stem so the network accepts 2-channel flow input.
    model.conv1 = nn.Conv3d(
        2,
        64,
        kernel_size=7,
        stride=(1, 2, 2),
        padding=(3, 3, 3),
        bias=False)
    return model
def _trained_resnext101(model_path, **kwargs):
    """Constructs a 3D ResNeXt-101 model, optionally loading pretrained weights.
    """
    model = ResNeXt(ResNeXtBottleneck, [3, 4, 23, 3], **kwargs)
    if model_path == '':
        return model
    params = torch.load(model_path)
    # Strip the 'module.' prefix that nn.DataParallel adds to checkpoint keys.
    new_dict = {k[7:]: v for k, v in params['state_dict'].items()}
    model_dict = model.state_dict()
    model_dict.update(new_dict)
    model.load_state_dict(model_dict)
    return model
def _trained_resnext101_flow(model_path_flow, **kwargs):
    """Constructs a 3D ResNeXt-101 flow model, optionally loading pretrained weights.
    """
    model = ResNeXt(ResNeXtBottleneck, [3, 4, 23, 3], **kwargs)
    # Replace the stem so the network accepts 2-channel flow input.
    model.conv1 = nn.Conv3d(
        2,
        64,
        kernel_size=7,
        stride=(1, 2, 2),
        padding=(3, 3, 3),
        bias=False)
    if model_path_flow == '':
        return model
    params = torch.load(model_path_flow)
    # Strip the 'module.' prefix that nn.DataParallel adds to checkpoint keys.
    new_dict = {k[7:]: v for k, v in params['state_dict'].items()}
    model_dict = model.state_dict()
    model_dict.update(new_dict)
    model.load_state_dict(model_dict)
    return model
def resnext3D152(**kwargs):
    """Constructs a 3D ResNeXt-152 model.
    """
    model = ResNeXt(ResNeXtBottleneck, [3, 8, 36, 3], **kwargs)
    return model
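if __name__ == '__main__':
    # Smoke test (sketch, not part of the original source): build the plain
    # 64-frame RGB backbone without pretrained weights and check the output.
    model = rgb_resneXt3D64f101_adamw(num_classes=51, length=64, modelPath='')
    model.eval()
    clip = torch.randn(1, 3, 64, 112, 112)  # batch, channels, frames, H, W
    with torch.no_grad():
        logits, _, _, _, _ = model(clip)
    print(logits.shape)  # expected: torch.Size([1, 51])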
| 35.061078
| 138
| 0.585599
| 3,599
| 29,276
| 4.569881
| 0.068074
| 0.009607
| 0.032103
| 0.020794
| 0.797349
| 0.763483
| 0.744999
| 0.72536
| 0.716301
| 0.716301
| 0
| 0.050188
| 0.299665
| 29,276
| 834
| 139
| 35.103118
| 0.751988
| 0.036856
| 0
| 0.704225
| 0
| 0
| 0.013116
| 0.01013
| 0
| 0
| 0
| 0
| 0
| 1
| 0.059468
| false
| 0
| 0.01252
| 0.001565
| 0.137715
| 0.004695
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
fe5d251dfb48b9f56197507dab975a516d5f01a8
| 56
|
py
|
Python
|
django_tenants/tests/staticfiles/__init__.py
|
jcass77/django-tenants
|
5128d9e9d2409f3cd0b09c0bd574170ff657725d
|
[
"MIT"
] | 1
|
2019-12-26T22:39:43.000Z
|
2019-12-26T22:39:43.000Z
|
django_tenants/tests/staticfiles/__init__.py
|
tiagocapelli/django-tenants
|
44b0bf78a5ccabcb28c4fa4ef0465aadc3125d1c
|
[
"MIT"
] | null | null | null |
django_tenants/tests/staticfiles/__init__.py
|
tiagocapelli/django-tenants
|
44b0bf78a5ccabcb28c4fa4ef0465aadc3125d1c
|
[
"MIT"
] | null | null | null |
from .test_finders import *
from .test_storage import *
| 18.666667
| 27
| 0.785714
| 8
| 56
| 5.25
| 0.625
| 0.380952
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 56
| 2
| 28
| 28
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
fe77c0d87d566de36459dc36a5de11401cecf318
| 191
|
py
|
Python
|
src/stdout_parser.py
|
joewittmer/Morserino-32-Firmware-Updater
|
8b2359ddd6ea67b96f40e758efd07a7ffdfa53e8
|
[
"MIT"
] | 1
|
2022-01-10T00:12:12.000Z
|
2022-01-10T00:12:12.000Z
|
src/stdout_parser.py
|
joewittmer/Morserino-32-Firmware-Updater
|
8b2359ddd6ea67b96f40e758efd07a7ffdfa53e8
|
[
"MIT"
] | 7
|
2021-09-17T14:40:08.000Z
|
2021-10-21T15:29:11.000Z
|
src/stdout_parser.py
|
joewittmer/Morserino-32-Firmware-Updater
|
8b2359ddd6ea67b96f40e758efd07a7ffdfa53e8
|
[
"MIT"
] | 3
|
2020-05-11T20:07:30.000Z
|
2022-02-25T11:20:18.000Z
|
class StdoutParser(object):
    def __init__(self, parse=lambda message: None):
        # Callback invoked with each chunk written to this stream. The default
        # must accept one argument, since write() always passes the message.
        self.parse = parse
    def write(self, message):
        self.parse(message)
    def flush(self):
        # Nothing is buffered; kept so this object passes as a file-like stream.
        pass
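if __name__ == '__main__':
    # Usage sketch (not part of the original source): StdoutParser passes as a
    # writable stream, so it can replace sys.stdout and forward every chunk
    # that print() emits to the callback.
    import sys
    captured = []
    sys.stdout = StdoutParser(parse=captured.append)
    try:
        print('hello')  # routed through StdoutParser.write
    finally:
        sys.stdout = sys.__stdout__
    print(captured)  # ['hello', '\n'] -- print issues two write calls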
| 19.1
| 41
| 0.596859
| 22
| 191
| 5
| 0.545455
| 0.245455
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.282723
| 191
| 9
| 42
| 21.222222
| 0.80292
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.428571
| false
| 0.142857
| 0
| 0
| 0.571429
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 6
|
fea2f39a857e56752085f38c07e868abcaecee63
| 141
|
py
|
Python
|
src/graph_onedrive/_main.py
|
npv12/graph-onedrive
|
fe0647e314704e4b4e211b3c4011475a337ed6b4
|
[
"BSD-3-Clause"
] | 6
|
2021-11-01T05:43:12.000Z
|
2022-02-20T08:19:12.000Z
|
src/graph_onedrive/_main.py
|
npv12/graph-onedrive
|
fe0647e314704e4b4e211b3c4011475a337ed6b4
|
[
"BSD-3-Clause"
] | 12
|
2021-10-17T03:44:12.000Z
|
2022-02-22T13:10:40.000Z
|
src/graph_onedrive/_main.py
|
npv12/graph-onedrive
|
fe0647e314704e4b4e211b3c4011475a337ed6b4
|
[
"BSD-3-Clause"
] | 8
|
2021-10-17T00:28:12.000Z
|
2022-03-27T17:14:34.000Z
|
"""OneDrive Class and Context Manager.
"""
from graph_onedrive._manager import OneDriveManager
from graph_onedrive._onedrive import OneDrive
| 28.2
| 51
| 0.836879
| 17
| 141
| 6.705882
| 0.529412
| 0.157895
| 0.298246
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.099291
| 141
| 4
| 52
| 35.25
| 0.897638
| 0.248227
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
22f9caa1a9f393e2eea0b284ae39a23fa8fb297f
| 289
|
py
|
Python
|
project/settings/components/rest-framework.py
|
gyukebox/drf-basic-settings
|
53c3df89b8f11169ee924c1c8000624384781c3a
|
[
"MIT"
] | null | null | null |
project/settings/components/rest-framework.py
|
gyukebox/drf-basic-settings
|
53c3df89b8f11169ee924c1c8000624384781c3a
|
[
"MIT"
] | null | null | null |
project/settings/components/rest-framework.py
|
gyukebox/drf-basic-settings
|
53c3df89b8f11169ee924c1c8000624384781c3a
|
[
"MIT"
] | null | null | null |
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
),
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.BasicAuthentication',
),
}
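# Effect of these defaults (sketch, assuming Django REST Framework is
# installed): a view that declares no permission_classes inherits
# IsAuthenticated, so anonymous requests are rejected before the handler runs:
#
#   from rest_framework.views import APIView
#   from rest_framework.response import Response
#
#   class PingView(APIView):
#       def get(self, request):
#           return Response({'user': request.user.username})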
| 28.9
| 62
| 0.712803
| 20
| 289
| 9.9
| 0.5
| 0.262626
| 0.20202
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.183391
| 289
| 9
| 63
| 32.111111
| 0.838983
| 0
| 0
| 0.222222
| 0
| 0
| 0.685121
| 0.685121
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
fe00bcca3f51369fa2df44af8e220e03d33ad1bf
| 168
|
py
|
Python
|
diff_cover/__init__.py
|
venmo/diff-cover
|
8a1a8faed377a32ff0d3c4a53b3bd4a646a97211
|
[
"Apache-2.0"
] | null | null | null |
diff_cover/__init__.py
|
venmo/diff-cover
|
8a1a8faed377a32ff0d3c4a53b3bd4a646a97211
|
[
"Apache-2.0"
] | null | null | null |
diff_cover/__init__.py
|
venmo/diff-cover
|
8a1a8faed377a32ff0d3c4a53b3bd4a646a97211
|
[
"Apache-2.0"
] | 1
|
2020-11-07T10:33:13.000Z
|
2020-11-07T10:33:13.000Z
|
VERSION = '0.8.6'
DESCRIPTION = 'Automatically find diff lines that need test coverage.'
QUALITY_DESCRIPTION = 'Automatically find diff lines with quality violations.'
| 42
| 78
| 0.791667
| 22
| 168
| 6
| 0.727273
| 0.363636
| 0.424242
| 0.484848
| 0.560606
| 0
| 0
| 0
| 0
| 0
| 0
| 0.020548
| 0.130952
| 168
| 3
| 79
| 56
| 0.883562
| 0
| 0
| 0
| 0
| 0
| 0.672619
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a3d26d4dd853baf2ee08643311b3f1c746002253
| 74
|
py
|
Python
|
markdowngenerator/__init__.py
|
YLRong/python-markdown-generator
|
c96a4d467f28d0ee8f64e74e49dea91bc21c792c
|
[
"Apache-2.0"
] | null | null | null |
markdowngenerator/__init__.py
|
YLRong/python-markdown-generator
|
c96a4d467f28d0ee8f64e74e49dea91bc21c792c
|
[
"Apache-2.0"
] | null | null | null |
markdowngenerator/__init__.py
|
YLRong/python-markdown-generator
|
c96a4d467f28d0ee8f64e74e49dea91bc21c792c
|
[
"Apache-2.0"
] | 2
|
2020-09-21T13:38:33.000Z
|
2021-03-14T14:04:17.000Z
|
from .conf import *
from .syntax import *
from .markdowngenerator import *
| 24.666667
| 32
| 0.77027
| 9
| 74
| 6.333333
| 0.555556
| 0.350877
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148649
| 74
| 3
| 32
| 24.666667
| 0.904762
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
430513d471ea1200ace874ffa5b0eeb44c2f2857
| 44
|
py
|
Python
|
changedetectionio/tests/fetchers/conftest.py
|
MXPicture/changedetection.io
|
2566de2aaeecec8146c929d9a18be76e4c369cdc
|
[
"Apache-2.0"
] | null | null | null |
changedetectionio/tests/fetchers/conftest.py
|
MXPicture/changedetection.io
|
2566de2aaeecec8146c929d9a18be76e4c369cdc
|
[
"Apache-2.0"
] | 6
|
2022-01-25T17:01:21.000Z
|
2022-03-04T13:20:31.000Z
|
changedetectionio/tests/fetchers/conftest.py
|
MXPicture/changedetection.io
|
2566de2aaeecec8146c929d9a18be76e4c369cdc
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python3
from .. import conftest
| 11
| 23
| 0.704545
| 6
| 44
| 5.166667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.026316
| 0.136364
| 44
| 3
| 24
| 14.666667
| 0.789474
| 0.386364
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4341fcde5ea87b071d1f155aa2aad5e31eb2a213
| 81
|
py
|
Python
|
tests/7_pytest_hooks/test_hooks_example.py
|
andrewnnov/otus_qa
|
e869b99e4860df9ecc358d7e491dbbbe94fffb75
|
[
"MIT"
] | 4
|
2020-03-01T16:35:23.000Z
|
2021-03-03T18:57:26.000Z
|
tests/7_pytest_hooks/test_hooks_example.py
|
andrewnnov/otus_qa
|
e869b99e4860df9ecc358d7e491dbbbe94fffb75
|
[
"MIT"
] | null | null | null |
tests/7_pytest_hooks/test_hooks_example.py
|
andrewnnov/otus_qa
|
e869b99e4860df9ecc358d7e491dbbbe94fffb75
|
[
"MIT"
] | 17
|
2019-09-06T10:54:28.000Z
|
2022-03-24T09:35:24.000Z
|
def test_one():
pass
def test_two():
pass
def test_three():
pass
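# Usage sketch (assumed, not part of the original file): these empty tests are
# typically paired with hooks in a neighbouring conftest.py, for example:
#
#   def pytest_runtest_setup(item):
#       print('setting up', item.name)
#
# Running `pytest -s` then shows the hook firing before each of the three
# tests above.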
| 7.363636
| 17
| 0.580247
| 12
| 81
| 3.666667
| 0.5
| 0.477273
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.308642
| 81
| 10
| 18
| 8.1
| 0.785714
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
434733e18a950600951a3ce5406253eb2da07a55
| 3,619
|
py
|
Python
|
src/ikshana/models/CIFAR10/model_Depth.py
|
abdksyed/ikshana
|
c4213b95758401fbcde905e2709b531cc614b92c
|
[
"MIT"
] | 1
|
2021-08-05T13:58:27.000Z
|
2021-08-05T13:58:27.000Z
|
src/ikshana/models/CIFAR10/model_Depth.py
|
varsha-raveendran/ikshana
|
c4213b95758401fbcde905e2709b531cc614b92c
|
[
"MIT"
] | null | null | null |
src/ikshana/models/CIFAR10/model_Depth.py
|
varsha-raveendran/ikshana
|
c4213b95758401fbcde905e2709b531cc614b92c
|
[
"MIT"
] | 2
|
2021-07-07T13:10:11.000Z
|
2022-02-22T06:02:19.000Z
|
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.layer1 = nn.Sequential(
# 32x32x3 -> 32x32x32
nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(num_features=32),
nn.ReLU(),
# 32x32x32 -> 32x32x32
nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(num_features=32),
nn.ReLU(),
# 32x32x32 -> 32x32x32
nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3, padding=1, groups=32, bias=False),
nn.BatchNorm2d(num_features=32),
nn.ReLU(),
# 32x32x32 -> 16x16x32
nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3, padding=1, stride=2, bias=False),
nn.BatchNorm2d(num_features=32),
nn.ReLU(),
)
self.layer2 = nn.Sequential(
# 16x16x32 -> 16x16x32
nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(num_features=32),
nn.ReLU(),
# 16x16x32 -> 16x16x32
nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(num_features=32),
nn.ReLU(),
# 16x16x32 -> 16x16x32
nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3, padding=1, groups=32, bias=False),
nn.BatchNorm2d(num_features=32),
nn.ReLU(),
# 16x16x32 -> 8x8x32
nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3, padding=1, stride=2, bias=False),
nn.BatchNorm2d(num_features=32),
nn.ReLU(),
)
self.layer3 = nn.Sequential(
# 8x8x32 -> 8x8x32
nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(num_features=32),
nn.ReLU(),
# 8x8x32 -> 8x8x32
nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(num_features=32),
nn.ReLU(),
# 8x8x32 -> 8x8x32
nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3, padding=1, groups=32, bias=False),
nn.BatchNorm2d(num_features=32),
nn.ReLU(),
# 8x8x32-> 4x4x32
nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3, padding=1, stride=2, bias=False),
nn.BatchNorm2d(num_features=32),
nn.ReLU(),
)
self.layer4 = nn.Sequential(
# 4x4x32 -> 4x4x32
nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(num_features=32),
nn.ReLU(),
# 4x4x32 -> 4x4x32
nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(num_features=32),
nn.ReLU(),
# 4x4x32 -> 1x1x32
nn.AdaptiveAvgPool2d(1)
)
self.classifier = nn.Sequential(
nn.Conv2d(in_channels=32, out_channels=64, kernel_size=1),
nn.Conv2d(in_channels=64, out_channels=10, kernel_size=1)
)
def forward(self, x):
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.classifier(x)
x = x.view(-1,10)
return F.log_softmax(x, dim=1)
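if __name__ == '__main__':
    # Shape check (sketch, not part of the original source): CIFAR-10 inputs
    # are 3x32x32 and the head emits 10 log-probabilities per image.
    import torch
    net = Net()
    net.eval()
    batch = torch.randn(4, 3, 32, 32)
    with torch.no_grad():
        out = net(batch)
    print(out.shape)  # expected: torch.Size([4, 10])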
| 38.913978
| 104
| 0.564797
| 453
| 3,619
| 4.355408
| 0.119205
| 0.141916
| 0.081095
| 0.145971
| 0.802838
| 0.78966
| 0.78966
| 0.773948
| 0.773948
| 0.773948
| 0
| 0.128891
| 0.307544
| 3,619
| 93
| 105
| 38.913978
| 0.65842
| 0.078198
| 0
| 0.602941
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.029412
| false
| 0
| 0.029412
| 0
| 0.088235
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4a59b6f1be3212182c53eca463b1c1d135223122
| 229
|
py
|
Python
|
assets/Information.py
|
EnderNightLord-ChromeBook/Juice-File-Manager
|
d408586df6d4f613bbf72171460befcaefb9abce
|
[
"Apache-2.0"
] | 1
|
2017-09-29T00:09:50.000Z
|
2017-09-29T00:09:50.000Z
|
assets/Information.py
|
EnderNightLord-ChromeBook/Juice-File-Manager
|
d408586df6d4f613bbf72171460befcaefb9abce
|
[
"Apache-2.0"
] | 1
|
2019-03-28T22:03:25.000Z
|
2019-09-02T06:19:43.000Z
|
assets/Information.py
|
EnderNightLord-ChromeBook/Juice-File-Manager
|
d408586df6d4f613bbf72171460befcaefb9abce
|
[
"Apache-2.0"
] | null | null | null |
print('''
=======================================
Detailed Build Information
=======================================
Made On:GNU/LINUX
Date Created: Sun, 10, Feb
Thanks To:Python Fans
=======================================
''')
| 22.9
| 39
| 0.349345
| 17
| 229
| 4.705882
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009479
| 0.078603
| 229
| 9
| 40
| 25.444444
| 0.369668
| 0
| 0
| 0.333333
| 0
| 0
| 0.938865
| 0.510917
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0.111111
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4a7cd5c7b1221b5757793ed96146afbaa825ded3
| 123
|
py
|
Python
|
modules/module_tester.py
|
rolandovillca/python_introduction_basic
|
d1333a832a9e2b103e128a9dfc0c0da0952c267f
|
[
"MIT"
] | null | null | null |
modules/module_tester.py
|
rolandovillca/python_introduction_basic
|
d1333a832a9e2b103e128a9dfc0c0da0952c267f
|
[
"MIT"
] | null | null | null |
modules/module_tester.py
|
rolandovillca/python_introduction_basic
|
d1333a832a9e2b103e128a9dfc0c0da0952c267f
|
[
"MIT"
] | null | null | null |
import database_module
# __version__, __version_info__ = database_module.get_version(__file__)
print(dir(database_module))
| 24.6
| 71
| 0.853659
| 15
| 123
| 5.866667
| 0.6
| 0.477273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.081301
| 123
| 5
| 72
| 24.6
| 0.778761
| 0.560976
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.5
| null | null | 0.5
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
4a864a1316ca5267d0159b11f14bbef5e0eb946f
| 78
|
py
|
Python
|
zip_build.py
|
denisrebrof/yandex-games-hc-sample
|
a7b03c057cd9980c13a2c7e0a0f33f011418fa58
|
[
"Apache-2.0"
] | null | null | null |
zip_build.py
|
denisrebrof/yandex-games-hc-sample
|
a7b03c057cd9980c13a2c7e0a0f33f011418fa58
|
[
"Apache-2.0"
] | null | null | null |
zip_build.py
|
denisrebrof/yandex-games-hc-sample
|
a7b03c057cd9980c13a2c7e0a0f33f011418fa58
|
[
"Apache-2.0"
] | null | null | null |
import shutil
import sys
shutil.make_archive(sys.argv[2], "zip", sys.argv[1])
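# Invocation sketch (paths are hypothetical): the first argument is the
# directory to archive, the second the output name without the .zip suffix:
#   python zip_build.py Builds/WebGL webgl_build
# This writes webgl_build.zip in the current working directory.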
| 19.5
| 52
| 0.74359
| 14
| 78
| 4.071429
| 0.642857
| 0.245614
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.028169
| 0.089744
| 78
| 4
| 52
| 19.5
| 0.774648
| 0
| 0
| 0
| 0
| 0
| 0.037975
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|