max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
superscript/ftp/ftp_login.py | AngeIo/projet_python_netway | 0 | 6619151 | # Ce script permet de se connecter sur le serveur FTP pour les 3 clients FTP AC distants
from ftplib import FTP
# Importe nos fonctions utiles
import sys
sys.path.insert(0, '../')
from utils import func
# Importer les variables globales
import settings
# Charge les paramètres
settings.init()
def login():
# Les paramètres de connexion au serveur FTP
ftp_host = '127.0.0.1'
ftp_login = 'Laurent'
ftp_password = '<PASSWORD>'
# Connexion au serveur pour chacun des users
try:
ftp = FTP(ftp_host, ftp_login, ftp_password)
print(ftp.getwelcome()) # Message de bienvenue
return ftp
except Exception as e:
print("/!\ Error occured while login /!\ \n", e)
return 1
| # Ce script permet de se connecter sur le serveur FTP pour les 3 clients FTP AC distants
from ftplib import FTP
# Importe nos fonctions utiles
import sys
sys.path.insert(0, '../')
from utils import func
# Importer les variables globales
import settings
# Charge les paramètres
settings.init()
def login():
# Les paramètres de connexion au serveur FTP
ftp_host = '127.0.0.1'
ftp_login = 'Laurent'
ftp_password = '<PASSWORD>'
# Connexion au serveur pour chacun des users
try:
ftp = FTP(ftp_host, ftp_login, ftp_password)
print(ftp.getwelcome()) # Message de bienvenue
return ftp
except Exception as e:
print("/!\ Error occured while login /!\ \n", e)
return 1
| fr | 0.930501 | # Ce script permet de se connecter sur le serveur FTP pour les 3 clients FTP AC distants # Importe nos fonctions utiles # Importer les variables globales # Charge les paramètres # Les paramètres de connexion au serveur FTP # Connexion au serveur pour chacun des users # Message de bienvenue | 3.215023 | 3 |
alibi_detect/cd/sklearn/tests/test_classifier_sklearn.py | sugatoray/alibi-detect | 1 | 6619152 | import pytest
import numpy as np
from typing import Union
from alibi_detect.cd.sklearn.classifier import ClassifierDriftSklearn
from sklearn.svm import SVC
from sklearn.svm import LinearSVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, AdaBoostClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
# test List[Any] inputs to the detector
def identity_fn(x: Union[np.ndarray, list]) -> np.ndarray:
if isinstance(x, list):
return np.array(x)
else:
return x
@pytest.mark.parametrize('model, use_calibration, calibration_kwargs', [
(LogisticRegression(max_iter=10000), False, None),
(SVC(max_iter=10000, probability=True), False, None),
(LinearSVC(max_iter=10000), True, {'method': 'sigmoid'}),
(LinearSVC(max_iter=10000), True, {'method': 'isotonic'}),
(DecisionTreeClassifier(), False, None),
(RandomForestClassifier(n_estimators=50), False, None),
(GradientBoostingClassifier(n_estimators=50), False, None)
])
@pytest.mark.parametrize('preds_type', ['probs'])
@pytest.mark.parametrize('p_val', [0.05])
@pytest.mark.parametrize('n', [1000])
@pytest.mark.parametrize('n_features', [4])
@pytest.mark.parametrize('binarize_preds', [True, False])
@pytest.mark.parametrize('n_folds', [None, 2])
@pytest.mark.parametrize('train_size', [0.5])
@pytest.mark.parametrize('preprocess_batch', [None, identity_fn])
@pytest.mark.parametrize('update_x_ref', [{'last': 1000}, {'reservoir_sampling': 1000}])
def test_clfdrift_calibration(model, preds_type, p_val, n, n_features, binarize_preds, n_folds, train_size,
preprocess_batch, update_x_ref, use_calibration, calibration_kwargs):
np.random.seed(0)
x_ref = np.random.randn(n, n_features)
x_test0 = np.random.randn(n, n_features)
x_test1 = np.random.randn(n, n_features) + 1
to_list = False
if preprocess_batch is not None:
to_list = True
x_ref = [_ for _ in x_ref]
update_x_ref = None
cd = ClassifierDriftSklearn(
x_ref=x_ref,
model=model,
preds_type=preds_type,
p_val=p_val,
update_x_ref=update_x_ref,
train_size=train_size,
n_folds=n_folds,
binarize_preds=binarize_preds,
use_calibration=use_calibration,
calibration_kwargs=calibration_kwargs
)
if to_list:
x_test0 = [_ for _ in x_test0]
preds_0 = cd.predict(x_test0)
assert cd.n == len(x_test0) + len(x_ref)
assert preds_0['data']['is_drift'] == 0
assert preds_0['data']['distance'] >= 0
if to_list:
x_test1 = [_ for _ in x_test1]
preds_1 = cd.predict(x_test1)
assert cd.n == len(x_test1) + len(x_test0) + len(x_ref)
assert preds_1['data']['is_drift'] == 1
assert preds_1['data']['distance'] >= 0
assert preds_0['data']['distance'] < preds_1['data']['distance']
assert cd.meta['params']['preds_type'] == 'probs'
assert cd.meta['params']['binarize_preds '] == binarize_preds
@pytest.mark.parametrize('model', [LinearSVC(max_iter=10000),
AdaBoostClassifier(),
QuadraticDiscriminantAnalysis(),
LogisticRegression(),
GradientBoostingClassifier()])
@pytest.mark.parametrize('p_val', [0.05])
@pytest.mark.parametrize('n', [500, 1000])
@pytest.mark.parametrize('n_features', [4])
@pytest.mark.parametrize('binarize_preds', [False])
@pytest.mark.parametrize('n_folds', [2, 5])
@pytest.mark.parametrize('preds_type', ['scores'])
def test_clfdrift_scores(model, p_val, n, n_features, binarize_preds, n_folds, preds_type):
np.random.seed(0)
x_ref = np.random.randn(n, n_features)
x_test0 = np.random.randn(n, n_features)
x_test1 = np.random.randn(n, n_features) + 1
cd = ClassifierDriftSklearn(
x_ref=x_ref,
preds_type=preds_type,
model=model,
p_val=p_val,
n_folds=n_folds,
binarize_preds=binarize_preds,
)
preds_0 = cd.predict(x_test0)
assert cd.n == len(x_test0) + len(x_ref)
assert preds_0['data']['is_drift'] == 0
assert preds_0['data']['distance'] >= 0
preds_1 = cd.predict(x_test1)
assert cd.n == len(x_test1) + len(x_test0) + len(x_ref)
assert preds_1['data']['is_drift'] == 1
assert preds_1['data']['distance'] >= 0
assert preds_0['data']['distance'] < preds_1['data']['distance']
assert cd.meta['params']['preds_type'] == 'scores'
assert cd.meta['params']['binarize_preds '] == binarize_preds
@pytest.mark.parametrize('model', [SVC(probability=False), LinearSVC()])
@pytest.mark.parametrize('preds_type', ['probs'])
@pytest.mark.parametrize('use_calibration', [False])
@pytest.mark.parametrize('binarize_preds', [False])
def test_clone1(model, preds_type, use_calibration, binarize_preds):
# should raise an error because the models do NOT support `predict_proba`, `use_calibration=False`
# and we are interested in the probabilities due to `binarize_preds=False`
with pytest.raises(AttributeError):
ClassifierDriftSklearn(x_ref=np.random.randn(100, 5),
model=model,
preds_type=preds_type,
use_calibration=use_calibration,
binarize_preds=binarize_preds)
@pytest.mark.parametrize('model', [SVC(probability=False),
LinearSVC(),
LogisticRegression(),
DecisionTreeClassifier(),
RandomForestClassifier(),
AdaBoostClassifier(),
GaussianNB(),
QuadraticDiscriminantAnalysis()])
@pytest.mark.parametrize('preds_type', ['probs'])
@pytest.mark.parametrize('use_calibration', [False])
@pytest.mark.parametrize('binarize_preds', [True])
def test_clone2(model, preds_type, use_calibration, binarize_preds):
# should not raise an error because `binarize_preds=True` and we only need access to `predict` method.
ClassifierDriftSklearn(x_ref=np.random.randn(100, 5),
model=model,
preds_type=preds_type,
use_calibration=use_calibration,
binarize_preds=binarize_preds)
@pytest.mark.parametrize('model', [SVC(probability=False),
LinearSVC(),
LogisticRegression(),
DecisionTreeClassifier(),
RandomForestClassifier(),
AdaBoostClassifier(),
GaussianNB(),
QuadraticDiscriminantAnalysis()])
@pytest.mark.parametrize('preds_type', ['probs'])
@pytest.mark.parametrize('use_calibration', [True])
@pytest.mark.parametrize('binarize_preds', [False, True])
def test_clone3(model, preds_type, use_calibration, binarize_preds):
# should NOT raise an error because of the `use_calibration=True` which makes possible `preds_types='probs'`
ClassifierDriftSklearn(x_ref=np.random.randn(100, 5),
model=model,
preds_type=preds_type,
use_calibration=use_calibration,
binarize_preds=binarize_preds)
@pytest.mark.parametrize('model', [DecisionTreeClassifier(),
RandomForestClassifier(),
KNeighborsClassifier(),
GaussianProcessClassifier(),
MLPClassifier(),
GaussianNB()])
@pytest.mark.parametrize('preds_type', ['scores'])
@pytest.mark.parametrize('use_calibration', [False, True])
@pytest.mark.parametrize('binarize_preds', [False])
def test_clone4(model, preds_type, use_calibration, binarize_preds):
# should raise an error because the classifiers do not support decision function
with pytest.raises(AttributeError):
ClassifierDriftSklearn(x_ref=np.random.randn(100, 5),
model=model,
preds_type=preds_type,
use_calibration=use_calibration,
binarize_preds=binarize_preds)
@pytest.mark.parametrize('model', [DecisionTreeClassifier(),
RandomForestClassifier(),
KNeighborsClassifier(),
GaussianProcessClassifier(),
MLPClassifier(),
GaussianNB()])
@pytest.mark.parametrize('preds_type', ['scores'])
@pytest.mark.parametrize('use_calibration', [False, True])
@pytest.mark.parametrize('binarize_preds', [True])
def test_clone5(model, preds_type, use_calibration, binarize_preds):
# should raise an error because of `binarize_preds=True` which conflicts with `preds_types='scores'`
with pytest.raises(ValueError):
ClassifierDriftSklearn(x_ref=np.random.randn(100, 5),
model=model,
preds_type=preds_type,
use_calibration=use_calibration,
binarize_preds=binarize_preds)
@pytest.mark.parametrize('model', [SVC(probability=False), LinearSVC()])
@pytest.mark.parametrize('preds_type', ['probs'])
@pytest.mark.parametrize('use_calibration', [False])
@pytest.mark.parametrize('binarize_preds', [True])
def test_predict_proba1(model, preds_type, use_calibration, binarize_preds):
drift_detector = ClassifierDriftSklearn(x_ref=np.random.randn(100, 5),
model=model,
preds_type=preds_type,
use_calibration=use_calibration,
binarize_preds=binarize_preds)
# define train and test set for internal model
x_tr, y_tr = np.random.randn(100, 5), np.random.randint(0, 2, 100)
x_te = np.random.randn(100, 5)
# extract and fit internal model
internal_model = drift_detector.model
internal_model.fit(x_tr, y_tr)
# check if predict matches the new predict_proba
np.testing.assert_allclose(internal_model.predict(x_te),
internal_model.aux_predict_proba(x_te)[:, 1])
@pytest.mark.parametrize('model', [LogisticRegression(),
GradientBoostingClassifier(),
AdaBoostClassifier(),
QuadraticDiscriminantAnalysis()])
@pytest.mark.parametrize('pred_types', ['scores'])
@pytest.mark.parametrize('use_calibration', [False])
@pytest.mark.parametrize('binarize_preds', [False])
def test_predict_proba2(model, pred_types, use_calibration, binarize_preds):
drift_detector = ClassifierDriftSklearn(x_ref=np.random.randn(100, 5),
model=model,
preds_type=pred_types,
use_calibration=use_calibration,
binarize_preds=binarize_preds)
# define train and test set for internal model
x_tr, y_tr = np.random.randn(100, 5), np.random.randint(0, 2, 100)
x_te = np.random.randn(100, 5)
# extract and fit internal model
internal_model = drift_detector.model
internal_model.fit(x_tr, y_tr)
# check if predict matches the new predict_proba
np.testing.assert_allclose(internal_model.decision_function(x_te),
internal_model.aux_predict_proba(x_te)[:, 1])
| import pytest
import numpy as np
from typing import Union
from alibi_detect.cd.sklearn.classifier import ClassifierDriftSklearn
from sklearn.svm import SVC
from sklearn.svm import LinearSVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, AdaBoostClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
# test List[Any] inputs to the detector
def identity_fn(x: Union[np.ndarray, list]) -> np.ndarray:
if isinstance(x, list):
return np.array(x)
else:
return x
@pytest.mark.parametrize('model, use_calibration, calibration_kwargs', [
(LogisticRegression(max_iter=10000), False, None),
(SVC(max_iter=10000, probability=True), False, None),
(LinearSVC(max_iter=10000), True, {'method': 'sigmoid'}),
(LinearSVC(max_iter=10000), True, {'method': 'isotonic'}),
(DecisionTreeClassifier(), False, None),
(RandomForestClassifier(n_estimators=50), False, None),
(GradientBoostingClassifier(n_estimators=50), False, None)
])
@pytest.mark.parametrize('preds_type', ['probs'])
@pytest.mark.parametrize('p_val', [0.05])
@pytest.mark.parametrize('n', [1000])
@pytest.mark.parametrize('n_features', [4])
@pytest.mark.parametrize('binarize_preds', [True, False])
@pytest.mark.parametrize('n_folds', [None, 2])
@pytest.mark.parametrize('train_size', [0.5])
@pytest.mark.parametrize('preprocess_batch', [None, identity_fn])
@pytest.mark.parametrize('update_x_ref', [{'last': 1000}, {'reservoir_sampling': 1000}])
def test_clfdrift_calibration(model, preds_type, p_val, n, n_features, binarize_preds, n_folds, train_size,
preprocess_batch, update_x_ref, use_calibration, calibration_kwargs):
np.random.seed(0)
x_ref = np.random.randn(n, n_features)
x_test0 = np.random.randn(n, n_features)
x_test1 = np.random.randn(n, n_features) + 1
to_list = False
if preprocess_batch is not None:
to_list = True
x_ref = [_ for _ in x_ref]
update_x_ref = None
cd = ClassifierDriftSklearn(
x_ref=x_ref,
model=model,
preds_type=preds_type,
p_val=p_val,
update_x_ref=update_x_ref,
train_size=train_size,
n_folds=n_folds,
binarize_preds=binarize_preds,
use_calibration=use_calibration,
calibration_kwargs=calibration_kwargs
)
if to_list:
x_test0 = [_ for _ in x_test0]
preds_0 = cd.predict(x_test0)
assert cd.n == len(x_test0) + len(x_ref)
assert preds_0['data']['is_drift'] == 0
assert preds_0['data']['distance'] >= 0
if to_list:
x_test1 = [_ for _ in x_test1]
preds_1 = cd.predict(x_test1)
assert cd.n == len(x_test1) + len(x_test0) + len(x_ref)
assert preds_1['data']['is_drift'] == 1
assert preds_1['data']['distance'] >= 0
assert preds_0['data']['distance'] < preds_1['data']['distance']
assert cd.meta['params']['preds_type'] == 'probs'
assert cd.meta['params']['binarize_preds '] == binarize_preds
@pytest.mark.parametrize('model', [LinearSVC(max_iter=10000),
AdaBoostClassifier(),
QuadraticDiscriminantAnalysis(),
LogisticRegression(),
GradientBoostingClassifier()])
@pytest.mark.parametrize('p_val', [0.05])
@pytest.mark.parametrize('n', [500, 1000])
@pytest.mark.parametrize('n_features', [4])
@pytest.mark.parametrize('binarize_preds', [False])
@pytest.mark.parametrize('n_folds', [2, 5])
@pytest.mark.parametrize('preds_type', ['scores'])
def test_clfdrift_scores(model, p_val, n, n_features, binarize_preds, n_folds, preds_type):
np.random.seed(0)
x_ref = np.random.randn(n, n_features)
x_test0 = np.random.randn(n, n_features)
x_test1 = np.random.randn(n, n_features) + 1
cd = ClassifierDriftSklearn(
x_ref=x_ref,
preds_type=preds_type,
model=model,
p_val=p_val,
n_folds=n_folds,
binarize_preds=binarize_preds,
)
preds_0 = cd.predict(x_test0)
assert cd.n == len(x_test0) + len(x_ref)
assert preds_0['data']['is_drift'] == 0
assert preds_0['data']['distance'] >= 0
preds_1 = cd.predict(x_test1)
assert cd.n == len(x_test1) + len(x_test0) + len(x_ref)
assert preds_1['data']['is_drift'] == 1
assert preds_1['data']['distance'] >= 0
assert preds_0['data']['distance'] < preds_1['data']['distance']
assert cd.meta['params']['preds_type'] == 'scores'
assert cd.meta['params']['binarize_preds '] == binarize_preds
@pytest.mark.parametrize('model', [SVC(probability=False), LinearSVC()])
@pytest.mark.parametrize('preds_type', ['probs'])
@pytest.mark.parametrize('use_calibration', [False])
@pytest.mark.parametrize('binarize_preds', [False])
def test_clone1(model, preds_type, use_calibration, binarize_preds):
# should raise an error because the models do NOT support `predict_proba`, `use_calibration=False`
# and we are interested in the probabilities due to `binarize_preds=False`
with pytest.raises(AttributeError):
ClassifierDriftSklearn(x_ref=np.random.randn(100, 5),
model=model,
preds_type=preds_type,
use_calibration=use_calibration,
binarize_preds=binarize_preds)
@pytest.mark.parametrize('model', [SVC(probability=False),
LinearSVC(),
LogisticRegression(),
DecisionTreeClassifier(),
RandomForestClassifier(),
AdaBoostClassifier(),
GaussianNB(),
QuadraticDiscriminantAnalysis()])
@pytest.mark.parametrize('preds_type', ['probs'])
@pytest.mark.parametrize('use_calibration', [False])
@pytest.mark.parametrize('binarize_preds', [True])
def test_clone2(model, preds_type, use_calibration, binarize_preds):
# should not raise an error because `binarize_preds=True` and we only need access to `predict` method.
ClassifierDriftSklearn(x_ref=np.random.randn(100, 5),
model=model,
preds_type=preds_type,
use_calibration=use_calibration,
binarize_preds=binarize_preds)
@pytest.mark.parametrize('model', [SVC(probability=False),
LinearSVC(),
LogisticRegression(),
DecisionTreeClassifier(),
RandomForestClassifier(),
AdaBoostClassifier(),
GaussianNB(),
QuadraticDiscriminantAnalysis()])
@pytest.mark.parametrize('preds_type', ['probs'])
@pytest.mark.parametrize('use_calibration', [True])
@pytest.mark.parametrize('binarize_preds', [False, True])
def test_clone3(model, preds_type, use_calibration, binarize_preds):
# should NOT raise an error because of the `use_calibration=True` which makes possible `preds_types='probs'`
ClassifierDriftSklearn(x_ref=np.random.randn(100, 5),
model=model,
preds_type=preds_type,
use_calibration=use_calibration,
binarize_preds=binarize_preds)
@pytest.mark.parametrize('model', [DecisionTreeClassifier(),
RandomForestClassifier(),
KNeighborsClassifier(),
GaussianProcessClassifier(),
MLPClassifier(),
GaussianNB()])
@pytest.mark.parametrize('preds_type', ['scores'])
@pytest.mark.parametrize('use_calibration', [False, True])
@pytest.mark.parametrize('binarize_preds', [False])
def test_clone4(model, preds_type, use_calibration, binarize_preds):
# should raise an error because the classifiers do not support decision function
with pytest.raises(AttributeError):
ClassifierDriftSklearn(x_ref=np.random.randn(100, 5),
model=model,
preds_type=preds_type,
use_calibration=use_calibration,
binarize_preds=binarize_preds)
@pytest.mark.parametrize('model', [DecisionTreeClassifier(),
RandomForestClassifier(),
KNeighborsClassifier(),
GaussianProcessClassifier(),
MLPClassifier(),
GaussianNB()])
@pytest.mark.parametrize('preds_type', ['scores'])
@pytest.mark.parametrize('use_calibration', [False, True])
@pytest.mark.parametrize('binarize_preds', [True])
def test_clone5(model, preds_type, use_calibration, binarize_preds):
# should raise an error because of `binarize_preds=True` which conflicts with `preds_types='scores'`
with pytest.raises(ValueError):
ClassifierDriftSklearn(x_ref=np.random.randn(100, 5),
model=model,
preds_type=preds_type,
use_calibration=use_calibration,
binarize_preds=binarize_preds)
@pytest.mark.parametrize('model', [SVC(probability=False), LinearSVC()])
@pytest.mark.parametrize('preds_type', ['probs'])
@pytest.mark.parametrize('use_calibration', [False])
@pytest.mark.parametrize('binarize_preds', [True])
def test_predict_proba1(model, preds_type, use_calibration, binarize_preds):
drift_detector = ClassifierDriftSklearn(x_ref=np.random.randn(100, 5),
model=model,
preds_type=preds_type,
use_calibration=use_calibration,
binarize_preds=binarize_preds)
# define train and test set for internal model
x_tr, y_tr = np.random.randn(100, 5), np.random.randint(0, 2, 100)
x_te = np.random.randn(100, 5)
# extract and fit internal model
internal_model = drift_detector.model
internal_model.fit(x_tr, y_tr)
# check if predict matches the new predict_proba
np.testing.assert_allclose(internal_model.predict(x_te),
internal_model.aux_predict_proba(x_te)[:, 1])
@pytest.mark.parametrize('model', [LogisticRegression(),
GradientBoostingClassifier(),
AdaBoostClassifier(),
QuadraticDiscriminantAnalysis()])
@pytest.mark.parametrize('pred_types', ['scores'])
@pytest.mark.parametrize('use_calibration', [False])
@pytest.mark.parametrize('binarize_preds', [False])
def test_predict_proba2(model, pred_types, use_calibration, binarize_preds):
drift_detector = ClassifierDriftSklearn(x_ref=np.random.randn(100, 5),
model=model,
preds_type=pred_types,
use_calibration=use_calibration,
binarize_preds=binarize_preds)
# define train and test set for internal model
x_tr, y_tr = np.random.randn(100, 5), np.random.randint(0, 2, 100)
x_te = np.random.randn(100, 5)
# extract and fit internal model
internal_model = drift_detector.model
internal_model.fit(x_tr, y_tr)
# check if predict matches the new predict_proba
np.testing.assert_allclose(internal_model.decision_function(x_te),
internal_model.aux_predict_proba(x_te)[:, 1])
| en | 0.735543 | # test List[Any] inputs to the detector # should raise an error because the models do NOT support `predict_proba`, `use_calibration=False` # and we are interested in the probabilities due to `binarize_preds=False` # should not raise an error because `binarize_preds=True` and we only need access to `predict` method. # should NOT raise an error because of the `use_calibration=True` which makes possible `preds_types='probs'` # should raise an error because the classifiers do not support decision function # should raise an error because of `binarize_preds=True` which conflicts with `preds_types='scores'` # define train and test set for internal model # extract and fit internal model # check if predict matches the new predict_proba # define train and test set for internal model # extract and fit internal model # check if predict matches the new predict_proba | 2.144643 | 2 |
python3/june/day_23_Count Complete Tree Nodes.py | kashyapvinay/leetcode-challenge | 1 | 6619153 | <filename>python3/june/day_23_Count Complete Tree Nodes.py
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def countNodes(self, root: TreeNode) -> int:
if root is None:
return 0
lh, rh = 0, 0
left, right = root, root
while(left):
lh += 1
left = left.left
while(right):
rh += 1
right = right.right
if lh == rh:
return (1 << lh) - 1
return 1 + self.countNodes(root.left) + self.countNodes(root.right)
| <filename>python3/june/day_23_Count Complete Tree Nodes.py
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def countNodes(self, root: TreeNode) -> int:
if root is None:
return 0
lh, rh = 0, 0
left, right = root, root
while(left):
lh += 1
left = left.left
while(right):
rh += 1
right = right.right
if lh == rh:
return (1 << lh) - 1
return 1 + self.countNodes(root.left) + self.countNodes(root.right)
| en | 0.53741 | # Definition for a binary tree node. # class TreeNode: # def __init__(self, val=0, left=None, right=None): # self.val = val # self.left = left # self.right = right | 3.905795 | 4 |
app/app/api/utils/security.py | mutalimov95/fastapi-mongodb-example | 0 | 6619154 | import jwt
from fastapi import Depends, HTTPException, Security
from fastapi.security import OAuth2PasswordBearer
from jwt import PyJWTError
from starlette.status import HTTP_401_UNAUTHORIZED, HTTP_403_FORBIDDEN
from app import crud
from app.core.config import settings
from app.core.jwt import ALGORITHM
from app.models.user import User
from app.schemas.token import TokenPayload
reusable_oauth2 = OAuth2PasswordBearer(
tokenUrl=f"{settings.API_V1_STR}/auth/access-token",
scopes={"me1": "Read information about the current user.", "items": "Read items."},
)
def get_current_user(token: str = Security(reusable_oauth2)):
try:
payload = jwt.decode(token, settings.SECRET_KEY, algorithms=[ALGORITHM])
token_data = TokenPayload(**payload)
except PyJWTError:
raise HTTPException(
status_code=HTTP_403_FORBIDDEN, detail="Could not validate credentials"
)
user = crud.user.get(id=token_data.user_id)
if not user:
raise HTTPException(
status_code=HTTP_401_UNAUTHORIZED, detail="Could not validate credentials",
)
return user
def get_current_active_user(current_user: User = Depends(get_current_user)):
if not crud.user.is_active(current_user):
raise HTTPException(status_code=400, detail="Inactive user")
return current_user
def get_current_active_superuser(current_user: User = Security(get_current_user)):
if not crud.user.is_superuser(current_user):
raise HTTPException(
status_code=400, detail="The user doesn't have enough privileges"
)
return current_user
| import jwt
from fastapi import Depends, HTTPException, Security
from fastapi.security import OAuth2PasswordBearer
from jwt import PyJWTError
from starlette.status import HTTP_401_UNAUTHORIZED, HTTP_403_FORBIDDEN
from app import crud
from app.core.config import settings
from app.core.jwt import ALGORITHM
from app.models.user import User
from app.schemas.token import TokenPayload
reusable_oauth2 = OAuth2PasswordBearer(
tokenUrl=f"{settings.API_V1_STR}/auth/access-token",
scopes={"me1": "Read information about the current user.", "items": "Read items."},
)
def get_current_user(token: str = Security(reusable_oauth2)):
try:
payload = jwt.decode(token, settings.SECRET_KEY, algorithms=[ALGORITHM])
token_data = TokenPayload(**payload)
except PyJWTError:
raise HTTPException(
status_code=HTTP_403_FORBIDDEN, detail="Could not validate credentials"
)
user = crud.user.get(id=token_data.user_id)
if not user:
raise HTTPException(
status_code=HTTP_401_UNAUTHORIZED, detail="Could not validate credentials",
)
return user
def get_current_active_user(current_user: User = Depends(get_current_user)):
if not crud.user.is_active(current_user):
raise HTTPException(status_code=400, detail="Inactive user")
return current_user
def get_current_active_superuser(current_user: User = Security(get_current_user)):
if not crud.user.is_superuser(current_user):
raise HTTPException(
status_code=400, detail="The user doesn't have enough privileges"
)
return current_user
| none | 1 | 2.472703 | 2 | |
store/citizens/utils.py | Shamilv05/store | 6 | 6619155 | from flask import make_response
from datetime import datetime
JSON_MIME_TYPE = 'application/json'
def json_response(data='', status=201, headers=None):
headers = headers or {}
if 'Content-Type' not in headers:
headers['Content-Type'] = JSON_MIME_TYPE
return make_response(data, status, headers)
def calculate_age_arr(brth_days):
today = datetime.utcnow()
for index, value in enumerate(brth_days):
datetime_format = datetime.strptime(value, '%d.%m.%Y')
brth_days[index] = today.year - datetime_format.year - ((today.month, today.day) < (datetime_format.month, datetime_format.day))
return brth_days
| from flask import make_response
from datetime import datetime
JSON_MIME_TYPE = 'application/json'
def json_response(data='', status=201, headers=None):
headers = headers or {}
if 'Content-Type' not in headers:
headers['Content-Type'] = JSON_MIME_TYPE
return make_response(data, status, headers)
def calculate_age_arr(brth_days):
today = datetime.utcnow()
for index, value in enumerate(brth_days):
datetime_format = datetime.strptime(value, '%d.%m.%Y')
brth_days[index] = today.year - datetime_format.year - ((today.month, today.day) < (datetime_format.month, datetime_format.day))
return brth_days
| none | 1 | 2.997146 | 3 | |
qutipy/channels/__init__.py | arnavdas88/QuTIpy | 0 | 6619156 | <reponame>arnavdas88/QuTIpy
# This file is part of the QuTIpy package.
# https://github.com/sumeetkhatri/QuTIpy
#
# Copyright (c) 2022 <NAME>.
# --.- ..- - .. .--. -.--
#
#
# SPDX-License-Identifier: AGPL-3.0
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import itertools
import cvxpy as cvx
import numpy as np
from numpy.linalg import matrix_power
from scipy.linalg import eig
from qutipy.general_functions import (
Tr,
dag,
eye,
ket,
partial_trace,
syspermute,
tensor,
)
from qutipy.linalg import gram_schmidt
from qutipy.misc import cvxpy_to_numpy, numpy_to_cvxpy
from qutipy.Pauli import (
generate_nQubit_Pauli,
generate_nQubit_Pauli_X,
generate_nQubit_Pauli_Z,
)
from qutipy.states import MaxEnt_state, RandomStateVector
from qutipy.Weyl import discrete_Weyl_Z
def Choi_to_Natural(C_AB, dimA, dimB):
"""
Takes the Choi representation of a map and outputs its natural representation.
The Choi represenatation Q of the channel acts as:
vec(N(rho))=Q*vec(rho),
where N is the channel in question. It can be obtained from the Choi representation
with a simple reshuffling of indices.
"""
C_AB = np.array(C_AB)
return np.array(
np.reshape(C_AB, [dimA, dimB, dimA, dimB])
.transpose((0, 2, 1, 3))
.reshape([dimA * dimA, dimB * dimB])
).T
def bit_flip_channel(p):
"""
Generates the channel rho -> (1-p)*rho+p*X*rho*X.
"""
return Pauli_channel(p, 0, 0)
def completely_dephasing_channel(d):
"""
Generates the completely dephasing channel in d dimensions. This channel
eliminates the off-diagonal elements (in the standard basis) of the input operator.
"""
if d == 2:
p = 1 / 2
return dephasing_channel(p, d=d)[0]
else:
p = (1 / d) * np.ones(d)
return dephasing_channel(p, d=d)
def Kraus_representation(P, dimA, dimB):
"""
Takes a Choi representation P of a channel and returns its Kraus representation.
The Choi representation is defined with the channel acting on the second half of
the maximally entangled vector.
"""
D, U = eig(P)
U_cols = U.shape[1]
# Need to check if the matrix U generated by eig is unitary (up to
# numerical precision)
check1 = np.allclose(eye(dimA * dimB), U @ dag(U))
check2 = np.allclose(eye(dimA * dimB), dag(U) @ U)
if check1 and check2:
U = np.array(U)
# If U is not unitary, use Gram-Schmidt to make it unitary (i.e., make the
# columns of U orthonormal)
else:
C = gram_schmidt([U[:, i] for i in range(U_cols)], dimA * dimB)
U = np.sum([tensor(dag(ket(U_cols, i)), C[i]) for i in range(U_cols)], 0)
# print(U)
K = []
for i in range(U_cols):
Col = U[:, i]
K_tmp = np.array(np.sqrt(D[i]) * Col.reshape([dimA, dimB]))
K.append(K_tmp.transpose())
return K
def phase_damping_channel(p):
"""
Generates the phase damping channel.
"""
K1 = np.array([[1, 0], [0, np.sqrt(p)]])
K2 = np.array([[0, 0], [0, np.sqrt(1 - p)]])
return [K1, K2]
def generate_channel_isometry(K, dimA, dimB):
"""
Generates an isometric extension of the
channel specified by the Kraus operators K. dimA is the dimension of the
input space of the channel, and dimB is the dimension of the output space
of the channel. If dimA=dimB, then the function also outputs a unitary
extension of the channel given by a particular construction.
"""
dimE = len(K)
V = np.sum([tensor(K[i], ket(dimE, i)) for i in range(dimE)], 0)
if dimA == dimB:
# In this case, the unitary we generate has dimensions dimA*dimE x
# dimA*dimE
U = tensor(V, dag(ket(dimE, 0)))
states = [V @ ket(dimA, i) for i in range(dimA)]
for i in range(dimA * dimE - dimA):
states.append(RandomStateVector(dimA * dimE))
states_new = gram_schmidt(states, dimA * dimE)
count = dimA
for i in range(dimA):
for j in range(1, dimE):
U = U + tensor(states_new[count], dag(ket(dimA, i)), dag(ket(dimE, j)))
count += 1
return V, np.array(U)
else:
return V
def Pauli_channel_nQubit(n, p, alt_repr=False):
    """
    Generates the Kraus operators, an isometric extension, and a unitary
    extension of the n-qubit Pauli channel specified by the 2^(2*n) parameters in
    p, which must be probabilities in order for the map to be a channel. (i.e.,
    they must be non-negative and sum to one.)
    If alt_repr=True, then the channel is of the form
    P(rho)=\\sum_{a,b} p_{a,b} X^aZ^b(rho)Z^bX^a
    where a and b are n-bit strings
    (using the n-qubit X and Z operators as generated by the functions
    generate_nQubit_Pauli_X and generate_nQubit_Pauli_Z).

    Returns the tuple (K, V, U) in both representations.
    """
    K = []
    if not alt_repr:
        # Enumerate all 4^n tensor products of single-qubit Paulis
        # (0..3 selects I, X, Y, Z on each qubit).
        S = list(itertools.product(*[range(0, 4)] * n))
        for i in range(2 ** (2 * n)):
            K.append(np.sqrt(p[i]) * generate_nQubit_Pauli(list(S[i])))
        V, U = generate_channel_isometry(K, 2**n, 2**n)
        return K, V, U
    else:  # alt_repr==True
        # Enumerate all pairs (a, b) of n-bit strings for the X^a Z^b form;
        # count indexes p in lexicographic order of (a, b).
        S = list(itertools.product(*[range(0, 2)] * n))
        count = 0
        for a in S:
            a = list(a)
            for b in S:
                b = list(b)
                K.append(
                    np.sqrt(p[count])
                    * generate_nQubit_Pauli_X(a)
                    @ generate_nQubit_Pauli_Z(b)
                )
                count += 1
        V, U = generate_channel_isometry(K, 2**n, 2**n)
        return K, V, U
def apply_channel(K, rho, sys=None, dim=None, adjoint=False):
    """
    Applies the channel with Kraus operators in K to the state rho on
    systems specified by sys. The dimensions of the subsystems on which rho
    acts are given by dim.
    If adjoint is True, then this function applies the adjoint of the given
    channel.

    If sys is None, the channel acts on all of rho. Otherwise sys is the
    1-based index of the subsystem acted on, and dim lists the dimensions
    of all subsystems of rho.
    """
    if isinstance(rho, cvx.Variable):
        # Transparently support cvxpy variables: convert to numpy, apply the
        # channel, and convert the result back.
        rho = cvxpy_to_numpy(rho)
        rho_out = apply_channel(K, rho, sys, dim, adjoint)
        return numpy_to_cvxpy(rho_out)
    if adjoint:
        # The adjoint map has Kraus operators K_i^dagger.
        K_tmp = K
        K = []
        K = [dag(K_tmp[i]) for i in range(len(K_tmp))]
    if sys is None:
        return np.sum([K[i] @ rho @ dag(K[i]) for i in range(len(K))], 0)
    else:
        # Embed each Kraus operator as identity on every subsystem except sys.
        A = []
        for i in range(len(K)):
            X = 1
            for j in range(len(dim)):
                if j + 1 == sys:
                    X = tensor(X, K[i])
                else:
                    X = tensor(X, eye(dim[j]))
            A.append(X)
        return np.sum([A[i] @ rho @ dag(A[i]) for i in range(len(A))], 0)
def amplitude_damping_channel(gamma):
    """
    Generates the amplitude damping channel.

    Returns the two 2x2 Kraus operators for decay probability gamma: the
    no-jump operator (sqrt(1-gamma) on |1><1|) and the jump operator that
    maps |1> to |0> with amplitude sqrt(gamma).
    """
    decay = np.sqrt(gamma)
    survive = np.sqrt(1 - gamma)
    no_jump = np.array([[1, 0], [0, survive]])
    jump = np.array([[0, decay], [0, 0]])
    return [no_jump, jump]
def Natural_representation(K):
    """
    Calculates the natural representation of the channel (in the standard basis)
    given by the Kraus operators in K. In terms of the Kraus operators, the
    natural representation of the channel in the standard basis is

        N = sum_i K_i (x) conj(K_i),

    where the sum runs over the Kraus operators in K.
    """
    terms = [tensor(k, np.conjugate(k)) for k in K]
    return np.sum(terms, 0)
def BB84_channel(Q):
    """
    Generates the channel corresponding to the BB84 protocol with
    equal X and Z errors, given by the QBER Q. The definition of this
    channel can be found in:
        "Additive extensions of a quantum channel", by
        <NAME> and <NAME>. (arXiv:0712.2471)

    Returns the (K, V, U) tuple produced by Pauli_channel with
    px = pz = Q - Q^2 and py = Q^2.
    """
    return Pauli_channel(Q - Q**2, Q**2, Q - Q**2)
def Choi_representation(K, dimA):
    """
    Calculates the Choi representation of the map with Kraus operators K.
    dimA is the dimension of the input space of the channel.
    The Choi representation is defined with the channel acting on the second
    half of the maximally entangled vector.
    """
    # Unnormalized maximally entangled operator on two dimA-dimensional systems.
    Gamma = MaxEnt_state(dimA, normalized=False)
    # Use the builtin ``complex``: the ``np.complex`` alias was deprecated in
    # NumPy 1.20 and removed in NumPy 1.24, so the original line crashes on
    # modern NumPy.
    return np.array(apply_channel(K, Gamma, 2, [dimA, dimA]), dtype=complex)
def compose_channels(C):
    """
    Takes a composition of channels. The variable C should be a list of lists,
    with each list consisting of the Kraus operators of the channels to be
    composed. If C=[K1,K2,...,Kn], then this function returns the composition
    such that the channel corresponding to K1 is applied first, then K2, etc.
    """
    d = C[0][0].shape[0]
    index_ranges = [range(len(ops)) for ops in C]
    composed = []
    # One composite Kraus operator per choice of one operator from each channel.
    for choice in itertools.product(*index_ranges):
        op = eye(d)
        for stage, idx in enumerate(choice):
            # Later channels multiply on the left, so K1 acts first.
            op = C[stage][idx] @ op
        composed.append(op)
    return composed
def tensor_channels(C):
    """
    Takes the tensor product of the channels in C.
    C is a set of sets of Kraus operators.
    """
    index_ranges = [range(len(ops)) for ops in C]
    product_kraus = []
    # One tensor-product Kraus operator per choice of one operator per channel.
    for choice in itertools.product(*index_ranges):
        op = 1
        for stage, idx in enumerate(choice):
            op = tensor(op, C[stage][idx])
        product_kraus.append(op)
    return product_kraus
def depolarizing_channel_n_uses(p, n, rho, m):
    """
    Generates the output state corresponding to the depolarizing channel
    applied to each one of n systems in the joint state rho. p is the
    depolarizing probability as defined in the function "depolarizing_channel"
    above.
    If rho contains m>n systems, then the first m-n systems are left alone.
    """
    # All m subsystems are qubits (dimension 2).
    dims = 2 * np.ones(m).astype(int)
    rho_out = np.zeros((2**m, 2**m))
    # Expand the n-fold channel as a sum over which subset of k of the last n
    # systems gets replaced by the maximally mixed state.
    for k in range(n + 1):
        indices = list(itertools.combinations(range(1, n + 1), k))
        # print k,indices
        for index in indices:
            index = list(index)
            # Shift the subset so it addresses the last n systems of rho.
            index = np.array(index) + (m - n)
            index = list(index.astype(int))
            index_diff = np.setdiff1d(range(1, m + 1), index)
            # perm_arrange moves the replaced systems to the front;
            # perm_rearrange is its inverse permutation (1-based).
            perm_arrange = np.append(index, index_diff).astype(int)
            perm_rearrange = np.zeros(m)
            for i in range(m):
                perm_rearrange[i] = np.argwhere(perm_arrange == i + 1)[0][0] + 1
            perm_rearrange = perm_rearrange.astype(int)
            mix = matrix_power(eye(2**k) / 2, k)
            rho_part = partial_trace(rho, index, dims)
            # Weight by the probability of exactly this pattern of k replaced
            # systems out of n.
            rho_out = rho_out + (4 * p / 3.0) ** k * (1 - (4 * p / 3.0)) ** (
                n - k
            ) * syspermute(tensor(mix, rho_part), perm_rearrange, dims)
    return rho_out
def diamond_norm(J, dimA, dimB, display=False):
    """
    Computes the diamond norm of a superoperator with Choi representation J.
    dimA is the dimension of the input space of the channel, and dimB is the
    dimension of the output space.
    The form of the SDP used comes from Theorem 3.1 of:
        'Simpler semidefinite programs for completely bounded norms',
        Chicago Journal of Theoretical Computer Science 2013,
        by <NAME>
    """
    """
    The Choi representation J in the above paper is defined using a different
    convention:
        J=(N\\otimes I)(|Phi^+><Phi^+|).
    In other words, the channel N acts on the first half of the maximally-
    entangled state, while the convention used throughout this code stack
    is
        J=(I\\otimes N)(|Phi^+><Phi^+|).
    We thus use syspermute to convert to the form used in the aforementioned
    paper.
    """
    J = syspermute(J, [2, 1], [dimA, dimB])
    # X is the (generally non-Hermitian) off-diagonal block variable.
    X = cvx.Variable((dimA * dimB, dimA * dimB), hermitian=False)
    rho0 = cvx.Variable((dimA, dimA), PSD=True)
    rho1 = cvx.Variable((dimA, dimA), PSD=True)
    # The 2x2 block matrix M must be positive semidefinite.
    M = cvx.bmat([[cvx.kron(eye(dimB), rho0), X], [X.H, cvx.kron(eye(dimB), rho1)]])
    c = []
    c += [M >> 0, cvx.trace(rho0) == 1, cvx.trace(rho1) == 1]
    # Objective: Re Tr[J^dagger X], written as two conjugate halves.
    obj = cvx.Maximize(
        (1 / 2) * cvx.real(cvx.trace(dag(J) @ X))
        + (1 / 2) * cvx.real(cvx.trace(J @ X.H))
    )
    prob = cvx.Problem(obj, constraints=c)
    # NOTE(review): ``eps`` is a solver-specific option (e.g. SCS); other
    # default solvers may reject it -- confirm the intended solver.
    prob.solve(verbose=display, eps=1e-7)
    return prob.value
def depolarizing_channel_nQubits(n, p):
    """
    For 0<=p<=1, this returns the n-qubit Pauli channel given by
    p[0]=1-p, p[i]=p/(2^(2*n)-1) for all i>=1.
    """
    num_paulis = 2 ** (2 * n)
    residual = p / (num_paulis - 1)
    probs = [1 - p] + [residual] * (num_paulis - 1)
    return Pauli_channel_nQubit(n, probs, alt_repr=True)
def dephasing_channel(p, d=2):
    """
    Generates the channel rho -> (1-p)*rho+p*Z*rho*Z. (In the case d=2.)
    For d>=2, we let p be a list of d probabilities, and we use the discrete Weyl-Z
    operators to define the channel.
    For p=1/d, we get the completely dephasing channel.

    NOTE(review): the two branches return different things -- for d=2 this
    returns the (K, V, U) tuple produced by Pauli_channel, while for d>2 it
    returns only the list of Kraus operators. Callers must index accordingly.
    """
    if d == 2:
        return Pauli_channel(0, 0, p)
    else:
        # Kraus operators sqrt(p[k]) * Z_d^k from the discrete Weyl-Z operator.
        K = [np.sqrt(p[k]) * matrix_power(discrete_Weyl_Z(d), k) for k in range(d)]
        return K
def generalized_amplitude_damping_channel(gamma, N):
    """
    Generates the generalized amplitude damping channel.

    gamma is the damping parameter and N the mixing parameter; N=0 and N=1
    reduce to two-operator channels, while intermediate N yields four Kraus
    operators.
    """
    if N == 0:
        return amplitude_damping_channel(gamma)
    decay = np.sqrt(gamma)
    survive = np.sqrt(1 - gamma)
    if N == 1:
        return [
            np.array([[survive, 0], [0, 1]]),
            np.array([[0, 0], [decay, 0]]),
        ]
    cold = np.sqrt(1 - N)
    hot = np.sqrt(N)
    return [
        cold * np.array([[1, 0], [0, survive]]),
        cold * np.array([[0, decay], [0, 0]]),
        hot * np.array([[survive, 0], [0, 1]]),
        hot * np.array([[0, 0], [decay, 0]]),
    ]
def n_channel_uses(K, n):
    """
    Given the Kraus operators K of a channel, this function generates the
    Kraus operators corresponding to the n-fold tensor power of the channel.
    """
    num_kraus = len(K)
    K_n = []
    # One composite operator per length-n choice of Kraus indices.
    for comb in itertools.product(range(num_kraus), repeat=n):
        op = 1
        for idx in comb:
            op = tensor(op, K[idx])
        K_n.append(op)
    return K_n
def channel_scalar_multiply(K, x):
    """
    Multiplies the channel with Kraus operators in K by the scalar x.
    This means that each Kraus operator is multiplied by sqrt(x)!
    """
    scale = np.sqrt(x)
    return [scale * k for k in K]
def Pauli_channel_coeffs(K, n, as_dict=False):
    """
    Generates the coefficients c_{a,b} such that
        P(X^aZ^b)=c_{a,b}X^aZ^b,
    for the channel P with the Kraus operators in K.

    If as_dict is True, the result is a dict keyed by the bit-string pair
    (a, b); otherwise it is a flat list in lexicographic order of (a, b).
    """
    if as_dict:
        c = {}
    else:
        c = []
    # All n-bit strings, used for both the X exponent a and the Z exponent b.
    S = list(itertools.product(*[range(0, 2)] * n))
    # print(S)
    for a in S:
        for b in S:
            Xa = generate_nQubit_Pauli_X(list(a))
            Zb = generate_nQubit_Pauli_Z(list(b))
            # c_{a,b} = (1/2^n) Tr[(X^a Z^b)^dagger P(X^a Z^b)].
            if as_dict:
                c[(a, b)] = (1 / 2**n) * Tr(dag(Xa @ Zb) @ apply_channel(K, Xa @ Zb))
            else:
                c.append((1 / 2**n) * Tr(dag(Xa @ Zb) @ apply_channel(K, Xa @ Zb)))
    return c
def Pauli_channel(px, py, pz):
    """
    Generates the Kraus operators, an isometric extension, and a unitary
    extension of the one-qubit Pauli channel specified by the parameters px, py, pz.

    Returns the tuple (K, V, U). The identity component carries the
    remaining probability pI = 1 - px - py - pz.
    """
    pI = 1 - px - py - pz
    # Single-qubit Pauli matrices.
    Sx = np.array([[0, 1], [1, 0]])
    Sy = np.array([[0, -1j], [1j, 0]])
    Sz = np.array([[1, 0], [0, -1]])
    K = [np.sqrt(pI) * eye(2), np.sqrt(px) * Sx, np.sqrt(py) * Sy, np.sqrt(pz) * Sz]
    V, U = generate_channel_isometry(K, 2, 2)
    return K, V, U
def depolarizing_channel(p):
    """
    For 0<=p<=1, this returns the one-qubit Pauli channel given by px=py=pz=p/3.

    Returns the (K, V, U) tuple produced by Pauli_channel.
    """
    return Pauli_channel(p / 3.0, p / 3.0, p / 3.0)
| # This file is part of the QuTIpy package.
# https://github.com/sumeetkhatri/QuTIpy
#
# Copyright (c) 2022 <NAME>.
# --.- ..- - .. .--. -.--
#
#
# SPDX-License-Identifier: AGPL-3.0
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import itertools
import cvxpy as cvx
import numpy as np
from numpy.linalg import matrix_power
from scipy.linalg import eig
from qutipy.general_functions import (
Tr,
dag,
eye,
ket,
partial_trace,
syspermute,
tensor,
)
from qutipy.linalg import gram_schmidt
from qutipy.misc import cvxpy_to_numpy, numpy_to_cvxpy
from qutipy.Pauli import (
generate_nQubit_Pauli,
generate_nQubit_Pauli_X,
generate_nQubit_Pauli_Z,
)
from qutipy.states import MaxEnt_state, RandomStateVector
from qutipy.Weyl import discrete_Weyl_Z
def Choi_to_Natural(C_AB, dimA, dimB):
    """
    Takes the Choi representation of a map and outputs its natural representation.

    The natural representation Q of the channel acts as vec(N(rho)) = Q @ vec(rho),
    where N is the channel in question. It is obtained from the Choi matrix by a
    reshuffling of indices, implemented below via reshape/transpose.
    """
    choi = np.array(C_AB)
    # Split the (dimA*dimB) x (dimA*dimB) matrix into its four tensor indices,
    # swap the two middle indices, then flatten back before transposing.
    reshuffled = np.reshape(choi, [dimA, dimB, dimA, dimB])
    reshuffled = reshuffled.transpose((0, 2, 1, 3))
    natural = reshuffled.reshape([dimA * dimA, dimB * dimB])
    return np.array(natural).T
def bit_flip_channel(p):
    """
    Generates the channel rho -> (1-p)*rho+p*X*rho*X.

    Returns the (K, V, U) tuple produced by Pauli_channel with px=p.
    """
    return Pauli_channel(p, 0, 0)
def completely_dephasing_channel(d):
    """
    Generates the completely dephasing channel in d dimensions. This channel
    eliminates the off-diagonal elements (in the standard basis) of the input operator.

    Returns the list of Kraus operators.
    """
    if d == 2:
        p = 1 / 2
        # dephasing_channel returns (K, V, U) for d=2, so keep only the Kraus list.
        return dephasing_channel(p, d=d)[0]
    else:
        # For d>2, dephasing_channel already returns just the Kraus operators.
        p = (1 / d) * np.ones(d)
        return dephasing_channel(p, d=d)
def Kraus_representation(P, dimA, dimB):
    """
    Takes a Choi representation P of a channel and returns its Kraus representation.
    The Choi representation is defined with the channel acting on the second half of
    the maximally entangled vector.

    P is a (dimA*dimB) x (dimA*dimB) matrix; the result is a list of Kraus
    operators, one per eigenvector of P. Each operator is the corresponding
    eigenvector scaled by sqrt(eigenvalue), reshaped to (dimA, dimB) and then
    transposed, so its rows index the output space.
    """
    D, U = eig(P)
    U_cols = U.shape[1]
    # Need to check if the matrix U generated by eig is unitary (up to
    # numerical precision)
    check1 = np.allclose(eye(dimA * dimB), U @ dag(U))
    check2 = np.allclose(eye(dimA * dimB), dag(U) @ U)
    if check1 and check2:
        U = np.array(U)
    # If U is not unitary, use Gram-Schmidt to make it unitary (i.e., make the
    # columns of U orthonormal)
    else:
        C = gram_schmidt([U[:, i] for i in range(U_cols)], dimA * dimB)
        # Reassemble the orthonormalized vectors as the columns of U.
        U = np.sum([tensor(dag(ket(U_cols, i)), C[i]) for i in range(U_cols)], 0)
    # print(U)
    K = []
    for i in range(U_cols):
        Col = U[:, i]
        # sqrt of the eigenvalue; for a completely positive map the
        # eigenvalues of P are non-negative (up to numerical noise).
        K_tmp = np.array(np.sqrt(D[i]) * Col.reshape([dimA, dimB]))
        K.append(K_tmp.transpose())
    return K
def phase_damping_channel(p):
    """
    Generates the phase damping channel.

    Returns the two 2x2 Kraus operators diag(1, sqrt(p)) and
    diag(0, sqrt(1-p)); in this convention the first operator carries
    sqrt(p) on the |1><1| entry.
    """
    damp = np.sqrt(p)
    keep = np.sqrt(1 - p)
    return [np.diag([1.0, damp]), np.diag([0.0, keep])]
def generate_channel_isometry(K, dimA, dimB):
    """
    Generates an isometric extension of the
    channel specified by the Kraus operators K. dimA is the dimension of the
    input space of the channel, and dimB is the dimension of the output space
    of the channel. If dimA=dimB, then the function also outputs a unitary
    extension of the channel given by a particular construction.

    Returns V alone when dimA != dimB, and the pair (V, U) when dimA == dimB.
    NOTE(review): the unitary completion below pads with random state
    vectors, so U is not deterministic between calls.
    """
    dimE = len(K)
    # V = sum_i K_i (x) |i>_E maps the input space into output (x) environment.
    V = np.sum([tensor(K[i], ket(dimE, i)) for i in range(dimE)], 0)
    if dimA == dimB:
        # In this case, the unitary we generate has dimensions dimA*dimE x
        # dimA*dimE
        U = tensor(V, dag(ket(dimE, 0)))
        states = [V @ ket(dimA, i) for i in range(dimA)]
        # Pad the image of V with random vectors, then orthonormalize to get
        # a full basis of the dimA*dimE-dimensional space.
        for i in range(dimA * dimE - dimA):
            states.append(RandomStateVector(dimA * dimE))
        states_new = gram_schmidt(states, dimA * dimE)
        count = dimA
        # The leftover basis vectors define the action of U on environment
        # states |j>_E with j >= 1, completing the isometry to a unitary.
        for i in range(dimA):
            for j in range(1, dimE):
                U = U + tensor(states_new[count], dag(ket(dimA, i)), dag(ket(dimE, j)))
                count += 1
        return V, np.array(U)
    else:
        return V
def Pauli_channel_nQubit(n, p, alt_repr=False):
    """
    Generates the Kraus operators, an isometric extension, and a unitary
    extension of the n-qubit Pauli channel specified by the 2^(2*n) parameters in
    p, which must be probabilities in order for the map to be a channel. (i.e.,
    they must be non-negative and sum to one.)
    If alt_repr=True, then the channel is of the form
    P(rho)=\\sum_{a,b} p_{a,b} X^aZ^b(rho)Z^bX^a
    where a and b are n-bit strings
    (using the n-qubit X and Z operators as generated by the functions
    generate_nQubit_Pauli_X and generate_nQubit_Pauli_Z).

    Returns the tuple (K, V, U) in both representations.
    """
    K = []
    if not alt_repr:
        # Enumerate all 4^n tensor products of single-qubit Paulis
        # (0..3 selects I, X, Y, Z on each qubit).
        S = list(itertools.product(*[range(0, 4)] * n))
        for i in range(2 ** (2 * n)):
            K.append(np.sqrt(p[i]) * generate_nQubit_Pauli(list(S[i])))
        V, U = generate_channel_isometry(K, 2**n, 2**n)
        return K, V, U
    else:  # alt_repr==True
        # Enumerate all pairs (a, b) of n-bit strings for the X^a Z^b form;
        # count indexes p in lexicographic order of (a, b).
        S = list(itertools.product(*[range(0, 2)] * n))
        count = 0
        for a in S:
            a = list(a)
            for b in S:
                b = list(b)
                K.append(
                    np.sqrt(p[count])
                    * generate_nQubit_Pauli_X(a)
                    @ generate_nQubit_Pauli_Z(b)
                )
                count += 1
        V, U = generate_channel_isometry(K, 2**n, 2**n)
        return K, V, U
def apply_channel(K, rho, sys=None, dim=None, adjoint=False):
    """
    Applies the channel with Kraus operators in K to the state rho on
    systems specified by sys. The dimensions of the subsystems on which rho
    acts are given by dim.
    If adjoint is True, then this function applies the adjoint of the given
    channel.

    If sys is None, the channel acts on all of rho. Otherwise sys is the
    1-based index of the subsystem acted on, and dim lists the dimensions
    of all subsystems of rho.
    """
    if isinstance(rho, cvx.Variable):
        # Transparently support cvxpy variables: convert to numpy, apply the
        # channel, and convert the result back.
        rho = cvxpy_to_numpy(rho)
        rho_out = apply_channel(K, rho, sys, dim, adjoint)
        return numpy_to_cvxpy(rho_out)
    if adjoint:
        # The adjoint map has Kraus operators K_i^dagger.
        K_tmp = K
        K = []
        K = [dag(K_tmp[i]) for i in range(len(K_tmp))]
    if sys is None:
        return np.sum([K[i] @ rho @ dag(K[i]) for i in range(len(K))], 0)
    else:
        # Embed each Kraus operator as identity on every subsystem except sys.
        A = []
        for i in range(len(K)):
            X = 1
            for j in range(len(dim)):
                if j + 1 == sys:
                    X = tensor(X, K[i])
                else:
                    X = tensor(X, eye(dim[j]))
            A.append(X)
        return np.sum([A[i] @ rho @ dag(A[i]) for i in range(len(A))], 0)
def amplitude_damping_channel(gamma):
    """
    Generates the amplitude damping channel.

    Returns the two 2x2 Kraus operators for decay probability gamma: the
    no-jump operator (sqrt(1-gamma) on |1><1|) and the jump operator that
    maps |1> to |0> with amplitude sqrt(gamma).
    """
    decay = np.sqrt(gamma)
    survive = np.sqrt(1 - gamma)
    no_jump = np.array([[1, 0], [0, survive]])
    jump = np.array([[0, decay], [0, 0]])
    return [no_jump, jump]
def Natural_representation(K):
    """
    Calculates the natural representation of the channel (in the standard basis)
    given by the Kraus operators in K. In terms of the Kraus operators, the
    natural representation of the channel in the standard basis is

        N = sum_i K_i (x) conj(K_i),

    where the sum runs over the Kraus operators in K.
    """
    terms = [tensor(k, np.conjugate(k)) for k in K]
    return np.sum(terms, 0)
def BB84_channel(Q):
    """
    Generates the channel corresponding to the BB84 protocol with
    equal X and Z errors, given by the QBER Q. The definition of this
    channel can be found in:
        "Additive extensions of a quantum channel", by
        <NAME> and <NAME>. (arXiv:0712.2471)

    Returns the (K, V, U) tuple produced by Pauli_channel with
    px = pz = Q - Q^2 and py = Q^2.
    """
    return Pauli_channel(Q - Q**2, Q**2, Q - Q**2)
def Choi_representation(K, dimA):
    """
    Calculates the Choi representation of the map with Kraus operators K.
    dimA is the dimension of the input space of the channel.
    The Choi representation is defined with the channel acting on the second
    half of the maximally entangled vector.
    """
    # Unnormalized maximally entangled operator on two dimA-dimensional systems.
    Gamma = MaxEnt_state(dimA, normalized=False)
    # Use the builtin ``complex``: the ``np.complex`` alias was deprecated in
    # NumPy 1.20 and removed in NumPy 1.24, so the original line crashes on
    # modern NumPy.
    return np.array(apply_channel(K, Gamma, 2, [dimA, dimA]), dtype=complex)
def compose_channels(C):
    """
    Takes a composition of channels. The variable C should be a list of lists,
    with each list consisting of the Kraus operators of the channels to be
    composed. If C=[K1,K2,...,Kn], then this function returns the composition
    such that the channel corresponding to K1 is applied first, then K2, etc.
    """
    d = C[0][0].shape[0]
    index_ranges = [range(len(ops)) for ops in C]
    composed = []
    # One composite Kraus operator per choice of one operator from each channel.
    for choice in itertools.product(*index_ranges):
        op = eye(d)
        for stage, idx in enumerate(choice):
            # Later channels multiply on the left, so K1 acts first.
            op = C[stage][idx] @ op
        composed.append(op)
    return composed
def tensor_channels(C):
    """
    Takes the tensor product of the channels in C.
    C is a set of sets of Kraus operators.
    """
    index_ranges = [range(len(ops)) for ops in C]
    product_kraus = []
    # One tensor-product Kraus operator per choice of one operator per channel.
    for choice in itertools.product(*index_ranges):
        op = 1
        for stage, idx in enumerate(choice):
            op = tensor(op, C[stage][idx])
        product_kraus.append(op)
    return product_kraus
def depolarizing_channel_n_uses(p, n, rho, m):
    """
    Generates the output state corresponding to the depolarizing channel
    applied to each one of n systems in the joint state rho. p is the
    depolarizing probability as defined in the function "depolarizing_channel"
    above.
    If rho contains m>n systems, then the first m-n systems are left alone.
    """
    # All m subsystems are qubits (dimension 2).
    dims = 2 * np.ones(m).astype(int)
    rho_out = np.zeros((2**m, 2**m))
    # Expand the n-fold channel as a sum over which subset of k of the last n
    # systems gets replaced by the maximally mixed state.
    for k in range(n + 1):
        indices = list(itertools.combinations(range(1, n + 1), k))
        # print k,indices
        for index in indices:
            index = list(index)
            # Shift the subset so it addresses the last n systems of rho.
            index = np.array(index) + (m - n)
            index = list(index.astype(int))
            index_diff = np.setdiff1d(range(1, m + 1), index)
            # perm_arrange moves the replaced systems to the front;
            # perm_rearrange is its inverse permutation (1-based).
            perm_arrange = np.append(index, index_diff).astype(int)
            perm_rearrange = np.zeros(m)
            for i in range(m):
                perm_rearrange[i] = np.argwhere(perm_arrange == i + 1)[0][0] + 1
            perm_rearrange = perm_rearrange.astype(int)
            mix = matrix_power(eye(2**k) / 2, k)
            rho_part = partial_trace(rho, index, dims)
            # Weight by the probability of exactly this pattern of k replaced
            # systems out of n.
            rho_out = rho_out + (4 * p / 3.0) ** k * (1 - (4 * p / 3.0)) ** (
                n - k
            ) * syspermute(tensor(mix, rho_part), perm_rearrange, dims)
    return rho_out
def diamond_norm(J, dimA, dimB, display=False):
    """
    Computes the diamond norm of a superoperator with Choi representation J.
    dimA is the dimension of the input space of the channel, and dimB is the
    dimension of the output space.
    The form of the SDP used comes from Theorem 3.1 of:
        'Simpler semidefinite programs for completely bounded norms',
        Chicago Journal of Theoretical Computer Science 2013,
        by <NAME>
    """
    """
    The Choi representation J in the above paper is defined using a different
    convention:
        J=(N\\otimes I)(|Phi^+><Phi^+|).
    In other words, the channel N acts on the first half of the maximally-
    entangled state, while the convention used throughout this code stack
    is
        J=(I\\otimes N)(|Phi^+><Phi^+|).
    We thus use syspermute to convert to the form used in the aforementioned
    paper.
    """
    J = syspermute(J, [2, 1], [dimA, dimB])
    # X is the (generally non-Hermitian) off-diagonal block variable.
    X = cvx.Variable((dimA * dimB, dimA * dimB), hermitian=False)
    rho0 = cvx.Variable((dimA, dimA), PSD=True)
    rho1 = cvx.Variable((dimA, dimA), PSD=True)
    # The 2x2 block matrix M must be positive semidefinite.
    M = cvx.bmat([[cvx.kron(eye(dimB), rho0), X], [X.H, cvx.kron(eye(dimB), rho1)]])
    c = []
    c += [M >> 0, cvx.trace(rho0) == 1, cvx.trace(rho1) == 1]
    # Objective: Re Tr[J^dagger X], written as two conjugate halves.
    obj = cvx.Maximize(
        (1 / 2) * cvx.real(cvx.trace(dag(J) @ X))
        + (1 / 2) * cvx.real(cvx.trace(J @ X.H))
    )
    prob = cvx.Problem(obj, constraints=c)
    # NOTE(review): ``eps`` is a solver-specific option (e.g. SCS); other
    # default solvers may reject it -- confirm the intended solver.
    prob.solve(verbose=display, eps=1e-7)
    return prob.value
def depolarizing_channel_nQubits(n, p):
    """
    For 0<=p<=1, this returns the n-qubit Pauli channel given by
    p[0]=1-p, p[i]=p/(2^(2*n)-1) for all i>=1.
    """
    num_paulis = 2 ** (2 * n)
    residual = p / (num_paulis - 1)
    probs = [1 - p] + [residual] * (num_paulis - 1)
    return Pauli_channel_nQubit(n, probs, alt_repr=True)
def dephasing_channel(p, d=2):
    """
    Generates the channel rho -> (1-p)*rho+p*Z*rho*Z. (In the case d=2.)
    For d>=2, we let p be a list of d probabilities, and we use the discrete Weyl-Z
    operators to define the channel.
    For p=1/d, we get the completely dephasing channel.

    NOTE(review): the two branches return different things -- for d=2 this
    returns the (K, V, U) tuple produced by Pauli_channel, while for d>2 it
    returns only the list of Kraus operators. Callers must index accordingly.
    """
    if d == 2:
        return Pauli_channel(0, 0, p)
    else:
        # Kraus operators sqrt(p[k]) * Z_d^k from the discrete Weyl-Z operator.
        K = [np.sqrt(p[k]) * matrix_power(discrete_Weyl_Z(d), k) for k in range(d)]
        return K
def generalized_amplitude_damping_channel(gamma, N):
    """
    Generates the generalized amplitude damping channel.

    gamma is the damping parameter and N the mixing parameter; N=0 and N=1
    reduce to two-operator channels, while intermediate N yields four Kraus
    operators.
    """
    if N == 0:
        return amplitude_damping_channel(gamma)
    decay = np.sqrt(gamma)
    survive = np.sqrt(1 - gamma)
    if N == 1:
        return [
            np.array([[survive, 0], [0, 1]]),
            np.array([[0, 0], [decay, 0]]),
        ]
    cold = np.sqrt(1 - N)
    hot = np.sqrt(N)
    return [
        cold * np.array([[1, 0], [0, survive]]),
        cold * np.array([[0, decay], [0, 0]]),
        hot * np.array([[survive, 0], [0, 1]]),
        hot * np.array([[0, 0], [decay, 0]]),
    ]
def n_channel_uses(K, n):
    """
    Given the Kraus operators K of a channel, this function generates the
    Kraus operators corresponding to the n-fold tensor power of the channel.
    """
    num_kraus = len(K)
    K_n = []
    # One composite operator per length-n choice of Kraus indices.
    for comb in itertools.product(range(num_kraus), repeat=n):
        op = 1
        for idx in comb:
            op = tensor(op, K[idx])
        K_n.append(op)
    return K_n
def channel_scalar_multiply(K, x):
    """
    Multiplies the channel with Kraus operators in K by the scalar x.
    This means that each Kraus operator is multiplied by sqrt(x)!
    """
    scale = np.sqrt(x)
    return [scale * k for k in K]
def Pauli_channel_coeffs(K, n, as_dict=False):
    """
    Generates the coefficients c_{a,b} such that
        P(X^aZ^b)=c_{a,b}X^aZ^b,
    for the channel P with the Kraus operators in K.

    If as_dict is True, the result is a dict keyed by the bit-string pair
    (a, b); otherwise it is a flat list in lexicographic order of (a, b).
    """
    if as_dict:
        c = {}
    else:
        c = []
    # All n-bit strings, used for both the X exponent a and the Z exponent b.
    S = list(itertools.product(*[range(0, 2)] * n))
    # print(S)
    for a in S:
        for b in S:
            Xa = generate_nQubit_Pauli_X(list(a))
            Zb = generate_nQubit_Pauli_Z(list(b))
            # c_{a,b} = (1/2^n) Tr[(X^a Z^b)^dagger P(X^a Z^b)].
            if as_dict:
                c[(a, b)] = (1 / 2**n) * Tr(dag(Xa @ Zb) @ apply_channel(K, Xa @ Zb))
            else:
                c.append((1 / 2**n) * Tr(dag(Xa @ Zb) @ apply_channel(K, Xa @ Zb)))
    return c
def Pauli_channel(px, py, pz):
    """
    Generates the Kraus operators, an isometric extension, and a unitary
    extension of the one-qubit Pauli channel specified by the parameters px, py, pz.

    Returns the tuple (K, V, U). The identity component carries the
    remaining probability pI = 1 - px - py - pz.
    """
    pI = 1 - px - py - pz
    # Single-qubit Pauli matrices.
    Sx = np.array([[0, 1], [1, 0]])
    Sy = np.array([[0, -1j], [1j, 0]])
    Sz = np.array([[1, 0], [0, -1]])
    K = [np.sqrt(pI) * eye(2), np.sqrt(px) * Sx, np.sqrt(py) * Sy, np.sqrt(pz) * Sz]
    V, U = generate_channel_isometry(K, 2, 2)
    return K, V, U
def depolarizing_channel(p):
"""
For 0<=p<=1, this returns the one-qubit Pauli channel given by px=py=pz=p/3.
"""
return Pauli_channel(p / 3.0, p / 3.0, p / 3.0) | en | 0.839206 | # This file is part of the QuTIpy package. # https://github.com/sumeetkhatri/QuTIpy # # Copyright (c) 2022 <NAME>. # --.- ..- - .. .--. -.-- # # # SPDX-License-Identifier: AGPL-3.0 # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, version 3. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # Takes the Choi representation of a map and outputs its natural representation. The Choi represenatation Q of the channel acts as: vec(N(rho))=Q*vec(rho), where N is the channel in question. It can be obtained from the Choi representation with a simple reshuffling of indices. Generates the channel rho -> (1-p)*rho+p*X*rho*X. Generates the completely dephasing channel in d dimensions. This channel eliminates the off-diagonal elements (in the standard basis) of the input operator. Takes a Choi representation P of a channel and returns its Kraus representation. The Choi representation is defined with the channel acting on the second half of the maximally entangled vector. # Need to check if the matrix U generated by eig is unitary (up to # numerical precision) # If U is not unitary, use Gram-Schmidt to make it unitary (i.e., make the # columns of U orthonormal) # print(U) Generates the phase damping channel. Generates an isometric extension of the channel specified by the Kraus operators K. dimA is the dimension of the input space of the channel, and dimB is the dimension of the output space of the channel. 
If dimA=dimB, then the function also outputs a unitary extension of the channel given by a particular construction. # In this case, the unitary we generate has dimensions dimA*dimE x # dimA*dimE Generates the Kraus operators, an isometric extension, and a unitary extension of the n-qubit Pauli channel specified by the 2^(2*n) parameters in p, which must be probabilities in order for the map to be a channel. (i.e., they must be non-negative and sum to one.) If alt_repr=True, then the channel is of the form P(rho)=\\sum_{a,b} p_{a,b} X^aZ^b(rho)Z^bX^a where a and b are n-bit strings (using the n-qubit X and Z operators as generated by the functions generate_nQubit_Pauli_X and generate_nQubit_Pauli_Z). # alt_repr==True Applies the channel with Kraus operators in K to the state rho on systems specified by sys. The dimensions of the subsystems on which rho acts are given by dim. If adjoint is True, then this function applies the adjoint of the given channel. Generates the amplitude damping channel. Calculates the natural representation of the channel (in the standard basis) given by the Kraus operators in K. In terms of the Kraus operators, the natural representation of the channel in the standard basis is given by N=sum_i K_i ⊗ conj(K_i), where the sum is over the Kraus operators K_i in K. Generates the channel corresponding to the BB84 protocol with equal X and Z errors, given by the QBER Q. The definition of this channel can be found in: "Additive extensions of a quantum channel", by <NAME> and <NAME>. (arXiv:0712.2471) Calculates the Choi representation of the map with Kraus operators K. dimA is the dimension of the input space of the channel. The Choi represenatation is defined with the channel acting on the second half of the maximally entangled vector. Takes a composition of channels. The variable C should be a list of lists, with each list consisting of the Kraus operators of the channels to be composed. 
If C=[K1,K2,...,Kn], then this function returns the composition such that the channel corresponding to K1 is applied first, then K2, etc. # tmp=1 Takes the tensor product of the channels in C. C is a set of sets of Kraus operators. Generates the output state corresponding to the depolarizing channel applied to each one of n systems in the joint state rho. p is the depolarizing probability as defined in the function "depolarizing_channel" above. If rho contains m>n systems, then the first m-n systems are left alone. # print k,indices Computes the diamond norm of a superoperator with Choi representation J. dimA is the dimension of the input space of the channel, and dimB is the dimension of the output space. The form of the SDP used comes from Theorem 3.1 of: 'Simpler semidefinite programs for completely bounded norms', Chicago Journal of Theoretical Computer Science 2013, by <NAME> The Choi representation J in the above paper is defined using a different convention: J=(N\\otimes I)(|Phi^+><Phi^+|). In other words, the channel N acts on the first half of the maximally- entangled state, while the convention used throughout this code stack is J=(I\\otimes N)(|Phi^+><Phi^+|). We thus use syspermute to convert to the form used in the aforementioned paper. For 0<=p<=1, this returns the n-qubit Pauli channel given by p[0]=1-p, p[i]=p/(2^(2*n)-1) for all i>=1. Generates the channel rho -> (1-p)*rho+p*Z*rho*Z. (In the case d=2.) For d>=2, we let p be a list of d probabilities, and we use the discrete Weyl-Z operators to define the channel. For p=1/d, we get the completely dephasing channel. Generates the generalized amplitude damping channel. Given the Kraus operators K of a channel, this function generates the Kraus operators corresponding to the n-fold tensor power of the channel. dimA is the dimension of the input space, and dimB the dimension of the output space. # Number of Kraus operators # print comb Multiplies the channel with Kraus operators in K by the scalar x. 
This means that each Kraus operator is multiplied by sqrt(x)! Generates the coefficients c_{a,b} such that P(X^aZ^b)=c_{a,b}X^aZ^b, for the channel P with the Kraus operators in K. # print(S) Generates the Kraus operators, an isometric extension, and a unitary extension of the one-qubit Pauli channel specified by the parameters px, py, pz. For 0<=p<=1, this returns the one-qubit Pauli channel given by px=py=pz=p/3. | 1.665879 | 2 |
commands/network_analysis.py | ficolo/science-radar | 1 | 6619157 | from graph_tool.all import *
import json
import click
def analyse_graph(graph: Graph, previous: Graph = None):
    """
    Computes summary statistics for a single graph: degree and edge-weight
    histograms/averages, edge and vertex counts, density, the global
    clustering coefficient and, when a previous graph is given, the
    weighted similarity to it.
    """
    stats = dict()
    weights = graph.edge_properties['weight']
    click.secho('    Getting degree histogram', fg='yellow')
    stats['degree_histogram'] = [counts.tolist() for counts in vertex_hist(graph, 'total')]
    click.secho('    Getting degree average', fg='yellow')
    stats['degree_average'] = vertex_average(graph, 'total')
    click.secho('    Getting edge count', fg='yellow')
    stats['edge_count'] = graph.num_edges()
    click.secho('    Getting weight average', fg='yellow')
    stats['edge_weight_average'] = edge_average(graph, weights)
    click.secho('    Getting edge weight histogram', fg='yellow')
    stats['edge_weight_histogram'] = [counts.tolist() for counts in edge_hist(graph, weights)]
    click.secho('    Getting vertex count', fg='yellow')
    stats['vertex_count'] = graph.num_vertices()
    click.secho('    Getting density', fg='yellow')
    # Density relative to the maximum number of undirected edges n*(n-1)/2.
    num_vertices = stats['vertex_count']
    stats['density'] = stats['edge_count'] / ((num_vertices * (num_vertices - 1)) / 2)
    click.secho('    Getting clustering coefficient', fg='yellow')
    stats['clustering_coefficient'] = global_clustering(graph)
    click.secho('    Getting similarity year before', fg='yellow')
    if previous is not None:
        stats['similarity_year_before'] = similarity(
            graph,
            previous,
            eweight1=weights,
            eweight2=previous.edge_properties['weight'],
        )
    return stats
def analyse_networks(networks: dict, output_path):
    """
    Analyses every graph in ``networks`` (a mapping of key -> Graph), writes
    the collected statistics to ``output_path`` as JSON, and returns the
    statistics dict. Empty graphs are skipped; each graph is compared to the
    previously analysed one via analyse_graph's ``previous`` argument.
    """
    analysis = dict()
    previous = None
    for key, value in networks.items():
        # BUG FIX: the original tested ``value.num_edges() == 0`` twice;
        # the second clause was clearly meant to test the vertex count.
        if value.num_edges() == 0 or value.num_vertices() == 0:
            continue
        # Announce the work before doing it (previously logged after the
        # analysis had already completed).
        click.secho('    Analysing {} network'.format(key), fg='yellow')
        analysis[key] = analyse_graph(value, previous)
        previous = value
    with open(output_path, 'w') as fp:
        json.dump(analysis, fp, indent=4, sort_keys=True)
    return analysis
| from graph_tool.all import *
import json
import click
def analyse_graph(graph: Graph, previous: Graph = None):
analysys = dict()
click.secho(' Getting degree histogram', fg='yellow')
analysys['degree_histogram'] = [array.tolist() for array in vertex_hist(graph, 'total')]
click.secho(' Getting degree average', fg='yellow')
analysys['degree_average'] = vertex_average(graph, 'total')
click.secho(' Getting edge count', fg='yellow')
analysys['edge_count'] = graph.num_edges()
click.secho(' Getting weight average', fg='yellow')
analysys['edge_weight_average'] = edge_average(graph, graph.edge_properties['weight'])
click.secho(' Getting edge weight histogram', fg='yellow')
analysys['edge_weight_histogram'] = [array.tolist() for array in edge_hist(graph, graph.edge_properties['weight'])]
click.secho(' Getting vertex count', fg='yellow')
analysys['vertex_count'] = graph.num_vertices()
click.secho(' Getting density', fg='yellow')
analysys['density'] = analysys['edge_count'] / ((analysys['vertex_count'] * (analysys['vertex_count'] - 1)) / 2)
click.secho(' Getting clustering coefficient', fg='yellow')
analysys['clustering_coefficient'] = global_clustering(graph)
click.secho(' Getting similarity year before', fg='yellow')
if previous is not None:
analysys['similarity_year_before'] = similarity(graph,
previous,
eweight1=graph.edge_properties['weight'],
eweight2=previous.edge_properties['weight']
)
return analysys
def analyse_networks(networks: dict, output_path):
analysis = dict()
previous = None
for key, value in networks.items():
if value.num_edges() == 0 or value.num_edges() == 0:
continue
analysis[key] = analyse_graph(value, previous)
previous = value
click.secho(' Analysing {} network'.format(key), fg='yellow')
with open(output_path, 'w') as fp:
json.dump(analysis, fp, indent=4, sort_keys=True)
return analysis
| none | 1 | 2.901991 | 3 | |
pystradamus/utils.py | bockmabe/pystradamus | 12 | 6619158 | import logging
import sys
def format_timedelta(dt):
"""Formats a datetime.timedelta into a simple string of days, hours, minutes
and seconds
"""
ts = dt.total_seconds()
days, r = divmod(ts, 84600)
hours, r = divmod(r, 3600)
minutes, r = divmod(r, 60)
return "%dD %02d:%02d:%02f" % (days, hours, minutes, r)
def error_exit(message, exit_code=1):
"""Bail out with an exit code
"""
logging.error(message)
sys.exit(exit_code)
| import logging
import sys
def format_timedelta(dt):
"""Formats a datetime.timedelta into a simple string of days, hours, minutes
and seconds
"""
ts = dt.total_seconds()
days, r = divmod(ts, 84600)
hours, r = divmod(r, 3600)
minutes, r = divmod(r, 60)
return "%dD %02d:%02d:%02f" % (days, hours, minutes, r)
def error_exit(message, exit_code=1):
"""Bail out with an exit code
"""
logging.error(message)
sys.exit(exit_code)
| en | 0.791398 | Formats a datetime.timedelta into a simple string of days, hours, minutes and seconds Bail out with an exit code | 3.198518 | 3 |
SL_pa.py | OakInn/ysLineidGen | 1 | 6619159 | import argparse
def prepareOptions(program, description):
parser = argparse.ArgumentParser(description=description, prog=program)
parser.add_argument(r"""--BCKP""", help=r"""Path to folder for backing up files in SRC {OPTIONAL,type:string,default:""}""", metavar=r"""BCKP""", dest=r"""BCKP""" , type=str )
parser.add_argument(r"""--ext""", help=r"""Extensions of files for procession {OPTIONAL,type:string,default:".txt_.yarn_.yarn.txt"}""", metavar=r"""ext""", dest=r"""ext""" , type=str , default=r""".txt_.yarn_.yarn.txt""" )
parser.add_argument(r"""--compat""", help=r"""Initial line tag length check. ""/"yarn"/"long" {OPTIONAL,type:string,default:""}""", metavar=r"""compat""", dest=r"""compat""" , type=str )
parser.add_argument(r"""--resolve""", help=r"""Line tag length for conflict resolve. ""/"yarn"/"long" {OPTIONAL,type:string,default:""}""", metavar=r"""resolve""", dest=r"""resolve""" , type=str )
parser.add_argument(r"""--newcompat""", help=r"""Newly generated line tag length. "yarn"/"long" {OPTIONAL,type:string,default:"yarn"}""", metavar=r"""newcompat""", dest=r"""newcompat""" , type=str , default=r"""yarn""" )
parser.add_argument(r"""--loglevel""", help=r"""Log level, possible values [ERROR|WARNING|INFO|DEBUG] {OPTIONAL,type:string,default:"INFO"}""", metavar=r"""loglevel""", dest=r"""loglevel""" , type=str , default=r"""INFO""" )
parser.add_argument(r"""SRC""", type=str, help=r"""Path to folder which contain yarn spinner files {REQUIRED,type:string}""")
return parser
def usage(program, description=""):
return prepareOptions(program, description).format_help()
def parse(program, description, argv, allowIncomplete=False):
parser = prepareOptions(program, description)
args = None
if allowIncomplete:
args = parser.parse_known_args(argv)
else:
args = parser.parse_args(argv)
return args;
| import argparse
def prepareOptions(program, description):
parser = argparse.ArgumentParser(description=description, prog=program)
parser.add_argument(r"""--BCKP""", help=r"""Path to folder for backing up files in SRC {OPTIONAL,type:string,default:""}""", metavar=r"""BCKP""", dest=r"""BCKP""" , type=str )
parser.add_argument(r"""--ext""", help=r"""Extensions of files for procession {OPTIONAL,type:string,default:".txt_.yarn_.yarn.txt"}""", metavar=r"""ext""", dest=r"""ext""" , type=str , default=r""".txt_.yarn_.yarn.txt""" )
parser.add_argument(r"""--compat""", help=r"""Initial line tag length check. ""/"yarn"/"long" {OPTIONAL,type:string,default:""}""", metavar=r"""compat""", dest=r"""compat""" , type=str )
parser.add_argument(r"""--resolve""", help=r"""Line tag length for conflict resolve. ""/"yarn"/"long" {OPTIONAL,type:string,default:""}""", metavar=r"""resolve""", dest=r"""resolve""" , type=str )
parser.add_argument(r"""--newcompat""", help=r"""Newly generated line tag length. "yarn"/"long" {OPTIONAL,type:string,default:"yarn"}""", metavar=r"""newcompat""", dest=r"""newcompat""" , type=str , default=r"""yarn""" )
parser.add_argument(r"""--loglevel""", help=r"""Log level, possible values [ERROR|WARNING|INFO|DEBUG] {OPTIONAL,type:string,default:"INFO"}""", metavar=r"""loglevel""", dest=r"""loglevel""" , type=str , default=r"""INFO""" )
parser.add_argument(r"""SRC""", type=str, help=r"""Path to folder which contain yarn spinner files {REQUIRED,type:string}""")
return parser
def usage(program, description=""):
return prepareOptions(program, description).format_help()
def parse(program, description, argv, allowIncomplete=False):
parser = prepareOptions(program, description)
args = None
if allowIncomplete:
args = parser.parse_known_args(argv)
else:
args = parser.parse_args(argv)
return args;
| en | 0.397091 | --BCKP Path to folder for backing up files in SRC {OPTIONAL,type:string,default:""} BCKP BCKP --ext Extensions of files for procession {OPTIONAL,type:string,default:".txt_.yarn_.yarn.txt"} ext ext .txt_.yarn_.yarn.txt --compat Initial line tag length check. ""/"yarn"/"long" {OPTIONAL,type:string,default:""} compat compat --resolve Line tag length for conflict resolve. ""/"yarn"/"long" {OPTIONAL,type:string,default:""} resolve resolve --newcompat Newly generated line tag length. "yarn"/"long" {OPTIONAL,type:string,default:"yarn"} newcompat newcompat yarn --loglevel Log level, possible values [ERROR|WARNING|INFO|DEBUG] {OPTIONAL,type:string,default:"INFO"} loglevel loglevel INFO SRC Path to folder which contain yarn spinner files {REQUIRED,type:string} | 2.841317 | 3 |
trayjenkins/settings.py | brewmook/trayjenkins | 0 | 6619160 | <filename>trayjenkins/settings.py
from optparse import OptionParser
class Settings(object):
def __init__(self, host, username='', password=''):
self.host = host
self.username = username
self.password = password
def __eq__(self, other):
return other is not None \
and self.host == other.host \
and self.username == other.username \
and self.password == other.password
def __repr__(self):
return "Settings(host='%s',username='%s',password='%s')" % (
self.host,
self.username,
self.password)
class CommandLineSettingsParser(object):
def __init__(self):
self._parser = OptionParser(usage='usage: %prog [options] host')
self._parser.add_option('-p', '--password',
dest='password',
default='',
help='password for remote host')
self._parser.add_option('-u', '--username',
dest='username',
default='',
help='username for remote host')
def parse_args(self, args):
(options, args) = self._parser.parse_args(args) # @UnusedVariable
if len(args) is 1:
result = Settings(args[0])
result.username = options.username
result.password = <PASSWORD>.password
else:
result = None
return result
def print_help(self):
self._parser.print_help()
| <filename>trayjenkins/settings.py
from optparse import OptionParser
class Settings(object):
def __init__(self, host, username='', password=''):
self.host = host
self.username = username
self.password = password
def __eq__(self, other):
return other is not None \
and self.host == other.host \
and self.username == other.username \
and self.password == other.password
def __repr__(self):
return "Settings(host='%s',username='%s',password='%s')" % (
self.host,
self.username,
self.password)
class CommandLineSettingsParser(object):
def __init__(self):
self._parser = OptionParser(usage='usage: %prog [options] host')
self._parser.add_option('-p', '--password',
dest='password',
default='',
help='password for remote host')
self._parser.add_option('-u', '--username',
dest='username',
default='',
help='username for remote host')
def parse_args(self, args):
(options, args) = self._parser.parse_args(args) # @UnusedVariable
if len(args) is 1:
result = Settings(args[0])
result.username = options.username
result.password = <PASSWORD>.password
else:
result = None
return result
def print_help(self):
self._parser.print_help()
| en | 0.172438 | # @UnusedVariable | 2.74116 | 3 |
hello_world.py | PantsuitUp/Whack2017 | 0 | 6619161 | from __future__ import print_function
# We'll start with a couple of globals...
CardTitlePrefix = "(Pant) Suit Up"
AskQuestionIntent = "Good question"
Questions = ["Give me an example of when you showed initiative",
"Tell me about a time you failed",
"How would your friends describe you?",
"Tell me about yourself",
"Did you ever make a risky decision? Why? How did you handle it?"]
Sequences = [("Hi", "Bye")]
FeedbackTemplate = "Good job" #make this an object -- configure individual measure values--> call method to insert them
# --------------- Helpers that build all of the responses ----------------------
def build_speechlet_response(title, output, reprompt_text, should_end_session):
"""
Build a speechlet JSON representation of the title, output text,
reprompt text & end of session
"""
return {
'outputSpeech': {
'type': 'PlainText',
'text': output
},
'card': {
'type': 'Simple',
'title': CardTitlePrefix + " - " + title,
'content': output
},
'reprompt': {
'outputSpeech': {
'type': 'PlainText',
'text': reprompt_text
}
},
'shouldEndSession': should_end_session
}
def build_response(session_attributes, speechlet_response):
"""
Build the full response JSON from the speechlet response
"""
return {
'version': '1.0',
'sessionAttributes': session_attributes,
'response': speechlet_response
}
# --------------- Functions that control the skill's behavior ------------------
def begin_interview():
# initialize interview variables
intro, conclusion = pick_sequences()
questions = pick_questions()
session_attributes = {"current_question_index": 1,
"questions": questions,
"all_answers": "",
"conclusion": conclusion
}
# initialize response variables
card_title = "Beginning Interview"
speech_output = "Welcome to (Pant) Suit Up. You're interview is beginning in 3, 2, 1, now! " + intro + " " + questions[0]
should_end_session = False
return build_response(session_attributes, build_speechlet_response(card_title, speech_output, "no reprompt", should_end_session))
def handle_session_end_request(session):
card_title = "Interview Done"
speech_output = construct_feedback(session)
# Setting this to true ends the session and exits the skill.
should_end_session = True
return build_response({}, build_speechlet_response(
card_title, speech_output, None, should_end_session))
def pick_questions():
"""
Pick questions based on expected length of response and other factors in tags
"""
return Questions[0:2]
def pick_sequences():
"""
Pick opening/closing sequence pair randomly
"""
return Sequences[0]
def construct_feedback(session):
"""
Construct feedback from total answers
"""
total_text = session["attributes"]["all_answers"]
return FeedbackTemplate
def ask_question(intent, session):
"""
Record answer in session attributes and ask new question or conclude interview
"""
# update cumulative interview answer
answer = intent['slots'].get('Answer', {}).get('value') # does this work for us?????
session["attributes"]["all_answers"] += (" " + answer)
# extract next question
questions = session["attributes"]["questions"]
question_index = session["attributes"]["current_question_index"]
if question_index >= len(questions):
return handle_session_end_request(session) # it's a wrap!
question_string = questions[question_index]
session["attributes"]["current_question_index"] += 1
card_title = "Question"
reprompt_text = "I'm sorry, but I didn't understand your answer. Can you try again?"
return build_response({}, build_speechlet_response(card_title, question_string, reprompt_text, True))
# --------------- Events ------------------
def on_session_started(session_started_request, session):
""" Called when the session starts """
print("on_session_started requestId=" + session_started_request['requestId']
+ ", sessionId=" + session['sessionId'])
def on_launch(launch_request, session):
""" Called when the user launches the skill without specifying what they want """
print("on_launch requestId=" + launch_request['requestId'] +
", sessionId=" + session['sessionId'])
# Dispatch to your skill's launch
return begin_interview()
def on_intent(intent_request, session):
""" Called when the user specifies an intent for this skill """
print("on_intent requestId=" + intent_request['requestId'] +
", sessionId=" + session['sessionId'])
intent = intent_request['intent']
intent_name = intent_request['intent']['name']
# Dispatch to your skill's intent handlers
if intent_name == AskQuestionIntent:
return ask_question(intent, session)
elif intent_name == "AMAZON.StartOverIntent": # based on example ???
return begin_interview()
elif intent_name == "AMAZON.CancelIntent" or intent_name == "AMAZON.StopIntent":
return handle_session_end_request(session)
else:
raise ValueError("Invalid intent")
def on_session_ended(session_ended_request, session):
""" Called when the user ends the session. Is not called when the skill returns should_end_session=true """
print("on_session_ended requestId=" + session_ended_request['requestId'] +
", sessionId=" + session['sessionId'])
# --------------- Main handler ------------------
def lambda_handler(event, context):
""" Route the incoming request based on type (LaunchRequest, IntentRequest,
etc.) The JSON body of the request is provided in the event parameter.
"""
print("event.session.application.applicationId=" +
event['session']['application']['applicationId'])
if event['session']['new']:
on_session_started({'requestId': event['request']['requestId']},
event['session'])
if event['request']['type'] == "LaunchRequest":
return on_launch(event['request'], event['session'])
elif event['request']['type'] == "IntentRequest":
return on_intent(event['request'], event['session'])
elif event['request']['type'] == "SessionEndedRequest":
return on_session_ended(event['request'], event['session']) | from __future__ import print_function
# We'll start with a couple of globals...
CardTitlePrefix = "(Pant) Suit Up"
AskQuestionIntent = "Good question"
Questions = ["Give me an example of when you showed initiative",
"Tell me about a time you failed",
"How would your friends describe you?",
"Tell me about yourself",
"Did you ever make a risky decision? Why? How did you handle it?"]
Sequences = [("Hi", "Bye")]
FeedbackTemplate = "Good job" #make this an object -- configure individual measure values--> call method to insert them
# --------------- Helpers that build all of the responses ----------------------
def build_speechlet_response(title, output, reprompt_text, should_end_session):
"""
Build a speechlet JSON representation of the title, output text,
reprompt text & end of session
"""
return {
'outputSpeech': {
'type': 'PlainText',
'text': output
},
'card': {
'type': 'Simple',
'title': CardTitlePrefix + " - " + title,
'content': output
},
'reprompt': {
'outputSpeech': {
'type': 'PlainText',
'text': reprompt_text
}
},
'shouldEndSession': should_end_session
}
def build_response(session_attributes, speechlet_response):
"""
Build the full response JSON from the speechlet response
"""
return {
'version': '1.0',
'sessionAttributes': session_attributes,
'response': speechlet_response
}
# --------------- Functions that control the skill's behavior ------------------
def begin_interview():
# initialize interview variables
intro, conclusion = pick_sequences()
questions = pick_questions()
session_attributes = {"current_question_index": 1,
"questions": questions,
"all_answers": "",
"conclusion": conclusion
}
# initialize response variables
card_title = "Beginning Interview"
speech_output = "Welcome to (Pant) Suit Up. You're interview is beginning in 3, 2, 1, now! " + intro + " " + questions[0]
should_end_session = False
return build_response(session_attributes, build_speechlet_response(card_title, speech_output, "no reprompt", should_end_session))
def handle_session_end_request(session):
card_title = "Interview Done"
speech_output = construct_feedback(session)
# Setting this to true ends the session and exits the skill.
should_end_session = True
return build_response({}, build_speechlet_response(
card_title, speech_output, None, should_end_session))
def pick_questions():
"""
Pick questions based on expected length of response and other factors in tags
"""
return Questions[0:2]
def pick_sequences():
"""
Pick opening/closing sequence pair randomly
"""
return Sequences[0]
def construct_feedback(session):
"""
Construct feedback from total answers
"""
total_text = session["attributes"]["all_answers"]
return FeedbackTemplate
def ask_question(intent, session):
"""
Record answer in session attributes and ask new question or conclude interview
"""
# update cumulative interview answer
answer = intent['slots'].get('Answer', {}).get('value') # does this work for us?????
session["attributes"]["all_answers"] += (" " + answer)
# extract next question
questions = session["attributes"]["questions"]
question_index = session["attributes"]["current_question_index"]
if question_index >= len(questions):
return handle_session_end_request(session) # it's a wrap!
question_string = questions[question_index]
session["attributes"]["current_question_index"] += 1
card_title = "Question"
reprompt_text = "I'm sorry, but I didn't understand your answer. Can you try again?"
return build_response({}, build_speechlet_response(card_title, question_string, reprompt_text, True))
# --------------- Events ------------------
def on_session_started(session_started_request, session):
""" Called when the session starts """
print("on_session_started requestId=" + session_started_request['requestId']
+ ", sessionId=" + session['sessionId'])
def on_launch(launch_request, session):
""" Called when the user launches the skill without specifying what they want """
print("on_launch requestId=" + launch_request['requestId'] +
", sessionId=" + session['sessionId'])
# Dispatch to your skill's launch
return begin_interview()
def on_intent(intent_request, session):
""" Called when the user specifies an intent for this skill """
print("on_intent requestId=" + intent_request['requestId'] +
", sessionId=" + session['sessionId'])
intent = intent_request['intent']
intent_name = intent_request['intent']['name']
# Dispatch to your skill's intent handlers
if intent_name == AskQuestionIntent:
return ask_question(intent, session)
elif intent_name == "AMAZON.StartOverIntent": # based on example ???
return begin_interview()
elif intent_name == "AMAZON.CancelIntent" or intent_name == "AMAZON.StopIntent":
return handle_session_end_request(session)
else:
raise ValueError("Invalid intent")
def on_session_ended(session_ended_request, session):
""" Called when the user ends the session. Is not called when the skill returns should_end_session=true """
print("on_session_ended requestId=" + session_ended_request['requestId'] +
", sessionId=" + session['sessionId'])
# --------------- Main handler ------------------
def lambda_handler(event, context):
""" Route the incoming request based on type (LaunchRequest, IntentRequest,
etc.) The JSON body of the request is provided in the event parameter.
"""
print("event.session.application.applicationId=" +
event['session']['application']['applicationId'])
if event['session']['new']:
on_session_started({'requestId': event['request']['requestId']},
event['session'])
if event['request']['type'] == "LaunchRequest":
return on_launch(event['request'], event['session'])
elif event['request']['type'] == "IntentRequest":
return on_intent(event['request'], event['session'])
elif event['request']['type'] == "SessionEndedRequest":
return on_session_ended(event['request'], event['session']) | en | 0.800226 | # We'll start with a couple of globals... #make this an object -- configure individual measure values--> call method to insert them # --------------- Helpers that build all of the responses ---------------------- Build a speechlet JSON representation of the title, output text, reprompt text & end of session Build the full response JSON from the speechlet response # --------------- Functions that control the skill's behavior ------------------ # initialize interview variables # initialize response variables # Setting this to true ends the session and exits the skill. Pick questions based on expected length of response and other factors in tags Pick opening/closing sequence pair randomly Construct feedback from total answers Record answer in session attributes and ask new question or conclude interview # update cumulative interview answer # does this work for us????? # extract next question # it's a wrap! # --------------- Events ------------------ Called when the session starts Called when the user launches the skill without specifying what they want # Dispatch to your skill's launch Called when the user specifies an intent for this skill # Dispatch to your skill's intent handlers # based on example ??? Called when the user ends the session. Is not called when the skill returns should_end_session=true # --------------- Main handler ------------------ Route the incoming request based on type (LaunchRequest, IntentRequest, etc.) The JSON body of the request is provided in the event parameter. | 2.82378 | 3 |
client/rpc.py | Unifield/ufcheck | 0 | 6619162 | <reponame>Unifield/ufcheck
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
OpenObject Client Library
"""
import sys
import os
import socket
import zlib
import xmlrpclib
from timeout_transport import TimeoutTransport
from gzip_xmlrpclib import GzipTransport, GzipSafeTransport
from datetime import datetime
from tools.translate import _
import tools
try:
import cPickle as pickle
except:
import pickle
try:
import cStringIO as StringIO
except:
import StringIO
import logging
GZIP_MAGIC = '\x78\xda' # magic when max compression used
NB_RETRY = 10
# Safer Unpickler, in case the server is untrusted, from <NAME>
# http://nadiana.com/python-pickle-insecure#How_to_Make_Unpickling_Safer
class SafeUnpickler(object):
PICKLE_SAFE = {
'exceptions': set(['Exception']),
}
@classmethod
def find_class(cls, module, name):
if not module in cls.PICKLE_SAFE:
raise pickle.UnpicklingError(
'Attempting to unpickle unsafe module %s' % module
)
__import__(module)
mod = sys.modules[module]
if not name in cls.PICKLE_SAFE[module]:
raise pickle.UnpicklingError(
'Attempting to unpickle unsafe class %s' % name
)
klass = getattr(mod, name)
return klass
@classmethod
def loads(cls, pickle_string):
pickle_obj = pickle.Unpickler(StringIO.StringIO(pickle_string))
pickle_obj.find_global = cls.find_class
return pickle_obj.load()
class Connector(object):
"""
Connector class
"""
_logger = logging.getLogger('connector')
def __init__(self, hostname, port, timeout):
"""
:param hostname: Host name of the server
:param port: Port for the connection to the server
"""
self.hostname = hostname
self.port = port
self.timeout = timeout
class XmlRPCConnector(Connector):
"""
This class supports the XmlRPC protocol
"""
PROTOCOL = 'xmlrpc'
def __init__(self, hostname, port=8069, timeout=10.0, retry=0):
Connector.__init__(self, hostname, port, timeout=timeout)
self._logger = logging.getLogger('connector.xmlrpc')
self.url = 'http://%s:%s/xmlrpc' % (self.hostname, self.port)
self.retry = retry
def send(self, service_name, method, *args):
url = '%s/%s' % (self.url, service_name)
transport = TimeoutTransport(timeout=self.timeout)
service = xmlrpclib.ServerProxy(url, allow_none=1, transport=transport)
return self._send(service, method, *args)
def _send(self, service, method, *args):
i = 0
retry = True
while retry:
try:
retry = False
return getattr(service, method)(*args)
except Exception, e:
error = e
if i < self.retry:
print 'retry xml_rpc', i
retry = True
self._logger.debug("retry to connect %s, error : %s" ,i, e)
i += 1
if error:
raise RuntimeError("Unable to proceed for the following reason: %s" % (e.faultCode if hasattr(e, 'faultCode') else tools.ustr(e)))
"""Modified version of xmlrcpclib.Transport.request (same in Python 2.4, 2.5, 2.6)
to workaround Python bug http://bugs.python.org/issue1223
for Python versions before 2.6
This patch is inspired by http://www.cherrypy.org/ticket/743.
See LP bug https://bugs.launchpad.net/openobject-client/+bug/673775
"""
def fixed_request(self, host, handler, request_body, verbose=0):
h = self.make_connection(host)
if verbose:
h.set_debuglevel(1)
self.send_request(h, handler, request_body)
self.send_host(h, host)
self.send_user_agent(h)
self.send_content(h, request_body)
errcode, errmsg, headers = h.getreply()
if errcode != 200:
raise xmlrpclib.ProtocolError(host + handler, errcode, errmsg,
headers)
self.verbose = verbose
# below we make sure to call parse_response() and
# not _parse_response(), and don't pass the socket,
# so it will have to use the file instead, and avoid
# the problem of the original code.
return self.parse_response(h.getfile())
# Rude monkey-patch to fix the SSL connection error in Python 2.5-,
# as last resort solution to fix it all at once.
if sys.version_info < (2,6):
xmlrpclib.SafeTransport.request = fixed_request
class SecuredXmlRPCConnector(XmlRPCConnector):
"""
This class supports the XmlRPC protocol over HTTPS
"""
PROTOCOL = 'xmlrpcs'
def __init__(self, hostname, port=8070, timeout=10.0, retry=0):
XmlRPCConnector.__init__(self, hostname, port, timeout=timeout, retry=retry)
self.url = 'https://%s:%s/xmlrpc' % (self.hostname, self.port)
def send(self, service_name, method, *args):
url = '%s/%s' % (self.url, service_name)
service = xmlrpclib.ServerProxy(url, allow_none=1)
return self._send(service, method, *args)
class GzipXmlRPCConnector(XmlRPCConnector):
"""
This class supports the XmlRPC protocol with gzipped payload
"""
PROTOCOL = 'gzipxmlrpc'
def send(self, service_name, method, *args):
url = '%s/%s' % (self.url, service_name)
gzip_transport = GzipTransport(timeout=self.timeout)
service = xmlrpclib.ServerProxy(url, allow_none=1, transport=gzip_transport)
return self._send(service, method, *args)
class GzipXmlRPCSConnector(GzipXmlRPCConnector):
PROTOCOL = 'gzipxmlrpcs'
def __init__(self, hostname, port=8069, *args, **kwargs):
GzipXmlRPCConnector.__init__(self, hostname, port, *args, **kwargs)
self.url = 'https://%s:%s/xmlrpc' % (self.hostname, self.port)
def send(self, service_name, method, *args):
url = '%s/%s' % (self.url, service_name)
gzip_safe_transport = GzipSafeTransport(timeout=self.timeout)
service = xmlrpclib.ServerProxy(url, allow_none=1, transport=gzip_safe_transport)
return getattr(service, method)(*args)
class NetRPC_Exception(Exception):
def __init__(self, faultCode, faultString):
self.faultCode = faultCode
self.faultString = faultString
self.args = (faultCode, faultString)
class NetRPC:
def __init__(self, sock=None, is_gzip=False, timeout=10.0):
if sock is None:
self.sock = socket.socket(
socket.AF_INET, socket.SOCK_STREAM)
else:
self.sock = sock
self.sock.settimeout(timeout)
self.is_gzip = is_gzip
self._logger = logging.getLogger('netrpc')
def connect(self, host, port=False):
if not port:
protocol, buf = host.split('//')
host, port = buf.split(':')
try:
self.sock.connect((host, int(port)))
except Exception, e:
raise NetRPC_Exception(tools.ustr(e), "Could not connect to %s:%s" % (host, port))
def disconnect(self):
self.sock.shutdown(socket.SHUT_RDWR)
self.sock.close()
def mysend(self, msg, exception=False, traceback=None):
#self._logger.debug("rpc message : %s", msg)
print "Sending %(msg)s" % dict(msg='/'.join(msg))
msg = pickle.dumps([msg,traceback])
if self.is_gzip:
print "Compressing %(nb)d bytes" % dict(nb=len(msg))
raw_size = len(msg)
msg = zlib.compress(msg, zlib.Z_BEST_COMPRESSION)
gzipped_size = len(msg)
print " => %(content)d bytes saved (new size: %(newsize)d)" % dict(content=raw_size-gzipped_size, newsize=gzipped_size)
#saving = 100*(float(raw_size-gzipped_size))/gzipped_size if gzipped_size else 0
#self._logger.debug('payload size: raw %s, gzipped %s, saving %.2f%%', raw_size, gzipped_size, saving)
size = len(msg)
self.sock.send('%8d' % size)
self.sock.send(exception and "1" or "0")
totalsent = 0
while totalsent < size:
sent = self.sock.send(msg[totalsent:])
if sent == 0:
raise RuntimeError, "socket connection broken"
totalsent = totalsent + sent
percentage = float(sent)/totalsent*100.0
progressbar = 'X' * int(percentage/5.0)
data = dict(percentage=percentage, progressbar=progressbar)
sys.stdout.write(' %(percentage)3d%% %(progressbar)s\r' % data)
sys.stdout.write(' 100% ' + ('X' * 20) + '\r\n')
def myreceive(self):
print "Waiting for data"
buf=''
while len(buf) < 8:
chunk = self.sock.recv(8 - len(buf))
if chunk == '':
raise RuntimeError, "socket connection broken"
buf += chunk
size = int(buf)
print " => %(nb)d bytes to be downloaded" % dict(nb=size)
buf = self.sock.recv(1)
if buf != "0":
exception = buf
else:
exception = False
msg = ''
while len(msg) < size:
chunk = self.sock.recv(size-len(msg))
if chunk == '':
raise RuntimeError, "socket connection broken"
msg = msg + chunk
percentage = float(len(msg))/size*100.0
progressbar = 'X' * int(percentage/5.0)
data = dict(percentage=percentage, progressbar=progressbar)
sys.stdout.write(' %(percentage)3d%% %(progressbar)s\r' % data)
sys.stdout.write(' 100% ' + ('X' * 20) + '\r\n')
if msg.startswith(GZIP_MAGIC):
gzipped_size = len(msg)
msg = zlib.decompress(msg)
raw_size = len(msg)
#saving = 100*(float(raw_size-gzipped_size))/gzipped_size if gzipped_size else 0
#self._logger.debug('payload size: raw %s, gzipped %s, saving %.2f%%', raw_size, gzipped_size, saving)
res = SafeUnpickler.loads(msg)
if isinstance(res[0],Exception):
if exception:
raise NetRPC_Exception(unicode(res[0]), str(res[1]))
raise res[0]
else:
return res[0]
class NetRPCConnector(Connector):
PROTOCOL = 'netrpc'
def __init__(self, hostname, port=8070, is_gzip=False, timeout=10.0, retry=10):
Connector.__init__(self, hostname, port, timeout=timeout)
self._logger = logging.getLogger('connector.netrpc')
self.is_gzip = is_gzip
self.retry = retry
def send(self, service_name, method, *args):
i = 0
retry = True
result = False
error = False
while retry:
try:
retry = False
#US-309: Reset value of error in the previous rounds, otherwise the system will raise exception regardless of the result of the next try!
error = False
socket = NetRPC(is_gzip=self.is_gzip, timeout=self.timeout)
socket.connect(self.hostname, self.port)
socket.mysend((service_name, method, )+args)
result = socket.myreceive()
except Exception, e:
error = e
print "Error when connecting to %(hostname)s:%(port)d" % dict(hostname=self.hostname, port=self.port)
if i < self.retry:
retry = True
i += 1
socket.disconnect()
if error:
raise RuntimeError("Unable to proceed for the following reason: %s" % (e.faultCode if hasattr(e, 'faultCode') else tools.ustr(e)))
return result
class GzipNetRPCConnector(NetRPCConnector):
PROTOCOL = 'netrpc_gzip'
def __init__(self, *args, **kwargs):
super(GzipNetRPCConnector, self).__init__(is_gzip=True, *args, **kwargs)
class Common(object):
_logger = logging.getLogger('connection.common')
def __init__(self, connector):
self.connector = connector
def __getattr__(self, method):
"""
:param method: The method for the linked object (search, read, write, unlink, create, ...)
"""
#self._logger.debug('method: %r', method)
def proxy(*args):
"""
:param args: A list of values for the method
"""
#self._logger.debug('args: %r', args)
result = self.connector.send('common', method, *args)
#self._logger.debug('result: %r' % result)
return result
return proxy
class Database(object):
_logger = logging.getLogger('connection.database')
def __init__(self, connector):
self.connector = connector
def __getattr__(self, method):
"""
:param method: The method for the linked object (search, read, write, unlink, create, ...)
"""
#self._logger.debug('method: %r', method)
def proxy(*args):
"""
:param args: A list of values for the method
"""
#self._logger.debug('args: %r', args)
result = self.connector.send('db', method, *args)
#self._logger.debug('result: %r' % result)
return result
return proxy
class Connection(object):
"""
TODO: Document this class
"""
_logger = logging.getLogger('connection')
def __init__(self, connector,
database,
login=None,
password=<PASSWORD>,
user_id=None):
"""
:param connector:
:param database:
:param login:
:param password:
"""
self.connector = connector
self.database, self.login, self.password = database, login, password
self.user_id = user_id
if user_id is None:
self.user_id = Common(self.connector).login(self.database, self.login, self.password)
if self.user_id is False:
raise osv.except_osv(_('Error!'), _('Unable to connect to the distant server with this user!'))
self._logger.debug(self.user_id)
def __repr__(self):
"""
Return a readable representation of the Connection object
"""
url = "%(protocol)s://%(login)s:%(password)s@" \
"%(hostname)s:%(port)d/%(database)s" % {
'protocol' : self.connector.PROTOCOL,
'login' : self.login,
'password' : <PASSWORD>,
'hostname' : self.connector.hostname,
'port' : self.connector.port,
'database' : self.database,
}
return "Connection: %s" % url
class Object(object):
"""
TODO: Document this class
"""
_logger = logging.getLogger('object')
def __repr__(self):
"""
"""
return "Object <%s>" % (self.model)
def __init__(self, connection, model, context=None):
"""
:param connection:
:param model:
"""
self.connection = connection
self.model = model
self.context = context
def __getattr__(self, method):
"""
:param method: The method for the linked object (search, read, write, unlink, create, ...)
"""
def proxy(*args):
"""
:param args: A list of values for the method
"""
return self.__send__(method, *args)
return proxy
def __send__(self, method, *args):
#self._logger.debug('method: %r', method)
#self._logger.debug('args: %r', args)
result = self.connection.connector.send('object', 'execute',
self.connection.database,
self.connection.user_id,
self.connection.password,
self.model,
method,
*args)
#self._logger.debug('result: %r', result)
return result
def __add_context(self, arguments, context=None):
if context is None:
context = {}
if self.context is not None:
context.update(self.context)
arguments.append(context)
return arguments
def exists(self, oid, context=None):
# TODO: Fucking bug, we can't use the read(fields=['id']),
# because the server returns a positive value but the record does not exist
# into the database
value = self.search_count([('id', '=', oid)], context=context)
return value > 0
def read(self, ids, fields=None, context=None):
if fields is None:
fields = []
arguments = [ids, fields]
arguments = self.__add_context(arguments, context)
records = self.__send__('read', *arguments)
if isinstance(ids, (list, tuple,)):
records.sort(lambda x, y: cmp(ids.index(x['id']),
ids.index(y['id'])))
return records
def search(self, domain=None, offset=0, limit=None, order=None, context=None):
if domain is None:
domain = []
if limit is None:
limit = self.search_count(domain)
arguments = [domain, offset, limit, order is not None and order or False]
arguments = self.__add_context(arguments, context)
return self.__send__('search', *arguments)
def search_count(self, domain, context=None):
if context is None:
context = {}
return self.__send__('search_count', domain, context)
def write(self, ids, values, context=None):
if not isinstance(ids, (tuple, list)):
ids = [ids]
arguments = self.__add_context([ids, values], context)
return self.__send__('write', *arguments)
def create(self, values, context=None):
arguments = self.__add_context([values], context)
return self.__send__('create', *arguments)
def unlink(self, ids, context=None):
if not isinstance(ids, (tuple, list)):
ids = [ids]
arguments = self.__add_context([ids], context)
return self.__send__('unlink', *arguments)
def select(self, domain=None, fields=None, offset=0, limit=None, order=None, context=None):
record_ids = self.search(domain, offset=offset, limit=limit, order=order, context=context)
return self.read(record_ids, fields=fields, context=context)
for port in [ 20, 110, 8070 ]:
print "== CHECKING PORT %d ==" % port
print
before_time = datetime.now()
try:
host = 'check-internet.unifield.org'
if 'CHECK_HOST' in os.environ:
host = os.environ['CHECK_HOST']
connector = GzipNetRPCConnector(host, port, timeout=500, retry=2)
content = Common(connector).get_zip_file()
except socket.error as e:
print "Unable to connect"
print ""
continue
after_time = datetime.now()
print '%.2f Ko/s' % (len(content) / 1024.0 / (after_time - before_time).total_seconds())
import hashlib
hash = hashlib.md5()
hash.update(content)
md5hash = hash.hexdigest()
print
if md5hash == '6ed32b24be2b7e270e79f92fb2680754':
print "OK"
else:
print "Failed. Got hash %s." % md5hash
print ""
print "Press [return] to exit."
raw_input()
| #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
OpenObject Client Library
"""
import sys
import os
import socket
import zlib
import xmlrpclib
from timeout_transport import TimeoutTransport
from gzip_xmlrpclib import GzipTransport, GzipSafeTransport
from datetime import datetime
from tools.translate import _
import tools
try:
import cPickle as pickle
except:
import pickle
try:
import cStringIO as StringIO
except:
import StringIO
import logging
GZIP_MAGIC = '\x78\xda' # magic when max compression used
NB_RETRY = 10
# Safer Unpickler, in case the server is untrusted, from <NAME>
# http://nadiana.com/python-pickle-insecure#How_to_Make_Unpickling_Safer
class SafeUnpickler(object):
PICKLE_SAFE = {
'exceptions': set(['Exception']),
}
@classmethod
def find_class(cls, module, name):
if not module in cls.PICKLE_SAFE:
raise pickle.UnpicklingError(
'Attempting to unpickle unsafe module %s' % module
)
__import__(module)
mod = sys.modules[module]
if not name in cls.PICKLE_SAFE[module]:
raise pickle.UnpicklingError(
'Attempting to unpickle unsafe class %s' % name
)
klass = getattr(mod, name)
return klass
@classmethod
def loads(cls, pickle_string):
pickle_obj = pickle.Unpickler(StringIO.StringIO(pickle_string))
pickle_obj.find_global = cls.find_class
return pickle_obj.load()
class Connector(object):
"""
Connector class
"""
_logger = logging.getLogger('connector')
def __init__(self, hostname, port, timeout):
"""
:param hostname: Host name of the server
:param port: Port for the connection to the server
"""
self.hostname = hostname
self.port = port
self.timeout = timeout
class XmlRPCConnector(Connector):
"""
This class supports the XmlRPC protocol
"""
PROTOCOL = 'xmlrpc'
def __init__(self, hostname, port=8069, timeout=10.0, retry=0):
Connector.__init__(self, hostname, port, timeout=timeout)
self._logger = logging.getLogger('connector.xmlrpc')
self.url = 'http://%s:%s/xmlrpc' % (self.hostname, self.port)
self.retry = retry
def send(self, service_name, method, *args):
url = '%s/%s' % (self.url, service_name)
transport = TimeoutTransport(timeout=self.timeout)
service = xmlrpclib.ServerProxy(url, allow_none=1, transport=transport)
return self._send(service, method, *args)
def _send(self, service, method, *args):
i = 0
retry = True
while retry:
try:
retry = False
return getattr(service, method)(*args)
except Exception, e:
error = e
if i < self.retry:
print 'retry xml_rpc', i
retry = True
self._logger.debug("retry to connect %s, error : %s" ,i, e)
i += 1
if error:
raise RuntimeError("Unable to proceed for the following reason: %s" % (e.faultCode if hasattr(e, 'faultCode') else tools.ustr(e)))
"""Modified version of xmlrcpclib.Transport.request (same in Python 2.4, 2.5, 2.6)
to workaround Python bug http://bugs.python.org/issue1223
for Python versions before 2.6
This patch is inspired by http://www.cherrypy.org/ticket/743.
See LP bug https://bugs.launchpad.net/openobject-client/+bug/673775
"""
def fixed_request(self, host, handler, request_body, verbose=0):
h = self.make_connection(host)
if verbose:
h.set_debuglevel(1)
self.send_request(h, handler, request_body)
self.send_host(h, host)
self.send_user_agent(h)
self.send_content(h, request_body)
errcode, errmsg, headers = h.getreply()
if errcode != 200:
raise xmlrpclib.ProtocolError(host + handler, errcode, errmsg,
headers)
self.verbose = verbose
# below we make sure to call parse_response() and
# not _parse_response(), and don't pass the socket,
# so it will have to use the file instead, and avoid
# the problem of the original code.
return self.parse_response(h.getfile())
# Rude monkey-patch to fix the SSL connection error in Python 2.5-,
# as last resort solution to fix it all at once.
if sys.version_info < (2,6):
xmlrpclib.SafeTransport.request = fixed_request
class SecuredXmlRPCConnector(XmlRPCConnector):
"""
This class supports the XmlRPC protocol over HTTPS
"""
PROTOCOL = 'xmlrpcs'
def __init__(self, hostname, port=8070, timeout=10.0, retry=0):
XmlRPCConnector.__init__(self, hostname, port, timeout=timeout, retry=retry)
self.url = 'https://%s:%s/xmlrpc' % (self.hostname, self.port)
def send(self, service_name, method, *args):
url = '%s/%s' % (self.url, service_name)
service = xmlrpclib.ServerProxy(url, allow_none=1)
return self._send(service, method, *args)
class GzipXmlRPCConnector(XmlRPCConnector):
"""
This class supports the XmlRPC protocol with gzipped payload
"""
PROTOCOL = 'gzipxmlrpc'
def send(self, service_name, method, *args):
url = '%s/%s' % (self.url, service_name)
gzip_transport = GzipTransport(timeout=self.timeout)
service = xmlrpclib.ServerProxy(url, allow_none=1, transport=gzip_transport)
return self._send(service, method, *args)
class GzipXmlRPCSConnector(GzipXmlRPCConnector):
PROTOCOL = 'gzipxmlrpcs'
def __init__(self, hostname, port=8069, *args, **kwargs):
GzipXmlRPCConnector.__init__(self, hostname, port, *args, **kwargs)
self.url = 'https://%s:%s/xmlrpc' % (self.hostname, self.port)
def send(self, service_name, method, *args):
url = '%s/%s' % (self.url, service_name)
gzip_safe_transport = GzipSafeTransport(timeout=self.timeout)
service = xmlrpclib.ServerProxy(url, allow_none=1, transport=gzip_safe_transport)
return getattr(service, method)(*args)
class NetRPC_Exception(Exception):
def __init__(self, faultCode, faultString):
self.faultCode = faultCode
self.faultString = faultString
self.args = (faultCode, faultString)
class NetRPC:
def __init__(self, sock=None, is_gzip=False, timeout=10.0):
if sock is None:
self.sock = socket.socket(
socket.AF_INET, socket.SOCK_STREAM)
else:
self.sock = sock
self.sock.settimeout(timeout)
self.is_gzip = is_gzip
self._logger = logging.getLogger('netrpc')
def connect(self, host, port=False):
if not port:
protocol, buf = host.split('//')
host, port = buf.split(':')
try:
self.sock.connect((host, int(port)))
except Exception, e:
raise NetRPC_Exception(tools.ustr(e), "Could not connect to %s:%s" % (host, port))
def disconnect(self):
self.sock.shutdown(socket.SHUT_RDWR)
self.sock.close()
def mysend(self, msg, exception=False, traceback=None):
#self._logger.debug("rpc message : %s", msg)
print "Sending %(msg)s" % dict(msg='/'.join(msg))
msg = pickle.dumps([msg,traceback])
if self.is_gzip:
print "Compressing %(nb)d bytes" % dict(nb=len(msg))
raw_size = len(msg)
msg = zlib.compress(msg, zlib.Z_BEST_COMPRESSION)
gzipped_size = len(msg)
print " => %(content)d bytes saved (new size: %(newsize)d)" % dict(content=raw_size-gzipped_size, newsize=gzipped_size)
#saving = 100*(float(raw_size-gzipped_size))/gzipped_size if gzipped_size else 0
#self._logger.debug('payload size: raw %s, gzipped %s, saving %.2f%%', raw_size, gzipped_size, saving)
size = len(msg)
self.sock.send('%8d' % size)
self.sock.send(exception and "1" or "0")
totalsent = 0
while totalsent < size:
sent = self.sock.send(msg[totalsent:])
if sent == 0:
raise RuntimeError, "socket connection broken"
totalsent = totalsent + sent
percentage = float(sent)/totalsent*100.0
progressbar = 'X' * int(percentage/5.0)
data = dict(percentage=percentage, progressbar=progressbar)
sys.stdout.write(' %(percentage)3d%% %(progressbar)s\r' % data)
sys.stdout.write(' 100% ' + ('X' * 20) + '\r\n')
def myreceive(self):
print "Waiting for data"
buf=''
while len(buf) < 8:
chunk = self.sock.recv(8 - len(buf))
if chunk == '':
raise RuntimeError, "socket connection broken"
buf += chunk
size = int(buf)
print " => %(nb)d bytes to be downloaded" % dict(nb=size)
buf = self.sock.recv(1)
if buf != "0":
exception = buf
else:
exception = False
msg = ''
while len(msg) < size:
chunk = self.sock.recv(size-len(msg))
if chunk == '':
raise RuntimeError, "socket connection broken"
msg = msg + chunk
percentage = float(len(msg))/size*100.0
progressbar = 'X' * int(percentage/5.0)
data = dict(percentage=percentage, progressbar=progressbar)
sys.stdout.write(' %(percentage)3d%% %(progressbar)s\r' % data)
sys.stdout.write(' 100% ' + ('X' * 20) + '\r\n')
if msg.startswith(GZIP_MAGIC):
gzipped_size = len(msg)
msg = zlib.decompress(msg)
raw_size = len(msg)
#saving = 100*(float(raw_size-gzipped_size))/gzipped_size if gzipped_size else 0
#self._logger.debug('payload size: raw %s, gzipped %s, saving %.2f%%', raw_size, gzipped_size, saving)
res = SafeUnpickler.loads(msg)
if isinstance(res[0],Exception):
if exception:
raise NetRPC_Exception(unicode(res[0]), str(res[1]))
raise res[0]
else:
return res[0]
class NetRPCConnector(Connector):
PROTOCOL = 'netrpc'
def __init__(self, hostname, port=8070, is_gzip=False, timeout=10.0, retry=10):
Connector.__init__(self, hostname, port, timeout=timeout)
self._logger = logging.getLogger('connector.netrpc')
self.is_gzip = is_gzip
self.retry = retry
def send(self, service_name, method, *args):
i = 0
retry = True
result = False
error = False
while retry:
try:
retry = False
#US-309: Reset value of error in the previous rounds, otherwise the system will raise exception regardless of the result of the next try!
error = False
socket = NetRPC(is_gzip=self.is_gzip, timeout=self.timeout)
socket.connect(self.hostname, self.port)
socket.mysend((service_name, method, )+args)
result = socket.myreceive()
except Exception, e:
error = e
print "Error when connecting to %(hostname)s:%(port)d" % dict(hostname=self.hostname, port=self.port)
if i < self.retry:
retry = True
i += 1
socket.disconnect()
if error:
raise RuntimeError("Unable to proceed for the following reason: %s" % (e.faultCode if hasattr(e, 'faultCode') else tools.ustr(e)))
return result
class GzipNetRPCConnector(NetRPCConnector):
PROTOCOL = 'netrpc_gzip'
def __init__(self, *args, **kwargs):
super(GzipNetRPCConnector, self).__init__(is_gzip=True, *args, **kwargs)
class Common(object):
_logger = logging.getLogger('connection.common')
def __init__(self, connector):
self.connector = connector
def __getattr__(self, method):
"""
:param method: The method for the linked object (search, read, write, unlink, create, ...)
"""
#self._logger.debug('method: %r', method)
def proxy(*args):
"""
:param args: A list of values for the method
"""
#self._logger.debug('args: %r', args)
result = self.connector.send('common', method, *args)
#self._logger.debug('result: %r' % result)
return result
return proxy
class Database(object):
_logger = logging.getLogger('connection.database')
def __init__(self, connector):
self.connector = connector
def __getattr__(self, method):
"""
:param method: The method for the linked object (search, read, write, unlink, create, ...)
"""
#self._logger.debug('method: %r', method)
def proxy(*args):
"""
:param args: A list of values for the method
"""
#self._logger.debug('args: %r', args)
result = self.connector.send('db', method, *args)
#self._logger.debug('result: %r' % result)
return result
return proxy
class Connection(object):
"""
TODO: Document this class
"""
_logger = logging.getLogger('connection')
def __init__(self, connector,
database,
login=None,
password=<PASSWORD>,
user_id=None):
"""
:param connector:
:param database:
:param login:
:param password:
"""
self.connector = connector
self.database, self.login, self.password = database, login, password
self.user_id = user_id
if user_id is None:
self.user_id = Common(self.connector).login(self.database, self.login, self.password)
if self.user_id is False:
raise osv.except_osv(_('Error!'), _('Unable to connect to the distant server with this user!'))
self._logger.debug(self.user_id)
def __repr__(self):
"""
Return a readable representation of the Connection object
"""
url = "%(protocol)s://%(login)s:%(password)s@" \
"%(hostname)s:%(port)d/%(database)s" % {
'protocol' : self.connector.PROTOCOL,
'login' : self.login,
'password' : <PASSWORD>,
'hostname' : self.connector.hostname,
'port' : self.connector.port,
'database' : self.database,
}
return "Connection: %s" % url
class Object(object):
"""
TODO: Document this class
"""
_logger = logging.getLogger('object')
def __repr__(self):
"""
"""
return "Object <%s>" % (self.model)
def __init__(self, connection, model, context=None):
"""
:param connection:
:param model:
"""
self.connection = connection
self.model = model
self.context = context
def __getattr__(self, method):
"""
:param method: The method for the linked object (search, read, write, unlink, create, ...)
"""
def proxy(*args):
"""
:param args: A list of values for the method
"""
return self.__send__(method, *args)
return proxy
def __send__(self, method, *args):
#self._logger.debug('method: %r', method)
#self._logger.debug('args: %r', args)
result = self.connection.connector.send('object', 'execute',
self.connection.database,
self.connection.user_id,
self.connection.password,
self.model,
method,
*args)
#self._logger.debug('result: %r', result)
return result
def __add_context(self, arguments, context=None):
if context is None:
context = {}
if self.context is not None:
context.update(self.context)
arguments.append(context)
return arguments
def exists(self, oid, context=None):
# TODO: Fucking bug, we can't use the read(fields=['id']),
# because the server returns a positive value but the record does not exist
# into the database
value = self.search_count([('id', '=', oid)], context=context)
return value > 0
def read(self, ids, fields=None, context=None):
if fields is None:
fields = []
arguments = [ids, fields]
arguments = self.__add_context(arguments, context)
records = self.__send__('read', *arguments)
if isinstance(ids, (list, tuple,)):
records.sort(lambda x, y: cmp(ids.index(x['id']),
ids.index(y['id'])))
return records
def search(self, domain=None, offset=0, limit=None, order=None, context=None):
if domain is None:
domain = []
if limit is None:
limit = self.search_count(domain)
arguments = [domain, offset, limit, order is not None and order or False]
arguments = self.__add_context(arguments, context)
return self.__send__('search', *arguments)
def search_count(self, domain, context=None):
if context is None:
context = {}
return self.__send__('search_count', domain, context)
def write(self, ids, values, context=None):
if not isinstance(ids, (tuple, list)):
ids = [ids]
arguments = self.__add_context([ids, values], context)
return self.__send__('write', *arguments)
def create(self, values, context=None):
arguments = self.__add_context([values], context)
return self.__send__('create', *arguments)
def unlink(self, ids, context=None):
if not isinstance(ids, (tuple, list)):
ids = [ids]
arguments = self.__add_context([ids], context)
return self.__send__('unlink', *arguments)
def select(self, domain=None, fields=None, offset=0, limit=None, order=None, context=None):
record_ids = self.search(domain, offset=offset, limit=limit, order=order, context=context)
return self.read(record_ids, fields=fields, context=context)
for port in [ 20, 110, 8070 ]:
print "== CHECKING PORT %d ==" % port
print
before_time = datetime.now()
try:
host = 'check-internet.unifield.org'
if 'CHECK_HOST' in os.environ:
host = os.environ['CHECK_HOST']
connector = GzipNetRPCConnector(host, port, timeout=500, retry=2)
content = Common(connector).get_zip_file()
except socket.error as e:
print "Unable to connect"
print ""
continue
after_time = datetime.now()
print '%.2f Ko/s' % (len(content) / 1024.0 / (after_time - before_time).total_seconds())
import hashlib
hash = hashlib.md5()
hash.update(content)
md5hash = hash.hexdigest()
print
if md5hash == '6ed32b24be2b7e270e79f92fb2680754':
print "OK"
else:
print "Failed. Got hash %s." % md5hash
print ""
print "Press [return] to exit."
raw_input() | en | 0.556024 | #!/usr/bin/python # -*- coding: utf-8 -*- OpenObject Client Library # magic when max compression used # Safer Unpickler, in case the server is untrusted, from <NAME> # http://nadiana.com/python-pickle-insecure#How_to_Make_Unpickling_Safer Connector class :param hostname: Host name of the server :param port: Port for the connection to the server This class supports the XmlRPC protocol Modified version of xmlrcpclib.Transport.request (same in Python 2.4, 2.5, 2.6) to workaround Python bug http://bugs.python.org/issue1223 for Python versions before 2.6 This patch is inspired by http://www.cherrypy.org/ticket/743. See LP bug https://bugs.launchpad.net/openobject-client/+bug/673775 # below we make sure to call parse_response() and # not _parse_response(), and don't pass the socket, # so it will have to use the file instead, and avoid # the problem of the original code. # Rude monkey-patch to fix the SSL connection error in Python 2.5-, # as last resort solution to fix it all at once. This class supports the XmlRPC protocol over HTTPS This class supports the XmlRPC protocol with gzipped payload #self._logger.debug("rpc message : %s", msg) #saving = 100*(float(raw_size-gzipped_size))/gzipped_size if gzipped_size else 0 #self._logger.debug('payload size: raw %s, gzipped %s, saving %.2f%%', raw_size, gzipped_size, saving) #saving = 100*(float(raw_size-gzipped_size))/gzipped_size if gzipped_size else 0 #self._logger.debug('payload size: raw %s, gzipped %s, saving %.2f%%', raw_size, gzipped_size, saving) #US-309: Reset value of error in the previous rounds, otherwise the system will raise exception regardless of the result of the next try! :param method: The method for the linked object (search, read, write, unlink, create, ...) 
#self._logger.debug('method: %r', method) :param args: A list of values for the method #self._logger.debug('args: %r', args) #self._logger.debug('result: %r' % result) :param method: The method for the linked object (search, read, write, unlink, create, ...) #self._logger.debug('method: %r', method) :param args: A list of values for the method #self._logger.debug('args: %r', args) #self._logger.debug('result: %r' % result) TODO: Document this class :param connector: :param database: :param login: :param password: Return a readable representation of the Connection object TODO: Document this class :param connection: :param model: :param method: The method for the linked object (search, read, write, unlink, create, ...) :param args: A list of values for the method #self._logger.debug('method: %r', method) #self._logger.debug('args: %r', args) #self._logger.debug('result: %r', result) # TODO: Fucking bug, we can't use the read(fields=['id']), # because the server returns a positive value but the record does not exist # into the database | 2.160725 | 2 |
tests/test_services/test_auth/test_base.py | beatMeDev/beatMeBackend | 0 | 6619163 | <reponame>beatMeDev/beatMeBackend
"""Base auth test_services test."""
import asyncio
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union
from unittest import mock
from unittest.mock import MagicMock
from uuid import UUID
import jwt
import pytest
from fastapi.responses import ORJSONResponse
from fastapi.security import HTTPAuthorizationCredentials
from orjson import dumps # pylint: disable-msg=E0611
from orjson import loads # pylint: disable-msg=E0611
from starlette.datastructures import QueryParams
from starlette.requests import Request
from truth.truth import AssertThat # type: ignore
from app.models.api.auth import AuthOut
from app.models.db.user import AuthAccount
from app.models.db.user import User
from app.services.auth.base import OAuthRoute
from app.services.auth.base import bearer_auth
from app.services.auth.base import create_tokens
from app.services.auth.base import logout
from app.services.auth.base import refresh_tokens
from app.services.auth.base import refresh_tokens_controller
from app.settings import JWT_ALGORITHM
from app.settings import JWT_SECRET
from app.utils.exceptions import BadRequestError
from app.utils.exceptions import UnauthorizedError
USER_UUID = UUID("ef4b35cb-1c32-43b7-a986-14ba5d05064f")
AUTH_ACCOUNT_ID = "1"
REDIRECT_LINK = "link"
async def endpoint_logic() -> None:
"""Endpoint logic mock"""
return None
class TestOAuthRoute(OAuthRoute):
"""Test auth class with mocked methods."""
__test__ = False
async def code_auth(self, code: str) -> Tuple[str, str, int]:
"""
Code auth mock.
:param code: auth code
:return: mock value
"""
return "access_token", "refresh_token", 1000000
async def get_account_info(self, access_token: str) -> Dict[str, str]:
"""Get account info mock."""
return {"_id": AUTH_ACCOUNT_ID, "name": "Test", "image": "link", "url": "link"}
async def create_auth_link(self) -> str:
"""Create link for sign in on external provider."""
return REDIRECT_LINK
def get_patched_route() -> TestOAuthRoute:
"""Create patched test route."""
route = TestOAuthRoute(endpoint=endpoint_logic, path="test")
return route
async def get_auth_request(method: str, user_id: Optional[str] = None) -> Request:
"""Create test request."""
request_scope = {
"type": "http",
"method": method,
"query_params": QueryParams(code="test"),
"query_string": b"code=test",
"headers": [],
}
if user_id:
request_scope["user_id"] = user_id
request = Request(scope=request_scope)
return request
not_implemented_methods: List[Any] = [
("code_auth", {"code": "test"},),
("get_account_info", {"access_token": "test"},),
("create_auth_link", {},),
]
@pytest.mark.asyncio
@mock.patch("app.extensions.redis_client.set")
async def test_create_tokens_check_schema(set_mock: MagicMock) -> None:
"""
Test tokens creation if user id in account info
"""
set_mock.return_value = asyncio.Future()
set_mock.return_value.set_result(True)
tokens: Dict[str, Union[str, int]] = await create_tokens(user_id=str(USER_UUID))
AssertThat(AuthOut(**tokens).validate(tokens)).IsNotEmpty() # type: ignore
@pytest.mark.asyncio
@mock.patch("app.extensions.redis_client.set")
async def test_create_tokens_check_tokens(set_mock: MagicMock) -> None:
"""Check created tokens and encoded data in them."""
set_mock.return_value = asyncio.Future()
set_mock.return_value.set_result(True)
tokens: Dict[str, Any] = await create_tokens(user_id=str(USER_UUID))
access_token: str = tokens["access_token"]
refresh_token: str = tokens["refresh_token"]
access_token_data: Dict[str, Any] = jwt.decode(
jwt=access_token, key=JWT_SECRET, algorithms=[JWT_ALGORITHM]
)
refresh_token_data = jwt.decode(
jwt=refresh_token, key=JWT_SECRET, algorithms=[JWT_ALGORITHM]
)
AssertThat(access_token_data.get("user_id")).IsEqualTo(str(USER_UUID))
AssertThat(refresh_token_data.get("access_token")).IsEqualTo(access_token)
@pytest.mark.asyncio
@pytest.mark.parametrize( # pylint: disable=not-callable
"method_name,methods_kwargs", not_implemented_methods,
)
async def test_base_auth_route_not_implement(
method_name: str,
methods_kwargs: Dict[str, Any]
) -> None:
"""Check not implemented methods were raised."""
route = OAuthRoute(path="/test/", endpoint=endpoint_logic)
with AssertThat(NotImplementedError).IsRaised():
await getattr(route, method_name)(**methods_kwargs)
@pytest.mark.asyncio
@mock.patch("app.extensions.redis_client.set")
async def test_base_auth_route_on_post(set_mock: MagicMock) -> None:
"""
Check auth handler when AuthAccount and User are not exist,
AuthAccount, User and relation between them should be created,
tokens should be returned.
"""
set_mock.return_value = asyncio.Future()
set_mock.return_value.set_result(True)
route = get_patched_route()
route_handler = route.get_route_handler()
request: Request = await get_auth_request(method="POST")
response: ORJSONResponse = await route_handler(request)
response_body = loads(response.body)
auth_account: AuthAccount = await AuthAccount.get(_id=AUTH_ACCOUNT_ID)
user: User = await User.get(auth_accounts__in=[auth_account])
AssertThat(AuthOut(**response_body).validate(response_body)).IsNotEmpty()
AssertThat(auth_account).IsNotNone()
AssertThat(user).IsNotNone()
@pytest.mark.asyncio
@mock.patch("app.extensions.redis_client.set")
async def test_base_auth_route_on_post_user_created(
set_mock: MagicMock,
user_fixture: User,
) -> None:
"""
Check auth handler when AuthAccount is not exists, but User exists and logged in,
AuthAccount should be created and added for user,
tokens should be returned.
"""
set_mock.return_value = asyncio.Future()
set_mock.return_value.set_result(True)
route = get_patched_route()
route_handler = route.get_route_handler()
request: Request = await get_auth_request(method="POST", user_id=str(user_fixture.id))
response: ORJSONResponse = await route_handler(request)
response_body = loads(response.body)
auth_account: AuthAccount = await AuthAccount.get(_id=AUTH_ACCOUNT_ID, user=user_fixture)
user: User = await User.get(auth_accounts__in=[auth_account])
AssertThat(AuthOut(**response_body).validate(response_body)).IsNotEmpty()
AssertThat(auth_account).IsNotNone()
AssertThat(user).IsNotNone()
@pytest.mark.asyncio
@mock.patch("app.extensions.redis_client.set")
async def test_base_auth_route_on_post_auth_account_created(
set_mock: MagicMock,
) -> None:
"""
Check auth handler when AuthAccount is not exists, but User exists and logged in,
AuthAccount should be created and added for user,
tokens should be returned.
"""
set_mock.return_value = asyncio.Future()
set_mock.return_value.set_result(True)
route = get_patched_route()
route_handler = route.get_route_handler()
request: Request = await get_auth_request(method="POST")
await route_handler(request) # call first time and auth account will created
response: ORJSONResponse = await route_handler(request) # here account should be created
response_body = loads(response.body)
auth_account: AuthAccount = await AuthAccount.get(_id=AUTH_ACCOUNT_ID)
user: User = await User.get(auth_accounts__in=[auth_account])
AssertThat(AuthOut(**response_body).validate(response_body)).IsNotEmpty()
AssertThat(auth_account).IsNotNone()
AssertThat(user).IsNotNone()
@pytest.mark.asyncio
async def test_base_auth_route_on_get() -> None:
"""
Check auth handler on GET will return sign in link for external provider.
"""
route = get_patched_route()
route_handler = route.get_route_handler()
request: Request = await get_auth_request(method="GET")
response: ORJSONResponse = await route_handler(request)
response_body = loads(response.body)
AssertThat(response.status_code).IsEqualTo(200)
AssertThat(response_body).IsEqualTo({"link": REDIRECT_LINK})
@pytest.mark.asyncio
async def test_base_auth_route_on_put() -> None:
"""
Check auth handler on PUT should return Bad Request.
"""
route = get_patched_route()
route_handler = route.get_route_handler()
request: Request = await get_auth_request(method="PUT")
with AssertThat(BadRequestError).IsRaised():
await route_handler(request)
@pytest.mark.asyncio
@mock.patch("app.extensions.redis_client.set")
@mock.patch("app.extensions.redis_client.get")
@mock.patch("app.extensions.redis_client.delete")
async def test_logout(
delete_mock: MagicMock, get_mock: MagicMock, set_mock: MagicMock
) -> None:
"""
Check that access token and refresh token will be deleted from redis.
"""
delete_mock.return_value = asyncio.Future()
delete_mock.return_value.set_result(True)
set_mock.return_value = asyncio.Future()
set_mock.return_value.set_result(True)
tokens: Dict[str, Any] = await create_tokens(user_id=str(USER_UUID))
access_token: str = tokens["access_token"]
refresh_token: str = tokens["refresh_token"]
get_mock.return_value = asyncio.Future()
get_mock.return_value.set_result(dumps({"refresh_token": refresh_token}))
result: bool = await logout(access_token=access_token)
AssertThat(result).IsTrue()
delete_mock.assert_any_call(access_token)
delete_mock.assert_any_call(refresh_token)
@pytest.mark.asyncio
async def test_logout_token_is_none() -> None:
"""
Check logout if token is none.
"""
result: bool = await logout(access_token=None)
AssertThat(result).IsFalse()
@mock.patch("app.extensions.redis_client.get")
@pytest.mark.asyncio
async def test_logout_data_is_none(get_mock: MagicMock) -> None:
"""
Check logout if toke_data is none.
"""
get_mock.return_value = asyncio.Future()
get_mock.return_value.set_result(None)
result: bool = await logout(access_token="test_token")
AssertThat(result).IsFalse()
@pytest.mark.asyncio
@mock.patch("app.extensions.redis_client.set")
@mock.patch("app.extensions.redis_client.get")
@mock.patch("app.extensions.redis_client.delete")
async def test_refresh_tokens(
delete_mock: MagicMock, get_mock: MagicMock, set_mock: MagicMock
) -> None:
"""
Test tokens refreshing if everything is fine.
"""
delete_mock.return_value = asyncio.Future()
delete_mock.return_value.set_result(True)
set_mock.return_value = asyncio.Future()
set_mock.return_value.set_result(True)
tokens: Dict[str, Any] = await create_tokens(user_id=str(USER_UUID))
get_mock.return_value = asyncio.Future()
get_mock.return_value.set_result(
dumps({"access_token": tokens["access_token"], "user_id": str(USER_UUID)})
)
refresh_token: str = tokens["refresh_token"]
new_tokens: Dict[str, Union[str, int]] = await refresh_tokens(
refresh_token=refresh_token
)
AssertThat(AuthOut(**new_tokens).validate(new_tokens)).IsNotEmpty() # type: ignore
@pytest.mark.asyncio
@mock.patch("app.extensions.redis_client.get")
async def test_refresh_tokens_not_raw_token(get_mock: MagicMock) -> None:
"""
Test tokens refreshing if token not exists in redis storage.
"""
get_mock.return_value = asyncio.Future()
get_mock.return_value.set_result(None)
with AssertThat(UnauthorizedError).IsRaised():
await refresh_tokens(refresh_token="<PASSWORD>")
@pytest.mark.asyncio
@mock.patch("app.services.auth.base.logout")
@mock.patch("app.extensions.redis_client.get")
async def test_refresh_tokens_logout_false(
get_mock: MagicMock,
logout_mock: MagicMock,
) -> None:
"""
Test tokens refreshing if logout returned False.
"""
get_mock.return_value = asyncio.Future()
get_mock.return_value.set_result(
dumps({"access_token": "test_token"})
)
logout_mock.return_value = False
with AssertThat(UnauthorizedError).IsRaised():
await refresh_tokens(refresh_token="<PASSWORD>")
@pytest.mark.asyncio
async def test_bearer_auth() -> None:
"""Check bearer auth return user id from scope."""
request = Request(scope={
"type": "http",
"method": "GET",
"headers": [],
"token_data": {
"user_id": str(USER_UUID),
}
})
credentials: HTTPAuthorizationCredentials = HTTPAuthorizationCredentials(
scheme="test",
credentials="test",
)
user_id: Optional[str] = await bearer_auth(
request=request, http_credentials=credentials
)
AssertThat(user_id).IsEqualTo(str(USER_UUID))
@pytest.mark.asyncio
async def test_refresh_tokens_controller_empty_token() -> None:
"""Check controller is raised if request scope has no token."""
request: Request = Request(scope={
"type": "http",
"method": "GET",
"headers": [],
})
with AssertThat(UnauthorizedError).IsRaised():
await refresh_tokens_controller(request=request)
@pytest.mark.asyncio
@mock.patch("app.services.auth.base.refresh_tokens")
async def test_refresh_tokens_controller(refresh_tokens_mock: MagicMock) -> None:
"""Check controller if everything is fine."""
test_value: bool = True
refresh_tokens_mock.return_value = test_value
request: Request = Request(scope={
"type": "http",
"method": "GET",
"headers": [],
"token": "test",
})
result = await refresh_tokens_controller(request=request)
AssertThat(result).IsEqualTo(test_value)
| """Base auth test_services test."""
import asyncio
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union
from unittest import mock
from unittest.mock import MagicMock
from uuid import UUID
import jwt
import pytest
from fastapi.responses import ORJSONResponse
from fastapi.security import HTTPAuthorizationCredentials
from orjson import dumps # pylint: disable-msg=E0611
from orjson import loads # pylint: disable-msg=E0611
from starlette.datastructures import QueryParams
from starlette.requests import Request
from truth.truth import AssertThat # type: ignore
from app.models.api.auth import AuthOut
from app.models.db.user import AuthAccount
from app.models.db.user import User
from app.services.auth.base import OAuthRoute
from app.services.auth.base import bearer_auth
from app.services.auth.base import create_tokens
from app.services.auth.base import logout
from app.services.auth.base import refresh_tokens
from app.services.auth.base import refresh_tokens_controller
from app.settings import JWT_ALGORITHM
from app.settings import JWT_SECRET
from app.utils.exceptions import BadRequestError
from app.utils.exceptions import UnauthorizedError
USER_UUID = UUID("ef4b35cb-1c32-43b7-a986-14ba5d05064f")
AUTH_ACCOUNT_ID = "1"
REDIRECT_LINK = "link"
async def endpoint_logic() -> None:
    """No-op coroutine used as the endpoint body for routes under test."""
class TestOAuthRoute(OAuthRoute):
    """OAuthRoute double that fakes every external-provider interaction."""

    # Keep pytest from collecting this helper class as a test case.
    __test__ = False

    async def code_auth(self, code: str) -> Tuple[str, str, int]:
        """Pretend to exchange an auth *code*; return fixed tokens and TTL.

        :param code: auth code
        :return: canned (access_token, refresh_token, expires_in) triple
        """
        access, refresh, expires_in = "access_token", "refresh_token", 1000000
        return access, refresh, expires_in

    async def get_account_info(self, access_token: str) -> Dict[str, str]:
        """Return a canned external-account payload."""
        account_info = {
            "_id": AUTH_ACCOUNT_ID,
            "name": "Test",
            "image": "link",
            "url": "link",
        }
        return account_info

    async def create_auth_link(self) -> str:
        """Return the canned provider sign-in link."""
        return REDIRECT_LINK
def get_patched_route() -> TestOAuthRoute:
    """Build a TestOAuthRoute wired to the no-op test endpoint."""
    return TestOAuthRoute(endpoint=endpoint_logic, path="test")
async def get_auth_request(method: str, user_id: Optional[str] = None) -> Request:
    """Build a minimal HTTP request whose query string carries an auth code.

    :param method: HTTP method to put in the ASGI scope.
    :param user_id: when given, stored in the scope to simulate a logged-in user.
    """
    scope = {
        "type": "http",
        "method": method,
        "query_params": QueryParams(code="test"),
        "query_string": b"code=test",
        "headers": [],
    }
    if user_id:
        scope["user_id"] = user_id
    return Request(scope=scope)
# (method name, call kwargs) pairs that drive the parametrized
# "not implemented" test: every abstract hook of the base OAuthRoute
# is expected to raise NotImplementedError when called directly.
not_implemented_methods: List[Any] = [
    ("code_auth", {"code": "test"},),
    ("get_account_info", {"access_token": "test"},),
    ("create_auth_link", {},),
]
@pytest.mark.asyncio
@mock.patch("app.extensions.redis_client.set")
async def test_create_tokens_check_schema(set_mock: MagicMock) -> None:
"""
Test tokens creation if user id in account info
"""
set_mock.return_value = asyncio.Future()
set_mock.return_value.set_result(True)
tokens: Dict[str, Union[str, int]] = await create_tokens(user_id=str(USER_UUID))
AssertThat(AuthOut(**tokens).validate(tokens)).IsNotEmpty() # type: ignore
@pytest.mark.asyncio
@mock.patch("app.extensions.redis_client.set")
async def test_create_tokens_check_tokens(set_mock: MagicMock) -> None:
"""Check created tokens and encoded data in them."""
set_mock.return_value = asyncio.Future()
set_mock.return_value.set_result(True)
tokens: Dict[str, Any] = await create_tokens(user_id=str(USER_UUID))
access_token: str = tokens["access_token"]
refresh_token: str = tokens["refresh_token"]
access_token_data: Dict[str, Any] = jwt.decode(
jwt=access_token, key=JWT_SECRET, algorithms=[JWT_ALGORITHM]
)
refresh_token_data = jwt.decode(
jwt=refresh_token, key=JWT_SECRET, algorithms=[JWT_ALGORITHM]
)
AssertThat(access_token_data.get("user_id")).IsEqualTo(str(USER_UUID))
AssertThat(refresh_token_data.get("access_token")).IsEqualTo(access_token)
@pytest.mark.asyncio
@pytest.mark.parametrize( # pylint: disable=not-callable
"method_name,methods_kwargs", not_implemented_methods,
)
async def test_base_auth_route_not_implement(
method_name: str,
methods_kwargs: Dict[str, Any]
) -> None:
"""Check not implemented methods were raised."""
route = OAuthRoute(path="/test/", endpoint=endpoint_logic)
with AssertThat(NotImplementedError).IsRaised():
await getattr(route, method_name)(**methods_kwargs)
@pytest.mark.asyncio
@mock.patch("app.extensions.redis_client.set")
async def test_base_auth_route_on_post(set_mock: MagicMock) -> None:
"""
Check auth handler when AuthAccount and User are not exist,
AuthAccount, User and relation between them should be created,
tokens should be returned.
"""
set_mock.return_value = asyncio.Future()
set_mock.return_value.set_result(True)
route = get_patched_route()
route_handler = route.get_route_handler()
request: Request = await get_auth_request(method="POST")
response: ORJSONResponse = await route_handler(request)
response_body = loads(response.body)
auth_account: AuthAccount = await AuthAccount.get(_id=AUTH_ACCOUNT_ID)
user: User = await User.get(auth_accounts__in=[auth_account])
AssertThat(AuthOut(**response_body).validate(response_body)).IsNotEmpty()
AssertThat(auth_account).IsNotNone()
AssertThat(user).IsNotNone()
@pytest.mark.asyncio
@mock.patch("app.extensions.redis_client.set")
async def test_base_auth_route_on_post_user_created(
set_mock: MagicMock,
user_fixture: User,
) -> None:
"""
Check auth handler when AuthAccount is not exists, but User exists and logged in,
AuthAccount should be created and added for user,
tokens should be returned.
"""
set_mock.return_value = asyncio.Future()
set_mock.return_value.set_result(True)
route = get_patched_route()
route_handler = route.get_route_handler()
request: Request = await get_auth_request(method="POST", user_id=str(user_fixture.id))
response: ORJSONResponse = await route_handler(request)
response_body = loads(response.body)
auth_account: AuthAccount = await AuthAccount.get(_id=AUTH_ACCOUNT_ID, user=user_fixture)
user: User = await User.get(auth_accounts__in=[auth_account])
AssertThat(AuthOut(**response_body).validate(response_body)).IsNotEmpty()
AssertThat(auth_account).IsNotNone()
AssertThat(user).IsNotNone()
@pytest.mark.asyncio
@mock.patch("app.extensions.redis_client.set")
async def test_base_auth_route_on_post_auth_account_created(
    set_mock: MagicMock,
) -> None:
    """
    Check auth handler when the AuthAccount already exists:
    the handler is invoked twice with the same account, the second call
    must reuse the AuthAccount/User created by the first one,
    tokens should be returned.
    """
    set_mock.return_value = asyncio.Future()
    set_mock.return_value.set_result(True)
    route = get_patched_route()
    route_handler = route.get_route_handler()
    request: Request = await get_auth_request(method="POST")
    await route_handler(request)  # first call creates the AuthAccount
    response: ORJSONResponse = await route_handler(request)  # second call must hit the existing account
    response_body = loads(response.body)
    auth_account: AuthAccount = await AuthAccount.get(_id=AUTH_ACCOUNT_ID)
    user: User = await User.get(auth_accounts__in=[auth_account])
    AssertThat(AuthOut(**response_body).validate(response_body)).IsNotEmpty()
    AssertThat(auth_account).IsNotNone()
    AssertThat(user).IsNotNone()
@pytest.mark.asyncio
async def test_base_auth_route_on_get() -> None:
    """
    Check auth handler on GET will return sign in link for external provider.
    """
    route = get_patched_route()
    route_handler = route.get_route_handler()
    request: Request = await get_auth_request(method="GET")
    response: ORJSONResponse = await route_handler(request)
    response_body = loads(response.body)
    # GET is the "start sign-in" branch: 200 plus the provider redirect link.
    AssertThat(response.status_code).IsEqualTo(200)
    AssertThat(response_body).IsEqualTo({"link": REDIRECT_LINK})
@pytest.mark.asyncio
async def test_base_auth_route_on_put() -> None:
"""
Check auth handler on PUT should return Bad Request.
"""
route = get_patched_route()
route_handler = route.get_route_handler()
request: Request = await get_auth_request(method="PUT")
with AssertThat(BadRequestError).IsRaised():
await route_handler(request)
@pytest.mark.asyncio
@mock.patch("app.extensions.redis_client.set")
@mock.patch("app.extensions.redis_client.get")
@mock.patch("app.extensions.redis_client.delete")
async def test_logout(
delete_mock: MagicMock, get_mock: MagicMock, set_mock: MagicMock
) -> None:
"""
Check that access token and refresh token will be deleted from redis.
"""
delete_mock.return_value = asyncio.Future()
delete_mock.return_value.set_result(True)
set_mock.return_value = asyncio.Future()
set_mock.return_value.set_result(True)
tokens: Dict[str, Any] = await create_tokens(user_id=str(USER_UUID))
access_token: str = tokens["access_token"]
refresh_token: str = tokens["refresh_token"]
get_mock.return_value = asyncio.Future()
get_mock.return_value.set_result(dumps({"refresh_token": refresh_token}))
result: bool = await logout(access_token=access_token)
AssertThat(result).IsTrue()
delete_mock.assert_any_call(access_token)
delete_mock.assert_any_call(refresh_token)
@pytest.mark.asyncio
async def test_logout_token_is_none() -> None:
    """Logout with no access token must report failure."""
    AssertThat(await logout(access_token=None)).IsFalse()
@mock.patch("app.extensions.redis_client.get")
@pytest.mark.asyncio
async def test_logout_data_is_none(get_mock: MagicMock) -> None:
    """
    Check logout returns False when no token data is stored in redis.
    """
    get_mock.return_value = asyncio.Future()
    get_mock.return_value.set_result(None)
    result: bool = await logout(access_token="test_token")
    AssertThat(result).IsFalse()
@pytest.mark.asyncio
@mock.patch("app.extensions.redis_client.set")
@mock.patch("app.extensions.redis_client.get")
@mock.patch("app.extensions.redis_client.delete")
async def test_refresh_tokens(
delete_mock: MagicMock, get_mock: MagicMock, set_mock: MagicMock
) -> None:
"""
Test tokens refreshing if everything is fine.
"""
delete_mock.return_value = asyncio.Future()
delete_mock.return_value.set_result(True)
set_mock.return_value = asyncio.Future()
set_mock.return_value.set_result(True)
tokens: Dict[str, Any] = await create_tokens(user_id=str(USER_UUID))
get_mock.return_value = asyncio.Future()
get_mock.return_value.set_result(
dumps({"access_token": tokens["access_token"], "user_id": str(USER_UUID)})
)
refresh_token: str = tokens["refresh_token"]
new_tokens: Dict[str, Union[str, int]] = await refresh_tokens(
refresh_token=refresh_token
)
AssertThat(AuthOut(**new_tokens).validate(new_tokens)).IsNotEmpty() # type: ignore
@pytest.mark.asyncio
@mock.patch("app.extensions.redis_client.get")
async def test_refresh_tokens_not_raw_token(get_mock: MagicMock) -> None:
"""
Test tokens refreshing if token not exists in redis storage.
"""
get_mock.return_value = asyncio.Future()
get_mock.return_value.set_result(None)
with AssertThat(UnauthorizedError).IsRaised():
await refresh_tokens(refresh_token="<PASSWORD>")
@pytest.mark.asyncio
@mock.patch("app.services.auth.base.logout")
@mock.patch("app.extensions.redis_client.get")
async def test_refresh_tokens_logout_false(
get_mock: MagicMock,
logout_mock: MagicMock,
) -> None:
"""
Test tokens refreshing if logout returned False.
"""
get_mock.return_value = asyncio.Future()
get_mock.return_value.set_result(
dumps({"access_token": "test_token"})
)
logout_mock.return_value = False
with AssertThat(UnauthorizedError).IsRaised():
await refresh_tokens(refresh_token="<PASSWORD>")
@pytest.mark.asyncio
async def test_bearer_auth() -> None:
    """Check bearer auth return user id from scope."""
    # token_data is read straight from the ASGI scope — presumably populated
    # by the auth middleware before bearer_auth runs; confirm against caller.
    request = Request(scope={
        "type": "http",
        "method": "GET",
        "headers": [],
        "token_data": {
            "user_id": str(USER_UUID),
        }
    })
    # Credential values are irrelevant here; only the scope's token_data is used.
    credentials: HTTPAuthorizationCredentials = HTTPAuthorizationCredentials(
        scheme="test",
        credentials="test",
    )
    user_id: Optional[str] = await bearer_auth(
        request=request, http_credentials=credentials
    )
    AssertThat(user_id).IsEqualTo(str(USER_UUID))
@pytest.mark.asyncio
async def test_refresh_tokens_controller_empty_token() -> None:
"""Check controller is raised if request scope has no token."""
request: Request = Request(scope={
"type": "http",
"method": "GET",
"headers": [],
})
with AssertThat(UnauthorizedError).IsRaised():
await refresh_tokens_controller(request=request)
@pytest.mark.asyncio
@mock.patch("app.services.auth.base.refresh_tokens")
async def test_refresh_tokens_controller(refresh_tokens_mock: MagicMock) -> None:
"""Check controller if everything is fine."""
test_value: bool = True
refresh_tokens_mock.return_value = test_value
request: Request = Request(scope={
"type": "http",
"method": "GET",
"headers": [],
"token": "test",
})
result = await refresh_tokens_controller(request=request)
AssertThat(result).IsEqualTo(test_value) | en | 0.829537 | Base auth test_services test. # pylint: disable-msg=E0611 # pylint: disable-msg=E0611 # type: ignore Endpoint logic mock Test auth class with mocked methods. Code auth mock. :param code: auth code :return: mock value Get account info mock. Create link for sign in on external provider. Create patched test route. Create test request. Test tokens creation if user id in account info # type: ignore Check created tokens and encoded data in them. # pylint: disable=not-callable Check not implemented methods were raised. Check auth handler when AuthAccount and User are not exist, AuthAccount, User and relation between them should be created, tokens should be returned. Check auth handler when AuthAccount is not exists, but User exists and logged in, AuthAccount should be created and added for user, tokens should be returned. Check auth handler when AuthAccount is not exists, but User exists and logged in, AuthAccount should be created and added for user, tokens should be returned. # call first time and auth account will created # here account should be created Check auth handler on GET will return sign in link for external provider. Check auth handler on PUT should return Bad Request. Check that access token and refresh token will be deleted from redis. Check logout if token is none. Check logout if toke_data is none. Test tokens refreshing if everything is fine. # type: ignore Test tokens refreshing if token not exists in redis storage. Test tokens refreshing if logout returned False. Check bearer auth return user id from scope. Check controller is raised if request scope has no token. Check controller if everything is fine. | 2.154194 | 2 |
savecode/threeyears/idownserver/config_taskbackdeal.py | Octoberr/swm0920 | 2 | 6619164 | <reponame>Octoberr/swm0920
"""回馈命令处理"""
# -*- coding:utf-8 -*-
from datacontract import ExtMatcher
from .taskbackdealer import (AutoTaskBackDealer, CmdBackDealer,
IScanTaskBackDealer, IScoutTaskBackDealer,
TaskBackConfig, TaskBackDealer)
# Registry of feedback ("back") dealers, keyed by a unique dealer name.
# Each dealer subscribes via ExtMatcher to the message extensions it handles;
# commented-out extension names appear to be legacy identifiers kept for
# reference — confirm before removing.
taskbackconfig: TaskBackConfig = TaskBackConfig({
    # iDown task feedback
    "taskbackdealer":
    TaskBackDealer(
        uniquename="taskbackdealer",
        datamatcher=ExtMatcher([
            # "idown_task_back",
            "idown_btask_back",
        ]),
        relation_inputer_src=None,
    ),
    # iDown command acknowledgements
    "cmdbackdealer":
    CmdBackDealer(
        uniquename="cmdbackdealer",
        datamatcher=ExtMatcher([
            "idown_cmd_back",
        ]),
        relation_inputer_src=None,
    ),
    # iScan task feedback
    "iscantaskbackdealer":
    IScanTaskBackDealer(
        uniquename="iscantaskbackdealer",
        datamatcher=ExtMatcher([
            "iscan_task_back",
        ]),
        relation_inputer_src=None,
    ),
    # iScout task feedback
    "iscouttaskbackdealer":
    IScoutTaskBackDealer(
        uniquename="iscouttaskbackdealer",
        datamatcher=ExtMatcher([
            # "iscout_task_back",
            "iscout_btask_back",
        ]),
        relation_inputer_src=None,
    ),
    # Automated task feedback
    "autotaskbackdealer":
    AutoTaskBackDealer(
        uniquename="autotaskbackdealer",
        datamatcher=ExtMatcher([
            # "iscout_task_back",
            "automated_btask_back",
        ]),
        relation_inputer_src=None,
    ),
})
| """回馈命令处理"""
# -*- coding:utf-8 -*-
from datacontract import ExtMatcher
from .taskbackdealer import (AutoTaskBackDealer, CmdBackDealer,
IScanTaskBackDealer, IScoutTaskBackDealer,
TaskBackConfig, TaskBackDealer)
taskbackconfig: TaskBackConfig = TaskBackConfig({
"taskbackdealer":
TaskBackDealer(
uniquename="taskbackdealer",
datamatcher=ExtMatcher([
# "idown_task_back",
"idown_btask_back",
]),
relation_inputer_src=None,
),
"cmdbackdealer":
CmdBackDealer(
uniquename="cmdbackdealer",
datamatcher=ExtMatcher([
"idown_cmd_back",
]),
relation_inputer_src=None,
),
"iscantaskbackdealer":
IScanTaskBackDealer(
uniquename="iscantaskbackdealer",
datamatcher=ExtMatcher([
"iscan_task_back",
]),
relation_inputer_src=None,
),
"iscouttaskbackdealer":
IScoutTaskBackDealer(
uniquename="iscouttaskbackdealer",
datamatcher=ExtMatcher([
# "iscout_task_back",
"iscout_btask_back",
]),
relation_inputer_src=None,
),
"autotaskbackdealer":
AutoTaskBackDealer(
uniquename="autotaskbackdealer",
datamatcher=ExtMatcher([
# "iscout_task_back",
"automated_btask_back",
]),
relation_inputer_src=None,
),
}) | en | 0.216021 | 回馈命令处理 # -*- coding:utf-8 -*- # "idown_task_back", # "iscout_task_back", # "iscout_task_back", | 1.948231 | 2 |
rbwriter/models/__init__.py | TheCoder777/Python-Report-Booklet-Writer | 1 | 6619165 | <filename>rbwriter/models/__init__.py
from . import message, messagequeue, user
| <filename>rbwriter/models/__init__.py
from . import message, messagequeue, user
| none | 1 | 1.350458 | 1 | |
stack/examples/balanced-brackets.py | icamarkov/Problem-Solving-with-Algorithms-and-Data-Structures-using-Python | 81 | 6619166 | from stack import Stack
def balanced_brackets(string: str) -> bool:
    """Return True if every bracket in *string* is matched and properly nested.

    Non-bracket characters are ignored.  Maintains a stack of open brackets:
    each closing bracket must match the most recent unmatched opener.

    Bug fixed: the previous version silently skipped a mismatched closer
    instead of rejecting it, so e.g. "(])" was wrongly reported as balanced.
    """
    openers = "([{"
    closers = ")]}"
    stack: Stack = Stack()
    for character in string:
        if character in openers:
            stack.push(character)
        elif character in closers:
            # A closer with no pending opener can never balance.
            if stack.is_empty():
                return False
            # A mismatched pair (e.g. "(]") can never balance either,
            # so bail out immediately instead of scanning the rest.
            if openers.index(stack.peek()) != closers.index(character):
                return False
            stack.pop()
    # Balanced iff every opener was consumed by a matching closer.
    return stack.is_empty()


print(balanced_brackets('((()))'))  # True
print(balanced_brackets('(()'))     # False
print(balanced_brackets(']()'))     # False
| from stack import Stack
def balanced_brackets(string: str) -> bool:
    """Check bracket balance by pairing each closer with the stack top.

    NOTE(review): a mismatched closer is skipped rather than rejected, so
    e.g. "(])" is reported as balanced — quirk preserved by this rewrite.
    """
    matching = {')': '(', ']': '[', '}': '{'}
    pending = Stack()
    for ch in string:
        if ch in "([{":
            pending.push(ch)
        if ch in matching:
            if pending.is_empty():
                return False
            if pending.peek() == matching[ch]:
                pending.pop()
    return pending.is_empty()


print(balanced_brackets('((()))'))  # True
print(balanced_brackets('(()'))     # False
print(balanced_brackets(']()'))     # False
| en | 0.553814 | # True # False # False | 4.049598 | 4 |
tests/test_quality_assessment.py | Song655/sdp-algorithm-reference | 0 | 6619167 | """Unit tests for quality assessment
"""
import unittest
import logging
from arl.data.data_models import QA
log = logging.getLogger(__name__)
class TestQualityAssessment(unittest.TestCase):
    """Smoke test for the QA (quality assessment) data model."""

    def test_qa(self):
        # Constructing a QA record and rendering it to str should not raise.
        qa=QA(origin='foo', data={'rms':100.0, 'median':10.0}, context='test of qa')
        log.debug(str(qa))
if __name__ == '__main__':
unittest.main()
| """Unit tests for quality assessment
"""
import unittest
import logging
from arl.data.data_models import QA
log = logging.getLogger(__name__)
class TestQualityAssessment(unittest.TestCase):
    """Exercise construction and string rendering of a QA record."""

    def test_qa(self):
        measurements = {'rms': 100.0, 'median': 10.0}
        record = QA(origin='foo', data=measurements, context='test of qa')
        log.debug(str(record))
if __name__ == '__main__':
unittest.main()
| en | 0.916029 | Unit tests for quality assessment | 2.785485 | 3 |
autos/googleapi/sheets.py | hans-t/autos | 1 | 6619168 | import time
import uuid
import logging
import functools
from autos.utils.csv import write_csv
from .service import Service
from .errors import SheetNotFound
from .errors import ExecutionError
from .errors import SheetAlreadyExists
from .errors import MissingSpreadsheetId
logger = logging.getLogger(__name__)
def generate_sheet_id():
    """Generate a random sheet ID suitable for the Sheets API.

    The Sheets API requires sheet IDs to be non-negative 32-bit integers.
    A uuid4-derived random ID (instead of the old ``int(time.time())``)
    avoids collisions when several sheets are created within the same
    second, e.g. in a single batchUpdate.

    :returns: an int in ``[1, 2**31 - 1]``.
    """
    return uuid.uuid4().int % (2 ** 31 - 1) + 1
class Sheets(Service):
    """Sheets API wrapper to perform common tasks.

    Current API version: v4.

    API Documentations:
    - https://developers.google.com/sheets/reference/rest/v4/spreadsheets
    - https://developers.google.com/sheets/guides/migration
    """

    def __init__(
        self,
        # NOTE(review): mutable default argument; only read (never mutated)
        # here, so it is safe, but a tuple default would be more defensive.
        scopes=['https://www.googleapis.com/auth/drive'],
    ):
        super().__init__(
            scopes=scopes,
            api_name='sheets',
            api_version='v4',
        )
        self._spreadsheet_id = None  # target spreadsheet; set via property below
        self._metadata = {}  # cached spreadsheets.get() response
        self._properties = {}  # cached map: sheet title -> properties dict

    @property
    def spreadsheets(self):
        """Underlying ``spreadsheets`` resource of the Google API client."""
        return self.service.spreadsheets()

    @property
    def spreadsheet_id(self):
        """The configured spreadsheet id.

        :raises MissingSpreadsheetId: if no id has been assigned yet.
        """
        if self._spreadsheet_id is not None:
            return self._spreadsheet_id
        else:
            raise MissingSpreadsheetId('Please set spreadsheet_id.')

    @spreadsheet_id.setter
    def spreadsheet_id(self, value):
        # Switching spreadsheets invalidates the cached metadata/properties,
        # so refresh them immediately.
        self._spreadsheet_id = value
        self.reload()

    def reload(self):
        """Refreshes sheets' metadata and properties."""
        self.reload_metadata()
        self.reload_properties()

    @property
    def metadata(self):
        """Cached raw spreadsheet metadata (see :meth:`reload_metadata`)."""
        return self._metadata

    def reload_metadata(self):
        """Refreshes sheets metadata."""
        self._metadata = self.spreadsheets.get(
            spreadsheetId=self.spreadsheet_id,
            includeGridData=False,
        ).execute()

    @property
    def properties(self):
        """Cached map of sheet title -> sheet properties dict."""
        return self._properties

    def reload_properties(self):
        """Refreshes sheets' properties."""
        sheets = self.metadata.get('sheets', [])
        self._properties = {sheet['properties']['title']: sheet['properties'] for sheet in sheets}

    def get_sheet_id(self, sheet_name):
        """Maps sheet name to its id.

        :raises SheetNotFound: if no sheet with that title exists.
        """
        try:
            return self.properties[sheet_name]['sheetId']
        except KeyError as e:
            raise SheetNotFound('{} does not exist.'.format(sheet_name)) from e

    def execute(self, request, batch):
        """Executes a request if batch is False, else return the request.

        :type request: dict
        :param request: Dict request to be passed to Sheets API.

        :type batch: bool
        :param batch: If true, returns request for batching, else execute immediately.
        """
        if batch:
            return request
        return self.batch_update(request)

    def add(self, name='New Sheet', index=0, row_count=10000, column_count=10, batch=False):
        """Adds a new sheet of size row_count and column_count with the given
        name and positioned at index.

        :type name: str
        :param name: Sheet name.

        :type index: int
        :param index: Sheet position.

        :type row_count: int
        :param row_count: Number of rows in the new sheet.

        :type column_count: int
        :param column_count: Number of columns in the new sheet.

        :type batch: bool
        :param batch: If true, returns request for batching, else execute immediately.

        :raises SheetAlreadyExists: if a sheet with *name* already exists.
        """
        if name in self.properties:
            raise SheetAlreadyExists('A sheet with the name {} already exists.'.format(name))
        request = {
            'addSheet': {
                'properties': {
                    # Client-generated id so the sheet can be referenced
                    # by other requests in the same batch.
                    'sheetId': generate_sheet_id(),
                    'title': name,
                    'index': index,
                    'sheetType': 'GRID',
                    'gridProperties': {
                        'rowCount': row_count,
                        'columnCount': column_count,
                    },
                },
            },
        }
        return self.execute(request, batch)

    def delete(self, sheet_id, batch=False):
        """Deletes sheet by its sheet_id.

        :type sheet_id: int
        :param sheet_id: Sheet ID.

        :type batch: bool
        :param batch: If true, returns request for batching, else execute immediately.
        """
        request = {
            'deleteSheet': {
                'sheetId': sheet_id,
            },
        }
        return self.execute(request, batch)

    def delete_by_name(self, sheet_name, batch=False):
        """Deletes sheet by its name."""
        sheet_id = self.get_sheet_id(sheet_name)
        return self.delete(sheet_id, batch=batch)

    def rename(self, current_sheet_name, new_sheet_name, batch=False):
        """Renames a sheet name.

        :type batch: bool
        :param batch: If true, returns request for batching, else execute immediately.
        """
        request = {
            'updateSheetProperties': {
                'properties': {
                    'sheetId': self.get_sheet_id(current_sheet_name),
                    'title': new_sheet_name,
                },
                # Only the title field is updated; other properties are untouched.
                'fields': 'title',
            }
        }
        return self.execute(request, batch)

    def reset(self, row_count=10000, column_count=10):
        """Removes all sheets and add a new blank sheet with the given
        numbers of rows and columns.

        A temporary random title is used so the new sheet cannot clash with
        an existing one; it is renamed to 'Sheet1' afterwards.
        """
        sheet_temp_name = uuid.uuid4().hex
        self.batch_update(
            self.add(sheet_temp_name, row_count=row_count, column_count=column_count, batch=True),
            *(self.delete_by_name(title, batch=True) for title in self.properties),
        )
        self.rename(sheet_temp_name, 'Sheet1')

    def batch_update(self, *requests):
        """Executes the given requests in a single batchUpdate call.

        On success the cached metadata/properties are reloaded so that
        subsequent lookups see the changes.

        :raises ExecutionError: if the API call fails for any reason.
        """
        body = { 'requests': requests }
        try:
            response = self.spreadsheets.batchUpdate(
                spreadsheetId=self.spreadsheet_id,
                body=body,
            ).execute()
        except Exception as e:
            logger.exception('EXECUTION_ERROR')
            raise ExecutionError from e
        else:
            self.reload()
            return response

    def update_values(self, range, values, as_is=True):
        """Updates rows in range with the given values.

        :type range: str
        :param range: The A1 notation of the values to update.

        :type values: list
        :param values: Rows within the range.

        :type as_is: bool
        :param as_is: If true, values are stored as-is ('RAW'); otherwise they
            are parsed as if typed by a user ('USER_ENTERED').
        """
        # NOTE(review): parameter name shadows the builtin ``range``; kept
        # because renaming would break callers passing it by keyword.
        value_input_option = 'RAW' if as_is else 'USER_ENTERED'
        body = { 'range': range, 'values': values }
        return self.spreadsheets.values().update(
            spreadsheetId=self.spreadsheet_id,
            range=range,
            valueInputOption=value_input_option,
            body=body,
        ).execute()

    def get_values(self, range):
        """Retrieves data in range.

        :type range: str
        :param range: The A1 notation of the values to retrieve.

        :rtype: list
        :returns: Rows within the range.
        """
        response = self.spreadsheets.values().get(
            spreadsheetId=self.spreadsheet_id,
            range=range,
        ).execute()
        return response.get('values', [])

    def clear_values(self, sheet_name, batch=False):
        """Clear a sheet of all values while preserving formats.

        :type sheet_name: str
        :param sheet_name: Sheet name.

        :type batch: bool
        :param batch: If true, returns request for batching, else execute immediately.
        """
        sheet_id = self.get_sheet_id(sheet_name)
        request = {
            'updateCells': {
                'range': {
                    'sheetId': sheet_id,
                },
                # Clearing only userEnteredValue leaves formatting intact.
                'fields': 'userEnteredValue',
            }
        }
        return self.execute(request, batch)

    def extract(self, path, range):
        """Saves the values in *range* to a CSV file at *path*."""
        rows = self.get_values(range=range)
        write_csv(path, rows=rows)
| import time
import uuid
import logging
import functools
from autos.utils.csv import write_csv
from .service import Service
from .errors import SheetNotFound
from .errors import ExecutionError
from .errors import SheetAlreadyExists
from .errors import MissingSpreadsheetId
logger = logging.getLogger(__name__)
def generate_sheet_id():
    """Generate a sheet ID from the current Unix timestamp (whole seconds)."""
    now = time.time()
    return int(now)
class Sheets(Service):
"""Sheets API wrapper to perform common tasks.
Current API version: v4.
API Documentations:
- https://developers.google.com/sheets/reference/rest/v4/spreadsheets
- https://developers.google.com/sheets/guides/migration
"""
def __init__(
self,
scopes=['https://www.googleapis.com/auth/drive'],
):
super().__init__(
scopes=scopes,
api_name='sheets',
api_version='v4',
)
self._spreadsheet_id = None
self._metadata = {}
self._properties = {}
@property
def spreadsheets(self):
return self.service.spreadsheets()
@property
def spreadsheet_id(self):
if self._spreadsheet_id is not None:
return self._spreadsheet_id
else:
raise MissingSpreadsheetId('Please set spreadsheet_id.')
@spreadsheet_id.setter
def spreadsheet_id(self, value):
self._spreadsheet_id = value
self.reload()
def reload(self):
"""Refreshes sheets' metadata and properties."""
self.reload_metadata()
self.reload_properties()
@property
def metadata(self):
return self._metadata
def reload_metadata(self):
"""Refreshes sheets metadata."""
self._metadata = self.spreadsheets.get(
spreadsheetId=self.spreadsheet_id,
includeGridData=False,
).execute()
@property
def properties(self):
return self._properties
def reload_properties(self):
"""Refreshes sheets' properties."""
sheets = self.metadata.get('sheets', [])
self._properties = {sheet['properties']['title']: sheet['properties'] for sheet in sheets}
def get_sheet_id(self, sheet_name):
"""Maps sheet name to its id."""
try:
return self.properties[sheet_name]['sheetId']
except KeyError as e:
raise SheetNotFound('{} does not exist.'.format(sheet_name)) from e
def execute(self, request, batch):
"""Executes a request if batch is False, else return the request.
:type request: dict
:param request: Dict request to be passed to Sheets API.
:type batch: bool
:param batch: If true, returns request for batching, else execute immediately.
"""
if batch:
return request
return self.batch_update(request)
def add(self, name='New Sheet', index=0, row_count=10000, column_count=10, batch=False):
"""Adds a new sheet of size row_count and column_count with the given
name and positioned at index.
:type name: str
:param name: Sheet name.
:type index: int
:param index: Sheet position.
:type row_count: int
:param row_count: Number of rows in the new sheet.
:type column_count: int
:param column_count: Number of columns in the new sheet.
:type batch: bool
:param batch: If true, returns request for batching, else execute immediately.
"""
if name in self.properties:
raise SheetAlreadyExists('A sheet with the name {} already exists.'.format(name))
request = {
'addSheet': {
'properties': {
'sheetId': generate_sheet_id(),
'title': name,
'index': index,
'sheetType': 'GRID',
'gridProperties': {
'rowCount': row_count,
'columnCount': column_count,
},
},
},
}
return self.execute(request, batch)
def delete(self, sheet_id, batch=False):
"""Deletes sheet by its sheet_id.
:type sheet_id: int
:param sheet_id: Sheet ID.
:type batch: bool
:param batch: If true, returns request for batching, else execute immediately.
"""
request = {
'deleteSheet': {
'sheetId': sheet_id,
},
}
return self.execute(request, batch)
def delete_by_name(self, sheet_name, batch=False):
"""Deletes sheet by its name."""
sheet_id = self.get_sheet_id(sheet_name)
return self.delete(sheet_id, batch=batch)
def rename(self, current_sheet_name, new_sheet_name, batch=False):
"""Renames a sheet name.
:type batch: bool
:param batch: If true, returns request for batching, else execute immediately.
"""
request = {
'updateSheetProperties': {
'properties': {
'sheetId': self.get_sheet_id(current_sheet_name),
'title': new_sheet_name,
},
'fields': 'title',
}
}
return self.execute(request, batch)
def reset(self, row_count=10000, column_count=10):
"""Removes all sheets and add a new blank sheet with the given
numbers of rows and columns.
"""
sheet_temp_name = uuid.uuid4().hex
self.batch_update(
self.add(sheet_temp_name, row_count=row_count, column_count=column_count, batch=True),
*(self.delete_by_name(title, batch=True) for title in self.properties),
)
self.rename(sheet_temp_name, 'Sheet1')
def batch_update(self, *requests):
body = { 'requests': requests }
try:
response = self.spreadsheets.batchUpdate(
spreadsheetId=self.spreadsheet_id,
body=body,
).execute()
except Exception as e:
logger.exception('EXECUTION_ERROR')
raise ExecutionError from e
else:
self.reload()
return response
def update_values(self, range, values, as_is=True):
"""Updates rows in range with the given values.
:type range: str
:param range: The A1 notation of the values to update.
:type values: list
:param values: Rows within the range.
"""
value_input_option = 'RAW' if as_is else 'USER_ENTERED'
body = { 'range': range, 'values': values }
return self.spreadsheets.values().update(
spreadsheetId=self.spreadsheet_id,
range=range,
valueInputOption=value_input_option,
body=body,
).execute()
def get_values(self, range):
"""Retrieves data in range.
:type range: str
:param range: The A1 notation of the values to retrieve.
:rtype: list
:returns: Rows within the range.
"""
response = self.spreadsheets.values().get(
spreadsheetId=self.spreadsheet_id,
range=range,
).execute()
return response.get('values', [])
def clear_values(self, sheet_name, batch=False):
"""Clear a sheet of all values while preserving formats.
:type sheet_name: str
:param sheet_name: Sheet name.
:type batch: bool
:param batch: If true, returns request for batching, else execute immediately.
"""
sheet_id = self.get_sheet_id(sheet_name)
request = {
'updateCells': {
'range': {
'sheetId': sheet_id,
},
'fields': 'userEnteredValue',
}
}
return self.execute(request, batch)
def extract(self, path, range):
rows = self.get_values(range=range)
write_csv(path, rows=rows)
| en | 0.687012 | Generate random sheet ID. Sheets API wrapper to perform common tasks. Current API version: v4. API Documentations: - https://developers.google.com/sheets/reference/rest/v4/spreadsheets - https://developers.google.com/sheets/guides/migration Refreshes sheets' metadata and properties. Refreshes sheets metadata. Refreshes sheets' properties. Maps sheet name to its id. Executes a request if batch is False, else return the request. :type request: dict :param request: Dict request to be passed to Sheets API. :type batch: bool :param batch: If true, returns request for batching, else execute immediately. Adds a new sheet of size row_count and column_count with the given name and positioned at index. :type name: str :param name: Sheet name. :type index: int :param index: Sheet position. :type row_count: int :param row_count: Number of rows in the new sheet. :type column_count: int :param column_count: Number of columns in the new sheet. :type batch: bool :param batch: If true, returns request for batching, else execute immediately. Deletes sheet by its sheet_id. :type sheet_id: int :param sheet_id: Sheet ID. :type batch: bool :param batch: If true, returns request for batching, else execute immediately. Deletes sheet by its name. Renames a sheet name. :type batch: bool :param batch: If true, returns request for batching, else execute immediately. Removes all sheets and add a new blank sheet with the given numbers of rows and columns. Updates rows in range with the given values. :type range: str :param range: The A1 notation of the values to update. :type values: list :param values: Rows within the range. Retrieves data in range. :type range: str :param range: The A1 notation of the values to retrieve. :rtype: list :returns: Rows within the range. Clear a sheet of all values while preserving formats. :type sheet_name: str :param sheet_name: Sheet name. :type batch: bool :param batch: If true, returns request for batching, else execute immediately. 
| 2.700544 | 3 |
criterion.py | Holmes-Alan/Photo2Sketch | 0 | 6619169 | <filename>criterion.py
import torch
import torch.nn as nn
import torch.nn.functional as fnn
from torch.autograd import Variable
import numpy as np
def adaptive_instance_normalization(content_feat, style_feat):
assert (content_feat.size()[:2] == style_feat.size()[:2])
size = content_feat.size()
style_mean, style_std = calc_mean_std(style_feat)
content_mean, content_std = calc_mean_std(content_feat)
normalized_feat = (content_feat - content_mean.expand(
size)) / content_std.expand(size)
return normalized_feat * style_std.expand(size) + style_mean.expand(size)
def calc_mean_std(feat, eps=1e-5):
# eps is a small value added to the variance to avoid divide-by-zero.
size = feat.size()
assert (len(size) == 4)
N, C = size[:2]
feat_var = feat.view(N, C, -1).var(dim=2) + eps
feat_std = feat_var.sqrt().view(N, C, 1, 1)
feat_mean = feat.view(N, C, -1).mean(dim=2).view(N, C, 1, 1)
return feat_mean, feat_std
def mean_variance_norm(feat):
size = feat.size()
mean, std = calc_mean_std(feat)
normalized_feat = (feat - mean.expand(size)) / std.expand(size)
return normalized_feat
def mean_variance_norm_loss(feat1, feat2, device):
mean = torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1).to(device)
std = torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1).to(device)
size = feat1.size()
normalized_feat1 = (feat1 - mean.expand(size)) / std.expand(size)
normalized_feat2 = (feat2 - mean.expand(size)) / std.expand(size)
return normalized_feat1, normalized_feat2
def TV(x):
b, c, h_x, w_x = x.shape
h_tv = torch.mean(torch.abs(x[:,:,1:,:]-x[:,:,:h_x-1,:]))
w_tv = torch.mean(torch.abs(x[:,:,:,1:]-x[:,:,:,:w_x-1]))
return h_tv + w_tv
class styleLoss(nn.Module):
def forward(self,input,target):
ib,ic,ih,iw = input.size()
iF = input.view(ib,ic,-1)
iMean = torch.mean(iF,dim=2)
iCov = GramMatrix()(input)
tb,tc,th,tw = target.size()
tF = target.view(tb,tc,-1)
tMean = torch.mean(tF,dim=2)
tCov = GramMatrix()(target)
loss = nn.MSELoss(size_average=False)(iMean,tMean) + nn.MSELoss(size_average=False)(iCov,tCov)
return loss/tb
class styleLoss_v2(nn.Module):
def forward(self,input,target):
ib,ic,ih,iw = input.size()
mean_x, var_x = calc_mean_std(input)
iCov = GramMatrix()(input)
mean_y, var_y = calc_mean_std(target)
tCov = GramMatrix()(target)
loss = nn.MSELoss(size_average=True)(mean_x, mean_y) + nn.MSELoss(size_average=True)(var_x, var_y) + nn.MSELoss(size_average=True)(iCov, tCov)
return loss
class GramMatrix(nn.Module):
def forward(self,input):
b, c, h, w = input.size()
f = input.view(b,c,h*w) # bxcx(hxw)
# torch.bmm(batch1, batch2, out=None) #
# batch1: bxmxp, batch2: bxpxn -> bxmxn #
G = torch.bmm(f,f.transpose(1,2)) # f: bxcx(hxw), f.transpose: bx(hxw)xc -> bxcxc
return G.div_(c*h*w)
class LossCriterion(nn.Module):
def __init__(self,style_layers,content_layers,style_weight,content_weight):
super(LossCriterion,self).__init__()
self.style_layers = style_layers
self.content_layers = content_layers
self.style_weight = style_weight
self.content_weight = content_weight
self.styleLosses = [styleLoss()] * len(style_layers)
self.contentLosses = [nn.MSELoss()] * len(content_layers)
def forward(self, tF, sF, cF, KL):
# content loss
totalContentLoss = 0
for i,layer in enumerate(self.content_layers):
cf_i = cF[layer]
cf_i = cf_i.detach()
tf_i = tF[layer]
loss_i = self.contentLosses[i]
totalContentLoss += loss_i(tf_i,cf_i)
totalContentLoss = totalContentLoss * self.content_weight
# style loss
totalStyleLoss = 0
for i,layer in enumerate(self.style_layers):
sf_i = sF[layer]
sf_i = sf_i.detach()
tf_i = tF[layer]
loss_i = self.styleLosses[i]
totalStyleLoss += loss_i(tf_i,sf_i)
totalStyleLoss = totalStyleLoss * self.style_weight
# KL loss
KL = torch.sum(KL)
# laplacian loss
# Laploss = Lap_criterion(2*ori_content-1, 2*ori_style-1)
# total loss
loss = totalStyleLoss + totalContentLoss + KL
return loss, totalStyleLoss, totalContentLoss, KL
class LossCriterion_v2(nn.Module):
def __init__(self, style_weight, content_weight, device):
super(LossCriterion_v2, self).__init__()
self.style_weight = style_weight
self.content_weight = content_weight
self.L2_loss = nn.MSELoss().to(device)
def forward(self, tF, sF, cF):
# content loss
totalContentLoss = (self.L2_loss(tF.relu4_1, cF.relu4_1) +
self.L2_loss(tF.relu3_1, cF.relu3_1) +
self.L2_loss(tF.relu2_1, cF.relu2_1) +
self.L2_loss(tF.relu1_1, cF.relu1_1)) * self.content_weight
# style loss
totalStyleLoss = 0
# weight_list = [100, 30, 2, 1]
for ft_x, ft_s in zip(tF, sF):
mean_x, var_x = calc_mean_std(ft_x)
mean_style, var_style = calc_mean_std(ft_s)
# iCov = GramMatrix()(ft_x)
# tCov = GramMatrix()(ft_s)
totalStyleLoss = totalStyleLoss + self.L2_loss(mean_x, mean_style)
totalStyleLoss = totalStyleLoss + self.L2_loss(var_x, var_style)
# totalStyleLoss = totalStyleLoss + 100*self.L2_loss(iCov, tCov)
totalStyleLoss = totalStyleLoss * self.style_weight
# total loss
loss = totalStyleLoss + totalContentLoss
return loss, totalStyleLoss, totalContentLoss
class LossCriterion_v3(nn.Module):
def __init__(self, style_weight, content_weight, device):
super(LossCriterion_v3, self).__init__()
self.style_weight = style_weight
self.content_weight = content_weight
self.L2_loss = nn.MSELoss().to(device)
def forward(self, tF, sF, cF, KL):
# content loss
totalContentLoss = self.L2_loss(tF['r41'], cF['r41']) * self.content_weight
# style loss
totalStyleLoss = 0
weight_list = [100, 30, 2, 1]
mean_x, var_x = calc_mean_std(tF['r41'])
mean_style, var_style = calc_mean_std(sF['r41'])
totalStyleLoss = totalStyleLoss + weight_list[3] * self.L2_loss(mean_x, mean_style)
totalStyleLoss = totalStyleLoss + weight_list[3] * self.L2_loss(var_x, var_style)
mean_x, var_x = calc_mean_std(tF['r31'])
mean_style, var_style = calc_mean_std(sF['r31'])
totalStyleLoss = totalStyleLoss + weight_list[2] * self.L2_loss(mean_x, mean_style)
totalStyleLoss = totalStyleLoss + weight_list[2] * self.L2_loss(var_x, var_style)
mean_x, var_x = calc_mean_std(tF['r21'])
mean_style, var_style = calc_mean_std(sF['r21'])
totalStyleLoss = totalStyleLoss + weight_list[1] * self.L2_loss(mean_x, mean_style)
totalStyleLoss = totalStyleLoss + weight_list[1] * self.L2_loss(var_x, var_style)
mean_x, var_x = calc_mean_std(tF['r11'])
mean_style, var_style = calc_mean_std(sF['r11'])
totalStyleLoss = totalStyleLoss + weight_list[0] * self.L2_loss(mean_x, mean_style)
totalStyleLoss = totalStyleLoss + weight_list[0] * self.L2_loss(var_x, var_style)
totalStyleLoss = totalStyleLoss * self.style_weight
# KL loss
KL = torch.mean(KL)
# total loss
loss = totalStyleLoss + totalContentLoss + 1*KL
return loss, totalStyleLoss, totalContentLoss, KL
class LossCriterion_GAN(nn.Module):
def __init__(self,style_layers,content_layers,style_weight,content_weight):
super(LossCriterion_GAN,self).__init__()
self.style_layers = style_layers
self.content_layers = content_layers
self.style_weight = style_weight
self.content_weight = content_weight
self.styleLosses = [styleLoss()] * len(style_layers)
self.contentLosses = [nn.MSELoss()] * len(content_layers)
def forward(self, tF, sF, cF):
# content loss
totalContentLoss = 0
for i,layer in enumerate(self.content_layers):
cf_i = cF[layer]
cf_i = cf_i.detach()
tf_i = tF[layer]
loss_i = self.contentLosses[i]
totalContentLoss += loss_i(tf_i,cf_i)
totalContentLoss = totalContentLoss * self.content_weight
# style loss
totalStyleLoss = 0
for i,layer in enumerate(self.style_layers):
sf_i = sF[layer]
sf_i = sf_i.detach()
tf_i = tF[layer]
loss_i = self.styleLosses[i]
totalStyleLoss += loss_i(tf_i,sf_i)
totalStyleLoss = totalStyleLoss * self.style_weight
# laplacian loss
# Laploss = Lap_criterion(2*ori_content-1, 2*ori_style-1)
# total loss
loss = totalStyleLoss + totalContentLoss
return loss
class TVLoss(nn.Module):
def forward(self, x):
batch_size = x.size()[0]
h_x = x.size()[2]
w_x = x.size()[3]
count_h = self._tensor_size(x[:, :, 1:, :])
count_w = self._tensor_size(x[:, :, :, 1:])
h_tv = torch.pow((x[:, :, 1:, :] - x[:, :, :h_x - 1, :]), 2).sum()
w_tv = torch.pow((x[:, :, :, 1:] - x[:, :, :, :w_x - 1]), 2).sum()
return 2 * (h_tv / count_h + w_tv / count_w) / batch_size
def _tensor_size(self, t):
return t.size()[1] * t.size()[2] * t.size()[3]
def build_gauss_kernel(cuda, size=5, sigma=1.0, n_channels=1):
if size % 2 != 1:
raise ValueError("kernel size must be uneven")
grid = np.float32(np.mgrid[0:size, 0:size].T)
gaussian = lambda x: np.exp((x - size // 2) ** 2 / (-2 * sigma ** 2)) ** 2
kernel = np.sum(gaussian(grid), axis=2)
kernel /= np.sum(kernel)
# repeat same kernel across depth dimension
kernel = np.tile(kernel, (n_channels, 1, 1))
# conv weight should be (out_channels, groups/in_channels, h, w),
# and since we have depth-separable convolution we want the groups dimension to be 1
kernel = torch.FloatTensor(kernel[:, None, :, :])
kernel = kernel.to(cuda)
return Variable(kernel, requires_grad=False)
def conv_gauss(img, kernel):
""" convolve img with a gaussian kernel that has been built with build_gauss_kernel """
n_channels, _, kw, kh = kernel.shape
img = fnn.pad(img, (kw // 2, kh // 2, kw // 2, kh // 2), mode='replicate')
return fnn.conv2d(img, kernel, groups=n_channels)
def laplacian_pyramid(img, kernel, max_levels=5):
current = img
pyr = []
for level in range(max_levels):
filtered = conv_gauss(current, kernel)
diff = current - filtered
pyr.append(diff)
current = fnn.avg_pool2d(filtered, 2)
pyr.append(current)
return pyr
def down_pyramid(img, max_levels=5):
current = img
pyr = []
pyr.append(img)
for level in range(max_levels):
img = fnn.interpolate(img, mode='bilinear', scale_factor=0.5)
pyr.append(img)
pyr.append(current)
return pyr
class LapLoss(nn.Module):
def __init__(self, device, max_levels=5, k_size=5, sigma=2.0):
super(LapLoss, self).__init__()
self.max_levels = max_levels
self.k_size = k_size
self.sigma = sigma
self._gauss_kernel = None
self.device = device
def forward(self, input, target, reduce='mean'):
if self._gauss_kernel is None or self._gauss_kernel.shape[1] != input.shape[1]:
self._gauss_kernel = build_gauss_kernel(
cuda=self.device, size=self.k_size, sigma=self.sigma,
n_channels=input.shape[1]
)
pyr_input = laplacian_pyramid(input, self._gauss_kernel, self.max_levels)
pyr_target = laplacian_pyramid(target, self._gauss_kernel, self.max_levels)
if reduce is 'mean':
L1_loss = torch.nn.L1Loss(size_average=True)
return sum(L1_loss(a, b) for a, b in zip(pyr_input, pyr_target))
else:
L1_loss = torch.nn.L1Loss(size_average=False)
return sum(L1_loss(a, b) for a, b in zip(pyr_input, pyr_target))
# class LapLoss(nn.Module):
# def __init__(self, device, max_levels=5, k_size=5, sigma=2.0):
# super(LapLoss, self).__init__()
# self.max_levels = max_levels
# self.k_size = k_size
# self.sigma = sigma
# self._gauss_kernel = None
# self.device = device
#
#
# def forward(self, input, target, reduce='mean'):
# if self._gauss_kernel is None or self._gauss_kernel.shape[1] != input.shape[1]:
# self._gauss_kernel = build_gauss_kernel(
# cuda=self.device, size=self.k_size, sigma=self.sigma,
# n_channels=input.shape[1]
# )
# pyr_input = down_pyramid(input, self.max_levels)
# pyr_target = laplacian_pyramid(target, self._gauss_kernel, self.max_levels)
# if reduce is 'mean':
# L1_loss = torch.nn.L1Loss(size_average=True)
# return sum(L1_loss(a, b) for a, b in zip(pyr_input, pyr_target))
# else:
# L1_loss = torch.nn.L1Loss(size_average=False)
# return sum(L1_loss(a, b) for a, b in zip(pyr_input, pyr_target))
class LapMap(nn.Module):
def __init__(self, max_levels=5, k_size=5, sigma=2.0):
super(LapMap, self).__init__()
self.max_levels = max_levels
self.k_size = k_size
self.sigma = sigma
self._gauss_kernel = None
def forward(self, input):
if self._gauss_kernel is None or self._gauss_kernel.shape[1] != input.shape[1]:
self._gauss_kernel = build_gauss_kernel(
size=self.k_size, sigma=self.sigma,
n_channels=input.shape[1], cuda=input.is_cuda
)
pyr_input = laplacian_pyramid(input, self._gauss_kernel, self.max_levels)
return pyr_input | <filename>criterion.py
import torch
import torch.nn as nn
import torch.nn.functional as fnn
from torch.autograd import Variable
import numpy as np
def adaptive_instance_normalization(content_feat, style_feat):
assert (content_feat.size()[:2] == style_feat.size()[:2])
size = content_feat.size()
style_mean, style_std = calc_mean_std(style_feat)
content_mean, content_std = calc_mean_std(content_feat)
normalized_feat = (content_feat - content_mean.expand(
size)) / content_std.expand(size)
return normalized_feat * style_std.expand(size) + style_mean.expand(size)
def calc_mean_std(feat, eps=1e-5):
# eps is a small value added to the variance to avoid divide-by-zero.
size = feat.size()
assert (len(size) == 4)
N, C = size[:2]
feat_var = feat.view(N, C, -1).var(dim=2) + eps
feat_std = feat_var.sqrt().view(N, C, 1, 1)
feat_mean = feat.view(N, C, -1).mean(dim=2).view(N, C, 1, 1)
return feat_mean, feat_std
def mean_variance_norm(feat):
size = feat.size()
mean, std = calc_mean_std(feat)
normalized_feat = (feat - mean.expand(size)) / std.expand(size)
return normalized_feat
def mean_variance_norm_loss(feat1, feat2, device):
mean = torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1).to(device)
std = torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1).to(device)
size = feat1.size()
normalized_feat1 = (feat1 - mean.expand(size)) / std.expand(size)
normalized_feat2 = (feat2 - mean.expand(size)) / std.expand(size)
return normalized_feat1, normalized_feat2
def TV(x):
b, c, h_x, w_x = x.shape
h_tv = torch.mean(torch.abs(x[:,:,1:,:]-x[:,:,:h_x-1,:]))
w_tv = torch.mean(torch.abs(x[:,:,:,1:]-x[:,:,:,:w_x-1]))
return h_tv + w_tv
class styleLoss(nn.Module):
def forward(self,input,target):
ib,ic,ih,iw = input.size()
iF = input.view(ib,ic,-1)
iMean = torch.mean(iF,dim=2)
iCov = GramMatrix()(input)
tb,tc,th,tw = target.size()
tF = target.view(tb,tc,-1)
tMean = torch.mean(tF,dim=2)
tCov = GramMatrix()(target)
loss = nn.MSELoss(size_average=False)(iMean,tMean) + nn.MSELoss(size_average=False)(iCov,tCov)
return loss/tb
class styleLoss_v2(nn.Module):
def forward(self,input,target):
ib,ic,ih,iw = input.size()
mean_x, var_x = calc_mean_std(input)
iCov = GramMatrix()(input)
mean_y, var_y = calc_mean_std(target)
tCov = GramMatrix()(target)
loss = nn.MSELoss(size_average=True)(mean_x, mean_y) + nn.MSELoss(size_average=True)(var_x, var_y) + nn.MSELoss(size_average=True)(iCov, tCov)
return loss
class GramMatrix(nn.Module):
def forward(self,input):
b, c, h, w = input.size()
f = input.view(b,c,h*w) # bxcx(hxw)
# torch.bmm(batch1, batch2, out=None) #
# batch1: bxmxp, batch2: bxpxn -> bxmxn #
G = torch.bmm(f,f.transpose(1,2)) # f: bxcx(hxw), f.transpose: bx(hxw)xc -> bxcxc
return G.div_(c*h*w)
class LossCriterion(nn.Module):
def __init__(self,style_layers,content_layers,style_weight,content_weight):
super(LossCriterion,self).__init__()
self.style_layers = style_layers
self.content_layers = content_layers
self.style_weight = style_weight
self.content_weight = content_weight
self.styleLosses = [styleLoss()] * len(style_layers)
self.contentLosses = [nn.MSELoss()] * len(content_layers)
def forward(self, tF, sF, cF, KL):
# content loss
totalContentLoss = 0
for i,layer in enumerate(self.content_layers):
cf_i = cF[layer]
cf_i = cf_i.detach()
tf_i = tF[layer]
loss_i = self.contentLosses[i]
totalContentLoss += loss_i(tf_i,cf_i)
totalContentLoss = totalContentLoss * self.content_weight
# style loss
totalStyleLoss = 0
for i,layer in enumerate(self.style_layers):
sf_i = sF[layer]
sf_i = sf_i.detach()
tf_i = tF[layer]
loss_i = self.styleLosses[i]
totalStyleLoss += loss_i(tf_i,sf_i)
totalStyleLoss = totalStyleLoss * self.style_weight
# KL loss
KL = torch.sum(KL)
# laplacian loss
# Laploss = Lap_criterion(2*ori_content-1, 2*ori_style-1)
# total loss
loss = totalStyleLoss + totalContentLoss + KL
return loss, totalStyleLoss, totalContentLoss, KL
class LossCriterion_v2(nn.Module):
def __init__(self, style_weight, content_weight, device):
super(LossCriterion_v2, self).__init__()
self.style_weight = style_weight
self.content_weight = content_weight
self.L2_loss = nn.MSELoss().to(device)
def forward(self, tF, sF, cF):
# content loss
totalContentLoss = (self.L2_loss(tF.relu4_1, cF.relu4_1) +
self.L2_loss(tF.relu3_1, cF.relu3_1) +
self.L2_loss(tF.relu2_1, cF.relu2_1) +
self.L2_loss(tF.relu1_1, cF.relu1_1)) * self.content_weight
# style loss
totalStyleLoss = 0
# weight_list = [100, 30, 2, 1]
for ft_x, ft_s in zip(tF, sF):
mean_x, var_x = calc_mean_std(ft_x)
mean_style, var_style = calc_mean_std(ft_s)
# iCov = GramMatrix()(ft_x)
# tCov = GramMatrix()(ft_s)
totalStyleLoss = totalStyleLoss + self.L2_loss(mean_x, mean_style)
totalStyleLoss = totalStyleLoss + self.L2_loss(var_x, var_style)
# totalStyleLoss = totalStyleLoss + 100*self.L2_loss(iCov, tCov)
totalStyleLoss = totalStyleLoss * self.style_weight
# total loss
loss = totalStyleLoss + totalContentLoss
return loss, totalStyleLoss, totalContentLoss
class LossCriterion_v3(nn.Module):
def __init__(self, style_weight, content_weight, device):
super(LossCriterion_v3, self).__init__()
self.style_weight = style_weight
self.content_weight = content_weight
self.L2_loss = nn.MSELoss().to(device)
def forward(self, tF, sF, cF, KL):
# content loss
totalContentLoss = self.L2_loss(tF['r41'], cF['r41']) * self.content_weight
# style loss
totalStyleLoss = 0
weight_list = [100, 30, 2, 1]
mean_x, var_x = calc_mean_std(tF['r41'])
mean_style, var_style = calc_mean_std(sF['r41'])
totalStyleLoss = totalStyleLoss + weight_list[3] * self.L2_loss(mean_x, mean_style)
totalStyleLoss = totalStyleLoss + weight_list[3] * self.L2_loss(var_x, var_style)
mean_x, var_x = calc_mean_std(tF['r31'])
mean_style, var_style = calc_mean_std(sF['r31'])
totalStyleLoss = totalStyleLoss + weight_list[2] * self.L2_loss(mean_x, mean_style)
totalStyleLoss = totalStyleLoss + weight_list[2] * self.L2_loss(var_x, var_style)
mean_x, var_x = calc_mean_std(tF['r21'])
mean_style, var_style = calc_mean_std(sF['r21'])
totalStyleLoss = totalStyleLoss + weight_list[1] * self.L2_loss(mean_x, mean_style)
totalStyleLoss = totalStyleLoss + weight_list[1] * self.L2_loss(var_x, var_style)
mean_x, var_x = calc_mean_std(tF['r11'])
mean_style, var_style = calc_mean_std(sF['r11'])
totalStyleLoss = totalStyleLoss + weight_list[0] * self.L2_loss(mean_x, mean_style)
totalStyleLoss = totalStyleLoss + weight_list[0] * self.L2_loss(var_x, var_style)
totalStyleLoss = totalStyleLoss * self.style_weight
# KL loss
KL = torch.mean(KL)
# total loss
loss = totalStyleLoss + totalContentLoss + 1*KL
return loss, totalStyleLoss, totalContentLoss, KL
class LossCriterion_GAN(nn.Module):
def __init__(self,style_layers,content_layers,style_weight,content_weight):
super(LossCriterion_GAN,self).__init__()
self.style_layers = style_layers
self.content_layers = content_layers
self.style_weight = style_weight
self.content_weight = content_weight
self.styleLosses = [styleLoss()] * len(style_layers)
self.contentLosses = [nn.MSELoss()] * len(content_layers)
def forward(self, tF, sF, cF):
# content loss
totalContentLoss = 0
for i,layer in enumerate(self.content_layers):
cf_i = cF[layer]
cf_i = cf_i.detach()
tf_i = tF[layer]
loss_i = self.contentLosses[i]
totalContentLoss += loss_i(tf_i,cf_i)
totalContentLoss = totalContentLoss * self.content_weight
# style loss
totalStyleLoss = 0
for i,layer in enumerate(self.style_layers):
sf_i = sF[layer]
sf_i = sf_i.detach()
tf_i = tF[layer]
loss_i = self.styleLosses[i]
totalStyleLoss += loss_i(tf_i,sf_i)
totalStyleLoss = totalStyleLoss * self.style_weight
# laplacian loss
# Laploss = Lap_criterion(2*ori_content-1, 2*ori_style-1)
# total loss
loss = totalStyleLoss + totalContentLoss
return loss
class TVLoss(nn.Module):
def forward(self, x):
batch_size = x.size()[0]
h_x = x.size()[2]
w_x = x.size()[3]
count_h = self._tensor_size(x[:, :, 1:, :])
count_w = self._tensor_size(x[:, :, :, 1:])
h_tv = torch.pow((x[:, :, 1:, :] - x[:, :, :h_x - 1, :]), 2).sum()
w_tv = torch.pow((x[:, :, :, 1:] - x[:, :, :, :w_x - 1]), 2).sum()
return 2 * (h_tv / count_h + w_tv / count_w) / batch_size
def _tensor_size(self, t):
return t.size()[1] * t.size()[2] * t.size()[3]
def build_gauss_kernel(cuda, size=5, sigma=1.0, n_channels=1):
if size % 2 != 1:
raise ValueError("kernel size must be uneven")
grid = np.float32(np.mgrid[0:size, 0:size].T)
gaussian = lambda x: np.exp((x - size // 2) ** 2 / (-2 * sigma ** 2)) ** 2
kernel = np.sum(gaussian(grid), axis=2)
kernel /= np.sum(kernel)
# repeat same kernel across depth dimension
kernel = np.tile(kernel, (n_channels, 1, 1))
# conv weight should be (out_channels, groups/in_channels, h, w),
# and since we have depth-separable convolution we want the groups dimension to be 1
kernel = torch.FloatTensor(kernel[:, None, :, :])
kernel = kernel.to(cuda)
return Variable(kernel, requires_grad=False)
def conv_gauss(img, kernel):
""" convolve img with a gaussian kernel that has been built with build_gauss_kernel """
n_channels, _, kw, kh = kernel.shape
img = fnn.pad(img, (kw // 2, kh // 2, kw // 2, kh // 2), mode='replicate')
return fnn.conv2d(img, kernel, groups=n_channels)
def laplacian_pyramid(img, kernel, max_levels=5):
current = img
pyr = []
for level in range(max_levels):
filtered = conv_gauss(current, kernel)
diff = current - filtered
pyr.append(diff)
current = fnn.avg_pool2d(filtered, 2)
pyr.append(current)
return pyr
def down_pyramid(img, max_levels=5):
current = img
pyr = []
pyr.append(img)
for level in range(max_levels):
img = fnn.interpolate(img, mode='bilinear', scale_factor=0.5)
pyr.append(img)
pyr.append(current)
return pyr
class LapLoss(nn.Module):
def __init__(self, device, max_levels=5, k_size=5, sigma=2.0):
super(LapLoss, self).__init__()
self.max_levels = max_levels
self.k_size = k_size
self.sigma = sigma
self._gauss_kernel = None
self.device = device
def forward(self, input, target, reduce='mean'):
if self._gauss_kernel is None or self._gauss_kernel.shape[1] != input.shape[1]:
self._gauss_kernel = build_gauss_kernel(
cuda=self.device, size=self.k_size, sigma=self.sigma,
n_channels=input.shape[1]
)
pyr_input = laplacian_pyramid(input, self._gauss_kernel, self.max_levels)
pyr_target = laplacian_pyramid(target, self._gauss_kernel, self.max_levels)
if reduce is 'mean':
L1_loss = torch.nn.L1Loss(size_average=True)
return sum(L1_loss(a, b) for a, b in zip(pyr_input, pyr_target))
else:
L1_loss = torch.nn.L1Loss(size_average=False)
return sum(L1_loss(a, b) for a, b in zip(pyr_input, pyr_target))
# class LapLoss(nn.Module):
# def __init__(self, device, max_levels=5, k_size=5, sigma=2.0):
# super(LapLoss, self).__init__()
# self.max_levels = max_levels
# self.k_size = k_size
# self.sigma = sigma
# self._gauss_kernel = None
# self.device = device
#
#
# def forward(self, input, target, reduce='mean'):
# if self._gauss_kernel is None or self._gauss_kernel.shape[1] != input.shape[1]:
# self._gauss_kernel = build_gauss_kernel(
# cuda=self.device, size=self.k_size, sigma=self.sigma,
# n_channels=input.shape[1]
# )
# pyr_input = down_pyramid(input, self.max_levels)
# pyr_target = laplacian_pyramid(target, self._gauss_kernel, self.max_levels)
# if reduce is 'mean':
# L1_loss = torch.nn.L1Loss(size_average=True)
# return sum(L1_loss(a, b) for a, b in zip(pyr_input, pyr_target))
# else:
# L1_loss = torch.nn.L1Loss(size_average=False)
# return sum(L1_loss(a, b) for a, b in zip(pyr_input, pyr_target))
class LapMap(nn.Module):
def __init__(self, max_levels=5, k_size=5, sigma=2.0):
super(LapMap, self).__init__()
self.max_levels = max_levels
self.k_size = k_size
self.sigma = sigma
self._gauss_kernel = None
def forward(self, input):
if self._gauss_kernel is None or self._gauss_kernel.shape[1] != input.shape[1]:
self._gauss_kernel = build_gauss_kernel(
size=self.k_size, sigma=self.sigma,
n_channels=input.shape[1], cuda=input.is_cuda
)
pyr_input = laplacian_pyramid(input, self._gauss_kernel, self.max_levels)
return pyr_input | en | 0.606673 | # eps is a small value added to the variance to avoid divide-by-zero. # bxcx(hxw) # torch.bmm(batch1, batch2, out=None) # # batch1: bxmxp, batch2: bxpxn -> bxmxn # # f: bxcx(hxw), f.transpose: bx(hxw)xc -> bxcxc # content loss # style loss # KL loss # laplacian loss # Laploss = Lap_criterion(2*ori_content-1, 2*ori_style-1) # total loss # content loss # style loss # weight_list = [100, 30, 2, 1] # iCov = GramMatrix()(ft_x) # tCov = GramMatrix()(ft_s) # totalStyleLoss = totalStyleLoss + 100*self.L2_loss(iCov, tCov) # total loss # content loss # style loss # KL loss # total loss # content loss # style loss # laplacian loss # Laploss = Lap_criterion(2*ori_content-1, 2*ori_style-1) # total loss # repeat same kernel across depth dimension # conv weight should be (out_channels, groups/in_channels, h, w), # and since we have depth-separable convolution we want the groups dimension to be 1 convolve img with a gaussian kernel that has been built with build_gauss_kernel # class LapLoss(nn.Module): # def __init__(self, device, max_levels=5, k_size=5, sigma=2.0): # super(LapLoss, self).__init__() # self.max_levels = max_levels # self.k_size = k_size # self.sigma = sigma # self._gauss_kernel = None # self.device = device # # # def forward(self, input, target, reduce='mean'): # if self._gauss_kernel is None or self._gauss_kernel.shape[1] != input.shape[1]: # self._gauss_kernel = build_gauss_kernel( # cuda=self.device, size=self.k_size, sigma=self.sigma, # n_channels=input.shape[1] # ) # pyr_input = down_pyramid(input, self.max_levels) # pyr_target = laplacian_pyramid(target, self._gauss_kernel, self.max_levels) # if reduce is 'mean': # L1_loss = torch.nn.L1Loss(size_average=True) # return sum(L1_loss(a, b) for a, b in zip(pyr_input, pyr_target)) # else: # L1_loss = torch.nn.L1Loss(size_average=False) # return sum(L1_loss(a, b) for a, b in zip(pyr_input, pyr_target)) | 2.480128 | 2 |
elementary/between-markers.py | vargad/exercises | 1 | 6619170 | #!/usr/bin/env python3
def between_markers(text: str, begin: str, end: str) -> str:
b=text.find(begin)
e=text.find(end)
b=0 if b==-1 else b+len(begin)
return text[b:] if e==-1 else text[b:e]
if __name__ == '__main__':
print(between_markers("What is >love<", ">", "<"))
print(between_markers("<body><h1>My Little Phony</h1></body>", "<h1>", "</h1>"))
assert between_markers("What is >love<", ">", "<") == "love"
assert between_markers("<body><h1>My Little Phony</h1></body>", "<h1>", "</h1>") == "My Little Phony"
assert between_markers("<body><h1>My Little Phony", "<h1>", "</h1>") == "My Little Phony"
assert between_markers("My Little Phony", "<h1>", "</h1>") == "My Little Phony"
assert between_markers("What is <love>", ">", "<") == ""
| #!/usr/bin/env python3
def between_markers(text: str, begin: str, end: str) -> str:
    """Return the substring of *text* between the markers *begin* and *end*.

    A missing *begin* means the result starts at the beginning of *text*;
    a missing *end* (after *begin*) means it runs to the end of *text*.

    Fix: the end marker is now searched only AFTER the begin marker. The
    previous version scanned for *end* from position 0, so an occurrence of
    *end* before *begin* produced an empty slice (e.g.
    between_markers("1>2<3>4", "<", ">") returned "" instead of "3").
    """
    b = text.find(begin)
    b = 0 if b == -1 else b + len(begin)
    # Search for the closing marker only after the opening one.
    e = text.find(end, b)
    return text[b:] if e == -1 else text[b:e]
if __name__ == '__main__':
    # Demo output for two sample inputs.
    print(between_markers("What is >love<", ">", "<"))
    print(between_markers("<body><h1>My Little Phony</h1></body>", "<h1>", "</h1>"))
    # Self-test: every (text, begin, end) triple must yield the expected slice.
    cases = [
        ("What is >love<", ">", "<", "love"),
        ("<body><h1>My Little Phony</h1></body>", "<h1>", "</h1>", "My Little Phony"),
        ("<body><h1>My Little Phony", "<h1>", "</h1>", "My Little Phony"),
        ("My Little Phony", "<h1>", "</h1>", "My Little Phony"),
        ("What is <love>", ">", "<", ""),
    ]
    for text, begin, end, expected in cases:
        assert between_markers(text, begin, end) == expected
| fr | 0.221828 | #!/usr/bin/env python3 | 3.723789 | 4 |
tests/transports/debug_tests.py | ko101/softlayer-python | 0 | 6619171 | <filename>tests/transports/debug_tests.py<gh_stars>0
"""
SoftLayer.tests.transports.debug
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:license: MIT, see LICENSE for more details.
"""
import requests
from unittest import mock as mock
import SoftLayer
from SoftLayer import testing
from SoftLayer import transports
class TestDebugTransport(testing.TestCase):
    """Unit tests for transports.DebugTransport.

    DebugTransport wraps another transport, records every request it
    forwards (get_last_calls), and can render a request as a reproducible
    command line (print_reproduceable).
    """

    def set_up(self):
        # Wrap the fixture transport -- presumably it returns canned fixture
        # data without network access; confirm against transports.FixtureTransport.
        fixture_transport = transports.FixtureTransport()
        self.transport = transports.DebugTransport(fixture_transport)
        req = transports.Request()
        req.service = 'SoftLayer_Account'
        req.method = 'getObject'
        self.req = req

    def test_call(self):
        # Invoking the wrapper proxies the request to the inner transport.
        resp = self.transport(self.req)
        self.assertEqual(resp['accountId'], 1234)

    def test_get_last_calls(self):
        resp = self.transport(self.req)
        self.assertEqual(resp['accountId'], 1234)
        # The debug wrapper must have recorded the forwarded request.
        calls = self.transport.get_last_calls()
        self.assertEqual(calls[0].service, 'SoftLayer_Account')

    def test_print_reproduceable(self):
        # NOTE(review): the `req` built here is unused -- the assertion runs
        # against self.req from set_up (same service, so the check still holds).
        req = transports.Request()
        req.service = 'SoftLayer_Account'
        req.method = 'getObject'
        output_text = self.transport.print_reproduceable(self.req)
        self.assertEqual('SoftLayer_Account', output_text)

    def test_print_reproduceable_post(self):
        # A REST-style request should render as a curl-like POST command.
        req = transports.Request()
        req.url = "https://test.com"
        req.payload = "testing"
        req.transport_headers = {"test-headers": 'aaaa'}
        req.args = 'createObject'
        rest_transport = transports.RestTransport()
        transport = transports.DebugTransport(rest_transport)
        output_text = transport.print_reproduceable(req)
        self.assertIn("https://test.com", output_text)
        self.assertIn("-X POST", output_text)

    @mock.patch('SoftLayer.transports.rest.requests.Session.request')
    def test_error(self, request):
        # Test JSON Error: simulate an HTTP 404 whose body is a JSON error
        # document, raised by raise_for_status on the mocked session.
        e = requests.HTTPError('error')
        e.response = mock.MagicMock()
        e.response.status_code = 404
        e.response.text = '''{
            "error": "description",
            "code": "Error Code"
        }'''
        request().raise_for_status.side_effect = e
        req = transports.Request()
        req.service = 'SoftLayer_Service'
        req.method = 'Resource'
        rest_transport = transports.RestTransport()
        transport = transports.DebugTransport(rest_transport)
        self.assertRaises(SoftLayer.SoftLayerAPIError, transport, req)
        # Even failed calls are recorded, with the exception attached.
        calls = transport.get_last_calls()
        self.assertEqual(404, calls[0].exception.faultCode)
| <filename>tests/transports/debug_tests.py<gh_stars>0
"""
SoftLayer.tests.transports.debug
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:license: MIT, see LICENSE for more details.
"""
import requests
from unittest import mock as mock
import SoftLayer
from SoftLayer import testing
from SoftLayer import transports
class TestDebugTransport(testing.TestCase):
    """Unit tests for transports.DebugTransport.

    DebugTransport wraps another transport, records every request it
    forwards (get_last_calls), and can render a request as a reproducible
    command line (print_reproduceable).
    """

    def set_up(self):
        # Wrap the fixture transport -- presumably it returns canned fixture
        # data without network access; confirm against transports.FixtureTransport.
        fixture_transport = transports.FixtureTransport()
        self.transport = transports.DebugTransport(fixture_transport)
        req = transports.Request()
        req.service = 'SoftLayer_Account'
        req.method = 'getObject'
        self.req = req

    def test_call(self):
        # Invoking the wrapper proxies the request to the inner transport.
        resp = self.transport(self.req)
        self.assertEqual(resp['accountId'], 1234)

    def test_get_last_calls(self):
        resp = self.transport(self.req)
        self.assertEqual(resp['accountId'], 1234)
        # The debug wrapper must have recorded the forwarded request.
        calls = self.transport.get_last_calls()
        self.assertEqual(calls[0].service, 'SoftLayer_Account')

    def test_print_reproduceable(self):
        # NOTE(review): the `req` built here is unused -- the assertion runs
        # against self.req from set_up (same service, so the check still holds).
        req = transports.Request()
        req.service = 'SoftLayer_Account'
        req.method = 'getObject'
        output_text = self.transport.print_reproduceable(self.req)
        self.assertEqual('SoftLayer_Account', output_text)

    def test_print_reproduceable_post(self):
        # A REST-style request should render as a curl-like POST command.
        req = transports.Request()
        req.url = "https://test.com"
        req.payload = "testing"
        req.transport_headers = {"test-headers": 'aaaa'}
        req.args = 'createObject'
        rest_transport = transports.RestTransport()
        transport = transports.DebugTransport(rest_transport)
        output_text = transport.print_reproduceable(req)
        self.assertIn("https://test.com", output_text)
        self.assertIn("-X POST", output_text)

    @mock.patch('SoftLayer.transports.rest.requests.Session.request')
    def test_error(self, request):
        # Test JSON Error: simulate an HTTP 404 whose body is a JSON error
        # document, raised by raise_for_status on the mocked session.
        e = requests.HTTPError('error')
        e.response = mock.MagicMock()
        e.response.status_code = 404
        e.response.text = '''{
            "error": "description",
            "code": "Error Code"
        }'''
        request().raise_for_status.side_effect = e
        req = transports.Request()
        req.service = 'SoftLayer_Service'
        req.method = 'Resource'
        rest_transport = transports.RestTransport()
        transport = transports.DebugTransport(rest_transport)
        self.assertRaises(SoftLayer.SoftLayerAPIError, transport, req)
        # Even failed calls are recorded, with the exception attached.
        calls = transport.get_last_calls()
        self.assertEqual(404, calls[0].exception.faultCode)
| en | 0.285649 | SoftLayer.tests.transports.debug ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ :license: MIT, see LICENSE for more details. # Test JSON Error { "error": "description", "code": "Error Code" } | 2.367875 | 2 |
mani/scheduler.py | sherinkurian/mani | 58 | 6619172 |
import gc
import logging
import os
import pytz
import signal
import socket
import time
from datetime import datetime
from .job import Job
from . import util
log = logging.getLogger(__name__)
class Scheduler:
    """Runs registered jobs once per second when due, records a heartbeat in
    Redis, and stops cleanly on SIGINT/SIGTERM/SIGQUIT.
    """

    #: Defaults merged under any user-supplied config.
    DEFAULT_CONFIG = {
        "timeout": 60,
        "heartbeat_key": "mani:heartbeat",
        "timezone": pytz.utc
    }

    #: Signals that trigger a graceful stop of the run loop.
    TRAPPED_SIGNALS = (
        signal.SIGINT,
        signal.SIGTERM,
        signal.SIGQUIT
    )

    def __init__(self, redis, config=None):
        """Create a scheduler.

        redis  -- redis client used for heartbeats and passed on to each Job
        config -- optional dict of overrides for DEFAULT_CONFIG
        """
        # `config` used to default to a shared mutable `{}`; `None` avoids
        # the mutable-default-argument pitfall.
        self.jobs = {}
        self.redis = redis
        self.host = socket.gethostname()
        self.pid = os.getpid()
        self.running = False
        self.stopped = False
        self.config = self.DEFAULT_CONFIG.copy()
        self.config.update(config or {})

    def add_job(self, period, at, job_func):
        """Register *job_func* to run with the given period/offset.

        Raises ValueError if a job with the same function name exists.
        """
        name = job_func.__name__
        if name in self.jobs:
            # Raising a plain string is a TypeError on Python 3; raise a
            # proper exception instead.
            raise ValueError("duplicate job %s" % name)
        job = Job(name, period, at, job_func, self.redis, self.config)
        self.jobs[name] = job

    def start(self):
        """Run the scheduler loop until stop() is called or a signal arrives."""
        self.running = True
        self.trap_signals()
        while True:
            if self.stopped: break
            now = self.now()
            jobs = self.jobs_to_run(now)
            for job in jobs:
                job.run(now)
            self.heartbeat(now)
            if self.stopped: break
            self.sleep_until_next_second()
        log.info("stopped")

    def jobs_to_run(self, now):
        """Return the jobs whose schedule is due at *now*."""
        return filter(lambda j: j.ready_to_run(now), self.jobs.values())

    def heartbeat(self, now):
        """Record a liveness timestamp for this host/pid in Redis."""
        ts = util.to_timestamp(now)
        self.redis.hset(self.config["heartbeat_key"], self.heartbeat_field(), ts)

    def heartbeat_field(self):
        """Hash field identifying this scheduler process ("host##pid")."""
        return "%s##%s" % (self.host, self.pid)

    def now(self):
        """Current UTC time as an aware datetime."""
        return datetime.utcnow().replace(tzinfo=pytz.utc)

    def trap_signals(self):
        """Install stop() as the handler for every signal in TRAPPED_SIGNALS."""
        try:
            for sig in self.TRAPPED_SIGNALS:
                signal.signal(sig, self.stop)
        except ValueError:  # for tests to pass (since it runs on a thread)
            log.warning("could not add handlers for trapping signals")

    def stop(self, _signal=None, _frame=None):
        """Request a graceful shutdown; also usable as a signal handler."""
        self.stopped = True

    def sleep_until_next_second(self):
        """Sleep until the next wall-clock second boundary."""
        # process gets hot otherwise
        gc.collect()
        now = self.now()
        sleeptime = 1.0 - (now.microsecond / 1000000.0)
        time.sleep(sleeptime)
|
import gc
import logging
import os
import pytz
import signal
import socket
import time
from datetime import datetime
from .job import Job
from . import util
log = logging.getLogger(__name__)
class Scheduler:
    """Runs registered jobs once per second when due, records a heartbeat in
    Redis, and stops cleanly on SIGINT/SIGTERM/SIGQUIT.
    """

    #: Defaults merged under any user-supplied config.
    DEFAULT_CONFIG = {
        "timeout": 60,
        "heartbeat_key": "mani:heartbeat",
        "timezone": pytz.utc
    }

    #: Signals that trigger a graceful stop of the run loop.
    TRAPPED_SIGNALS = (
        signal.SIGINT,
        signal.SIGTERM,
        signal.SIGQUIT
    )

    def __init__(self, redis, config=None):
        """Create a scheduler.

        redis  -- redis client used for heartbeats and passed on to each Job
        config -- optional dict of overrides for DEFAULT_CONFIG
        """
        # `config` used to default to a shared mutable `{}`; `None` avoids
        # the mutable-default-argument pitfall.
        self.jobs = {}
        self.redis = redis
        self.host = socket.gethostname()
        self.pid = os.getpid()
        self.running = False
        self.stopped = False
        self.config = self.DEFAULT_CONFIG.copy()
        self.config.update(config or {})

    def add_job(self, period, at, job_func):
        """Register *job_func* to run with the given period/offset.

        Raises ValueError if a job with the same function name exists.
        """
        name = job_func.__name__
        if name in self.jobs:
            # Raising a plain string is a TypeError on Python 3; raise a
            # proper exception instead.
            raise ValueError("duplicate job %s" % name)
        job = Job(name, period, at, job_func, self.redis, self.config)
        self.jobs[name] = job

    def start(self):
        """Run the scheduler loop until stop() is called or a signal arrives."""
        self.running = True
        self.trap_signals()
        while True:
            if self.stopped: break
            now = self.now()
            jobs = self.jobs_to_run(now)
            for job in jobs:
                job.run(now)
            self.heartbeat(now)
            if self.stopped: break
            self.sleep_until_next_second()
        log.info("stopped")

    def jobs_to_run(self, now):
        """Return the jobs whose schedule is due at *now*."""
        return filter(lambda j: j.ready_to_run(now), self.jobs.values())

    def heartbeat(self, now):
        """Record a liveness timestamp for this host/pid in Redis."""
        ts = util.to_timestamp(now)
        self.redis.hset(self.config["heartbeat_key"], self.heartbeat_field(), ts)

    def heartbeat_field(self):
        """Hash field identifying this scheduler process ("host##pid")."""
        return "%s##%s" % (self.host, self.pid)

    def now(self):
        """Current UTC time as an aware datetime."""
        return datetime.utcnow().replace(tzinfo=pytz.utc)

    def trap_signals(self):
        """Install stop() as the handler for every signal in TRAPPED_SIGNALS."""
        try:
            for sig in self.TRAPPED_SIGNALS:
                signal.signal(sig, self.stop)
        except ValueError:  # for tests to pass (since it runs on a thread)
            log.warning("could not add handlers for trapping signals")

    def stop(self, _signal=None, _frame=None):
        """Request a graceful shutdown; also usable as a signal handler."""
        self.stopped = True

    def sleep_until_next_second(self):
        """Sleep until the next wall-clock second boundary."""
        # process gets hot otherwise
        gc.collect()
        now = self.now()
        sleeptime = 1.0 - (now.microsecond / 1000000.0)
        time.sleep(sleeptime)
| en | 0.739779 | ##%s" % (self.host, self.pid) # for tests to pass (since it runs on a thread) # process gets hot otherwise | 2.445038 | 2 |
src/qa/__init__.py | honeydev/Junior | 21 | 6619173 | from src.qa.models import *
| from src.qa.models import *
| none | 1 | 1.130152 | 1 | |
Python/pyworkout/objects/ex38_mod1.py | honchardev/Fun | 0 | 6619174 | import pprint
class Beverage(object):
    """A drink with a display name and a serving temperature."""

    def __init__(self, name: str, temperature: float) -> None:
        # Store the constructor arguments verbatim.
        self.name = name
        self.temperature = temperature

    def __repr__(self) -> str:
        # Debug-friendly form: class, identity, and both attributes.
        return f"{self.__class__} {id(self)=} {self.name=} {self.temperature=}"
def main():
    """Build three sample beverages and pretty-print them."""
    beverages = []
    for label, degrees in zip(['bev1', 'bev2', 'bev3'], [38.5, 10.5, -3.2]):
        beverages.append(Beverage(label, degrees))
    pprint.pprint(beverages)


if __name__ == '__main__':
    main()
| import pprint
class Beverage(object):
    """A drink with a display name and a serving temperature."""

    def __init__(self, name: str, temperature: float) -> None:
        # Store the constructor arguments verbatim.
        self.name = name
        self.temperature = temperature

    def __repr__(self) -> str:
        # Debug-friendly form: class, identity, and both attributes.
        return f"{self.__class__} {id(self)=} {self.name=} {self.temperature=}"
def main():
    """Build three sample beverages and pretty-print them."""
    beverages = []
    for label, degrees in zip(['bev1', 'bev2', 'bev3'], [38.5, 10.5, -3.2]):
        beverages.append(Beverage(label, degrees))
    pprint.pprint(beverages)


if __name__ == '__main__':
    main()
| none | 1 | 3.394543 | 3 | |
Python/problem0003.py | 1050669722/LeetCode-Answers | 0 | 6619175 | <filename>Python/problem0003.py<gh_stars>0
# # -*- coding: utf-8 -*-
# """
# Created on Sun May 12 17:25:45 2019
# @author: Administrator
# """
# import time
# time1 = time.perf_counter()
# #class Solution():
# # def lengthOfLongestSubstring(self, s):
# # length = len(s)
# ## if length == 0:
# ## return 0
# ## elif length == 1:
# ## return 1
# ## else:
# # for n in range(length,-1,-1):
# # a = []
# # for p in range(0,length-n+1):
# # a.append(s[p:p+n])
# # for m in range(len(a)):
# # b = []
# # c = {}
# # for k in list(a[m]):
# # if k not in b:
# # b.append(k)
# # c[k] = 1
# # else:
# # c[k] += 1
# ## if 1 not in c.values():
# # count = 0
# # for value in c.values():
# # if value != 1:
# # count += 1
# # if count != 0:
# # continue
# # else:
# # return len(a[m]) #a[m]
# #class Solution:
# # def lengthOfLongestSubstring(self, s):
# # """
# # :type s: str
# # :rtype: int
# # """
# # st = {}
# # i, ans = 0, 0
# # for j in range(len(s)):
# # if s[j] in st:
# # i = max(st[s[j]], i) #上一个被重复字母的位置,以1开头
# # ans = max(ans, (j + 1) - i) #这种相减是可行的,因为i是从1开始的
# # st[s[j]] = (j + 1) #s[j]的位置更新,以1开头
# # return ans
# class Solution():
# def lengthOfLongestSubstring(self, s): #两个位置,当前字母的最新位置,被重复字母的最新位置
# st = {}
# i, ans = 0, 0
# # d = {}
# for j in range(len(s)): #一边计长度,一边更新最大长度值
# if s[j] in st.keys():
# # print(s[j])
# # print([st[s[j]],i])
# i = max(st[s[j]], i)#st[s[j]]#
# ans = max(ans, (j+1)-i)
# # d['head'] = i
# # d['tail'] = j+1
# st[s[j]] = (j+1)
# return ans
# solu = Solution()
# s = 'abcabcbb'
# s = 'bbbbb'
# s = 'pwwkew'
# s = ''
# s = ' '
# s = 'c'
# s = 'au'
# s = "kwssiouw"#fydhihvgjuejmzbudeybgigseylmohjtgodovyxgubphcrbfxcjfkpxqpkfdsqz"
# print(solu.lengthOfLongestSubstring(s))
# time2 = time.perf_counter()
# print(time2-time1)
class Solution:
    """LeetCode 3: length of the longest substring without repeating characters."""

    def lengthOfLongestSubstring(self, s: str) -> int:
        """Return the length of the longest run of distinct characters in *s*.

        O(len(s)) sliding window: `start` is the left edge of the current
        duplicate-free window and `last_seen` maps each character to its most
        recent index.

        Fixes over the previous version:
        * it returned `s` itself (a str, not an int) when len(s) <= 1;
        * it advanced the left edge by the previous window's *length* instead
          of its absolute offset, undercounting ("abcabcbb" gave 2, not 3);
        * it emitted debug print() output on every iteration.
        """
        last_seen = {}
        start = 0
        best = 0
        for index, char in enumerate(s):
            if char in last_seen and last_seen[char] >= start:
                # Duplicate inside the window: move the left edge just past
                # the previous occurrence.
                start = last_seen[char] + 1
            last_seen[char] = index
            best = max(best, index - start + 1)
        return best

    def fun(self, s):
        """Return True if *s* contains no repeated characters."""
        return len(s) == len(set(s))
# Ad-hoc driver: run the solver on one sample input and print the result.
# (Alternative inputs from earlier debugging are kept commented out.)
solu = Solution()
s = "abcabcbb"
# s = "bbbbb"
# # s = "pwwkew"
# # s = ''
# # s = 's'
print(solu.lengthOfLongestSubstring(s))
| <filename>Python/problem0003.py<gh_stars>0
# # -*- coding: utf-8 -*-
# """
# Created on Sun May 12 17:25:45 2019
# @author: Administrator
# """
# import time
# time1 = time.perf_counter()
# #class Solution():
# # def lengthOfLongestSubstring(self, s):
# # length = len(s)
# ## if length == 0:
# ## return 0
# ## elif length == 1:
# ## return 1
# ## else:
# # for n in range(length,-1,-1):
# # a = []
# # for p in range(0,length-n+1):
# # a.append(s[p:p+n])
# # for m in range(len(a)):
# # b = []
# # c = {}
# # for k in list(a[m]):
# # if k not in b:
# # b.append(k)
# # c[k] = 1
# # else:
# # c[k] += 1
# ## if 1 not in c.values():
# # count = 0
# # for value in c.values():
# # if value != 1:
# # count += 1
# # if count != 0:
# # continue
# # else:
# # return len(a[m]) #a[m]
# #class Solution:
# # def lengthOfLongestSubstring(self, s):
# # """
# # :type s: str
# # :rtype: int
# # """
# # st = {}
# # i, ans = 0, 0
# # for j in range(len(s)):
# # if s[j] in st:
# # i = max(st[s[j]], i) #上一个被重复字母的位置,以1开头
# # ans = max(ans, (j + 1) - i) #这种相减是可行的,因为i是从1开始的
# # st[s[j]] = (j + 1) #s[j]的位置更新,以1开头
# # return ans
# class Solution():
# def lengthOfLongestSubstring(self, s): #两个位置,当前字母的最新位置,被重复字母的最新位置
# st = {}
# i, ans = 0, 0
# # d = {}
# for j in range(len(s)): #一边计长度,一边更新最大长度值
# if s[j] in st.keys():
# # print(s[j])
# # print([st[s[j]],i])
# i = max(st[s[j]], i)#st[s[j]]#
# ans = max(ans, (j+1)-i)
# # d['head'] = i
# # d['tail'] = j+1
# st[s[j]] = (j+1)
# return ans
# solu = Solution()
# s = 'abcabcbb'
# s = 'bbbbb'
# s = 'pwwkew'
# s = ''
# s = ' '
# s = 'c'
# s = 'au'
# s = "kwssiouw"#fydhihvgjuejmzbudeybgigseylmohjtgodovyxgubphcrbfxcjfkpxqpkfdsqz"
# print(solu.lengthOfLongestSubstring(s))
# time2 = time.perf_counter()
# print(time2-time1)
class Solution:
    """LeetCode 3: length of the longest substring without repeating characters."""

    def lengthOfLongestSubstring(self, s: str) -> int:
        """Return the length of the longest run of distinct characters in *s*.

        O(len(s)) sliding window: `start` is the left edge of the current
        duplicate-free window and `last_seen` maps each character to its most
        recent index.

        Fixes over the previous version:
        * it returned `s` itself (a str, not an int) when len(s) <= 1;
        * it advanced the left edge by the previous window's *length* instead
          of its absolute offset, undercounting ("abcabcbb" gave 2, not 3);
        * it emitted debug print() output on every iteration.
        """
        last_seen = {}
        start = 0
        best = 0
        for index, char in enumerate(s):
            if char in last_seen and last_seen[char] >= start:
                # Duplicate inside the window: move the left edge just past
                # the previous occurrence.
                start = last_seen[char] + 1
            last_seen[char] = index
            best = max(best, index - start + 1)
        return best

    def fun(self, s):
        """Return True if *s* contains no repeated characters."""
        return len(s) == len(set(s))
# Ad-hoc driver: run the solver on one sample input and print the result.
# (Alternative inputs from earlier debugging are kept commented out.)
solu = Solution()
s = "abcabcbb"
# s = "bbbbb"
# # s = "pwwkew"
# # s = ''
# # s = 's'
print(solu.lengthOfLongestSubstring(s))
| en | 0.389478 | # # -*- coding: utf-8 -*- # """ # Created on Sun May 12 17:25:45 2019 # @author: Administrator # """ # import time # time1 = time.perf_counter() # #class Solution(): # # def lengthOfLongestSubstring(self, s): # # length = len(s) # ## if length == 0: # ## return 0 # ## elif length == 1: # ## return 1 # ## else: # # for n in range(length,-1,-1): # # a = [] # # for p in range(0,length-n+1): # # a.append(s[p:p+n]) # # for m in range(len(a)): # # b = [] # # c = {} # # for k in list(a[m]): # # if k not in b: # # b.append(k) # # c[k] = 1 # # else: # # c[k] += 1 # ## if 1 not in c.values(): # # count = 0 # # for value in c.values(): # # if value != 1: # # count += 1 # # if count != 0: # # continue # # else: # # return len(a[m]) #a[m] # #class Solution: # # def lengthOfLongestSubstring(self, s): # # """ # # :type s: str # # :rtype: int # # """ # # st = {} # # i, ans = 0, 0 # # for j in range(len(s)): # # if s[j] in st: # # i = max(st[s[j]], i) #上一个被重复字母的位置,以1开头 # # ans = max(ans, (j + 1) - i) #这种相减是可行的,因为i是从1开始的 # # st[s[j]] = (j + 1) #s[j]的位置更新,以1开头 # # return ans # class Solution(): # def lengthOfLongestSubstring(self, s): #两个位置,当前字母的最新位置,被重复字母的最新位置 # st = {} # i, ans = 0, 0 # # d = {} # for j in range(len(s)): #一边计长度,一边更新最大长度值 # if s[j] in st.keys(): # # print(s[j]) # # print([st[s[j]],i]) # i = max(st[s[j]], i)#st[s[j]]# # ans = max(ans, (j+1)-i) # # d['head'] = i # # d['tail'] = j+1 # st[s[j]] = (j+1) # return ans # solu = Solution() # s = 'abcabcbb' # s = 'bbbbb' # s = 'pwwkew' # s = '' # s = ' ' # s = 'c' # s = 'au' # s = "kwssiouw"#fydhihvgjuejmzbudeybgigseylmohjtgodovyxgubphcrbfxcjfkpxqpkfdsqz" # print(solu.lengthOfLongestSubstring(s)) # time2 = time.perf_counter() # print(time2-time1) # s = "bbbbb" # # s = "pwwkew" # # s = '' # # s = 's' | 3.161957 | 3 |
src/spring/azext_spring/_constant.py | Caoxuyang/azure-cli-extensions | 0 | 6619176 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=wrong-import-order
# pylint: disable=unused-argument, logging-format-interpolation, protected-access, wrong-import-order, too-many-lines
# Azure Marketplace identifiers for the VMware Tanzu (Azure Spring Apps
# Enterprise) offer. NOTE(review): presumably consumed by the marketplace
# agreement/billing code paths -- confirm against the callers.
MARKETPLACE_OFFER_ID = 'azure-spring-cloud-vmware-tanzu-2'
MARKETPLACE_PUBLISHER_ID = 'vmware-inc'
MARKETPLACE_PLAN_ID = 'asa-ent-hr-mtr'
| # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=wrong-import-order
# pylint: disable=unused-argument, logging-format-interpolation, protected-access, wrong-import-order, too-many-lines
# Azure Marketplace identifiers for the VMware Tanzu (Azure Spring Apps
# Enterprise) offer. NOTE(review): presumably consumed by the marketplace
# agreement/billing code paths -- confirm against the callers.
MARKETPLACE_OFFER_ID = 'azure-spring-cloud-vmware-tanzu-2'
MARKETPLACE_PUBLISHER_ID = 'vmware-inc'
MARKETPLACE_PLAN_ID = 'asa-ent-hr-mtr'
| en | 0.471385 | # -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- # pylint: disable=wrong-import-order # pylint: disable=unused-argument, logging-format-interpolation, protected-access, wrong-import-order, too-many-lines | 1.483902 | 1 |
haplocopy/hmm.py | msfuji/haplocopy | 0 | 6619177 | <reponame>msfuji/haplocopy
import numpy as np
from scipy.special import logsumexp
class HMM:
    r"""Position-dependent hidden Markov model.

    Unlike a homogeneous HMM, the transition and emission probabilities are
    looked up per position through `states`, so they may differ at every
    step of the sequence.

    Parameters
    ----------
    states : _HMMStateModel
        Must expose ``n_states``, ``n_features``, ``get_transition_prob(pos)``
        and ``get_emission_prob(pos, obs)``.
    initial_prob : array, shape (n_states)
        State distribution at the first position.

    Attributes
    ----------
    """
    def __init__(self, states, initial_prob):
        self.states = states
        self.initial_prob = initial_prob

    def _check_obs_seq(self, obs_seq):
        """Validate that *obs_seq* is an ndarray compatible with n_features."""
        if type(obs_seq) != np.ndarray:
            raise ValueError("obs_seq must be a numpy.ndarray object")
        # Univariate models expect a 1-D sequence ...
        if self.states.n_features == 1:
            if obs_seq.ndim != 1:
                raise ValueError("Number of columns in obs_seq differs from n_features")
        # ... multivariate models a 2-D (n_obs, n_features) array.
        if self.states.n_features > 1:
            if obs_seq.ndim != 2 or obs_seq.shape[1] != self.states.n_features:
                raise ValueError("Number of columns in obs_seq differs from n_features")

    def _naive_viterbi(self, obs_seq):
        r"""Compute Viterbi path for an observed sequence. Numerically
        unstable because neither log transformation nor scaling is performed.
        Only for debugging purpose.

        Parameters
        ----------
        obs_seq : array, shape (n_obs, n_features)
            Observed sequence. The first dimension corresponds to the temporal
            order of observations.

        Returns
        -------
        likelihood : float
            Probability of the state_seq, P(obs_seq, state_seq|theta)
        state_seq : array, shape (n_obs)
            State sequence by a ML estimate.
        """
        self._check_obs_seq(obs_seq)
        n_obs = obs_seq.shape[0]
        prob = np.empty((n_obs, self.states.n_states))
        # ptr[pos, state] = best predecessor of `state` at `pos`;
        # ptr[0] is never read (backtracking stops at pos 1).
        ptr = np.empty((n_obs, self.states.n_states), dtype=int)
        # Initialization: P(state) * P(obs_0 | state).
        em = self.states.get_emission_prob(0, obs_seq[0])
        prob[0, :] = self.initial_prob * em
        for pos in range(1, n_obs):
            # tr is indexed [previous_state, next_state] (see broadcast below).
            tr = self.states.get_transition_prob(pos - 1)
            em = self.states.get_emission_prob(pos, obs_seq[pos])
            # (prev, next) matrix of path probabilities; maximize over the
            # previous state and remember the argmax for backtracking.
            prob_before_max = prob[pos - 1, :, np.newaxis] * tr
            prob[pos, :] = np.max(prob_before_max, axis=0) * em
            ptr[pos, :] = np.argmax(prob_before_max, axis=0)

        # backtrack
        state_seq = np.empty(n_obs, dtype=int)
        state_seq[n_obs - 1] = np.argmax(prob[n_obs - 1, :])
        likelihood = np.max(prob[n_obs - 1, :])
        for pos in range(n_obs - 1, 0, -1):
            current_state = state_seq[pos]
            prev_state = ptr[pos, current_state]
            state_seq[pos - 1] = prev_state
        return likelihood, state_seq

    def viterbi(self, obs_seq):
        r"""Compute Viterbi path for an observed sequence in the log space.

        Parameters
        ----------
        obs_seq : array, shape (n_obs, n_features)
            Observed sequence. The first dimension corresponds to the temporal
            order of observations.

        Returns
        -------
        log_likelihood : float
            Log likelihood of the state_seq, log P(obs_seq, state_seq|theta)
        state_seq : array, shape (n_obs)
            State sequence by a ML estimate.
        """
        self._check_obs_seq(obs_seq)
        n_obs = obs_seq.shape[0]
        logp = np.empty((n_obs, self.states.n_states))
        # ptr[pos, state] = best predecessor; ptr[0] is never read.
        ptr = np.empty((n_obs, self.states.n_states), dtype=int)
        em = self.states.get_emission_prob(0, obs_seq[0])
        logp[0, :] = np.log(self.initial_prob) + np.log(em)
        for pos in range(1, n_obs):
            tr = self.states.get_transition_prob(pos - 1)
            em = self.states.get_emission_prob(pos, obs_seq[pos])
            # Same recursion as _naive_viterbi, but products become sums of
            # logs for numerical stability.
            logp_before_max = logp[pos - 1, :, np.newaxis] + np.log(tr)
            logp[pos, :] = np.max(logp_before_max, axis=0) + np.log(em)
            ptr[pos, :] = np.argmax(logp_before_max, axis=0)

        # backtrack
        state_seq = np.empty(n_obs, dtype=int)
        state_seq[n_obs - 1] = np.argmax(logp[n_obs - 1, :])
        log_likelihood = np.max(logp[n_obs - 1, :])
        for pos in range(n_obs - 1, 0, -1):
            current_state = state_seq[pos]
            prev_state = ptr[pos, current_state]
            state_seq[pos - 1] = prev_state
        return log_likelihood, state_seq

    def _naive_forward(self, obs_seq):
        r"""Compute marginal likelihood for an observed sequence using the
        forward algorithm. Numerically unstable because neither log
        transformation nor scaling is performed. Only for debugging purpose.

        Parameters
        ----------
        obs_seq : array, shape (n_obs, n_features)
            Observed sequence. The first dimension corresponds to the temporal
            order of observations.

        Returns
        -------
        marginal_likelihood : float
            Marginal likelihood for the observed sequence, P(obs_seq|theta).
        prob : array, shape (n_obs, n_states)
            Probability matrix.
        """
        self._check_obs_seq(obs_seq)
        n_obs = obs_seq.shape[0]
        prob = np.empty((n_obs, self.states.n_states))
        em = self.states.get_emission_prob(0, obs_seq[0])
        prob[0, :] = self.initial_prob * em
        for pos in range(1, n_obs):
            tr = self.states.get_transition_prob(pos - 1)
            em = self.states.get_emission_prob(pos, obs_seq[pos])
            # Forward recursion: like Viterbi but summing over the previous
            # state instead of maximizing.
            prob_before_sum = prob[pos - 1, :, np.newaxis] * tr
            prob[pos, :] = np.sum(prob_before_sum, axis=0) * em
        marginal_likelihood = np.sum(prob[n_obs - 1, :])
        return marginal_likelihood, prob

    def forward(self, obs_seq):
        r"""Compute marginal likelihood for an observed sequence using the
        forward algorithm. Uses the logsumexp method for numerical stability.

        Parameters
        ----------
        obs_seq : array, shape (n_obs, n_features)
            Observed sequence. The first dimension corresponds to the temporal
            order of observations.

        Returns
        -------
        log_marginal_likelihood : float
            Log marginal likelihood for the observed sequence,
            log P(obs_seq|theta).
        prob : array, shape (n_obs, n_states)
            Log probability matrix.
        """
        self._check_obs_seq(obs_seq)
        n_obs = obs_seq.shape[0]
        logp = np.empty((n_obs, self.states.n_states))
        em = self.states.get_emission_prob(0, obs_seq[0])
        logp[0, :] = np.log(self.initial_prob) + np.log(em)
        for pos in range(1, n_obs):
            tr = self.states.get_transition_prob(pos - 1)
            em = self.states.get_emission_prob(pos, obs_seq[pos])
            # logsumexp avoids underflow when summing tiny probabilities in
            # the log domain.
            logp_before_sum = logp[pos - 1, :, np.newaxis] + np.log(tr)
            logp[pos, :] = logsumexp(logp_before_sum, axis=0) + np.log(em)
        log_marginal_likelihood = logsumexp(logp[n_obs - 1, :])
        return log_marginal_likelihood, logp
| import numpy as np
from scipy.special import logsumexp
class HMM:
    r"""Position-dependent hidden Markov model.

    Unlike a homogeneous HMM, the transition and emission probabilities are
    looked up per position through `states`, so they may differ at every
    step of the sequence.

    Parameters
    ----------
    states : _HMMStateModel
        Must expose ``n_states``, ``n_features``, ``get_transition_prob(pos)``
        and ``get_emission_prob(pos, obs)``.
    initial_prob : array, shape (n_states)
        State distribution at the first position.

    Attributes
    ----------
    """
    def __init__(self, states, initial_prob):
        self.states = states
        self.initial_prob = initial_prob

    def _check_obs_seq(self, obs_seq):
        """Validate that *obs_seq* is an ndarray compatible with n_features."""
        if type(obs_seq) != np.ndarray:
            raise ValueError("obs_seq must be a numpy.ndarray object")
        # Univariate models expect a 1-D sequence ...
        if self.states.n_features == 1:
            if obs_seq.ndim != 1:
                raise ValueError("Number of columns in obs_seq differs from n_features")
        # ... multivariate models a 2-D (n_obs, n_features) array.
        if self.states.n_features > 1:
            if obs_seq.ndim != 2 or obs_seq.shape[1] != self.states.n_features:
                raise ValueError("Number of columns in obs_seq differs from n_features")

    def _naive_viterbi(self, obs_seq):
        r"""Compute Viterbi path for an observed sequence. Numerically
        unstable because neither log transformation nor scaling is performed.
        Only for debugging purpose.

        Parameters
        ----------
        obs_seq : array, shape (n_obs, n_features)
            Observed sequence. The first dimension corresponds to the temporal
            order of observations.

        Returns
        -------
        likelihood : float
            Probability of the state_seq, P(obs_seq, state_seq|theta)
        state_seq : array, shape (n_obs)
            State sequence by a ML estimate.
        """
        self._check_obs_seq(obs_seq)
        n_obs = obs_seq.shape[0]
        prob = np.empty((n_obs, self.states.n_states))
        # ptr[pos, state] = best predecessor of `state` at `pos`;
        # ptr[0] is never read (backtracking stops at pos 1).
        ptr = np.empty((n_obs, self.states.n_states), dtype=int)
        # Initialization: P(state) * P(obs_0 | state).
        em = self.states.get_emission_prob(0, obs_seq[0])
        prob[0, :] = self.initial_prob * em
        for pos in range(1, n_obs):
            # tr is indexed [previous_state, next_state] (see broadcast below).
            tr = self.states.get_transition_prob(pos - 1)
            em = self.states.get_emission_prob(pos, obs_seq[pos])
            # (prev, next) matrix of path probabilities; maximize over the
            # previous state and remember the argmax for backtracking.
            prob_before_max = prob[pos - 1, :, np.newaxis] * tr
            prob[pos, :] = np.max(prob_before_max, axis=0) * em
            ptr[pos, :] = np.argmax(prob_before_max, axis=0)

        # backtrack
        state_seq = np.empty(n_obs, dtype=int)
        state_seq[n_obs - 1] = np.argmax(prob[n_obs - 1, :])
        likelihood = np.max(prob[n_obs - 1, :])
        for pos in range(n_obs - 1, 0, -1):
            current_state = state_seq[pos]
            prev_state = ptr[pos, current_state]
            state_seq[pos - 1] = prev_state
        return likelihood, state_seq

    def viterbi(self, obs_seq):
        r"""Compute Viterbi path for an observed sequence in the log space.

        Parameters
        ----------
        obs_seq : array, shape (n_obs, n_features)
            Observed sequence. The first dimension corresponds to the temporal
            order of observations.

        Returns
        -------
        log_likelihood : float
            Log likelihood of the state_seq, log P(obs_seq, state_seq|theta)
        state_seq : array, shape (n_obs)
            State sequence by a ML estimate.
        """
        self._check_obs_seq(obs_seq)
        n_obs = obs_seq.shape[0]
        logp = np.empty((n_obs, self.states.n_states))
        # ptr[pos, state] = best predecessor; ptr[0] is never read.
        ptr = np.empty((n_obs, self.states.n_states), dtype=int)
        em = self.states.get_emission_prob(0, obs_seq[0])
        logp[0, :] = np.log(self.initial_prob) + np.log(em)
        for pos in range(1, n_obs):
            tr = self.states.get_transition_prob(pos - 1)
            em = self.states.get_emission_prob(pos, obs_seq[pos])
            # Same recursion as _naive_viterbi, but products become sums of
            # logs for numerical stability.
            logp_before_max = logp[pos - 1, :, np.newaxis] + np.log(tr)
            logp[pos, :] = np.max(logp_before_max, axis=0) + np.log(em)
            ptr[pos, :] = np.argmax(logp_before_max, axis=0)

        # backtrack
        state_seq = np.empty(n_obs, dtype=int)
        state_seq[n_obs - 1] = np.argmax(logp[n_obs - 1, :])
        log_likelihood = np.max(logp[n_obs - 1, :])
        for pos in range(n_obs - 1, 0, -1):
            current_state = state_seq[pos]
            prev_state = ptr[pos, current_state]
            state_seq[pos - 1] = prev_state
        return log_likelihood, state_seq

    def _naive_forward(self, obs_seq):
        r"""Compute marginal likelihood for an observed sequence using the
        forward algorithm. Numerically unstable because neither log
        transformation nor scaling is performed. Only for debugging purpose.

        Parameters
        ----------
        obs_seq : array, shape (n_obs, n_features)
            Observed sequence. The first dimension corresponds to the temporal
            order of observations.

        Returns
        -------
        marginal_likelihood : float
            Marginal likelihood for the observed sequence, P(obs_seq|theta).
        prob : array, shape (n_obs, n_states)
            Probability matrix.
        """
        self._check_obs_seq(obs_seq)
        n_obs = obs_seq.shape[0]
        prob = np.empty((n_obs, self.states.n_states))
        em = self.states.get_emission_prob(0, obs_seq[0])
        prob[0, :] = self.initial_prob * em
        for pos in range(1, n_obs):
            tr = self.states.get_transition_prob(pos - 1)
            em = self.states.get_emission_prob(pos, obs_seq[pos])
            # Forward recursion: like Viterbi but summing over the previous
            # state instead of maximizing.
            prob_before_sum = prob[pos - 1, :, np.newaxis] * tr
            prob[pos, :] = np.sum(prob_before_sum, axis=0) * em
        marginal_likelihood = np.sum(prob[n_obs - 1, :])
        return marginal_likelihood, prob
def forward(self, obs_seq):
r"""Compute marginal likelihood for an observed sequence using the
forward algorithm. Use the logsumexp method for numerically stability.
Parameters
----------
obs_seq : array, shape (n_obs, n_features)
Observed sequence. The first dimension corresponds to the temporal
order of observations.
Returns
-------
log_marginal_likelihood : float
Log marginal likelihood for the observed sequence,
log P(obs_seq|theta).
prob : array, shape (n_obs, n_states)
Log probability matrix.
"""
self._check_obs_seq(obs_seq)
n_obs = obs_seq.shape[0]
logp = np.empty((n_obs, self.states.n_states))
em = self.states.get_emission_prob(0, obs_seq[0])
logp[0, :] = np.log(self.initial_prob) + np.log(em)
for pos in range(1, n_obs):
tr = self.states.get_transition_prob(pos - 1)
em = self.states.get_emission_prob(pos, obs_seq[pos])
logp_before_sum = logp[pos - 1, :, np.newaxis] + np.log(tr)
logp[pos, :] = logsumexp(logp_before_sum, axis=0) + np.log(em)
log_marginal_likelihood = logsumexp(logp[n_obs - 1, :])
return log_marginal_likelihood, logp | en | 0.678961 | Position-dependent hidden Markov model. Parameters ---------- states : _HMMStateModel initial_prob : array, shape (n_states) Attributes ---------- Compute Viterbi path for an observed sequence. Numerically unstable because neither log transformation nor scaling is performed. Only for debugging purpose. Parameters ---------- obs_seq : array, shape (n_obs, n_features) Observed sequence. The first dimension corresponds to the temporal order of observations. Returns ------- likelihood : float Probability of the state_seq, P(obs_seq, state_seq|theta) state_seq : array, shape (n_obs) State sequence by a ML estimate. # backtrack Compute Viterbi path for an observed sequence in the log space. Parameters ---------- obs_seq : array, shape (n_obs, n_features) Observed sequence. The first dimension corresponds to the temporal order of observations. Returns ------- log_likelihood : float Log likelihood of the state_seq, log P(obs_seq, state_seq|theta) state_seq : array, shape (n_obs) State sequence by a ML estimate. # backtrack Compute marginal likelihood for an observed sequence using the forward algorithm. Numerically unstable because neither log transformation nor scaling is performed. Only for debugging purpose. Parameters ---------- obs_seq : array, shape (n_obs, n_features) Observed sequence. The first dimension corresponds to the temporal order of observations. Returns ------- marginal_likelihood : float Marginal likelihood for the observed sequence, P(obs_seq|theta). prob : array, shape (n_obs, n_states) Probability matrix. Compute marginal likelihood for an observed sequence using the forward algorithm. Use the logsumexp method for numerically stability. Parameters ---------- obs_seq : array, shape (n_obs, n_features) Observed sequence. The first dimension corresponds to the temporal order of observations. 
Returns ------- log_marginal_likelihood : float Log marginal likelihood for the observed sequence, log P(obs_seq|theta). prob : array, shape (n_obs, n_states) Log probability matrix. | 2.438358 | 2 |
tirageAuSort.py | kevinhassan/electionRandomAccess | 0 | 6619178 | <reponame>kevinhassan/electionRandomAccess<filename>tirageAuSort.py
#This function extract the list of students after xls opening
def extractStudents(filename):
"""
Pre: The list in xls file is not empty
Post: All students are extract from file
Returns students list
"""
list = []
try:
# open Excel file
wb = xlrd.open_workbook(str(filename))
except IOError:
print ("Oops! No file "+filename+ " has been found !")
else:
sh = wb.sheet_by_name(wb.sheet_names()[0])
for rownum in range(1,sh.nrows):#1 to remove title line
student = sh.row_values(rownum)
list.append(student)
return list
import sys,getopt,random,xlrd
def main(argv):
filename = ''
n = -1
student = []
try:
options, remainder = getopt.getopt(sys.argv[1:], 'f:n:', ['--file=','--number='])
except getopt.GetoptError:
print (sys.argv[0] + ' -f <filename> -n <numberOfName>')
sys.exit(2)
else:
for opt, arg in options:
if opt == '-h':
print (sys.argv[0] + '-f <filename> -n <numberOfName>')
sys.exit()
elif opt in ("-f", "--file"):
filename = str(arg)
elif opt in ("-n", "--number"):
n = int(arg)
if filename!='' and n!=-1:
students = extractStudents(filename)
if (len(students)<n):
print('No need to launch program because you have only '+str(n)+' students')
sys.exit()
else:
i = len(students)-n
while i < len(students): #Get students to student & Remove n students
k = len(students)-1
l = random.randint(0,k)
student.append(students[l])#add student selected
del students[l]#Remove this student from the list
print("Les candidats pour les elections sont : ")
for candidat in student:
print (candidat[0], candidat[1])
else:
print('error occured')
sys.exit()
if __name__ == "__main__":
main(sys.argv[1:])
| #This function extract the list of students after xls opening
def extractStudents(filename):
"""
Pre: The list in xls file is not empty
Post: All students are extract from file
Returns students list
"""
list = []
try:
# open Excel file
wb = xlrd.open_workbook(str(filename))
except IOError:
print ("Oops! No file "+filename+ " has been found !")
else:
sh = wb.sheet_by_name(wb.sheet_names()[0])
for rownum in range(1,sh.nrows):#1 to remove title line
student = sh.row_values(rownum)
list.append(student)
return list
import sys,getopt,random,xlrd
def main(argv):
filename = ''
n = -1
student = []
try:
options, remainder = getopt.getopt(sys.argv[1:], 'f:n:', ['--file=','--number='])
except getopt.GetoptError:
print (sys.argv[0] + ' -f <filename> -n <numberOfName>')
sys.exit(2)
else:
for opt, arg in options:
if opt == '-h':
print (sys.argv[0] + '-f <filename> -n <numberOfName>')
sys.exit()
elif opt in ("-f", "--file"):
filename = str(arg)
elif opt in ("-n", "--number"):
n = int(arg)
if filename!='' and n!=-1:
students = extractStudents(filename)
if (len(students)<n):
print('No need to launch program because you have only '+str(n)+' students')
sys.exit()
else:
i = len(students)-n
while i < len(students): #Get students to student & Remove n students
k = len(students)-1
l = random.randint(0,k)
student.append(students[l])#add student selected
del students[l]#Remove this student from the list
print("Les candidats pour les elections sont : ")
for candidat in student:
print (candidat[0], candidat[1])
else:
print('error occured')
sys.exit()
if __name__ == "__main__":
main(sys.argv[1:]) | en | 0.894157 | #This function extract the list of students after xls opening Pre: The list in xls file is not empty
Post: All students are extract from file
Returns students list # open Excel file #1 to remove title line #Get students to student & Remove n students #add student selected #Remove this student from the list | 3.616473 | 4 |
structures/heap.py | exterkamps/Python-Data-Structures | 3 | 6619179 | <filename>structures/heap.py
class Heap():
def __init__(self):
self.heap_list = [0]
def insert(self, value: int):
self.heap_list.append(value)
self.percolate(self.size())
def percolate(self, i):
while i // 2 > 0:
parent = i // 2
if self.heap_list[i] < self.heap_list[parent]:
self.heap_list[i], self.heap_list[parent] = self.heap_list[parent], self.heap_list[i]
i = i // 2
def sift(self, i):
while (i * 2) <= self.size():
mc_i = self.find_min_child_index(i)
if self.heap_list[i] > self.heap_list[mc_i]:
self.heap_list[i], self.heap_list[mc_i] = self.heap_list[mc_i], self.heap_list[i]
i = mc_i
def find_min_child_index(self, i):
if (i * 2) > self.size():
return None
if (i * 2) + 1 > self.size():
return i * 2
else:
if self.heap_list[i * 2] < self.heap_list[i * 2 + 1]:
return i * 2
else:
return i * 2 + 1
def min(self):
if len(self.heap_list) > 1:
return self.heap_list[1]
else:
return None
def delete_min(self):
if self.size() == 0:
return None
if self.size() == 1:
return self.heap_list.pop()
min_val = self.heap_list[1]
self.heap_list[1] = self.heap_list.pop()
self.sift(1)
return min_val
def build(self, lst:list):
i = len(lst) // 2
self.heap_list = [0] + lst
while i > 0:
self.sift(i)
i -= 1
def size(self):
return len(self.heap_list) - 1
| <filename>structures/heap.py
class Heap():
def __init__(self):
self.heap_list = [0]
def insert(self, value: int):
self.heap_list.append(value)
self.percolate(self.size())
def percolate(self, i):
while i // 2 > 0:
parent = i // 2
if self.heap_list[i] < self.heap_list[parent]:
self.heap_list[i], self.heap_list[parent] = self.heap_list[parent], self.heap_list[i]
i = i // 2
def sift(self, i):
while (i * 2) <= self.size():
mc_i = self.find_min_child_index(i)
if self.heap_list[i] > self.heap_list[mc_i]:
self.heap_list[i], self.heap_list[mc_i] = self.heap_list[mc_i], self.heap_list[i]
i = mc_i
def find_min_child_index(self, i):
if (i * 2) > self.size():
return None
if (i * 2) + 1 > self.size():
return i * 2
else:
if self.heap_list[i * 2] < self.heap_list[i * 2 + 1]:
return i * 2
else:
return i * 2 + 1
def min(self):
if len(self.heap_list) > 1:
return self.heap_list[1]
else:
return None
def delete_min(self):
if self.size() == 0:
return None
if self.size() == 1:
return self.heap_list.pop()
min_val = self.heap_list[1]
self.heap_list[1] = self.heap_list.pop()
self.sift(1)
return min_val
def build(self, lst:list):
i = len(lst) // 2
self.heap_list = [0] + lst
while i > 0:
self.sift(i)
i -= 1
def size(self):
return len(self.heap_list) - 1
| none | 1 | 3.83747 | 4 | |
icevision/models/mmdet/models/sparse_rcnn/backbones/resnet_fpn.py | ai-fast-track/mantisshrimp | 580 | 6619180 | <filename>icevision/models/mmdet/models/sparse_rcnn/backbones/resnet_fpn.py
__all__ = [
"resnet50_fpn_1x",
"resnet50_fpn_mstrain_480_800_3x",
"resnet50_fpn_300_proposals_crop_mstrain_480_800_3x",
"resnet101_fpn_mstrain_480_800_3x_coco",
"resnet101_fpn_300_proposals_crop_mstrain_480_800_3x",
]
from icevision.imports import *
from icevision.models.mmdet.utils import *
class MMDetSparseRCNNBackboneConfig(MMDetBackboneConfig):
def __init__(self, **kwargs):
super().__init__(model_name="sparse_rcnn", **kwargs)
base_config_path = mmdet_configs_path / "sparse_rcnn"
base_weights_url = "https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn"
resnet50_fpn_1x = MMDetSparseRCNNBackboneConfig(
config_path=base_config_path / "sparse_rcnn_r50_fpn_1x_coco.py",
weights_url=f"{base_weights_url}/sparse_rcnn_r50_fpn_1x_coco/sparse_rcnn_r50_fpn_1x_coco_20201222_214453-dc79b137.pth",
)
resnet50_fpn_mstrain_480_800_3x = MMDetSparseRCNNBackboneConfig(
config_path=base_config_path / "sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco.py",
weights_url=f"{base_weights_url}/sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco/sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco_20201218_154234-7bc5c054.pth",
)
resnet50_fpn_300_proposals_crop_mstrain_480_800_3x = MMDetSparseRCNNBackboneConfig(
config_path=base_config_path
/ "sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py",
weights_url=f"{base_weights_url}/sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco/sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco_20201223_024605-9fe92701.pth",
)
resnet101_fpn_mstrain_480_800_3x_coco = MMDetSparseRCNNBackboneConfig(
config_path=base_config_path / "sparse_rcnn_r101_fpn_mstrain_480-800_3x_coco.py",
weights_url=f"{base_weights_url}/sparse_rcnn_r101_fpn_mstrain_480-800_3x_coco/sparse_rcnn_r101_fpn_mstrain_480-800_3x_coco_20201223_121552-6c46c9d6.pth",
)
resnet101_fpn_300_proposals_crop_mstrain_480_800_3x = MMDetSparseRCNNBackboneConfig(
config_path=base_config_path
/ "sparse_rcnn_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py",
weights_url=f"{base_weights_url}/sparse_rcnn_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco/sparse_rcnn_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco_20201223_023452-c23c3564.pth",
)
| <filename>icevision/models/mmdet/models/sparse_rcnn/backbones/resnet_fpn.py
__all__ = [
"resnet50_fpn_1x",
"resnet50_fpn_mstrain_480_800_3x",
"resnet50_fpn_300_proposals_crop_mstrain_480_800_3x",
"resnet101_fpn_mstrain_480_800_3x_coco",
"resnet101_fpn_300_proposals_crop_mstrain_480_800_3x",
]
from icevision.imports import *
from icevision.models.mmdet.utils import *
class MMDetSparseRCNNBackboneConfig(MMDetBackboneConfig):
def __init__(self, **kwargs):
super().__init__(model_name="sparse_rcnn", **kwargs)
base_config_path = mmdet_configs_path / "sparse_rcnn"
base_weights_url = "https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn"
resnet50_fpn_1x = MMDetSparseRCNNBackboneConfig(
config_path=base_config_path / "sparse_rcnn_r50_fpn_1x_coco.py",
weights_url=f"{base_weights_url}/sparse_rcnn_r50_fpn_1x_coco/sparse_rcnn_r50_fpn_1x_coco_20201222_214453-dc79b137.pth",
)
resnet50_fpn_mstrain_480_800_3x = MMDetSparseRCNNBackboneConfig(
config_path=base_config_path / "sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco.py",
weights_url=f"{base_weights_url}/sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco/sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco_20201218_154234-7bc5c054.pth",
)
resnet50_fpn_300_proposals_crop_mstrain_480_800_3x = MMDetSparseRCNNBackboneConfig(
config_path=base_config_path
/ "sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py",
weights_url=f"{base_weights_url}/sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco/sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco_20201223_024605-9fe92701.pth",
)
resnet101_fpn_mstrain_480_800_3x_coco = MMDetSparseRCNNBackboneConfig(
config_path=base_config_path / "sparse_rcnn_r101_fpn_mstrain_480-800_3x_coco.py",
weights_url=f"{base_weights_url}/sparse_rcnn_r101_fpn_mstrain_480-800_3x_coco/sparse_rcnn_r101_fpn_mstrain_480-800_3x_coco_20201223_121552-6c46c9d6.pth",
)
resnet101_fpn_300_proposals_crop_mstrain_480_800_3x = MMDetSparseRCNNBackboneConfig(
config_path=base_config_path
/ "sparse_rcnn_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py",
weights_url=f"{base_weights_url}/sparse_rcnn_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco/sparse_rcnn_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco_20201223_023452-c23c3564.pth",
)
| none | 1 | 1.615498 | 2 | |
m_src/pricing/stream.py | komthanh/v20-python-samples | 0 | 6619181 | <gh_stars>0
import argparse
import common.config
import common.args
from src.pricing.view import price_to_string, heartbeat_to_string
def main():
parser = argparse.ArgumentParser()
common.config.add_argument(parser)
parser.add_argument('--instrument','-i',type=common.args.instrument, required=True,action='append')
parser.add_argument('--snapshot',action='store_true',default=True)
parser.add_argument('--no-snapshot',dest='snapshot',action='store_false')
parser.add_argument('--show-heartbeats','-s',action='store_true', default=False)
args=parser.parse_args("-i EUR_USD".split())
account_id=args.config.active_account
api=args.config.create_streaming_context()
response=api.pricing.stream(account_id,snapshot=args.snapshot,instruments=','.join(args.instrument))
for msg_type, msg in response.parts():
if msg_type == "pricing.Heartbeat" and args.show_heartbeats:
print(heartbeat_to_string(msg))
elif msg_type == "pricing.Price":
print(price_to_string(msg))
if __name__=='__main__':
main() | import argparse
import common.config
import common.args
from src.pricing.view import price_to_string, heartbeat_to_string
def main():
parser = argparse.ArgumentParser()
common.config.add_argument(parser)
parser.add_argument('--instrument','-i',type=common.args.instrument, required=True,action='append')
parser.add_argument('--snapshot',action='store_true',default=True)
parser.add_argument('--no-snapshot',dest='snapshot',action='store_false')
parser.add_argument('--show-heartbeats','-s',action='store_true', default=False)
args=parser.parse_args("-i EUR_USD".split())
account_id=args.config.active_account
api=args.config.create_streaming_context()
response=api.pricing.stream(account_id,snapshot=args.snapshot,instruments=','.join(args.instrument))
for msg_type, msg in response.parts():
if msg_type == "pricing.Heartbeat" and args.show_heartbeats:
print(heartbeat_to_string(msg))
elif msg_type == "pricing.Price":
print(price_to_string(msg))
if __name__=='__main__':
main() | none | 1 | 2.446884 | 2 | |
source/estimation/engine.py | lonelycorn/AHRS | 1 | 6619182 | <reponame>lonelycorn/AHRS
import sys
import os
sys.path.insert(1, os.path.join(sys.path[0], '..'))
import numpy as np
from base.SO3 import SO3
from base.simple_filter import LowPassFilter, AverageFilter
from estimation.magnetometer_calibrator import MagnetometerCalibrator
from estimation.kalman_filter import KalmanFilterSO3
class Engine:
GYRO_NO_MOTION_THRESHOLD = 0.1
ACCEL_NO_MOTION_THRESHOLD = 10.0 # FIXME we may need a bigger value
LOWPASS_GAIN = 0.9
STATIC_CAL_SAMPLE_COUNT = 200
SENSOR_COVAR_AMPLIFIER = 2.0 # covar obtained after static calibration would be amplified for better stability
INITIAL_POSE_COVAR = 1e1 # diagonal
STATE_INIT = 0
STATE_CALIBRATE_MOVING = 1 # for mag bias
STATE_CALIBRATE_STATIC = 2 # for gyro bias, mag ref and accel ref
STATE_RUNNING = 3
def __init__(self):
self._filter = KalmanFilterSO3() # estimates the transform from current chip to initial chip
self._gyro_lp = LowPassFilter(Engine.LOWPASS_GAIN)
self._accel_lp = LowPassFilter(Engine.LOWPASS_GAIN)
self._gyro_avg = AverageFilter()
self._accel_avg = AverageFilter()
self._mag_avg = AverageFilter()
self._gyro_bias = None
self._mag_calibrator = MagnetometerCalibrator(np.zeros(3))
self._state = Engine.STATE_INIT
self._last_update_time = 0.0
def set_mag_param(self, mag_bias):
'''
update the mag parameters.
Could be used as a hacky way to advance the internal state machine,
but only in the simulation.
'''
if (self._state < Engine.STATE_CALIBRATE_STATIC):
self._state = Engine.STATE_CALIBRATE_STATIC
self._mag_bias = mag_bias
def update(self, t, gyro, accel, mag):
"""
"""
t *= 1.0
gyro = np.array(gyro, dtype=np.float)
accel = np.array(accel, dtype=np.float)
mag = np.array(mag, dtype=np.float)
# update low pass filters
self._gyro_lp.update(gyro)
self._accel_lp.update(accel)
no_motion = self._check_no_motion(gyro, accel)
if (self._state == Engine.STATE_INIT):
# wait until starts to move
if (not no_motion):
print("[EngineState] transit from INIT to CALIBRATE_MOVING")
self._state = Engine.STATE_CALIBRATE_MOVING
elif (self._state == Engine.STATE_CALIBRATE_MOVING):
self._mag_calibrator.update(mag)
self._mag_bias = self._mag_calibrator.bias
# wait until found bias, and stopped moving
if ((self._mag_bias is not None) and \
(no_motion)):
print("[EngineState] transit from CALIBRATE_MOVING to CALIBRATE_STATIC")
print("mag bias is {}".format(self._mag_bias))
self._state = Engine.STATE_CALIBRATE_STATIC
elif (self._state == Engine.STATE_CALIBRATE_STATIC):
if (no_motion): # only update when device is stationary
done = self._update_static_calibration(gyro, accel, mag)
if (done):
# NOTE: acceleration is in the opposite direction of the corresponding inertial force
gravity_in_body = self._accel_avg.value
gravity_in_world = np.array([0, 0, 1], dtype=np.float) * np.linalg.norm(gravity_in_body)
R_from_body_to_world = SO3.from_two_directions(gravity_in_body, gravity_in_world)
initial_pose_covar = np.eye(3) * Engine.INITIAL_POSE_COVAR
self._gyro_bias = self._gyro_avg.value
gyro_covar = self._gyro_avg.covar * Engine.SENSOR_COVAR_AMPLIFIER
accel_covar = self._accel_avg.covar * Engine.SENSOR_COVAR_AMPLIFIER
mag_ref = R_from_body_to_world.inverse() * self._mag_avg.value
mag_covar = self._mag_avg.covar * Engine.SENSOR_COVAR_AMPLIFIER
# initialize the kalman filter here.
self._filter.set_initial_pose(R_from_body_to_world, initial_pose_covar)
self._filter.set_sensor_covar(gyro_covar, accel_covar, mag_covar)
self._filter.set_references(gravity_in_world, mag_ref)
self._state = Engine.STATE_RUNNING
print("[EngineState] transit from CALIBRATE_STATIC to RUNNING")
print("initial orientation = {}\nroll = {}, pitch = {}, yaw = {}".format(
R_from_body_to_world.ln(), R_from_body_to_world.get_roll(),
R_from_body_to_world.get_pitch(), R_from_body_to_world.get_yaw()))
print("gravity in world = {}".format(gravity_in_world))
print("gyro bias = {}".format(self._gyro_bias))
print("gyro covar = \n{}".format(gyro_covar))
print("accel covar = \n{}".format(accel_covar))
print("mag ref = {}".format(mag_ref))
print("mag covar = \n{}".format(mag_covar))
elif (self._state == Engine.STATE_RUNNING):
dt = t - self._last_update_time
# always do gyro update
gyro_calibrated = gyro - self._gyro_bias
self._filter.process_update(gyro_calibrated, dt)
# do accel update iff gravity is dominant
if (np.linalg.norm(accel) < Engine.ACCEL_NO_MOTION_THRESHOLD):
self._filter.acc_update(accel)
else:
print("[ACC] rejected")
# do mag update iff mag reading matchs mag param
mag_calibrated = self._mag_calibrator.calibrate_measurement(mag)
if (mag_calibrated is not None):
self._filter.mag_update(mag_calibrated)
else:
print("[MAG] rejected")
else:
# invalid state -- should not happen
assert(False)
self._last_update_time = t
def get_orientation_in_world(self):
'''
:return transform from current chip to world.
'''
if (self._state < Engine.STATE_RUNNING):
return None
return self._filter.get_estimate_mean().inverse()
def get_orientation_covar(self):
if (self._state < Engine.STATE_RUNNING):
return None
return self._filter.get_estimate_covar()
def get_state_string(self):
"""
:return a string representing the internal state.
"""
if (self._state == Engine.STATE_INIT):
return "Init"
elif (self._state == Engine.STATE_CALIBRATE_MOVING):
return "Moving calibration"
elif (self._state == Engine.STATE_CALIBRATE_STATIC):
return "Static calibration"
elif (self._state == Engine.STATE_RUNNING):
return "Running"
else:
raise RuntimeError("Invalid state: {}".format(self._state))
def _check_no_motion(self, gyro, accel):
"""
:return True if the barely moving
"""
tg = Engine.GYRO_NO_MOTION_THRESHOLD
ta = Engine.ACCEL_NO_MOTION_THRESHOLD
# trivial motion both instantaneously and recently
return ((np.linalg.norm(gyro) < tg) and \
(np.linalg.norm(self._gyro_lp.value) < tg) and \
(np.linalg.norm(accel) < ta) and \
(np.linalg.norm(self._accel_lp.value) < ta))
def _update_static_calibration(self, gyro, accel, mag):
"""
estimate gyro offset, mag ref and accel ref
:return True if finished.
"""
self._gyro_avg.update(gyro)
self._accel_avg.update(accel)
self._mag_avg.update(mag - self._mag_bias)
return ((self._gyro_avg.count > Engine.STATIC_CAL_SAMPLE_COUNT) and \
(self._accel_avg.count > Engine.STATIC_CAL_SAMPLE_COUNT) and \
(self._mag_avg.count > Engine.STATIC_CAL_SAMPLE_COUNT))
if (__name__ == '__main__'):
pass
| import sys
import os
sys.path.insert(1, os.path.join(sys.path[0], '..'))
import numpy as np
from base.SO3 import SO3
from base.simple_filter import LowPassFilter, AverageFilter
from estimation.magnetometer_calibrator import MagnetometerCalibrator
from estimation.kalman_filter import KalmanFilterSO3
class Engine:
GYRO_NO_MOTION_THRESHOLD = 0.1
ACCEL_NO_MOTION_THRESHOLD = 10.0 # FIXME we may need a bigger value
LOWPASS_GAIN = 0.9
STATIC_CAL_SAMPLE_COUNT = 200
SENSOR_COVAR_AMPLIFIER = 2.0 # covar obtained after static calibration would be amplified for better stability
INITIAL_POSE_COVAR = 1e1 # diagonal
STATE_INIT = 0
STATE_CALIBRATE_MOVING = 1 # for mag bias
STATE_CALIBRATE_STATIC = 2 # for gyro bias, mag ref and accel ref
STATE_RUNNING = 3
def __init__(self):
self._filter = KalmanFilterSO3() # estimates the transform from current chip to initial chip
self._gyro_lp = LowPassFilter(Engine.LOWPASS_GAIN)
self._accel_lp = LowPassFilter(Engine.LOWPASS_GAIN)
self._gyro_avg = AverageFilter()
self._accel_avg = AverageFilter()
self._mag_avg = AverageFilter()
self._gyro_bias = None
self._mag_calibrator = MagnetometerCalibrator(np.zeros(3))
self._state = Engine.STATE_INIT
self._last_update_time = 0.0
def set_mag_param(self, mag_bias):
'''
update the mag parameters.
Could be used as a hacky way to advance the internal state machine,
but only in the simulation.
'''
if (self._state < Engine.STATE_CALIBRATE_STATIC):
self._state = Engine.STATE_CALIBRATE_STATIC
self._mag_bias = mag_bias
def update(self, t, gyro, accel, mag):
"""
"""
t *= 1.0
gyro = np.array(gyro, dtype=np.float)
accel = np.array(accel, dtype=np.float)
mag = np.array(mag, dtype=np.float)
# update low pass filters
self._gyro_lp.update(gyro)
self._accel_lp.update(accel)
no_motion = self._check_no_motion(gyro, accel)
if (self._state == Engine.STATE_INIT):
# wait until starts to move
if (not no_motion):
print("[EngineState] transit from INIT to CALIBRATE_MOVING")
self._state = Engine.STATE_CALIBRATE_MOVING
elif (self._state == Engine.STATE_CALIBRATE_MOVING):
self._mag_calibrator.update(mag)
self._mag_bias = self._mag_calibrator.bias
# wait until found bias, and stopped moving
if ((self._mag_bias is not None) and \
(no_motion)):
print("[EngineState] transit from CALIBRATE_MOVING to CALIBRATE_STATIC")
print("mag bias is {}".format(self._mag_bias))
self._state = Engine.STATE_CALIBRATE_STATIC
elif (self._state == Engine.STATE_CALIBRATE_STATIC):
if (no_motion): # only update when device is stationary
done = self._update_static_calibration(gyro, accel, mag)
if (done):
# NOTE: acceleration is in the opposite direction of the corresponding inertial force
gravity_in_body = self._accel_avg.value
gravity_in_world = np.array([0, 0, 1], dtype=np.float) * np.linalg.norm(gravity_in_body)
R_from_body_to_world = SO3.from_two_directions(gravity_in_body, gravity_in_world)
initial_pose_covar = np.eye(3) * Engine.INITIAL_POSE_COVAR
self._gyro_bias = self._gyro_avg.value
gyro_covar = self._gyro_avg.covar * Engine.SENSOR_COVAR_AMPLIFIER
accel_covar = self._accel_avg.covar * Engine.SENSOR_COVAR_AMPLIFIER
mag_ref = R_from_body_to_world.inverse() * self._mag_avg.value
mag_covar = self._mag_avg.covar * Engine.SENSOR_COVAR_AMPLIFIER
# initialize the kalman filter here.
self._filter.set_initial_pose(R_from_body_to_world, initial_pose_covar)
self._filter.set_sensor_covar(gyro_covar, accel_covar, mag_covar)
self._filter.set_references(gravity_in_world, mag_ref)
self._state = Engine.STATE_RUNNING
print("[EngineState] transit from CALIBRATE_STATIC to RUNNING")
print("initial orientation = {}\nroll = {}, pitch = {}, yaw = {}".format(
R_from_body_to_world.ln(), R_from_body_to_world.get_roll(),
R_from_body_to_world.get_pitch(), R_from_body_to_world.get_yaw()))
print("gravity in world = {}".format(gravity_in_world))
print("gyro bias = {}".format(self._gyro_bias))
print("gyro covar = \n{}".format(gyro_covar))
print("accel covar = \n{}".format(accel_covar))
print("mag ref = {}".format(mag_ref))
print("mag covar = \n{}".format(mag_covar))
elif (self._state == Engine.STATE_RUNNING):
dt = t - self._last_update_time
# always do gyro update
gyro_calibrated = gyro - self._gyro_bias
self._filter.process_update(gyro_calibrated, dt)
# do accel update iff gravity is dominant
if (np.linalg.norm(accel) < Engine.ACCEL_NO_MOTION_THRESHOLD):
self._filter.acc_update(accel)
else:
print("[ACC] rejected")
# do mag update iff mag reading matchs mag param
mag_calibrated = self._mag_calibrator.calibrate_measurement(mag)
if (mag_calibrated is not None):
self._filter.mag_update(mag_calibrated)
else:
print("[MAG] rejected")
else:
# invalid state -- should not happen
assert(False)
self._last_update_time = t
def get_orientation_in_world(self):
'''
:return transform from current chip to world.
'''
if (self._state < Engine.STATE_RUNNING):
return None
return self._filter.get_estimate_mean().inverse()
def get_orientation_covar(self):
if (self._state < Engine.STATE_RUNNING):
return None
return self._filter.get_estimate_covar()
def get_state_string(self):
"""
:return a string representing the internal state.
"""
if (self._state == Engine.STATE_INIT):
return "Init"
elif (self._state == Engine.STATE_CALIBRATE_MOVING):
return "Moving calibration"
elif (self._state == Engine.STATE_CALIBRATE_STATIC):
return "Static calibration"
elif (self._state == Engine.STATE_RUNNING):
return "Running"
else:
raise RuntimeError("Invalid state: {}".format(self._state))
def _check_no_motion(self, gyro, accel):
"""
:return True if the barely moving
"""
tg = Engine.GYRO_NO_MOTION_THRESHOLD
ta = Engine.ACCEL_NO_MOTION_THRESHOLD
# trivial motion both instantaneously and recently
return ((np.linalg.norm(gyro) < tg) and \
(np.linalg.norm(self._gyro_lp.value) < tg) and \
(np.linalg.norm(accel) < ta) and \
(np.linalg.norm(self._accel_lp.value) < ta))
def _update_static_calibration(self, gyro, accel, mag):
"""
estimate gyro offset, mag ref and accel ref
:return True if finished.
"""
self._gyro_avg.update(gyro)
self._accel_avg.update(accel)
self._mag_avg.update(mag - self._mag_bias)
return ((self._gyro_avg.count > Engine.STATIC_CAL_SAMPLE_COUNT) and \
(self._accel_avg.count > Engine.STATIC_CAL_SAMPLE_COUNT) and \
(self._mag_avg.count > Engine.STATIC_CAL_SAMPLE_COUNT))
if (__name__ == '__main__'):
pass | en | 0.853114 | # FIXME we may need a bigger value # covar obtained after static calibration would be amplified for better stability # diagonal # for mag bias # for gyro bias, mag ref and accel ref # estimates the transform from current chip to initial chip update the mag parameters. Could be used as a hacky way to advance the internal state machine, but only in the simulation. # update low pass filters # wait until starts to move # wait until found bias, and stopped moving # only update when device is stationary # NOTE: acceleration is in the opposite direction of the corresponding inertial force # initialize the kalman filter here. # always do gyro update # do accel update iff gravity is dominant # do mag update iff mag reading matchs mag param # invalid state -- should not happen :return transform from current chip to world. :return a string representing the internal state. :return True if the barely moving # trivial motion both instantaneously and recently estimate gyro offset, mag ref and accel ref :return True if finished. | 2.364138 | 2 |
webkiller.py | Burkuts-Translate/webkiller | 1 | 6619183 | #!/usr/bin/env python3
import sys
import socket
import os
import time
from helplist import helpp
from modules import cms,Traceroute,reverseip,portscan,iplocation,httpheader,findsharedns,whois,dnslookup,robots,finder,cloudflare,wordpress
try:
from colorama import Fore
except:
os.system("clear")
print(Fore.RED+"""\n Lütfen renklendirme yükleyin\n
pip3 install colorama
""")
#---------------------------
try:
import requests
except:
os.system("clear")
print(Fore.RED+"""\n Lütfen istekleri yükleyin\n
pip3 install requests
""")
#---------------------------
try:
import ipapi
except:
os.system("clear")
print(Fore.RED+"""\n Lütfen ipapi Yükle\n
pip3 install ipapi
""")
#---------------------------
try:
import builtwith
except:
os.system("clear")
print(Fore.RED+"""\n Lütfen builtwith Yükle\n
pip3 install builtwith
""")
#---------------------------
while True:
try:
helpp.Banner()
helpp.infolist1()
number = input(Fore.RED+" ┌─["+Fore.LIGHTGREEN_EX+"WEBKILLER"+Fore.BLUE+"~"+Fore.WHITE+"@HOME"+Fore.RED+"""]
└──╼ """+Fore.WHITE+"卐 ").lower()
except:
print("\n Tanrı Kilidi :) ")
sys.exit()
if number == '4':
print
sys.exit()
#####################
#####################
elif number == "3":
helpp.infolist3()
#####################
elif number == "":
print(Fore.RED+" [!]"+Fore.BLUE+" LLütfen Numara Giriniz :))))")
input("")
#----------------------------------------------------------------------------------
#Information Gathering
elif number == '1':
try:
helpp.Banner()
helpp.infolist2()
infor = input(Fore.RED+" ┌─["+Fore.LIGHTGREEN_EX+"WEBKILLER"+Fore.BLUE+"~"+Fore.WHITE+"@HOME"+Fore.RED+"/"+Fore.CYAN+"Bilgi Toplama"+Fore.RED+"""]
└──╼ """+Fore.WHITE+"卐 ").lower()
if infor == "1":
helpp.Banner()
cloudflare.__start__()
#####################
elif infor == "2":
helpp.Banner()
cms.__start__()
#####################
elif infor == "3":
helpp.Banner()
Traceroute.__start__()
#####################
elif infor == "4":
helpp.Banner()
reverseip.__start__()
#####################
elif infor == "5":
helpp.Banner()
portscan.__start__()
#####################
elif infor == "6":
helpp.Banner()
iplocation.__start__()
#####################
elif infor == "7":
helpp.Banner()
httpheader.__start__()
#####################
elif infor == "8":
helpp.Banner()
findsharedns.__start__()
#####################
elif infor == "9":
helpp.Banner()
whois.__start__()
#####################
elif infor == "10":
helpp.Banner()
dnslookup.__start__()
#####################
elif infor == "11":
helpp.Banner()
robots.__start__()
#####################
elif infor == "12":
helpp.Banner()
finder.__start__()
#####################
elif infor == "13":
input(Fore.RED+" [!]"+Fore.GREEN+" Menüye Dön (Enter Tuşuna Basın...) ")
#####################
elif infor == "14":
sys.exit()
#####################
elif infor == "":
input(Fore.RED+" [!]"+Fore.GREEN+" Lütfen Numarayı Giriniz (Enter Tuşuna Basın...) ")
except KeyboardInterrupt:
print("")
sys.exit()
#------------------------------------------------------------------------------------------------
elif number == "2":
helpp.infolist4()
try:
numcms = input(Fore.RED+" ┌─["+Fore.LIGHTGREEN_EX+"WEBKILLER"+Fore.BLUE+"~"+Fore.WHITE+"@HOME"+Fore.RED+"/"+Fore.CYAN+"CMS Algılama"+Fore.RED+"""]
└──╼ """+Fore.WHITE+"卐 ").lower()
except:
print("")
sys.exit()
if numcms == "1":
helpp.infowp()
try:
wp = input(Fore.RED+" ┌─["+Fore.LIGHTGREEN_EX+"WEBKILLER"+Fore.BLUE+"~"+Fore.WHITE+"@HOME"+Fore.RED+"/"+Fore.CYAN+"CMN"+Fore.RED+"/"+Fore.LIGHTYELLOW_EX+"WordPress"+Fore.RED+"""]
└──╼ """+Fore.WHITE+"卐 ").lower()
except:
print("")
sys.exit()
if wp == "1":
helpp.Banner()
wordpress.wpplug()
elif wp == "2":
helpp.Banner()
wordpress.user()
elif wp == "3":
try:
input(Fore.GREEN+" [*] Menüye dön (Enter Tuşuna Basın...) ")
except:
print("\n")
sys.exit()
elif numcms == "2":
helpp.Banner()
print(Fore.RED+" [!]"+Fore.BLUE+" Çok Yakın'da ! ")
try:
input(Fore.GREEN+" [*] Menüye Dön (Enter Tuşuna Basın...) ")
except:
print("")
sys.exit()
elif numcms == "3":
helpp.Banner()
print(Fore.RED+" [!]"+Fore.BLUE+" Çok Yakın'da ! ")
try:
input(Fore.GREEN+" [*] Menüye Dön (Enter Tuşuna Basın...) ")
except:
print("")
sys.exit()
elif numcms == "4":
try:
input(Fore.GREEN+" [*] Menüye Dön (Enter Tuşuna Basın...) ")
except:
print("")
sys.exit()
elif numcms == "" or False:
try:
input(Fore.GREEN+" [*] Menüye Dön (Enter Tuşuna Basın...) ")
except:
print("")
sys.exit()
| #!/usr/bin/env python3
import sys
import socket
import os
import time
from helplist import helpp
from modules import cms,Traceroute,reverseip,portscan,iplocation,httpheader,findsharedns,whois,dnslookup,robots,finder,cloudflare,wordpress
try:
from colorama import Fore
except:
os.system("clear")
print(Fore.RED+"""\n Lütfen renklendirme yükleyin\n
pip3 install colorama
""")
#---------------------------
try:
import requests
except:
os.system("clear")
print(Fore.RED+"""\n Lütfen istekleri yükleyin\n
pip3 install requests
""")
#---------------------------
try:
import ipapi
except:
os.system("clear")
print(Fore.RED+"""\n Lütfen ipapi Yükle\n
pip3 install ipapi
""")
#---------------------------
try:
import builtwith
except:
os.system("clear")
print(Fore.RED+"""\n Lütfen builtwith Yükle\n
pip3 install builtwith
""")
#---------------------------
while True:
try:
helpp.Banner()
helpp.infolist1()
number = input(Fore.RED+" ┌─["+Fore.LIGHTGREEN_EX+"WEBKILLER"+Fore.BLUE+"~"+Fore.WHITE+"@HOME"+Fore.RED+"""]
└──╼ """+Fore.WHITE+"卐 ").lower()
except:
print("\n Tanrı Kilidi :) ")
sys.exit()
if number == '4':
print
sys.exit()
#####################
#####################
elif number == "3":
helpp.infolist3()
#####################
elif number == "":
print(Fore.RED+" [!]"+Fore.BLUE+" LLütfen Numara Giriniz :))))")
input("")
#----------------------------------------------------------------------------------
#Information Gathering
elif number == '1':
try:
helpp.Banner()
helpp.infolist2()
infor = input(Fore.RED+" ┌─["+Fore.LIGHTGREEN_EX+"WEBKILLER"+Fore.BLUE+"~"+Fore.WHITE+"@HOME"+Fore.RED+"/"+Fore.CYAN+"Bilgi Toplama"+Fore.RED+"""]
└──╼ """+Fore.WHITE+"卐 ").lower()
if infor == "1":
helpp.Banner()
cloudflare.__start__()
#####################
elif infor == "2":
helpp.Banner()
cms.__start__()
#####################
elif infor == "3":
helpp.Banner()
Traceroute.__start__()
#####################
elif infor == "4":
helpp.Banner()
reverseip.__start__()
#####################
elif infor == "5":
helpp.Banner()
portscan.__start__()
#####################
elif infor == "6":
helpp.Banner()
iplocation.__start__()
#####################
elif infor == "7":
helpp.Banner()
httpheader.__start__()
#####################
elif infor == "8":
helpp.Banner()
findsharedns.__start__()
#####################
elif infor == "9":
helpp.Banner()
whois.__start__()
#####################
elif infor == "10":
helpp.Banner()
dnslookup.__start__()
#####################
elif infor == "11":
helpp.Banner()
robots.__start__()
#####################
elif infor == "12":
helpp.Banner()
finder.__start__()
#####################
elif infor == "13":
input(Fore.RED+" [!]"+Fore.GREEN+" Menüye Dön (Enter Tuşuna Basın...) ")
#####################
elif infor == "14":
sys.exit()
#####################
elif infor == "":
input(Fore.RED+" [!]"+Fore.GREEN+" Lütfen Numarayı Giriniz (Enter Tuşuna Basın...) ")
except KeyboardInterrupt:
print("")
sys.exit()
#------------------------------------------------------------------------------------------------
elif number == "2":
helpp.infolist4()
try:
numcms = input(Fore.RED+" ┌─["+Fore.LIGHTGREEN_EX+"WEBKILLER"+Fore.BLUE+"~"+Fore.WHITE+"@HOME"+Fore.RED+"/"+Fore.CYAN+"CMS Algılama"+Fore.RED+"""]
└──╼ """+Fore.WHITE+"卐 ").lower()
except:
print("")
sys.exit()
if numcms == "1":
helpp.infowp()
try:
wp = input(Fore.RED+" ┌─["+Fore.LIGHTGREEN_EX+"WEBKILLER"+Fore.BLUE+"~"+Fore.WHITE+"@HOME"+Fore.RED+"/"+Fore.CYAN+"CMN"+Fore.RED+"/"+Fore.LIGHTYELLOW_EX+"WordPress"+Fore.RED+"""]
└──╼ """+Fore.WHITE+"卐 ").lower()
except:
print("")
sys.exit()
if wp == "1":
helpp.Banner()
wordpress.wpplug()
elif wp == "2":
helpp.Banner()
wordpress.user()
elif wp == "3":
try:
input(Fore.GREEN+" [*] Menüye dön (Enter Tuşuna Basın...) ")
except:
print("\n")
sys.exit()
elif numcms == "2":
helpp.Banner()
print(Fore.RED+" [!]"+Fore.BLUE+" Çok Yakın'da ! ")
try:
input(Fore.GREEN+" [*] Menüye Dön (Enter Tuşuna Basın...) ")
except:
print("")
sys.exit()
elif numcms == "3":
helpp.Banner()
print(Fore.RED+" [!]"+Fore.BLUE+" Çok Yakın'da ! ")
try:
input(Fore.GREEN+" [*] Menüye Dön (Enter Tuşuna Basın...) ")
except:
print("")
sys.exit()
elif numcms == "4":
try:
input(Fore.GREEN+" [*] Menüye Dön (Enter Tuşuna Basın...) ")
except:
print("")
sys.exit()
elif numcms == "" or False:
try:
input(Fore.GREEN+" [*] Menüye Dön (Enter Tuşuna Basın...) ")
except:
print("")
sys.exit()
| de | 0.374555 | #!/usr/bin/env python3 \n Lütfen renklendirme yükleyin\n
pip3 install colorama #--------------------------- \n Lütfen istekleri yükleyin\n
pip3 install requests #--------------------------- \n Lütfen ipapi Yükle\n
pip3 install ipapi #--------------------------- \n Lütfen builtwith Yükle\n
pip3 install builtwith #--------------------------- ]
└──╼ ##################### ##################### ##################### #---------------------------------------------------------------------------------- #Information Gathering ]
└──╼ ##################### ##################### ##################### ##################### ##################### ##################### ##################### ##################### ##################### ##################### ##################### ##################### ##################### ##################### #------------------------------------------------------------------------------------------------ ]
└──╼ ]
└──╼ | 2.08258 | 2 |
vi_cleaner/sentence_utils.py | CodeLinkIO/Vietnamese-text-normalization | 0 | 6619184 | <reponame>CodeLinkIO/Vietnamese-text-normalization<filename>vi_cleaner/sentence_utils.py
import re
from .symbol_vi import punctuations
def isTextOnly(c: str):
return c.isalnum()
def split_text_sentences(text, regex):
return [e.strip() + d for e, d in zip(re.split(regex, text), re.findall(regex, text)) if e]
def combine_sentences(sentences: list, maxLength: int = 30) -> list:
if len(sentences) <= 1:
return sentences
if len(sentences[0].split(" ")) > maxLength:
return [sentences[0]] + combine_sentences(sentences[1:], maxLength=maxLength)
if len((sentences[0] + sentences[1]).split(" ")) <= maxLength:
return combine_sentences([sentences[0] + " " + sentences[1]]+sentences[2:], maxLength=maxLength)
else:
return [sentences[0]] + combine_sentences(sentences[1:], maxLength=maxLength)
def split_long_sentences(sentences: list, maxLength: int = 30) -> list:
sub_sentences = []
for sentence in sentences:
if len(sentence.split(" ")) > maxLength:
sub_sentences.append(split_text_sentences(sentence, r'[?!.,:;-]'))
else:
sub_sentences.append([sentence])
return sub_sentences
def get_pieces(passage: str, maxLength: int):
sub_sentences = split_long_sentences(split_text_sentences(passage, r'[.!?]'), maxLength)
combined_sub_sentences = [combine_sentences(
i, maxLength) for i in sub_sentences]
flat_list = []
for sublist in combined_sub_sentences:
for item in sublist:
item_chars = set([i for i in item])
if not punctuations.issuperset(item_chars) and any(map(isTextOnly, item_chars)):
flat_list.append(item)
return flat_list | import re
from .symbol_vi import punctuations
def isTextOnly(c: str):
return c.isalnum()
def split_text_sentences(text, regex):
return [e.strip() + d for e, d in zip(re.split(regex, text), re.findall(regex, text)) if e]
def combine_sentences(sentences: list, maxLength: int = 30) -> list:
if len(sentences) <= 1:
return sentences
if len(sentences[0].split(" ")) > maxLength:
return [sentences[0]] + combine_sentences(sentences[1:], maxLength=maxLength)
if len((sentences[0] + sentences[1]).split(" ")) <= maxLength:
return combine_sentences([sentences[0] + " " + sentences[1]]+sentences[2:], maxLength=maxLength)
else:
return [sentences[0]] + combine_sentences(sentences[1:], maxLength=maxLength)
def split_long_sentences(sentences: list, maxLength: int = 30) -> list:
sub_sentences = []
for sentence in sentences:
if len(sentence.split(" ")) > maxLength:
sub_sentences.append(split_text_sentences(sentence, r'[?!.,:;-]'))
else:
sub_sentences.append([sentence])
return sub_sentences
def get_pieces(passage: str, maxLength: int):
sub_sentences = split_long_sentences(split_text_sentences(passage, r'[.!?]'), maxLength)
combined_sub_sentences = [combine_sentences(
i, maxLength) for i in sub_sentences]
flat_list = []
for sublist in combined_sub_sentences:
for item in sublist:
item_chars = set([i for i in item])
if not punctuations.issuperset(item_chars) and any(map(isTextOnly, item_chars)):
flat_list.append(item)
return flat_list | none | 1 | 3.101355 | 3 | |
cgi-bin/objetos/patrimonio/Vendedor.py | wsampaio/multi_agenda_py | 0 | 6619185 | #
# Este arquivo é parte do programa multi_agenda
#
# Esta obra está licenciada com uma
# Licença Creative Commons Atribuição 4.0 Internacional.
# (CC BY 4.0 Internacional)
#
# Para ver uma cópia da licença, visite
# https://creativecommons.org/licenses/by/4.0/legalcode
#
# <NAME> - <EMAIL>
# https://www.linkedin.com/in/wellsampaio/
#
"""
CREATE TABLE vendedores (
codVendedor INTEGER PRIMARY KEY NOT NULL,
vendedor STRING DEFAULT (''),
endereco STRING DEFAULT (''),
contato STRING DEFAULT (''),
obs STRING DEFAULT ('')
);
"""
class Vendedor:
__codVendedor = 0
__vendedor = ""
__endereco = ""
__contato = ""
__obs = ""
def __init__(self):
pass
def povoarObj(self, array):
self.setCodVendedor(array[0])
self.setVendedor(array[1])
self.setEndereco(array[2])
self.setContato(array[3])
self.setObs(array[4])
return self
def getCodVendedor(self):
return int(self.__codVendedor)
def setCodVendedor(self, codVendedor):
try:
self.__codVendedor = int(codVendedor)
except ValueError:
self.__codVendedor = self.getCodVendedor()
def getVendedor(self):
return str(self.__vendedor)
def setVendedor(self, vendedor):
try:
self.__vendedor = str(vendedor)
except ValueError:
self.__vendedor = self.getVendedor()
def getEndereco(self):
return str(self.__endereco)
def setEndereco(self, endereco):
try:
self.__endereco = str(endereco)
except ValueError:
self.__endereco = self.getEndereco()
def getContato(self):
return str(self.__contato)
def setContato(self, contato):
try:
self.__contato = str(contato)
except ValueError:
self.__contato = self.getContato()
def getObs(self):
return str(self.__obs)
def setObs(self, obs):
try:
self.__obs = str(obs)
except ValueError:
self.__obs = self.getObs()
| #
# Este arquivo é parte do programa multi_agenda
#
# Esta obra está licenciada com uma
# Licença Creative Commons Atribuição 4.0 Internacional.
# (CC BY 4.0 Internacional)
#
# Para ver uma cópia da licença, visite
# https://creativecommons.org/licenses/by/4.0/legalcode
#
# <NAME> - <EMAIL>
# https://www.linkedin.com/in/wellsampaio/
#
"""
CREATE TABLE vendedores (
codVendedor INTEGER PRIMARY KEY NOT NULL,
vendedor STRING DEFAULT (''),
endereco STRING DEFAULT (''),
contato STRING DEFAULT (''),
obs STRING DEFAULT ('')
);
"""
class Vendedor:
__codVendedor = 0
__vendedor = ""
__endereco = ""
__contato = ""
__obs = ""
def __init__(self):
pass
def povoarObj(self, array):
self.setCodVendedor(array[0])
self.setVendedor(array[1])
self.setEndereco(array[2])
self.setContato(array[3])
self.setObs(array[4])
return self
def getCodVendedor(self):
return int(self.__codVendedor)
def setCodVendedor(self, codVendedor):
try:
self.__codVendedor = int(codVendedor)
except ValueError:
self.__codVendedor = self.getCodVendedor()
def getVendedor(self):
return str(self.__vendedor)
def setVendedor(self, vendedor):
try:
self.__vendedor = str(vendedor)
except ValueError:
self.__vendedor = self.getVendedor()
def getEndereco(self):
return str(self.__endereco)
def setEndereco(self, endereco):
try:
self.__endereco = str(endereco)
except ValueError:
self.__endereco = self.getEndereco()
def getContato(self):
return str(self.__contato)
def setContato(self, contato):
try:
self.__contato = str(contato)
except ValueError:
self.__contato = self.getContato()
def getObs(self):
return str(self.__obs)
def setObs(self, obs):
try:
self.__obs = str(obs)
except ValueError:
self.__obs = self.getObs()
| pt | 0.691171 | # # Este arquivo é parte do programa multi_agenda # # Esta obra está licenciada com uma # Licença Creative Commons Atribuição 4.0 Internacional. # (CC BY 4.0 Internacional) # # Para ver uma cópia da licença, visite # https://creativecommons.org/licenses/by/4.0/legalcode # # <NAME> - <EMAIL> # https://www.linkedin.com/in/wellsampaio/ # CREATE TABLE vendedores ( codVendedor INTEGER PRIMARY KEY NOT NULL, vendedor STRING DEFAULT (''), endereco STRING DEFAULT (''), contato STRING DEFAULT (''), obs STRING DEFAULT ('') ); | 3.177276 | 3 |
examples/02-optimizing-basis.py | Jaikinator/dqc | 39 | 6619186 | import dqc
import torch
import xitorch as xt
import xitorch.optimize
basis = {
"H": dqc.loadbasis("1:3-21G"), # load 3-21G basis for atomz = 1
}
bpacker = xt.Packer(basis) # use xitorch's Packer to get the tensors within a structure
bparams = bpacker.get_param_tensor() # get the parameters of the basis as one tensor
def fcn(bparams, bpacker):
# returns the same structure as basis above, but the parameters (alphas
# and coeffs) are changed according to values in bparams
basis = bpacker.construct_from_tensor(bparams)
m = dqc.Mol("H 1 0 0; H -1 0 0", basis=basis)
qc = dqc.HF(m).run()
ene = qc.energy()
return ene
print("Original basis")
print(basis)
min_bparams = xitorch.optimize.minimize(fcn, bparams, (bpacker,), method="gd",
step=2e-1, maxiter=200, verbose=True)
opt_basis = bpacker.construct_from_tensor(min_bparams)
print("Optimized basis")
print(opt_basis)
| import dqc
import torch
import xitorch as xt
import xitorch.optimize
basis = {
"H": dqc.loadbasis("1:3-21G"), # load 3-21G basis for atomz = 1
}
bpacker = xt.Packer(basis) # use xitorch's Packer to get the tensors within a structure
bparams = bpacker.get_param_tensor() # get the parameters of the basis as one tensor
def fcn(bparams, bpacker):
# returns the same structure as basis above, but the parameters (alphas
# and coeffs) are changed according to values in bparams
basis = bpacker.construct_from_tensor(bparams)
m = dqc.Mol("H 1 0 0; H -1 0 0", basis=basis)
qc = dqc.HF(m).run()
ene = qc.energy()
return ene
print("Original basis")
print(basis)
min_bparams = xitorch.optimize.minimize(fcn, bparams, (bpacker,), method="gd",
step=2e-1, maxiter=200, verbose=True)
opt_basis = bpacker.construct_from_tensor(min_bparams)
print("Optimized basis")
print(opt_basis)
| en | 0.862406 | # load 3-21G basis for atomz = 1 # use xitorch's Packer to get the tensors within a structure # get the parameters of the basis as one tensor # returns the same structure as basis above, but the parameters (alphas # and coeffs) are changed according to values in bparams | 2.308123 | 2 |
tests/test_save.py | patarapolw/pyexcel-xlsxwx | 2 | 6619187 | import pytest
from pathlib import Path
import pyexcel
import pyexcel_xlsxwx
@pytest.mark.parametrize("in_file", ["test.xlsx"])
@pytest.mark.parametrize(
"config",
[None, "config1.yaml", {"worksheet": {"_default": {"freeze_panes": None}}}],
)
def test_save(in_file, config, request):
if isinstance(config, str):
config = Path("tests/input").joinpath(config)
assert config.exists()
config = str(config)
data = pyexcel.get_book_dict(file_name=str(Path("tests/input").joinpath(in_file)))
pyexcel_xlsxwx.save_data(
str(Path("tests/output").joinpath(request.node.name).with_suffix(".xlsx")),
data,
config=config,
)
| import pytest
from pathlib import Path
import pyexcel
import pyexcel_xlsxwx
@pytest.mark.parametrize("in_file", ["test.xlsx"])
@pytest.mark.parametrize(
"config",
[None, "config1.yaml", {"worksheet": {"_default": {"freeze_panes": None}}}],
)
def test_save(in_file, config, request):
if isinstance(config, str):
config = Path("tests/input").joinpath(config)
assert config.exists()
config = str(config)
data = pyexcel.get_book_dict(file_name=str(Path("tests/input").joinpath(in_file)))
pyexcel_xlsxwx.save_data(
str(Path("tests/output").joinpath(request.node.name).with_suffix(".xlsx")),
data,
config=config,
)
| none | 1 | 2.181485 | 2 | |
datasetsnx/analyser.py | ckxy/part-of-hitogata | 0 | 6619188 | import copy
import bisect
import numpy as np
from tqdm import tqdm
def image_analysis(dataset, **kwargs):
mode = kwargs['mode']
if mode == 'aspect_ratio':
info = []
for i in tqdm(range(len(dataset))):
info_dict = dataset.get_data_info(i)
info.append(info_dict['h'] / info_dict['w'])
return quantize(info, kwargs['split'])
elif mode == 'len':
return len(dataset)
else:
raise ValueError
def quantize(x, bins):
bins = copy.copy(bins)
bins = sorted(bins)
quantized = list(map(lambda y: bisect.bisect_right(bins, y), x))
return quantized
| import copy
import bisect
import numpy as np
from tqdm import tqdm
def image_analysis(dataset, **kwargs):
mode = kwargs['mode']
if mode == 'aspect_ratio':
info = []
for i in tqdm(range(len(dataset))):
info_dict = dataset.get_data_info(i)
info.append(info_dict['h'] / info_dict['w'])
return quantize(info, kwargs['split'])
elif mode == 'len':
return len(dataset)
else:
raise ValueError
def quantize(x, bins):
bins = copy.copy(bins)
bins = sorted(bins)
quantized = list(map(lambda y: bisect.bisect_right(bins, y), x))
return quantized
| none | 1 | 2.615422 | 3 | |
modules/errors/Errors.py | jaiwardhan/raspimon | 0 | 6619189 | """
jaiwardhan/Raspimon
@author: <NAME>, 2021
Copyright 2021-present
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from modules.comms.TelegramRelay import PiMonBot
import sys
class ErrorTypes:
"""Defines error `type`s to properly structure errors
"""
RESOURCE_MISSING = "Resource Missing"
UNRECOGNIZED = "Unrecognized"
DENY_RESOLVE = "Unresolvable"
class ErrorCategories:
"""Defines error `category`ies to properly structure errors
"""
ILLEGAL = "Illegal"
BAD_ARGUMENT = "Bad Argument"
class Errors:
"""Error objects to properly store and format custom errors which
can be relayed to an external channel
"""
Types = ErrorTypes
Categories = ErrorCategories
def __init__(self, msg, category = ErrorCategories.ILLEGAL, error_type = ErrorTypes.RESOURCE_MISSING):
self.category = category
self.error_type = error_type
self.msg = msg
def relay(self):
"""Relay the error object to the external channel"""
Errors.throw(self.category, self.error_type, self.msg)
@staticmethod
def format(category, error_type, msg):
"""Format the error attributes to an explanable string
Args:
category (str): The category to which this error belongs, preferably defined
by the `ErrorCategories` class
error_type (str): The error type to which this error tends to be in, preferably
defined by the `ErrorTypes` class
msg (str): The custom error explanation as sent by the thrower
Returns:
str: Explanable string which can be logged to sent to an external channel
"""
return "🔥 " + getattr(Errors.Categories, category) + ": " +\
getattr(Errors.Types, error_type) + ":: " +\
msg
@staticmethod
def format_obj(error_obj):
"""Format the error object's attributes to an explanable string. See `Errors.format` for a better explanation
Args:
error_obj (Error): The error object
Returns:
str: Explanable string which can be logged to sent to an external channel
"""
return "🔥 " + getattr(Errors.Categories, error_obj.category) + ": " +\
getattr(Errors.Types, error_obj.error_type) + ":: " +\
error_obj.msg
@staticmethod
def throw(category, error_type, msg):
"""Throw the `format`ted error to an external channel
Args:
category (str): The category to which this error belongs, preferably defined
by the `ErrorCategories` class
error_type (str): The error type to which this error tends to be in, preferably
defined by the `ErrorTypes` class
msg (str): The custom error explanation as sent by the thrower
"""
if msg is None or len(str(msg)) == 0 or\
not hasattr(Errors.Categories, category) or \
not hasattr(Errors.Types, error_type):
return
PiMonBot.send(Errors.format(category, error_type, msg))
Errors.die(msg)
@staticmethod
def die(with_message):
"""Just die with a scream
Args:
with_message (str): Death note just before program termination
"""
sys.exit(with_message)
| """
jaiwardhan/Raspimon
@author: <NAME>, 2021
Copyright 2021-present
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from modules.comms.TelegramRelay import PiMonBot
import sys
class ErrorTypes:
"""Defines error `type`s to properly structure errors
"""
RESOURCE_MISSING = "Resource Missing"
UNRECOGNIZED = "Unrecognized"
DENY_RESOLVE = "Unresolvable"
class ErrorCategories:
"""Defines error `category`ies to properly structure errors
"""
ILLEGAL = "Illegal"
BAD_ARGUMENT = "Bad Argument"
class Errors:
"""Error objects to properly store and format custom errors which
can be relayed to an external channel
"""
Types = ErrorTypes
Categories = ErrorCategories
def __init__(self, msg, category = ErrorCategories.ILLEGAL, error_type = ErrorTypes.RESOURCE_MISSING):
self.category = category
self.error_type = error_type
self.msg = msg
def relay(self):
"""Relay the error object to the external channel"""
Errors.throw(self.category, self.error_type, self.msg)
@staticmethod
def format(category, error_type, msg):
"""Format the error attributes to an explanable string
Args:
category (str): The category to which this error belongs, preferably defined
by the `ErrorCategories` class
error_type (str): The error type to which this error tends to be in, preferably
defined by the `ErrorTypes` class
msg (str): The custom error explanation as sent by the thrower
Returns:
str: Explanable string which can be logged to sent to an external channel
"""
return "🔥 " + getattr(Errors.Categories, category) + ": " +\
getattr(Errors.Types, error_type) + ":: " +\
msg
@staticmethod
def format_obj(error_obj):
"""Format the error object's attributes to an explanable string. See `Errors.format` for a better explanation
Args:
error_obj (Error): The error object
Returns:
str: Explanable string which can be logged to sent to an external channel
"""
return "🔥 " + getattr(Errors.Categories, error_obj.category) + ": " +\
getattr(Errors.Types, error_obj.error_type) + ":: " +\
error_obj.msg
@staticmethod
def throw(category, error_type, msg):
"""Throw the `format`ted error to an external channel
Args:
category (str): The category to which this error belongs, preferably defined
by the `ErrorCategories` class
error_type (str): The error type to which this error tends to be in, preferably
defined by the `ErrorTypes` class
msg (str): The custom error explanation as sent by the thrower
"""
if msg is None or len(str(msg)) == 0 or\
not hasattr(Errors.Categories, category) or \
not hasattr(Errors.Types, error_type):
return
PiMonBot.send(Errors.format(category, error_type, msg))
Errors.die(msg)
@staticmethod
def die(with_message):
"""Just die with a scream
Args:
with_message (str): Death note just before program termination
"""
sys.exit(with_message)
| en | 0.777122 | jaiwardhan/Raspimon @author: <NAME>, 2021 Copyright 2021-present Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Defines error `type`s to properly structure errors Defines error `category`ies to properly structure errors Error objects to properly store and format custom errors which can be relayed to an external channel Relay the error object to the external channel Format the error attributes to an explanable string Args: category (str): The category to which this error belongs, preferably defined by the `ErrorCategories` class error_type (str): The error type to which this error tends to be in, preferably defined by the `ErrorTypes` class msg (str): The custom error explanation as sent by the thrower Returns: str: Explanable string which can be logged to sent to an external channel Format the error object's attributes to an explanable string. See `Errors.format` for a better explanation Args: error_obj (Error): The error object Returns: str: Explanable string which can be logged to sent to an external channel Throw the `format`ted error to an external channel Args: category (str): The category to which this error belongs, preferably defined by the `ErrorCategories` class error_type (str): The error type to which this error tends to be in, preferably defined by the `ErrorTypes` class msg (str): The custom error explanation as sent by the thrower Just die with a scream Args: with_message (str): Death note just before program termination | 2.779534 | 3 |
qr_make.py | Lockdef/LoginQRCode | 0 | 6619190 | import qrcode
import sqlite3
from datetime import datetime
time = datetime.now().strftime("%Y%m%d%H%M%S")
print('ユーザー名を入力してください。')
name = input()
print('パスワードを設定してください。')
password = input()
# --- データベースに保存
con = sqlite3.connect('user.db')
cursor = con.cursor()
p = "INSERT INTO user(name, password) VALUES(?, ?)"
cursor.execute(p, (name, password))
con.commit()
# ---
img = qrcode.make(password)
img.save('{}.png'.format(time)) | import qrcode
import sqlite3
from datetime import datetime
time = datetime.now().strftime("%Y%m%d%H%M%S")
print('ユーザー名を入力してください。')
name = input()
print('パスワードを設定してください。')
password = input()
# --- データベースに保存
con = sqlite3.connect('user.db')
cursor = con.cursor()
p = "INSERT INTO user(name, password) VALUES(?, ?)"
cursor.execute(p, (name, password))
con.commit()
# ---
img = qrcode.make(password)
img.save('{}.png'.format(time)) | ja | 0.986292 | # --- データベースに保存 # --- | 3.236742 | 3 |
setup.py | Purvanshsingh/creditrisk-poc | 3 | 6619191 | <filename>setup.py
from setuptools import setup, find_packages
try:
from pip._internal.network.session import PipSession
from pip._internal.req import parse_requirements
install_requires = parse_requirements("requirements.txt", session=PipSession())
dependencies = [str(package.requirement) for package in install_requires]
except ImportError:
msg = "Your pip version is out of date, please run `pip install --upgrade pip setuptools`"
raise ImportError(msg)
for package_index in range(len(dependencies)):
if dependencies[package_index].startswith("git+"):
dependencies[package_index] = dependencies[package_index].split("=")[1]
setup(
name="creditrisk_poc",
version='0.0.1',
description='Hydra powered API for creditrisk management',
author="Hydra Ecosystem",
author_email="<EMAIL>",
url="https://github.com/HTTP-APIs/hydrus",
py_modules=["cli"],
python_requires=">=3.6",
install_requires=dependencies,
packages=find_packages()
)
| <filename>setup.py
from setuptools import setup, find_packages
try:
from pip._internal.network.session import PipSession
from pip._internal.req import parse_requirements
install_requires = parse_requirements("requirements.txt", session=PipSession())
dependencies = [str(package.requirement) for package in install_requires]
except ImportError:
msg = "Your pip version is out of date, please run `pip install --upgrade pip setuptools`"
raise ImportError(msg)
for package_index in range(len(dependencies)):
if dependencies[package_index].startswith("git+"):
dependencies[package_index] = dependencies[package_index].split("=")[1]
setup(
name="creditrisk_poc",
version='0.0.1',
description='Hydra powered API for creditrisk management',
author="Hydra Ecosystem",
author_email="<EMAIL>",
url="https://github.com/HTTP-APIs/hydrus",
py_modules=["cli"],
python_requires=">=3.6",
install_requires=dependencies,
packages=find_packages()
)
| none | 1 | 1.919102 | 2 | |
car/TF_RefineDet_CIDI3/data/dataAugement.py | donghaiwang/VisualTracking_DRL | 4 | 6619192 | # -*- coding: utf-8 -*-
"""
@author: yangxuefeng
"""
import numpy as np
import tensorflow as tf
IMG_MEAN = np.array((74,75,71), dtype=np.float32)
class Augement():
def __init__(self,image,reg_label_real,cls_label,shape):
self.images = image
self.reg_label_real = reg_label_real
self.cls_label = cls_label
self.shape = shape
def execute(self):
flag = tf.random_uniform(shape=[],minval=3,maxval=4,dtype=tf.int32)
images, reg_label_real, cls_label = tf.case({tf.equal(flag, 0): self.order1,
tf.equal(flag, 1): self.order2,
tf.equal(flag, 2): self.order3,
tf.equal(flag, 3): self.order4
}, exclusive=True)
img_shape = tf.shape(images)
return images, reg_label_real, tf.reshape(cls_label,[-1,1]),img_shape
def order1(self):
images0, reg_label_real0, cls_label0 = self.crop(self.images, self.reg_label_real, self.cls_label)
images1, reg_label_real1, cls_label1 = self.color(images0, reg_label_real0, cls_label0)
images2, reg_label_real2, cls_label2 = self.flip(images1, reg_label_real1, cls_label1)
return images2, reg_label_real2, cls_label2
def order2(self):
images0, reg_label_real0, cls_label0 = self.padding(self.images,self.reg_label_real,self.cls_label,4,self.shape)
images1, reg_label_real1, cls_label1 = self.color(images0, reg_label_real0, cls_label0)
images2, reg_label_real2, cls_label2 = self.flip(images1, reg_label_real1, cls_label1 )
return images2, reg_label_real2, cls_label2
def order3(self):
return self.images,self.reg_label_real,self.cls_label
def order4(self):
is_do = tf.random_uniform(shape=[],minval=0,maxval=2,dtype=tf.int32)
images0, reg_label_real0, cls_label0 = tf.cond(tf.equal(is_do,0),lambda:self.color(self.images, self.reg_label_real, self.cls_label),lambda:self.returnsrc(self.images, self.reg_label_real, self.cls_label))
is_do = tf.random_uniform(shape=[], minval=0, maxval=2, dtype=tf.int32)
images1, reg_label_real1, cls_label1 = tf.cond(tf.equal(is_do, 0),
lambda: self.padding(images0, reg_label_real0, cls_label0,2,self.shape),lambda:self.returnsrc(images0, reg_label_real0, cls_label0))
is_do = tf.random_uniform(shape=[], minval=0, maxval=2, dtype=tf.int32)
images2, reg_label_real2, cls_label2 = tf.cond(tf.equal(is_do, 0),
lambda: self.crop(images1, reg_label_real1, cls_label1),lambda:self.returnsrc(images1, reg_label_real1, cls_label1))
is_do = tf.random_uniform(shape=[], minval=0, maxval=2, dtype=tf.int32)
images3, reg_label_real3, cls_label3 = tf.cond(tf.equal(is_do, 0),
lambda: self.flip(images2, reg_label_real2, cls_label2),lambda:self.returnsrc(images2, reg_label_real2, cls_label2))
return images3, reg_label_real3, cls_label3
def returnsrc(self,images,reg_label_real,cls_label):
return images,reg_label_real,cls_label
def color(self,images,reg_label_real,cls_label):
def f1():
image = tf.image.random_brightness(images, max_delta=32. / 255.)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
return image
def f2():
image = tf.image.random_saturation(images, lower=0.5, upper=1.5)
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
return image
def f3():
image = tf.image.random_contrast(images, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
return image
def f4():
image = tf.image.random_hue(images, max_delta=0.2)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_brightness(image, max_delta=32. / 255.)
return image
color_ordering = tf.random_uniform(shape=[], minval=0, maxval=4, dtype=tf.int32)
image = tf.case({tf.equal(color_ordering, 0): f1,
tf.equal(color_ordering, 1): f2,
tf.equal(color_ordering, 2): f3,
tf.equal(color_ordering, 3): f4},exclusive=True)
return image, reg_label_real, cls_label
def flip(self,images,reg_label_real,cls_label):
image = tf.image.flip_left_right(images)
ymin = reg_label_real[:,0]
xmin = 1.0 - reg_label_real[:,3]
ymax = reg_label_real[:,2]
xmax = 1.0 - reg_label_real[:,1]
reg_label_realNew = tf.stack(values=[ymin, xmin, ymax, xmax], axis=1)
reg_label_realNew = tf.reshape(reg_label_realNew,[-1,4])
return image, reg_label_realNew, cls_label
    def padding(self, images,reg_label_real,cls_label,ratio,shape):
        """'Zoom out' augmentation: place the image at a random offset on a
        larger canvas (scale factor drawn from [1, ratio)) filled with
        IMG_MEAN, and rescale the normalized boxes to the new canvas.

        Args:
            images: HxWx3 image tensor.
            reg_label_real: [N, 4] normalized boxes (ymin, xmin, ymax, xmax).
            cls_label: class labels, passed through unchanged.
            ratio: upper bound (exclusive) of the canvas enlargement factor.
            shape: original image shape tensor; shape[0]=height, shape[1]=width.
        """
        ratios = tf.random_uniform(shape=[], minval=1.0, maxval=ratio, dtype=tf.float32)
        shapesize = tf.cast(shape,tf.float32)
        width = shapesize[1] * ratios
        hight = shapesize[0] * ratios
        # Random placement of the original image inside the enlarged canvas.
        offset_h = tf.random_uniform(shape=[],minval=0,dtype=tf.float32,maxval=hight-shapesize[0])
        offset_w = tf.random_uniform(shape=[],minval=0,dtype=tf.float32,maxval=width-shapesize[1])
        # Truncate offsets and canvas size to whole pixels before computing
        # the pad amounts (top/left = offset, bottom/right = remainder).
        offset_h = tf.cast(offset_h,tf.int32)
        offset_w = tf.cast(offset_w, tf.int32)
        width = tf.cast(width,tf.int32)
        hight = tf.cast(hight, tf.int32)
        padding = [[offset_h,hight-tf.cast(shapesize[0],tf.int32)-tf.cast(offset_h,tf.int32)],[offset_w,width-tf.cast(shapesize[1],tf.int32)-tf.cast(offset_w,tf.int32)]]
        # Pad each channel separately with its per-channel mean fill value.
        image_0 = tf.pad(tensor=images[:,:,0],paddings=padding,constant_values=IMG_MEAN[0])
        image_1 = tf.pad(tensor=images[:, :, 1], paddings=padding, constant_values=IMG_MEAN[1])
        image_2 = tf.pad(tensor=images[:, :, 2], paddings=padding, constant_values=IMG_MEAN[2])
        image = tf.stack(values=[image_0,image_1,image_2],axis=-1)
        # Back to float for the box arithmetic; using the truncated values
        # keeps the normalization consistent with the padded canvas size.
        offset_h = tf.cast(offset_h, tf.float32)
        offset_w = tf.cast(offset_w, tf.float32)
        width = tf.cast(width, tf.float32)
        hight = tf.cast(hight, tf.float32)
        # De-normalize to original pixels, shift by the offset, re-normalize
        # against the enlarged canvas.
        ymin = (reg_label_real[:,0]*shapesize[0]+offset_h)/hight
        xmin = (reg_label_real[:,1]*shapesize[1]+offset_w)/width
        ymax = (reg_label_real[:,2]*shapesize[0]+offset_h)/hight
        xmax = (reg_label_real[:,3]*shapesize[1]+offset_w)/width
        reg_label_realNew = tf.stack(values=[ymin, xmin, ymax, xmax], axis=1)
        return image, reg_label_realNew, cls_label
def crop(self, images,reg_label_real,cls_label):
reg_label_real0 = tf.transpose(reg_label_real)
ymin,xmin,ymax,xmax = tf.split(reg_label_real0,4,0)
reg_label_real_withLab = tf.stack(values=[ymin,xmin,ymax,xmax,tf.cast(tf.transpose(cls_label),tf.float32)], axis=1)
reg_label_real_withLab = tf.reshape(tf.transpose(reg_label_real_withLab),[-1,5])
tf_image = tf.cast(images, dtype=tf.float32)
bounding_boxes = tf.expand_dims(reg_label_real, 0)
begin, size, bbox_for_draw = tf.image.sample_distorted_bounding_box(
tf.shape(tf_image),
bounding_boxes=bounding_boxes,
min_object_covered=0.3,
aspect_ratio_range=(0.5, 2),
area_range=(0.3, 1.0),
max_attempts=None,
use_image_if_no_bounding_boxes=True,
name=None
)
image_with_box = tf.squeeze(tf.cast(tf.image.draw_bounding_boxes(tf.expand_dims(tf_image, 0), bbox_for_draw), tf.uint8))
distorted_image = tf.cast(tf.slice(tf_image, begin, size), tf.uint8)
distort_bbox = bbox_for_draw[0, 0]
filter_box = self.bboxes_intersection_filter(distort_bbox, reg_label_real_withLab)
filter_box = tf.reshape(filter_box,[-1,5])
return distorted_image,filter_box[:,0:4],tf.cast(filter_box[:,4],tf.int64)
def bboxes_intersection_filter(self,bbox_ref, bboxes, threshold=0.3):
# thresholds = tf.random_uniform(shape=[], minval=0, maxval=6, dtype=tf.int32)
# threshold = tf.case({tf.equal(thresholds, 0): lambda :0.1,
# tf.equal(thresholds, 1): lambda :0.3,
# tf.equal(thresholds, 2): lambda :0.5,
# tf.equal(thresholds, 3): lambda :0.7,
# tf.equal(thresholds, 4): lambda: 0.9,
# tf.equal(thresholds, 5): lambda: 1.0
# }, exclusive=True)
int_ymin = tf.maximum(bboxes[:,0], bbox_ref[0])
int_xmin = tf.maximum(bboxes[:,1], bbox_ref[1])
int_ymax = tf.minimum(bboxes[:,2], bbox_ref[2])
int_xmax = tf.minimum(bboxes[:,3], bbox_ref[3])
h = tf.maximum(int_ymax - int_ymin, 0.)
w = tf.maximum(int_xmax - int_xmin, 0.)
inter_vol = h * w
bboxes_vol = (bboxes[:,2] - bboxes[:,0]) * (bboxes[:,3] - bboxes[:,1])
scores =tf.divide(inter_vol, bboxes_vol)
clip_ymin = (tf.clip_by_value(bboxes[:,0], bbox_ref[0], bbox_ref[2])-bbox_ref[0])/(bbox_ref[2] - bbox_ref[0])
clip_xmin = (tf.clip_by_value(bboxes[:, 1], bbox_ref[1], bbox_ref[3])-bbox_ref[1])/(bbox_ref[3] - bbox_ref[1])
clip_ymax = (tf.clip_by_value(bboxes[:, 2], bbox_ref[0], bbox_ref[2])-bbox_ref[0])/(bbox_ref[2] - bbox_ref[0])
clip_xmax = (tf.clip_by_value(bboxes[:, 3], bbox_ref[1], bbox_ref[3])-bbox_ref[1])/(bbox_ref[3] - bbox_ref[1])
clip_cls = bboxes[:, 4]
bboxes = tf.stack(values=[clip_ymin, clip_xmin, clip_ymax, clip_xmax,clip_cls], axis=1)
filter_score = tf.gather(bboxes, tf.squeeze(tf.where(tf.greater_equal(scores,threshold))))
return filter_score | # -*- coding: utf-8 -*-
"""
@author: yangxuefeng
"""
import numpy as np
import tensorflow as tf
IMG_MEAN = np.array((74,75,71), dtype=np.float32)  # per-channel mean used as the pad fill value; presumably dataset channel means -- TODO confirm channel order
class Augement():
    """Random data augmentation for detection samples, built as a TF graph.

    Inputs are an image tensor, normalized boxes [N, 4] as
    (ymin, xmin, ymax, xmax), class labels, and the image shape.
    (The original misspelled class name is kept so callers keep working.)

    Fixes vs. the original: the final ``return`` of
    bboxes_intersection_filter had dataset metadata fused into it and has
    been restored; a dead commented-out block and an unused debug graph
    node were removed.
    """
    def __init__(self, image, reg_label_real, cls_label, shape):
        # Raw sample components; boxes are normalized to [0, 1].
        self.images = image
        self.reg_label_real = reg_label_real
        self.cls_label = cls_label
        self.shape = shape
    def execute(self):
        """Apply one randomly selected augmentation pipeline.

        NOTE(review): minval=3, maxval=4 means flag is always 3, so only
        order4 (the randomized compose-all pipeline) ever runs. This looks
        like a deliberate toggle; widen to minval=0 to re-enable order1-3.
        """
        flag = tf.random_uniform(shape=[], minval=3, maxval=4, dtype=tf.int32)
        images, reg_label_real, cls_label = tf.case({tf.equal(flag, 0): self.order1,
                                                     tf.equal(flag, 1): self.order2,
                                                     tf.equal(flag, 2): self.order3,
                                                     tf.equal(flag, 3): self.order4
                                                     }, exclusive=True)
        img_shape = tf.shape(images)
        return images, reg_label_real, tf.reshape(cls_label, [-1, 1]), img_shape
    def order1(self):
        """Pipeline: crop -> color jitter -> horizontal flip."""
        images0, reg_label_real0, cls_label0 = self.crop(self.images, self.reg_label_real, self.cls_label)
        images1, reg_label_real1, cls_label1 = self.color(images0, reg_label_real0, cls_label0)
        images2, reg_label_real2, cls_label2 = self.flip(images1, reg_label_real1, cls_label1)
        return images2, reg_label_real2, cls_label2
    def order2(self):
        """Pipeline: pad (zoom out, canvas up to 4x) -> color jitter -> flip."""
        images0, reg_label_real0, cls_label0 = self.padding(self.images, self.reg_label_real, self.cls_label, 4, self.shape)
        images1, reg_label_real1, cls_label1 = self.color(images0, reg_label_real0, cls_label0)
        images2, reg_label_real2, cls_label2 = self.flip(images1, reg_label_real1, cls_label1)
        return images2, reg_label_real2, cls_label2
    def order3(self):
        """No augmentation: return the sample untouched."""
        return self.images, self.reg_label_real, self.cls_label
    def order4(self):
        """Apply color / padding / crop / flip independently, each with
        probability 1/2, in that fixed order."""
        is_do = tf.random_uniform(shape=[], minval=0, maxval=2, dtype=tf.int32)
        images0, reg_label_real0, cls_label0 = tf.cond(tf.equal(is_do, 0),
                                                       lambda: self.color(self.images, self.reg_label_real, self.cls_label),
                                                       lambda: self.returnsrc(self.images, self.reg_label_real, self.cls_label))
        is_do = tf.random_uniform(shape=[], minval=0, maxval=2, dtype=tf.int32)
        images1, reg_label_real1, cls_label1 = tf.cond(tf.equal(is_do, 0),
                                                       lambda: self.padding(images0, reg_label_real0, cls_label0, 2, self.shape),
                                                       lambda: self.returnsrc(images0, reg_label_real0, cls_label0))
        is_do = tf.random_uniform(shape=[], minval=0, maxval=2, dtype=tf.int32)
        images2, reg_label_real2, cls_label2 = tf.cond(tf.equal(is_do, 0),
                                                       lambda: self.crop(images1, reg_label_real1, cls_label1),
                                                       lambda: self.returnsrc(images1, reg_label_real1, cls_label1))
        is_do = tf.random_uniform(shape=[], minval=0, maxval=2, dtype=tf.int32)
        images3, reg_label_real3, cls_label3 = tf.cond(tf.equal(is_do, 0),
                                                       lambda: self.flip(images2, reg_label_real2, cls_label2),
                                                       lambda: self.returnsrc(images2, reg_label_real2, cls_label2))
        return images3, reg_label_real3, cls_label3
    def returnsrc(self, images, reg_label_real, cls_label):
        """Identity pass-through: the no-op branch for tf.cond."""
        return images, reg_label_real, cls_label
    def color(self, images, reg_label_real, cls_label):
        """Photometric jitter (brightness/saturation/hue/contrast) applied in
        one of four random orders; boxes and labels are unchanged."""
        def f1():
            image = tf.image.random_brightness(images, max_delta=32. / 255.)
            image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
            image = tf.image.random_hue(image, max_delta=0.2)
            image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
            return image
        def f2():
            image = tf.image.random_saturation(images, lower=0.5, upper=1.5)
            image = tf.image.random_brightness(image, max_delta=32. / 255.)
            image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
            image = tf.image.random_hue(image, max_delta=0.2)
            return image
        def f3():
            image = tf.image.random_contrast(images, lower=0.5, upper=1.5)
            image = tf.image.random_hue(image, max_delta=0.2)
            image = tf.image.random_brightness(image, max_delta=32. / 255.)
            image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
            return image
        def f4():
            image = tf.image.random_hue(images, max_delta=0.2)
            image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
            image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
            image = tf.image.random_brightness(image, max_delta=32. / 255.)
            return image
        color_ordering = tf.random_uniform(shape=[], minval=0, maxval=4, dtype=tf.int32)
        image = tf.case({tf.equal(color_ordering, 0): f1,
                         tf.equal(color_ordering, 1): f2,
                         tf.equal(color_ordering, 2): f3,
                         tf.equal(color_ordering, 3): f4}, exclusive=True)
        return image, reg_label_real, cls_label
    def flip(self, images, reg_label_real, cls_label):
        """Mirror the image left-right; reflect box x-coordinates."""
        image = tf.image.flip_left_right(images)
        ymin = reg_label_real[:, 0]
        xmin = 1.0 - reg_label_real[:, 3]  # new xmin from old xmax
        ymax = reg_label_real[:, 2]
        xmax = 1.0 - reg_label_real[:, 1]  # new xmax from old xmin
        reg_label_realNew = tf.stack(values=[ymin, xmin, ymax, xmax], axis=1)
        reg_label_realNew = tf.reshape(reg_label_realNew, [-1, 4])
        return image, reg_label_realNew, cls_label
    def padding(self, images, reg_label_real, cls_label, ratio, shape):
        """'Zoom out': place the image at a random offset on a larger canvas
        (scale drawn from [1, ratio)) filled with IMG_MEAN; rescale boxes."""
        ratios = tf.random_uniform(shape=[], minval=1.0, maxval=ratio, dtype=tf.float32)
        shapesize = tf.cast(shape, tf.float32)
        width = shapesize[1] * ratios
        hight = shapesize[0] * ratios
        offset_h = tf.random_uniform(shape=[], minval=0, dtype=tf.float32, maxval=hight - shapesize[0])
        offset_w = tf.random_uniform(shape=[], minval=0, dtype=tf.float32, maxval=width - shapesize[1])
        # Truncate offsets and canvas size to whole pixels for tf.pad.
        offset_h = tf.cast(offset_h, tf.int32)
        offset_w = tf.cast(offset_w, tf.int32)
        width = tf.cast(width, tf.int32)
        hight = tf.cast(hight, tf.int32)
        padding = [[offset_h, hight - tf.cast(shapesize[0], tf.int32) - tf.cast(offset_h, tf.int32)], [offset_w, width - tf.cast(shapesize[1], tf.int32) - tf.cast(offset_w, tf.int32)]]
        # Pad each channel with its per-channel mean fill value.
        image_0 = tf.pad(tensor=images[:, :, 0], paddings=padding, constant_values=IMG_MEAN[0])
        image_1 = tf.pad(tensor=images[:, :, 1], paddings=padding, constant_values=IMG_MEAN[1])
        image_2 = tf.pad(tensor=images[:, :, 2], paddings=padding, constant_values=IMG_MEAN[2])
        image = tf.stack(values=[image_0, image_1, image_2], axis=-1)
        # Back to float; using the truncated values keeps box normalization
        # consistent with the actual padded canvas size.
        offset_h = tf.cast(offset_h, tf.float32)
        offset_w = tf.cast(offset_w, tf.float32)
        width = tf.cast(width, tf.float32)
        hight = tf.cast(hight, tf.float32)
        ymin = (reg_label_real[:, 0] * shapesize[0] + offset_h) / hight
        xmin = (reg_label_real[:, 1] * shapesize[1] + offset_w) / width
        ymax = (reg_label_real[:, 2] * shapesize[0] + offset_h) / hight
        xmax = (reg_label_real[:, 3] * shapesize[1] + offset_w) / width
        reg_label_realNew = tf.stack(values=[ymin, xmin, ymax, xmax], axis=1)
        return image, reg_label_realNew, cls_label
    def crop(self, images, reg_label_real, cls_label):
        """Random crop: sample a window covering >=30% of some box, crop to
        it, and keep only boxes that still overlap the window enough."""
        # Re-pack boxes as [N, 5] with the class id appended as a float.
        reg_label_real0 = tf.transpose(reg_label_real)
        ymin, xmin, ymax, xmax = tf.split(reg_label_real0, 4, 0)
        reg_label_real_withLab = tf.stack(values=[ymin, xmin, ymax, xmax, tf.cast(tf.transpose(cls_label), tf.float32)], axis=1)
        reg_label_real_withLab = tf.reshape(tf.transpose(reg_label_real_withLab), [-1, 5])
        tf_image = tf.cast(images, dtype=tf.float32)
        bounding_boxes = tf.expand_dims(reg_label_real, 0)
        begin, size, bbox_for_draw = tf.image.sample_distorted_bounding_box(
            tf.shape(tf_image),
            bounding_boxes=bounding_boxes,
            min_object_covered=0.3,
            aspect_ratio_range=(0.5, 2),
            area_range=(0.3, 1.0),
            max_attempts=None,
            use_image_if_no_bounding_boxes=True,
            name=None
        )
        # (An unused debug image built with tf.image.draw_bounding_boxes was
        # removed here.)
        distorted_image = tf.cast(tf.slice(tf_image, begin, size), tf.uint8)
        distort_bbox = bbox_for_draw[0, 0]
        filter_box = self.bboxes_intersection_filter(distort_bbox, reg_label_real_withLab)
        # Guards the single-survivor case where gather returns rank 1.
        filter_box = tf.reshape(filter_box, [-1, 5])
        return distorted_image, filter_box[:, 0:4], tf.cast(filter_box[:, 4], tf.int64)
    def bboxes_intersection_filter(self, bbox_ref, bboxes, threshold=0.3):
        """Keep boxes whose intersection with *bbox_ref* covers at least
        *threshold* of their own area; clip and re-normalize the survivors
        to bbox_ref's frame. *bboxes* is [N, 5] with class id in column 4."""
        int_ymin = tf.maximum(bboxes[:, 0], bbox_ref[0])
        int_xmin = tf.maximum(bboxes[:, 1], bbox_ref[1])
        int_ymax = tf.minimum(bboxes[:, 2], bbox_ref[2])
        int_xmax = tf.minimum(bboxes[:, 3], bbox_ref[3])
        h = tf.maximum(int_ymax - int_ymin, 0.)
        w = tf.maximum(int_xmax - int_xmin, 0.)
        inter_vol = h * w
        bboxes_vol = (bboxes[:, 2] - bboxes[:, 0]) * (bboxes[:, 3] - bboxes[:, 1])
        # Coverage score: fraction of each box's own area inside the window.
        scores = tf.divide(inter_vol, bboxes_vol)
        clip_ymin = (tf.clip_by_value(bboxes[:, 0], bbox_ref[0], bbox_ref[2]) - bbox_ref[0]) / (bbox_ref[2] - bbox_ref[0])
        clip_xmin = (tf.clip_by_value(bboxes[:, 1], bbox_ref[1], bbox_ref[3]) - bbox_ref[1]) / (bbox_ref[3] - bbox_ref[1])
        clip_ymax = (tf.clip_by_value(bboxes[:, 2], bbox_ref[0], bbox_ref[2]) - bbox_ref[0]) / (bbox_ref[2] - bbox_ref[0])
        clip_xmax = (tf.clip_by_value(bboxes[:, 3], bbox_ref[1], bbox_ref[3]) - bbox_ref[1]) / (bbox_ref[3] - bbox_ref[1])
        clip_cls = bboxes[:, 4]
        bboxes = tf.stack(values=[clip_ymin, clip_xmin, clip_ymax, clip_xmax, clip_cls], axis=1)
        # With exactly one survivor tf.squeeze yields a scalar index; callers
        # reshape the gathered result back to [-1, 5].
        filter_score = tf.gather(bboxes, tf.squeeze(tf.where(tf.greater_equal(scores, threshold))))
        return filter_score
pcml/core/PCMLConfig.py | Jindam/HPCGISLab | 1 | 6619193 | """
Copyright (c) 2014 High-Performance Computing and GIS (HPCGIS) Laboratory. All rights reserved.
Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
Authors and contributors: <NAME> (<EMAIL>); <NAME> (<EMAIL>, <EMAIL>)
"""
from Scheduler import *
# Number of worker processes used by the parallel executors
num_procs=4
# Execution backend. The serial executor is kept (commented out) as an easy
# debugging fallback; previously both assignments executed and the second
# silently overwrote the first (a dead store).
#exectype=ExecutorType.serialpython
exectype=ExecutorType.parallelpythonqueue
# The precision used in formatting floating values into strings
value_precision="%f"
# By default osgeo including gdal, ogr, and osr are not available
# In OperationIO we try to import them and if successful osgeoenabled=1
osgeoenabled=0
| """
Copyright (c) 2014 High-Performance Computing and GIS (HPCGIS) Laboratory. All rights reserved.
Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
Authors and contributors: <NAME> (<EMAIL>); <NAME> (<EMAIL>, <EMAIL>)
"""
from Scheduler import *
# Number of worker processes used by the parallel executors
num_procs=4
# Execution backend. The serial executor is kept (commented out) as an easy
# debugging fallback; previously both assignments executed and the second
# silently overwrote the first (a dead store).
#exectype=ExecutorType.serialpython
exectype=ExecutorType.parallelpythonqueue
# The precision used in formatting floating values into strings
value_precision="%f"
# By default osgeo including gdal, ogr, and osr are not available
# In OperationIO we try to import them and if successful osgeoenabled=1
osgeoenabled=0
| en | 0.816742 | Copyright (c) 2014 High-Performance Computing and GIS (HPCGIS) Laboratory. All rights reserved. Use of this source code is governed by a BSD-style license that can be found in the LICENSE file. Authors and contributors: <NAME> (<EMAIL>); <NAME> (<EMAIL>, <EMAIL>) # Number of processes to run # The precision used in formatting floating values into strings # By default osgeo including gdal, ogr, and osr are not available # In OperationIO we try to import them and if successful osgeoenabled=1 | 1.545879 | 2 |
preprocess.py | zhangjh915/Statistical-Machine-Learning-Project | 0 | 6619194 | <reponame>zhangjh915/Statistical-Machine-Learning-Project<filename>preprocess.py
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns; sns.set()
def read_data(data_type='train'):
    """Load the training or test CSV and report its dimensions.

    train: (40000, 116) including y; test: (10000, 115) without y.
    Raises ValueError for any other data_type.
    """
    csv_paths = {
        'train': 'data/cryoocyte_3_regression_train.csv',
        'test': 'data/cryoocyte_3_regression_test.csv',
    }
    if data_type not in csv_paths:
        raise ValueError('Unknown Data Type: %s' % data_type)
    data = pd.read_csv(csv_paths[data_type])
    print('Dimension for %s set is: %s' % (data_type, data.shape))
    return data
def check_data(data):
    """Summarise missing-data rate and dtype for every column.

    Returns a DataFrame indexed by column name with:
      - nan_rate: percentage (0-100) of missing values in the column
      - data_type: the pandas dtype of the column
    (The original also collected the non-numeric columns into an unused
    local dict; that dead code has been removed -- non-numeric columns are
    visible via the data_type column of the returned frame.)
    """
    # Percentage of NaNs per column (includes 'y' when present).
    nan_rate = {}
    for x in data.columns:
        nan_rate[x] = 100 * data[x].isnull().sum() / len(data[x])
    nan_rate_df = pd.DataFrame(list(nan_rate.values()), index=nan_rate.keys(), columns=['nan_rate'])
    nan_rate_df['data_type'] = data.dtypes
    return nan_rate_df
def process_data(data, filled=False, data_type='train'):
    """Convert non-numeric quantitative/temporal columns to numeric form.

    Call once with filled=False before imputation (parses x44 percentages and
    x59 prices, sin/cos-encodes the cyclic week/month columns x50/x95) and
    once with filled=True afterwards (one-hot encodes x63/x65).

    Bug fix vs. the original: 'thursday' was mapped to 5, colliding with
    'sat'; the weekday cycle is monday=0..sun=6, so thursday is 3.
    Generalisation: the row count is taken from the frame itself instead of
    being hard-coded to 40000/10000 (identical for the real datasets).
    """
    if data_type not in ('train', 'test'):
        raise ValueError('Unknown Data Type: %s' % data_type)
    num_sample = len(data)
    if not filled:  # before filling the nan values
        # x44 is a percentage string ('12.3%'); x59 a price string ('$-1.50').
        for i in range(num_sample):
            x44 = data.loc[i, 'x44']
            x59 = data.loc[i, 'x59']
            try:
                data.loc[i, 'x44'] = float(x44[:-1]) * 0.01
            except TypeError:  # nan values
                pass
            try:
                data.loc[i, 'x59'] = float(x59[1:])
            except TypeError:  # nan values
                pass
        data = data.astype({"x44": float, "x59": float})
        # Replace the nan values of week and month features with their modes.
        for x in ['x50', 'x95']:
            data[x].fillna(data[x].mode()[0], inplace=True)
        # Sine/cosine transform keeps the cyclic temporal relationship.
        x50_sin = pd.Series()
        x50_cos = pd.Series()
        x95_sin = pd.Series()
        x95_cos = pd.Series()
        for i in range(num_sample):
            x50 = data.loc[i, 'x50']
            x95 = data.loc[i, 'x95']
            # monday=0 .. sun=6 (fixed: thursday is 3, not 5).
            week_dict = {'monday': 0, 'tuesday': 1, 'wednesday': 2, 'thursday': 3, 'friday': 4, 'sat': 5, 'sun': 6}
            # Month spellings follow the raw data ('sept.', 'Dev', ...).
            month_dict = {'January': 0, 'Feb': 1, 'Mar': 2, 'Apr': 3, 'May': 4, 'Jun': 5,
                          'July': 6, 'Aug': 7, 'sept.': 8, 'Oct': 9, 'Nov': 10, 'Dev': 11}
            try:
                x50_sin.at[i] = np.sin(week_dict[x50] * (2 * np.pi / 7))
                x50_cos.at[i] = np.cos(week_dict[x50] * (2 * np.pi / 7))
            except KeyError:  # nan values
                x50_sin.at[i] = np.nan
                x50_cos.at[i] = np.nan
            try:
                x95_sin.at[i] = np.sin(month_dict[x95] * (2 * np.pi / 12))
                x95_cos.at[i] = np.cos(month_dict[x95] * (2 * np.pi / 12))
            except KeyError:  # nan values
                x95_sin.at[i] = np.nan
                x95_cos.at[i] = np.nan
        data['x50_sin'] = x50_sin
        data['x50_cos'] = x50_cos
        data['x95_sin'] = x95_sin
        data['x95_cos'] = x95_cos
        data.drop('x50', axis=1, inplace=True)
        data.drop('x95', axis=1, inplace=True)
    else:  # after filling the nan values
        # One-hot encoding of the categorical features.
        one_hot_variables = pd.get_dummies(data[['x63', 'x65']])
        data.drop(['x63', 'x65'], axis=1, inplace=True)
        data = pd.concat([one_hot_variables, data], axis=1)
    return data
def fill_data(data):
    """Impute missing values in place and return the frame.

    Categorical columns x63/x65 receive their mode; every float-typed
    column receives its mean (the same dtype == 'float' selection the
    nan-rate summary uses, inlined here).
    """
    for cat_col in ['x63', 'x65']:
        data[cat_col].fillna(data[cat_col].mode()[0], inplace=True)
    float_cols = [col for col in data.columns if data.dtypes[col] == 'float']
    for col in float_cols:
        data[col].fillna(data[col].mean(), inplace=True)
    return data
def count_cat(data):
    """Tally the category levels of the four categorical columns.

    Returns (cat50, cat63, cat65, cat95): Counters over the values of
    x50, x63, x65 and x95 respectively (NaN values are counted as keys too,
    matching the original's behaviour of tallying missing entries).

    Bug fix vs. the original: the first occurrence of each value was
    initialised to 0, undercounting every category by one; Counter tallies
    correctly. The hard-coded range(40000) is replaced by iterating the
    columns themselves, and the counts are now returned instead of being
    computed and discarded. (The month value 'Dev' is a typo present in the
    raw data itself, so it is intentionally left as-is.)
    """
    from collections import Counter
    cat50 = Counter(data['x50'])
    cat63 = Counter(data['x63'])
    cat65 = Counter(data['x65'])
    cat95 = Counter(data['x95'])
    return cat50, cat63, cat65, cat95
def main(plot=False):
    """Run the preprocessing pipeline end to end and write data/train.csv.

    Args:
        plot: when True, also regenerate the per-feature scatter plots into
            results/scatter_plots.png.
    """
    data = read_data('train')
    check_data(data)  # inspect nan rates / dtypes before cleaning
    data = process_data(data, filled=False)
    data = fill_data(data)
    data = process_data(data, filled=True)
    data.to_csv('data/train.csv', index=False)
    # Plot scatter plots for each feature.
    if plot:
        # Bug fix: read back the file just written ('data/train.csv'); the
        # original read 'train.csv', a path it never writes.
        data = pd.read_csv('data/train.csv')
        nan_rate_df = check_data(data)
        numerical_features = list(nan_rate_df.loc[nan_rate_df.data_type == 'float', ].index)
        for x in ['y', 'x50_sin', 'x50_cos', 'x95_sin', 'x95_cos']:
            numerical_features.remove(x)
        fig, axs = plt.subplots(11, 11, figsize=(20, 20))
        for i in range(len(numerical_features)):
            sns.scatterplot(x=numerical_features[i], y='y',
                            data=data[[numerical_features[i], 'y']],
                            ax=axs[i // 11][i % 11])
        plt.subplots_adjust(hspace=0.5)
        plt.savefig('results/scatter_plots.png')
if __name__ == "__main__":
    # Script entry point; set plot=True to regenerate the scatter plots.
    main(plot=False)
| import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns; sns.set()
def read_data(data_type='train'):
    """Load the training or test CSV and report its dimensions.

    train: (40000, 116) including y; test: (10000, 115) without y.
    Raises ValueError for any other data_type.
    """
    csv_paths = {
        'train': 'data/cryoocyte_3_regression_train.csv',
        'test': 'data/cryoocyte_3_regression_test.csv',
    }
    if data_type not in csv_paths:
        raise ValueError('Unknown Data Type: %s' % data_type)
    data = pd.read_csv(csv_paths[data_type])
    print('Dimension for %s set is: %s' % (data_type, data.shape))
    return data
def check_data(data):
    """Summarise missing-data rate and dtype for every column.

    Returns a DataFrame indexed by column name with:
      - nan_rate: percentage (0-100) of missing values in the column
      - data_type: the pandas dtype of the column
    (The original also collected the non-numeric columns into an unused
    local dict; that dead code has been removed -- non-numeric columns are
    visible via the data_type column of the returned frame.)
    """
    # Percentage of NaNs per column (includes 'y' when present).
    nan_rate = {}
    for x in data.columns:
        nan_rate[x] = 100 * data[x].isnull().sum() / len(data[x])
    nan_rate_df = pd.DataFrame(list(nan_rate.values()), index=nan_rate.keys(), columns=['nan_rate'])
    nan_rate_df['data_type'] = data.dtypes
    return nan_rate_df
def process_data(data, filled=False, data_type='train'):
    """Convert non-numeric quantitative/temporal columns to numeric form.

    Call once with filled=False before imputation (parses x44 percentages and
    x59 prices, sin/cos-encodes the cyclic week/month columns x50/x95) and
    once with filled=True afterwards (one-hot encodes x63/x65).

    Bug fix vs. the original: 'thursday' was mapped to 5, colliding with
    'sat'; the weekday cycle is monday=0..sun=6, so thursday is 3.
    Generalisation: the row count is taken from the frame itself instead of
    being hard-coded to 40000/10000 (identical for the real datasets).
    """
    if data_type not in ('train', 'test'):
        raise ValueError('Unknown Data Type: %s' % data_type)
    num_sample = len(data)
    if not filled:  # before filling the nan values
        # x44 is a percentage string ('12.3%'); x59 a price string ('$-1.50').
        for i in range(num_sample):
            x44 = data.loc[i, 'x44']
            x59 = data.loc[i, 'x59']
            try:
                data.loc[i, 'x44'] = float(x44[:-1]) * 0.01
            except TypeError:  # nan values
                pass
            try:
                data.loc[i, 'x59'] = float(x59[1:])
            except TypeError:  # nan values
                pass
        data = data.astype({"x44": float, "x59": float})
        # Replace the nan values of week and month features with their modes.
        for x in ['x50', 'x95']:
            data[x].fillna(data[x].mode()[0], inplace=True)
        # Sine/cosine transform keeps the cyclic temporal relationship.
        x50_sin = pd.Series()
        x50_cos = pd.Series()
        x95_sin = pd.Series()
        x95_cos = pd.Series()
        for i in range(num_sample):
            x50 = data.loc[i, 'x50']
            x95 = data.loc[i, 'x95']
            # monday=0 .. sun=6 (fixed: thursday is 3, not 5).
            week_dict = {'monday': 0, 'tuesday': 1, 'wednesday': 2, 'thursday': 3, 'friday': 4, 'sat': 5, 'sun': 6}
            # Month spellings follow the raw data ('sept.', 'Dev', ...).
            month_dict = {'January': 0, 'Feb': 1, 'Mar': 2, 'Apr': 3, 'May': 4, 'Jun': 5,
                          'July': 6, 'Aug': 7, 'sept.': 8, 'Oct': 9, 'Nov': 10, 'Dev': 11}
            try:
                x50_sin.at[i] = np.sin(week_dict[x50] * (2 * np.pi / 7))
                x50_cos.at[i] = np.cos(week_dict[x50] * (2 * np.pi / 7))
            except KeyError:  # nan values
                x50_sin.at[i] = np.nan
                x50_cos.at[i] = np.nan
            try:
                x95_sin.at[i] = np.sin(month_dict[x95] * (2 * np.pi / 12))
                x95_cos.at[i] = np.cos(month_dict[x95] * (2 * np.pi / 12))
            except KeyError:  # nan values
                x95_sin.at[i] = np.nan
                x95_cos.at[i] = np.nan
        data['x50_sin'] = x50_sin
        data['x50_cos'] = x50_cos
        data['x95_sin'] = x95_sin
        data['x95_cos'] = x95_cos
        data.drop('x50', axis=1, inplace=True)
        data.drop('x95', axis=1, inplace=True)
    else:  # after filling the nan values
        # One-hot encoding of the categorical features.
        one_hot_variables = pd.get_dummies(data[['x63', 'x65']])
        data.drop(['x63', 'x65'], axis=1, inplace=True)
        data = pd.concat([one_hot_variables, data], axis=1)
    return data
def fill_data(data):
    """Impute missing values in place and return the frame.

    Categorical columns x63/x65 receive their mode; every float-typed
    column receives its mean (the same dtype == 'float' selection the
    nan-rate summary uses, inlined here).
    """
    for cat_col in ['x63', 'x65']:
        data[cat_col].fillna(data[cat_col].mode()[0], inplace=True)
    float_cols = [col for col in data.columns if data.dtypes[col] == 'float']
    for col in float_cols:
        data[col].fillna(data[col].mean(), inplace=True)
    return data
def count_cat(data):
    """Tally the category levels of the four categorical columns.

    Returns (cat50, cat63, cat65, cat95): Counters over the values of
    x50, x63, x65 and x95 respectively (NaN values are counted as keys too,
    matching the original's behaviour of tallying missing entries).

    Bug fix vs. the original: the first occurrence of each value was
    initialised to 0, undercounting every category by one; Counter tallies
    correctly. The hard-coded range(40000) is replaced by iterating the
    columns themselves, and the counts are now returned instead of being
    computed and discarded. (The month value 'Dev' is a typo present in the
    raw data itself, so it is intentionally left as-is.)
    """
    from collections import Counter
    cat50 = Counter(data['x50'])
    cat63 = Counter(data['x63'])
    cat65 = Counter(data['x65'])
    cat95 = Counter(data['x95'])
    return cat50, cat63, cat65, cat95
def main(plot=False):
    """Run the preprocessing pipeline end to end and write data/train.csv.

    Args:
        plot: when True, also regenerate the per-feature scatter plots into
            results/scatter_plots.png.
    """
    data = read_data('train')
    check_data(data)  # inspect nan rates / dtypes before cleaning
    data = process_data(data, filled=False)
    data = fill_data(data)
    data = process_data(data, filled=True)
    data.to_csv('data/train.csv', index=False)
    # Plot scatter plots for each feature.
    if plot:
        # Bug fix: read back the file just written ('data/train.csv'); the
        # original read 'train.csv', a path it never writes.
        data = pd.read_csv('data/train.csv')
        nan_rate_df = check_data(data)
        numerical_features = list(nan_rate_df.loc[nan_rate_df.data_type == 'float', ].index)
        for x in ['y', 'x50_sin', 'x50_cos', 'x95_sin', 'x95_cos']:
            numerical_features.remove(x)
        fig, axs = plt.subplots(11, 11, figsize=(20, 20))
        for i in range(len(numerical_features)):
            sns.scatterplot(x=numerical_features[i], y='y',
                            data=data[[numerical_features[i], 'y']],
                            ax=axs[i // 11][i % 11])
        plt.subplots_adjust(hspace=0.5)
        plt.savefig('results/scatter_plots.png')
if __name__ == "__main__":
    # Script entry point (restored: dataset metadata had been fused into this
    # line); set plot=True to regenerate the scatter plots.
    main(plot=False)
protestbot/start.py | Vigilo4u/ProtestBot | 0 | 6619195 | <gh_stars>0
#!/usr/bin/python3
import sys
from protestbot.protestbot import ProtestBot
# Entry point
def run(args=None):
    """CLI entry point: print help or dispatch the requested command.

    Args:
        args: optional argv-style list (program name first). Falls back to
            sys.argv when omitted, which preserves the original behaviour.
            The original accepted this parameter but ignored it; honouring
            it makes the function callable and testable without patching
            sys.argv.
    """
    argv = sys.argv if args is None else args
    # First we capture the command line arguments
    if len(argv) < 2:
        print('''
        ProtestBot Help
        Command syntax: runbot [command] [botname]
        The botname is optional. It should be the name of a python module copied from settings.py. Example: mybot.py
        List of commands
        reply-to-abuser           Replies to all posts and comments made by
                                  the abuser of power using the
                                  protest_template.txt.
        reply-to-abuser-friends   Replies to all comments left by others on
                                  the abuser's posts. Also uses the
                                  protest_template.txt. This command takes
                                  two arguments:
                                  1) The title of the post 2) a list of 5 tags.
        abused                    Prints out a list of those the abuser
                                  downvoted recently.
        memos                     Sends 0.001 transactions to those the abuser
                                  downvoted along with the message in
                                  memo_template.txt
        balance                   Prints the current STEEM and SBD balance
                                  for the bot.
        replies                   Prints a list of all replies recently made
                                  by the abuser.
        replies-to-friends        Prints a list of replies recently made to
                                  the abuser's post by others.
        upvote-downvoted          Finds all the authors downvoted by the abuser
                                  and gives them an upvote.
        ''')
    else:
        command = str(argv[1])
        # If no bot name was given use the default settings
        if len(argv) == 2:
            commander("settings", command)
        # Iterate through a list of bot names and execute the same command for each
        else:
            for i in range(2, len(argv)):
                commander(str(argv[i]), command)
def commander(selectedbot, command):
    """Build the bot from *selectedbot*'s settings module and run *command*."""
    # import the settings based on which bot we're using
    bot = ProtestBot(botname=selectedbot)
    # Map each CLI command onto the bot action it triggers.
    actions = {
        "reply-to-abuser": lambda: bot.reply_to_abuser_posts(),
        "reply-to-abuser-friends": lambda: bot.reply_to_abuser_posts(friends=True),
        "post": lambda: bot.post_to_profile(),
        "abused": lambda: bot.find_downvoted_authors(),
        "memos": lambda: bot.send_memos_to_the_downvoted(),
        "balance": lambda: bot.ensure_balance(),
        "replies": lambda: bot.get_all_posts_and_replies(),
        "replies-to-friends": lambda: bot.get_all_posts_and_replies(friends=True),
    }
    if command in actions:
        actions[command]()
    elif command == "upvote-downvoted":
        # Two-step action: refresh the downvoted list, then upvote them.
        bot.find_downvoted_authors()
        bot.upvote_the_downvoted()
    else:
        print ("Invalid command.")
if __name__ == "__main__":
    # CLI entry point: parse sys.argv and dispatch the requested command(s).
    run()
# EOF
| #!/usr/bin/python3
import sys
from protestbot.protestbot import ProtestBot
# Entry point
def run(args=None):
    """CLI entry point: print help or dispatch the requested command.

    Args:
        args: optional argv-style list (program name first). Falls back to
            sys.argv when omitted, which preserves the original behaviour.
            The original accepted this parameter but ignored it; honouring
            it makes the function callable and testable without patching
            sys.argv.
    """
    argv = sys.argv if args is None else args
    # First we capture the command line arguments
    if len(argv) < 2:
        print('''
        ProtestBot Help
        Command syntax: runbot [command] [botname]
        The botname is optional. It should be the name of a python module copied from settings.py. Example: mybot.py
        List of commands
        reply-to-abuser           Replies to all posts and comments made by
                                  the abuser of power using the
                                  protest_template.txt.
        reply-to-abuser-friends   Replies to all comments left by others on
                                  the abuser's posts. Also uses the
                                  protest_template.txt. This command takes
                                  two arguments:
                                  1) The title of the post 2) a list of 5 tags.
        abused                    Prints out a list of those the abuser
                                  downvoted recently.
        memos                     Sends 0.001 transactions to those the abuser
                                  downvoted along with the message in
                                  memo_template.txt
        balance                   Prints the current STEEM and SBD balance
                                  for the bot.
        replies                   Prints a list of all replies recently made
                                  by the abuser.
        replies-to-friends        Prints a list of replies recently made to
                                  the abuser's post by others.
        upvote-downvoted          Finds all the authors downvoted by the abuser
                                  and gives them an upvote.
        ''')
    else:
        command = str(argv[1])
        # If no bot name was given use the default settings
        if len(argv) == 2:
            commander("settings", command)
        # Iterate through a list of bot names and execute the same command for each
        else:
            for i in range(2, len(argv)):
                commander(str(argv[i]), command)
def commander(selectedbot, command):
    """Instantiate the bot configured by *selectedbot* and run *command* on it.

    Unknown commands print an error message instead of raising.
    """
    # import the settings based on which bot we're using
    bot = ProtestBot(botname=selectedbot)
    # Dispatch table: each CLI command maps to a zero-argument action.
    actions = {
        "reply-to-abuser": lambda: bot.reply_to_abuser_posts(),
        "reply-to-abuser-friends": lambda: bot.reply_to_abuser_posts(friends=True),
        "post": lambda: bot.post_to_profile(),
        "abused": lambda: bot.find_downvoted_authors(),
        "memos": lambda: bot.send_memos_to_the_downvoted(),
        "balance": lambda: bot.ensure_balance(),
        "replies": lambda: bot.get_all_posts_and_replies(),
        "replies-to-friends": lambda: bot.get_all_posts_and_replies(friends=True),
        # upvote-downvoted performs two steps, in this order.
        "upvote-downvoted": lambda: (
            bot.find_downvoted_authors(),
            bot.upvote_the_downvoted(),
        ),
    }
    action = actions.get(command)
    if action is None:
        print("Invalid command.")
    else:
        action()
# Allow running this module directly as a CLI script.
if __name__ == "__main__":
    run()
# EOF
main.py | dimayasha7123/prologTextProcessing | 0 | 6619196 | def transliterate(name):
slovar = {'а': 'a', 'б': 'b', 'в': 'v', 'г': 'g', 'д': 'd', 'е': 'e', 'ё': 'yo',
'ж': 'zh', 'з': 'z', 'и': 'i', 'й': 'i', 'к': 'k', 'л': 'l', 'м': 'm', 'н': 'n',
'о': 'o', 'п': 'p', 'р': 'r', 'с': 's', 'т': 't', 'у': 'u', 'ф': 'f', 'х': 'h',
'ц': 'c', 'ч': 'ch', 'ш': 'sh', 'щ': 'sch', 'ъ': '', 'ы': 'y', 'ь': '', 'э': 'e',
'ю': 'u', 'я': 'ya', 'А': 'A', 'Б': 'B', 'В': 'V', 'Г': 'G', 'Д': 'D', 'Е': 'E', 'Ё': 'Yo',
'Ж': 'Zh', 'З': 'Z', 'И': 'I', 'Й': 'I', 'К': 'K', 'Л': 'L', 'М': 'M', 'Н': 'N',
'О': 'O', 'П': 'P', 'Р': 'R', 'С': 'S', 'Т': 'T', 'У': 'U', 'Ф': 'F', 'Х': 'H',
'Ц': 'C', 'Ч': 'Ch', 'Ш': 'Sh', 'Щ': 'Sch', 'Ъ': '', 'Ы': 'y', 'Ь': '', 'Э': 'E',
'Ю': 'U', 'Я': 'Ya', ',': '', '?': '', ' ': ' ', '~': '', '!': '', '@': '', '#': '',
'$': '', '%': '', '^': '', '&': '', '*': '', '(': '', ')': '', '-': '-', '=': '', '+': '',
':': '', ';': '', '<': '', '>': '', '\'': '', '"': '', '\\': '', '/': '', '№': '',
'[': '', ']': '', '{': '', '}': '', 'ґ': '', 'ї': '', 'є': '', 'Ґ': 'g', 'Ї': 'i',
'Є': 'e', '—': ''}
for key in slovar:
name = name.replace(key, slovar[key])
return name
inputString = """Амур 4416 350 1855 Яблоневый хребет Татарский пролив
Лена 4400 488 2490 Байкальский хребет Море Лаптевых
Обь 4070 400 2990 Предгорья Алтая Карское море
Иртыш 4248 323 1643 Китай Обь
Енисей 3487 600 2580 Восточный Саян Карское море
Волга 3530 255 1360 Валдайская возвышенность Каспийское море
Колыма 2129 44 643 Хребет Черского Восточно — сибирское море
Урал 2428 54 231 Южный Урал Каспийское море
Дон 2200 45 504 Средне-русская возвышенность Азовское море
Кама 1805 130 507 Верхне — Камская возвышенность Волга
Печора 1809 130 322 Северный Урал Баренцево море
Ангара 1779 62 1039 Байкал Енисей
Селенга 1024 14 447 Монголия Байкал
Кубань 870 11 58 Кавказ Азовское море
Нева 74 281 Ладожское озеро Балтийское море """
splitted = [i.split("\t") for i in inputString.split("\n")]
#print(splitted)
def formatData(data):
    """Convert one raw table cell into a Prolog term.

    Digit-only cells become ints; anything else becomes a single-quoted,
    transliterated atom.
    """
    cleaned = data.strip()
    # Numeric field -> integer; text field -> quoted transliterated atom.
    return int(cleaned) if cleaned.isdigit() else "'" + transliterate(cleaned) + "'"
# Emit one Prolog fact per table row: river(Name, ..., Mouth).
for row in splitted:
    values = [formatData(cell) for cell in row]
    print("river(" + ", ".join(str(value) for value in values) + ").")
def transliterate(name):
    """Transliterate Cyrillic text to Latin ASCII and strip punctuation.

    Russian (and a few Ukrainian) letters map to Latin equivalents, most
    punctuation characters are removed, and spaces/hyphens are preserved.
    Characters not in the table pass through unchanged.

    Args:
        name: input string.

    Returns: the transliterated string.
    """
    # Every key is a single character, so the whole table can be applied in
    # one pass with str.translate instead of the original chain of
    # str.replace calls (one full scan per dictionary entry). Output is
    # identical because no replacement value is itself a key.
    slovar = {'а': 'a', 'б': 'b', 'в': 'v', 'г': 'g', 'д': 'd', 'е': 'e', 'ё': 'yo',
              'ж': 'zh', 'з': 'z', 'и': 'i', 'й': 'i', 'к': 'k', 'л': 'l', 'м': 'm', 'н': 'n',
              'о': 'o', 'п': 'p', 'р': 'r', 'с': 's', 'т': 't', 'у': 'u', 'ф': 'f', 'х': 'h',
              'ц': 'c', 'ч': 'ch', 'ш': 'sh', 'щ': 'sch', 'ъ': '', 'ы': 'y', 'ь': '', 'э': 'e',
              'ю': 'u', 'я': 'ya', 'А': 'A', 'Б': 'B', 'В': 'V', 'Г': 'G', 'Д': 'D', 'Е': 'E', 'Ё': 'Yo',
              'Ж': 'Zh', 'З': 'Z', 'И': 'I', 'Й': 'I', 'К': 'K', 'Л': 'L', 'М': 'M', 'Н': 'N',
              'О': 'O', 'П': 'P', 'Р': 'R', 'С': 'S', 'Т': 'T', 'У': 'U', 'Ф': 'F', 'Х': 'H',
              'Ц': 'C', 'Ч': 'Ch', 'Ш': 'Sh', 'Щ': 'Sch', 'Ъ': '', 'Ы': 'y', 'Ь': '', 'Э': 'E',
              'Ю': 'U', 'Я': 'Ya', ',': '', '?': '', ' ': ' ', '~': '', '!': '', '@': '', '#': '',
              '$': '', '%': '', '^': '', '&': '', '*': '', '(': '', ')': '', '-': '-', '=': '', '+': '',
              ':': '', ';': '', '<': '', '>': '', '\'': '', '"': '', '\\': '', '/': '', '№': '',
              '[': '', ']': '', '{': '', '}': '', 'ґ': '', 'ї': '', 'є': '', 'Ґ': 'g', 'Ї': 'i',
              'Є': 'e', '—': ''}
    return name.translate(str.maketrans(slovar))
inputString = """Амур 4416 350 1855 Яблоневый хребет Татарский пролив
Лена 4400 488 2490 Байкальский хребет Море Лаптевых
Обь 4070 400 2990 Предгорья Алтая Карское море
Иртыш 4248 323 1643 Китай Обь
Енисей 3487 600 2580 Восточный Саян Карское море
Волга 3530 255 1360 Валдайская возвышенность Каспийское море
Колыма 2129 44 643 Хребет Черского Восточно — сибирское море
Урал 2428 54 231 Южный Урал Каспийское море
Дон 2200 45 504 Средне-русская возвышенность Азовское море
Кама 1805 130 507 Верхне — Камская возвышенность Волга
Печора 1809 130 322 Северный Урал Баренцево море
Ангара 1779 62 1039 Байкал Енисей
Селенга 1024 14 447 Монголия Байкал
Кубань 870 11 58 Кавказ Азовское море
Нева 74 281 Ладожское озеро Балтийское море """
splitted = [i.split("\t") for i in inputString.split("\n")]
#print(splitted)
def formatData(data):
    """Turn one raw table cell into a Prolog term (int or quoted atom)."""
    stripped = data.strip()
    if not stripped.isdigit():
        # Non-numeric cell: transliterate and wrap in Prolog single quotes.
        return "'%s'" % transliterate(stripped)
    return int(stripped)
# Print every parsed row as a Prolog fact of the form river(F1, F2, ...).
for record in splitted:
    converted = [formatData(field) for field in record]
    body = ", ".join(str(term) for term in converted)
    print(f"river({body}).")
| ru | 0.945939 | Амур 4416 350 1855 Яблоневый хребет Татарский пролив Лена 4400 488 2490 Байкальский хребет Море Лаптевых Обь 4070 400 2990 Предгорья Алтая Карское море Иртыш 4248 323 1643 Китай Обь Енисей 3487 600 2580 Восточный Саян Карское море Волга 3530 255 1360 Валдайская возвышенность Каспийское море Колыма 2129 44 643 Хребет Черского Восточно — сибирское море Урал 2428 54 231 Южный Урал Каспийское море Дон 2200 45 504 Средне-русская возвышенность Азовское море Кама 1805 130 507 Верхне — Камская возвышенность Волга Печора 1809 130 322 Северный Урал Баренцево море Ангара 1779 62 1039 Байкал Енисей Селенга 1024 14 447 Монголия Байкал Кубань 870 11 58 Кавказ Азовское море Нева 74 281 Ладожское озеро Балтийское море #print(splitted) | 2.993434 | 3 |
bootleg/utils/sentence_utils.py | Mehrad0711/bootleg | 0 | 6619197 | <gh_stars>0
from collections import defaultdict
from math import ceil
from transformers.tokenization_utils import _is_control
from bootleg.symbols.constants import CLS_BERT, PAD, PAD_BERT, SEP_BERT
def determine_windowsX(
    sentence, spans, aliases_seen_by_model, maxlen, mincontext, sanity_check=False
):
    """Truncate <sentence> into windows of <maxlen> tokens each.
    * Returns a list of windows. Each window is a tuple with:
        - The offset and endpos, indicating where it starts and ends in sentence.
        - The first and the last spans that start (but maybe not end) in the window.
        - The list of spans, among those from the above line, that lie within aliases2see.
    * Each window will have exactly <maxlen> tokens unless the sentence itself is shorter than that.
    * Windows may overlap. Conversely, large portions of the sentence may not exist in any window,
      particularly when they don't contain any aliases2see.
    * Windows are determined through a greedy packing approach that guarantees that:
        - Every alias in aliases2see is present in at least one window.
        - Every alias in aliases2see is present in exactly one window in which it's marked as "to predict".
        - The alias may share this unique window with other aliases, some of which may be 'aliases2see' as well.
        - In this unique window, the alias is guaranteed to have at least <mincontext> context on its
          left and right.
        - The exception to the above rule is if the sentence boundaries are closer than <mincontext> words.
        - In that case, more words are taken from the "other" direction (e.g., right) up to <maxlen>,
          if possible.
        - Given multiple aliases to predict in the same window, the window is centered around its leftmost and
          rightmost aliases, making sure their left and right contexts---respectively---are equal.
        - For all of the above, an alias's position is taken as its first token.

    Args:
        sentence: sentence (list of tokens)
        spans: List of mention spans
        aliases_seen_by_model: List of indexes into spans of the mentions that the model will use in the forward()
            This may not be all aliases due to removing weak labels
        maxlen: maximum length of window size
        mincontext: minimum length of left/right context around a mention
        sanity_check: whether to sanity check the above conditions
    Returns: List of window boundaries in terms of tokens and mentions
    """
    assert 2 * mincontext < maxlen
    windows = []
    # Greedy outer scan: each iteration packs one window anchored on the next
    # not-yet-covered alias that the model actually sees.
    alias_idx = 0
    while alias_idx < len(spans):
        if alias_idx not in aliases_seen_by_model:
            alias_idx += 1
            continue
        window_first_alias = alias_idx
        window_last_alias = alias_idx
        # left-most possible start position is first span - mincontext
        max_possible_offset = max(0, spans[alias_idx][0] - mincontext)
        window_aliases2see = [window_first_alias]
        # Expand with more aliases within the same window
        while alias_idx + 1 < len(spans):
            # Stop if adding another alias would prevent retaining mincontext to the left of window_first_alias
            # We +1 to the mincontext because the ending span is exclusive
            # E.g., if sentence is ["alias", "##1", "alias", "##2", "alias", "##3", "##5"] spans [0,2], [2,4], [4,7]
            # To have mincontext = 1 around the start of all aliases, we need final sentence of [0:6] (6 is exclusive)
            # Therefore the condition is start span (i.e., 4) plus mincontext (i.e., 1) plus 1 (i.e., total of 6)
            if (
                min(spans[alias_idx + 1][0] + mincontext + 1, len(sentence))
                > max_possible_offset + maxlen
            ):
                break
            alias_idx += 1
            window_last_alias = (
                alias_idx if alias_idx in aliases_seen_by_model else window_last_alias
            )
            if alias_idx in aliases_seen_by_model:
                window_aliases2see.append(alias_idx)
        # Center the window between the first and last packed alias starts.
        center = (spans[window_first_alias][0] + spans[window_last_alias][0]) // 2
        # As the window_offset is inclusive while endpos is exclusive we make sure endpos gets +1 more than offset
        # (e.g. if maxlen is 6, offset gets -2 while endpos gets +3). This ensures balance on both sides.
        window_offset = max(center - ((maxlen - 1) // 2), 0)
        window_endpos = min(center + int(ceil(maxlen / 2)), len(sentence))
        assert (
            window_endpos - window_offset <= maxlen
        ), f"windows_endpos {window_endpos} - window_startpos {window_offset} is more than maxlen {maxlen}"
        # If the window came out short (clipped at a sentence boundary), grow the
        # other side back toward maxlen, re-clipping to [0, len(sentence)].
        window_endpos += max(maxlen - (window_endpos - window_offset), 0)
        window_endpos = min(window_endpos, len(sentence))
        window_offset -= max(maxlen - (window_endpos - window_offset), 0)
        window_offset = max(window_offset, 0)
        # Adjust the alias indices based on what spans are in the sentence window now
        while window_first_alias > 0:
            if spans[window_first_alias - 1][0] < window_offset:
                break
            window_first_alias -= 1
        while window_last_alias + 1 < len(spans):
            if spans[window_last_alias + 1][0] >= window_endpos:
                break
            window_last_alias += 1
        windows.append(
            (
                window_offset,
                window_endpos,
                window_first_alias,
                window_last_alias + 1,
                window_aliases2see,
            )
        )
        # Move past the last alias packed into this window.
        alias_idx += 1
    if sanity_check:
        # Verify every guarantee stated in the docstring for each alias.
        for alias_idx, (offset, endpos) in enumerate(spans):
            assert 0 <= offset and offset < endpos and endpos <= len(sentence)
            windowX = [
                (o, e, f, l, A)
                for o, e, f, l, A in windows
                if f <= alias_idx and alias_idx < l
            ]
            assert len(windowX) >= int(alias_idx in aliases_seen_by_model)
            window = [(o, e, f, l, A) for o, e, f, l, A in windows if alias_idx in A]
            assert len(window) == int(alias_idx in aliases_seen_by_model)
            if alias_idx in aliases_seen_by_model:
                assert window[0] in windowX
                window_offset, window_endpos, _, _, _ = window[0]
                assert window_offset <= max(offset - mincontext, 0)
                assert min(offset + mincontext, len(sentence)) <= window_endpos + 1
                assert window_endpos - window_offset == min(maxlen, len(sentence))
    return windows
def determine_windows(
    sentence,
    spans,
    aliases_seen_by_model,
    maxlen,
    mincontext,
    max_aliases,
    sanity_check=False,
):
    """Refer to determine_windowsX(.) for documentation.
    This function simply postprocesses the output of
    determine_windowsX(.) to handle max_aliases. To do so, it replicates
    each window whose number of aliases exceeds max_aliases. The
    resulting sub-windows may overlap in their sets of aliases but not
    in their aliases2see.

    Args:
        sentence: sentence (list of tokens)
        spans: List of mention spans
        aliases_seen_by_model: List of indexes into spans of the mentions that the model will use in the forward()
            This may not be all aliases due to removing weak labels
        maxlen: maximum length of window size
        mincontext: minimum length of left/right context around a mention
        max_aliases: maximum number of mentions in a context window
        sanity_check: whether to sanity check the above conditions
    Returns: List of window boundaries with respect to tokens and mentions
    """
    windows = determine_windowsX(
        sentence, spans, aliases_seen_by_model, maxlen, mincontext, sanity_check
    )
    output = []
    for window in windows:
        (
            split_offset,
            split_endpos,
            split_first_alias,
            split_last_alias,
            split_aliases2see,
        ) = window
        # Determine the <number of aliases in window> and <number of sub-windows required to accommodate max_aliases>
        window_width = split_last_alias - split_first_alias
        num_subwindows = ceil(window_width / max_aliases)
        # Determine the <average width of sub-window> and <some allowance for extra aliases per sub-window>
        subwindow_width = ceil(window_width / num_subwindows)
        subwindow_overflow = max(0, max_aliases - subwindow_width) // 2
        # Window already fits within max_aliases: pass it through untouched.
        if num_subwindows == 1:
            output.append(window)
            continue
        current_alias = split_first_alias
        for _ in range(num_subwindows):
            # [current_alias, last_alias) are this sub-window's "to predict" aliases;
            # the overflow padding widens the visible alias range on both sides.
            last_alias = min(current_alias + subwindow_width, split_last_alias)
            current_alias_ = max(split_first_alias, current_alias - subwindow_overflow)
            last_alias_ = min(last_alias + subwindow_overflow, split_last_alias)
            subwindow_aliases2see = [
                x for x in split_aliases2see if current_alias <= x and x < last_alias
            ]
            if len(subwindow_aliases2see):
                assert last_alias_ - current_alias_ <= max_aliases
                output.append(
                    (
                        split_offset,
                        split_endpos,
                        current_alias_,
                        last_alias_,
                        subwindow_aliases2see,
                    )
                )
            current_alias = last_alias
    return output
def pad_sentence(sentence, pad_token, maxlen):
    """Right-pad *sentence* with *pad_token* so its length equals *maxlen*."""
    deficit = maxlen - len(sentence)
    assert deficit >= 0
    return sentence + [pad_token] * deficit
def split_sentence(
    max_aliases,
    phrase,
    spans,
    aliases,
    aliases_seen_by_model,
    seq_len,
    is_bert,
    tokenizer,
    sanity_check=False,
):
    """
    - Splits a sentence into windows using determine_windows(.)
    - Returns 5 'parallel' lists, where the corresponding positions describe a single window:
        * window_span_idxs[i] has the alias indices that start in the i^th window.
        * window_aliases2see[i] has the alias indices (relative to window_span_idxs[i], starting at zero) that
          lie within aliases_to_predict.
        * window_spans[i] has the [start, end] token spans for the spans in window_span_idxs[i], relative to the
          start of the i^th window.
        * window_sentences[i] has the tokens of the i^th window.
        * window_sentence_pos_idxs[i] has, for each token of the i^th window, its position in the full
          tokenized sentence (negative sentinels for CLS/SEP/PAD).

    Args:
        max_aliases: maximum number of mentions in a context window
        phrase: sentence
        spans: List of mention spans (word-level, before tokenization)
        aliases: List of all mention strings (unused here; kept for interface compatibility)
        aliases_seen_by_model: List of indexes into spans of the mentions that the model will use in the forward()
            This may not be all aliases due to removing weak labels
        seq_len: maximum length of window size
        is_bert: is the tokenizer a BERT one with CLS/SEP tokens
        tokenizer: input tokenizer
        sanity_check: whether to sanity check the above conditions
    Returns: list of window mention indices, list of window mention indices
        (relative to window_span_idxs[i], starting at zero), list of window spans, list of tokenized sentences,
        list of token positions (relative to tokenized entire sentence)
    """
    sentence, aliases2see, maxlen, old_spans = (
        phrase,
        aliases_seen_by_model,
        seq_len,
        spans,
    )
    # Reserve two slots for the [CLS]/[SEP] specials added later.
    maxlen_prepad = maxlen
    if is_bert:
        maxlen_prepad = maxlen_prepad - 2
    old_len = len(sentence.split())
    assert old_spans == list(
        sorted(old_spans)
    ), f"You spans {old_spans} for ***{phrase}*** are not in sorted order from smallest to largest"
    # Convert word-level spans to token-level spans via the tokenizer mapping.
    old_to_new, sentence = get_old_to_new_word_idx_mapping(phrase, tokenizer)
    spans = []
    for sp in old_spans:
        assert sp[0] < sp[1], (
            f"We assume all mentions are at least length 1, but you have span {sp} where the right index is not "
            f"greater than the left with phrase ***{phrase}***. Each span is in "
            f"[0, length of sentence={old_len}], both inclusive"
        )
        assert (
            sp[0] >= 0 and sp[1] >= 0 and sp[1] <= old_len and sp[0] <= old_len
        ), f"The span of {sp} with {phrase} was not between [0, length of sentence={old_len}], both inclusive"
        # We should have the right side be old_to_new[sp[1]][0], but due do tokenization occasionally removing rare
        # unicode characters, this way ensures the right span is greater than the left because, in that case,
        # we will have old_to_new[sp[1]-1][-1] == old_to_new[sp[0]][0] (see test case in test_sentence_utils.py)
        spans.append([old_to_new[sp[0]][0], old_to_new[sp[1] - 1][-1] + 1])
        assert spans[-1][0] < spans[-1][1], (
            f"Adjusted spans for old span {sp} and phrase ***{phrase}*** have the right side not greater than "
            f"the left side. This might be due to a spans being on a unicode character removed by tokenization."
        )
    (
        window_span_idxs,
        window_aliases2see,
        window_spans,
        window_sentences,
        window_sentence_pos_idxs,
    ) = ([], [], [], [], [])
    # Sub-divide sentence into windows, respecting maxlen_prepad and max_aliases per window.
    # This retains at least maxlen_prepad/5 context to the left and right of each alias2predict.
    windows = determine_windows(
        sentence,
        spans,
        aliases2see,
        maxlen_prepad,
        max(1, maxlen_prepad // 5),
        max_aliases,
        sanity_check,
    )
    for (
        split_offset,
        split_endpos,
        split_first_alias,
        split_last_alias,
        split_aliases2see,
    ) in windows:
        sub_sentence = sentence[split_offset:split_endpos]
        sub_sentence_pos = list(range(split_offset, split_endpos))
        if is_bert:
            # Sentinels: -2 marks [CLS], -3 marks [SEP], -1 marks padding.
            sub_sentence = pad_sentence(
                [CLS_BERT] + sub_sentence + [SEP_BERT], PAD_BERT, maxlen
            )
            sub_sentence_pos = pad_sentence([-2] + sub_sentence_pos + [-3], -1, maxlen)
        else:
            sub_sentence = pad_sentence(sub_sentence, PAD, maxlen)
            sub_sentence_pos = pad_sentence(sub_sentence_pos, -1, maxlen)
        window_sentences.append(sub_sentence)
        window_sentence_pos_idxs.append(sub_sentence_pos)
        window_span_idxs.append([])
        window_aliases2see.append([])
        window_spans.append([])
        current_alias_idx = split_first_alias
        for span_offset, span_endpos in spans[split_first_alias:split_last_alias]:
            window_span_idxs[-1].append(current_alias_idx)
            if current_alias_idx in split_aliases2see:
                assert current_alias_idx in aliases2see
                window_aliases2see[-1].append(current_alias_idx - split_first_alias)
            span_offset += int(is_bert)  # add one for BERT to account for [CLS]
            span_endpos += int(is_bert)
            adjusted_endpos = span_endpos - split_offset
            # If it's over the maxlen, adjust to be at the [CLS] token
            if adjusted_endpos > maxlen:
                adjusted_endpos = maxlen
                if is_bert:
                    # Adjust so the end token is over the [CLS]
                    adjusted_endpos -= 1
            assert span_offset - split_offset >= 0, (
                f"The first span of {span_offset - split_offset} less than 0. "
                f"Something went wrong in the span adjustment"
            )
            window_spans[-1].append([span_offset - split_offset, adjusted_endpos])
            current_alias_idx += 1
    return (
        window_span_idxs,
        window_aliases2see,
        window_spans,
        window_sentences,
        window_sentence_pos_idxs,
    )
def get_old_to_new_word_idx_mapping(sentence, tokenizer):
    """Method takes the original sentence and tokenized_sentence and builds a
    mapping from the original sentence spans (split on " ") to the new sentence
    spans (after tokenization). This will account for tokenizers splitting on
    grammar and subwordpiece tokens from BERT.
    For example:
    phrase: 'Alexander få Baldwin III (born April 3, 1958, in Massapequa, Long Island, New York, USA).'
    tokenized sentence: ['Alexander', 'f', '##å', 'Baldwin', 'III', '(', 'born', 'April', '3', ',', '1958',
    ',', 'in', 'Mass', '##ap', '##e', '##qua', ',', 'Long', 'Island', ',',
    'New', 'York', ',', 'USA', ')']
    Output: {0: [0], 1: [1, 2], 2: [3], 3: [4], 4: [5, 6], 5: [7], 6: [8, 9], 7: [10, 11], 8: [12],
    9: [13, 14, 15, 16, 17], 10: [18], 11: [19, 20], 12: [21], 13: [22, 23], 14: [24, 25]}
    We use this to convert spans from original sentence splitting to new sentence splitting.
    Args:
        sentence: sentence
        tokenizer: tokenizer
    Returns: Dict of word index to token index, tokenized sentence
    """
    old_split = sentence.split()
    final_tokenized_sentence = []
    old_w = 0
    new_w = 0
    # Counts words the tokenizer dropped entirely (e.g. control characters).
    lost_words = 0
    old_to_new = defaultdict(list)
    while old_w < len(old_split):
        old_word = old_split[old_w]
        if old_w > 0:
            # This will allow tokenizers that use spaces to know it's a middle word
            old_word = " " + old_word
        tokenized_word = [t for t in tokenizer.tokenize(old_word) if len(t) > 0]
        # due to https://github.com/huggingface/transformers/commit/21ed3a6b993eba06e7f4cf7720f4a07cc8a0d4c2,
        # certain characters are cleaned and removed
        # if this is the case, we need to adjust the spans so the token is eaten
        if len(tokenized_word) <= 0:
            print(
                f"TOKENIZED WORD IS LENGTH 0. It SHOULD BE WEIRD CHARACTERS WITH ORDS",
                [ord(c) for c in old_word],
                "AND IS CONTROL",
                [_is_control(c) for c in old_word],
            )
            # if this is the last word, assign it to the previous word
            if old_w + 1 >= len(old_split):
                old_to_new[old_w] = [new_w - 1]
                lost_words += 1
            else:
                # assign the span specifically to the new_w
                old_to_new[old_w] = [new_w]
                lost_words += 1
        else:
            new_w_ids = list(range(new_w, new_w + len(tokenized_word)))
            old_to_new[old_w] = new_w_ids
            final_tokenized_sentence.extend(tokenized_word)
            new_w = new_w + len(tokenized_word)
        old_w += 1
    old_to_new = dict(old_to_new)
    # Verify that each word from both sentences are in the mappings.
    len_tokenized_sentence = len(final_tokenized_sentence)
    # Bug fix: the original dropped into pdb.set_trace() here when the
    # incremental tokenization disagreed with whole-sentence tokenization,
    # which hangs any non-interactive run. Tokenize once and assert instead.
    retokenized = tokenizer.tokenize(sentence)
    assert final_tokenized_sentence == retokenized, (
        f"Per-word tokenization {final_tokenized_sentence} does not match "
        f"whole-sentence tokenization {retokenized} for {sentence}"
    )
    assert len_tokenized_sentence + lost_words >= len(
        old_split
    ), f"Tokenize has compressed words that weren't lost {old_split} versus {retokenized}"
    assert all(len(val) > 0 for val in old_to_new.values()), f"{old_to_new}, {sentence}"
    assert set(range(len_tokenized_sentence)) == set(
        [v for val in old_to_new.values() for v in val]
    ), f"{old_to_new}, {sentence}"
    assert set(range(len(old_split))) == set(
        old_to_new.keys()
    ), f"{old_to_new}, {sentence}"
    return old_to_new, final_tokenized_sentence
| from collections import defaultdict
from math import ceil
from transformers.tokenization_utils import _is_control
from bootleg.symbols.constants import CLS_BERT, PAD, PAD_BERT, SEP_BERT
def determine_windowsX(
sentence, spans, aliases_seen_by_model, maxlen, mincontext, sanity_check=False
):
"""Truncate <sentence> into windows of <maxlen> tokens each.
* Returns a list of windows. Each window is a tuple with:
- The offset and endpos, indicating where it starts and ends in sentence.
- The first and the last spans that start (but maybe not end) in the window.
- The list of spans, among those from the above line, that lie within aliases2see.
* Each window will have exactly <maxlen> tokens unless the sentence itself is shorter than that.
* Windows may overlap. Conversely, large portions of the sentence may not exist in any window, particularly when
they don't contain any aliases2see.
* Windows are determined through a greedy packing appraoch that guarantees that:
- Every alias in aliases2see is present in at least one window.
- Every alias in aliases2see is present in exactly one window in which it's marked as "to predict".
- The alias may share this unique window with other aliases, some of which may be 'aliases2see' as well.
- In this unique window, the alias is guaranteed to have at least <mincontext> context on its left and right.
- The exception to the above rule is if the sentence boundaries are closer than <mincontext> words.
- In that case, more words are taken from the "other" direction (e.g., right) up to <maxlen>, if possible.
- Given multiple aliases to predict in the same window, the window is centered around its leftmost and
rightmost aliases, making sure their left and right contexts---respectively---are equal.
- For all of the above, an alias's position is taken as its first token.
- Something tells me all of the above just sounds like legalese. I hope it doesn't.
Args:
sentence: sentence
spans: List of mention spans
aliases_seen_by_model: List of indexes into spans of the mentions that the model will use in the forward()
This may not be all aliases due to removing weak labels
maxlen: maximum length of window size
mincontext: minimum length of left/right context around a mention
sanity_check: whether to sanity check the above conditions
Returns: List of window boundaries in terms of tokens and mentions
"""
assert 2 * mincontext < maxlen
windows = []
alias_idx = 0
while alias_idx < len(spans):
if alias_idx not in aliases_seen_by_model:
alias_idx += 1
continue
window_first_alias = alias_idx
window_last_alias = alias_idx
# left-most possible start position is first span - mincontext
max_possible_offset = max(0, spans[alias_idx][0] - mincontext)
window_aliases2see = [window_first_alias]
# Expand with more aliases within the same window
while alias_idx + 1 < len(spans):
# Stop if adding another alias would prevent retaining mincontext to the left of window_first_alias
# We +1 to the mincontext because the ending span is exclusive
# E.g., if sentence is ["alias", "##1", "alias", "##2", "alias", "##3", "##5"] spans [0,2], [2,4], [4,7]
# To have mincontext = 1 around the start of all aliases, we need final sentence of [0:6] (6 is exclusive)
# Therefore the condition is start span (i.e., 4) plus mincontext (i.e., 1) plus 1 (i.e., total of 6)
if (
min(spans[alias_idx + 1][0] + mincontext + 1, len(sentence))
> max_possible_offset + maxlen
):
break
alias_idx += 1
window_last_alias = (
alias_idx if alias_idx in aliases_seen_by_model else window_last_alias
)
if alias_idx in aliases_seen_by_model:
window_aliases2see.append(alias_idx)
# print("MAX LEN", maxlen, "SENT LEN", len(sentence))
# print("first", window_first_alias, "second", window_last_alias, "spans", spans)
center = (spans[window_first_alias][0] + spans[window_last_alias][0]) // 2
# print("Center", center)
# As the window_offset is inclusive while endpos is exclusive we make sure endpos gets +1 more than offset
# (e.g. if maxlen is 6, offset gets -2 while endpos gets +3). This ensure balance on both sides.
window_offset = max(center - ((maxlen - 1) // 2), 0)
window_endpos = min(center + int(ceil(maxlen / 2)), len(sentence))
# print("Start offset", window_offset, "start end", window_endpos)
assert (
window_endpos - window_offset <= maxlen
), f"windows_endpos {window_endpos} - window_startpos {window_offset} is more than maxlen {maxlen}"
# In the case the window_endpos - window_offset > maxlen, adjust endpos to be maxlen
window_endpos += max(maxlen - (window_endpos - window_offset), 0)
# In len(sentence) < maxlen, adjust endpos
window_endpos = min(window_endpos, len(sentence))
# In the case the window_endpos - window_offset > maxlen, adjust window_offset to be maxlen
window_offset -= max(maxlen - (window_endpos - window_offset), 0)
window_offset = max(window_offset, 0)
# print("Adjusted offset", window_offset, "Adjusted end", window_endpos)
# Adjust the alias indices based on what spans are in the sentence window or now
while window_first_alias > 0:
if spans[window_first_alias - 1][0] < window_offset:
break
window_first_alias -= 1
while window_last_alias + 1 < len(spans):
if spans[window_last_alias + 1][0] >= window_endpos:
break
window_last_alias += 1
windows.append(
(
window_offset,
window_endpos,
window_first_alias,
window_last_alias + 1,
window_aliases2see,
)
)
alias_idx += 1
if sanity_check:
for alias_idx, (offset, endpos) in enumerate(spans):
assert 0 <= offset and offset < endpos and endpos <= len(sentence)
windowX = [
(o, e, f, l, A)
for o, e, f, l, A in windows
if f <= alias_idx and alias_idx < l
]
assert len(windowX) >= int(alias_idx in aliases_seen_by_model)
window = [(o, e, f, l, A) for o, e, f, l, A in windows if alias_idx in A]
assert len(window) == int(alias_idx in aliases_seen_by_model)
if alias_idx in aliases_seen_by_model:
assert window[0] in windowX
window_offset, window_endpos, _, _, _ = window[0]
assert window_offset <= max(offset - mincontext, 0)
assert min(offset + mincontext, len(sentence)) <= window_endpos + 1
assert window_endpos - window_offset == min(maxlen, len(sentence))
return windows
def determine_windows(
sentence,
spans,
aliases_seen_by_model,
maxlen,
mincontext,
max_aliases,
sanity_check=False,
):
"""Refer to determine_windowsX(.) for documentation.
This function simply postprocesses the output of
determine_windowsX(.) to handle max_aliases. To do so, it replicates
each window whose number of aliases exceeds max_aliases. The
resulting sub-windows may overlap in their sets of aliases but not
in their aliases2see.
Args:
sentence: sentence
spans: List of mention spans
aliases_seen_by_model: List of indexes into spans of the mentions that the model will use in the forward()
This may not be all aliases due to removing weak labels
maxlen: maximum length of window size
mincontext: minimum length of left/right context around a mention
max_aliases: maximum number of mentions in a context window
sanity_check: whether to sanity check the above conditions
Returns: List of window boundaries with respect to tokens and mentions
"""
windows = determine_windowsX(
sentence, spans, aliases_seen_by_model, maxlen, mincontext, sanity_check
)
output = []
for window in windows:
(
split_offset,
split_endpos,
split_first_alias,
split_last_alias,
split_aliases2see,
) = window
# Determine the <number of aliases in window> and <number of sub-windows required to accomodate max_aliases>
window_width = split_last_alias - split_first_alias
num_subwindows = ceil(window_width / max_aliases)
# Determine the <average width of sub-window> and <some allowance for extra aliases per sub-window>
subwindow_width = ceil(window_width / num_subwindows)
subwindow_overflow = max(0, max_aliases - subwindow_width) // 2
if num_subwindows == 1:
output.append(window)
continue
current_alias = split_first_alias
for _ in range(num_subwindows):
last_alias = min(current_alias + subwindow_width, split_last_alias)
current_alias_ = max(split_first_alias, current_alias - subwindow_overflow)
last_alias_ = min(last_alias + subwindow_overflow, split_last_alias)
subwindow_aliases2see = [
x for x in split_aliases2see if current_alias <= x and x < last_alias
]
if len(subwindow_aliases2see):
assert last_alias_ - current_alias_ <= max_aliases
output.append(
(
split_offset,
split_endpos,
current_alias_,
last_alias_,
subwindow_aliases2see,
)
)
current_alias = last_alias
return output
def pad_sentence(sentence, pad_token, maxlen):
    """Right-pad the token list *sentence* with *pad_token* so it has exactly *maxlen* entries."""
    padding_needed = maxlen - len(sentence)
    # A negative pad count would mean the sentence already exceeds maxlen.
    assert padding_needed >= 0
    return sentence + [pad_token] * padding_needed
def split_sentence(
    max_aliases,
    phrase,
    spans,
    aliases,
    aliases_seen_by_model,
    seq_len,
    is_bert,
    tokenizer,
    sanity_check=False,
):
    """
    - Splits a sentence into windows using determine_windows(.)
    - Returns 4 'parallel' lists, where the corresponding positions describe a single window:
        * window_span_idxs[i] has the alias indices that start in the i^th window.
        * window_aliases2see[i] has the alias indices (relative to window_span_idxs[i], starting at zero) that
          lie within aliases_to_predict.
        * window_spans[i] has the string-formatted spans for the spans in window_span_idxs[i], relative to the start
          of the i^th window.
        * window_sentences[i] has the tokens of the i^th window.
    Args:
        max_aliases: maximum number of mentions in a context window
        phrase: sentence
        spans: List of mention spans
        aliases: List of all mention strings
        aliases_seen_by_model: List of indexes into spans of the mentions that the model will use in the forward()
            This may not be all aliases due to removing weak labels
        seq_len: maximum length of window size
        is_bert: is the tokenizer a BERT one with CLS/SEP tokens
        tokenizer: input tokenizer
        sanity_check: whether to sanity check the above conditions
    Returns: list of window mention indices, list of window mention indices
        (relative to window_span_idxs[i], starting at zero), list of tokenized sentences,
        list of token positions (relative to tokenized entire sentence)
    """
    # Rename inputs to the internal vocabulary used below.
    sentence, aliases2see, maxlen, old_spans = (
        phrase,
        aliases_seen_by_model,
        seq_len,
        spans,
    )
    # Reserve two slots for the [CLS]/[SEP] special tokens when using BERT.
    maxlen_prepad = maxlen
    if is_bert:
        maxlen_prepad = maxlen_prepad - 2
    old_len = len(sentence.split())
    assert old_spans == list(
        sorted(old_spans)
    ), f"You spans {old_spans} for ***{phrase}*** are not in sorted order from smallest to largest"
    # Re-tokenize and remap every word-level span into subword-token space.
    old_to_new, sentence = get_old_to_new_word_idx_mapping(phrase, tokenizer)
    spans = []
    for sp in old_spans:
        assert sp[0] < sp[1], (
            f"We assume all mentions are at least length 1, but you have span {sp} where the right index is not "
            f"greater than the left with phrase ***{phrase}***. Each span is in "
            f"[0, length of sentence={old_len}], both inclusive"
        )
        assert (
            sp[0] >= 0 and sp[1] >= 0 and sp[1] <= old_len and sp[0] <= old_len
        ), f"The span of {sp} with {phrase} was not between [0, length of sentence={old_len}], both inclusive"
        # We should have the right side be old_to_new[sp[1]][0], but due do tokenization occasionally removing rare
        # unicode characters, this way ensures the right span is greater than the left because, in that case,
        # we will have old_to_new[sp[1]-1][-1] == old_to_new[sp[0]][0] (see test case in test_sentence_utils.py)
        spans.append([old_to_new[sp[0]][0], old_to_new[sp[1] - 1][-1] + 1])
        assert spans[-1][0] < spans[-1][1], (
            f"Adjusted spans for old span {sp} and phrase ***{phrase}*** have the right side not greater than "
            f"the left side. This might be due to a spans being on a unicode character removed by tokenization."
        )
    (
        window_span_idxs,
        window_aliases2see,
        window_spans,
        window_sentences,
        window_sentence_pos_idxs,
    ) = ([], [], [], [], [])
    # Sub-divide sentence into windows, respecting maxlen_prepad and max_aliases per window.
    # This retains at least maxlen_prepad/5 context to the left and right of each alias2predict.
    windows = determine_windows(
        sentence,
        spans,
        aliases2see,
        maxlen_prepad,
        max(1, maxlen_prepad // 5),
        max_aliases,
        sanity_check,
    )
    for (
        split_offset,
        split_endpos,
        split_first_alias,
        split_last_alias,
        split_aliases2see,
    ) in windows:
        # Slice out the window's tokens and remember their original positions.
        sub_sentence = sentence[split_offset:split_endpos]
        sub_sentence_pos = list(range(split_offset, split_endpos))
        if is_bert:
            # Sentinel positions: -2 marks [CLS], -3 marks [SEP], -1 is padding.
            sub_sentence = pad_sentence(
                [CLS_BERT] + sub_sentence + [SEP_BERT], PAD_BERT, maxlen
            )
            sub_sentence_pos = pad_sentence([-2] + sub_sentence_pos + [-3], -1, maxlen)
        else:
            sub_sentence = pad_sentence(sub_sentence, PAD, maxlen)
            sub_sentence_pos = pad_sentence(sub_sentence_pos, -1, maxlen)
        window_sentences.append(sub_sentence)
        window_sentence_pos_idxs.append(sub_sentence_pos)
        window_span_idxs.append([])
        window_aliases2see.append([])
        window_spans.append([])
        # Record every alias that starts inside this window, making its span
        # relative to the window start.
        current_alias_idx = split_first_alias
        for span_offset, span_endpos in spans[split_first_alias:split_last_alias]:
            window_span_idxs[-1].append(current_alias_idx)
            if current_alias_idx in split_aliases2see:
                assert current_alias_idx in aliases2see
                window_aliases2see[-1].append(current_alias_idx - split_first_alias)
            span_offset += int(is_bert)  # add one for BERT to account for [CLS]
            span_endpos += int(is_bert)
            adjusted_endpos = span_endpos - split_offset
            # If it's over the maxlen, adjust to be at the [CLS] token
            if adjusted_endpos > maxlen:
                adjusted_endpos = maxlen
                if is_bert:
                    # Adjust so the end token is over the [CLS]
                    adjusted_endpos -= 1
            assert span_offset - split_offset >= 0, (
                f"The first span of {span_offset - split_offset} less than 0. "
                f"Something went wrong in the span adjustment"
            )
            window_spans[-1].append([span_offset - split_offset, adjusted_endpos])
            current_alias_idx += 1
    return (
        window_span_idxs,
        window_aliases2see,
        window_spans,
        window_sentences,
        window_sentence_pos_idxs,
    )
def get_old_to_new_word_idx_mapping(sentence, tokenizer):
    """Method takes the original sentence and tokenized_sentence and builds a
    mapping from the original sentence spans (split on " ") to the new sentence
    spans (after tokenization). This will account for tokenizers splitting on
    grammar and subwordpiece tokens from BERT.
    For example:
        phrase: 'Alexander få Baldwin III (born April 3, 1958, in Massapequa, Long Island, New York, USA).'
        tokenized sentence: ['Alexander', 'f', '##å', 'Baldwin', 'III', '(', 'born', 'April', '3', ',', '1958',
                             ',', 'in', 'Mass', '##ap', '##e', '##qua', ',', 'Long', 'Island', ',',
                             'New', 'York', ',', 'USA', ')']
        Output: {0: [0], 1: [1, 2], 2: [3], 3: [4], 4: [5, 6], 5: [7], 6: [8, 9], 7: [10, 11], 8: [12],
                 9: [13, 14, 15, 16, 17], 10: [18], 11: [19, 20], 12: [21], 13: [22, 23], 14: [24, 25]}
    We use this to convert spans from original sentence splitting to new sentence splitting.
    Args:
        sentence: sentence
        tokenizer: tokenizer (anything exposing a ``tokenize(str) -> list[str]`` method)
    Returns: Dict of word index to token index, tokenized sentence
    """
    old_split = sentence.split()
    final_tokenized_sentence = []
    old_w = 0  # index into the whitespace-split words
    new_w = 0  # index into the growing tokenized output
    lost_words = 0  # words whose tokenization came back empty (rare control chars)
    old_to_new = defaultdict(list)
    while old_w < len(old_split):
        old_word = old_split[old_w]
        if old_w > 0:
            # This will allow tokenizers that use spaces to know it's a middle word
            old_word = " " + old_word
        tokenized_word = [t for t in tokenizer.tokenize(old_word) if len(t) > 0]
        # due to https://github.com/huggingface/transformers/commit/21ed3a6b993eba06e7f4cf7720f4a07cc8a0d4c2,
        # certain characters are cleaned and removed
        # if this is the case, we need to adjust the spans so the token is eaten
        if len(tokenized_word) <= 0:
            print(
                f"TOKENIZED WORD IS LENGTH 0. It SHOULD BE WEIRD CHARACTERS WITH ORDS",
                [ord(c) for c in old_word],
                "AND IS CONTROL",
                [_is_control(c) for c in old_word],
            )
            lost_words += 1
            if old_w + 1 >= len(old_split):
                # the lost word is the last one: attach it to the previous token
                old_to_new[old_w] = [new_w - 1]
            else:
                # otherwise attach it to the next token to be produced
                old_to_new[old_w] = [new_w]
        else:
            new_w_ids = list(range(new_w, new_w + len(tokenized_word)))
            old_to_new[old_w] = new_w_ids
            final_tokenized_sentence.extend(tokenized_word)
            new_w = new_w + len(tokenized_word)
        old_w += 1
    old_to_new = dict(old_to_new)
    # Verify that each word from both sentences are in the mappings.
    # (The debugger trap that used to sit here is removed; the assert below
    # performs the identical check and fails loudly instead of hanging.)
    len_tokenized_sentence = len(final_tokenized_sentence)
    assert final_tokenized_sentence == tokenizer.tokenize(sentence)
    assert len_tokenized_sentence + lost_words >= len(
        old_split
    ), f"Tokenize has compressed words that weren't lost {old_split} versus {tokenizer.tokenize(sentence)}"
    assert all(len(val) > 0 for val in old_to_new.values()), f"{old_to_new}, {sentence}"
    assert set(range(len_tokenized_sentence)) == set(
        [v for val in old_to_new.values() for v in val]
    ), f"{old_to_new}, {sentence}"
    assert set(range(len(old_split))) == set(
        old_to_new.keys()
    ), f"{old_to_new}, {sentence}"
    return old_to_new, final_tokenized_sentence
Args: sentence: sentence spans: List of mention spans aliases_seen_by_model: List of indexes into spans of the mentions that the model will use in the forward() This may not be all aliases due to removing weak labels maxlen: maximum length of window size mincontext: minimum length of left/right context around a mention sanity_check: whether to sanity check the above conditions Returns: List of window boundaries in terms of tokens and mentions # left-most possible start position is first span - mincontext # Expand with more aliases within the same window # Stop if adding another alias would prevent retaining mincontext to the left of window_first_alias # We +1 to the mincontext because the ending span is exclusive # E.g., if sentence is ["alias", "##1", "alias", "##2", "alias", "##3", "##5"] spans [0,2], [2,4], [4,7] # To have mincontext = 1 around the start of all aliases, we need final sentence of [0:6] (6 is exclusive) # Therefore the condition is start span (i.e., 4) plus mincontext (i.e., 1) plus 1 (i.e., total of 6) # print("MAX LEN", maxlen, "SENT LEN", len(sentence)) # print("first", window_first_alias, "second", window_last_alias, "spans", spans) # print("Center", center) # As the window_offset is inclusive while endpos is exclusive we make sure endpos gets +1 more than offset # (e.g. if maxlen is 6, offset gets -2 while endpos gets +3). This ensure balance on both sides. # print("Start offset", window_offset, "start end", window_endpos) # In the case the window_endpos - window_offset > maxlen, adjust endpos to be maxlen # In len(sentence) < maxlen, adjust endpos # In the case the window_endpos - window_offset > maxlen, adjust window_offset to be maxlen # print("Adjusted offset", window_offset, "Adjusted end", window_endpos) # Adjust the alias indices based on what spans are in the sentence window or now Refer to determine_windowsX(.) for documentation. This function simply postprocesses the output of determine_windowsX(.) to handle max_aliases. 
To do so, it replicates each window whose number of aliases exceeds max_aliases. The resulting sub-windows may overlap in their sets of aliases but not in their aliases2see. Args: sentence: sentence spans: List of mention spans aliases_seen_by_model: List of indexes into spans of the mentions that the model will use in the forward() This may not be all aliases due to removing weak labels maxlen: maximum length of window size mincontext: minimum length of left/right context around a mention max_aliases: maximum number of mentions in a context window sanity_check: whether to sanity check the above conditions Returns: List of window boundaries with respect to tokens and mentions # Determine the <number of aliases in window> and <number of sub-windows required to accomodate max_aliases> # Determine the <average width of sub-window> and <some allowance for extra aliases per sub-window> - Splits a sentence into windows using determine_windows(.) - Returns 4 'parallel' lists, where the corresponding positions describe a single window: * window_span_idxs[i] has the alias indices that start in the i^th window. * window_aliases2see[i] has the alias indices (relative to window_span_idxs[i], starting at zero) that lie within aliases_to_predict. * window_spans[i] has the string-formatted spans for the spans in window_span_idxs[i], relative to the start of the i^th window. * window_sentences[i] has the tokens of the i^th window. 
Args: max_aliases: maximum number of mentions in a context window phrase: sentence spans: List of mention spans aliases: List of all mention strings aliases_seen_by_model: List of indexes into spans of the mentions that the model will use in the forward() This may not be all aliases due to removing weak labels seq_len: maximum length of window size is_bert: is the tokenizer a BERT one with CLS/SEP tokens tokenizer: input tokenizer sanity_check: whether to sanity check the above conditions Returns: list of window mention indices, list of window mention indices (relative to window_span_idxs[i], starting at zero), list of tokenized sentences, list of token positions (relative to tokenized entire sentence) # We should have the right side be old_to_new[sp[1]][0], but due do tokenization occasionally removing rare # unicode characters, this way ensures the right span is greater than the left because, in that case, # we will have old_to_new[sp[1]-1][-1] == old_to_new[sp[0]][0] (see test case in test_sentence_utils.py) # Sub-divide sentence into windows, respecting maxlen_prepad and max_aliases per window. # This retains at least maxlen_prepad/5 context to the left and right of each alias2predict. # add one for BERT to account for [CLS] # If it's over the maxlen, adjust to be at the [CLS] token # Adjust so the end token is over the [CLS] Method takes the original sentence and tokenized_sentence and builds a mapping from the original sentence spans (split on " ") to the new sentence spans (after tokenization). This will account for tokenizers splitting on grammar and subwordpiece tokens from BERT. For example: phrase: 'Alexander få Baldwin III (born April 3, 1958, in Massapequa, Long Island, New York, USA).' 
tokenized sentence: ['Alexander', 'f', '##å', 'Baldwin', 'III', '(', 'born', 'April', '3', ',', '1958', ',', 'in', 'Mass', '##ap', '##e', '##qua', ',', 'Long', 'Island', ',', 'New', 'York', ',', 'USA', ')'] Output: {0: [0], 1: [1, 2], 2: [3], 3: [4], 4: [5, 6], 5: [7], 6: [8, 9], 7: [10, 11], 8: [12], 9: [13, 14, 15, 16, 17], 10: [18], 11: [19, 20], 12: [21], 13: [22, 23], 14: [24, 25]} We use this to convert spans from original sentence splitting to new sentence splitting. Args: sentence: sentence tokenizer: tokenizer Returns: Dict of word index to token index, tokenized sentence # This will allow tokenizers that use spaces to know it's a middle word # due to https://github.com/huggingface/transformers/commit/21ed3a6b993eba06e7f4cf7720f4a07cc8a0d4c2, # certain characters are cleaned and removed # if this is the case, we need to adjust the spans so the token is eaten # print("OLD", old_w, old_word, "TOK", tokenized_word, "NEW W", new_w, "+", len(tokenized_word)) # if this is the last word, assign it to the previous word # assign the span specifically to the new_w # Verify that each word from both sentences are in the mappings | 2.659203 | 3 |
src/avm/usefull_patterns.py | Grosse-pasteque/AVM | 1 | 6619198 | from . import (
Pattern,
Function,
Method,
Module,
Class,
Union,
File,
Dict,
Int,
Str
)
Callable = Union(Method(), Function(), Class(is_init=False))
# lambda x: x**2
Point = Union(int, float)
Coords = [Point, Point]
# [0, 5.5]
PIXEL_VAL = Int(0, 255)
RGB = [PIXEL_VAL, 3]
RGBA = [PIXEL_VAL, 4]
Pixel = Union(RGB, RGBA)
# [255, 255, 255]
# [0, 0, 0, 255]
Image = [[Pixel, -1], -1]
"""
[
[[0, 0, 0], [0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0], [0, 0, 0]]
]
3x3 image full of black pixels
"""
Binnary = Str(startswith="0b", only="01", ignore={'prefix': True})
# "0b1010"
Octal = Str(startswith="0o", only="01234567", ignore={'prefix': True})
# "0o12"
Hexadecimal = Str(startswith="0x", only="0123456789abcdef", ignore={'prefix': True})
# "0xa"
Ascii = Str(only=[chr(x) for x in range(128)])
# "abc"
IntList = [int, -1]
# [1, 2, 3, 4]
StrList = [str, -1]
# ["a", "b", "c", "d"]
FileList = [File(), -1]
# ["file.py", "another_file.txt"]
FunctionList = [Function(), -1]
# [func, other_func, lambda x: x, ...] | from . import (
Pattern,
Function,
Method,
Module,
Class,
Union,
File,
Dict,
Int,
Str
)
Callable = Union(Method(), Function(), Class(is_init=False))
# lambda x: x**2
Point = Union(int, float)
Coords = [Point, Point]
# [0, 5.5]
PIXEL_VAL = Int(0, 255)
RGB = [PIXEL_VAL, 3]
RGBA = [PIXEL_VAL, 4]
Pixel = Union(RGB, RGBA)
# [255, 255, 255]
# [0, 0, 0, 255]
Image = [[Pixel, -1], -1]
"""
[
[[0, 0, 0], [0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0], [0, 0, 0]]
]
3x3 image full of black pixels
"""
Binnary = Str(startswith="0b", only="01", ignore={'prefix': True})
# "0b1010"
Octal = Str(startswith="0o", only="01234567", ignore={'prefix': True})
# "0o12"
Hexadecimal = Str(startswith="0x", only="0123456789abcdef", ignore={'prefix': True})
# "0xa"
Ascii = Str(only=[chr(x) for x in range(128)])
# "abc"
IntList = [int, -1]
# [1, 2, 3, 4]
StrList = [str, -1]
# ["a", "b", "c", "d"]
FileList = [File(), -1]
# ["file.py", "another_file.txt"]
FunctionList = [Function(), -1]
# [func, other_func, lambda x: x, ...] | en | 0.339626 | # lambda x: x**2 # [0, 5.5] # [255, 255, 255] # [0, 0, 0, 255] [ [[0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0]] ] 3x3 image full of black pixels # "0b1010" # "0o12" # "0xa" # "abc" # [1, 2, 3, 4] # ["a", "b", "c", "d"] # ["file.py", "another_file.txt"] # [func, other_func, lambda x: x, ...] | 2.719044 | 3 |
backend/src/configs/database_config.py | Seina88/attendance-system | 2 | 6619199 | import os
from dotenv import load_dotenv
load_dotenv()
class MainDatabaseConfig:
    """SQLAlchemy settings for the main application database.

    Connection parameters come from the DB_USER / DB_PASSWORD / DB_HOST /
    DB_DATABASE environment variables, with local-development defaults.
    """

    SQLALCHEMY_DATABASE_URI = (
        "mysql+pymysql://{}:{}@{}/{}?charset=utf8".format(
            os.getenv("DB_USER", "root"),
            os.getenv("DB_PASSWORD", "password"),
            os.getenv("DB_HOST", "database"),
            os.getenv("DB_DATABASE", "attendance_system"),
        )
    )
    # No model-change events and no SQL echo in normal operation.
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    SQLALCHEMY_ECHO = False
class TestDatabaseConfig:
    """SQLAlchemy settings for the test database.

    Identical to MainDatabaseConfig except the database name comes from
    DB_DATABASE_TEST (default "test").
    """

    SQLALCHEMY_DATABASE_URI = (
        "mysql+pymysql://{}:{}@{}/{}?charset=utf8".format(
            os.getenv("DB_USER", "root"),
            os.getenv("DB_PASSWORD", "password"),
            os.getenv("DB_HOST", "database"),
            os.getenv("DB_DATABASE_TEST", "test"),
        )
    )
    # No model-change events and no SQL echo in normal operation.
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    SQLALCHEMY_ECHO = False
| import os
from dotenv import load_dotenv
load_dotenv()
class MainDatabaseConfig:
    """SQLAlchemy configuration for the main application database."""

    # Connection string is assembled from DB_* environment variables,
    # falling back to local-development defaults.
    SQLALCHEMY_DATABASE_URI = "mysql+pymysql://{user}:{password}@{host}/{database}?charset=utf8".format(
        **{
            "user": os.getenv("DB_USER", "root"),
            "password": os.getenv("DB_PASSWORD", "password"),
            "host": os.getenv("DB_HOST", "database"),
            "database": os.getenv("DB_DATABASE", "attendance_system"),
        })
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    SQLALCHEMY_ECHO = False
class TestDatabaseConfig:
    """SQLAlchemy configuration for the test database (DB_DATABASE_TEST)."""

    # Same credentials/host as the main config; only the database name differs.
    SQLALCHEMY_DATABASE_URI = "mysql+pymysql://{user}:{password}@{host}/{database}?charset=utf8".format(
        **{
            "user": os.getenv("DB_USER", "root"),
            "password": os.getenv("DB_PASSWORD", "password"),
            "host": os.getenv("DB_HOST", "database"),
            "database": os.getenv("DB_DATABASE_TEST", "test"),
        })
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    SQLALCHEMY_ECHO = False
| none | 1 | 2.402745 | 2 | |
src/python/setup.py | plandes/gelfglance | 0 | 6619200 | from setuptools import setup, find_packages
setup(
    # Distribution metadata for the zensols.gelfglance package.
    name = "zensols.gelfglance",
    packages = ['zensols', 'zensols.gelfglance'],
    # NOTE(review): version '0.1' does not match the 'v0.0.1' release tag in
    # download_url below — confirm which release is intended.
    version = '0.1',
    description = 'Forward glance statistics as gelf logs.',
    author = '<NAME>',
    author_email = '<EMAIL>',
    url = 'https://github.com/plandes/gelfglance',
    download_url = 'https://github.com/plandes/gelfglance/releases/download/v0.0.1/zensols.gelfglance-0.1-py3-none-any.whl',
    keywords = ['tooling'],
    classifiers = [],
    # Installs a 'gelfglance' console command that runs cli:main.
    entry_points={
        'console_scripts': [
            'gelfglance=zensols.gelfglance.cli:main'
        ]
    }
)
| from setuptools import setup, find_packages
setup(
    # Distribution metadata for the zensols.gelfglance package.
    name = "zensols.gelfglance",
    packages = ['zensols', 'zensols.gelfglance'],
    # NOTE(review): version '0.1' does not match the 'v0.0.1' release tag in
    # download_url below — confirm which release is intended.
    version = '0.1',
    description = 'Forward glance statistics as gelf logs.',
    author = '<NAME>',
    author_email = '<EMAIL>',
    url = 'https://github.com/plandes/gelfglance',
    download_url = 'https://github.com/plandes/gelfglance/releases/download/v0.0.1/zensols.gelfglance-0.1-py3-none-any.whl',
    keywords = ['tooling'],
    classifiers = [],
    # Installs a 'gelfglance' console command that runs cli:main.
    entry_points={
        'console_scripts': [
            'gelfglance=zensols.gelfglance.cli:main'
        ]
    }
)
| none | 1 | 1.232965 | 1 | |
src/extract_patches.py | simongraham/dsf-cnn | 63 | 6619201 | """extract_patches.py
Script for extracting patches from image tiles. The script will read
and RGB image and a corresponding label and form image patches to be
used by the network.
"""
import glob
import os
import cv2
import numpy as np
from misc.patch_extractor import PatchExtractor
from misc.utils import rm_n_mkdir
from config import Config
###########################################################################
if __name__ == "__main__":
cfg = Config()
extract_type = "mirror" # 'valid' or 'mirror'
# 'mirror' reflects at the borders; 'valid' doesn't.
# check the patch_extractor.py 'main' to see the difference
# original size (win size) - input size - output size (step size)
step_size = [112, 112]
# set to size of network input: 448 for glands, 256 for nuclei
win_size = [448, 448]
xtractor = PatchExtractor(win_size, step_size)
### Paths to data - these need to be modified according to where the original data is stored
img_ext = ".png"
# img_dir should contain RGB image tiles from where to extract patches.
img_dir = "path/to/images/"
# ann_dir should contain 2D npy image tiles, with values ranging from 0 to N.
# 0 is background and then each nucleus is uniquely labelled from 1-N.
ann_dir = "path/to/labels/"
####
out_dir = "output_path/%dx%d_%dx%d" % (
win_size[0],
win_size[1],
step_size[0],
step_size[1],
)
file_list = glob.glob("%s/*%s" % (img_dir, img_ext))
file_list.sort()
rm_n_mkdir(out_dir)
for filename in file_list:
filename = os.path.basename(filename)
basename = filename.split(".")[0]
print(filename)
img = cv2.imread(img_dir + basename + img_ext)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# assumes that ann is HxW
ann_inst = np.load(ann_dir + basename + ".npy")
ann_inst = ann_inst.astype("int32")
ann = np.expand_dims(ann_inst, -1)
img = np.concatenate([img, ann], axis=-1)
sub_patches = xtractor.extract(img, extract_type)
for idx, patch in enumerate(sub_patches):
np.save("{0}/{1}_{2:03d}.npy".format(out_dir, basename, idx), patch)
| """extract_patches.py
Script for extracting patches from image tiles. The script will read
and RGB image and a corresponding label and form image patches to be
used by the network.
"""
import glob
import os
import cv2
import numpy as np
from misc.patch_extractor import PatchExtractor
from misc.utils import rm_n_mkdir
from config import Config
###########################################################################
if __name__ == "__main__":
cfg = Config()
extract_type = "mirror" # 'valid' or 'mirror'
# 'mirror' reflects at the borders; 'valid' doesn't.
# check the patch_extractor.py 'main' to see the difference
# original size (win size) - input size - output size (step size)
step_size = [112, 112]
# set to size of network input: 448 for glands, 256 for nuclei
win_size = [448, 448]
xtractor = PatchExtractor(win_size, step_size)
### Paths to data - these need to be modified according to where the original data is stored
img_ext = ".png"
# img_dir should contain RGB image tiles from where to extract patches.
img_dir = "path/to/images/"
# ann_dir should contain 2D npy image tiles, with values ranging from 0 to N.
# 0 is background and then each nucleus is uniquely labelled from 1-N.
ann_dir = "path/to/labels/"
####
out_dir = "output_path/%dx%d_%dx%d" % (
win_size[0],
win_size[1],
step_size[0],
step_size[1],
)
file_list = glob.glob("%s/*%s" % (img_dir, img_ext))
file_list.sort()
rm_n_mkdir(out_dir)
for filename in file_list:
filename = os.path.basename(filename)
basename = filename.split(".")[0]
print(filename)
img = cv2.imread(img_dir + basename + img_ext)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# assumes that ann is HxW
ann_inst = np.load(ann_dir + basename + ".npy")
ann_inst = ann_inst.astype("int32")
ann = np.expand_dims(ann_inst, -1)
img = np.concatenate([img, ann], axis=-1)
sub_patches = xtractor.extract(img, extract_type)
for idx, patch in enumerate(sub_patches):
np.save("{0}/{1}_{2:03d}.npy".format(out_dir, basename, idx), patch)
| en | 0.777754 | extract_patches.py Script for extracting patches from image tiles. The script will read and RGB image and a corresponding label and form image patches to be used by the network. ########################################################################### # 'valid' or 'mirror' # 'mirror' reflects at the borders; 'valid' doesn't. # check the patch_extractor.py 'main' to see the difference # original size (win size) - input size - output size (step size) # set to size of network input: 448 for glands, 256 for nuclei ### Paths to data - these need to be modified according to where the original data is stored # img_dir should contain RGB image tiles from where to extract patches. # ann_dir should contain 2D npy image tiles, with values ranging from 0 to N. # 0 is background and then each nucleus is uniquely labelled from 1-N. #### # assumes that ann is HxW | 2.901576 | 3 |
AwsGameKit/Resources/cloudResources/functionsTests/test_usergamedata/test_BatchDeleteHelper/test_index.py | aws/aws-gamekit-unreal | 17 | 6619202 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
from unittest import TestCase
from unittest.mock import patch, call, MagicMock
with patch("boto3.resource") as boto_resource_mock:
from functions.usergamedata.BatchDeleteHelper import index
def _build_batch_delete_helper_event():
return {
'TableName': 'test_bundleitems_table',
'DeleteRequest': [
{'DeleteRequest': {'Key': {'player_id_bundle': '12345678-1234-1234-1234-123456789012_BANANA_BUNDLE',
'bundle_item_key': 'SCORE1'}}},
{'DeleteRequest': {'Key': {'player_id_bundle': '12345678-1234-1234-1234-123456789012_BANANA_BUNDLE',
'bundle_item_key': 'SCORE2'}}}
]
}
class TestGetItem(TestCase):
    """Unit tests for the BatchDeleteHelper lambda handler."""
    def setUp(self):
        # Swap the real DynamoDB resource for a mock before every test.
        index.ddb_resource = MagicMock()
    def test_batch_delete_helper_event_calls_batch_write_item(self):
        # The handler should forward the event's delete requests, unchanged,
        # in a single batch_write_item call keyed by the event's table name.
        test_event = _build_batch_delete_helper_event()
        index.lambda_handler(test_event, None)
        index.ddb_resource.batch_write_item.assert_called_once_with(
            RequestItems={'test_bundleitems_table': [
                {'DeleteRequest':
                     {'Key': {'player_id_bundle': '12345678-1234-1234-1234-123456789012_BANANA_BUNDLE',
                              'bundle_item_key': 'SCORE1'}}},
                {'DeleteRequest':
                     {'Key': {'player_id_bundle': '12345678-1234-1234-1234-123456789012_BANANA_BUNDLE',
                              'bundle_item_key': 'SCORE2'}}}]})
| # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
from unittest import TestCase
from unittest.mock import patch, call, MagicMock
with patch("boto3.resource") as boto_resource_mock:
from functions.usergamedata.BatchDeleteHelper import index
def _build_batch_delete_helper_event():
    # Sample lambda event: a target table name plus two DynamoDB
    # DeleteRequest entries for the same player bundle.
    return {
        'TableName': 'test_bundleitems_table',
        'DeleteRequest': [
            {'DeleteRequest': {'Key': {'player_id_bundle': '12345678-1234-1234-1234-123456789012_BANANA_BUNDLE',
                                       'bundle_item_key': 'SCORE1'}}},
            {'DeleteRequest': {'Key': {'player_id_bundle': '12345678-1234-1234-1234-123456789012_BANANA_BUNDLE',
                                       'bundle_item_key': 'SCORE2'}}}
        ]
    }
class TestGetItem(TestCase):
    """Unit tests for the BatchDeleteHelper lambda handler."""
    def setUp(self):
        # Swap the real DynamoDB resource for a mock before every test.
        index.ddb_resource = MagicMock()
    def test_batch_delete_helper_event_calls_batch_write_item(self):
        # The handler should forward the event's delete requests, unchanged,
        # in a single batch_write_item call keyed by the event's table name.
        test_event = _build_batch_delete_helper_event()
        index.lambda_handler(test_event, None)
        index.ddb_resource.batch_write_item.assert_called_once_with(
            RequestItems={'test_bundleitems_table': [
                {'DeleteRequest':
                     {'Key': {'player_id_bundle': '12345678-1234-1234-1234-123456789012_BANANA_BUNDLE',
                              'bundle_item_key': 'SCORE1'}}},
                {'DeleteRequest':
                     {'Key': {'player_id_bundle': '12345678-1234-1234-1234-123456789012_BANANA_BUNDLE',
                              'bundle_item_key': 'SCORE2'}}}]})
src/you_get/extractors/kugou.py | 0x110100/you-get | 12 | 6619203 | #!/usr/bin/env python
__all__ = ['kugou_download']
from ..common import *
from json import loads
from base64 import b64decode
import re
import hashlib
def kugou_download(url, output_dir=".", merge=True, info_only=False, **kwargs):
    """Dispatch a Kugou URL to the appropriate downloader.

    5sing.kugou.com song pages embed a base64 "ticket" JSON holding the
    direct file URL; URLs containing "hash" are single songs handled by
    kugou_download_by_hash; everything else is treated as a playlist page.
    """
    if url.lower().find("5sing")!=-1:
        # for 5sing.kugou.com: decode the base64 "ticket" blob into JSON
        html=get_html(url)
        ticket=r1(r'"ticket":\s*"(.*)"',html)
        j=loads(str(b64decode(ticket),encoding="utf-8"))
        url=j['file']
        title=j['songName']
        songtype, ext, size = url_info(url)
        print_info(site_info, title, songtype, size)
        if not info_only:
            download_urls([url], title, ext, size, output_dir, merge=merge)
    elif url.lower().find("hash")!=-1:
        return kugou_download_by_hash(url,output_dir,merge,info_only)
    else:
        # for the www.kugou.com/ pages (rank / album / playlist)
        return kugou_download_playlist(url, output_dir=output_dir, merge=merge, info_only=info_only)
    # raise NotImplementedError(url)
def kugou_download_by_hash(url,output_dir = '.', merge = True, info_only = False):
    """Download a single Kugou song identified by its hash/album_id pair.

    Sample URL:
    http://www.kugou.com/song/#hash=93F7D2FC6E95424739448218B591AEAF&album_id=9019462
    """
    hash_val = match1(url,'hash=(\w+)')
    album_id = match1(url,'album_id=(\d+)')
    # Kugou's getdata endpoint resolves (hash, album_id) to a playable URL.
    html = get_html("http://www.kugou.com/yy/index.php?r=play/getdata&hash={}&album_id={}".format(hash_val,album_id))
    j =loads(html)
    url = j['data']['play_url']
    title = j['data']['audio_name']
    # some songs can't be played because of copyright protection
    if(url == ''):
        return
    songtype, ext, size = url_info(url)
    print_info(site_info, title, songtype, size)
    if not info_only:
        download_urls([url], title, ext, size, output_dir, merge=merge)
def kugou_download_playlist(url, output_dir = '.', merge = True, info_only = False, **kwargs):
    """Download every song of a Kugou leaderboard, album, or playlist page.

    Collects a per-song hash/album_id URL for each entry on the page, then
    delegates each one to kugou_download_by_hash.
    """
    urls = []
    if url.lower().find('rank') != -1:
        # Music leaderboard, e.g. http://www.kugou.com/yy/html/rank.html
        html = get_html(url)
        pattern = re.compile('<a href="(http://.*?)" data-active=')
        for song in pattern.findall(html):
            song_page = get_html(song)
            pattern_url = re.compile('"hash":"(\w+)".*"album_id":(\d)+')
            hash_val, album_id = pattern_url.findall(song_page)[0]
            urls.append('http://www.kugou.com/song/#hash=%s&album_id=%s' % (hash_val, album_id))
    elif url.lower().find('album') != -1:
        # Album, e.g. http://www.kugou.com/yy/album/single/1645030.html
        html = get_html(url)
        pattern = re.compile('var data=(\[.*?\]);')
        res = pattern.findall(html)[0]
        # Use the module-level `loads` (imported via `from json import loads`
        # at the top of this file) rather than `json.loads`: the bare `json`
        # name is never imported here and is only available if `..common`
        # happens to re-export it.
        for v in loads(res):
            urls.append('http://www.kugou.com/song/#hash=%s&album_id=%s' % (v['hash'], v['album_id']))
    else:
        # Playlist, e.g. http://www.kugou.com/yy/special/single/487279.html
        html = get_html(url)
        pattern = re.compile('data="(\w+)\|(\d+)"')
        for v in pattern.findall(html):
            urls.append('http://www.kugou.com/song/#hash=%s&album_id=%s' % (v[0], v[1]))
            print('http://www.kugou.com/song/#hash=%s&album_id=%s' % (v[0], v[1]))
    # Download each collected song by its hash (renamed from `url` to avoid
    # shadowing the function parameter).
    for song_url in urls:
        kugou_download_by_hash(song_url, output_dir, merge, info_only)
site_info = "kugou.com"
download = kugou_download
# download_playlist = playlist_not_supported("kugou")
download_playlist=kugou_download_playlist
| #!/usr/bin/env python
__all__ = ['kugou_download']
from ..common import *
from json import loads
from base64 import b64decode
import re
import hashlib
def kugou_download(url, output_dir=".", merge=True, info_only=False, **kwargs):
    """Dispatch a kugou URL to the matching downloader.

    Routing, by substring of the URL:
      * "5sing" -> 5sing.kugou.com song page, handled inline below;
      * "hash"  -> single-track permalink, delegated to kugou_download_by_hash();
      * otherwise -> rank/album/playlist page, delegated to kugou_download_playlist().

    Args:
        url: page URL on kugou.com or 5sing.kugou.com.
        output_dir: directory the media file is written to.
        merge: passed through to download_urls().
        info_only: when True, only print track info and download nothing.
    """
    if url.lower().find("5sing")!=-1:
        #for 5sing.kugou.com: the page embeds a base64-encoded JSON "ticket"
        #carrying the real media URL and the song name.
        html=get_html(url)
        ticket=r1(r'"ticket":\s*"(.*)"',html)
        j=loads(str(b64decode(ticket),encoding="utf-8"))
        url=j['file']
        title=j['songName']
        songtype, ext, size = url_info(url)
        print_info(site_info, title, songtype, size)
        if not info_only:
            download_urls([url], title, ext, size, output_dir, merge=merge)
    elif url.lower().find("hash")!=-1:
        # single-song permalink of the form .../song/#hash=...&album_id=...
        return kugou_download_by_hash(url,output_dir,merge,info_only)
    else:
        #for the www.kugou.com/ front pages (rank / album / playlist)
        return kugou_download_playlist(url, output_dir=output_dir, merge=merge, info_only=info_only)
# raise NotImplementedError(url)
def kugou_download_by_hash(url, output_dir='.', merge=True, info_only=False):
    """Download one kugou track identified by its hash permalink.

    Sample URL:
        http://www.kugou.com/song/#hash=93F7D2FC6E95424739448218B591AEAF&album_id=9019462

    Args:
        url: the #hash=...&album_id=... permalink.
        output_dir: directory the media file is written to.
        merge: passed through to download_urls().
        info_only: when True, only print track info and download nothing.
    """
    # raw strings so the regex escapes are not interpreted by Python first
    hash_val = match1(url, r'hash=(\w+)')
    album_id = match1(url, r'album_id=(\d+)')
    html = get_html("http://www.kugou.com/yy/index.php?r=play/getdata&hash={}&album_id={}".format(hash_val, album_id))
    j = loads(html)
    url = j['data']['play_url']
    title = j['data']['audio_name']
    # Some songs can't play because of copyright protection: the API then
    # returns an empty play_url, so bail out silently (also covers a null).
    if not url:
        return
    songtype, ext, size = url_info(url)
    print_info(site_info, title, songtype, size)
    if not info_only:
        download_urls([url], title, ext, size, output_dir, merge=merge)
def kugou_download_playlist(url, output_dir='.', merge=True, info_only=False, **kwargs):
    """Expand a kugou collection page into single-song URLs and fetch each.

    Handles three page shapes:
      * leaderboard: http://www.kugou.com/yy/html/rank.html
      * album:       http://www.kugou.com/yy/album/single/1645030.html
      * playlist:    http://www.kugou.com/yy/special/single/487279.html

    Every resolved song is delegated to kugou_download_by_hash().
    """
    urls = []
    if url.lower().find('rank') != -1:
        # Leaderboard: scrape the per-song links, then pull each song page
        # for its hash / album_id pair.
        html = get_html(url)
        pattern = re.compile('<a href="(http://.*?)" data-active=')
        song_pages = pattern.findall(html)
        # Bug fix: the album id group must be (\d+); the original (\d)+
        # repeated the group and captured only the LAST digit of the id.
        # Also hoisted out of the loop -- the pattern is loop-invariant.
        pattern_url = re.compile(r'"hash":"(\w+)".*"album_id":(\d+)')
        for song in song_pages:
            # renamed from `res`: the original rebound the list it was
            # iterating, which worked only by accident of CPython iterators
            song_html = get_html(song)
            hash_val, album_id = pattern_url.findall(song_html)[0]
            urls.append('http://www.kugou.com/song/#hash=%s&album_id=%s' % (hash_val, album_id))
    elif url.lower().find('album') != -1:
        # Album: the song list is embedded as a JSON array in "var data=[...];"
        html = get_html(url)
        pattern = re.compile(r'var data=(\[.*?\]);')
        res = pattern.findall(html)[0]
        # Bug fix: this module imports loads() from json; the bare name
        # `json` is not imported here and would raise NameError.
        for v in loads(res):
            urls.append('http://www.kugou.com/song/#hash=%s&album_id=%s' % (v['hash'], v['album_id']))
    else:
        # Playlist: entries are encoded inline as data="<hash>|<album_id>"
        html = get_html(url)
        pattern = re.compile(r'data="(\w+)\|(\d+)"')
        for v in pattern.findall(html):
            urls.append('http://www.kugou.com/song/#hash=%s&album_id=%s' % (v[0], v[1]))
            print('http://www.kugou.com/song/#hash=%s&album_id=%s' % (v[0], v[1]))
    # download every resolved song by its hash permalink
    for url in urls:
        kugou_download_by_hash(url, output_dir, merge, info_only)
# ---- extractor module hooks ---------------------------------------------
# NOTE(review): these module-level names (site_info / download /
# download_playlist) look like the you-get extractor convention -- confirm
# against the framework importing this module.
site_info = "kugou.com"
download = kugou_download
# download_playlist = playlist_not_supported("kugou")
download_playlist=kugou_download_playlist
| en | 0.425262 | #!/usr/bin/env python #for 5sing.kugou.com #for the www.kugou.com/ # raise NotImplementedError(url) #sample #url_sample:http://www.kugou.com/song/#hash=93F7D2FC6E95424739448218B591AEAF&album_id=9019462 # some songs cann't play because of copyright protection #download music leaderboard #sample: http://www.kugou.com/yy/html/rank.html #hash=%s&album_id=%s'%(hash_val,album_id)) # download album # album sample: http://www.kugou.com/yy/album/single/1645030.html #hash=%s&album_id=%s'%(v['hash'],v['album_id'])) # download the playlist # playlist sample:http://www.kugou.com/yy/special/single/487279.html #hash=%s&album_id=%s'%(v[0],v[1])) #hash=%s&album_id=%s'%(v[0],v[1])) #download the list by hash # download_playlist = playlist_not_supported("kugou") | 2.538534 | 3 |
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/features/course_experience/api/v1/views.py | osoco/better-ways-of-thinking-about-software | 3 | 6619204 | <reponame>osoco/better-ways-of-thinking-about-software
"""
Views for Course Experience API.
"""
import logging
from django.conf import settings
from django.urls import reverse
from django.utils.html import format_html
from django.utils.translation import ugettext as _
from eventtracking import tracker
from rest_framework.decorators import api_view, authentication_classes, permission_classes
from rest_framework.exceptions import APIException, ParseError
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.generics import RetrieveAPIView
from edx_rest_framework_extensions.auth.jwt.authentication import JwtAuthentication
from edx_rest_framework_extensions.auth.session.authentication import SessionAuthenticationAllowInactiveUser
from opaque_keys.edx.keys import CourseKey
from lms.djangoapps.course_api.api import course_detail
from lms.djangoapps.course_home_api.toggles import course_home_legacy_is_active
from lms.djangoapps.courseware.access import has_access
from lms.djangoapps.courseware.courses import get_course_with_access
from lms.djangoapps.courseware.masquerade import is_masquerading, setup_masquerade
from openedx.core.djangoapps.schedules.utils import reset_self_paced_schedule
from openedx.core.lib.api.authentication import BearerAuthenticationAllowInactiveUser
from openedx.features.course_experience.api.v1.serializers import CourseDeadlinesMobileSerializer
from openedx.features.course_experience.url_helpers import get_learning_mfe_home_url
from openedx.features.course_experience.utils import dates_banner_should_display
log = logging.getLogger(__name__)
class UnableToResetDeadlines(APIException):
    """DRF error (HTTP 400) raised when a deadline reset fails for any reason."""
    status_code = 400
    default_detail = 'Unable to reset deadlines.'
    default_code = 'unable_to_reset_deadlines'
@api_view(['POST'])
@authentication_classes((
    JwtAuthentication, BearerAuthenticationAllowInactiveUser, SessionAuthenticationAllowInactiveUser,
))
@permission_classes((IsAuthenticated,))
def reset_course_deadlines(request):
    """
    Set the start_date of a schedule to today, which in turn will adjust due dates for
    sequentials belonging to a self paced course

    Request Parameters:
        course_key: course key
        research_event_data: any data that should be included in the research tracking event
            Example: sending the location of where the reset deadlines banner (i.e. outline-tab)

    IMPORTANT NOTE: If updates are happening to the logic here, ALSO UPDATE the `reset_course_deadlines`
    function in common/djangoapps/util/views.py as well.
    """
    course_key = request.data.get('course_key', None)
    research_event_data = request.data.get('research_event_data', {})

    # If body doesn't contain 'course_key', return 400 to client.
    if not course_key:
        raise ParseError(_("'course_key' is required."))

    try:
        course_key = CourseKey.from_string(course_key)
        # Staff may be masquerading as a learner; resolve the effective user
        # so the schedule reset applies to the right account.
        course_masquerade, user = setup_masquerade(
            request,
            course_key,
            has_access(request.user, 'staff', course_key)
        )
        # We ignore the missed_deadlines because this endpoint is used in the Learning MFE for
        # learners who have remaining attempts on a problem and reset their due dates in order to
        # submit additional attempts. This can apply for 'completed' (submitted) content that would
        # not be marked as past_due
        _missed_deadlines, missed_gated_content = dates_banner_should_display(course_key, user)
        if not missed_gated_content:
            reset_self_paced_schedule(user, course_key)

            course_overview = course_detail(request, user.username, course_key)
            # For context here, research_event_data should already contain `location` indicating
            # the page/location dates were reset from and could also contain `block_id` if reset
            # within courseware.
            research_event_data.update({
                'courserun_key': str(course_key),
                'is_masquerading': is_masquerading(user, course_key, course_masquerade),
                'is_staff': has_access(user, 'staff', course_key).has_access,
                'org_key': course_overview.display_org_with_default,
                'user_id': user.id,
            })
            tracker.emit('edx.ui.lms.reset_deadlines.clicked', research_event_data)

        # Point the learner at the dates page: legacy LMS or the Learning
        # MFE, depending on the course's rollout flag.
        if course_home_legacy_is_active(course_key):
            body_link = '{}{}'.format(settings.LMS_ROOT_URL, reverse('dates', args=[str(course_key)]))
        else:
            body_link = get_learning_mfe_home_url(course_key=str(course_key), view_name='dates')

        return Response({
            'body': format_html('<a href="{}">{}</a>', body_link, _('View all dates')),
            'header': _('Your due dates have been successfully shifted to help you stay on track.'),
            'link': body_link,
            'link_text': _('View all dates'),
            'message': _('Deadlines successfully reset.'),
        })
    except Exception as reset_deadlines_exception:
        # Any failure is surfaced uniformly as a 400 UnableToResetDeadlines,
        # with the original exception chained for debugging.
        log.exception('Error occurred while trying to reset deadlines!')
        raise UnableToResetDeadlines from reset_deadlines_exception
class CourseDeadlinesMobileView(RetrieveAPIView):
    """
    **Use Cases**

        Request course deadline info for mobile

    **Example Requests**

        GET api/course_experience/v1/course_deadlines_info/{course_key}

    **Response Values**

        Body consists of the following fields:

        dates_banner_info: (obj)
            missed_deadlines: (bool) Whether the user has missed any graded content deadlines for the given course.
            missed_gated_content: (bool) Whether the user has missed any gated content for the given course.
            content_type_gating_enabled: (bool) Whether content type gating is enabled for this enrollment.
        verified_upgrade_link: (str) The URL to ecommerce IDA for purchasing the verified upgrade.

    **Returns**

        * 200 on success with above fields.
        * 401 if the user is not authenticated.
        * 404 if the course is not available or cannot be seen.
    """
    authentication_classes = (
        JwtAuthentication,
        BearerAuthenticationAllowInactiveUser,
        SessionAuthenticationAllowInactiveUser,
    )
    permission_classes = (IsAuthenticated,)
    serializer_class = CourseDeadlinesMobileSerializer

    def get(self, request, *args, **kwargs):
        course_key_string = kwargs.get('course_key_string')
        course_key = CourseKey.from_string(course_key_string)

        # Although this course data is not used this method will return 404 if course does not exist
        get_course_with_access(request.user, 'load', course_key)
        # NOTE(review): the serializer is fed an empty object; presumably it
        # derives its fields from the request context -- confirm against
        # CourseDeadlinesMobileSerializer.
        serializer = self.get_serializer({})
        return Response(serializer.data)
| """
Views for Course Experience API.
"""
import logging
from django.conf import settings
from django.urls import reverse
from django.utils.html import format_html
from django.utils.translation import ugettext as _
from eventtracking import tracker
from rest_framework.decorators import api_view, authentication_classes, permission_classes
from rest_framework.exceptions import APIException, ParseError
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.generics import RetrieveAPIView
from edx_rest_framework_extensions.auth.jwt.authentication import JwtAuthentication
from edx_rest_framework_extensions.auth.session.authentication import SessionAuthenticationAllowInactiveUser
from opaque_keys.edx.keys import CourseKey
from lms.djangoapps.course_api.api import course_detail
from lms.djangoapps.course_home_api.toggles import course_home_legacy_is_active
from lms.djangoapps.courseware.access import has_access
from lms.djangoapps.courseware.courses import get_course_with_access
from lms.djangoapps.courseware.masquerade import is_masquerading, setup_masquerade
from openedx.core.djangoapps.schedules.utils import reset_self_paced_schedule
from openedx.core.lib.api.authentication import BearerAuthenticationAllowInactiveUser
from openedx.features.course_experience.api.v1.serializers import CourseDeadlinesMobileSerializer
from openedx.features.course_experience.url_helpers import get_learning_mfe_home_url
from openedx.features.course_experience.utils import dates_banner_should_display
log = logging.getLogger(__name__)
class UnableToResetDeadlines(APIException):
status_code = 400
default_detail = 'Unable to reset deadlines.'
default_code = 'unable_to_reset_deadlines'
@api_view(['POST'])
@authentication_classes((
JwtAuthentication, BearerAuthenticationAllowInactiveUser, SessionAuthenticationAllowInactiveUser,
))
@permission_classes((IsAuthenticated,))
def reset_course_deadlines(request):
"""
Set the start_date of a schedule to today, which in turn will adjust due dates for
sequentials belonging to a self paced course
Request Parameters:
course_key: course key
research_event_data: any data that should be included in the research tracking event
Example: sending the location of where the reset deadlines banner (i.e. outline-tab)
IMPORTANT NOTE: If updates are happening to the logic here, ALSO UPDATE the `reset_course_deadlines`
function in common/djangoapps/util/views.py as well.
"""
course_key = request.data.get('course_key', None)
research_event_data = request.data.get('research_event_data', {})
# If body doesnt contain 'course_key', return 400 to client.
if not course_key:
raise ParseError(_("'course_key' is required."))
try:
course_key = CourseKey.from_string(course_key)
course_masquerade, user = setup_masquerade(
request,
course_key,
has_access(request.user, 'staff', course_key)
)
# We ignore the missed_deadlines because this endpoint is used in the Learning MFE for
# learners who have remaining attempts on a problem and reset their due dates in order to
# submit additional attempts. This can apply for 'completed' (submitted) content that would
# not be marked as past_due
_missed_deadlines, missed_gated_content = dates_banner_should_display(course_key, user)
if not missed_gated_content:
reset_self_paced_schedule(user, course_key)
course_overview = course_detail(request, user.username, course_key)
# For context here, research_event_data should already contain `location` indicating
# the page/location dates were reset from and could also contain `block_id` if reset
# within courseware.
research_event_data.update({
'courserun_key': str(course_key),
'is_masquerading': is_masquerading(user, course_key, course_masquerade),
'is_staff': has_access(user, 'staff', course_key).has_access,
'org_key': course_overview.display_org_with_default,
'user_id': user.id,
})
tracker.emit('edx.ui.lms.reset_deadlines.clicked', research_event_data)
if course_home_legacy_is_active(course_key):
body_link = '{}{}'.format(settings.LMS_ROOT_URL, reverse('dates', args=[str(course_key)]))
else:
body_link = get_learning_mfe_home_url(course_key=str(course_key), view_name='dates')
return Response({
'body': format_html('<a href="{}">{}</a>', body_link, _('View all dates')),
'header': _('Your due dates have been successfully shifted to help you stay on track.'),
'link': body_link,
'link_text': _('View all dates'),
'message': _('Deadlines successfully reset.'),
})
except Exception as reset_deadlines_exception:
log.exception('Error occurred while trying to reset deadlines!')
raise UnableToResetDeadlines from reset_deadlines_exception
class CourseDeadlinesMobileView(RetrieveAPIView):
"""
**Use Cases**
Request course deadline info for mobile
**Example Requests**
GET api/course_experience/v1/course_deadlines_info/{course_key}
**Response Values**
Body consists of the following fields:
dates_banner_info: (obj)
missed_deadlines: (bool) Whether the user has missed any graded content deadlines for the given course.
missed_gated_content: (bool) Whether the user has missed any gated content for the given course.
content_type_gating_enabled: (bool) Whether content type gating is enabled for this enrollment.
verified_upgrade_link: (str) The URL to ecommerce IDA for purchasing the verified upgrade.
**Returns**
* 200 on success with above fields.
* 401 if the user is not authenticated.
* 404 if the course is not available or cannot be seen.
"""
authentication_classes = (
JwtAuthentication,
BearerAuthenticationAllowInactiveUser,
SessionAuthenticationAllowInactiveUser,
)
permission_classes = (IsAuthenticated,)
serializer_class = CourseDeadlinesMobileSerializer
def get(self, request, *args, **kwargs):
course_key_string = kwargs.get('course_key_string')
course_key = CourseKey.from_string(course_key_string)
# Although this course data is not used this method will return 404 if course does not exist
get_course_with_access(request.user, 'load', course_key)
serializer = self.get_serializer({})
return Response(serializer.data) | en | 0.863632 | Views for Course Experience API. Set the start_date of a schedule to today, which in turn will adjust due dates for sequentials belonging to a self paced course Request Parameters: course_key: course key research_event_data: any data that should be included in the research tracking event Example: sending the location of where the reset deadlines banner (i.e. outline-tab) IMPORTANT NOTE: If updates are happening to the logic here, ALSO UPDATE the `reset_course_deadlines` function in common/djangoapps/util/views.py as well. # If body doesnt contain 'course_key', return 400 to client. # We ignore the missed_deadlines because this endpoint is used in the Learning MFE for # learners who have remaining attempts on a problem and reset their due dates in order to # submit additional attempts. This can apply for 'completed' (submitted) content that would # not be marked as past_due # For context here, research_event_data should already contain `location` indicating # the page/location dates were reset from and could also contain `block_id` if reset # within courseware. **Use Cases** Request course deadline info for mobile **Example Requests** GET api/course_experience/v1/course_deadlines_info/{course_key} **Response Values** Body consists of the following fields: dates_banner_info: (obj) missed_deadlines: (bool) Whether the user has missed any graded content deadlines for the given course. missed_gated_content: (bool) Whether the user has missed any gated content for the given course. content_type_gating_enabled: (bool) Whether content type gating is enabled for this enrollment. verified_upgrade_link: (str) The URL to ecommerce IDA for purchasing the verified upgrade. **Returns** * 200 on success with above fields. * 401 if the user is not authenticated. * 404 if the course is not available or cannot be seen. # Although this course data is not used this method will return 404 if course does not exist | 1.77744 | 2 |
utils/GUI_main_window.py | ApocalyVec/mGesf | 18 | 6619205 | from PyQt5 import QtWidgets, QtCore, QtGui
from PyQt5.QtWidgets import QLabel, QCheckBox, QFrame, QVBoxLayout, QHBoxLayout, QComboBox
import config as config
def init_view(label, container, label_bold=True, position="centertop", vertical=True):
if vertical:
vl = QVBoxLayout(container)
else:
vl = QHBoxLayout(container)
if label:
ql = QLabel()
if label_bold:
ql.setStyleSheet("font: bold 14px;")
# positions
if position == "centertop":
ql.setAlignment(QtCore.Qt.AlignTop)
ql.setAlignment(QtCore.Qt.AlignCenter)
elif position == "center":
ql.setAlignment(QtCore.Qt.AlignCenter)
elif position == "rightbottom":
ql.setAlignment(QtCore.Qt.AlignRight)
ql.setAlignment(QtCore.Qt.AlignBottom)
elif position == "righttop":
ql.setAlignment(QtCore.Qt.AlignRight)
ql.setAlignment(QtCore.Qt.AlignTop)
elif position == "lefttop":
ql.setAlignment(QtCore.Qt.AlignLeft)
ql.setAlignment(QtCore.Qt.AlignTop)
elif position == "leftbottom":
ql.setAlignment(QtCore.Qt.AlignLeft)
ql.setAlignment(QtCore.Qt.AlignBottom)
ql.setText(label)
vl.addWidget(ql)
return vl
def init_container(parent, label=None, label_position=None, label_bold=True, vertical=True, style=None, size=None):
container = QtGui.QWidget()
if size:
container.setFixedWidth(size[0])
container.setFixedHeight(size[1])
if style: # set the style of the container, which takes over the invisible layout
container.setStyleSheet(style)
parent.addWidget(container)
vl = init_view(label, container, label_bold, label_position, vertical)
return vl
def init_button(parent, label=None, function=None, style=config.button_style_classic):
btn = QtWidgets.QPushButton(text=label)
btn.clicked.connect(function)
parent.addWidget(btn)
btn.setStyleSheet(config.button_style_classic)
return btn
def init_inputBox(parent, label=None, label_bold=False, default_input=None):
block = init_container(parent=parent,
label=label,
label_bold=label_bold,
vertical=False)
textbox = QtWidgets.QLineEdit()
textbox.setContentsMargins(5, 0, 0, 0)
textbox.setText(str(default_input))
block.addWidget(textbox)
textbox.setStyleSheet("background-color:white;")
return block, textbox
def setup_configPath_block(parent):
is_valid_config_path = False
config_textbox = init_inputBox(parent=parent,
label=config.control_tab_config_path_label,
label_bold=True,
default_input=config.control_tab_config_file_path_default)
return is_valid_config_path, config_textbox
def init_checkBox(parent, label=None, function=None):
box = QCheckBox(label)
parent.addWidget(box)
box.stateChanged.connect(function)
return box
def draw_boarder(parent, width, height):
frame = QFrame()
frame.setFixedSize(int(width), int(height))
frame.setFrameShape(QFrame.StyledPanel)
frame.setLineWidth(2)
frame.setContentsMargins(5, 5, 5, 5)
parent.addWidget(frame)
return frame
def init_combo_box(parent, label, item_list):
container = init_container(parent=parent, label=label, vertical=False)
combo_widget = QtGui.QWidget()
combo_box = QComboBox()
for i in item_list:
combo_box.addItem(i)
container.addWidget(combo_box)
return combo_box
| from PyQt5 import QtWidgets, QtCore, QtGui
from PyQt5.QtWidgets import QLabel, QCheckBox, QFrame, QVBoxLayout, QHBoxLayout, QComboBox
import config as config
def init_view(label, container, label_bold=True, position="centertop", vertical=True):
    """Create a box layout inside *container*, optionally topped by a QLabel.

    Args:
        label: label text; falsy -> no label widget is added.
        container: widget the layout is installed on.
        label_bold: render the label with the "bold 14px" style.
        position: one of centertop / center / rightbottom / righttop /
            lefttop / leftbottom; unknown values leave Qt's default alignment.
        vertical: True -> QVBoxLayout, False -> QHBoxLayout.

    Returns:
        The created layout (the label, if any, is its first widget).
    """
    if vertical:
        vl = QVBoxLayout(container)
    else:
        vl = QHBoxLayout(container)
    if label:
        ql = QLabel()
        if label_bold:
            ql.setStyleSheet("font: bold 14px;")
        # Bug fix: QLabel.setAlignment() REPLACES the whole alignment flag
        # set, so the original back-to-back calls kept only the second one
        # (e.g. "centertop" ended up plain centered). Combine the horizontal
        # and vertical flags with | in a single call instead.
        alignments = {
            "centertop": QtCore.Qt.AlignHCenter | QtCore.Qt.AlignTop,
            "center": QtCore.Qt.AlignCenter,
            "rightbottom": QtCore.Qt.AlignRight | QtCore.Qt.AlignBottom,
            "righttop": QtCore.Qt.AlignRight | QtCore.Qt.AlignTop,
            "lefttop": QtCore.Qt.AlignLeft | QtCore.Qt.AlignTop,
            "leftbottom": QtCore.Qt.AlignLeft | QtCore.Qt.AlignBottom,
        }
        if position in alignments:
            ql.setAlignment(alignments[position])
        ql.setText(label)
        vl.addWidget(ql)
    return vl
def init_container(parent, label=None, label_position=None, label_bold=True, vertical=True, style=None, size=None):
    """Add a fresh QWidget to *parent* and return a labelled layout inside it.

    Args:
        parent: layout receiving the new container widget.
        label / label_position / label_bold / vertical: forwarded to init_view().
        style: optional stylesheet; applied to the widget, since the layout
            itself is invisible.
        size: optional (width, height) pair fixing the container's size.

    Returns:
        The layout created by init_view() inside the new container.
    """
    container = QtGui.QWidget()
    if size:
        container.setFixedWidth(size[0])
        container.setFixedHeight(size[1])
    if style:
        # set the style of the container, which takes over the invisible layout
        container.setStyleSheet(style)
    parent.addWidget(container)
    return init_view(label, container, label_bold, label_position, vertical)
def init_button(parent, label=None, function=None, style=config.button_style_classic):
    """Add a QPushButton to *parent* wired to *function*; return the button.

    Bug fix: the stylesheet previously hard-coded
    config.button_style_classic, silently ignoring the *style* argument;
    the parameter is now honored (its default keeps the old appearance).
    """
    btn = QtWidgets.QPushButton(text=label)
    btn.clicked.connect(function)
    parent.addWidget(btn)
    btn.setStyleSheet(style)
    return btn
def init_inputBox(parent, label=None, label_bold=False, default_input=None):
    """Create a labelled horizontal row holding a single-line text box.

    Returns:
        (layout, textbox) so callers can read the entered text later.
    """
    row_layout = init_container(parent=parent,
                                label=label,
                                label_bold=label_bold,
                                vertical=False)
    textbox = QtWidgets.QLineEdit()
    textbox.setContentsMargins(5, 0, 0, 0)
    textbox.setText(str(default_input))
    textbox.setStyleSheet("background-color:white;")
    row_layout.addWidget(textbox)
    return row_layout, textbox
def setup_configPath_block(parent):
    """Build the config-file-path input row from the app's config defaults.

    Returns:
        (is_valid_config_path, config_textbox); the path always starts out
        unvalidated (False).
    """
    config_textbox = init_inputBox(
        parent=parent,
        label=config.control_tab_config_path_label,
        label_bold=True,
        default_input=config.control_tab_config_file_path_default,
    )
    is_valid_config_path = False
    return is_valid_config_path, config_textbox
def init_checkBox(parent, label=None, function=None):
    """Add a QCheckBox whose stateChanged signal invokes *function*."""
    checkbox = QCheckBox(label)
    checkbox.stateChanged.connect(function)
    parent.addWidget(checkbox)
    return checkbox
def draw_boarder(parent, width, height):
    """Add a fixed-size styled frame (a visual border) to *parent*.

    NOTE(review): the historical 'boarder' spelling is kept because callers
    reference this name.
    """
    border_frame = QFrame()
    border_frame.setFixedSize(int(width), int(height))
    border_frame.setFrameShape(QFrame.StyledPanel)
    border_frame.setLineWidth(2)
    border_frame.setContentsMargins(5, 5, 5, 5)
    parent.addWidget(border_frame)
    return border_frame
def init_combo_box(parent, label, item_list):
    """Add a labelled QComboBox populated with *item_list*; return the box.

    Fix: removed the dead local `combo_widget = QtGui.QWidget()` -- it
    allocated a stray widget that was never added to any layout.
    """
    container = init_container(parent=parent, label=label, vertical=False)
    combo_box = QComboBox()
    for item in item_list:
        combo_box.addItem(item)
    container.addWidget(combo_box)
    return combo_box
| en | 0.83084 | # positions # set the style of the container, which takes over the invisible layout | 2.724424 | 3 |
Scraper/sync_http.py | EazzyLab/blog-scraper | 0 | 6619206 | import requests
def get_request(url, headers=None, proxy=None, timeout=None):
    """Thin synchronous wrapper over requests.get.

    Args:
        url: target URL.
        headers: optional dict of HTTP headers.
        proxy: optional proxy mapping, forwarded to requests as `proxies`.
        timeout: optional seconds before the request aborts; the None
            default preserves the old block-forever behavior, but scrapers
            should pass a value to avoid hanging on dead hosts.

    Returns:
        The requests.Response object.
    """
    r = requests.get(url, headers=headers, proxies=proxy, timeout=timeout)
    return r
def get_request(url, headers=None, proxy=None):
r = requests.get(url, headers=headers, proxies=proxy)
return r | none | 1 | 2.209168 | 2 | |
test/hlt/pytest/python/com/huawei/iotplatform/client/dto/QueryDeviceRealtimeLocationInDTO.py | yuanyi-thu/AIOT- | 128 | 6619207 | from com.huawei.iotplatform.client.dto.CoordinateReferenceSystem import CoordinateReferenceSystem
class QueryDeviceRealtimeLocationInDTO(object):
    """Request DTO for querying a device's realtime location.

    Follows the surrounding Huawei IoT SDK convention: attributes are
    initialized to *type objects* as type markers, and Java-style
    getters/setters are kept for parity with the sibling DTO classes.
    """

    # NOTE(review): shared CLASS attribute holding the CoordinateReferenceSystem
    # type object itself until setGeoInfo() shadows it on the instance --
    # TODO confirm this matches the SDK's serialization expectations.
    geoInfo = CoordinateReferenceSystem

    def __init__(self):
        # horizontal accuracy; the `int` type object acts as a type marker
        # per the SDK-wide convention
        self.horAcc = int

    def getHorAcc(self):
        return self.horAcc

    def setHorAcc(self, horAcc):
        self.horAcc = horAcc

    def getGeoInfo(self):
        return self.geoInfo

    def setGeoInfo(self, geoInfo):
        self.geoInfo = geoInfo
| from com.huawei.iotplatform.client.dto.CoordinateReferenceSystem import CoordinateReferenceSystem
class QueryDeviceRealtimeLocationInDTO(object):
geoInfo = CoordinateReferenceSystem
def __init__(self):
self.horAcc = int
def getHorAcc(self):
return self.horAcc
def setHorAcc(self, horAcc):
self.horAcc = horAcc
def getGeoInfo(self):
return self.geoInfo
def setGeoInfo(self, geoInfo):
self.geoInfo = geoInfo
| none | 1 | 2.442075 | 2 | |
django-APIs/table_cleaning/migrations/0020_auto_20190503_0940.py | Henler/ReBridge_data_cloud | 0 | 6619208 | # Generated by Django 2.1.1 on 2019-05-03 07:40
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration (Django 2.1.1, 2019-05-03).

    Redefines the choice set of KeyVal.xls_type to nine spreadsheet cell
    kinds; the column stays a plain IntegerField with default 1.
    Do not hand-edit the operations -- regenerate via makemigrations.
    """

    dependencies = [
        ('table_cleaning', '0019_auto_20190502_1150'),
    ]

    operations = [
        migrations.AlterField(
            model_name='keyval',
            name='xls_type',
            # NOTE(review): labels 1 and 7 both read 'String' in the
            # generated choices -- confirm against the model definition.
            field=models.IntegerField(choices=[(0, 'Empty string'), (1, 'String'), (2, 'Float'), (3, 'Excel date'), (4, 'Boolean'), (5, 'Error'), (6, 'Zero float'), (7, 'String'), (8, 'Order')], default=1),
        ),
    ]
| # Generated by Django 2.1.1 on 2019-05-03 07:40
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('table_cleaning', '0019_auto_20190502_1150'),
]
operations = [
migrations.AlterField(
model_name='keyval',
name='xls_type',
field=models.IntegerField(choices=[(0, 'Empty string'), (1, 'String'), (2, 'Float'), (3, 'Excel date'), (4, 'Boolean'), (5, 'Error'), (6, 'Zero float'), (7, 'String'), (8, 'Order')], default=1),
),
]
| en | 0.655567 | # Generated by Django 2.1.1 on 2019-05-03 07:40 | 1.630609 | 2 |
tools/sublime-completions.py | andoma/rainbow | 0 | 6619209 | <reponame>andoma/rainbow
#!/usr/bin/env python
# Copyright (c) 2010-present Bifrost Entertainment AS and <NAME>
# Distributed under the MIT License.
# (See accompanying file LICENSE or copy at http://opensource.org/licenses/MIT)
from datetime import date
import re
import os
class NumberedParameters(object):
    """Stateful re.sub callback that numbers snippet placeholders.

    Each call rewrites a (prefix, name, suffix) parameter match into a
    Sublime snippet field ``${N:name}``, incrementing N on every invocation
    so successive parameters tab in order.
    """

    def __init__(self):
        self.count = 0

    def __call__(self, match):
        prefix, name, suffix = match.group(1, 2, 3)
        self.count += 1
        return '{0}${{{1}:{2}}}{3}'.format(prefix, self.count, name, suffix)
class SublimeCompletions(object):
    """Turn ``### name(args)`` headings from the API reference into
    Sublime Text completion entries.

    REGEX_SYNTAX  extracts the signature from a markdown heading line;
    REGEX_PARAMS  matches each parameter (delimiter, text, delimiter);
    REGEX_INSTANCE matches ``<placeholder>`` markers kept verbatim.
    """
    REGEX_INSTANCE = re.compile(r'<(.*?)>')
    REGEX_PARAMS = re.compile(r'([ \(])([\w "&\+\-\.;=]+)([\),])')
    REGEX_SYNTAX = re.compile(r'^### (\w.*?)[\n\[]')

    def format(self, line):
        # escape embedded quotes and render as one JSON-ish array entry
        return '    "{}",'.format(self.REGEX_INSTANCE.sub(r'<\1>', line.replace('"', '\\"')))

    def parse(self, ref):
        # Idiom fix: `is not None` instead of `!= None` for the None filter.
        return filter((lambda line: line is not None), map(self.parse_line, ref))

    def parse_line(self, line):
        """Return a formatted completion for a heading line, else None."""
        match = self.REGEX_SYNTAX.match(line)
        if match:
            return self.format(self.REGEX_PARAMS.sub(NumberedParameters(), match.group(1)).strip())

    def template(self):
        # outer JSON skeleton of the .sublime-completions file
        return '{{\n    "scope": "source.lua",\n    "completions": [\n{}\n    ]\n}}\n'
def generate(g, source):
    """Render the completions-file body from *source* lines via parser *g*.

    Joins every entry emitted by g.parse(), strips the trailing comma of
    the final entry, and splices the result into g.template().
    """
    entries = '\n'.join(g.parse(source))
    return g.template().format(entries[:-1])
if __name__ == '__main__':
    # Read the Lua API reference shipped with the repository
    # (doc/programming/lua/api.md, resolved relative to this script).
    f = open(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'doc', 'programming', 'lua', 'api.md'), 'r')
    if f:
        s = generate(SublimeCompletions(), f)
        f.close()
        # Write the completions file into the current working directory,
        # prefixed with a generated-file banner and license header.
        f = open('Rainbow.sublime-completions', 'w')
        f.write('// This file was generated with {}.\n'.format(os.path.basename(__file__)))
        f.write('// Copyright (c) {} Bifrost Entertainment AS and <NAME>.\n'.format(str(date.today().year)))
        f.write('// Distributed under the MIT License.\n')
        f.write(s)
        f.close()
| #!/usr/bin/env python
# Copyright (c) 2010-present Bifrost Entertainment AS and <NAME>
# Distributed under the MIT License.
# (See accompanying file LICENSE or copy at http://opensource.org/licenses/MIT)
from datetime import date
import re
import os
class NumberedParameters(object):
def __init__(self):
self.count = 0
def __call__(self, match):
self.count += 1
return '{}${{{}:{}}}{}'.format(match.group(1), self.count, match.group(2), match.group(3))
class SublimeCompletions(object):
REGEX_INSTANCE = re.compile(r'<(.*?)>')
REGEX_PARAMS = re.compile(r'([ \(])([\w "&\+\-\.;=]+)([\),])')
REGEX_SYNTAX = re.compile(r'^### (\w.*?)[\n\[]')
def format(self, line):
return ' "{}",'.format(self.REGEX_INSTANCE.sub(r'<\1>', line.replace('"', '\\"')))
def parse(self, ref):
return filter((lambda line: line != None), map(self.parse_line, ref))
def parse_line(self, line):
match = self.REGEX_SYNTAX.match(line)
if match:
return self.format(self.REGEX_PARAMS.sub(NumberedParameters(), match.group(1)).strip())
def template(self):
return '{{\n "scope": "source.lua",\n "completions": [\n{}\n ]\n}}\n'
def generate(g, source):
out = '\n'.join(g.parse(source))
return g.template().format(out[:len(out) - 1])
if __name__ == '__main__':
f = open(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'doc', 'programming', 'lua', 'api.md'), 'r')
if f:
s = generate(SublimeCompletions(), f)
f.close()
f = open('Rainbow.sublime-completions', 'w')
f.write('// This file was generated with {}.\n'.format(os.path.basename(__file__)))
f.write('// Copyright (c) {} Bifrost Entertainment AS and <NAME>.\n'.format(str(date.today().year)))
f.write('// Distributed under the MIT License.\n')
f.write(s)
f.close() | en | 0.57381 | #!/usr/bin/env python # Copyright (c) 2010-present Bifrost Entertainment AS and <NAME> # Distributed under the MIT License. # (See accompanying file LICENSE or copy at http://opensource.org/licenses/MIT) ### (\w.*?)[\n\[]') | 2.606482 | 3 |
tests/test_adder.py | enics-labs/salamandra | 1 | 6619210 | <gh_stars>1-10
# Copyright 2021 EnICS Labs, Bar-Ilan University.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
import sys, os
sys.path.append(os.path.abspath('..'))
from salamandra import *
'''
This file builds two adders in Salamandra:
A ripple adder using a variable amount of full adders in parallel
A serial adder using a single full adder and a flip flop
Each adder is itself composed of logic gates
Some other components, like a short circuit, are also defined here but unused.
The purpose of this file was to create an environment with which to test
all_connected(), all_fan_in(), & all_fan_out() in, but it may have other uses.
'''
def main():
    # Entry point for running the adder construction directly,
    # outside of the pytest harness (is_metatest=False).
    test(is_metatest=False)
def test(is_metatest):
# Length of the ripple adder
BITLENGTH = 6
#########
# GATES #
#########
inv = Component('inv')
inv.add_pin(Input('A'))
inv.add_pin(Output('Y'))
inv.set_is_physical(True)
inv.set_is_sequential(False)
andgate = Component('and')
andgate.add_pin(Input('A'))
andgate.add_pin(Input('B'))
andgate.add_pin(Output('Y'))
andgate.set_is_physical(True)
andgate.set_is_sequential(False)
orgate = Component('or')
orgate.add_pin(Input('A'))
orgate.add_pin(Input('B'))
orgate.add_pin(Output('Y'))
orgate.set_is_physical(True)
orgate.set_is_sequential(False)
xor = Component('xor')
xor.add_pin(Input('A'))
xor.add_pin(Input('B'))
xor.add_pin(Output('Y'))
xor.set_is_physical(True)
xor.set_is_sequential(False)
#############
# FLIP FLOP #
#############
FF = Component('flipflop')
FF.add_pin(Input('D'))
FF.add_pin(Output('Q'))
FF.add_pin(Input('CK'))
FF.set_is_physical(True)
FF.set_is_sequential(True)
#########
# SHORT #
#########
'''
This component isn't used in the adder.
It's a simple short cirucit cell,
which can be used to test some edge cases
with all_connected and all_fan_in/out
'''
short = Component('short')
short.add_pin_adds_net = False
# pins
short.add_pin(Input('A'))
short.add_pin(Output('Y'))
short.add_net(Net('shortnet'))
short.connect('shortnet', 'A')
short.connect('shortnet', 'Y')
short.set_is_physical(True)
short.set_is_sequential(False)
#########
# ADDER #
#########
adder = Component('adder')
# pins
adder.add_pin(Input('A'))
adder.add_pin(Input('B'))
adder.add_pin(Input('Cin'))
adder.add_pin(Output('S'))
adder.add_pin(Output('Cout'))
# adder.set_is_physical(True)
adder.set_is_sequential(False)
# subcomponents
adder.add_component(xor, 'xor0')
adder.add_component(xor, 'xor1')
adder.add_component(andgate, 'and0')
adder.add_component(andgate, 'and1')
adder.add_component(orgate, 'or')
# nets
adder.add_net(Net('XOROUT'))
adder.add_net(Net('AND0OUT'))
adder.add_net(Net('AND1OUT'))
# connections
adder.connect('A', 'xor0.A')
adder.connect('B', 'xor0.B')
adder.connect('XOROUT', 'xor0.Y')
adder.connect('A', 'and0.A')
adder.connect('B', 'and0.B')
adder.connect('AND0OUT', 'and0.Y')
adder.connect('XOROUT', 'xor1.A')
adder.connect('Cin', 'xor1.B')
adder.connect('S', 'xor1.Y')
adder.connect('XOROUT', 'and1.A')
adder.connect('Cin', 'and1.B')
adder.connect('AND1OUT', 'and1.Y')
adder.connect('AND0OUT', 'or.A')
adder.connect('AND1OUT', 'or.B')
adder.connect('Cout', 'or.Y')
##########
# RIPPLE #
##########
ripple = Component('ripple')
ripple.set_is_sequential(True)
# pins
ripple.add_pinbus(Bus(Input, 'A', BITLENGTH))
ripple.add_pinbus(Bus(Input, 'B', BITLENGTH))
ripple.add_pinbus(Bus(Output, 'S', BITLENGTH))
ripple.add_pin(Output('COUT'))
ripple.add_pin(Inout('GND'))
cnet = 'GND'
for x in range(BITLENGTH):
ripple.add_component(adder, 'adder' + str(x))
ripple.connect(cnet, 'adder' + str(x) + '.Cin')
ripple.connect('A' + str([x]), 'adder' + str(x) + '.A')
ripple.connect('B' + str([x]), 'adder' + str(x) + '.B')
ripple.connect('S' + str([x]), 'adder' + str(x) + '.S')
cnet = 'COUT'
if x < BITLENGTH - 1:
cnet = 'adder' + str(x) + 'out'
ripple.add_net(Net(cnet))
ripple.connect(cnet, 'adder' + str(x) + '.Cout')
##########
# Serial #
##########
serial = Component('serial')
# pins
serial.add_pin(Input('A'))
serial.add_pin(Input('B'))
serial.add_pin(Output('S'))
serial.add_pin(Input('CK'))
# components
serial.add_component(adder, 'adder')
serial.add_component(FF, 'ff')
# nets
serial.add_net(Net('adderout'))
serial.add_net(Net('ffout'))
# connections
serial.connect('A', 'adder.A')
serial.connect('B', 'adder.B')
serial.connect('S', 'adder.S')
serial.connect('CK', 'ff.CK')
serial.connect('adderout', 'adder.Cout')
serial.connect('adderout', 'ff.D')
serial.connect('ffout', 'ff.Q')
serial.connect('ffout', 'adder.Cin')
if not is_metatest:
# f = open('ripple.v', 'w')
for l in ripple.write_verilog():
print(l)
# f.write(l + '\n')
# f.close
# f = open('serial.v', 'w')
for l in serial.write_verilog():
# f.write(l + '\n')
print(l)
# f.close
return True
if __name__ == '__main__':
main()
| # Copyright 2021 EnICS Labs, Bar-Ilan University.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
import sys, os
sys.path.append(os.path.abspath('..'))
from salamandra import *
'''
This file builds two adders in Salamandra:
A ripple adder using a variable amount of full adders in parallel
A serial adder using a single full adder and a flip flop
Each adder is itself composed of logic gates
Some other components, like a short circuit, are also defined here but unused.
The purpose of this file was to create an environment with which to test
all_connected(), all_fan_in(), & all_fan_out() in, but it may have other uses.
'''
def main():
test(is_metatest=False)
def test(is_metatest):
# Length of the ripple adder
BITLENGTH = 6
#########
# GATES #
#########
inv = Component('inv')
inv.add_pin(Input('A'))
inv.add_pin(Output('Y'))
inv.set_is_physical(True)
inv.set_is_sequential(False)
andgate = Component('and')
andgate.add_pin(Input('A'))
andgate.add_pin(Input('B'))
andgate.add_pin(Output('Y'))
andgate.set_is_physical(True)
andgate.set_is_sequential(False)
orgate = Component('or')
orgate.add_pin(Input('A'))
orgate.add_pin(Input('B'))
orgate.add_pin(Output('Y'))
orgate.set_is_physical(True)
orgate.set_is_sequential(False)
xor = Component('xor')
xor.add_pin(Input('A'))
xor.add_pin(Input('B'))
xor.add_pin(Output('Y'))
xor.set_is_physical(True)
xor.set_is_sequential(False)
#############
# FLIP FLOP #
#############
FF = Component('flipflop')
FF.add_pin(Input('D'))
FF.add_pin(Output('Q'))
FF.add_pin(Input('CK'))
FF.set_is_physical(True)
FF.set_is_sequential(True)
#########
# SHORT #
#########
'''
This component isn't used in the adder.
It's a simple short cirucit cell,
which can be used to test some edge cases
with all_connected and all_fan_in/out
'''
short = Component('short')
short.add_pin_adds_net = False
# pins
short.add_pin(Input('A'))
short.add_pin(Output('Y'))
short.add_net(Net('shortnet'))
short.connect('shortnet', 'A')
short.connect('shortnet', 'Y')
short.set_is_physical(True)
short.set_is_sequential(False)
#########
# ADDER #
#########
adder = Component('adder')
# pins
adder.add_pin(Input('A'))
adder.add_pin(Input('B'))
adder.add_pin(Input('Cin'))
adder.add_pin(Output('S'))
adder.add_pin(Output('Cout'))
# adder.set_is_physical(True)
adder.set_is_sequential(False)
# subcomponents
adder.add_component(xor, 'xor0')
adder.add_component(xor, 'xor1')
adder.add_component(andgate, 'and0')
adder.add_component(andgate, 'and1')
adder.add_component(orgate, 'or')
# nets
adder.add_net(Net('XOROUT'))
adder.add_net(Net('AND0OUT'))
adder.add_net(Net('AND1OUT'))
# connections
adder.connect('A', 'xor0.A')
adder.connect('B', 'xor0.B')
adder.connect('XOROUT', 'xor0.Y')
adder.connect('A', 'and0.A')
adder.connect('B', 'and0.B')
adder.connect('AND0OUT', 'and0.Y')
adder.connect('XOROUT', 'xor1.A')
adder.connect('Cin', 'xor1.B')
adder.connect('S', 'xor1.Y')
adder.connect('XOROUT', 'and1.A')
adder.connect('Cin', 'and1.B')
adder.connect('AND1OUT', 'and1.Y')
adder.connect('AND0OUT', 'or.A')
adder.connect('AND1OUT', 'or.B')
adder.connect('Cout', 'or.Y')
##########
# RIPPLE #
##########
ripple = Component('ripple')
ripple.set_is_sequential(True)
# pins
ripple.add_pinbus(Bus(Input, 'A', BITLENGTH))
ripple.add_pinbus(Bus(Input, 'B', BITLENGTH))
ripple.add_pinbus(Bus(Output, 'S', BITLENGTH))
ripple.add_pin(Output('COUT'))
ripple.add_pin(Inout('GND'))
cnet = 'GND'
for x in range(BITLENGTH):
ripple.add_component(adder, 'adder' + str(x))
ripple.connect(cnet, 'adder' + str(x) + '.Cin')
ripple.connect('A' + str([x]), 'adder' + str(x) + '.A')
ripple.connect('B' + str([x]), 'adder' + str(x) + '.B')
ripple.connect('S' + str([x]), 'adder' + str(x) + '.S')
cnet = 'COUT'
if x < BITLENGTH - 1:
cnet = 'adder' + str(x) + 'out'
ripple.add_net(Net(cnet))
ripple.connect(cnet, 'adder' + str(x) + '.Cout')
##########
# Serial #
##########
serial = Component('serial')
# pins
serial.add_pin(Input('A'))
serial.add_pin(Input('B'))
serial.add_pin(Output('S'))
serial.add_pin(Input('CK'))
# components
serial.add_component(adder, 'adder')
serial.add_component(FF, 'ff')
# nets
serial.add_net(Net('adderout'))
serial.add_net(Net('ffout'))
# connections
serial.connect('A', 'adder.A')
serial.connect('B', 'adder.B')
serial.connect('S', 'adder.S')
serial.connect('CK', 'ff.CK')
serial.connect('adderout', 'adder.Cout')
serial.connect('adderout', 'ff.D')
serial.connect('ffout', 'ff.Q')
serial.connect('ffout', 'adder.Cin')
if not is_metatest:
# f = open('ripple.v', 'w')
for l in ripple.write_verilog():
print(l)
# f.write(l + '\n')
# f.close
# f = open('serial.v', 'w')
for l in serial.write_verilog():
# f.write(l + '\n')
print(l)
# f.close
return True
if __name__ == '__main__':
main() | en | 0.753115 | # Copyright 2021 EnICS Labs, Bar-Ilan University. # Licensed under the Apache License, Version 2.0, see LICENSE for details. # SPDX-License-Identifier: Apache-2.0 This file builds two adders in Salamandra: A ripple adder using a variable amount of full adders in parallel A serial adder using a single full adder and a flip flop Each adder is itself composed of logic gates Some other components, like a short circuit, are also defined here but unused. The purpose of this file was to create an environment with which to test all_connected(), all_fan_in(), & all_fan_out() in, but it may have other uses. # Length of the ripple adder ######### # GATES # ######### ############# # FLIP FLOP # ############# ######### # SHORT # ######### This component isn't used in the adder. It's a simple short cirucit cell, which can be used to test some edge cases with all_connected and all_fan_in/out # pins ######### # ADDER # ######### # pins # adder.set_is_physical(True) # subcomponents # nets # connections ########## # RIPPLE # ########## # pins ########## # Serial # ########## # pins # components # nets # connections # f = open('ripple.v', 'w') # f.write(l + '\n') # f.close # f = open('serial.v', 'w') # f.write(l + '\n') # f.close | 2.654616 | 3 |
jig/cli/interaction.py | levii/jig-py | 6 | 6619211 | <gh_stars>1-10
import dataclasses
from jig.analyzer.domain.dependency.import_dependency import ImportDependencyCollection
from jig.cli.main import _collect_source_codes
from jig.visualizer.module_dependency.domain.model.graph import Graph
from jig.visualizer.module_dependency.domain.model.master_graph import MasterGraph
from jig.visualizer.module_dependency.presentation.controller.graph_controller import (
GraphController,
)
@dataclasses.dataclass
class Jig:
@classmethod
def analyze_module_dependency(cls, project_root_path: str) -> GraphController:
source_codes = _collect_source_codes(project_root_path=project_root_path)
collection = ImportDependencyCollection.build_from_source_code_collection(
source_codes
)
dependencies = collection.build_module_dependencies()
dependency_tuples = []
for dependency in dependencies:
dependency_tuples.append((str(dependency.src), str(dependency.dest)))
master_graph = MasterGraph.from_tuple_list(dependency_tuples)
graph = Graph(master_graph=master_graph)
return GraphController(graph=graph)
| import dataclasses
from jig.analyzer.domain.dependency.import_dependency import ImportDependencyCollection
from jig.cli.main import _collect_source_codes
from jig.visualizer.module_dependency.domain.model.graph import Graph
from jig.visualizer.module_dependency.domain.model.master_graph import MasterGraph
from jig.visualizer.module_dependency.presentation.controller.graph_controller import (
GraphController,
)
@dataclasses.dataclass
class Jig:
@classmethod
def analyze_module_dependency(cls, project_root_path: str) -> GraphController:
source_codes = _collect_source_codes(project_root_path=project_root_path)
collection = ImportDependencyCollection.build_from_source_code_collection(
source_codes
)
dependencies = collection.build_module_dependencies()
dependency_tuples = []
for dependency in dependencies:
dependency_tuples.append((str(dependency.src), str(dependency.dest)))
master_graph = MasterGraph.from_tuple_list(dependency_tuples)
graph = Graph(master_graph=master_graph)
return GraphController(graph=graph) | none | 1 | 2.198472 | 2 | |
Client/Classes/ConfigParser.py | crew/dds-client | 0 | 6619212 | <reponame>crew/dds-client
class ConfigParser:
"""
Configuration Parser Class
@copyright: Northeastern University Crew 2014
"""
@staticmethod
def readConfig():
"""
Reads the contents of PIE.conf
@return: User-specified Configuration
@rtype: Dict
"""
config = open("Configs/PIE.conf", "r")
configContents = config.read()
configDict = {}
for line in configContents.splitlines():
if not (line.startswith("[") or line == ""):
pair = ConfigParser.getPair(line)
configDict[pair[0]] = pair[1]
return configDict
@staticmethod
def getPair(line):
"""
Parses the given configuration file
line into a tuple.
@param line: The line to parse
@type line: String
@return: Tuple of the form (key, value)
@rtype: Tuple
"""
split = line.replace(" ", "").split("=")
if len(split) != 2:
raise Exception("Bad config file...")
if split[1].find("[") != -1:
if split[1] != "[]":
temp = []
for string in split[1][1:-1].split(","):
temp.append(string)
split[1] = temp
else:
split[1] = []
return split[0], split[1]
| class ConfigParser:
"""
Configuration Parser Class
@copyright: Northeastern University Crew 2014
"""
@staticmethod
def readConfig():
"""
Reads the contents of PIE.conf
@return: User-specified Configuration
@rtype: Dict
"""
config = open("Configs/PIE.conf", "r")
configContents = config.read()
configDict = {}
for line in configContents.splitlines():
if not (line.startswith("[") or line == ""):
pair = ConfigParser.getPair(line)
configDict[pair[0]] = pair[1]
return configDict
@staticmethod
def getPair(line):
"""
Parses the given configuration file
line into a tuple.
@param line: The line to parse
@type line: String
@return: Tuple of the form (key, value)
@rtype: Tuple
"""
split = line.replace(" ", "").split("=")
if len(split) != 2:
raise Exception("Bad config file...")
if split[1].find("[") != -1:
if split[1] != "[]":
temp = []
for string in split[1][1:-1].split(","):
temp.append(string)
split[1] = temp
else:
split[1] = []
return split[0], split[1] | en | 0.639606 | Configuration Parser Class @copyright: Northeastern University Crew 2014 Reads the contents of PIE.conf @return: User-specified Configuration @rtype: Dict Parses the given configuration file line into a tuple. @param line: The line to parse @type line: String @return: Tuple of the form (key, value) @rtype: Tuple | 3.375989 | 3 |
pytglib/api/functions/send_inline_query_result_message.py | iTeam-co/pytglib | 6 | 6619213 | <filename>pytglib/api/functions/send_inline_query_result_message.py<gh_stars>1-10
from ..utils import Object
class SendInlineQueryResultMessage(Object):
"""
Sends the result of an inline query as a message. Returns the sent message. Always clears a chat draft message
Attributes:
ID (:obj:`str`): ``SendInlineQueryResultMessage``
Args:
chat_id (:obj:`int`):
Target chat
reply_to_message_id (:obj:`int`):
Identifier of a message to reply to or 0
options (:class:`telegram.api.types.sendMessageOptions`):
Options to be used to send the message
query_id (:obj:`int`):
Identifier of the inline query
result_id (:obj:`str`):
Identifier of the inline result
hide_via_bot (:obj:`bool`):
If true, there will be no mention of a bot, via which the message is sentCan be used only for bots GetOption("animation_search_bot_username"), GetOption("photo_search_bot_username") and GetOption("venue_search_bot_username")
Returns:
Message
Raises:
:class:`telegram.Error`
"""
ID = "sendInlineQueryResultMessage"
def __init__(self, chat_id, reply_to_message_id, options, query_id, result_id, hide_via_bot, extra=None, **kwargs):
self.extra = extra
self.chat_id = chat_id # int
self.reply_to_message_id = reply_to_message_id # int
self.options = options # SendMessageOptions
self.query_id = query_id # int
self.result_id = result_id # str
self.hide_via_bot = hide_via_bot # bool
@staticmethod
def read(q: dict, *args) -> "SendInlineQueryResultMessage":
chat_id = q.get('chat_id')
reply_to_message_id = q.get('reply_to_message_id')
options = Object.read(q.get('options'))
query_id = q.get('query_id')
result_id = q.get('result_id')
hide_via_bot = q.get('hide_via_bot')
return SendInlineQueryResultMessage(chat_id, reply_to_message_id, options, query_id, result_id, hide_via_bot)
| <filename>pytglib/api/functions/send_inline_query_result_message.py<gh_stars>1-10
from ..utils import Object
class SendInlineQueryResultMessage(Object):
"""
Sends the result of an inline query as a message. Returns the sent message. Always clears a chat draft message
Attributes:
ID (:obj:`str`): ``SendInlineQueryResultMessage``
Args:
chat_id (:obj:`int`):
Target chat
reply_to_message_id (:obj:`int`):
Identifier of a message to reply to or 0
options (:class:`telegram.api.types.sendMessageOptions`):
Options to be used to send the message
query_id (:obj:`int`):
Identifier of the inline query
result_id (:obj:`str`):
Identifier of the inline result
hide_via_bot (:obj:`bool`):
If true, there will be no mention of a bot, via which the message is sentCan be used only for bots GetOption("animation_search_bot_username"), GetOption("photo_search_bot_username") and GetOption("venue_search_bot_username")
Returns:
Message
Raises:
:class:`telegram.Error`
"""
ID = "sendInlineQueryResultMessage"
def __init__(self, chat_id, reply_to_message_id, options, query_id, result_id, hide_via_bot, extra=None, **kwargs):
self.extra = extra
self.chat_id = chat_id # int
self.reply_to_message_id = reply_to_message_id # int
self.options = options # SendMessageOptions
self.query_id = query_id # int
self.result_id = result_id # str
self.hide_via_bot = hide_via_bot # bool
@staticmethod
def read(q: dict, *args) -> "SendInlineQueryResultMessage":
chat_id = q.get('chat_id')
reply_to_message_id = q.get('reply_to_message_id')
options = Object.read(q.get('options'))
query_id = q.get('query_id')
result_id = q.get('result_id')
hide_via_bot = q.get('hide_via_bot')
return SendInlineQueryResultMessage(chat_id, reply_to_message_id, options, query_id, result_id, hide_via_bot)
| en | 0.424392 | Sends the result of an inline query as a message. Returns the sent message. Always clears a chat draft message Attributes: ID (:obj:`str`): ``SendInlineQueryResultMessage`` Args: chat_id (:obj:`int`): Target chat reply_to_message_id (:obj:`int`): Identifier of a message to reply to or 0 options (:class:`telegram.api.types.sendMessageOptions`): Options to be used to send the message query_id (:obj:`int`): Identifier of the inline query result_id (:obj:`str`): Identifier of the inline result hide_via_bot (:obj:`bool`): If true, there will be no mention of a bot, via which the message is sentCan be used only for bots GetOption("animation_search_bot_username"), GetOption("photo_search_bot_username") and GetOption("venue_search_bot_username") Returns: Message Raises: :class:`telegram.Error` # int # int # SendMessageOptions # int # str # bool | 2.689134 | 3 |
tools/tensorrt/convert_trt_engine.py | ZhuokunYao/smoke | 0 | 6619214 | import argparse
import os
from PIL import Image
import numpy as np
import csv
import cv2
from tqdm import tqdm
import tensorrt as trt
import pycuda.driver as cuda
import pycuda.autoinit
from torchvision.transforms import functional as F
import torch
from smoke.config import cfg
from smoke.modeling.heads.smoke_head.post_processor import make_smoke_post_processor
from smoke.modeling.heatmap_coder import get_transfrom_matrix
from smoke.structures.params_3d import ParamsList
from tools.utils import compute_box_3d, project_to_image, draw_box_3d
# Map the CLI precision flag to the TensorRT datatype enum.
TRT_DATA_TYPE = {
    'fp32': trt.DataType.FLOAT,
    'fp16': trt.DataType.HALF
}
# Class id -> KITTI-style label name used when writing prediction txt files.
ID_TYPE_CONVERSION = {
    0: 'Car',
    1: 'Cyclist',
    2: 'Pedestrian',
    3: 'Truck',
    4: 'Tricycle',
    5: 'Bus',
    6: 'Cyclist_stopped',
}
# Camera view name -> projection-matrix index (selects the "P<id>:" row in the calib file).
CAMERA_TO_ID = {
    'front': 0,
    'front_left': 1,
    'front_right': 2,
    'side_left': 3,
    'side_right': 4,
}
# Command-line interface: model paths, dataset layout and TensorRT precision.
parser = argparse.ArgumentParser(description='Convert ONNX model to TensorRT file ...')
parser.add_argument('--cfg_path', type=str, help='The path of config file',
                    default='configs/smoke_jdx_resnet18_640x480.yaml')
parser.add_argument('--onnx_path', type=str, help='The path of ONNX model',
                    default='path/to/ur/checkpoint.onnx')
parser.add_argument('--engine_path', type=str, help='The path of TensorRT engine',
                    default='path/to/ur/checkpoint.engine')
parser.add_argument('--dataset_type', type=str, help='Specify a dataset type', default='jdx')
parser.add_argument('--camera_type', type=str, help='Specify the camera view, default is None for kitti and jdx',
                    default=None)
parser.add_argument('--trt_data_type', type=str, help='Specify a TensorRT precision', default='fp16')
parser.add_argument('--validation_dir', type=str, help='The path of dataset', default='datasets/jdx_test/front/training/')
parser.add_argument('--output_dir', type=str, help='Specify a dir to save results', default='demo/jdx_test_trt/')
args = parser.parse_args()
# Flatten frequently used arguments into module-level names (read by run_demo and the main guard).
onnx_path = args.onnx_path
engine_path = args.engine_path
dataset_type = args.dataset_type
validation_dir = args.validation_dir
camera_type = args.camera_type
trt_data_type = TRT_DATA_TYPE[args.trt_data_type]
# Load the experiment config and force the TensorRT-compatible code path in the model.
cfg.merge_from_file(args.cfg_path)
cfg.ENABLE_TENSORRT = True
# TensorRT logger singleton
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
# Simple helper data class that's a little nicer to use than a 2-tuple.
class HostDeviceMem(object):
    """Pairs a pagelocked host buffer with its corresponding device allocation."""

    def __init__(self, host_mem, device_mem):
        # Host side is filled/read by the caller; device side is an opaque allocation.
        self.host = host_mem
        self.device = device_mem

    def __str__(self):
        # Render the host block followed by the device block, one per line.
        return f'Host:\n{self.host}\nDevice:\n{self.device}'

    def __repr__(self):
        return self.__str__()
# Allocates all buffers required for an engine, i.e. host/device inputs/outputs.
def allocate_buffers(engine):
    """Allocate a pagelocked host buffer and a device buffer for every engine binding.

    Returns:
        (inputs, outputs, bindings, stream): lists of HostDeviceMem for input
        and output bindings, the device pointers in binding order (for the
        execution context), and a cuda.Stream for the async transfers.
    """
    input_pairs = []
    output_pairs = []
    device_pointers = []
    stream = cuda.Stream()
    for binding in engine:
        # Buffer size = binding volume times the maximum batch size.
        element_count = trt.volume(engine.get_binding_shape(binding)) * engine.max_batch_size
        np_dtype = trt.nptype(engine.get_binding_dtype(binding))
        # Pagelocked host memory plus a matching device allocation.
        pinned = cuda.pagelocked_empty(element_count, np_dtype)
        gpu_buf = cuda.mem_alloc(pinned.nbytes)
        # The execution context wants raw device pointers in binding order.
        device_pointers.append(int(gpu_buf))
        pair = HostDeviceMem(pinned, gpu_buf)
        if engine.binding_is_input(binding):
            input_pairs.append(pair)
        else:
            output_pairs.append(pair)
    return input_pairs, output_pairs, device_pointers, stream
def create_trt_engine(onnx_path, engine_path='', data_type=trt.DataType.HALF):
    """Load a serialized TensorRT engine if one exists, otherwise build it from ONNX.

    Args:
        onnx_path: Path to the ONNX model, parsed only when no engine is cached.
        engine_path: Path of the serialized engine; read if present, written after a build.
        data_type: trt.DataType.FLOAT or trt.DataType.HALF (FP16 is enabled only
            when the platform supports it).

    Returns:
        A deserialized or freshly built TensorRT engine, or None if the build failed.

    Raises:
        FileNotFoundError: if neither the serialized engine nor the ONNX file exists.
        IOError: if the ONNX file cannot be parsed.
    """

    def build_engine(data_type=trt.DataType.HALF):
        """Parse the ONNX file, build a TensorRT engine and serialize it to engine_path."""
        with trt.Builder(TRT_LOGGER) as builder, \
                builder.create_network() as network, \
                trt.OnnxParser(network, TRT_LOGGER) as parser:
            builder.max_workspace_size = 1 << 30  # 1GB scratch space for tactic selection
            builder.max_batch_size = 1
            # Enable FP16 only when both requested and supported by this GPU.
            if data_type == trt.DataType.HALF and builder.platform_has_fast_fp16:
                builder.fp16_mode = True

            if not os.path.exists(onnx_path):
                # Raise instead of print + exit(0): the original terminated the
                # process with a *success* status even though conversion failed.
                raise FileNotFoundError(
                    'ONNX file {} not found, please run convert_to_onnx.py first to generate it.'.format(onnx_path))

            print('Loading ONNX file from path {}...'.format(onnx_path))
            with open(onnx_path, 'rb') as model:
                print('Beginning ONNX file parsing')
                parser.parse(model.read())
                # Surface the first parser error, if any, before building.
                err = parser.get_error(0)
                if err is not None:
                    print('[ERROR] {}'.format(err))
                    raise IOError('Failed to parse ONNX file')
            print('Completed parsing of ONNX file')

            print('Building an engine from file {}; this may take a while...'.format(onnx_path))
            engine = builder.build_cuda_engine(network)
            print('Completed creating Engine')
            if engine is None:
                print('Can not create Engine')
            else:
                # Cache the engine so subsequent runs skip the (slow) build.
                with open(engine_path, 'wb') as f:
                    f.write(engine.serialize())
            return engine

    if os.path.exists(engine_path):
        # Reuse the previously serialized engine instead of rebuilding.
        print('Loading engine from file {}'.format(engine_path))
        with open(engine_path, 'rb') as f, \
                trt.Runtime(TRT_LOGGER) as runtime:
            return runtime.deserialize_cuda_engine(f.read())
    else:
        return build_engine(data_type=data_type)
# This function is generalized for multiple inputs/outputs.
# inputs and outputs are expected to be lists of HostDeviceMem objects.
def do_inference(context, bindings, inputs, outputs, stream, batch_size=1):
    """Run one asynchronous inference pass and return the host-side outputs.

    Generalized for any number of inputs/outputs: `inputs` and `outputs` are
    lists of HostDeviceMem, and the `.host` buffers of `inputs` must already
    be filled before the call.
    """
    # Transfer input data to the GPU.
    for inp in inputs:
        cuda.memcpy_htod_async(inp.device, inp.host, stream)
    # Run inference.
    context.execute_async(batch_size=batch_size, bindings=bindings, stream_handle=stream.handle)
    # Transfer predictions back from the GPU (plain loop, not a throwaway
    # list comprehension used only for its side effects).
    for out in outputs:
        cuda.memcpy_dtoh_async(out.host, out.device, stream)
    # Wait for all queued async work to finish before reading host buffers.
    stream.synchronize()
    # Return only the host outputs.
    return [out.host for out in outputs]
def load_intrinsic_matrix(calib_file, camera_type):
    """Read the camera projection matrix from a KITTI-style calibration file.

    Args:
        calib_file: Path to the space-separated calibration file.
        camera_type: Camera view name (a key of CAMERA_TO_ID) selecting the
            "P<id>:" row, or None to use the default "P2:" row.

    Returns:
        (K, P): the 3x3 intrinsic matrix and the full 3x4 projection matrix,
        both float32 numpy arrays (K is a view into P's left 3x3 block).

    Raises:
        ValueError: if the requested projection row is missing (the original
            fell through to an opaque UnboundLocalError in that case).
    """
    proj_type = 'P2:' if camera_type is None else 'P{}:'.format(CAMERA_TO_ID[camera_type])
    with open(calib_file, 'r') as csv_file:
        reader = csv.reader(csv_file, delimiter=' ')
        for row in reader:
            # Skip blank lines, then match on the row label (e.g. "P2:").
            if row and row[0] == proj_type:
                P = np.array([float(i) for i in row[1:]], dtype=np.float32).reshape(3, 4)
                K = P[:3, :3]
                return K, P
    raise ValueError('Projection matrix {} not found in {}'.format(proj_type, calib_file))
def draw_3d_box_on_image(img, prediction, P):
    """Project every predicted 3D box through P and draw it onto the image."""
    canvas = np.asarray(img)
    for pred in prediction:
        values = pred.numpy().round(4)
        # Fields 6-8 hold the box dimensions, 9-11 the location, 12 rotation_y.
        dims = [float(values[6]), float(values[7]), float(values[8])]
        center = [float(values[9]), float(values[10]), float(values[11])]
        yaw = float(values[12])
        corners_3d = compute_box_3d(dims, center, yaw)
        corners_2d = project_to_image(corners_3d, P)
        canvas = draw_box_3d(canvas, corners_2d)
    return canvas
def generate_kitti_3d_detection(prediction, predict_txt):
    """Write predictions to `predict_txt` in KITTI 3D-detection label format.

    Each row is the class label, two zeros (truncated/occluded placeholders),
    then the remaining prediction fields rounded to 4 decimals.  An empty
    prediction still produces a file (one empty row) so downstream tooling
    finds a result per image.
    """
    with open(predict_txt, 'w', newline='') as f:
        w = csv.writer(f, delimiter=' ', lineterminator='\n')
        if len(prediction) == 0:
            w.writerow([])
        else:
            for p in prediction:
                values = p.numpy().round(4)
                # Renamed from `type` to avoid shadowing the builtin.
                class_name = ID_TYPE_CONVERSION[int(values[0])]
                w.writerow([class_name, 0, 0] + values[1:].tolist())
def run_demo(engine, output_dir):
    """Run the TensorRT engine over the validation split and dump results.

    For every image listed in the val split this reads the image and its
    calibration, preprocesses to the configured network resolution, runs
    inference, post-processes the raw head outputs into 3D boxes, then
    writes a visualization jpg under <output_dir>/image and a KITTI-format
    prediction txt under <output_dir>/prediction.

    Relies on the module-level globals parsed from the CLI (validation_dir,
    dataset_type, camera_type) and on the global `cfg`.
    """
    output_image_dir = os.path.join(output_dir, 'image')
    output_pred_dir = os.path.join(output_dir, 'prediction')
    if not os.path.exists(output_image_dir):
        os.makedirs(output_image_dir)
    if not os.path.exists(output_pred_dir):
        os.makedirs(output_pred_dir)
    input_width = cfg.INPUT.WIDTH_TEST
    input_height = cfg.INPUT.HEIGHT_TEST
    # Head feature maps are the input resolution divided by the backbone stride.
    output_width, output_height = int(input_width / cfg.MODEL.BACKBONE.DOWN_RATIO), int(
        input_height / cfg.MODEL.BACKBONE.DOWN_RATIO)
    # Expected shapes of the two engine outputs: regression heads map and per-class map.
    output_shapes = [(1, cfg.MODEL.SMOKE_HEAD.REGRESSION_HEADS, output_height, output_width),
                     (1, len(cfg.DATASETS.DETECT_CLASSES), output_height, output_width)]
    post_processor = make_smoke_post_processor(cfg)
    context = engine.create_execution_context()
    # allocate the buffer of the host device
    inputs, outputs, bindings, stream = allocate_buffers(engine)
    val_list_path = os.path.join(validation_dir, 'ImageSets/val.txt')
    images_dir = os.path.join(validation_dir, 'image_2')
    calibs_dir = os.path.join(validation_dir, 'calib')
    # Waymo-style layouts keep per-camera image dirs and per-camera val lists.
    if "waymo720" in dataset_type:
        images_dir = os.path.join(validation_dir, 'image_2', camera_type)
        calibs_dir = os.path.join(validation_dir, 'calib')
        val_list_path = os.path.join(validation_dir, 'ImageSets', 'val_{}.txt'.format(camera_type))
    # NOTE(review): list_file is never closed; consider a `with` block.
    list_file = open(val_list_path, 'r')
    for idx, image_name in enumerate(tqdm(list_file.readlines())):
        image_name = image_name.strip()
        # Prefer the .jpg variant; fall back to .png when it does not exist.
        image_path = os.path.join(images_dir, image_name + '.jpg') if os.path.exists(
            os.path.join(images_dir, image_name + '.jpg')) else os.path.join(images_dir, image_name + '.png')
        calib_path = os.path.join(calibs_dir, image_name + '.txt')
        img_cv = cv2.imread(image_path)
        image = Image.fromarray(img_cv)
        K, P = load_intrinsic_matrix(calib_path, camera_type)
        # Keep a copy of the unscaled intrinsics; K itself may be rescaled below.
        K_src = K.copy()
        if cfg.INPUT.TEST_AFFINE_TRANSFORM:
            # Warp the image to the network resolution with an affine transform.
            # NOTE(review): this branch never sets src_width/src_height, so the
            # ParamsList construction below raises UnboundLocalError when
            # TEST_AFFINE_TRANSFORM is enabled — confirm and fix if this path is used.
            center = np.array([i / 2 for i in image.size], dtype=np.float32)
            size = np.array([i for i in image.size], dtype=np.float32)
            center_size = [center, size]
            trans_affine = get_transfrom_matrix(center_size, [input_width, input_height])
            trans_affine_inv = np.linalg.inv(trans_affine)
            image = image.transform(
                (input_width, input_height),
                method=Image.AFFINE,
                data=trans_affine_inv.flatten()[:6],
                resample=Image.BILINEAR)
        else:
            # Resize the image and rescale the intrinsic parameters to match.
            src_width, src_height = image.size
            image = image.resize((input_width, input_height), Image.BICUBIC)
            K[0] = K[0] * input_width / src_width
            K[1] = K[1] * input_height / src_height
        # Transform matrix mapping the network input plane to the head output plane.
        center = np.array([i / 2 for i in image.size], dtype=np.float32)
        size = np.array([i for i in image.size], dtype=np.float32)
        center_size = [center, size]
        trans_mat = get_transfrom_matrix(center_size, [output_width, output_height])
        target = ParamsList(image_size=[src_width, src_height], is_train=False)
        target.add_field('K_src', K_src)
        target.add_field('trans_mat', trans_mat)
        target.add_field('K', K)
        target = [target.to(cfg.MODEL.DEVICE)]
        # transform: reorder channels ([2, 1, 0]), scale to 0..255, and make the
        # buffer a C-contiguous float32 array as the engine input expects.
        img = F.to_tensor(image)
        img = img[[2, 1, 0]]
        img = img * 255.0
        img = np.array(img.numpy(), dtype=np.float32, order='C')
        inputs[0].host = img
        trt_outputs = do_inference(context, bindings=bindings, inputs=inputs, outputs=outputs, stream=stream)
        ''' 2 postprocess the output of the TensorRT engine'''
        # reshape the label prediction and bbox prediction
        trt_outputs = [torch.from_numpy(output.reshape(shape)) for output, shape in zip(trt_outputs, output_shapes)]
        # The post-processor expects (class map, regression map) order, so reverse.
        trt_outputs.reverse()
        trt_outputs = [output.to(cfg.MODEL.DEVICE) for output in trt_outputs]
        prediction = post_processor.forward(trt_outputs, target)
        image = draw_3d_box_on_image(image, prediction.to('cpu'), P)
        cv2.imwrite(os.path.join(output_image_dir, image_name + '.jpg'), image)
        generate_kitti_3d_detection(prediction.to('cpu'), os.path.join(output_pred_dir, image_name + '.txt'))
if __name__ == '__main__':
    '''Create a TensorRT engine for ONNX-based and run inference.'''
    # Build (or load the cached) engine at the requested precision, then run it
    # over the validation split specified on the command line.
    engine = create_trt_engine(onnx_path, engine_path, data_type=trt_data_type)
    run_demo(engine, args.output_dir)
| import argparse
import os
from PIL import Image
import numpy as np
import csv
import cv2
from tqdm import tqdm
import tensorrt as trt
import pycuda.driver as cuda
import pycuda.autoinit
from torchvision.transforms import functional as F
import torch
from smoke.config import cfg
from smoke.modeling.heads.smoke_head.post_processor import make_smoke_post_processor
from smoke.modeling.heatmap_coder import get_transfrom_matrix
from smoke.structures.params_3d import ParamsList
from tools.utils import compute_box_3d, project_to_image, draw_box_3d
# Map the CLI precision flag to the TensorRT datatype enum.
TRT_DATA_TYPE = {
    'fp32': trt.DataType.FLOAT,
    'fp16': trt.DataType.HALF
}
# Class id -> KITTI-style label name used when writing prediction txt files.
ID_TYPE_CONVERSION = {
    0: 'Car',
    1: 'Cyclist',
    2: 'Pedestrian',
    3: 'Truck',
    4: 'Tricycle',
    5: 'Bus',
    6: 'Cyclist_stopped',
}
# Camera view name -> projection-matrix index (selects the "P<id>:" row in the calib file).
CAMERA_TO_ID = {
    'front': 0,
    'front_left': 1,
    'front_right': 2,
    'side_left': 3,
    'side_right': 4,
}
# Command-line interface: model paths, dataset layout and TensorRT precision.
parser = argparse.ArgumentParser(description='Convert ONNX model to TensorRT file ...')
parser.add_argument('--cfg_path', type=str, help='The path of config file',
                    default='configs/smoke_jdx_resnet18_640x480.yaml')
parser.add_argument('--onnx_path', type=str, help='The path of ONNX model',
                    default='path/to/ur/checkpoint.onnx')
parser.add_argument('--engine_path', type=str, help='The path of TensorRT engine',
                    default='path/to/ur/checkpoint.engine')
parser.add_argument('--dataset_type', type=str, help='Specify a dataset type', default='jdx')
parser.add_argument('--camera_type', type=str, help='Specify the camera view, default is None for kitti and jdx',
                    default=None)
parser.add_argument('--trt_data_type', type=str, help='Specify a TensorRT precision', default='fp16')
parser.add_argument('--validation_dir', type=str, help='The path of dataset', default='datasets/jdx_test/front/training/')
parser.add_argument('--output_dir', type=str, help='Specify a dir to save results', default='demo/jdx_test_trt/')
args = parser.parse_args()
# Flatten frequently used arguments into module-level names (read by run_demo and the main guard).
onnx_path = args.onnx_path
engine_path = args.engine_path
dataset_type = args.dataset_type
validation_dir = args.validation_dir
camera_type = args.camera_type
trt_data_type = TRT_DATA_TYPE[args.trt_data_type]
# Load the experiment config and force the TensorRT-compatible code path in the model.
cfg.merge_from_file(args.cfg_path)
cfg.ENABLE_TENSORRT = True
# TensorRT logger singleton
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
# Simple helper data class that's a little nicer to use than a 2-tuple.
class HostDeviceMem(object):
    """Pairs a pagelocked host buffer with its matching device allocation."""

    def __init__(self, host_mem, device_mem):
        # Keep both ends of a host<->device transfer together.
        self.host = host_mem
        self.device = device_mem

    def __str__(self):
        return 'Host:\n{}\nDevice:\n{}'.format(self.host, self.device)

    def __repr__(self):
        return str(self)
# Allocates all buffers required for an engine, i.e. host/device inputs/outputs.
def allocate_buffers(engine):
    """Allocate host/device buffers and a CUDA stream for every engine binding.

    Returns (inputs, outputs, bindings, stream) where inputs/outputs are
    lists of HostDeviceMem and bindings is the list of raw device pointers
    in binding order, as TensorRT expects.
    """
    inputs, outputs, bindings = [], [], []
    stream = cuda.Stream()
    for binding in engine:
        n_elems = trt.volume(engine.get_binding_shape(binding)) * engine.max_batch_size
        np_dtype = trt.nptype(engine.get_binding_dtype(binding))
        # Pagelocked host memory enables asynchronous host<->device copies.
        host_buf = cuda.pagelocked_empty(n_elems, np_dtype)
        dev_buf = cuda.mem_alloc(host_buf.nbytes)
        # TensorRT consumes raw device pointers via the bindings list.
        bindings.append(int(dev_buf))
        pair = HostDeviceMem(host_buf, dev_buf)
        if engine.binding_is_input(binding):
            inputs.append(pair)
        else:
            outputs.append(pair)
    return inputs, outputs, bindings, stream
def create_trt_engine(onnx_path, engine_path='', data_type=trt.DataType.HALF):
    """Load a serialized TensorRT engine if it exists; otherwise build one from ONNX.

    Parameters
    ----------
    onnx_path : str
        Path to the ONNX model used to build the engine.
    engine_path : str
        Where the serialized engine is read from / written to.
    data_type : trt.DataType
        Target precision (``FLOAT`` or ``HALF``).
    """
    def build_engine(data_type=trt.DataType.HALF):
        """Parse the ONNX file, build a TensorRT engine and serialize it."""
        with trt.Builder(TRT_LOGGER) as builder, \
                builder.create_network() as network, \
                trt.OnnxParser(network, TRT_LOGGER) as parser:
            builder.max_workspace_size = 1 << 30  # 1GB
            builder.max_batch_size = 1
            # Only enable fp16 when the target platform actually supports it.
            if data_type == trt.DataType.HALF and builder.platform_has_fast_fp16:
                builder.fp16_mode = True
            # Verify the ONNX file exists before attempting to parse it.
            if not os.path.exists(onnx_path):
                print('ONNX file {} not found, please run convert_to_onnx.py first to generate it.'.format(onnx_path))
                # Exit with a non-zero status: a missing model is a failure,
                # not a successful run (exit(0) would report success).
                exit(1)
            print('Loading ONNX file from path {}...'.format(onnx_path))
            with open(onnx_path, 'rb') as model:
                print('Beginning ONNX file parsing')
                parser.parse(model.read())
                err = parser.get_error(0)
                if err is not None:
                    print('[ERROR] {}'.format(err))
                    raise IOError('Failed to parse ONNX file')
            print('Completed parsing of ONNX file')
            print('Building an engine from file {}; this may take a while...'.format(onnx_path))
            engine = builder.build_cuda_engine(network)
            print('Completed creating Engine')
            if engine is None:
                print('Can not create Engine')
            else:
                # Serialize so subsequent runs can skip the (slow) build step.
                with open(engine_path, 'wb') as f:
                    f.write(engine.serialize())
            return engine
    if os.path.exists(engine_path):
        # A serialized engine already exists: deserialize and reuse it.
        print('Loading engine from file {}'.format(engine_path))
        with open(engine_path, 'rb') as f, \
                trt.Runtime(TRT_LOGGER) as runtime:
            return runtime.deserialize_cuda_engine(f.read())
    else:
        return build_engine(data_type=data_type)
# This function is generalized for multiple inputs/outputs.
# inputs and outputs are expected to be lists of HostDeviceMem objects.
def do_inference(context, bindings, inputs, outputs, stream, batch_size=1):
    """Run one asynchronous inference pass and return the host-side outputs.

    Copies the inputs host->device, executes the engine, copies the outputs
    device->host, and synchronizes the stream before returning.
    """
    # Transfer input data to the GPU.
    for inp in inputs:
        cuda.memcpy_htod_async(inp.device, inp.host, stream)
    # Run inference.
    context.execute_async(batch_size=batch_size, bindings=bindings, stream_handle=stream.handle)
    # Transfer predictions back from the GPU. A plain loop replaces the
    # original list comprehension that was used only for its side effects.
    for out in outputs:
        cuda.memcpy_dtoh_async(out.host, out.device, stream)
    # Block until all queued copies/kernels have finished.
    stream.synchronize()
    # Return only the host outputs.
    return [out.host for out in outputs]
def load_intrinsic_matrix(calib_file, camera_type):
    """Read camera intrinsics from a KITTI-style calibration file.

    Parameters
    ----------
    calib_file : str
        Path to the space-separated calibration text file.
    camera_type : str or None
        Camera view name; ``None`` selects the default 'P2:' row, otherwise
        the id from ``CAMERA_TO_ID`` picks the 'P<id>:' row.

    Returns
    -------
    (K, P) : (np.ndarray, np.ndarray)
        The 3x3 intrinsic matrix and the full 3x4 projection matrix
        (float32).

    Raises
    ------
    ValueError
        If the requested projection row is not present in the file.
    """
    proj_type = 'P2:' if camera_type is None else 'P{}:'.format(CAMERA_TO_ID[camera_type])
    with open(calib_file, 'r') as csv_file:
        reader = csv.reader(csv_file, delimiter=' ')
        for row in reader:
            # Skip blank lines (empty rows) and rows for other projections;
            # the original indexed row[0] unguarded and could IndexError.
            if not row or row[0] != proj_type:
                continue
            P = np.array([float(v) for v in row[1:]], dtype=np.float32).reshape(3, 4)
            K = P[:3, :3]
            return K, P
    # Previously K/P would be unbound here, raising a confusing
    # UnboundLocalError; fail with an explicit message instead.
    raise ValueError('Projection {} not found in {}'.format(proj_type, calib_file))
def draw_3d_box_on_image(img, prediction, P):
    """Project each predicted 3D box through P and draw it on the image."""
    canvas = np.asarray(img)
    for pred in prediction:
        values = pred.numpy().round(4)
        # Prediction layout: [6:9] = dimensions, [9:12] = location, [12] = yaw.
        dims = [float(values[i]) for i in (6, 7, 8)]
        center = [float(values[i]) for i in (9, 10, 11)]
        yaw = float(values[12])
        corners_3d = compute_box_3d(dims, center, yaw)
        corners_2d = project_to_image(corners_3d, P)
        canvas = draw_box_3d(canvas, corners_2d)
    return canvas
def generate_kitti_3d_detection(prediction, predict_txt):
    """Write predictions to a KITTI-format detection txt file.

    Each row is: class label, truncated (0), occluded (0), followed by the
    remaining prediction values rounded to 4 decimals. An empty prediction
    produces a single empty row.
    """
    with open(predict_txt, 'w', newline='') as f:
        w = csv.writer(f, delimiter=' ', lineterminator='\n')
        if len(prediction) == 0:
            w.writerow([])
        else:
            for p in prediction:
                values = p.numpy().round(4)
                # Renamed from `type`, which shadowed the builtin.
                obj_type = ID_TYPE_CONVERSION[int(values[0])]
                row = [obj_type, 0, 0] + values[1:].tolist()
                w.writerow(row)
def run_demo(engine, output_dir):
    """Run TensorRT inference over the validation set and save results.

    For each image in the validation list this loads the image and its
    calibration, runs the engine, post-processes the raw outputs, and writes
    a rendered image to ``<output_dir>/image`` plus a KITTI-format txt to
    ``<output_dir>/prediction``.

    Parameters
    ----------
    engine : tensorrt.ICudaEngine
        Deserialized TensorRT engine.
    output_dir : str
        Directory that receives the 'image/' and 'prediction/' outputs.
    """
    output_image_dir = os.path.join(output_dir, 'image')
    output_pred_dir = os.path.join(output_dir, 'prediction')
    if not os.path.exists(output_image_dir):
        os.makedirs(output_image_dir)
    if not os.path.exists(output_pred_dir):
        os.makedirs(output_pred_dir)
    input_width = cfg.INPUT.WIDTH_TEST
    input_height = cfg.INPUT.HEIGHT_TEST
    output_width, output_height = int(input_width / cfg.MODEL.BACKBONE.DOWN_RATIO), int(
        input_height / cfg.MODEL.BACKBONE.DOWN_RATIO)
    # Shapes used to un-flatten the raw engine outputs:
    # (regression head, class heatmap) feature maps.
    output_shapes = [(1, cfg.MODEL.SMOKE_HEAD.REGRESSION_HEADS, output_height, output_width),
                     (1, len(cfg.DATASETS.DETECT_CLASSES), output_height, output_width)]
    post_processor = make_smoke_post_processor(cfg)
    context = engine.create_execution_context()
    # Allocate host/device buffers once and reuse them for every image.
    inputs, outputs, bindings, stream = allocate_buffers(engine)
    val_list_path = os.path.join(validation_dir, 'ImageSets/val.txt')
    images_dir = os.path.join(validation_dir, 'image_2')
    calibs_dir = os.path.join(validation_dir, 'calib')
    if "waymo720" in dataset_type:
        images_dir = os.path.join(validation_dir, 'image_2', camera_type)
        calibs_dir = os.path.join(validation_dir, 'calib')
        val_list_path = os.path.join(validation_dir, 'ImageSets', 'val_{}.txt'.format(camera_type))
    # Use a context manager so the list file is closed (the original leaked
    # the file handle).
    with open(val_list_path, 'r') as list_file:
        image_names = [line.strip() for line in list_file.readlines()]
    for image_name in tqdm(image_names):
        image_path = os.path.join(images_dir, image_name + '.jpg') if os.path.exists(
            os.path.join(images_dir, image_name + '.jpg')) else os.path.join(images_dir, image_name + '.png')
        calib_path = os.path.join(calibs_dir, image_name + '.txt')
        img_cv = cv2.imread(image_path)
        image = Image.fromarray(img_cv)
        K, P = load_intrinsic_matrix(calib_path, camera_type)
        K_src = K.copy()
        # Capture the source size before any transform; the original only set
        # src_width/src_height in the resize branch, so the affine branch
        # raised NameError at the ParamsList construction below.
        src_width, src_height = image.size
        if cfg.INPUT.TEST_AFFINE_TRANSFORM:
            center = np.array([i / 2 for i in image.size], dtype=np.float32)
            size = np.array([i for i in image.size], dtype=np.float32)
            center_size = [center, size]
            trans_affine = get_transfrom_matrix(center_size, [input_width, input_height])
            trans_affine_inv = np.linalg.inv(trans_affine)
            image = image.transform(
                (input_width, input_height),
                method=Image.AFFINE,
                data=trans_affine_inv.flatten()[:6],
                resample=Image.BILINEAR)
        else:
            # Resize the image and scale the intrinsics accordingly.
            image = image.resize((input_width, input_height), Image.BICUBIC)
            K[0] = K[0] * input_width / src_width
            K[1] = K[1] * input_height / src_height
        center = np.array([i / 2 for i in image.size], dtype=np.float32)
        size = np.array([i for i in image.size], dtype=np.float32)
        center_size = [center, size]
        trans_mat = get_transfrom_matrix(center_size, [output_width, output_height])
        target = ParamsList(image_size=[src_width, src_height], is_train=False)
        target.add_field('K_src', K_src)
        target.add_field('trans_mat', trans_mat)
        target.add_field('K', K)
        target = [target.to(cfg.MODEL.DEVICE)]
        # Convert the PIL image to a BGR float32 CHW array in [0, 255],
        # C-contiguous as required by the pagelocked input buffer.
        img = F.to_tensor(image)
        img = img[[2, 1, 0]]
        img = img * 255.0
        img = np.array(img.numpy(), dtype=np.float32, order='C')
        inputs[0].host = img
        trt_outputs = do_inference(context, bindings=bindings, inputs=inputs, outputs=outputs, stream=stream)
        # Post-process: reshape the flat engine outputs into the
        # (regression, class) feature maps expected by the post-processor.
        trt_outputs = [torch.from_numpy(output.reshape(shape)) for output, shape in zip(trt_outputs, output_shapes)]
        trt_outputs.reverse()
        trt_outputs = [output.to(cfg.MODEL.DEVICE) for output in trt_outputs]
        prediction = post_processor.forward(trt_outputs, target)
        # Move to CPU once and reuse for both drawing and txt export
        # (the original transferred the tensor twice).
        prediction_cpu = prediction.to('cpu')
        image = draw_3d_box_on_image(image, prediction_cpu, P)
        cv2.imwrite(os.path.join(output_image_dir, image_name + '.jpg'), image)
        generate_kitti_3d_detection(prediction_cpu, os.path.join(output_pred_dir, image_name + '.txt'))
if __name__ == '__main__':
    # Build (or load) the TensorRT engine from the ONNX model, then run
    # inference over the validation set and write results to args.output_dir.
    '''Create a TensorRT engine for ONNX-based and run inference.'''
    engine = create_trt_engine(onnx_path, engine_path, data_type=trt_data_type)
    run_demo(engine, args.output_dir)
| en | 0.808003 | # TensorRT logger singleton # Simple helper data class that's a little nicer to use than a 2-tuple. # Allocates all buffers required for an engine, i.e. host/device inputs/outputs. # print(engine.get_binding_name()) # Allocate host and device buffers # Append the device buffer to device bindings. # Append to the appropriate list. If the sereized engine is existed, load and run; else create tensorrt engine and save it. Takes an ONNX file and creates a TensorRT engine to run inference with # 1GB # pass the onnx file # If you have created the TensorRT engine, plz load and run. # This function is generalized for multiple inputs/outputs. # inputs and outputs are expected to be lists of HostDeviceMem objects. # Transfer input data to the GPU. # [cuda.memcpy_htod_async(inp.device, inp.host, stream) for inp in inputs] # Run inference. # Transfer predictions back from the GPU. # Synchronize the stream # Return only the host outputs. # allocate the buffer of the host device # Resize the image and change the instric params # transform 2 postprocess the output of the TensorRT engine # reshape the label prediction and bbox prediction Create a TensorRT engine for ONNX-based and run inference. | 1.86675 | 2 |
coroutine/try_aio_2nd.py | lukasdean/robust_python | 0 | 6619215 | #!/user/bin/env python
# -*-coding:utf-8 -*-
# @CreateTime : 2022/1/25 9:19
# @Author : xujiahui
# @Project : robust_python
# @File : try_aio_2nd.py
# @Version : V0.0.1
# @Desc : io密集型
import time
import asyncio
import concurrent.futures
# 声明一个阻塞型任务
def blocked_task():
for i in range(10):
# 为了简化代码逻辑,便于更清晰地认识混合执行阻塞与非阻塞(异步)代码,
# 使用time.sleep函数来模拟阻塞型IO逻辑的执行效果
time.sleep(1)
print(f"[{time.strftime('%X')}] Blocked task {i}")
# 声明一个异步任务
async def async_task():
for i in range(2):
await asyncio.sleep(5)
print(f"[{time.strftime('%X')}] Async task {i}")
async def main():
# 创建一个线程池执行器,该执行器所允许的最大线程数是5
executor = concurrent.futures.ThreadPoolExecutor(max_workers = 5)
# 获取当前正在运行的事件循环对象
current_running_loop = asyncio.get_running_loop()
# 并发执行一个阻塞型任务和一个异步任务
await asyncio.gather(
# 通过函数 run_in_executor 可以让指定的函数运行在特定的执行器(Executor)中,
# 例如线程池执行器(concurrent.futures.ThreadPoolExecutor) 或者
# 进程执行器(concurrent.futures.ProcessPoolExecutor)
current_running_loop.run_in_executor(executor=executor, func=blocked_task),
async_task()
)
if __name__ == "__main__":
asyncio.run(main())
| #!/user/bin/env python
# -*-coding:utf-8 -*-
# @CreateTime : 2022/1/25 9:19
# @Author : xujiahui
# @Project : robust_python
# @File : try_aio_2nd.py
# @Version : V0.0.1
# @Desc : io密集型
import time
import asyncio
import concurrent.futures
# 声明一个阻塞型任务
def blocked_task():
for i in range(10):
# 为了简化代码逻辑,便于更清晰地认识混合执行阻塞与非阻塞(异步)代码,
# 使用time.sleep函数来模拟阻塞型IO逻辑的执行效果
time.sleep(1)
print(f"[{time.strftime('%X')}] Blocked task {i}")
# 声明一个异步任务
async def async_task():
for i in range(2):
await asyncio.sleep(5)
print(f"[{time.strftime('%X')}] Async task {i}")
async def main():
# 创建一个线程池执行器,该执行器所允许的最大线程数是5
executor = concurrent.futures.ThreadPoolExecutor(max_workers = 5)
# 获取当前正在运行的事件循环对象
current_running_loop = asyncio.get_running_loop()
# 并发执行一个阻塞型任务和一个异步任务
await asyncio.gather(
# 通过函数 run_in_executor 可以让指定的函数运行在特定的执行器(Executor)中,
# 例如线程池执行器(concurrent.futures.ThreadPoolExecutor) 或者
# 进程执行器(concurrent.futures.ProcessPoolExecutor)
current_running_loop.run_in_executor(executor=executor, func=blocked_task),
async_task()
)
if __name__ == "__main__":
asyncio.run(main())
| zh | 0.818234 | #!/user/bin/env python # -*-coding:utf-8 -*- # @CreateTime : 2022/1/25 9:19 # @Author : xujiahui # @Project : robust_python # @File : try_aio_2nd.py # @Version : V0.0.1 # @Desc : io密集型 # 声明一个阻塞型任务 # 为了简化代码逻辑,便于更清晰地认识混合执行阻塞与非阻塞(异步)代码, # 使用time.sleep函数来模拟阻塞型IO逻辑的执行效果 # 声明一个异步任务 # 创建一个线程池执行器,该执行器所允许的最大线程数是5 # 获取当前正在运行的事件循环对象 # 并发执行一个阻塞型任务和一个异步任务 # 通过函数 run_in_executor 可以让指定的函数运行在特定的执行器(Executor)中, # 例如线程池执行器(concurrent.futures.ThreadPoolExecutor) 或者 # 进程执行器(concurrent.futures.ProcessPoolExecutor) | 3.18429 | 3 |
BioSIMI-Python/IFFL_model_reduce.py | murrayrm/txtlsim-python | 0 | 6619216 | <filename>BioSIMI-Python/IFFL_model_reduce.py
from modules.System import *
from modules.Subsystem import *
cell = System('cell')
IFFL = cell.createSubsystem('models/IFFL.xml','1')
IFFL.setFastReactions(1)
writeSBML(IFFL.getSubsystemDoc(),'models/IFFLfast.xml')
timepointsFast = np.linspace(0,10000,10)
IFFLreduced = IFFL.modelReduce(timepointsFast)
writeSBML(IFFLreduced.getSubsystemDoc(),'models/IFFLreduced.xml')
timepoints = np.linspace(0,10,1000)
plotSbmlWithBioscrape(['models/IFFLfast.xml','models/IFFLreduced.xml'],0,timepoints,[['inp_IFFL','out_IFFL'],['inp_IFFL','out_IFFL']])
| <filename>BioSIMI-Python/IFFL_model_reduce.py
from modules.System import *
from modules.Subsystem import *
cell = System('cell')
IFFL = cell.createSubsystem('models/IFFL.xml','1')
IFFL.setFastReactions(1)
writeSBML(IFFL.getSubsystemDoc(),'models/IFFLfast.xml')
timepointsFast = np.linspace(0,10000,10)
IFFLreduced = IFFL.modelReduce(timepointsFast)
writeSBML(IFFLreduced.getSubsystemDoc(),'models/IFFLreduced.xml')
timepoints = np.linspace(0,10,1000)
plotSbmlWithBioscrape(['models/IFFLfast.xml','models/IFFLreduced.xml'],0,timepoints,[['inp_IFFL','out_IFFL'],['inp_IFFL','out_IFFL']])
| none | 1 | 1.987538 | 2 | |
1-1 Input.py | mrczl/Python-study | 0 | 6619217 | <reponame>mrczl/Python-study<gh_stars>0
# 1-1 input
# input(prompt=None, /)
# Read a string from standard input.
print('----------1-1 Input---------')
name = input("Please enter your name:->")
age = input ("Please enter your age:->")
print("Hi!,My name is "+name+",and I'm "+age+",Nice to meet you! \
Welcome to join our club.")
| # 1-1 input
# input(prompt=None, /)
# Read a string from standard input.
print('----------1-1 Input---------')
name = input("Please enter your name:->")
age = input ("Please enter your age:->")
print("Hi!,My name is "+name+",and I'm "+age+",Nice to meet you! \
Welcome to join our club.") | en | 0.205153 | # 1-1 input # input(prompt=None, /) # Read a string from standard input. | 4.143054 | 4 |
health_facilities/admin.py | moshthepitt/afya360 | 1 | 6619218 | from django.contrib import admin
from .models import HealthFacility, FacilityOwner, FacilityType
class HealthFacilityAdmin(admin.ModelAdmin):
search_fields = ['name', 'facility_code']
list_filter = ['level', 'facility_class', 'facility_type', 'owner', 'province', 'county', 'status']
list_display = ['name', 'facility_code']
class FacilityOwnerAdmin(admin.ModelAdmin):
search_fields = ['name']
class FacilityTypeAdmin(admin.ModelAdmin):
search_fields = ['name']
admin.site.register(HealthFacility, HealthFacilityAdmin)
admin.site.register(FacilityOwner, FacilityOwnerAdmin)
admin.site.register(FacilityType, FacilityTypeAdmin)
| from django.contrib import admin
from .models import HealthFacility, FacilityOwner, FacilityType
class HealthFacilityAdmin(admin.ModelAdmin):
search_fields = ['name', 'facility_code']
list_filter = ['level', 'facility_class', 'facility_type', 'owner', 'province', 'county', 'status']
list_display = ['name', 'facility_code']
class FacilityOwnerAdmin(admin.ModelAdmin):
search_fields = ['name']
class FacilityTypeAdmin(admin.ModelAdmin):
search_fields = ['name']
admin.site.register(HealthFacility, HealthFacilityAdmin)
admin.site.register(FacilityOwner, FacilityOwnerAdmin)
admin.site.register(FacilityType, FacilityTypeAdmin)
| none | 1 | 1.703599 | 2 | |
uwnet/jacobian.py | sarenehan/uwnet | 1 | 6619219 | <reponame>sarenehan/uwnet<gh_stars>1-10
import torch
from torch.autograd import grad
def jacobian_backward(y, x):
"""Back-propagates the Frobenious norm of the jacobian"""
n = len(y)
out = 0.0
for i in range(n):
y_x = grad(y[i], x, create_graph=True)[0]
y_x2 = y_x.norm()**2 / 2
y_x2.backward(retain_graph=True)
out += y_x2.item()
return out
def jacobian_norm(y, x):
n = len(y)
out = 0.0
for i in range(n):
y_x = grad(y[i], x, create_graph=True)[0]
out += y_x.norm()**2 / 2
return out
def jacobian(y, x):
n = len(y)
jac = []
for i in range(n):
y_x = grad(y[i], x, create_graph=True)[0]
jac.append(y_x)
return torch.stack(jac)
def max_eig_val(A, niter=10, m=1):
"""
Parameters
----------
A : matrix
niter : number of iterations of power method
m :
number of iterations to keep gradients from end to keep gradients for
"""
n = A.size(0)
x = torch.rand(n)
for i in range(niter):
if i < niter - m:
x = x.detach()
y = A.matmul(x)
norm = x.norm()
lam = y.dot(x) / norm / norm
x = y / lam / norm
return lam, x
def max_signed_eigvals(A, niter=100, m=1):
# find maximum norm eigvalue
lam, _ = max_eig_val(A, niter=niter, m=m)
# if it is negative shift the matrix
h = - 1 / lam * .9
I = torch.eye(A.size(0))
B = I + h * A
lam_plus, _ = max_eig_val(B, niter=niter, m=m)
lam_orig = (lam_plus - 1) / h
if lam.item() < lam_orig.item():
lam, lam_orig = lam_orig, lam
return lam, lam_orig
def dict_jacobian(y, d, progs=['QT', 'SLI']):
for key in d:
try:
d[key].requires_grad = True
except RuntimeError:
pass
jac = {}
for inkey in progs:
for outkey in progs:
try:
jac.setdefault(inkey, {})[outkey] = jacobian(
y[inkey], d[outkey]).squeeze()
except KeyError:
pass
return jac
def jacobian_from_model(model, d, **kwargs):
y = model(d)
return dict_jacobian(y, d, **kwargs)
| import torch
from torch.autograd import grad
def jacobian_backward(y, x):
"""Back-propagates the Frobenious norm of the jacobian"""
n = len(y)
out = 0.0
for i in range(n):
y_x = grad(y[i], x, create_graph=True)[0]
y_x2 = y_x.norm()**2 / 2
y_x2.backward(retain_graph=True)
out += y_x2.item()
return out
def jacobian_norm(y, x):
n = len(y)
out = 0.0
for i in range(n):
y_x = grad(y[i], x, create_graph=True)[0]
out += y_x.norm()**2 / 2
return out
def jacobian(y, x):
n = len(y)
jac = []
for i in range(n):
y_x = grad(y[i], x, create_graph=True)[0]
jac.append(y_x)
return torch.stack(jac)
def max_eig_val(A, niter=10, m=1):
"""
Parameters
----------
A : matrix
niter : number of iterations of power method
m :
number of iterations to keep gradients from end to keep gradients for
"""
n = A.size(0)
x = torch.rand(n)
for i in range(niter):
if i < niter - m:
x = x.detach()
y = A.matmul(x)
norm = x.norm()
lam = y.dot(x) / norm / norm
x = y / lam / norm
return lam, x
def max_signed_eigvals(A, niter=100, m=1):
# find maximum norm eigvalue
lam, _ = max_eig_val(A, niter=niter, m=m)
# if it is negative shift the matrix
h = - 1 / lam * .9
I = torch.eye(A.size(0))
B = I + h * A
lam_plus, _ = max_eig_val(B, niter=niter, m=m)
lam_orig = (lam_plus - 1) / h
if lam.item() < lam_orig.item():
lam, lam_orig = lam_orig, lam
return lam, lam_orig
def dict_jacobian(y, d, progs=['QT', 'SLI']):
for key in d:
try:
d[key].requires_grad = True
except RuntimeError:
pass
jac = {}
for inkey in progs:
for outkey in progs:
try:
jac.setdefault(inkey, {})[outkey] = jacobian(
y[inkey], d[outkey]).squeeze()
except KeyError:
pass
return jac
def jacobian_from_model(model, d, **kwargs):
y = model(d)
return dict_jacobian(y, d, **kwargs) | en | 0.692178 | Back-propagates the Frobenious norm of the jacobian Parameters ---------- A : matrix niter : number of iterations of power method m : number of iterations to keep gradients from end to keep gradients for # find maximum norm eigvalue # if it is negative shift the matrix | 2.792736 | 3 |
linkedintest.py | mvonhe/twittergraph | 1 | 6619220 | <reponame>mvonhe/twittergraph
from linkedin import linkedin
#from linkedin.linkedin import NETWORK_UPDATES
#API_KEY = '<KEY>'
API_KEY = '78inod5y0pnmaf'
#API_SECRET = '<KEY>'
API_SECRET = 'yg2g4fMwES3R3HOn'
RETURN_URL = 'http://ligraph.mybluemix.net'
authentication = linkedin.LinkedInAuthentication(API_KEY, API_SECRET, RETURN_URL, linkedin.PERMISSIONS.enums.values())
print authentication.authorization_url
application = linkedin.LinkedInApplication(authentication)
#conns = application.get_connections()
application.get_connections(selectors=['headline', 'first-name', 'last-name'], params={'start':10, 'count':5})
#print conns[:10] | from linkedin import linkedin
#from linkedin.linkedin import NETWORK_UPDATES
#API_KEY = '<KEY>'
API_KEY = '78inod5y0pnmaf'
#API_SECRET = '<KEY>'
API_SECRET = 'yg2g4fMwES3R3HOn'
RETURN_URL = 'http://ligraph.mybluemix.net'
authentication = linkedin.LinkedInAuthentication(API_KEY, API_SECRET, RETURN_URL, linkedin.PERMISSIONS.enums.values())
print authentication.authorization_url
application = linkedin.LinkedInApplication(authentication)
#conns = application.get_connections()
application.get_connections(selectors=['headline', 'first-name', 'last-name'], params={'start':10, 'count':5})
#print conns[:10] | en | 0.232073 | #from linkedin.linkedin import NETWORK_UPDATES #API_KEY = '<KEY>' #API_SECRET = '<KEY>' #conns = application.get_connections() #print conns[:10] | 2.436862 | 2 |
plugins/usd/maya/publish/extract_pointcache_export.py | davidlatwe/reveries-config | 3 | 6619221 | <reponame>davidlatwe/reveries-config<gh_stars>1-10
import os
import pyblish.api
class ExtractPointCacheUSDExport(pyblish.api.InstancePlugin):
"""Publish parent pointcache usd file.
"""
order = pyblish.api.ExtractorOrder + 0.4811
hosts = ["maya"]
label = "Extract PointCache (main usd)"
families = [
"reveries.pointcache.usd",
]
def process(self, instance):
from reveries import utils
from reveries.common import get_frame_range
from reveries.common.build_delay_run import DelayRunBuilder
if instance.data.get("isDummy"):
return
out_cache = instance.data.get("outCache")
start_frame = instance.data.get("startFrame")
end_frame = instance.data.get("endFrame")
if not out_cache:
self.log.warning("No output geometry found in your scene.")
return
if not start_frame or not end_frame:
shot_name = instance.data["asset"]
start_frame, end_frame = get_frame_range.get(shot_name)
instance.data["startFrame"] = start_frame
instance.data["endFrame"] = end_frame
self.frame_range = [start_frame, end_frame]
staging_dir = utils.stage_dir(dir=instance.data["_sharedStage"])
file_info = {
'authored_data': 'authored_data.usd',
'source': 'source.usd',
'main': 'pointcache_prim.usda'
}
instance.data['file_info'] = file_info
# Update information in instance data
instance.data["repr.USD._stage"] = staging_dir
instance.data["repr.USD._files"] = [
file_info['authored_data'], # authored_data.usda
file_info['source'], # source.usd
file_info['main'] # pointcache_prim.usda
]
instance.data["repr.USD.entryFileName"] = file_info['main']
instance.data["_preflighted"] = True
# Create delay running
delay_builder = DelayRunBuilder(instance)
instance.data["repr.USD._delayRun"] = {
"func": self._export_usd,
"args": [
delay_builder.instance_data, delay_builder.context_data
],
"order": 10
}
instance.data["deadline_dependency"] = self.get_deadline_dependency(instance)
def get_deadline_dependency(self, instance):
context = instance.context
child_instances = []
for _instance in context:
if _instance.data["family"] == "reveries.pointcache.child.usd":
if str(_instance.data.get("parent_pointcache_name", "")) == \
str(instance.data["subset"]):
child_instances.append(_instance)
return child_instances
def _export_usd(self, instance_data, context_data):
from reveries.maya.usd import pointcache_export
staging_dir = instance_data.get("repr.USD._stage")
file_info = instance_data.get("file_info")
# === Export Pointcache USD === #
exporter = pointcache_export.PointCacheExporter(
output_dir=staging_dir,
export_node=instance_data.get("export_node"),
root_usd_path=instance_data.get("root_usd_path"),
frame_range=[
instance_data.get("startFrame"), instance_data.get("endFrame")],
asset_name=instance_data.get("asset_name"),
out_cache=instance_data.get("outCache"),
file_info=file_info,
look_variant=instance_data.get("look_variant", "")
)
exporter.export_usd()
# === Generate parent USD === #
self.parent_usd_file = "parent_pointcache_prim.usda"
parent_result = self._generate_parent_usd(instance_data, staging_dir, file_info)
if parent_result:
instance_data["repr.USD._files"].append(self.parent_usd_file)
self._publish_instance(instance_data, context_data)
def _generate_parent_usd(self, instance_data, staging_dir, file_info):
from reveries.maya.usd import parent_pointcache_export
shot_name = instance_data["asset"]
subset_name = instance_data["subset"]
# Export main usd file
exporter = parent_pointcache_export.ParentPointcacheExporter(
shot_name,
subset_name, # parent subset name
frame_range=[
instance_data.get("startFrame"), instance_data.get("endFrame")]
)
if exporter.get_children_data():
exporter.export(staging_dir)
final_main_usd_path = exporter.output_path
if os.path.exists(final_main_usd_path):
# === Generate main usd === #
main_usd_path = os.path.join(
staging_dir, file_info['main']).replace('\\', '/')
pre_main_path = os.path.join(
staging_dir, self.parent_usd_file).replace('\\', '/')
# Rename pre_main usd file
os.rename(main_usd_path, pre_main_path)
# Rename main usd file
os.rename(final_main_usd_path, main_usd_path)
return True
return False
def _publish_instance(self, instance_data, context_data):
# === Publish instance === #
from reveries.common.publish import publish_instance
publish_instance.run(instance_data, context=context_data)
| import os
import pyblish.api
class ExtractPointCacheUSDExport(pyblish.api.InstancePlugin):
"""Publish parent pointcache usd file.
"""
order = pyblish.api.ExtractorOrder + 0.4811
hosts = ["maya"]
label = "Extract PointCache (main usd)"
families = [
"reveries.pointcache.usd",
]
def process(self, instance):
from reveries import utils
from reveries.common import get_frame_range
from reveries.common.build_delay_run import DelayRunBuilder
if instance.data.get("isDummy"):
return
out_cache = instance.data.get("outCache")
start_frame = instance.data.get("startFrame")
end_frame = instance.data.get("endFrame")
if not out_cache:
self.log.warning("No output geometry found in your scene.")
return
if not start_frame or not end_frame:
shot_name = instance.data["asset"]
start_frame, end_frame = get_frame_range.get(shot_name)
instance.data["startFrame"] = start_frame
instance.data["endFrame"] = end_frame
self.frame_range = [start_frame, end_frame]
staging_dir = utils.stage_dir(dir=instance.data["_sharedStage"])
file_info = {
'authored_data': 'authored_data.usd',
'source': 'source.usd',
'main': 'pointcache_prim.usda'
}
instance.data['file_info'] = file_info
# Update information in instance data
instance.data["repr.USD._stage"] = staging_dir
instance.data["repr.USD._files"] = [
file_info['authored_data'], # authored_data.usda
file_info['source'], # source.usd
file_info['main'] # pointcache_prim.usda
]
instance.data["repr.USD.entryFileName"] = file_info['main']
instance.data["_preflighted"] = True
# Create delay running
delay_builder = DelayRunBuilder(instance)
instance.data["repr.USD._delayRun"] = {
"func": self._export_usd,
"args": [
delay_builder.instance_data, delay_builder.context_data
],
"order": 10
}
instance.data["deadline_dependency"] = self.get_deadline_dependency(instance)
def get_deadline_dependency(self, instance):
context = instance.context
child_instances = []
for _instance in context:
if _instance.data["family"] == "reveries.pointcache.child.usd":
if str(_instance.data.get("parent_pointcache_name", "")) == \
str(instance.data["subset"]):
child_instances.append(_instance)
return child_instances
def _export_usd(self, instance_data, context_data):
from reveries.maya.usd import pointcache_export
staging_dir = instance_data.get("repr.USD._stage")
file_info = instance_data.get("file_info")
# === Export Pointcache USD === #
exporter = pointcache_export.PointCacheExporter(
output_dir=staging_dir,
export_node=instance_data.get("export_node"),
root_usd_path=instance_data.get("root_usd_path"),
frame_range=[
instance_data.get("startFrame"), instance_data.get("endFrame")],
asset_name=instance_data.get("asset_name"),
out_cache=instance_data.get("outCache"),
file_info=file_info,
look_variant=instance_data.get("look_variant", "")
)
exporter.export_usd()
# === Generate parent USD === #
self.parent_usd_file = "parent_pointcache_prim.usda"
parent_result = self._generate_parent_usd(instance_data, staging_dir, file_info)
if parent_result:
instance_data["repr.USD._files"].append(self.parent_usd_file)
self._publish_instance(instance_data, context_data)
def _generate_parent_usd(self, instance_data, staging_dir, file_info):
from reveries.maya.usd import parent_pointcache_export
shot_name = instance_data["asset"]
subset_name = instance_data["subset"]
# Export main usd file
exporter = parent_pointcache_export.ParentPointcacheExporter(
shot_name,
subset_name, # parent subset name
frame_range=[
instance_data.get("startFrame"), instance_data.get("endFrame")]
)
if exporter.get_children_data():
exporter.export(staging_dir)
final_main_usd_path = exporter.output_path
if os.path.exists(final_main_usd_path):
# === Generate main usd === #
main_usd_path = os.path.join(
staging_dir, file_info['main']).replace('\\', '/')
pre_main_path = os.path.join(
staging_dir, self.parent_usd_file).replace('\\', '/')
# Rename pre_main usd file
os.rename(main_usd_path, pre_main_path)
# Rename main usd file
os.rename(final_main_usd_path, main_usd_path)
return True
return False
def _publish_instance(self, instance_data, context_data):
# === Publish instance === #
from reveries.common.publish import publish_instance
publish_instance.run(instance_data, context=context_data) | en | 0.580051 | Publish parent pointcache usd file. # Update information in instance data # authored_data.usda # source.usd # pointcache_prim.usda # Create delay running # === Export Pointcache USD === # # === Generate parent USD === # # Export main usd file # parent subset name # === Generate main usd === # # Rename pre_main usd file # Rename main usd file # === Publish instance === # | 2.237549 | 2 |
pool/forms.py | casidos/pool | 0 | 6619222 | from django import forms
# from PIL import Image
from django.core.files.uploadedfile import SimpleUploadedFile
from django.contrib.auth.forms import UserCreationForm, UserChangeForm
from .models import CustomUser, Talk, Game, Team, City, Alert, PayerAudit, PickType, Pick, Talk, Winner, Week
from .validators import pool_username_validator
class CustomUserCreationForm(UserCreationForm):
username = forms.CharField(max_length=150, validators=[pool_username_validator], help_text='Names and numbers only')
class Meta:
model = CustomUser
fields = ('username', 'email', 'mobile', 'first_name', 'last_name', 'image', 'favorite_team', 'timezone')
class CustomUserChangeForm(UserChangeForm):
class Meta:
model = CustomUser
fields = ('username', 'email', 'mobile')
class EditUserForm(forms.ModelForm):
username = forms.CharField(widget=forms.TextInput(attrs={'class' : 'form-control', 'placeholder' : 'Username'}))
first_name = forms.CharField(widget=forms.TextInput(attrs={'class' : 'form-control', 'placeholder' : 'First'}))
last_name = forms.CharField(widget=forms.TextInput(attrs={'class' : 'form-control', 'placeholder' : 'Last'}))
email = forms.EmailField(widget=forms.TextInput(attrs={'class' : 'form-control', 'placeholder' : 'Email'}))
mobile = forms.CharField(widget=forms.TextInput(attrs={'class' : 'form-control', 'placeholder' : 'Numbers Only'}))
class Meta:
model = CustomUser
fields = ('username', 'first_name', 'last_name', 'email', 'mobile', 'image', 'favorite_team', 'timezone')
class PayerAuditForm(forms.ModelForm):
user = forms.ModelChoiceField(queryset=CustomUser.objects.all())
class Meta:
model = PayerAudit
fields = ('user', 'has_paid', 'payment_method', 'date_sent', 'date_received', 'message', )
class WinnerForm(forms.ModelForm):
user = forms.ModelChoiceField(queryset=CustomUser.objects.all())
week = forms.ModelChoiceField(queryset=Week.objects.all())
class Meta:
model = Alert
fields = ('user', 'week', 'message',)
class AlertForm(forms.ModelForm):
user = forms.ModelChoiceField(queryset=CustomUser.objects.all())
class Meta:
model = Alert
fields = ('user', 'alert_level', 'effective_date', 'effective_end_date', 'message',)
class TalkAdminForm(forms.ModelForm):
user = forms.ModelChoiceField(queryset=CustomUser.objects.all())
class Meta:
model = Talk
fields = ('user', 'message', 'effective_date', 'effective_end_date',)
class PickAdminForm(forms.ModelForm):
class Meta:
model = Pick
fields = ('user', 'pick_type', 'score', )
class PickForm(forms.ModelForm):
user = forms.ModelChoiceField(queryset=CustomUser.objects.all())
pick_type = forms.ModelChoiceField(queryset=PickType.objects.filter(is_active=True))
score = forms.IntegerField(min_value=0, max_value=5)
class Meta:
model = Pick
fields = ('user', 'game', 'score', 'pick_type',)
class PickTypeAdminForm(forms.ModelForm):
description = forms.TextInput()
class Meta:
model = PickType
fields = ('description', 'name', 'value',)
class ScoresForm(forms.ModelForm):
number = forms.IntegerField(disabled=True)
start_time = forms.DateTimeField(disabled=True)
home_team = forms.ModelChoiceField(queryset=Team.objects.all(), disabled=True)
visiting_team = forms.ModelChoiceField(queryset=Team.objects.all(), disabled=True)
city = forms.ModelChoiceField(queryset=City.objects.all(), disabled=True)
home_score = forms.IntegerField(max_value=99, label='Score')
visitor_score = forms.IntegerField(max_value=99, label='Score')
class Meta:
model = Game
fields = ('number', 'start_time', 'home_team', 'home_score', 'visiting_team', 'visitor_score', 'is_regulation_tie',)
class GameForm(forms.ModelForm):
home_team = forms.ModelChoiceField(queryset=Team.objects.all())
visiting_team = forms.ModelChoiceField(queryset=Team.objects.all())
class Meta:
model = Game
fields = ('number', 'start_time', 'visiting_team', 'home_team', 'city',)
class TalkForm(forms.ModelForm):
message = forms.TextInput()
class Meta:
model = Talk
fields = ('message',)
| from django import forms
# from PIL import Image
from django.core.files.uploadedfile import SimpleUploadedFile
from django.contrib.auth.forms import UserCreationForm, UserChangeForm
from .models import CustomUser, Talk, Game, Team, City, Alert, PayerAudit, PickType, Pick, Talk, Winner, Week
from .validators import pool_username_validator
class CustomUserCreationForm(UserCreationForm):
username = forms.CharField(max_length=150, validators=[pool_username_validator], help_text='Names and numbers only')
class Meta:
model = CustomUser
fields = ('username', 'email', 'mobile', 'first_name', 'last_name', 'image', 'favorite_team', 'timezone')
class CustomUserChangeForm(UserChangeForm):
class Meta:
model = CustomUser
fields = ('username', 'email', 'mobile')
class EditUserForm(forms.ModelForm):
username = forms.CharField(widget=forms.TextInput(attrs={'class' : 'form-control', 'placeholder' : 'Username'}))
first_name = forms.CharField(widget=forms.TextInput(attrs={'class' : 'form-control', 'placeholder' : 'First'}))
last_name = forms.CharField(widget=forms.TextInput(attrs={'class' : 'form-control', 'placeholder' : 'Last'}))
email = forms.EmailField(widget=forms.TextInput(attrs={'class' : 'form-control', 'placeholder' : 'Email'}))
mobile = forms.CharField(widget=forms.TextInput(attrs={'class' : 'form-control', 'placeholder' : 'Numbers Only'}))
class Meta:
model = CustomUser
fields = ('username', 'first_name', 'last_name', 'email', 'mobile', 'image', 'favorite_team', 'timezone')
class PayerAuditForm(forms.ModelForm):
user = forms.ModelChoiceField(queryset=CustomUser.objects.all())
class Meta:
model = PayerAudit
fields = ('user', 'has_paid', 'payment_method', 'date_sent', 'date_received', 'message', )
class WinnerForm(forms.ModelForm):
user = forms.ModelChoiceField(queryset=CustomUser.objects.all())
week = forms.ModelChoiceField(queryset=Week.objects.all())
class Meta:
model = Alert
fields = ('user', 'week', 'message',)
class AlertForm(forms.ModelForm):
user = forms.ModelChoiceField(queryset=CustomUser.objects.all())
class Meta:
model = Alert
fields = ('user', 'alert_level', 'effective_date', 'effective_end_date', 'message',)
class TalkAdminForm(forms.ModelForm):
user = forms.ModelChoiceField(queryset=CustomUser.objects.all())
class Meta:
model = Talk
fields = ('user', 'message', 'effective_date', 'effective_end_date',)
class PickAdminForm(forms.ModelForm):
class Meta:
model = Pick
fields = ('user', 'pick_type', 'score', )
class PickForm(forms.ModelForm):
user = forms.ModelChoiceField(queryset=CustomUser.objects.all())
pick_type = forms.ModelChoiceField(queryset=PickType.objects.filter(is_active=True))
score = forms.IntegerField(min_value=0, max_value=5)
class Meta:
model = Pick
fields = ('user', 'game', 'score', 'pick_type',)
class PickTypeAdminForm(forms.ModelForm):
description = forms.TextInput()
class Meta:
model = PickType
fields = ('description', 'name', 'value',)
class ScoresForm(forms.ModelForm):
number = forms.IntegerField(disabled=True)
start_time = forms.DateTimeField(disabled=True)
home_team = forms.ModelChoiceField(queryset=Team.objects.all(), disabled=True)
visiting_team = forms.ModelChoiceField(queryset=Team.objects.all(), disabled=True)
city = forms.ModelChoiceField(queryset=City.objects.all(), disabled=True)
home_score = forms.IntegerField(max_value=99, label='Score')
visitor_score = forms.IntegerField(max_value=99, label='Score')
class Meta:
model = Game
fields = ('number', 'start_time', 'home_team', 'home_score', 'visiting_team', 'visitor_score', 'is_regulation_tie',)
class GameForm(forms.ModelForm):
home_team = forms.ModelChoiceField(queryset=Team.objects.all())
visiting_team = forms.ModelChoiceField(queryset=Team.objects.all())
class Meta:
model = Game
fields = ('number', 'start_time', 'visiting_team', 'home_team', 'city',)
class TalkForm(forms.ModelForm):
message = forms.TextInput()
class Meta:
model = Talk
fields = ('message',)
| en | 0.765779 | # from PIL import Image | 2.222872 | 2 |
management/commands/startproject.py | Zadigo/Emails | 0 | 6619223 | from management.base import ProjectCommand
from zineb.management.base import ProjectCommand
class Command(ProjectCommand):
def add_arguments(self, parser):
parser.add_argument('project_name')
| from management.base import ProjectCommand
from zineb.management.base import ProjectCommand
class Command(ProjectCommand):
def add_arguments(self, parser):
parser.add_argument('project_name')
| none | 1 | 1.76852 | 2 | |
src/omero_napari/widgets/tree_model.py | will-moore/napari-omero | 0 | 6619224 | from omero.gateway import (
BlitzObjectWrapper,
_DatasetWrapper,
_ImageWrapper,
)
from qtpy.QtCore import QModelIndex
from qtpy.QtGui import QStandardItem, QStandardItemModel
from .gateway import QGateWay
from typing import Dict
class OMEROTreeItem(QStandardItem):
def __init__(self, wrapper: BlitzObjectWrapper):
super().__init__()
self.wrapper = wrapper
self.setData(wrapper)
# self._has_fetched = False
if self.hasChildren():
self.setText(f"{self.wrapper.getName()} ({self.numChildren()})")
else:
self.setText(f"{self.wrapper.getName()}")
# def canFetchMore(self) -> bool:
# if self._has_fetched or not self.hasChildren():
# return False
# return self.wrapper.countChildren() > 0
# def fetchChildren(self):
# for child in self.wrapper.listChildren():
# self.appendRow(OMEROTreeItem(child))
# self._has_fetched = True
def hasChildren(self):
return bool(self.wrapper.CHILD_WRAPPER_CLASS)
def numChildren(self) -> int:
return self.wrapper.countChildren()
def isDataset(self) -> bool:
return isinstance(self.wrapper, _DatasetWrapper)
def isImage(self) -> bool:
return isinstance(self.wrapper, _ImageWrapper)
class OMEROTreeModel(QStandardItemModel):
def __init__(self, gateway: QGateWay, parent=None):
super().__init__(parent)
self.gateway = gateway
self.gateway.connected.connect(
lambda g: self.gateway._submit(self._populate_tree)
)
self._wrapper_map: Dict[BlitzObjectWrapper, QModelIndex] = {}
def _populate_tree(self):
if not self.gateway.isConnected():
return
root = self.invisibleRootItem()
projects = []
for project in list(self.gateway.conn.listProjects()):
item = OMEROTreeItem(project)
root.appendRow(item)
projects.append(item)
self._wrapper_map[project.getId()] = self.indexFromItem(item)
yield
if not self.gateway.isConnected():
return
for item in projects:
for dataset in list(item.wrapper.listChildren()):
dchild = OMEROTreeItem(dataset)
item.appendRow(dchild)
self._wrapper_map[dataset.getId()] = self.indexFromItem(dchild)
yield
if not self.gateway.isConnected():
return
for image in list(dataset.listChildren()):
ichild = OMEROTreeItem(image)
dchild.appendRow(ichild)
self._wrapper_map[image.getId()] = self.indexFromItem(ichild)
yield
# def canFetchMore(self, index: QModelIndex) -> bool:
# item = self.itemFromIndex(index)
# return bool(item and item.canFetchMore())
# def fetchMore(self, index: QModelIndex) -> None:
# self.itemFromIndex(index).fetchChildren()
def hasChildren(self, index: QModelIndex) -> bool:
item = self.itemFromIndex(index)
if item is not None:
return item.hasChildren() and item.numChildren() > 0
return True
def itemFromIndex(self, index: QModelIndex) -> OMEROTreeItem:
return super().itemFromIndex(index)
| from omero.gateway import (
BlitzObjectWrapper,
_DatasetWrapper,
_ImageWrapper,
)
from qtpy.QtCore import QModelIndex
from qtpy.QtGui import QStandardItem, QStandardItemModel
from .gateway import QGateWay
from typing import Dict
class OMEROTreeItem(QStandardItem):
def __init__(self, wrapper: BlitzObjectWrapper):
super().__init__()
self.wrapper = wrapper
self.setData(wrapper)
# self._has_fetched = False
if self.hasChildren():
self.setText(f"{self.wrapper.getName()} ({self.numChildren()})")
else:
self.setText(f"{self.wrapper.getName()}")
# def canFetchMore(self) -> bool:
# if self._has_fetched or not self.hasChildren():
# return False
# return self.wrapper.countChildren() > 0
# def fetchChildren(self):
# for child in self.wrapper.listChildren():
# self.appendRow(OMEROTreeItem(child))
# self._has_fetched = True
def hasChildren(self):
return bool(self.wrapper.CHILD_WRAPPER_CLASS)
def numChildren(self) -> int:
return self.wrapper.countChildren()
def isDataset(self) -> bool:
return isinstance(self.wrapper, _DatasetWrapper)
def isImage(self) -> bool:
return isinstance(self.wrapper, _ImageWrapper)
class OMEROTreeModel(QStandardItemModel):
def __init__(self, gateway: QGateWay, parent=None):
super().__init__(parent)
self.gateway = gateway
self.gateway.connected.connect(
lambda g: self.gateway._submit(self._populate_tree)
)
self._wrapper_map: Dict[BlitzObjectWrapper, QModelIndex] = {}
def _populate_tree(self):
if not self.gateway.isConnected():
return
root = self.invisibleRootItem()
projects = []
for project in list(self.gateway.conn.listProjects()):
item = OMEROTreeItem(project)
root.appendRow(item)
projects.append(item)
self._wrapper_map[project.getId()] = self.indexFromItem(item)
yield
if not self.gateway.isConnected():
return
for item in projects:
for dataset in list(item.wrapper.listChildren()):
dchild = OMEROTreeItem(dataset)
item.appendRow(dchild)
self._wrapper_map[dataset.getId()] = self.indexFromItem(dchild)
yield
if not self.gateway.isConnected():
return
for image in list(dataset.listChildren()):
ichild = OMEROTreeItem(image)
dchild.appendRow(ichild)
self._wrapper_map[image.getId()] = self.indexFromItem(ichild)
yield
# def canFetchMore(self, index: QModelIndex) -> bool:
# item = self.itemFromIndex(index)
# return bool(item and item.canFetchMore())
# def fetchMore(self, index: QModelIndex) -> None:
# self.itemFromIndex(index).fetchChildren()
def hasChildren(self, index: QModelIndex) -> bool:
item = self.itemFromIndex(index)
if item is not None:
return item.hasChildren() and item.numChildren() > 0
return True
def itemFromIndex(self, index: QModelIndex) -> OMEROTreeItem:
return super().itemFromIndex(index)
| en | 0.351397 | # self._has_fetched = False # def canFetchMore(self) -> bool: # if self._has_fetched or not self.hasChildren(): # return False # return self.wrapper.countChildren() > 0 # def fetchChildren(self): # for child in self.wrapper.listChildren(): # self.appendRow(OMEROTreeItem(child)) # self._has_fetched = True # def canFetchMore(self, index: QModelIndex) -> bool: # item = self.itemFromIndex(index) # return bool(item and item.canFetchMore()) # def fetchMore(self, index: QModelIndex) -> None: # self.itemFromIndex(index).fetchChildren() | 2.195596 | 2 |
swagger_fuzzer/validators.py | cadesalaberry/swagger-fuzzer | 25 | 6619225 | """ Validators
"""
def check_result_status_code(spec, request, response, settings):
""" Check that response status code is either a "standard" one
like 404, 405, 200 (use -s cli argument to change it) or one
of the declared one globally or for the path in swagger configuration
"""
status_code = int(response.status_code)
endpoint_path = request.build_context['endpoint_path']
authorized = spec['paths'][endpoint_path][request.method.lower()]['responses'].keys()
# Default means all status code are allowed
if "default" in authorized:
return
allowed = set(settings.http_code).union(map(int, authorized))
if status_code not in allowed:
raise AssertionError("Request on {!r} returned status_code {}, not in declared one {}".format(request.url, response.status_code, list(allowed)))
def no_server_error(spec, request, response, settings):
""" Check that response status code is different than 500
"""
if response.status_code == 500:
raise AssertionError("Request on {!r} returns status_code {}".format(URL, response.status_code))
def no_body_format_declaration(spec, request, response, settings):
""" Check that for each post path, a body format is declared
"""
body_args = request.build_context.get('body_args')
if request.build_context['body_args'] and request.build_context.get('request_body_format') is None:
raise AssertionError("Body parameters but no declared format for endpoint {}: {}".format(endpoint, body_args))
def valid_output_mime(spec, request, response, settings):
""" Check that each request returns with a content-type that is declared
"""
global_valids = spec.get('consumes', [])
endpoint_path = request.build_context['endpoint_path']
path = spec['paths'][endpoint_path][request.method.lower()]
local_valids = path.get('consumes', [])
if local_valids:
valids = local_valids
else:
valids = global_valids
if response.headers['Content-Type'] not in valids:
raise AssertionError("Response content-type {} is not declared: {}".format(response.headers['Content-Type'], valids))
VALIDATORS = [
no_server_error,
no_body_format_declaration,
check_result_status_code,
valid_output_mime
]
| """ Validators
"""
def check_result_status_code(spec, request, response, settings):
""" Check that response status code is either a "standard" one
like 404, 405, 200 (use -s cli argument to change it) or one
of the declared one globally or for the path in swagger configuration
"""
status_code = int(response.status_code)
endpoint_path = request.build_context['endpoint_path']
authorized = spec['paths'][endpoint_path][request.method.lower()]['responses'].keys()
# Default means all status code are allowed
if "default" in authorized:
return
allowed = set(settings.http_code).union(map(int, authorized))
if status_code not in allowed:
raise AssertionError("Request on {!r} returned status_code {}, not in declared one {}".format(request.url, response.status_code, list(allowed)))
def no_server_error(spec, request, response, settings):
""" Check that response status code is different than 500
"""
if response.status_code == 500:
raise AssertionError("Request on {!r} returns status_code {}".format(URL, response.status_code))
def no_body_format_declaration(spec, request, response, settings):
""" Check that for each post path, a body format is declared
"""
body_args = request.build_context.get('body_args')
if request.build_context['body_args'] and request.build_context.get('request_body_format') is None:
raise AssertionError("Body parameters but no declared format for endpoint {}: {}".format(endpoint, body_args))
def valid_output_mime(spec, request, response, settings):
""" Check that each request returns with a content-type that is declared
"""
global_valids = spec.get('consumes', [])
endpoint_path = request.build_context['endpoint_path']
path = spec['paths'][endpoint_path][request.method.lower()]
local_valids = path.get('consumes', [])
if local_valids:
valids = local_valids
else:
valids = global_valids
if response.headers['Content-Type'] not in valids:
raise AssertionError("Response content-type {} is not declared: {}".format(response.headers['Content-Type'], valids))
VALIDATORS = [
no_server_error,
no_body_format_declaration,
check_result_status_code,
valid_output_mime
]
| en | 0.868808 | Validators Check that response status code is either a "standard" one like 404, 405, 200 (use -s cli argument to change it) or one of the declared one globally or for the path in swagger configuration # Default means all status code are allowed Check that response status code is different than 500 Check that for each post path, a body format is declared Check that each request returns with a content-type that is declared | 2.47005 | 2 |
python_grpc_mutual_tls_auth/commands/generate.py | ychen47/python-grpc-mutual-tls-auth | 9 | 6619226 | <filename>python_grpc_mutual_tls_auth/commands/generate.py<gh_stars>1-10
from invoke import task
@task
def server(ctx):
cmd = "openssl req -x509 -newkey rsa:4096 -sha256 -nodes -keyout {key} -subj '/CN={cn}' -out {crt}".format(
key=ctx.config['credentials']['server']['key'],
crt=ctx.config['credentials']['server']['cert'],
cn=ctx.config['credentials']['server']['host']
)
ctx.run(cmd)
@task
def client(ctx):
cmd = "openssl req -x509 -newkey rsa:4096 -sha256 -nodes -keyout {key} -subj '/CN=localhost' -out {crt}".format(
key=ctx.config['credentials']['client']['key'],
crt=ctx.config['credentials']['client']['cert'],
)
ctx.run(cmd)
| <filename>python_grpc_mutual_tls_auth/commands/generate.py<gh_stars>1-10
from invoke import task
@task
def server(ctx):
cmd = "openssl req -x509 -newkey rsa:4096 -sha256 -nodes -keyout {key} -subj '/CN={cn}' -out {crt}".format(
key=ctx.config['credentials']['server']['key'],
crt=ctx.config['credentials']['server']['cert'],
cn=ctx.config['credentials']['server']['host']
)
ctx.run(cmd)
@task
def client(ctx):
cmd = "openssl req -x509 -newkey rsa:4096 -sha256 -nodes -keyout {key} -subj '/CN=localhost' -out {crt}".format(
key=ctx.config['credentials']['client']['key'],
crt=ctx.config['credentials']['client']['cert'],
)
ctx.run(cmd)
| none | 1 | 2.293863 | 2 | |
pylibs/pymode/lint.py | thekuffs/dotfiles | 1 | 6619227 | import StringIO
import locale
from .interface import get_option, get_var, get_current_buffer, command
from .queue import add_task
locale.setlocale(locale.LC_CTYPE, "C")
def check_file():
checkers = get_option('lint_checker').split(',')
ignore = set(filter(lambda i: i, get_option('lint_ignore').split(',') +
get_var('lint_ignore').split(',')))
select = set(filter(lambda s: s, get_option('lint_select').split(',') +
get_var('lint_select').split(',')))
buffer = get_current_buffer()
add_task(run_checkers, checkers=checkers, ignore=ignore, title='Code checking', callback=parse_result, buffer=buffer, select=select)
def run_checkers(task=None, checkers=None, ignore=None, buffer=None, select=None):
buffer = (task and task.buffer) or buffer
filename = buffer.name
result = []
part = 100 / len(checkers)
for c in checkers:
checker = globals().get(c)
if not checker:
continue
try:
for e in checker(filename):
e.update(
col=e.get('col') or 0,
text="%s [%s]" % (e.get('text', '')
.strip().replace("'", "\"").split('\n')[0], c),
filename=filename,
bufnr=buffer.number,
)
result.append(e)
except SyntaxError, e:
result.append(dict(
lnum=e.lineno,
col=e.offset or 0,
text=e.args[0],
bufnr=buffer.number,
))
break
except Exception, e:
assert True
if task:
task.done += part
result = filter(lambda e: _ignore_error(e, select, ignore), result)
result = sorted(result, key=lambda x: x['lnum'])
if task:
task.result = result
task.finished = True
task.done = 100
def parse_result(result):
command(('let g:qf_list = %s' % repr(result)).replace('\': u', '\': '))
command('call pymode#lint#Parse()')
def mccabe(filename):
from pylibs.mccabe import get_code_complexity
complexity = int(get_option('lint_mccabe_complexity'))
return mc.get_module_complexity(filename, min=complexity)
def pep8(filename):
PEP8 or _init_pep8()
style = PEP8['style']
return style.input_file(filename)
def pylint(filename):
from pylibs.logilab.astng.builder import MANAGER
PYLINT or _init_pylint()
linter = PYLINT['lint']
MANAGER.astng_cache.clear()
linter.reporter.out = StringIO.StringIO()
linter.check(filename)
errors, linter.reporter.errors = linter.reporter.errors, []
return errors
def pyflakes(filename):
from pylibs.pyflakes import checker
import _ast
codeString = file(filename, 'U').read() + '\n'
errors = []
tree = compile(codeString, filename, "exec", _ast.PyCF_ONLY_AST)
w = checker.Checker(tree, filename)
w.messages.sort(lambda a, b: cmp(a.lineno, b.lineno))
for w in w.messages:
errors.append(dict(
lnum=w.lineno,
col=w.col,
text=w.message % w.message_args,
type='E'
))
return errors
PYLINT = dict()
def _init_pylint():
from pylibs.pylint import lint, checkers, reporters
import re
class VimReporter(reporters.BaseReporter):
def __init__(self):
reporters.BaseReporter.__init__(self)
self.errors = []
def add_message(self, msg_id, location, msg):
_, _, line, col = location[1:]
self.errors.append(dict(
lnum=line,
col=col,
text="%s %s" % (msg_id, msg),
type=msg_id[0]
))
PYLINT['lint'] = lint.PyLinter()
PYLINT['re'] = re.compile(
'^(?:.:)?[^:]+:(\d+): \[([EWRCI]+)[^\]]*\] (.*)$')
checkers.initialize(PYLINT['lint'])
PYLINT['lint'].load_file_configuration(get_var('lint_config'))
PYLINT['lint'].set_option("output-format", "parseable")
PYLINT['lint'].set_option("include-ids", 1)
PYLINT['lint'].set_option("reports", 0)
PYLINT['lint'].reporter = VimReporter()
PEP8 = dict()
def _init_pep8():
from pylibs import pep8 as p8
class _PEP8Report(p8.BaseReport):
def init_file(self, filename, lines, expected, line_offset):
super(_PEP8Report, self).init_file(
filename, lines, expected, line_offset)
self.errors = []
def error(self, line_number, offset, text, check):
code = super(_PEP8Report, self).error(
line_number, offset, text, check)
self.errors.append(dict(
text=text,
type=code,
col=offset + 1,
lnum=line_number,
))
def get_file_results(self):
return self.errors
PEP8['style'] = p8.StyleGuide(reporter=_PEP8Report)
def _ignore_error(e, select, ignore):
for s in select:
if e['text'].startswith(s):
return True
for i in ignore:
if e['text'].startswith(i):
return False
return True
| import StringIO
import locale
from .interface import get_option, get_var, get_current_buffer, command
from .queue import add_task
locale.setlocale(locale.LC_CTYPE, "C")
def check_file():
checkers = get_option('lint_checker').split(',')
ignore = set(filter(lambda i: i, get_option('lint_ignore').split(',') +
get_var('lint_ignore').split(',')))
select = set(filter(lambda s: s, get_option('lint_select').split(',') +
get_var('lint_select').split(',')))
buffer = get_current_buffer()
add_task(run_checkers, checkers=checkers, ignore=ignore, title='Code checking', callback=parse_result, buffer=buffer, select=select)
def run_checkers(task=None, checkers=None, ignore=None, buffer=None, select=None):
buffer = (task and task.buffer) or buffer
filename = buffer.name
result = []
part = 100 / len(checkers)
for c in checkers:
checker = globals().get(c)
if not checker:
continue
try:
for e in checker(filename):
e.update(
col=e.get('col') or 0,
text="%s [%s]" % (e.get('text', '')
.strip().replace("'", "\"").split('\n')[0], c),
filename=filename,
bufnr=buffer.number,
)
result.append(e)
except SyntaxError, e:
result.append(dict(
lnum=e.lineno,
col=e.offset or 0,
text=e.args[0],
bufnr=buffer.number,
))
break
except Exception, e:
assert True
if task:
task.done += part
result = filter(lambda e: _ignore_error(e, select, ignore), result)
result = sorted(result, key=lambda x: x['lnum'])
if task:
task.result = result
task.finished = True
task.done = 100
def parse_result(result):
command(('let g:qf_list = %s' % repr(result)).replace('\': u', '\': '))
command('call pymode#lint#Parse()')
def mccabe(filename):
from pylibs.mccabe import get_code_complexity
complexity = int(get_option('lint_mccabe_complexity'))
return mc.get_module_complexity(filename, min=complexity)
def pep8(filename):
PEP8 or _init_pep8()
style = PEP8['style']
return style.input_file(filename)
def pylint(filename):
from pylibs.logilab.astng.builder import MANAGER
PYLINT or _init_pylint()
linter = PYLINT['lint']
MANAGER.astng_cache.clear()
linter.reporter.out = StringIO.StringIO()
linter.check(filename)
errors, linter.reporter.errors = linter.reporter.errors, []
return errors
def pyflakes(filename):
from pylibs.pyflakes import checker
import _ast
codeString = file(filename, 'U').read() + '\n'
errors = []
tree = compile(codeString, filename, "exec", _ast.PyCF_ONLY_AST)
w = checker.Checker(tree, filename)
w.messages.sort(lambda a, b: cmp(a.lineno, b.lineno))
for w in w.messages:
errors.append(dict(
lnum=w.lineno,
col=w.col,
text=w.message % w.message_args,
type='E'
))
return errors
PYLINT = dict()
def _init_pylint():
from pylibs.pylint import lint, checkers, reporters
import re
class VimReporter(reporters.BaseReporter):
def __init__(self):
reporters.BaseReporter.__init__(self)
self.errors = []
def add_message(self, msg_id, location, msg):
_, _, line, col = location[1:]
self.errors.append(dict(
lnum=line,
col=col,
text="%s %s" % (msg_id, msg),
type=msg_id[0]
))
PYLINT['lint'] = lint.PyLinter()
PYLINT['re'] = re.compile(
'^(?:.:)?[^:]+:(\d+): \[([EWRCI]+)[^\]]*\] (.*)$')
checkers.initialize(PYLINT['lint'])
PYLINT['lint'].load_file_configuration(get_var('lint_config'))
PYLINT['lint'].set_option("output-format", "parseable")
PYLINT['lint'].set_option("include-ids", 1)
PYLINT['lint'].set_option("reports", 0)
PYLINT['lint'].reporter = VimReporter()
PEP8 = dict()
def _init_pep8():
from pylibs import pep8 as p8
class _PEP8Report(p8.BaseReport):
def init_file(self, filename, lines, expected, line_offset):
super(_PEP8Report, self).init_file(
filename, lines, expected, line_offset)
self.errors = []
def error(self, line_number, offset, text, check):
code = super(_PEP8Report, self).error(
line_number, offset, text, check)
self.errors.append(dict(
text=text,
type=code,
col=offset + 1,
lnum=line_number,
))
def get_file_results(self):
return self.errors
PEP8['style'] = p8.StyleGuide(reporter=_PEP8Report)
def _ignore_error(e, select, ignore):
for s in select:
if e['text'].startswith(s):
return True
for i in ignore:
if e['text'].startswith(i):
return False
return True
| gl | 0.102439 | #lint#Parse()') | 2.292119 | 2 |
src/pytorch_fid/utils.py | omsrisagar/pytorch-fid | 0 | 6619228 | import pickle
import numpy as np
import matplotlib
# matplotlib.use('MacOSX')
# matplotlib.use('Qt')
from matplotlib import rcParams
from cycler import cycler
from matplotlib import pyplot as plt
import os, sys
def format_y(y):
if isinstance(y, list) and len(y) != 0:
if isinstance(y[0], list):
lengths = [len(obj) for obj in y]
minlength = min(lengths)
y = [obj[:minlength] for obj in y]
return y
def plot_figures(output_path, desc, y, xlabel, ylabel, x=None, yerr = None, legend=None, legendloc='best',
legendncol=1, title=None, xlim=None, ylim=None, show_plot=False, gen_pkl=True, save_pdf=False,
plt_only=False):
plt.clf()
plt.close()
if not plt_only:
rcParams.update({'font.size': 20})
rcParams['interactive'] = True
plt.ioff()
plt.rc('axes', prop_cycle=cycler('color',['black', 'red', 'blue', 'black', 'red', 'blue', 'black','red', 'blue', 'black', 'red', 'blue', 'black']) + cycler('marker', ['*', '+', 'x', 'o', '<', '>', 'v', '^', ',', "_", '.', '|', 'X']) + cycler('linestyle', ['-', '--', '-.', ':', '-', '--', '-.',':', '-', '--', '-.',':','-']))
# this ensures that type-3 fonts are not used when generating figures
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
markersize=8
linewidth=2
capsize = 6 # not recognized in plt.rc
elinewidth = 2 # same
markeredgewidth = 1
plt.rc('lines', linewidth=linewidth, markersize=markersize, markeredgewidth=markeredgewidth)
# plt.gca().set_prop_cycle(cycler('color',['red', 'green', 'blue', 'red', 'green', 'blue','red']))
# markers = ['*', '+', 'x', 'o', '<', '>', ','] # http://matplotlib.org/api/markers_api.html
# linestyles = ['-', '--', '-.', ':', '-', '--', '-.'] # http://matplotlib.org/api/lines_api.html
fig = plt.figure(1, figsize=(40,15)) # width, height
# fig = plt.figure(1, figsize=(7.5,7.5)) # width, height
# fig = plt.figure(1) # width, height
y = format_y(y)
y = np.array(y)
if yerr is not None:
yerr = format_y(yerr)
yerr = np.array(yerr)
assert np.shape(y) == np.shape(yerr)
shape = y.shape
if len(shape) == 1:
ncols = shape[0]
nrows = 1
else:
nrows, ncols = shape
if x is None:
x = range(1,ncols+1)
if nrows == 1:
if yerr is None:
plt.plot(x, y)
else:
ax = plt.gca() # use this only if needed
ax.set_xscale('log')
# ax.set_yscale('log')
(_, caps, _) = plt.errorbar(x, y, yerr, capsize=capsize, elinewidth=elinewidth)
for cap in caps:
cap.set_markeredgewidth(3)
else:
if yerr is None:
for var_indx in range(nrows):
plt.plot(x, y[var_indx, :])
else:
ax = plt.gca() # use this only if needed
ax.set_xscale('log')
for var_indx in range(nrows):
(_, caps, _) = plt.errorbar(x, y[var_indx, :], yerr[var_indx, :], capsize=capsize, elinewidth=elinewidth)
for cap in caps:
cap.set_markeredgewidth(3)
# plt.ylim(ymin=0)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
if legend is not None:
plt.legend(legend, loc=legendloc, ncol=legendncol)
if title is not None:
plt.title(title)
if xlim is not None:
plt.xlim(xlim)
if ylim is not None:
plt.ylim(ylim)
plt.grid(True, which='both')
if not plt_only:
# fig.tight_layout()
filename = 'fig_' + desc
if save_pdf:
fig.savefig(os.path.join(output_path, filename + '.pdf'))
plt.savefig(os.path.join(output_path, filename + '.png'))
if gen_pkl:
save_object1(fig, os.path.join(output_path, 'pkl', filename + '.pkl'))
if show_plot:
plt.show()
plt.clf()
plt.close()
def plot_figures_subplot(output_path, desc, y, xlabels, ylabels, x=None, legends=None, legendlocs=None, legendncols=None, show_plot=False, gen_pkl=True, save_pdf=False, save_eps=False):
rcParams.update({'font.size': 20})
plt.ioff()
plt.rc('axes', prop_cycle=cycler('color',['black', 'red', 'blue', 'black', 'red', 'blue', 'black','red', 'blue', 'black', 'red', 'blue', 'black']) + cycler('marker', ['*', '+', 'x', 'o', '<', '>', 'v', '^', ',', "_", '.', '|', 'X']) + cycler('linestyle', ['-', '--', '-.', ':', '-', '--', '-.',':', '-', '--', '-.',':','-']))
markersize=3
linewidth=3
plt.rc('lines', linewidth=linewidth, markersize=markersize)
# plt.gca().set_prop_cycle(cycler('color',['red', 'green', 'blue', 'red', 'green', 'blue','red']))
# markers = ['*', '+', 'x', 'o', '<', '>', ','] # http://matplotlib.org/api/markers_api.html
# linestyles = ['-', '--', '-.', ':', '-', '--', '-.'] # http://matplotlib.org/api/lines_api.html
if isinstance(y, list) and len(y) != 0:
if isinstance(y[0], list):
lengths = [len(obj) for obj in y]
minlength = min(lengths)
y = [obj[:minlength] for obj in y]
y = np.array(y)
shape = y.shape
if len(shape) == 1:
ncols = shape[0]
nrows = 1
else:
nrows, ncols = shape
if x is None:
x = range(1,ncols+1)
fig,_ = plt.subplots(nrows, 1, figsize=(11.25,7.5)) # width, height
if legends is None:
legends = legendlocs = legendncols = [None]*nrows
for var_indx in range(nrows):
subplt_indx = (nrows*100) + (1*10) + (var_indx+1)
plt.subplot(subplt_indx)
plot_figures('','',y[var_indx,:], xlabels[var_indx],ylabels[var_indx],x,legends[var_indx],legendlocs[var_indx],legendncols[var_indx],plt_only=True)
# ax[var_indx].plot(x,y[var_indx,:])
fig.tight_layout()
filename = 'fig_' + desc
if save_pdf:
plt.savefig(output_path + filename + '.pdf')
if save_eps:
plt.savefig(output_path + filename + '.eps')
plt.savefig(output_path + filename + '.png')
if gen_pkl:
save_object1(fig, output_path + 'pkl/' + filename + '.pkl')
if show_plot:
plt.show()
plt.clf()
plt.close()
def plot_figures_old(output_path, desc, y, xlabel, ylabel, x=None, legend=None, legendloc=None, legendncol=None, show_plot=False, gen_pkl=True, save_pdf=False, save_eps=False):
# rcParams.update({'font.size': 20})
# plt.ioff()
# plt.rc('axes', prop_cycle=cycler('color',['black', 'red', 'blue', 'black', 'red', 'blue', 'black','red', 'blue', 'black', 'red', 'blue', 'black']) + cycler('marker', ['*', '+', 'x', 'o', '<', '>', 'v', '^', ',', "_", '.', '|', 'X']) + cycler('linestyle', ['-', '--', '-.', ':', '-', '--', '-.',':', '-', '--', '-.',':','-']))
# markersize=10
# linewidth=3
# plt.rc('lines', linewidth=linewidth, markersize=markersize)
#
# # plt.gca().set_prop_cycle(cycler('color',['red', 'green', 'blue', 'red', 'green', 'blue','red']))
# # markers = ['*', '+', 'x', 'o', '<', '>', ','] # http://matplotlib.org/api/markers_api.html
# # linestyles = ['-', '--', '-.', ':', '-', '--', '-.'] # http://matplotlib.org/api/lines_api.html
# fig = plt.figure(1, figsize=(15, 10)) # width, height
rcParams.update({'font.size': 20})
plt.ioff()
plt.rc('axes', prop_cycle=cycler('color',['black', 'red', 'blue', 'black', 'red', 'blue', 'black','red', 'blue', 'black', 'red', 'blue', 'black']) + cycler('marker', ['*', '+', 'x', 'o', '<', '>', 'v', '^', ',', "_", '.', '|', 'X']) + cycler('linestyle', ['-', '--', '-.', ':', '-', '--', '-.',':', '-', '--', '-.',':','-']))
markersize=11
linewidth=3
capsize = 6 # not recognized in plt.rc
elinewidth = 3 # same
markeredgewidth = 1
plt.rc('lines', linewidth=linewidth, markersize=markersize, markeredgewidth=markeredgewidth)
# plt.gca().set_prop_cycle(cycler('color',['red', 'green', 'blue', 'red', 'green', 'blue','red']))
# markers = ['*', '+', 'x', 'o', '<', '>', ','] # http://matplotlib.org/api/markers_api.html
# linestyles = ['-', '--', '-.', ':', '-', '--', '-.'] # http://matplotlib.org/api/lines_api.html
# fig = plt.figure(1, figsize=(11.25,7.5)) # width, height
fig = plt.figure(1, figsize=(7.5,7.5)) # width, height
if isinstance(y, list) and len(y) != 0:
if isinstance(y[0], list):
lengths = [len(obj) for obj in y]
minlength = min(lengths)
y = [obj[:minlength] for obj in y]
y = np.array(y)
shape = y.shape
if len(shape) == 1:
ncols = shape[0]
nrows = 1
else:
nrows, ncols = shape
if x is None:
x = range(1,ncols+1)
if nrows == 1:
plt.plot(x, y)
else:
for var_indx in range(nrows):
plt.plot(x, y[var_indx, :])
plt.xlabel(xlabel)
plt.ylabel(ylabel)
if legend is not None:
plt.legend(legend, loc=legendloc, ncol=legendncol)
plt.grid()
fig.tight_layout()
filename = 'fig_' + desc
if save_pdf:
plt.savefig(output_path / (filename + '.pdf'))
if save_eps:
plt.savefig(output_path / (filename + '.eps'))
plt.savefig(output_path / (filename + '.png'))
if gen_pkl:
save_object1(fig, output_path / 'pkl/' / (filename + '.pkl'))
if show_plot:
plt.show()
plt.clf()
plt.close()
def sort_pair_of_lists(list1, list2, reverse=False):
# sorting will be based on the values of list1 (not list2)
zipped_pair = zip(list1, list2)
sorted_zip = sorted(zipped_pair, reverse=reverse)
list1_sorted = [x for x, _ in sorted_zip]
list2_sorted = [x for _, x in sorted_zip]
return [list1_sorted, list2_sorted]
def print_out(s, f=None, new_line=True):
"""Similar to print but with support to flush and output to a file."""
s = str(s)
if f:
f.write(s)
if new_line:
f.write("\n")
# stdout
print(s, end="", file=sys.stdout)
if new_line:
sys.stdout.write("\n")
sys.stdout.flush()
def save_object1(obj1, filename):
os.makedirs(os.path.dirname(filename), exist_ok=True)
with open(filename, 'wb') as output:
pickle.dump(obj1, output, pickle.HIGHEST_PROTOCOL)
def save_object2(obj1, obj2, filename):
os.makedirs(os.path.dirname(filename), exist_ok=True)
with open(filename, 'wb') as output:
pickle.dump(obj1, output, pickle.HIGHEST_PROTOCOL)
pickle.dump(obj2, output, pickle.HIGHEST_PROTOCOL)
def read_object1(filename):
with open(filename, 'rb') as input:
return pickle.load(input)
def read_object2(filename):
with open(filename, 'rb') as input:
first = pickle.load(input)
second = pickle.load(input)
return first, second
# Function to get index of ceiling of x in arr[low..high]*/
def ceilSearch(arr, low, high, x):
# If x is smaller than or equal to the first element,
# then return the first element */
if x <= arr[low]:
return low
# If x is greater than the last element, then return -1 */
if x > arr[high]:
return -1
# get the index of middle element of arr[low..high]*/
mid = int ((low + high) / 2) # low + (high - low)/2 */
# If x is same as middle element, then return mid */
if arr[mid] == x:
return mid
# If x is greater than arr[mid], then either arr[mid + 1]
# is ceiling of x or ceiling lies in arr[mid+1...high] */
# elif arr[mid] < x:
# if mid + 1 <= high and x <= arr[mid + 1]:
# return mid + 1
# else:
elif arr[mid] < x:
return ceilSearch(arr, mid + 1, high, x)
# If x is smaller than arr[mid], then either arr[mid]
# is ceiling of x or ceiling lies in arr[mid-1...high] */
else:
# if mid - 1 >= low and x > arr[mid - 1]:
# return mid
# else:
return ceilSearch(arr, low, mid, x)
# Binary search function to get index of floor of x in arr[low..high]*/
def floorSearch(arr, low, high, x):
# If x is smaller than or equal to the first element,
# then return the first element */
if x >= arr[high]:
return high
# If x is greater than the last element, then return -1 */
if x < arr[low]:
return -1
# get the index of middle element of arr[low..high]*/
mid = int ((low + high) / 2) # low + (high - low)/2 */
# If x is same as middle element, then return mid */
if arr[mid] == x:
return mid
# If x is greater than arr[mid], then floor of x lies in arr[mid...high] */
# elif arr[mid] < x:
# if mid + 1 <= high and x <= arr[mid + 1]:
# return mid + 1
# else:
elif arr[mid] < x:
if x < arr[mid+1]: # this is done to avoid infinite recursion; consider [2,8] and floor(3)
return mid
return floorSearch(arr, mid, high, x)
# If x is smaller than arr[mid], then floor of x lies in arr[low...mid-1] */
else:
# if mid - 1 >= low and x > arr[mid - 1]:
# return mid
# else:
return floorSearch(arr, low, mid-1, x)
| import pickle
import numpy as np
import matplotlib
# matplotlib.use('MacOSX')
# matplotlib.use('Qt')
from matplotlib import rcParams
from cycler import cycler
from matplotlib import pyplot as plt
import os, sys
def format_y(y):
if isinstance(y, list) and len(y) != 0:
if isinstance(y[0], list):
lengths = [len(obj) for obj in y]
minlength = min(lengths)
y = [obj[:minlength] for obj in y]
return y
def plot_figures(output_path, desc, y, xlabel, ylabel, x=None, yerr = None, legend=None, legendloc='best',
legendncol=1, title=None, xlim=None, ylim=None, show_plot=False, gen_pkl=True, save_pdf=False,
plt_only=False):
plt.clf()
plt.close()
if not plt_only:
rcParams.update({'font.size': 20})
rcParams['interactive'] = True
plt.ioff()
plt.rc('axes', prop_cycle=cycler('color',['black', 'red', 'blue', 'black', 'red', 'blue', 'black','red', 'blue', 'black', 'red', 'blue', 'black']) + cycler('marker', ['*', '+', 'x', 'o', '<', '>', 'v', '^', ',', "_", '.', '|', 'X']) + cycler('linestyle', ['-', '--', '-.', ':', '-', '--', '-.',':', '-', '--', '-.',':','-']))
# this ensures that type-3 fonts are not used when generating figures
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
markersize=8
linewidth=2
capsize = 6 # not recognized in plt.rc
elinewidth = 2 # same
markeredgewidth = 1
plt.rc('lines', linewidth=linewidth, markersize=markersize, markeredgewidth=markeredgewidth)
# plt.gca().set_prop_cycle(cycler('color',['red', 'green', 'blue', 'red', 'green', 'blue','red']))
# markers = ['*', '+', 'x', 'o', '<', '>', ','] # http://matplotlib.org/api/markers_api.html
# linestyles = ['-', '--', '-.', ':', '-', '--', '-.'] # http://matplotlib.org/api/lines_api.html
fig = plt.figure(1, figsize=(40,15)) # width, height
# fig = plt.figure(1, figsize=(7.5,7.5)) # width, height
# fig = plt.figure(1) # width, height
y = format_y(y)
y = np.array(y)
if yerr is not None:
yerr = format_y(yerr)
yerr = np.array(yerr)
assert np.shape(y) == np.shape(yerr)
shape = y.shape
if len(shape) == 1:
ncols = shape[0]
nrows = 1
else:
nrows, ncols = shape
if x is None:
x = range(1,ncols+1)
if nrows == 1:
if yerr is None:
plt.plot(x, y)
else:
ax = plt.gca() # use this only if needed
ax.set_xscale('log')
# ax.set_yscale('log')
(_, caps, _) = plt.errorbar(x, y, yerr, capsize=capsize, elinewidth=elinewidth)
for cap in caps:
cap.set_markeredgewidth(3)
else:
if yerr is None:
for var_indx in range(nrows):
plt.plot(x, y[var_indx, :])
else:
ax = plt.gca() # use this only if needed
ax.set_xscale('log')
for var_indx in range(nrows):
(_, caps, _) = plt.errorbar(x, y[var_indx, :], yerr[var_indx, :], capsize=capsize, elinewidth=elinewidth)
for cap in caps:
cap.set_markeredgewidth(3)
# plt.ylim(ymin=0)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
if legend is not None:
plt.legend(legend, loc=legendloc, ncol=legendncol)
if title is not None:
plt.title(title)
if xlim is not None:
plt.xlim(xlim)
if ylim is not None:
plt.ylim(ylim)
plt.grid(True, which='both')
if not plt_only:
# fig.tight_layout()
filename = 'fig_' + desc
if save_pdf:
fig.savefig(os.path.join(output_path, filename + '.pdf'))
plt.savefig(os.path.join(output_path, filename + '.png'))
if gen_pkl:
save_object1(fig, os.path.join(output_path, 'pkl', filename + '.pkl'))
if show_plot:
plt.show()
plt.clf()
plt.close()
def plot_figures_subplot(output_path, desc, y, xlabels, ylabels, x=None, legends=None, legendlocs=None, legendncols=None, show_plot=False, gen_pkl=True, save_pdf=False, save_eps=False):
rcParams.update({'font.size': 20})
plt.ioff()
plt.rc('axes', prop_cycle=cycler('color',['black', 'red', 'blue', 'black', 'red', 'blue', 'black','red', 'blue', 'black', 'red', 'blue', 'black']) + cycler('marker', ['*', '+', 'x', 'o', '<', '>', 'v', '^', ',', "_", '.', '|', 'X']) + cycler('linestyle', ['-', '--', '-.', ':', '-', '--', '-.',':', '-', '--', '-.',':','-']))
markersize=3
linewidth=3
plt.rc('lines', linewidth=linewidth, markersize=markersize)
# plt.gca().set_prop_cycle(cycler('color',['red', 'green', 'blue', 'red', 'green', 'blue','red']))
# markers = ['*', '+', 'x', 'o', '<', '>', ','] # http://matplotlib.org/api/markers_api.html
# linestyles = ['-', '--', '-.', ':', '-', '--', '-.'] # http://matplotlib.org/api/lines_api.html
if isinstance(y, list) and len(y) != 0:
if isinstance(y[0], list):
lengths = [len(obj) for obj in y]
minlength = min(lengths)
y = [obj[:minlength] for obj in y]
y = np.array(y)
shape = y.shape
if len(shape) == 1:
ncols = shape[0]
nrows = 1
else:
nrows, ncols = shape
if x is None:
x = range(1,ncols+1)
fig,_ = plt.subplots(nrows, 1, figsize=(11.25,7.5)) # width, height
if legends is None:
legends = legendlocs = legendncols = [None]*nrows
for var_indx in range(nrows):
subplt_indx = (nrows*100) + (1*10) + (var_indx+1)
plt.subplot(subplt_indx)
plot_figures('','',y[var_indx,:], xlabels[var_indx],ylabels[var_indx],x,legends[var_indx],legendlocs[var_indx],legendncols[var_indx],plt_only=True)
# ax[var_indx].plot(x,y[var_indx,:])
fig.tight_layout()
filename = 'fig_' + desc
if save_pdf:
plt.savefig(output_path + filename + '.pdf')
if save_eps:
plt.savefig(output_path + filename + '.eps')
plt.savefig(output_path + filename + '.png')
if gen_pkl:
save_object1(fig, output_path + 'pkl/' + filename + '.pkl')
if show_plot:
plt.show()
plt.clf()
plt.close()
def plot_figures_old(output_path, desc, y, xlabel, ylabel, x=None, legend=None, legendloc=None, legendncol=None, show_plot=False, gen_pkl=True, save_pdf=False, save_eps=False):
# rcParams.update({'font.size': 20})
# plt.ioff()
# plt.rc('axes', prop_cycle=cycler('color',['black', 'red', 'blue', 'black', 'red', 'blue', 'black','red', 'blue', 'black', 'red', 'blue', 'black']) + cycler('marker', ['*', '+', 'x', 'o', '<', '>', 'v', '^', ',', "_", '.', '|', 'X']) + cycler('linestyle', ['-', '--', '-.', ':', '-', '--', '-.',':', '-', '--', '-.',':','-']))
# markersize=10
# linewidth=3
# plt.rc('lines', linewidth=linewidth, markersize=markersize)
#
# # plt.gca().set_prop_cycle(cycler('color',['red', 'green', 'blue', 'red', 'green', 'blue','red']))
# # markers = ['*', '+', 'x', 'o', '<', '>', ','] # http://matplotlib.org/api/markers_api.html
# # linestyles = ['-', '--', '-.', ':', '-', '--', '-.'] # http://matplotlib.org/api/lines_api.html
# fig = plt.figure(1, figsize=(15, 10)) # width, height
rcParams.update({'font.size': 20})
plt.ioff()
plt.rc('axes', prop_cycle=cycler('color',['black', 'red', 'blue', 'black', 'red', 'blue', 'black','red', 'blue', 'black', 'red', 'blue', 'black']) + cycler('marker', ['*', '+', 'x', 'o', '<', '>', 'v', '^', ',', "_", '.', '|', 'X']) + cycler('linestyle', ['-', '--', '-.', ':', '-', '--', '-.',':', '-', '--', '-.',':','-']))
markersize=11
linewidth=3
capsize = 6 # not recognized in plt.rc
elinewidth = 3 # same
markeredgewidth = 1
plt.rc('lines', linewidth=linewidth, markersize=markersize, markeredgewidth=markeredgewidth)
# plt.gca().set_prop_cycle(cycler('color',['red', 'green', 'blue', 'red', 'green', 'blue','red']))
# markers = ['*', '+', 'x', 'o', '<', '>', ','] # http://matplotlib.org/api/markers_api.html
# linestyles = ['-', '--', '-.', ':', '-', '--', '-.'] # http://matplotlib.org/api/lines_api.html
# fig = plt.figure(1, figsize=(11.25,7.5)) # width, height
fig = plt.figure(1, figsize=(7.5,7.5)) # width, height
if isinstance(y, list) and len(y) != 0:
if isinstance(y[0], list):
lengths = [len(obj) for obj in y]
minlength = min(lengths)
y = [obj[:minlength] for obj in y]
y = np.array(y)
shape = y.shape
if len(shape) == 1:
ncols = shape[0]
nrows = 1
else:
nrows, ncols = shape
if x is None:
x = range(1,ncols+1)
if nrows == 1:
plt.plot(x, y)
else:
for var_indx in range(nrows):
plt.plot(x, y[var_indx, :])
plt.xlabel(xlabel)
plt.ylabel(ylabel)
if legend is not None:
plt.legend(legend, loc=legendloc, ncol=legendncol)
plt.grid()
fig.tight_layout()
filename = 'fig_' + desc
if save_pdf:
plt.savefig(output_path / (filename + '.pdf'))
if save_eps:
plt.savefig(output_path / (filename + '.eps'))
plt.savefig(output_path / (filename + '.png'))
if gen_pkl:
save_object1(fig, output_path / 'pkl/' / (filename + '.pkl'))
if show_plot:
plt.show()
plt.clf()
plt.close()
def sort_pair_of_lists(list1, list2, reverse=False):
# sorting will be based on the values of list1 (not list2)
zipped_pair = zip(list1, list2)
sorted_zip = sorted(zipped_pair, reverse=reverse)
list1_sorted = [x for x, _ in sorted_zip]
list2_sorted = [x for _, x in sorted_zip]
return [list1_sorted, list2_sorted]
def print_out(s, f=None, new_line=True):
"""Similar to print but with support to flush and output to a file."""
s = str(s)
if f:
f.write(s)
if new_line:
f.write("\n")
# stdout
print(s, end="", file=sys.stdout)
if new_line:
sys.stdout.write("\n")
sys.stdout.flush()
def save_object1(obj1, filename):
os.makedirs(os.path.dirname(filename), exist_ok=True)
with open(filename, 'wb') as output:
pickle.dump(obj1, output, pickle.HIGHEST_PROTOCOL)
def save_object2(obj1, obj2, filename):
os.makedirs(os.path.dirname(filename), exist_ok=True)
with open(filename, 'wb') as output:
pickle.dump(obj1, output, pickle.HIGHEST_PROTOCOL)
pickle.dump(obj2, output, pickle.HIGHEST_PROTOCOL)
def read_object1(filename):
with open(filename, 'rb') as input:
return pickle.load(input)
def read_object2(filename):
with open(filename, 'rb') as input:
first = pickle.load(input)
second = pickle.load(input)
return first, second
# Function to get index of ceiling of x in arr[low..high]*/
def ceilSearch(arr, low, high, x):
# If x is smaller than or equal to the first element,
# then return the first element */
if x <= arr[low]:
return low
# If x is greater than the last element, then return -1 */
if x > arr[high]:
return -1
# get the index of middle element of arr[low..high]*/
mid = int ((low + high) / 2) # low + (high - low)/2 */
# If x is same as middle element, then return mid */
if arr[mid] == x:
return mid
# If x is greater than arr[mid], then either arr[mid + 1]
# is ceiling of x or ceiling lies in arr[mid+1...high] */
# elif arr[mid] < x:
# if mid + 1 <= high and x <= arr[mid + 1]:
# return mid + 1
# else:
elif arr[mid] < x:
return ceilSearch(arr, mid + 1, high, x)
# If x is smaller than arr[mid], then either arr[mid]
# is ceiling of x or ceiling lies in arr[mid-1...high] */
else:
# if mid - 1 >= low and x > arr[mid - 1]:
# return mid
# else:
return ceilSearch(arr, low, mid, x)
# Binary search function to get index of floor of x in arr[low..high]*/
def floorSearch(arr, low, high, x):
# If x is smaller than or equal to the first element,
# then return the first element */
if x >= arr[high]:
return high
# If x is greater than the last element, then return -1 */
if x < arr[low]:
return -1
# get the index of middle element of arr[low..high]*/
mid = int ((low + high) / 2) # low + (high - low)/2 */
# If x is same as middle element, then return mid */
if arr[mid] == x:
return mid
# If x is greater than arr[mid], then floor of x lies in arr[mid...high] */
# elif arr[mid] < x:
# if mid + 1 <= high and x <= arr[mid + 1]:
# return mid + 1
# else:
elif arr[mid] < x:
if x < arr[mid+1]: # this is done to avoid infinite recursion; consider [2,8] and floor(3)
return mid
return floorSearch(arr, mid, high, x)
# If x is smaller than arr[mid], then floor of x lies in arr[low...mid-1] */
else:
# if mid - 1 >= low and x > arr[mid - 1]:
# return mid
# else:
return floorSearch(arr, low, mid-1, x)
| en | 0.305013 | # matplotlib.use('MacOSX') # matplotlib.use('Qt') # this ensures that type-3 fonts are not used when generating figures # not recognized in plt.rc # same # plt.gca().set_prop_cycle(cycler('color',['red', 'green', 'blue', 'red', 'green', 'blue','red'])) # markers = ['*', '+', 'x', 'o', '<', '>', ','] # http://matplotlib.org/api/markers_api.html # linestyles = ['-', '--', '-.', ':', '-', '--', '-.'] # http://matplotlib.org/api/lines_api.html # width, height # fig = plt.figure(1, figsize=(7.5,7.5)) # width, height # fig = plt.figure(1) # width, height # use this only if needed # ax.set_yscale('log') # use this only if needed # plt.ylim(ymin=0) # fig.tight_layout() # plt.gca().set_prop_cycle(cycler('color',['red', 'green', 'blue', 'red', 'green', 'blue','red'])) # markers = ['*', '+', 'x', 'o', '<', '>', ','] # http://matplotlib.org/api/markers_api.html # linestyles = ['-', '--', '-.', ':', '-', '--', '-.'] # http://matplotlib.org/api/lines_api.html # width, height # ax[var_indx].plot(x,y[var_indx,:]) # rcParams.update({'font.size': 20}) # plt.ioff() # plt.rc('axes', prop_cycle=cycler('color',['black', 'red', 'blue', 'black', 'red', 'blue', 'black','red', 'blue', 'black', 'red', 'blue', 'black']) + cycler('marker', ['*', '+', 'x', 'o', '<', '>', 'v', '^', ',', "_", '.', '|', 'X']) + cycler('linestyle', ['-', '--', '-.', ':', '-', '--', '-.',':', '-', '--', '-.',':','-'])) # markersize=10 # linewidth=3 # plt.rc('lines', linewidth=linewidth, markersize=markersize) # # # plt.gca().set_prop_cycle(cycler('color',['red', 'green', 'blue', 'red', 'green', 'blue','red'])) # # markers = ['*', '+', 'x', 'o', '<', '>', ','] # http://matplotlib.org/api/markers_api.html # # linestyles = ['-', '--', '-.', ':', '-', '--', '-.'] # http://matplotlib.org/api/lines_api.html # fig = plt.figure(1, figsize=(15, 10)) # width, height # not recognized in plt.rc # same # plt.gca().set_prop_cycle(cycler('color',['red', 'green', 'blue', 'red', 'green', 'blue','red'])) # markers = 
['*', '+', 'x', 'o', '<', '>', ','] # http://matplotlib.org/api/markers_api.html # linestyles = ['-', '--', '-.', ':', '-', '--', '-.'] # http://matplotlib.org/api/lines_api.html # fig = plt.figure(1, figsize=(11.25,7.5)) # width, height # width, height # sorting will be based on the values of list1 (not list2) Similar to print but with support to flush and output to a file. # stdout # Function to get index of ceiling of x in arr[low..high]*/ # If x is smaller than or equal to the first element, # then return the first element */ # If x is greater than the last element, then return -1 */ # get the index of middle element of arr[low..high]*/ # low + (high - low)/2 */ # If x is same as middle element, then return mid */ # If x is greater than arr[mid], then either arr[mid + 1] # is ceiling of x or ceiling lies in arr[mid+1...high] */ # elif arr[mid] < x: # if mid + 1 <= high and x <= arr[mid + 1]: # return mid + 1 # else: # If x is smaller than arr[mid], then either arr[mid] # is ceiling of x or ceiling lies in arr[mid-1...high] */ # if mid - 1 >= low and x > arr[mid - 1]: # return mid # else: # Binary search function to get index of floor of x in arr[low..high]*/ # If x is smaller than or equal to the first element, # then return the first element */ # If x is greater than the last element, then return -1 */ # get the index of middle element of arr[low..high]*/ # low + (high - low)/2 */ # If x is same as middle element, then return mid */ # If x is greater than arr[mid], then floor of x lies in arr[mid...high] */ # elif arr[mid] < x: # if mid + 1 <= high and x <= arr[mid + 1]: # return mid + 1 # else: # this is done to avoid infinite recursion; consider [2,8] and floor(3) # If x is smaller than arr[mid], then floor of x lies in arr[low...mid-1] */ # if mid - 1 >= low and x > arr[mid - 1]: # return mid # else: | 2.454702 | 2 |
aulas/expressao_cond.py | thiagonantunes/Estudos | 1 | 6619229 | # EXPRESSÃO CONDICIONAL COM OPERADOR OR
nome = input('Digite seu nome: ')
if nome:
print(f'Nome: {nome}')
else:
print('Você não digitou nada')
# código acima pode ser escrito da seguinte forma:
print(nome or 'Você não digitou nada')
a = 0
b = None
c = False
d = []
e = {}
f = 22
g = 'Thiago'
variavel = a or b or c or d or e or f or g
print(variavel) # irá retornar o 1º valor verdadeiro | # EXPRESSÃO CONDICIONAL COM OPERADOR OR
nome = input('Digite seu nome: ')
if nome:
print(f'Nome: {nome}')
else:
print('Você não digitou nada')
# código acima pode ser escrito da seguinte forma:
print(nome or 'Você não digitou nada')
a = 0
b = None
c = False
d = []
e = {}
f = 22
g = 'Thiago'
variavel = a or b or c or d or e or f or g
print(variavel) # irá retornar o 1º valor verdadeiro | pt | 0.916644 | # EXPRESSÃO CONDICIONAL COM OPERADOR OR # código acima pode ser escrito da seguinte forma: # irá retornar o 1º valor verdadeiro | 3.877202 | 4 |
interview_challenges/barclays_codility_test/question_2.py | noelevans/playground | 1 | 6619230 | <gh_stars>1-10
# import numpy as np
def mean(ol):
return float(sum(ol)) / len(ol)
def abs(v):
return v > 0 and v or v * -1
def solution(A):
if not A:
return -1
m = mean(A)
vs = sorted((abs(v - m), i) for i, v in enumerate(A))
return vs[-1][1]
def main():
print solution([])
print solution([9, 4, -3, -10])
print solution([-1, -20, -1, -1, -1])
print solution([1, 1])
print solution([1])
if __name__ == '__main__':
main()
| # import numpy as np
def mean(ol):
return float(sum(ol)) / len(ol)
def abs(v):
return v > 0 and v or v * -1
def solution(A):
if not A:
return -1
m = mean(A)
vs = sorted((abs(v - m), i) for i, v in enumerate(A))
return vs[-1][1]
def main():
print solution([])
print solution([9, 4, -3, -10])
print solution([-1, -20, -1, -1, -1])
print solution([1, 1])
print solution([1])
if __name__ == '__main__':
main() | en | 0.786256 | # import numpy as np | 3.510431 | 4 |
deep_rl/agent/BaseAgent.py | neale/Procgen_bench | 2 | 6619231 | <reponame>neale/Procgen_bench<gh_stars>1-10
#######################################################################
# Copyright (C) 2017 <NAME>(<EMAIL>) #
# Permission given to modify the code as long as you keep this #
# declaration at the top #
#######################################################################
import os
import torch
import numpy as np
from ..utils import *
import torch.multiprocessing as mp
from collections import deque
from skimage.io import imsave
class BaseAgent:
def __init__(self, config):
self.config = config
self.logger = get_logger(tag=config.tag, log_level=config.log_level)
self.task_ind = 0
def close(self):
close_obj(self.task)
def save(self, filename):
os.makedirs(filename, exist_ok=True)
torch.save(self.network.state_dict(), '%s/agent.model' % (filename))
with open('%s/agent.stats' % (filename), 'wb') as f:
pickle.dump(self.config.state_normalizer.state_dict(), f)
def load(self, filename):
state_dict = torch.load('%s/agent.model' % filename, map_location=lambda storage, loc: storage)
self.network.load_state_dict(state_dict)
with open('%s/agent.stats' % (filename), 'rb') as f:
self.config.state_normalizer.load_state_dict(pickle.load(f))
def save_data(self, log_data, ep, save_tag, eval=True):
states, features, extra = log_data
states = np.stack(states)
features = np.stack(features)
if len(extra) > 0:
extra = np.stack(extra)
else:
extra = None
print ('states', states.shape)
print ('features', features.shape)
print ('tag', save_tag)
if eval:
mode = 'eval'
else:
mode = 'train'
path = save_tag+'_{}'.format(self.total_steps)
os.makedirs(path, exist_ok=True)
np.save(path+'/{}_ep_{}_states.npy'.format(mode, ep), states)
np.save(path+'/{}_ep_{}_features.npy'.format(mode, ep), features)
def eval_step(self, state):
raise NotImplementedError
def eval_with_record(self, state):
raise NotImplementedError
def eval_and_record_episode(self, training=False):
if training is False:
env = self.config.eval_env
else:
env = self.task
state = env.reset()
states_save = []
features_save = []
extra_save = []
while True:
action, features, extra = self.eval_with_record(state)
states_save.append(state[0])
features_save.append(features)
extra_save.append(extra)
state, reward, done, info = env.step(action)
ret = info[0]['episodic_return']
if ret is not None:
break
return ret, (states_save, features_save, extra_save)
def eval_episode(self):
env = self.config.eval_env
state = env.reset()
while True:
action = self.eval_step(state)
state, reward, done, info = env.step(action)
ret = info[0]['episodic_return']
if ret is not None:
break
return ret
def eval_episodes(self, save_tag):
episodic_returns = []
for ep in range(self.config.eval_episodes):
if self.config.record_eval_npy:
total_rewards, log_data = self.eval_and_record_episode()
self.save_data(log_data, ep, save_tag)
if self.config.record_train:
self.switch_task()
total_rewards, log_data = self.eval_and_record_episode(training=True)
self.save_data(log_data, ep, save_tag, eval=False)
else:
total_rewards = self.eval_episode()
episodic_returns.append(np.sum(total_rewards))
self.logger.info('steps %d, episodic_return_test %.2f(%.2f)' % (
self.total_steps, np.mean(episodic_returns), np.std(episodic_returns) / np.sqrt(len(episodic_returns))
))
self.logger.add_scalar('episodic_return_test', np.mean(episodic_returns), self.total_steps)
return {
'episodic_return_test': np.mean(episodic_returns),
}
def record_online_return(self, info, offset=0):
if isinstance(info, dict):
ret = info['episodic_return']
if ret is not None:
self.logger.add_scalar('episodic_return_train', ret, self.total_steps + offset)
self.logger.info('steps %d, episodic_return_train %s' % (self.total_steps + offset, ret))
elif isinstance(info, tuple):
for i, info_ in enumerate(info):
self.record_online_return(info_, i)
elif isinstance(info[0], dict):
pass
else:
raise NotImplementedError
def switch_task(self):
config = self.config
if not config.tasks:
return
segs = np.linspace(0, config.max_steps, len(config.tasks) + 1)
if self.total_steps > segs[self.task_ind + 1]:
self.task_ind += 1
self.task = config.tasks[self.task_ind]
self.states = self.task.reset()
self.states = config.state_normalizer(self.states)
def record_episode(self, dir, env):
mkdir(dir)
steps = 0
state = env.reset()
while True:
self.record_obs(env, dir, steps)
action = self.record_step(state)
state, reward, done, info = env.step(action)
ret = info[0]['episodic_return']
steps += 1
if ret is not None:
break
def record_step(self, state):
raise NotImplementedError
# For DMControl
def record_obs(self, env, dir, steps):
env = env.env.envs[0]
obs = env.render(mode='rgb_array')
imsave('%s/%04d.png' % (dir, steps), obs)
class BaseActor(mp.Process):
STEP = 0
RESET = 1
EXIT = 2
SPECS = 3
NETWORK = 4
CACHE = 5
def __init__(self, config):
mp.Process.__init__(self)
self.config = config
self.__pipe, self.__worker_pipe = mp.Pipe()
self._state = None
self._task = None
self._network = None
self._total_steps = 0
self.__cache_len = 2
if not config.async_actor:
self.start = lambda: None
self.step = self._sample
self.close = lambda: None
self._set_up()
self._task = config.task_fn()
def _sample(self):
transitions = []
for _ in range(self.config.sgd_update_frequency):
transition = self._transition()
if transition is not None:
transitions.append(transition)
return transitions
def run(self):
self._set_up()
config = self.config
self._task = config.task_fn()
cache = deque([], maxlen=2)
while True:
op, data = self.__worker_pipe.recv()
if op == self.STEP:
if not len(cache):
cache.append(self._sample())
cache.append(self._sample())
self.__worker_pipe.send(cache.popleft())
cache.append(self._sample())
elif op == self.EXIT:
self.__worker_pipe.close()
return
elif op == self.NETWORK:
self._network = data
else:
raise NotImplementedError
def _transition(self):
raise NotImplementedError
def _set_up(self):
pass
def step(self):
self.__pipe.send([self.STEP, None])
return self.__pipe.recv()
def close(self):
self.__pipe.send([self.EXIT, None])
self.__pipe.close()
def set_network(self, net):
if not self.config.async_actor:
self._network = net
else:
self.__pipe.send([self.NETWORK, net])
| #######################################################################
# Copyright (C) 2017 <NAME>(<EMAIL>) #
# Permission given to modify the code as long as you keep this #
# declaration at the top #
#######################################################################
import os
import torch
import numpy as np
from ..utils import *
import torch.multiprocessing as mp
from collections import deque
from skimage.io import imsave
class BaseAgent:
def __init__(self, config):
self.config = config
self.logger = get_logger(tag=config.tag, log_level=config.log_level)
self.task_ind = 0
def close(self):
close_obj(self.task)
def save(self, filename):
os.makedirs(filename, exist_ok=True)
torch.save(self.network.state_dict(), '%s/agent.model' % (filename))
with open('%s/agent.stats' % (filename), 'wb') as f:
pickle.dump(self.config.state_normalizer.state_dict(), f)
def load(self, filename):
state_dict = torch.load('%s/agent.model' % filename, map_location=lambda storage, loc: storage)
self.network.load_state_dict(state_dict)
with open('%s/agent.stats' % (filename), 'rb') as f:
self.config.state_normalizer.load_state_dict(pickle.load(f))
def save_data(self, log_data, ep, save_tag, eval=True):
states, features, extra = log_data
states = np.stack(states)
features = np.stack(features)
if len(extra) > 0:
extra = np.stack(extra)
else:
extra = None
print ('states', states.shape)
print ('features', features.shape)
print ('tag', save_tag)
if eval:
mode = 'eval'
else:
mode = 'train'
path = save_tag+'_{}'.format(self.total_steps)
os.makedirs(path, exist_ok=True)
np.save(path+'/{}_ep_{}_states.npy'.format(mode, ep), states)
np.save(path+'/{}_ep_{}_features.npy'.format(mode, ep), features)
def eval_step(self, state):
raise NotImplementedError
def eval_with_record(self, state):
raise NotImplementedError
def eval_and_record_episode(self, training=False):
if training is False:
env = self.config.eval_env
else:
env = self.task
state = env.reset()
states_save = []
features_save = []
extra_save = []
while True:
action, features, extra = self.eval_with_record(state)
states_save.append(state[0])
features_save.append(features)
extra_save.append(extra)
state, reward, done, info = env.step(action)
ret = info[0]['episodic_return']
if ret is not None:
break
return ret, (states_save, features_save, extra_save)
def eval_episode(self):
env = self.config.eval_env
state = env.reset()
while True:
action = self.eval_step(state)
state, reward, done, info = env.step(action)
ret = info[0]['episodic_return']
if ret is not None:
break
return ret
    def eval_episodes(self, save_tag):
        """Run config.eval_episodes evaluation episodes; log and return the mean return.

        With config.record_eval_npy set, each episode is recorded and dumped
        via `save_data`; with config.record_train also set, an extra recorded
        episode runs on the training task and its return replaces the eval
        return that gets averaged — NOTE(review): possibly unintended.
        """
        episodic_returns = []
        for ep in range(self.config.eval_episodes):
            if self.config.record_eval_npy:
                total_rewards, log_data = self.eval_and_record_episode()
                self.save_data(log_data, ep, save_tag)
                if self.config.record_train:
                    self.switch_task()
                    total_rewards, log_data = self.eval_and_record_episode(training=True)
                    self.save_data(log_data, ep, save_tag, eval=False)
            else:
                total_rewards = self.eval_episode()
            episodic_returns.append(np.sum(total_rewards))
        # Mean return with its standard error in parentheses.
        self.logger.info('steps %d, episodic_return_test %.2f(%.2f)' % (
            self.total_steps, np.mean(episodic_returns), np.std(episodic_returns) / np.sqrt(len(episodic_returns))
        ))
        self.logger.add_scalar('episodic_return_test', np.mean(episodic_returns), self.total_steps)
        return {
            'episodic_return_test': np.mean(episodic_returns),
        }
    def record_online_return(self, info, offset=0):
        """Log episodic returns reported by the (possibly vectorized) env.

        *info* may be a single info dict, or a tuple of per-env dicts — each
        is then logged recursively with its env index as the step *offset*.
        """
        if isinstance(info, dict):
            ret = info['episodic_return']
            if ret is not None:
                self.logger.add_scalar('episodic_return_train', ret, self.total_steps + offset)
                self.logger.info('steps %d, episodic_return_train %s' % (self.total_steps + offset, ret))
        elif isinstance(info, tuple):
            for i, info_ in enumerate(info):
                self.record_online_return(info_, i)
        elif isinstance(info[0], dict):
            # e.g. a list of dicts: deliberately not logged here.
            pass
        else:
            raise NotImplementedError
def switch_task(self):
config = self.config
if not config.tasks:
return
segs = np.linspace(0, config.max_steps, len(config.tasks) + 1)
if self.total_steps > segs[self.task_ind + 1]:
self.task_ind += 1
self.task = config.tasks[self.task_ind]
self.states = self.task.reset()
self.states = config.state_normalizer(self.states)
    def record_episode(self, dir, env):
        """Roll out one episode in *env*, saving one rendered frame per step into *dir*."""
        mkdir(dir)  # project helper: create the directory if missing
        steps = 0
        state = env.reset()
        while True:
            self.record_obs(env, dir, steps)
            action = self.record_step(state)
            state, reward, done, info = env.step(action)
            ret = info[0]['episodic_return']
            steps += 1
            # Stop once the env reports a finished episode.
            if ret is not None:
                break
    def record_step(self, state):
        """Return the action to take in *state* while recording; subclasses must override."""
        raise NotImplementedError
    # For DMControl
    def record_obs(self, env, dir, steps):
        """Render the current frame of the unwrapped env and save it as a zero-padded PNG."""
        env = env.env.envs[0]  # unwrap the vectorized wrapper to the raw env
        obs = env.render(mode='rgb_array')
        imsave('%s/%04d.png' % (dir, steps), obs)
class BaseActor(mp.Process):
    """Transition sampler that runs either in-process or as a worker process."""
    # Opcodes exchanged over the parent<->worker pipe.
    STEP = 0
    RESET = 1
    EXIT = 2
    SPECS = 3
    NETWORK = 4
    CACHE = 5
    def __init__(self, config):
        """Set up the actor; in synchronous mode bypass the process machinery.

        When config.async_actor is False, start/close become no-ops and
        step() samples directly in the caller's process.
        """
        mp.Process.__init__(self)
        self.config = config
        self.__pipe, self.__worker_pipe = mp.Pipe()  # parent end / worker end
        self._state = None
        self._task = None
        self._network = None
        self._total_steps = 0
        self.__cache_len = 2
        if not config.async_actor:
            # Synchronous fallback: no subprocess, sample in the caller's process.
            self.start = lambda: None
            self.step = self._sample
            self.close = lambda: None
            self._set_up()
            self._task = config.task_fn()
def _sample(self):
transitions = []
for _ in range(self.config.sgd_update_frequency):
transition = self._transition()
if transition is not None:
transitions.append(transition)
return transitions
    def run(self):
        """Worker-process loop: serve STEP/NETWORK/EXIT requests over the pipe.

        A two-element cache of pre-sampled batches hides sampling latency:
        each STEP replies with the oldest cached batch and refills the cache.
        """
        self._set_up()
        config = self.config
        self._task = config.task_fn()
        cache = deque([], maxlen=2)
        while True:
            op, data = self.__worker_pipe.recv()
            if op == self.STEP:
                if not len(cache):
                    # First request: prime the cache with two batches.
                    cache.append(self._sample())
                    cache.append(self._sample())
                self.__worker_pipe.send(cache.popleft())
                cache.append(self._sample())
            elif op == self.EXIT:
                self.__worker_pipe.close()
                return
            elif op == self.NETWORK:
                self._network = data
            else:
                raise NotImplementedError
    def _transition(self):
        """Produce one environment transition (or None); subclasses must override."""
        raise NotImplementedError
    def _set_up(self):
        """Hook for subclass initialization inside the worker; default no-op."""
        pass
    def step(self):
        """(Async mode) request one sampled batch from the worker process."""
        self.__pipe.send([self.STEP, None])
        return self.__pipe.recv()
    def close(self):
        """(Async mode) tell the worker to exit and close the parent pipe end."""
        self.__pipe.send([self.EXIT, None])
        self.__pipe.close()
    def set_network(self, net):
        """Hand the latest network to the actor (directly, or via the pipe in async mode)."""
        if not self.config.async_actor:
            self._network = net
        else:
            self.__pipe.send([self.NETWORK, net])
data/fcedataloader.py | microsoft/aaai21-copy-that | 7 | 6619232 | <filename>data/fcedataloader.py
import logging
from typing import Iterator, List, Tuple, NamedTuple
from dpu_utils.utils import RichPath
from data.edits import Edit
def load_data_from(file: RichPath) -> Iterator[Edit]:
    """Yield Edit samples from a tab-separated FCE file, skipping no-op or tiny edits."""
    num_excluded_samples = 0
    with open(file.to_local_path().path) as f:
        for row_idx, line in enumerate(f):
            start, end, src, tgt, error_type, sentence = line.split('\t')
            start, end = int(start), int(end)
            tokens = sentence.lower().split()
            src_tokens = src.lower().split()
            tgt_tokens = tgt.lower().split()
            # The annotated span must match the words it claims to replace.
            assert tokens[start:end] == src_tokens
            rewritten = tokens[:start] + tgt_tokens + tokens[end:]
            # Exclude identity edits and sentences shorter than two tokens.
            if rewritten == tokens or len(tokens) < 2 or len(rewritten) < 2:
                num_excluded_samples += 1
                continue
            yield Edit(
                input_sequence=tokens,
                output_sequence=rewritten,
                edit_type=error_type,
                provenance=f'row{row_idx}'
            )
    logging.warning('Removed %s samples because before/after sentence was identical or too small.', num_excluded_samples)
import logging
from typing import Iterator, List, Tuple, NamedTuple
from dpu_utils.utils import RichPath
from data.edits import Edit
def load_data_from(file: RichPath) -> Iterator[Edit]:
num_excluded_samples = 0
with open(file.to_local_path().path) as f:
for i, row in enumerate(f):
edit_start_idx, edit_end_idx, source_words, target_words, error_type, sentence = row.split('\t')
edit_start_idx, edit_end_idx = int(edit_start_idx), int(edit_end_idx)
sentence = sentence.lower().split()
source_words = source_words.lower().split()
target_words = target_words.lower().split()
assert sentence[edit_start_idx:edit_end_idx] == source_words
output_sequence = sentence[:edit_start_idx] + target_words + sentence[edit_end_idx:]
if sentence == output_sequence:
num_excluded_samples += 1
continue
if len(sentence) < 2 or len(output_sequence) < 2:
num_excluded_samples += 1
continue
yield Edit(
input_sequence=sentence,
output_sequence=output_sequence,
edit_type=error_type,
provenance=f'row{i}'
)
logging.warning('Removed %s samples because before/after sentence was identical or too small.', num_excluded_samples) | none | 1 | 2.473675 | 2 | |
tests/test_reports.py | orchardbirds/skorecard-1 | 0 | 6619233 | from skorecard.bucketers import DecisionTreeBucketer
from skorecard.reporting import build_bucket_table
import numpy as np
import pandas as pd
def test_report_decision_tree(df):
    """Test the reporting module.

    Buckets two columns with a DecisionTreeBucketer and checks that
    build_bucket_table returns the expected shape, labels, counts and
    column set for LIMIT_BAL.
    """
    X = df[["LIMIT_BAL", "BILL_AMT1"]]
    y = df["default"]
    tbt = DecisionTreeBucketer(max_n_bins=4, min_bin_size=0.1, variables=["LIMIT_BAL", "BILL_AMT1"])
    tbt.fit(X, y)
    tbt.transform(X)
    df_out = build_bucket_table(X, y, column="LIMIT_BAL", bucketer=tbt)
    # 5 bucket rows (one is empty) x 9 summary columns — pinned from the fixture.
    assert df_out.shape == (5, 9)
    assert df_out["label"].to_dict() == tbt.features_bucket_mapping_["LIMIT_BAL"].labels
    expected = pd.DataFrame(
        {"bucket_id": {0: 0, 1: 1, 2: 2, 3: 3, 4: 4}, "Count": {0: 849, 1: 676, 2: 1551, 3: 2924, 4: 0.0}}
    )
    pd.testing.assert_frame_equal(df_out[["bucket_id", "Count"]], expected)
    np.testing.assert_array_equal(
        df_out.columns.ravel(),
        np.array(
            [
                "bucket_id",
                "label",
                "Count",
                "Count (%)",
                "Non-event",
                "Event",
                "Event Rate",
                # "% Event",
                # "% Non Event",
                "WoE",
                "IV",
            ]
        ),
    )
| from skorecard.bucketers import DecisionTreeBucketer
from skorecard.reporting import build_bucket_table
import numpy as np
import pandas as pd
def test_report_decision_tree(df):
"""Test the reporting module."""
X = df[["LIMIT_BAL", "BILL_AMT1"]]
y = df["default"]
tbt = DecisionTreeBucketer(max_n_bins=4, min_bin_size=0.1, variables=["LIMIT_BAL", "BILL_AMT1"])
tbt.fit(X, y)
tbt.transform(X)
df_out = build_bucket_table(X, y, column="LIMIT_BAL", bucketer=tbt)
assert df_out.shape == (5, 9)
assert df_out["label"].to_dict() == tbt.features_bucket_mapping_["LIMIT_BAL"].labels
expected = pd.DataFrame(
{"bucket_id": {0: 0, 1: 1, 2: 2, 3: 3, 4: 4}, "Count": {0: 849, 1: 676, 2: 1551, 3: 2924, 4: 0.0}}
)
pd.testing.assert_frame_equal(df_out[["bucket_id", "Count"]], expected)
np.testing.assert_array_equal(
df_out.columns.ravel(),
np.array(
[
"bucket_id",
"label",
"Count",
"Count (%)",
"Non-event",
"Event",
"Event Rate",
# "% Event",
# "% Non Event",
"WoE",
"IV",
]
),
)
| en | 0.457978 | Test the reporting module. # "% Event", # "% Non Event", | 2.526531 | 3 |
annomathtex/annomathtex/views/helper_classes/formula_concept_handler.py | philsMINT/AnnotaTeX | 3 | 6619234 | <filename>annomathtex/annomathtex/views/helper_classes/formula_concept_handler.py
import logging
logging.basicConfig(level=logging.WARNING)
formula_concept_handler_logger = logging.getLogger(__name__)
class FormulaConceptHandler:
    """
    Prepares the formulae for adding to the formula concepts file.

    The annotations dict may contain a 'global' section (instances keyed by
    token) and a 'local' section (instances keyed by token, then by unique
    id). Formula keys and names use '__EQUALS__' as a placeholder for '='.
    """

    _EQUALS_PLACEHOLDER = '__EQUALS__'

    def __init__(self, annotations):
        self.annotations = annotations

    def _iter_instances(self):
        """Yield (key, instance) pairs from the global and local sections."""
        for key, instance in self.annotations.get('global', {}).items():
            yield key, instance
        for key, by_unique_id in self.annotations.get('local', {}).items():
            for instance in by_unique_id.values():
                yield key, instance

    def extract_formulae(self):
        """Return {tex_string: {'name', 'qid'}} for every 'Formula' annotation."""
        formulae = {}
        for key, instance in self._iter_instances():
            try:
                if instance['type'] != 'Formula':
                    continue
                formulae[key.replace(self._EQUALS_PLACEHOLDER, '=')] = {
                    'name': instance['name'].replace(self._EQUALS_PLACEHOLDER, '='),
                    'qid': instance['qid'],
                }
            except Exception:
                # Best effort: malformed annotation instances are skipped
                # (was a bare `except:`; now explicit and applied uniformly).
                continue
        return formulae

    def add_identifiers(self):
        """Attach each 'Identifier' annotation to the formula it occurs in."""
        formulae = self.extract_formulae()
        for key, instance in self._iter_instances():
            try:
                if instance['type'] != 'Identifier':
                    continue
                math_env = instance['mathEnv']
                entry = {'name': instance['name'], 'qid': instance['qid']}
            except Exception:
                # Skip instances missing 'type'/'mathEnv'/'name'/'qid'.
                continue
            # Identifiers whose surrounding formula was not annotated are dropped.
            if math_env in formulae:
                formulae[math_env].setdefault('identifiers', {})[key] = entry
        return formulae

    def get_formulae(self):
        """Return {formula_name: {'TeXStrings', 'Identifiers', 'qid'}}."""
        formulae = self.add_identifiers()
        reversed_formulae = {}
        for tex_string, data in formulae.items():
            reversed_formulae[data['name']] = {
                'TeXStrings': [tex_string],
                # [] when the formula has no annotated identifiers (legacy shape).
                'Identifiers': data.get('identifiers', []),
                'qid': data['qid'],
            }
        return reversed_formulae
| <filename>annomathtex/annomathtex/views/helper_classes/formula_concept_handler.py
import logging
logging.basicConfig(level=logging.WARNING)
formula_concept_handler_logger = logging.getLogger(__name__)
class FormulaConceptHandler:
"""
Prepares the formulae for adding to the formula concepts file.
"""
def __init__(self, annotations):
self.annotations = annotations
def extract_formulae(self):
formulae = {}
if 'global' in self.annotations:
g = self.annotations['global']
for key in g:
instance = g[key]
#formula_concept_handler_logger.info('INSTANCE: {}'.format(instance))
try:
if instance['type'] == 'Formula':
formulae[key.replace('__EQUALS__', '=')] = {
'name': instance['name'].replace('__EQUALS__', '='),
'qid': instance['qid']
#'sourcesWithNums': instance['sourcesWithNums']
}
except:
#formula_concept_handler_logger.info(instance)
continue
if 'local' in self.annotations:
l = self.annotations['local']
for key in l:
for unique_id in l[key]:
instance = l[key][unique_id]
if instance['type'] == 'Formula':
formulae[key.replace('__EQUALS__', '=')] = {
'name': instance['name'].replace('__EQUALS__', '='),
'qid': instance['qid']
#'sourcesWithNums': instance['sourcesWithNums']
}
return formulae
#todo: simplify
def add_identifiers(self):
formulae = self.extract_formulae()
#formula_concept_handler_logger.info(formulae)
if 'global' in self.annotations:
g = self.annotations['global']
for key in g:
instance = g[key]
#formula_concept_handler_logger.info(instance)
m = instance['mathEnv']
is_identifier = True if instance['type'] == 'Identifier' else False
if m in formulae and is_identifier:
if 'identifiers' in formulae[m]:
#formulae[m]['identifiers'][key] = instance['name']
formulae[m]['identifiers'][key] = {'name': instance['name'], 'qid': instance['qid']}
else:
#formulae[m]['identifiers'] = {key: instance['name']}
formulae[m]['identifiers'] = {key: {'name': instance['name'], 'qid': instance['qid']}}
if 'local' in self.annotations:
l = self.annotations['local']
for key in l:
for unique_id in l[key]:
instance = l[key][unique_id]
m = instance['mathEnv']
is_identifier = True if instance['type'] == 'Identifier' else False
if m in formulae and is_identifier:
if 'identifiers' in formulae[m]:
#formulae[m]['identifiers'][key] = instance['name']
formulae[m]['identifiers'][key] = {'name': instance['name'], 'qid': instance['qid']}
else:
#formulae[m]['identifiers'] = {key: instance['name']}
formulae[m]['identifiers'] = {key: {'name': instance['name'], 'qid': instance['qid']} }
return formulae
def get_formulae(self):
formulae = self.add_identifiers()
reversed_formulae = {}
for formula_string in formulae:
#formula_concept_handler_logger.info(formulae[formula_string])
name = formulae[formula_string]['name']
identifiers = []
if 'identifiers' in formulae[formula_string]:
identifiers = formulae[formula_string]['identifiers']
qid = formulae[formula_string]['qid']
reversed_formulae[name] = {'TeXStrings': [formula_string],
'Identifiers': identifiers,
'qid': qid}
return reversed_formulae
| en | 0.277366 | Prepares the formulae for adding to the formula concepts file. #formula_concept_handler_logger.info('INSTANCE: {}'.format(instance)) #'sourcesWithNums': instance['sourcesWithNums'] #formula_concept_handler_logger.info(instance) #'sourcesWithNums': instance['sourcesWithNums'] #todo: simplify #formula_concept_handler_logger.info(formulae) #formula_concept_handler_logger.info(instance) #formulae[m]['identifiers'][key] = instance['name'] #formulae[m]['identifiers'] = {key: instance['name']} #formulae[m]['identifiers'][key] = instance['name'] #formulae[m]['identifiers'] = {key: instance['name']} #formula_concept_handler_logger.info(formulae[formula_string]) | 2.037249 | 2 |
orchestrator/core/orc_server/command/views/stats.py | patconnole/openc2-oif-orchestrator | 2 | 6619235 | <filename>orchestrator/core/orc_server/command/views/stats.py
from ..models import SentHistory, ResponseHistory
def app_stats():
return dict(
sent=SentHistory.objects.count(),
responses=ResponseHistory.objects.count()
)
| <filename>orchestrator/core/orc_server/command/views/stats.py
from ..models import SentHistory, ResponseHistory
def app_stats():
return dict(
sent=SentHistory.objects.count(),
responses=ResponseHistory.objects.count()
)
| none | 1 | 1.631331 | 2 | |
main.py | TrymDev/saberhook | 1 | 6619236 | <reponame>TrymDev/saberhook
import requests
from time import sleep
recentsPlays_webhook = "https://discord.com/api/webhooks/"
top_webhook = "https://discord.com/api/webhooks/"
scoresaber_id = "76561198272483934"
response = requests.get(f"https://scoresaber.com/api/player/{scoresaber_id}/scores?limit=1&sort=recent&withMetadata=true").json()
last_recent = (response["playerScores"][0]["score"]["timeSet"])
response2 = requests.get(f"https://scoresaber.com/api/player/{scoresaber_id}/basic").json()
last_ppcount = (response2["pp"])
print(f"PP - {last_ppcount}PP")
print(f"Last Play - {last_recent}")
def post_topPlays():
    """Build and post an embed of the player's top plays to the Discord webhook.

    Pulls the top 8 scores from the ScoreSaber API, derives weighted pp and
    accuracy for each, and decorates the embed with a random image.
    """
    plays = requests.get(f"https://scoresaber.com/api/player/{scoresaber_id}/scores?limit=8&sort=top&withMetadata=true").json()
    fields = []
    # One embed field per play. Iterating replaces the old copy-pasted
    # weight_N/perc_N variables and no longer crashes with an IndexError
    # when the API returns fewer than 8 scores.
    for entry in plays["playerScores"]:
        score = entry["score"]
        board = entry["leaderboard"]
        weighted_pp = round(score["pp"] * score["weight"], 3)
        accuracy = round(score["baseScore"] / board["maxScore"] * 100, 2)
        fields.append({
            "name": f'{board["songName"]} | {board["stars"]}☆ - {accuracy}%',
            "value": f'{score["pp"]}pp ({weighted_pp}pp)',
        })
    random_waifu = requests.get("https://api.waifu.im/sfw/waifu/").json()["images"][0]["url"]
    payload = {
        "embeds": [
            {
                "title": "TOP PLAYS",
                "color": 0x00f7ff,
                "description": "",
                "timestamp": "",
                "url": "",
                "author": {
                    "name": "",
                    "url": ""
                },
                "image": {"url": random_waifu},
                "thumbnail": {},
                "footer": {},
                "fields": fields
            }
        ],
    }
    hook_response = requests.post(top_webhook, json=payload)
    print(hook_response.text)
def _rank_field(score):
    """Embed field for the play's leaderboard rank."""
    return {"name": "Rank", "value": "#" + str(score["rank"])}


def _miss_field(score):
    """Embed field for missed notes ('FC' when the play was a full combo)."""
    misses = score["missedNotes"]
    return {"name": "Missed Notes", "value": "FC ✅" if misses == 0 else str(misses) + "❌"}


# Poll ScoreSaber once per second and announce any newly-set score.
while True:
    r = requests.get(f"https://scoresaber.com/api/player/{scoresaber_id}/scores?limit=1&sort=recent&withMetadata=true").json()
    play = r["playerScores"][0]
    score = play["score"]
    board = play["leaderboard"]
    if score["timeSet"] != last_recent:
        last_recent = score["timeSet"]
        if not board["ranked"]:
            # NOTE(review): on a miss the original code showed a fixed cover
            # image instead of the song's own cover — preserved; confirm intent.
            thumbnail = board["coverImage"] if score["missedNotes"] == 0 else "https://cdn.scoresaber.com/covers/85F2204FE701F2E88AAF29331009446687A9BCB6.png"
            embed = {
                "title": board["songName"],
                "color": 8382010,
                "description": "good job uwu (unranked)",
                "timestamp": score["timeSet"],
                "url": "",
                "author": {
                    "name": "",
                    "url": ""
                },
                "image": {},
                "thumbnail": {"url": thumbnail},
                "footer": {},
                "fields": [
                    _rank_field(score),
                    _miss_field(score),
                    {"name": "Score", "value": score["baseScore"]}
                ]
            }
        else:
            acc = round(score["baseScore"] / board["maxScore"] * 100, 2)
            weighted = round(score["pp"] * score["weight"], 3)
            basic = requests.get(f"https://scoresaber.com/api/player/{scoresaber_id}/basic").json()
            added_pp = round(basic["pp"] - last_ppcount, 3)
            # Bug fix: track the new pp total so the next play's delta is
            # per-play rather than cumulative since start-up.
            last_ppcount = basic["pp"]
            embed = {
                "title": f'{board["songName"]} {board["stars"]}☆ - {acc}%',
                "color": 8382010,
                "description": "good job uwu (ranked)",
                "timestamp": score["timeSet"],
                "url": "",
                "author": {
                    "name": "",
                    "url": ""
                },
                "image": {},
                "thumbnail": {"url": board["coverImage"]},
                "footer": {},
                "fields": [
                    _rank_field(score),
                    _miss_field(score),
                    {"name": "PP", "value": str(round(score["pp"], 3)) + " pp🍆"},
                    {"name": "WEIGHTED PP", "value": "+" + str(weighted) + " pp" + " (+" + str(added_pp) + ")"}
                ]
            }
        payload = {"username": "", "avatar_url": "", "content": "", "embeds": [embed]}
        hook_response = requests.post(recentsPlays_webhook, json=payload)
        print(hook_response.text)
        if board["ranked"]:
            # A new ranked score can reshuffle the top plays, so refresh that embed too.
            post_topPlays()
    else:
        print("no new scores sadge")
    sleep(1)
| import requests
from time import sleep
recentsPlays_webhook = "https://discord.com/api/webhooks/"
top_webhook = "https://discord.com/api/webhooks/"
scoresaber_id = "76561198272483934"
response = requests.get(f"https://scoresaber.com/api/player/{scoresaber_id}/scores?limit=1&sort=recent&withMetadata=true").json()
last_recent = (response["playerScores"][0]["score"]["timeSet"])
response2 = requests.get(f"https://scoresaber.com/api/player/{scoresaber_id}/basic").json()
last_ppcount = (response2["pp"])
print(f"PP - {last_ppcount}PP")
print(f"Last Play - {last_recent}")
def post_topPlays():
plays = requests.get(f"https://scoresaber.com/api/player/{scoresaber_id}/scores?limit=8&sort=top&withMetadata=true").json()
weight_1 = round(((plays["playerScores"][0]["score"]["pp"]) * (plays["playerScores"][0]["score"]["weight"])), 3)
perc_1 = round((plays["playerScores"][0]["score"]["baseScore"]) / (plays["playerScores"][0]["leaderboard"]["maxScore"])*100, 2)
weight_2 = round(((plays["playerScores"][1]["score"]["pp"]) * (plays["playerScores"][1]["score"]["weight"])), 3)
perc_2 = round((plays["playerScores"][1]["score"]["baseScore"]) / (plays["playerScores"][1]["leaderboard"]["maxScore"])*100, 2)
weight_3 = round(((plays["playerScores"][2]["score"]["pp"]) * (plays["playerScores"][2]["score"]["weight"])), 3)
perc_3 = round((plays["playerScores"][2]["score"]["baseScore"]) / (plays["playerScores"][2]["leaderboard"]["maxScore"])*100, 2)
weight_4 = round(((plays["playerScores"][3]["score"]["pp"]) * (plays["playerScores"][3]["score"]["weight"])), 3)
perc_4 = round((plays["playerScores"][3]["score"]["baseScore"]) / (plays["playerScores"][3]["leaderboard"]["maxScore"])*100, 2)
weight_5 = round(((plays["playerScores"][4]["score"]["pp"]) * (plays["playerScores"][4]["score"]["weight"])), 3)
perc_5 = round((plays["playerScores"][4]["score"]["baseScore"]) / (plays["playerScores"][4]["leaderboard"]["maxScore"])*100, 2)
weight_6 = round(((plays["playerScores"][5]["score"]["pp"]) * (plays["playerScores"][5]["score"]["weight"])), 3)
perc_6 = round((plays["playerScores"][5]["score"]["baseScore"]) / (plays["playerScores"][5]["leaderboard"]["maxScore"])*100, 2)
weight_7 = round(((plays["playerScores"][6]["score"]["pp"]) * (plays["playerScores"][6]["score"]["weight"])), 3)
perc_7 = round((plays["playerScores"][6]["score"]["baseScore"]) / (plays["playerScores"][6]["leaderboard"]["maxScore"])*100, 2)
weight_8 = round(((plays["playerScores"][7]["score"]["pp"]) * (plays["playerScores"][7]["score"]["weight"])), 3)
perc_8 = round((plays["playerScores"][7]["score"]["baseScore"]) / (plays["playerScores"][7]["leaderboard"]["maxScore"])*100, 2)
randomWaifu_R = requests.get("https://api.waifu.im/sfw/waifu/").json()
randomWaifu = (randomWaifu_R["images"][0]["url"])
headers = {
"embeds": [
{
"title": "TOP PLAYS",
"color": 0x00f7ff,
"description": "",
"timestamp": "",
"url": "",
"author": {
"name": "",
"url": ""
},
"image": {"url": randomWaifu},
"thumbnail": {},
"footer": {},
"fields": [
{
"name": (plays["playerScores"][0]["leaderboard"]["songName"]) + " | " + str((plays["playerScores"][0]["leaderboard"]["stars"])) + "☆ - " + str(perc_1) + "%",
"value": str((plays["playerScores"][0]["score"]["pp"])) + "pp" + " (" + str(weight_1) +"pp)"
},
{
"name": (plays["playerScores"][1]["leaderboard"]["songName"]) + " | " + str((plays["playerScores"][1]["leaderboard"]["stars"])) + "☆ - " + str(perc_2) + "%",
"value": str((plays["playerScores"][1]["score"]["pp"])) + "pp" + " (" + str(weight_2) +"pp)"
},
{
"name": (plays["playerScores"][2]["leaderboard"]["songName"]) + " | " + str((plays["playerScores"][2]["leaderboard"]["stars"])) + "☆ - " + str(perc_3) + "%",
"value": str((plays["playerScores"][2]["score"]["pp"])) + "pp" + " (" + str(weight_3) +"pp)"
},
{
"name": (plays["playerScores"][3]["leaderboard"]["songName"]) + " | " + str((plays["playerScores"][3]["leaderboard"]["stars"])) + "☆ - " + str(perc_4) + "%",
"value": str((plays["playerScores"][3]["score"]["pp"])) + "pp" + " (" + str(weight_4) +"pp)"
},
{
"name": (plays["playerScores"][4]["leaderboard"]["songName"]) + " | " + str((plays["playerScores"][4]["leaderboard"]["stars"])) + "☆ - " + str(perc_5) + "%",
"value": str((plays["playerScores"][4]["score"]["pp"])) + "pp" + " (" + str(weight_5) +"pp)"
},
{
"name": (plays["playerScores"][5]["leaderboard"]["songName"]) + " | " + str((plays["playerScores"][5]["leaderboard"]["stars"])) + "☆ - " + str(perc_6) + "%",
"value": str((plays["playerScores"][5]["score"]["pp"])) + "pp" + " (" + str(weight_6) +"pp)"
},
{
"name": (plays["playerScores"][6]["leaderboard"]["songName"]) + " | " + str((plays["playerScores"][6]["leaderboard"]["stars"])) + "☆ - " + str(perc_7) + "%",
"value": str((plays["playerScores"][6]["score"]["pp"])) + "pp" + " (" + str(weight_7) +"pp)"
},
{
"name": (plays["playerScores"][7]["leaderboard"]["songName"]) + " | " + str((plays["playerScores"][7]["leaderboard"]["stars"])) + "☆ - " + str(perc_8) + "%",
"value": str((plays["playerScores"][7]["score"]["pp"])) + "pp" + " (" + str(weight_8) +"pp)"
}
]
}
],
}
hook_response = requests.post(top_webhook, json=headers)
print(hook_response.text)
while True:
r = requests.get(f"https://scoresaber.com/api/player/{scoresaber_id}/scores?limit=1&sort=recent&withMetadata=true").json()
if (r["playerScores"][0]["score"]["timeSet"]) != last_recent:
last_recent = (r["playerScores"][0]["score"]["timeSet"])
if (r["playerScores"][0]["leaderboard"]["ranked"]) == False:
if (r["playerScores"][0]["score"]["missedNotes"]) == 0:
headers = {
"username": "",
"avatar_url": "",
"content": "",
"embeds": [
{
"title": (r["playerScores"][0]["leaderboard"]["songName"]),
"color": 8382010,
"description": "good job uwu (unranked)",
"timestamp": (r["playerScores"][0]["score"]["timeSet"]),
"url": "",
"author": {
"name": "",
"url": ""
},
"image": {},
"thumbnail": {"url": (r["playerScores"][0]["leaderboard"]["coverImage"])},
"footer": {},
"fields": [
{
"name": "Rank",
"value": "#" + str((r["playerScores"][0]["score"]["rank"]))
},
{
"name": "Missed Notes",
"value": "FC ✅"
},
{
"name": "Score",
"value": (r["playerScores"][0]["score"]["baseScore"])
}
]
}
],
}
else:
headers = {
"username": "",
"avatar_url": "",
"content": "",
"embeds": [
{
"title": (r["playerScores"][0]["leaderboard"]["songName"]),
"color": 8382010,
"description": "good job uwu (unranked)",
"timestamp": (r["playerScores"][0]["score"]["timeSet"]),
"url": "",
"author": {
"name": "",
"url": ""
},
"image": {},
"thumbnail": {"url": "https://cdn.scoresaber.com/covers/85F2204FE701F2E88AAF29331009446687A9BCB6.png"},
"footer": {},
"fields": [
{
"name": "Rank",
"value": "#" + str((r["playerScores"][0]["score"]["rank"]))
},
{
"name": "Missed Notes",
"value": str((r["playerScores"][0]["score"]["missedNotes"])) + "❌"
},
{
"name": "Score",
"value": (r["playerScores"][0]["score"]["baseScore"])
}
]
}
],
}
hook_response = requests.post(recentsPlays_webhook, json=headers)
print(hook_response.text)
else:
acc = round(((r["playerScores"][0]["score"]["baseScore"]) / (r["playerScores"][0]["leaderboard"]["maxScore"]))*100, 2)
weighted = round(((r["playerScores"][0]["score"]["pp"]) * (r["playerScores"][0]["score"]["weight"])), 3)
accinfo_response = requests.get(f"https://scoresaber.com/api/player/{scoresaber_id}/basic").json()
added_pp = round((accinfo_response["pp"]) - last_ppcount, 3)
if (r["playerScores"][0]["score"]["missedNotes"]) == 0:
headers = {
"username": "",
"avatar_url": "",
"content": "",
"embeds": [
{
"title": (r["playerScores"][0]["leaderboard"]["songName"]) + " " + str((r["playerScores"][0]["leaderboard"]["stars"])) + "☆" + " - " + str(acc) + "%",
"color": 8382010,
"description": "good job uwu (ranked)",
"timestamp": (r["playerScores"][0]["score"]["timeSet"]),
"url": "",
"author": {
"name": "",
"url": ""
},
"image": {},
"thumbnail": {"url": (r["playerScores"][0]["leaderboard"]["coverImage"])},
"footer": {},
"fields": [
{
"name": "Rank",
"value": "#" + str((r["playerScores"][0]["score"]["rank"]))
},
{
"name": "Missed Notes",
"value": "FC ✅"
},
{
"name": "PP",
"value": str(round((r["playerScores"][0]["score"]["pp"]), 3)) + " pp🍆"
},
{
"name": "WEIGHTED PP",
"value": "+" + str(weighted) + " pp" + " (+" + str(added_pp) + ")"
}
]
}
],
}
else:
headers = {
"username": "",
"avatar_url": "",
"content": "",
"embeds": [
{
"title": (r["playerScores"][0]["leaderboard"]["songName"]) + " " + str((r["playerScores"][0]["leaderboard"]["stars"])) + "☆" + " - " + str(acc) + "%",
"color": 8382010,
"description": "good job uwu (ranked)",
"timestamp": (r["playerScores"][0]["score"]["timeSet"]),
"url": "",
"author": {
"name": "",
"url": ""
},
"image": {},
"thumbnail": {"url": (r["playerScores"][0]["leaderboard"]["coverImage"])},
"footer": {},
"fields": [
{
"name": "Rank",
"value": "#" + str((r["playerScores"][0]["score"]["rank"]))
},
{
"name": "Missed Notes",
"value": str((r["playerScores"][0]["score"]["missedNotes"])) + "❌"
},
{
"name": "PP",
"value": str(round((r["playerScores"][0]["score"]["pp"]), 3)) + " pp🍆"
},
{
"name": "WEIGHTED PP",
"value": "+" + str(weighted) + " pp" + " (+" + str(added_pp) + ")"
}
]
}
],
}
hook_response = requests.post(recentsPlays_webhook, json=headers)
print(hook_response.text)
post_topPlays()
else:
print("no new scores sadge")
sleep(1) | none | 1 | 2.867197 | 3 | |
red_color_extract.py | drishtim17/supervisedML | 0 | 6619237 | #!/usr/bin/python3
import cv2
import time
#image read hogi
img1=cv2.imread('redhat.jpg')
#printing the shape of the images(rows,col,color(3))
print(img1.shape)
a=img1.shape
time.sleep(5)
print(img1)
#extracting only red colour
#(jis par range aplly krni hai,(starting range of colour),(ending range))
red=cv2.inRange(img1,(0,0,0),(255,40,40))
cv2.imshow("original",img1)
cv2.imshow("only red",red)
#It will hold the image on the screen
cv2.waitKey(0)
#close the image
cv2.destroyAllWindows()
| #!/usr/bin/python3
import cv2
import time
#image read hogi
img1=cv2.imread('redhat.jpg')
#printing the shape of the images(rows,col,color(3))
print(img1.shape)
a=img1.shape
time.sleep(5)
print(img1)
#extracting only red colour
#(jis par range aplly krni hai,(starting range of colour),(ending range))
red=cv2.inRange(img1,(0,0,0),(255,40,40))
cv2.imshow("original",img1)
cv2.imshow("only red",red)
#It will hold the image on the screen
cv2.waitKey(0)
#close the image
cv2.destroyAllWindows()
| en | 0.445737 | #!/usr/bin/python3 #image read hogi #printing the shape of the images(rows,col,color(3)) #extracting only red colour #(jis par range aplly krni hai,(starting range of colour),(ending range)) #It will hold the image on the screen #close the image | 3.47643 | 3 |
run.py | GochoMugo/remindme | 17 | 6619238 | #!/usr/bin/env python
import remindme
remindme.cli.run()
| #!/usr/bin/env python
import remindme
remindme.cli.run()
| ru | 0.26433 | #!/usr/bin/env python | 1.129475 | 1 |
section-0/1_variables_methods.py | LBenzahia/rest_api_flask | 0 | 6619239 | a = 5
b = 9
my_variable = 125
my_10_variable = 10
string_variable = "Hi Lakhdar!"
single_quotes = 'String can have signle quates'
print(my_variable)
print(string_variable)
## Methods
def my_print_method(my_argument):
print(my_argument)
def my_multiply_method(num_one, num_two):
return num_one * num_two
result = my_multiply_method(10, 3)
print(result)
my_print_method(my_multiply_method(10, 3))
| a = 5
b = 9
my_variable = 125
my_10_variable = 10
string_variable = "Hi Lakhdar!"
single_quotes = 'String can have signle quates'
print(my_variable)
print(string_variable)
## Methods
def my_print_method(my_argument):
print(my_argument)
def my_multiply_method(num_one, num_two):
return num_one * num_two
result = my_multiply_method(10, 3)
print(result)
my_print_method(my_multiply_method(10, 3))
| en | 0.228583 | ## Methods | 3.560511 | 4 |
lol.py | Steffo99/royal-bot-vecchio | 1 | 6619240 | import requests
import filemanager
lolkey = filemanager.readfile("lolapi.txt")
def getchampionstaticdata(cid, extra=None):
parametri = {
'api_key': lolkey,
'region': "euw",
'locale': "it_IT",
'id': cid,
'champData': extra,
}
r = requests.get("https://global.api.pvp.net/api/lol/static-data/euw/v1.2/champion/" + str(cid),
params=parametri).json()
return r
def getfreerotation():
parametri = {
'freeToPlay': 'true',
'region': "euw",
'api_key': lolkey
}
r = requests.get("https://euw.api.pvp.net/api/lol/euw/v1.2/champion", params=parametri).json()
return r['champions']
def getmatchlist(sid):
parametri = {
'region': "euw",
'api_key': lolkey,
}
r = requests.get("https://euw.api.pvp.net/api/lol/euw/v2.2/matchlist/by-summoner/" + str(sid), params=parametri)\
.json()
return r
| import requests
import filemanager
lolkey = filemanager.readfile("lolapi.txt")
def getchampionstaticdata(cid, extra=None):
parametri = {
'api_key': lolkey,
'region': "euw",
'locale': "it_IT",
'id': cid,
'champData': extra,
}
r = requests.get("https://global.api.pvp.net/api/lol/static-data/euw/v1.2/champion/" + str(cid),
params=parametri).json()
return r
def getfreerotation():
parametri = {
'freeToPlay': 'true',
'region': "euw",
'api_key': lolkey
}
r = requests.get("https://euw.api.pvp.net/api/lol/euw/v1.2/champion", params=parametri).json()
return r['champions']
def getmatchlist(sid):
parametri = {
'region': "euw",
'api_key': lolkey,
}
r = requests.get("https://euw.api.pvp.net/api/lol/euw/v2.2/matchlist/by-summoner/" + str(sid), params=parametri)\
.json()
return r
| none | 1 | 2.733835 | 3 | |
SBaaS_quantification/stage01_quantification_MQResultsTable_query.py | dmccloskey/SBaaS_quantification | 0 | 6619241 | #lims
from .lims_quantitationMethod_postgresql_models import *
from SBaaS_LIMS.lims_experiment_postgresql_models import *
from SBaaS_LIMS.lims_sample_postgresql_models import *
from .stage01_quantification_MQResultsTable_postgresql_models import *
from .stage01_quantification_analysis_postgresql_models import data_stage01_quantification_analysis
from SBaaS_base.sbaas_base_query_update import sbaas_base_query_update
from SBaaS_base.sbaas_base_query_drop import sbaas_base_query_drop
from SBaaS_base.sbaas_base_query_initialize import sbaas_base_query_initialize
from SBaaS_base.sbaas_base_query_insert import sbaas_base_query_insert
from SBaaS_base.sbaas_base_query_select import sbaas_base_query_select
from SBaaS_base.sbaas_base_query_delete import sbaas_base_query_delete
from SBaaS_base.sbaas_template_query import sbaas_template_query
#resources
from listDict.listDict import listDict
class stage01_quantification_MQResultsTable_query(sbaas_template_query):
def initialize_supportedTables(self):
'''Set the supported tables dict for
'''
tables_supported = {'data_stage01_quantification_mqresultstable':data_stage01_quantification_MQResultsTable,
};
self.set_supportedTables(tables_supported);
def initialize_dataStage01_quantification_MQResultsTable(self,
tables_I = [],):
try:
if not tables_I:
tables_I = list(self.get_supportedTables().keys());
queryinitialize = sbaas_base_query_initialize(session_I=self.session,engine_I=self.engine,settings_I=self.settings,data_I=self.data);
for table in tables_I:
model_I = self.convert_tableString2SqlalchemyModel(table);
queryinitialize.initialize_table_sqlalchemyModel(model_I);
except Exception as e:
print(e);
def drop_dataStage01_quantification_MQResultsTable(self,
tables_I = [],):
try:
if not tables_I:
tables_I = list(self.get_supportedTables().keys());
querydrop = sbaas_base_query_drop(session_I=self.session,engine_I=self.engine,settings_I=self.settings,data_I=self.data);
for table in tables_I:
model_I = self.convert_tableString2SqlalchemyModel(table);
querydrop.drop_table_sqlalchemyModel(model_I);
except Exception as e:
print(e);
def reset_dataStage01_quantification_MQResultsTable(self,
component_name,sample_name,acquisition_date_and_time,
tables_I = [],
warn_I=True):
try:
if not tables_I:
tables_I = list(self.get_supportedTables().keys());
querydelete = sbaas_base_query_delete(session_I=self.session,engine_I=self.engine,settings_I=self.settings,data_I=self.data);
for table in tables_I:
query = {};
query['delete_from'] = [{'table_name':table}];
query['where'] = [{
'table_name':table,
'column_name':'component_name',
'value':analysis_id_I,
'operator':'LIKE',
'connector':'AND'
},{
'table_name':table,
'column_name':'sample_name',
'value':analysis_id_I,
'operator':'LIKE',
'connector':'AND'
},{
'table_name':table,
'column_name':'acquisition_date_and_time',
'value':analysis_id_I,
'operator':'LIKE',
'connector':'AND'
},
];
table_model = self.convert_tableStringList2SqlalchemyModelDict([table]);
query = querydelete.make_queryFromString(table_model,query);
querydelete.reset_table_sqlalchemyModel(query_I=query,warn_I=warn_I);
except Exception as e:
print(e);
def add_dataStage01MQResultsTable(self,data_I):
'''add rows of data_stage01_quantification_MQResultsTable'''
if data_I:
cnt = 0;
for d in data_I:
try:
if 'Index' in d:
d['index_']=d['Index'];
d['sample_index']=d['Sample Index'];
d['original_filename']=d['Original Filename'];
d['sample_name']=d['Sample Name'];
d['sample_id']=d['Sample ID'];
d['sample_comment']=d['Sample Comment'];
d['sample_type']=d['Sample Type'];
d['acquisition_date_and_time']=d['Acquisition Date & Time'];
d['rack_number']=d['Rack Number'];
d['plate_number']=d['Plate Number'];
d['vial_number']=d['Vial Number'];
d['dilution_factor']=d['Dilution Factor'];
d['injection_volume']=d['Injection Volume'];
d['operator_name']=d['Operator Name'];
d['acq_method_name']=d['Acq. Method Name'];
d['is_']=d['IS'];
d['component_name']=d['Component Name'];
d['component_index']=d['Component Index'];
d['component_comment']=d['Component Comment'];
d['is_comment']=d['IS Comment'];
d['mass_info']=d['Mass Info'];
d['is_mass']=d['IS Mass Info'];
d['is_name']=d['IS Name'];
d['component_group_name']=d['Component Group Name'];
d['conc_units']=d['Conc. Units'];
d['failed_query']=d['Failed Query'];
d['is_failed_query']=d['IS Failed Query'];
d['peak_comment']=d['Peak Comment'];
d['is_peak_comment']=d['IS Peak Comment'];
d['actual_concentration']=d['Actual Concentration'];
d['is_actual_concentration']=d['IS Actual Concentration'];
d['concentration_ratio']=d['Concentration Ratio'];
d['expected_rt']=d['Expected RT'];
d['is_expected_rt']=d['IS Expected RT'];
d['integration_type']=d['Integration Type'];
d['is_integration_type']=d['IS Integration Type'];
d['area']=d['Area'];
d['is_area']=d['IS Area'];
d['corrected_area']=d['Corrected Area'];
d['is_corrected_area']=d['IS Corrected Area'];
d['area_ratio']=d['Area Ratio'];
d['height']=d['Height'];
d['is_height']=d['IS Height'];
d['corrected_height']=d['Corrected Height'];
d['is_corrected_height']=d['IS Corrected Height'];
d['height_ratio']=d['Height Ratio'];
d['area_2_height']=d['Area / Height'];
d['is_area_2_height']=d['IS Area / Height'];
d['corrected_area2height']=d['Corrected Area/Height'];
d['is_corrected_area2height']=d['IS Corrected Area/Height'];
d['region_height']=d['Region Height'];
d['is_region_height']=d['IS Region Height'];
d['quality']=d['Quality'];
d['is_quality']=d['IS Quality'];
d['retention_time']=d['Retention Time'];
d['is_retention_time']=d['IS Retention Time'];
d['start_time']=d['Start Time'];
d['is_start_time']=d['IS Start Time'];
d['end_time']=d['End Time'];
d['is_end_time']=d['IS End Time'];
d['total_width']=d['Total Width'];
d['is_total_width']=d['IS Total Width'];
d['width_at_50']=d['Width at 50%'];
d['is_width_at_50']=d['IS Width at 50%'];
d['signal_2_noise']=d['Signal / Noise'];
d['is_signal_2_noise']=d['IS Signal / Noise'];
d['baseline_delta_2_height']=d['Baseline Delta / Height'];
d['is_baseline_delta_2_height']=d['IS Baseline Delta / Height'];
d['modified_']=d['Modified'];
d['relative_rt']=d['Relative RT'];
d['used_']=d['Used'];
d['calculated_concentration']=d['Calculated Concentration'];
d['accuracy_']=d['Accuracy'];
d['comment_']=d['Comment'];
d['use_calculated_concentration']=d['Use_Calculated_Concentration'];
d['start_time_at_5']=d['Start Time at 5%'];
d['end_time_at_5']=d['End Time at 5%'];
d['width_at_5']=d['Width at 5%'];
d['start_time_at_10']=d['Start Time at 10%'];
d['end_time_at_10']=d['End Time at 10%'];
d['width_at_10']=d['Width at 10%'];
d['slope_of_baseline']=d['Slope of Baseline'];
d['tailing_factor']=d['Tailing Factor'];
d['asymmetry_factor']=d['Asymmetry Factor'];
d['ion_ratio']=d['Ion Ratio'];
d['expected_ion_ratio']=d['Expected Ion Ratio'];
d['points_across_baseline']=d['Points Across Baseline'];
d['points_across_half_height']=d['Points Across Half Height'];
data_add = data_stage01_quantification_MQResultsTable(d
#d['Index'],
#d['Sample Index'],
#d['Original Filename'],
#d['Sample Name'],
#d['Sample ID'],
#d['Sample Comment'],
#d['Sample Type'],
#d['Acquisition Date & Time'],
#d['Rack Number'],
#d['Plate Number'],
#d['Vial Number'],
#d['Dilution Factor'],
#d['Injection Volume'],
#d['Operator Name'],
#d['Acq. Method Name'],
#d['IS'],
#d['Component Name'],
#d['Component Index'],
#d['Component Comment'],
#d['IS Comment'],
#d['Mass Info'],
#d['IS Mass Info'],
#d['IS Name'],
#d['Component Group Name'],
#d['Conc. Units'],
#d['Failed Query'],
#d['IS Failed Query'],
#d['Peak Comment'],
#d['IS Peak Comment'],
#d['Actual Concentration'],
#d['IS Actual Concentration'],
#d['Concentration Ratio'],
#d['Expected RT'],
#d['IS Expected RT'],
#d['Integration Type'],
#d['IS Integration Type'],
#d['Area'],
#d['IS Area'],
#d['Corrected Area'],
#d['IS Corrected Area'],
#d['Area Ratio'],
#d['Height'],
#d['IS Height'],
#d['Corrected Height'],
#d['IS Corrected Height'],
#d['Height Ratio'],
#d['Area / Height'],
#d['IS Area / Height'],
#d['Corrected Area/Height'],
#d['IS Corrected Area/Height'],
#d['Region Height'],
#d['IS Region Height'],
#d['Quality'],
#d['IS Quality'],
#d['Retention Time'],
#d['IS Retention Time'],
#d['Start Time'],
#d['IS Start Time'],
#d['End Time'],
#d['IS End Time'],
#d['Total Width'],
#d['IS Total Width'],
#d['Width at 50%'],
#d['IS Width at 50%'],
#d['Signal / Noise'],
#d['IS Signal / Noise'],
#d['Baseline Delta / Height'],
#d['IS Baseline Delta / Height'],
#d['Modified'],
#d['Relative RT'],
#d['Used'],
#d['Calculated Concentration'],
#d['Accuracy'],
#d['Comment'],
#d['Use_Calculated_Concentration']
);
elif 'index_' in d:
data_add = data_stage01_quantification_MQResultsTable(d
#d['index_'],
#d['sample_index'],
#d['original_filename'],
#d['sample_name'],
#d['sample_id'],
#d['sample_comment'],
#d['sample_type'],
#d['acquisition_date_and_time'],
#d['rack_number'],
#d['plate_number'],
#d['vial_number'],
#d['dilution_factor'],
#d['injection_volume'],
#d['operator_name'],
#d['acq_method_name'],
#d['is_'],
#d['component_name'],
#d['component_index'],
#d['component_comment'],
#d['is_comment'],
#d['mass_info'],
#d['is_mass'],
#d['is_name'],
#d['component_group_name'],
#d['conc_units'],
#d['failed_query'],
#d['is_failed_query'],
#d['peak_comment'],
#d['is_peak_comment'],
#d['actual_concentration'],
#d['is_actual_concentration'],
#d['concentration_ratio'],
#d['expected_rt'],
#d['is_expected_rt'],
#d['integration_type'],
#d['is_integration_type'],
#d['area'],
#d['is_area'],
#d['corrected_area'],
#d['is_corrected_area'],
#d['area_ratio'],
#d['height'],
#d['is_height'],
#d['corrected_height'],
#d['is_corrected_height'],
#d['height_ratio'],
#d['area_2_height'],
#d['is_area_2_height'],
#d['corrected_area2height'],
#d['is_corrected_area2height'],
#d['region_height'],
#d['is_region_height'],
#d['quality'],
#d['is_quality'],
#d['retention_time'],
#d['is_retention_time'],
#d['start_time'],
#d['is_start_time'],
#d['end_time'],
#d['is_end_time'],
#d['total_width'],
#d['is_total_width'],
#d['width_at_50'],
#d['is_width_at_50'],
#d['signal_2_noise'],
#d['is_signal_2_noise'],
#d['baseline_delta_2_height'],
#d['is_baseline_delta_2_height'],
#d['modified_'],
#d['relative_rt'],
#d['used_'],
#d['calculated_concentration'],
#d['accuracy_'],
#d['comment_'],
#d['use_calculated_concentration'],
);
self.session.add(data_add);
cnt = cnt + 1;
if cnt > 1000:
self.session.commit();
cnt = 0;
except IntegrityError as e:
print(e);
except SQLAlchemyError as e:
print(e);
self.session.commit();
    def update_dataStage01MQResultsTable(self,data_I):
        '''Update rows of data_stage01_quantification_MQResultsTable.

        data_I: list of row dicts keyed by the raw MultiQuant report headers
        ('Component Name', 'Sample Name', 'Acquisition Date & Time', ...).
        Each existing row is matched on component_name + sample_name +
        acquisition_date_and_time, and every column is overwritten from the
        corresponding report header.  Database errors are printed per-row.
        '''
        if data_I:
            for d in data_I:
                try:
                    # match the row by its natural key, then bulk-update all
                    # columns from the report-header-keyed dict
                    data_update = self.session.query(data_stage01_quantification_MQResultsTable).filter(
                            data_stage01_quantification_MQResultsTable.component_name.like(d['Component Name']),
                            data_stage01_quantification_MQResultsTable.sample_name.like(d['Sample Name']),
                            data_stage01_quantification_MQResultsTable.acquisition_date_and_time == d['Acquisition Date & Time']).update(
                            {'index_':d['Index'],
                            'sample_index':d['Sample Index'],
                            'original_filename':d['Original Filename'],
                            'sample_name':d['Sample Name'],
                            'sample_id':d['Sample ID'],
                            'sample_comment':d['Sample Comment'],
                            'sample_type':d['Sample Type'],
                            'acquisition_date_and_time':d['Acquisition Date & Time'],
                            'rack_number':d['Rack Number'],
                            'plate_number':d['Plate Number'],
                            'vial_number':d['Vial Number'],
                            'dilution_factor':d['Dilution Factor'],
                            'injection_volume':d['Injection Volume'],
                            'operator_name':d['Operator Name'],
                            'acq_method_name':d['Acq. Method Name'],
                            'is_':d['IS'],
                            'component_name':d['Component Name'],
                            'component_index':d['Component Index'],
                            'component_comment':d['Component Comment'],
                            'is_comment':d['IS Comment'],
                            'mass_info':d['Mass Info'],
                            'is_mass':d['IS Mass Info'],
                            'is_name':d['IS Name'],
                            'component_group_name':d['Component Group Name'],
                            'conc_units':d['Conc. Units'],
                            'failed_query':d['Failed Query'],
                            'is_failed_query':d['IS Failed Query'],
                            'peak_comment':d['Peak Comment'],
                            'is_peak_comment':d['IS Peak Comment'],
                            'actual_concentration':d['Actual Concentration'],
                            'is_actual_concentration':d['IS Actual Concentration'],
                            'concentration_ratio':d['Concentration Ratio'],
                            'expected_rt':d['Expected RT'],
                            'is_expected_rt':d['IS Expected RT'],
                            'integration_type':d['Integration Type'],
                            'is_integration_type':d['IS Integration Type'],
                            'area':d['Area'],
                            'is_area':d['IS Area'],
                            'corrected_area':d['Corrected Area'],
                            'is_corrected_area':d['IS Corrected Area'],
                            'area_ratio':d['Area Ratio'],
                            'height':d['Height'],
                            'is_height':d['IS Height'],
                            'corrected_height':d['Corrected Height'],
                            'is_corrected_height':d['IS Corrected Height'],
                            'height_ratio':d['Height Ratio'],
                            'area_2_height':d['Area / Height'],
                            'is_area_2_height':d['IS Area / Height'],
                            'corrected_area2height':d['Corrected Area/Height'],
                            'is_corrected_area2height':d['IS Corrected Area/Height'],
                            'region_height':d['Region Height'],
                            'is_region_height':d['IS Region Height'],
                            'quality':d['Quality'],
                            'is_quality':d['IS Quality'],
                            'retention_time':d['Retention Time'],
                            'is_retention_time':d['IS Retention Time'],
                            'start_time':d['Start Time'],
                            'is_start_time':d['IS Start Time'],
                            'end_time':d['End Time'],
                            'is_end_time':d['IS End Time'],
                            'total_width':d['Total Width'],
                            'is_total_width':d['IS Total Width'],
                            'width_at_50':d['Width at 50%'],
                            'is_width_at_50':d['IS Width at 50%'],
                            'signal_2_noise':d['Signal / Noise'],
                            'is_signal_2_noise':d['IS Signal / Noise'],
                            'baseline_delta_2_height':d['Baseline Delta / Height'],
                            'is_baseline_delta_2_height':d['IS Baseline Delta / Height'],
                            'modified_':d['Modified'],
                            'relative_rt':d['Relative RT'],
                            'used_':d['Used'],
                            'calculated_concentration':d['Calculated Concentration'],
                            'accuracy_':d['Accuracy'],
                            'comment_':d['Comment'],
                            'use_calculated_concentration':d['Use_Calculated_Concentration'],
                            'start_time_at_5':d['Start Time at 5%'],
                            'end_time_at_5':d['End Time at 5%'],
                            'width_at_5':d['Width at 5%'],
                            'start_time_at_10':d['Start Time at 10%'],
                            'end_time_at_10':d['End Time at 10%'],
                            'width_at_10':d['Width at 10%'],
                            'slope_of_baseline':d['Slope of Baseline'],
                            'tailing_factor':d['Tailing Factor'],
                            'asymmetry_factor':d['Asymmetry Factor'],
                            'ion_ratio':d['Ion Ratio'],
                            'expected_ion_ratio':d['Expected Ion Ratio'],
                            'points_across_baseline':d['Points Across Baseline'],
                            'points_across_half_height':d['Points Across Half Height'],},
                            synchronize_session=False);
                except SQLAlchemyError as e:
                    print(e);
            # single commit after all rows have been updated
            self.session.commit();
# query data from data_stage01_quantification_mqresultstable
# no other table dependencies
def get_peakHeight_sampleNameAndComponentName(self,sample_name_I,component_name_I):
'''Query peak height from sample name and component name
NOTE: intended to be used within a for loop'''
try:
data = self.session.query(data_stage01_quantification_MQResultsTable.height).filter(
data_stage01_quantification_MQResultsTable.sample_name.like(sample_name_I),
data_stage01_quantification_MQResultsTable.component_name.like(component_name_I),
data_stage01_quantification_MQResultsTable.used_.is_(True)).all();
if data:
conc_O = data[0][0];
conc_units_O = 'height';
else:
conc_O = None;
conc_units_O = None;
return conc_O, conc_units_O;
except SQLAlchemyError as e:
print(e);
def get_used_sampleNameAndComponentName(self,sample_name_I,component_name_I):
'''Query used from sample name and component name
NOTE: intended to be used within a for loop'''
try:
data = self.session.query(data_stage01_quantification_MQResultsTable.used_).filter(
data_stage01_quantification_MQResultsTable.sample_name.like(sample_name_I),
data_stage01_quantification_MQResultsTable.component_name_name.like(component_name_name_I)).all();
if data:
used_O = data[0];
else: used_O = None;
return used_O;
except SQLAlchemyError as e:
print(e);
    def get_row_sampleNameAndComponentName(self,sample_name_I,component_name_I):
        '''Query the full MQ results-table row for a sample name /
        component name pair (rows flagged as used only).

        Returns the row serialized via the model's __repr__dict__ method, or
        None when no row matches.
        NOTE: intended to be used within a for loop.
        NOTE(review): when several rows match, the loop below overwrites
        used_O each iteration, so only the LAST row is returned — presumably
        a single match is expected; confirm against callers.
        '''
        try:
            data = self.session.query(data_stage01_quantification_MQResultsTable).filter(
                data_stage01_quantification_MQResultsTable.sample_name.like(sample_name_I),
                data_stage01_quantification_MQResultsTable.component_name.like(component_name_I),
                data_stage01_quantification_MQResultsTable.used_.is_(True)).all();
            data_O = {};  # NOTE(review): assigned but never used
            if data:
                for d in data:
                    # __repr__dict__ is presumably a dict serializer defined
                    # on the model class — TODO confirm
                    used_O=d.__repr__dict__();
            else: used_O = None;
            return used_O;
        except SQLAlchemyError as e:
            print(e);
    def get_peakInfo_sampleNameAndComponentName(self,sample_name_I,component_name_I,acquisition_date_and_time_I):
        '''Query peak information (area, height, RT, peak-shape metrics) for
        a sample name / component name pair, restricted to used rows.

        acquisition_date_and_time_I: 2-element sequence [start, stop]; when
        both elements are truthy the query is additionally bounded to that
        acquisition time window, otherwise the window filter is skipped.
        Returns a dict of peak attributes, or None when no row matches.
        NOTE: intended to be used within a for loop.
        NOTE(review): when several rows match, the loop below overwrites
        used_O each iteration, so only the LAST row is returned.
        '''
        try:
            if acquisition_date_and_time_I[0] and acquisition_date_and_time_I[1]:
                # bounded query: restrict to the acquisition time window
                data = self.session.query(data_stage01_quantification_MQResultsTable).filter(
                    data_stage01_quantification_MQResultsTable.sample_name.like(sample_name_I),
                    data_stage01_quantification_MQResultsTable.component_name.like(component_name_I),
                    data_stage01_quantification_MQResultsTable.acquisition_date_and_time>=acquisition_date_and_time_I[0],
                    data_stage01_quantification_MQResultsTable.acquisition_date_and_time<=acquisition_date_and_time_I[1],
                    data_stage01_quantification_MQResultsTable.used_.is_(True)).all();
            else:
                # unbounded query: all used rows for the pair
                data = self.session.query(data_stage01_quantification_MQResultsTable).filter(
                    data_stage01_quantification_MQResultsTable.sample_name.like(sample_name_I),
                    data_stage01_quantification_MQResultsTable.component_name.like(component_name_I),
                    data_stage01_quantification_MQResultsTable.used_.is_(True)).all();
            data_O = {};  # NOTE(review): assigned but never used
            if data:
                for d in data:
                    used_O={'acquisition_date_and_time':d.acquisition_date_and_time,
                            'component_name':d.component_name,
                            'component_group_name':d.component_group_name,
                            'area':d.area,
                            'height':d.height,
                            'retention_time':d.retention_time,
                            'start_time':d.start_time,
                            'end_time':d.end_time,
                            'total_width':d.total_width,
                            'width_at_50':d.width_at_50,
                            'signal_2_noise':d.signal_2_noise,
                            'baseline_delta_2_height':d.baseline_delta_2_height,
                            'relative_rt':d.relative_rt};
            else: used_O = None;
            return used_O;
        except SQLAlchemyError as e:
            print(e);
# delete data from data_stage01_quantification_mqresultstable
# no other table dependencies
def delete_row_sampleName(self,sampleNames_I):
'''Delete specific samples from an experiment by their sample ID from sample_physiologicalparameters'''
deletes = [];
for d in sampleNames_I:
try:
delete = self.session.query(data_stage01_quantification_MQResultsTable).filter(
data_stage01_quantification_MQResultsTable.sample_name.like(d['sample_name'])).delete(
synchronize_session=False);
if delete == 0:
print('row not found')
print(d);
deletes.append(delete);
except SQLAlchemyError as e:
print(e);
self.session.commit();
# query data from data_stage01_quantification_mqresultstable
# requires quantitation_method
def get_concAndConcUnits_sampleNameAndComponentName(self,sample_name_I,component_name_I):
'''Query data (i.e. concentration, area/peak height ratio) from sample name and component name
NOTE: intended to be used within a for loop'''
# check for absolute or relative quantitation (i.e. area/peak height ratio)
try:
use_conc = self.session.query(data_stage01_quantification_MQResultsTable.use_calculated_concentration).filter(
data_stage01_quantification_MQResultsTable.sample_name.like(sample_name_I),
data_stage01_quantification_MQResultsTable.component_name.like(component_name_I),
data_stage01_quantification_MQResultsTable.used_.is_(True)).all();
if use_conc:
use_conc_O = use_conc[0][0];
else:
use_conc_O = None;
except SQLAlchemyError as e:
print(e);
if use_conc_O:
try:
data = self.session.query(data_stage01_quantification_MQResultsTable.calculated_concentration,
data_stage01_quantification_MQResultsTable.conc_units).filter(
data_stage01_quantification_MQResultsTable.sample_name.like(sample_name_I),
data_stage01_quantification_MQResultsTable.component_name.like(component_name_I),
data_stage01_quantification_MQResultsTable.used_.is_(True)).all();
if data:
conc_O = data[0][0];
conc_units_O = data[0][1];
else:
conc_O = None;
conc_units_O = None;
return conc_O, conc_units_O;
except SQLAlchemyError as e:
print(e);
else:
# check for area or peak height ratio from quantitation_method
try:
data = self.session.query(quantitation_method.use_area).filter(
experiment.sample_name.like(sample_name_I),
experiment.quantitation_method_id.like(quantitation_method.id),
quantitation_method.component_name.like(component_name_I)).all();
if data:
ratio_O = data[0][0];
else:
ratio_O = None;
except SQLAlchemyError as e:
print(e);
if ratio_O:
try:
data = self.session.query(data_stage01_quantification_MQResultsTable.area_ratio).filter(
data_stage01_quantification_MQResultsTable.sample_name.like(sample_name_I),
data_stage01_quantification_MQResultsTable.component_name.like(component_name_I),
data_stage01_quantification_MQResultsTable.used_.is_(True)).all();
if data:
conc_O = data[0][0];
conc_units_O = 'area_ratio';
else:
conc_O = None;
conc_units_O = None;
return conc_O, conc_units_O;
except SQLAlchemyError as e:
print(e);
else:
try:
data = self.session.query(data_stage01_quantification_MQResultsTable.height_ratio).filter(
data_stage01_quantification_MQResultsTable.sample_name.like(sample_name_I),
data_stage01_quantification_MQResultsTable.component_name.like(component_name_I),
data_stage01_quantification_MQResultsTable.used_.is_(True)).all();
if data:
conc_O = data[0][0];
conc_units_O = 'height_ratio';
else:
conc_O = None;
conc_units_O = None;
return conc_O, conc_units_O;
except SQLAlchemyError as e:
print(e);
# query component group names from data_stage01_quantification_mqresultstable
def get_componentGroupNames_sampleName(self,sample_name_I):
'''Query component group names that are used from the sample name
NOTE: intended to be used within a for loop'''
try:
component_group_names = self.session.query(data_stage01_quantification_MQResultsTable.component_group_name).filter(
data_stage01_quantification_MQResultsTable.sample_name.like(sample_name_I),
data_stage01_quantification_MQResultsTable.used_.is_(True)).group_by(
data_stage01_quantification_MQResultsTable.component_group_name).order_by(
data_stage01_quantification_MQResultsTable.component_group_name.asc()).all();
component_group_names_O = [];
for cgn in component_group_names: component_group_names_O.append(cgn.component_group_name);
return component_group_names_O;
except SQLAlchemyError as e:
print(e);
def get_componentGroupName_experimentIDAndComponentName(self,experiment_id_I,component_name_I,exp_type_I=4):
'''Query component group names that are used from the component name
NOTE: intended to be used within a for loop'''
try:
component_group_name = self.session.query(data_stage01_quantification_MQResultsTable.component_group_name).filter(
experiment.id.like(experiment_id_I),
experiment.exp_type_id == exp_type_I,
experiment.sample_name.like(data_stage01_quantification_MQResultsTable.sample_name),
data_stage01_quantification_MQResultsTable.component_name.like(component_name_I),
data_stage01_quantification_MQResultsTable.used_.is_(True)).group_by(
data_stage01_quantification_MQResultsTable.component_group_name).all();
if len(component_group_name)>1:
print('more than 1 component_group_name retrieved per component_name')
component_group_name_O = component_group_name[0].component_group_name;
return component_group_name_O;
except SQLAlchemyError as e:
print(e);
# query sample names from data_stage01_quantification_mqresultstable
def get_sampleNames_experimentIDAndSampleType(self,experiment_id_I,sample_type_I,exp_type_I=4):
'''Query sample names (i.e. unknowns) that are used from
the experiment'''
try:
sample_names = self.session.query(data_stage01_quantification_MQResultsTable.sample_name).filter(
data_stage01_quantification_MQResultsTable.sample_type.like(sample_type_I),
experiment.id.like(experiment_id_I),
#experiment.exp_type_id == exp_type_I,
data_stage01_quantification_MQResultsTable.used_.is_(True),
experiment.sample_name.like(data_stage01_quantification_MQResultsTable.sample_name)).group_by(
data_stage01_quantification_MQResultsTable.sample_name).order_by(
data_stage01_quantification_MQResultsTable.sample_name.asc()).all();
sample_names_O = [];
for sn in sample_names: sample_names_O.append(sn.sample_name);
return sample_names_O;
except SQLAlchemyError as e:
print(e);
# query sample names from data_stage01_quantification_mqresultstable
def get_sampleNamesAndSampleIDs_experimentIDAndSampleType(self,experiment_id_I,sample_type_I,exp_type_I=4):
'''Query sample names and sample ids (i.e. unknowns) that are used from
the experiment'''
try:
sample_names = self.session.query(data_stage01_quantification_MQResultsTable.sample_name,
sample.sample_id).filter(
data_stage01_quantification_MQResultsTable.sample_type.like(sample_type_I),
experiment.id.like(experiment_id_I),
#experiment.exp_type_id == exp_type_I,
data_stage01_quantification_MQResultsTable.used_.is_(True),
experiment.sample_name.like(data_stage01_quantification_MQResultsTable.sample_name),
experiment.sample_name.like(sample.sample_name)).group_by(
data_stage01_quantification_MQResultsTable.sample_name,
sample.sample_id).order_by(
data_stage01_quantification_MQResultsTable.sample_name.asc(),
sample.sample_id.asc()).all();
sample_names_O = [];
sample_ids_O = [];
for sn in sample_names:
sample_names_O.append(sn.sample_name);
sample_ids_O.append(sn.sample_id);
return sample_names_O,sample_ids_O;
except SQLAlchemyError as e:
print(e);
def get_sampleNames_experimentIDAndSampleID(self,experiment_id_I,sample_id_I,exp_type_I=4):
'''Query sample names (i.e. unknowns) that are used from
the experiment'''
try:
sample_names = self.session.query(sample.sample_name).filter(
sample.sample_id.like(sample_id_I),
experiment.id.like(experiment_id_I),
#experiment.exp_type_id == exp_type_I,
data_stage01_quantification_MQResultsTable.used_.is_(True),
experiment.sample_name.like(sample.sample_name),
experiment.sample_name.like(data_stage01_quantification_MQResultsTable.sample_name)).group_by(
sample.sample_name).order_by(
sample.sample_name.asc()).all();
sample_names_O = [];
for sn in sample_names: sample_names_O.append(sn.sample_name);
return sample_names_O;
except SQLAlchemyError as e:
print(e);
def get_sampleNames_experimentIDAndSampleIDAndSampleDilution(self,experiment_id_I,sample_id_I,sample_dilution_I,exp_type_I=4):
'''Query sample names (i.e. unknowns) that are used from
the experiment'''
try:
sample_names = self.session.query(sample.sample_name).filter(
sample.sample_id.like(sample_id_I),
sample.sample_dilution == sample_dilution_I,
experiment.id.like(experiment_id_I),
#experiment.exp_type_id == exp_type_I,
data_stage01_quantification_MQResultsTable.used_.is_(True),
experiment.sample_name.like(sample.sample_name),
experiment.sample_name.like(data_stage01_quantification_MQResultsTable.sample_name)).group_by(
sample.sample_name).order_by(
sample.sample_name.asc()).all();
sample_names_O = [];
for sn in sample_names: sample_names_O.append(sn.sample_name);
return sample_names_O;
except SQLAlchemyError as e:
print(e);
def get_sampleNames_experimentIDAndSampleNameShortAndSampleDescription(self,experiment_id_I,sample_name_short_I,sample_decription_I,exp_type_I=4):
'''Query sample names that are used from
the experiment'''
try:
sample_names = self.session.query(sample.sample_name).filter(
sample_description.sample_name_short.like(sample_name_short_I),
sample_description.sample_desc.like(sample_decription_I),
experiment.id.like(experiment_id_I),
experiment.exp_type_id == exp_type_I,
sample.sample_id.like(sample_description.sample_id),
experiment.sample_name.like(sample.sample_name),
experiment.sample_name.like(data_stage01_quantification_MQResultsTable.sample_name),
data_stage01_quantification_MQResultsTable.used_.is_(True)).group_by(
sample.sample_name).order_by(
sample.sample_name.asc()).all();
sample_names_O = [];
for sn in sample_names: sample_names_O.append(sn.sample_name);
return sample_names_O;
except SQLAlchemyError as e:
print(e);
def get_sampleNames_experimentIDAndSampleNameAbbreviationAndSampleDescription(self,experiment_id_I,sample_name_abbreviation_I,sample_decription_I,exp_type_I=4):
'''Query sample names that are used from
the experiment'''
try:
sample_names = self.session.query(sample.sample_name).filter(
sample_description.sample_name_abbreviation.like(sample_name_abbreviation_I),
sample_description.sample_desc.like(sample_decription_I),
experiment.id.like(experiment_id_I),
experiment.exp_type_id == exp_type_I,
sample.sample_id.like(sample_description.sample_id),
experiment.sample_name.like(sample.sample_name),
experiment.sample_name.like(data_stage01_quantification_MQResultsTable.sample_name),
data_stage01_quantification_MQResultsTable.used_.is_(True)).group_by(
sample.sample_name).order_by(
sample.sample_name.asc()).all();
sample_names_O = [];
for sn in sample_names: sample_names_O.append(sn.sample_name);
return sample_names_O;
except SQLAlchemyError as e:
print(e);
def get_sampleNames_experimentIDAndSampleNameAbbreviationAndSampleDilution(self,experiment_id_I,sample_name_abbreviation_I,sample_dilution_I,exp_type_I=4):
'''Query sample names that are used from
the experiment'''
try:
sample_names = self.session.query(sample.sample_name).filter(
sample_description.sample_name_abbreviation.like(sample_name_abbreviation_I),
sample.sample_dilution == sample_dilution_I,
experiment.id.like(experiment_id_I),
experiment.exp_type_id == exp_type_I,
sample.sample_id.like(sample_description.sample_id),
experiment.sample_name.like(sample.sample_name),
experiment.sample_name.like(data_stage01_quantification_MQResultsTable.sample_name),
data_stage01_quantification_MQResultsTable.used_.is_(True)).group_by(
sample.sample_name).order_by(
sample.sample_name.asc()).all();
sample_names_O = [];
for sn in sample_names: sample_names_O.append(sn.sample_name);
return sample_names_O;
except SQLAlchemyError as e:
print(e);
# query sample ids from data_stage01_quantification_mqresultstable
def get_sampleIDs_experimentIDAndSampleType(self,experiment_id_I,sample_type_I,exp_type_I=4):
'''Query sample names (i.e. unknowns) that are used from
the experiment'''
try:
sample_ids = self.session.query(sample.sample_id).filter(
data_stage01_quantification_MQResultsTable.sample_type.like(sample_type_I),
experiment.id.like(experiment_id_I),
experiment.exp_type_id == exp_type_I,
data_stage01_quantification_MQResultsTable.used_.is_(True),
experiment.sample_name.like(sample.sample_name),
experiment.sample_name.like(data_stage01_quantification_MQResultsTable.sample_name)).group_by(
sample.sample_id).order_by(
sample.sample_id.asc()).all();
sample_ids_O = [];
for si in sample_ids: sample_ids_O.append(si.sample_id);
return sample_ids_O;
except SQLAlchemyError as e:
print(e);
def get_sampleIDs_experimentID(self,experiment_id_I,exp_type_I=4):
'''Query sample names that are used from the experiment'''
try:
sample_ids = self.session.query(sample.sample_id).filter(
experiment.id.like(experiment_id_I),
experiment.exp_type_id == exp_type_I,
data_stage01_quantification_MQResultsTable.used_.is_(True),
experiment.sample_name.like(sample.sample_name),
experiment.sample_name.like(data_stage01_quantification_MQResultsTable.sample_name)).group_by(
sample.sample_id).order_by(
sample.sample_id.asc()).all();
sample_ids_O = [];
for si in sample_ids: sample_ids_O.append(si.sample_id);
return sample_ids_O;
except SQLAlchemyError as e:
print(e);
def get_sampleID_experimentIDAndSampleName(self,experiment_id_I,sample_name_I,exp_type_I=4):
'''Query sample names (i.e. unknowns) that are used from
the experiment'''
try:
sample_id = self.session.query(sample.sample_id).filter(
sample.sample_name.like(sample_name_I),
experiment.id.like(experiment_id_I),
experiment.exp_type_id == exp_type_I,
data_stage01_quantification_MQResultsTable.used_.is_(True),
experiment.sample_name.like(sample.sample_name),
experiment.sample_name.like(data_stage01_quantification_MQResultsTable.sample_name)).group_by(
sample.sample_id).all();
sample_id_O = sample_id[0][0];
return sample_id_O;
except SQLAlchemyError as e:
print(e);
# query sample name short from data_stage01_quantification_mqresultstable
def get_sampleNameShort_experimentIDAndSampleType(self,experiment_id_I,sample_type_I,exp_type_I=4):
'''Query sample name short that are used from
the experiment'''
try:
sample_name_short = self.session.query(sample_description.sample_name_short).filter(
sample.sample_type.like(sample_type_I),
experiment.id.like(experiment_id_I),
experiment.exp_type_id == exp_type_I,
data_stage01_quantification_MQResultsTable.used_.is_(True),
experiment.sample_name.like(sample.sample_name),
sample.sample_id.like(sample_description.sample_id),
experiment.sample_name.like(data_stage01_quantification_MQResultsTable.sample_name)).group_by(
sample_description.sample_name_short).order_by(
sample_description.sample_name_short.asc()).all();
sample_name_short_O = [];
for sns in sample_name_short: sample_name_short_O.append(sns.sample_name_short);
return sample_name_short_O;
except SQLAlchemyError as e:
print(e);
    def get_sampleNameShort_experimentIDAndSampleName(self,experiment_id_I,sample_name_I,exp_type_I=4):
        '''Return the short sample name recorded for a single sample name in
        an experiment, restricted to used MQ results rows.

        NOTE(review): this returns ``sample_name_short[0]`` -- the first
        result ROW (a one-element tuple), not the unwrapped string, unlike
        the sibling get_sampleID_experimentIDAndSampleName which does
        ``[0][0]``.  Callers appear to depend on this shape, so it is left
        unchanged -- confirm before normalizing.  Also raises IndexError
        when no row matches.
        '''
        try:
            sample_name_short = self.session.query(sample_description.sample_name_short).filter(
                sample.sample_name.like(sample_name_I),
                experiment.id.like(experiment_id_I),
                experiment.exp_type_id == exp_type_I,
                data_stage01_quantification_MQResultsTable.used_.is_(True),
                experiment.sample_name.like(sample.sample_name),
                sample.sample_id.like(sample_description.sample_id),
                experiment.sample_name.like(data_stage01_quantification_MQResultsTable.sample_name)).group_by(
                sample_description.sample_name_short).all();
            # first row of the result set (see NOTE above)
            sample_name_short_O = sample_name_short[0];
            return sample_name_short_O;
        except SQLAlchemyError as e:
            print(e);
# query sample name abbreviations from data_stage01_quantification_mqresultstable
def get_sampleNameAbbreviations_experimentIDAndSampleType(self,experiment_id_I,sample_type_I,exp_type_I=4):
'''Query sample name abbreviations that are used from
the experiment'''
try:
sample_name_abbreviations = self.session.query(sample_description.sample_name_abbreviation).filter(
sample.sample_type.like(sample_type_I),
experiment.id.like(experiment_id_I),
experiment.exp_type_id == exp_type_I,
data_stage01_quantification_MQResultsTable.used_.is_(True),
experiment.sample_name.like(sample.sample_name),
sample.sample_id.like(sample_description.sample_id),
experiment.sample_name.like(data_stage01_quantification_MQResultsTable.sample_name)).group_by(
sample_description.sample_name_abbreviation).order_by(
sample_description.sample_name_abbreviation.asc()).all();
sample_name_abbreviations_O = [];
for sna in sample_name_abbreviations: sample_name_abbreviations_O.append(sna.sample_name_abbreviation);
return sample_name_abbreviations_O;
except SQLAlchemyError as e:
print(e);
# query dilutions from data_stage01_quantification_mqresultstable
def get_sampleDilution_experimentIDAndSampleID(self,experiment_id_I,sample_id_I,exp_type_I=4):
'''Query dilutions that are used from the experiment'''
try:
sample_dilutions = self.session.query(sample.sample_dilution).filter(
sample.sample_id.like(sample_id_I),
experiment.id.like(experiment_id_I),
experiment.exp_type_id == exp_type_I,
data_stage01_quantification_MQResultsTable.used_.is_(True),
experiment.sample_name.like(sample.sample_name),
experiment.sample_name.like(data_stage01_quantification_MQResultsTable.sample_name)).group_by(
sample.sample_dilution).order_by(
sample.sample_dilution.asc()).all();
sample_dilutions_O = [];
for sd in sample_dilutions: sample_dilutions_O.append(sd.sample_dilution);
return sample_dilutions_O;
except SQLAlchemyError as e:
print(e);
def get_sampleDilution_experimentIDAndSampleNameAbbreviation(self,experiment_id_I,sample_name_abbreviation_I,exp_type_I=4):
'''Query dilutions that are used from the experiment'''
try:
sample_dilutions = self.session.query(sample.sample_dilution).filter(
experiment.id.like(experiment_id_I),
experiment.exp_type_id == exp_type_I,
experiment.sample_name.like(sample.sample_name),
sample.sample_id.like(sample_description.sample_id),
sample_description.sample_name_abbreviation.like(sample_name_abbreviation_I),
data_stage01_quantification_MQResultsTable.used_.is_(True),
experiment.sample_name.like(data_stage01_quantification_MQResultsTable.sample_name)).group_by(
sample.sample_dilution).order_by(
sample.sample_dilution.asc()).all();
sample_dilutions_O = [];
for sd in sample_dilutions: sample_dilutions_O.append(sd.sample_dilution);
return sample_dilutions_O;
except SQLAlchemyError as e:
print(e);
# query time points from data_stage01_quantification_mqresultstable
def get_timePoint_experimentIDAndSampleNameAbbreviation(self,experiment_id_I,sample_name_abbreviation_I,exp_type_I=4):
'''Query time points that are used from the experiment and sample name abbreviation'''
try:
time_points = self.session.query(sample_description.time_point).filter(
sample_description.sample_name_abbreviation.like(sample_name_abbreviation_I),
experiment.id.like(experiment_id_I),
experiment.exp_type_id == exp_type_I,
data_stage01_quantification_MQResultsTable.used_.is_(True),
experiment.sample_name.like(sample.sample_name),
sample.sample_id.like(sample_description.sample_id),
experiment.sample_name.like(data_stage01_quantification_MQResultsTable.sample_name)).group_by(
sample_description.time_point).order_by(
sample_description.time_point.asc()).all();
time_points_O = [];
for tp in time_points: time_points_O.append(tp.time_point);
return time_points_O;
except SQLAlchemyError as e:
print(e);
# query component names from data_stage01_quantification_mqresultstable
def get_componentsNames_experimentIDAndSampleID(self,experiment_id_I,sample_id_I,exp_type_I=4):
'''Query component names that are used and are not IS from
the experiment and sample_id'''
try:
component_names = self.session.query(data_stage01_quantification_MQResultsTable.component_name).filter(
experiment.id.like(experiment_id_I),
experiment.exp_type_id == exp_type_I,
data_stage01_quantification_MQResultsTable.used_.is_(True),
data_stage01_quantification_MQResultsTable.is_.is_(False),
experiment.sample_name.like(sample.sample_name),
sample.sample_id.like(sample_id_I),
experiment.sample_name.like(data_stage01_quantification_MQResultsTable.sample_name)).group_by(
data_stage01_quantification_MQResultsTable.component_name).order_by(
data_stage01_quantification_MQResultsTable.component_name.asc()).all();
component_names_O = [];
for cn in component_names: component_names_O.append(cn.component_name);
return component_names_O;
except SQLAlchemyError as e:
print(e);
def get_componentsNames_experimentIDAndSampleNameAbbreviation(self,experiment_id_I,sample_name_abbreviation_I,exp_type_I=4):
'''Query component names that are used from
the experiment and sample_name_abbreviation'''
try:
component_names = self.session.query(data_stage01_quantification_MQResultsTable.component_name).filter(
sample_description.sample_name_abbreviation.like(sample_name_abbreviation_I),
experiment.id.like(experiment_id_I),
experiment.exp_type_id == exp_type_I,
sample.sample_id.like(sample_description.sample_id),
experiment.sample_name.like(sample.sample_name),
experiment.sample_name.like(data_stage01_quantification_MQResultsTable.sample_name),
data_stage01_quantification_MQResultsTable.used_.is_(True),
data_stage01_quantification_MQResultsTable.is_.is_(False)).group_by(
data_stage01_quantification_MQResultsTable.component_name).order_by(
data_stage01_quantification_MQResultsTable.component_name.asc()).all();
component_names_O = [];
for cn in component_names: component_names_O.append(cn.component_name);
return component_names_O;
except SQLAlchemyError as e:
print(e);
def get_componentsNames_experimentIDAndSampleName(self,experiment_id_I,sample_name_I,exp_type_I=4):
'''Query component names that are used and not internal standards from
the experiment and sample_name'''
try:
component_names = self.session.query(data_stage01_quantification_MQResultsTable.component_name).filter(
experiment.sample_name.like(sample_name_I),
experiment.id.like(experiment_id_I),
experiment.exp_type_id == exp_type_I,
experiment.sample_name.like(data_stage01_quantification_MQResultsTable.sample_name),
data_stage01_quantification_MQResultsTable.used_.is_(True),
data_stage01_quantification_MQResultsTable.is_.is_(False)).group_by(
data_stage01_quantification_MQResultsTable.component_name).order_by(
data_stage01_quantification_MQResultsTable.component_name.asc()).all();
component_names_O = [];
for cn in component_names: component_names_O.append(cn.component_name);
return component_names_O;
except SQLAlchemyError as e:
print(e);
def get_componentsNamesAndComponentGroupNames_experimentIDAndSampleName(self,experiment_id_I,sample_name_I,exp_type_I=4):
'''Query component names that are used and not internal standards from
the experiment and sample_name'''
try:
component_names = self.session.query(data_stage01_quantification_MQResultsTable.component_name,
data_stage01_quantification_MQResultsTable.component_group_name).filter(
experiment.sample_name.like(sample_name_I),
experiment.id.like(experiment_id_I),
experiment.exp_type_id == exp_type_I,
experiment.sample_name.like(data_stage01_quantification_MQResultsTable.sample_name),
data_stage01_quantification_MQResultsTable.used_.is_(True),
data_stage01_quantification_MQResultsTable.is_.is_(False)).group_by(
data_stage01_quantification_MQResultsTable.component_name,
data_stage01_quantification_MQResultsTable.component_group_name).order_by(
data_stage01_quantification_MQResultsTable.component_name.asc(),
data_stage01_quantification_MQResultsTable.component_group_name.asc()).all();
component_names_O = [];
component_group_names_O = [];
for cn in component_names:
component_names_O.append(cn.component_name);
component_group_names_O.append(cn.component_group_name);
return component_names_O,component_group_names_O;
except SQLAlchemyError as e:
print(e);
def get_componentsNames_experimentIDAndSampleType(self,experiment_id_I,sample_type_I):
'''Query component names that are used and not internal standards from
the experiment and sample_name'''
try:
component_names = self.session.query(data_stage01_quantification_MQResultsTable.component_name).filter(
data_stage01_quantification_MQResultsTable.sample_type.like(sample_type_I),
experiment.id.like(experiment_id_I),
experiment.sample_name.like(data_stage01_quantification_MQResultsTable.sample_name),
data_stage01_quantification_MQResultsTable.used_.is_(True),
data_stage01_quantification_MQResultsTable.is_.is_(False)).group_by(
data_stage01_quantification_MQResultsTable.component_name).order_by(
data_stage01_quantification_MQResultsTable.component_name.asc()).all();
component_names_O = [];
for cn in component_names: component_names_O.append(cn.component_name);
return component_names_O;
except SQLAlchemyError as e:
print(e);#,quant_method_id_I
def get_sampleNames_QMethodIDAndComponentNameAndSampleType(self,quantitation_method_id_I,component_name_I,sample_type_I='Standard'):
'''Query sample names (i.e. unknowns) that are used from
the experiment by quantitation_method_id, component_name, and sample_type'''
try:
sample_names = self.session.query(data_stage01_quantification_MQResultsTable.sample_name).filter(
data_stage01_quantification_MQResultsTable.sample_type.like(sample_type_I),
data_stage01_quantification_MQResultsTable.component_name.like(component_name_I),
experiment.quantitation_method_id.like(quantitation_method_id_I),
data_stage01_quantification_MQResultsTable.used_.is_(True),
experiment.sample_name.like(data_stage01_quantification_MQResultsTable.sample_name)).group_by(
data_stage01_quantification_MQResultsTable.sample_name).order_by(
data_stage01_quantification_MQResultsTable.sample_name.asc()).all();
sample_names_O = [];
for sn in sample_names: sample_names_O.append(sn.sample_name);
return sample_names_O;
except SQLAlchemyError as e:
print(e);
    def get_rows_dataStage01QuantificationMQResultsTable(
        self,
        analysis_id_I = [],
        experiment_id_I = [],
        sample_name_I = [],
        sample_id_I = [],
        sample_name_abbreviation_I = [],
        sample_type_I = [],
        component_name_I = [],
        acquisition_date_and_time_I = [],
        ):
        '''Query rows from data_stage01_quantification_MQResultsTable joined
        (on sample_name) to used_ rows of
        data_stage01_quantification_analysis.

        Each *_I argument is an optional list of values restricting the
        corresponding column; an empty list means no restriction.
        acquisition_date_and_time_I, when non-empty with a non-None first
        element, is treated as a two-element [start, end] inclusive range.

        Returns a list of dicts (one per joined row), or None on a
        SQLAlchemyError.

        NOTE(review): all filter values are interpolated directly into the
        SQL text (no bound parameters), so only trusted values may be
        passed in.
        '''
        try:
            # subquery1: the used_ analysis rows, optionally restricted by
            # analysis/experiment/sample identifiers
            subquery1 = '''SELECT
            "data_stage01_quantification_analysis"."analysis_id",
            "data_stage01_quantification_analysis"."experiment_id",
            "data_stage01_quantification_analysis"."sample_name",
            "data_stage01_quantification_analysis"."sample_id",
            "data_stage01_quantification_analysis"."sample_name_short",
            "data_stage01_quantification_analysis"."sample_name_abbreviation",
            "data_stage01_quantification_analysis"."time_point",
            "data_stage01_quantification_analysis"."analysis_type",
            "data_stage01_quantification_analysis"."sample_desc",
            "data_stage01_quantification_analysis"."used_",
            "data_stage01_quantification_analysis"."comment_"
            '''
            subquery1 += '''FROM "data_stage01_quantification_analysis"
            '''
            subquery1 += '''
            WHERE "data_stage01_quantification_analysis"."used_"
            '''
            # append one =ANY(...) clause per supplied filter list
            if analysis_id_I:
                cmd_q = '''AND "data_stage01_quantification_analysis".analysis_id =ANY ('{%s}'::text[]) '''%(
                    self.convert_list2string(analysis_id_I));
                subquery1+=cmd_q;
            if experiment_id_I:
                cmd_q = '''AND "data_stage01_quantification_analysis".experiment_id =ANY ('{%s}'::text[]) '''%(
                    self.convert_list2string(experiment_id_I));
                subquery1+=cmd_q;
            if sample_name_I:
                cmd_q = '''AND "data_stage01_quantification_analysis".sample_name =ANY ('{%s}'::text[]) '''%(
                    self.convert_list2string(sample_name_I));
                subquery1+=cmd_q;
            if sample_id_I:
                cmd_q = '''AND "data_stage01_quantification_analysis".sample_id =ANY ('{%s}'::text[]) '''%(
                    self.convert_list2string(sample_id_I));
                subquery1+=cmd_q;
            if sample_name_abbreviation_I:
                cmd_q = '''AND "data_stage01_quantification_analysis".sample_name_abbreviation =ANY ('{%s}'::text[]) '''%(
                    self.convert_list2string(sample_name_abbreviation_I));
                subquery1+=cmd_q;
            subquery1 += '''
            ORDER BY "data_stage01_quantification_analysis"."analysis_id" ASC,
            "data_stage01_quantification_analysis"."experiment_id" ASC,
            "data_stage01_quantification_analysis"."sample_name" ASC
            '''
            # main query: all mqresultstable columns plus the analysis
            # identifiers from subquery1
            cmd = '''SELECT "data_stage01_quantification_mqresultstable"."id",
            "data_stage01_quantification_mqresultstable"."index_",
            "data_stage01_quantification_mqresultstable"."sample_index",
            "data_stage01_quantification_mqresultstable"."original_filename",
            "data_stage01_quantification_mqresultstable"."sample_name",
            "data_stage01_quantification_mqresultstable"."sample_comment",
            "data_stage01_quantification_mqresultstable"."sample_type",
            "data_stage01_quantification_mqresultstable"."acquisition_date_and_time",
            "data_stage01_quantification_mqresultstable"."rack_number",
            "data_stage01_quantification_mqresultstable"."plate_number",
            "data_stage01_quantification_mqresultstable"."vial_number",
            "data_stage01_quantification_mqresultstable"."dilution_factor",
            "data_stage01_quantification_mqresultstable"."injection_volume",
            "data_stage01_quantification_mqresultstable"."operator_name",
            "data_stage01_quantification_mqresultstable"."acq_method_name",
            "data_stage01_quantification_mqresultstable"."is_",
            "data_stage01_quantification_mqresultstable"."component_name",
            "data_stage01_quantification_mqresultstable"."component_index",
            "data_stage01_quantification_mqresultstable"."component_comment",
            "data_stage01_quantification_mqresultstable"."is_comment",
            "data_stage01_quantification_mqresultstable"."mass_info",
            "data_stage01_quantification_mqresultstable"."is_mass",
            "data_stage01_quantification_mqresultstable"."is_name",
            "data_stage01_quantification_mqresultstable"."component_group_name",
            "data_stage01_quantification_mqresultstable"."conc_units",
            "data_stage01_quantification_mqresultstable"."failed_query",
            "data_stage01_quantification_mqresultstable"."is_failed_query",
            "data_stage01_quantification_mqresultstable"."peak_comment",
            "data_stage01_quantification_mqresultstable"."is_peak_comment",
            "data_stage01_quantification_mqresultstable"."actual_concentration",
            "data_stage01_quantification_mqresultstable"."is_actual_concentration",
            "data_stage01_quantification_mqresultstable"."concentration_ratio",
            "data_stage01_quantification_mqresultstable"."expected_rt",
            "data_stage01_quantification_mqresultstable"."is_expected_rt",
            "data_stage01_quantification_mqresultstable"."integration_type",
            "data_stage01_quantification_mqresultstable"."is_integration_type",
            "data_stage01_quantification_mqresultstable"."area",
            "data_stage01_quantification_mqresultstable"."is_area",
            "data_stage01_quantification_mqresultstable"."corrected_area",
            "data_stage01_quantification_mqresultstable"."is_corrected_area",
            "data_stage01_quantification_mqresultstable"."area_ratio",
            "data_stage01_quantification_mqresultstable"."height",
            "data_stage01_quantification_mqresultstable"."is_height",
            "data_stage01_quantification_mqresultstable"."corrected_height",
            "data_stage01_quantification_mqresultstable"."is_corrected_height",
            "data_stage01_quantification_mqresultstable"."height_ratio",
            "data_stage01_quantification_mqresultstable"."area_2_height",
            "data_stage01_quantification_mqresultstable"."is_area_2_height",
            "data_stage01_quantification_mqresultstable"."corrected_area2height",
            "data_stage01_quantification_mqresultstable"."is_corrected_area2height",
            "data_stage01_quantification_mqresultstable"."region_height",
            "data_stage01_quantification_mqresultstable"."is_region_height",
            "data_stage01_quantification_mqresultstable"."quality",
            "data_stage01_quantification_mqresultstable"."is_quality",
            "data_stage01_quantification_mqresultstable"."retention_time",
            "data_stage01_quantification_mqresultstable"."is_retention_time",
            "data_stage01_quantification_mqresultstable"."start_time",
            "data_stage01_quantification_mqresultstable"."is_start_time",
            "data_stage01_quantification_mqresultstable"."end_time",
            "data_stage01_quantification_mqresultstable"."is_end_time",
            "data_stage01_quantification_mqresultstable"."total_width",
            "data_stage01_quantification_mqresultstable"."is_total_width",
            "data_stage01_quantification_mqresultstable"."width_at_50",
            "data_stage01_quantification_mqresultstable"."is_width_at_50",
            "data_stage01_quantification_mqresultstable"."signal_2_noise",
            "data_stage01_quantification_mqresultstable"."is_signal_2_noise",
            "data_stage01_quantification_mqresultstable"."baseline_delta_2_height",
            "data_stage01_quantification_mqresultstable"."is_baseline_delta_2_height",
            "data_stage01_quantification_mqresultstable"."modified_",
            "data_stage01_quantification_mqresultstable"."relative_rt",
            "data_stage01_quantification_mqresultstable"."used_",
            "data_stage01_quantification_mqresultstable"."calculated_concentration",
            "data_stage01_quantification_mqresultstable"."accuracy_",
            "data_stage01_quantification_mqresultstable"."comment_",
            "data_stage01_quantification_mqresultstable"."use_calculated_concentration",
            "data_stage01_quantification_mqresultstable"."start_time_at_5",
            "data_stage01_quantification_mqresultstable"."end_time_at_5",
            "data_stage01_quantification_mqresultstable"."width_at_5",
            "data_stage01_quantification_mqresultstable"."start_time_at_10",
            "data_stage01_quantification_mqresultstable"."end_time_at_10",
            "data_stage01_quantification_mqresultstable"."width_at_10",
            "data_stage01_quantification_mqresultstable"."slope_of_baseline",
            "data_stage01_quantification_mqresultstable"."tailing_factor",
            "data_stage01_quantification_mqresultstable"."asymmetry_factor",
            "data_stage01_quantification_mqresultstable"."ion_ratio",
            "data_stage01_quantification_mqresultstable"."expected_ion_ratio",
            "data_stage01_quantification_mqresultstable"."points_across_baseline",
            "data_stage01_quantification_mqresultstable"."points_across_half_height",
            "subquery1"."analysis_id",
            "subquery1"."experiment_id",
            "subquery1"."sample_id",
            "subquery1"."sample_name_short",
            "subquery1"."sample_name_abbreviation",
            "subquery1"."time_point",
            "subquery1"."analysis_type",
            "subquery1"."sample_desc"
            '''
            cmd += '''
            FROM "data_stage01_quantification_mqresultstable",
            (%s) AS subquery1
            ''' %(subquery1)
            cmd += '''WHERE "data_stage01_quantification_mqresultstable"."used_"
            AND "subquery1".sample_name = "data_stage01_quantification_mqresultstable"."sample_name"
            '''
            if component_name_I:
                cmd_q = '''AND "data_stage01_quantification_mqresultstable".component_name =ANY ('{%s}'::text[]) '''%(
                    self.convert_list2string(component_name_I));
                cmd+=cmd_q;
            if sample_type_I:
                cmd_q = '''AND "data_stage01_quantification_mqresultstable".sample_type =ANY ('{%s}'::text[]) '''%(
                    self.convert_list2string(sample_type_I));
                cmd+=cmd_q;
            # NOTE(review): the timestamp bounds below are interpolated
            # without surrounding quotes; string timestamps would need to be
            # quoted to form valid SQL -- confirm what callers pass here.
            if acquisition_date_and_time_I and not acquisition_date_and_time_I[0] is None:
                cmd_q = '''AND "data_stage01_quantification_mqresultstable".acquisition_date_and_time >= %s'''%(
                    acquisition_date_and_time_I[0]);
                cmd+=cmd_q;
                cmd_q = '''AND "data_stage01_quantification_mqresultstable".acquisition_date_and_time <= %s'''%(
                    acquisition_date_and_time_I[1]);
                cmd+=cmd_q;
            cmd += '''
            ORDER BY "subquery1"."analysis_id" ASC,
            "subquery1"."experiment_id" ASC,
            "subquery1"."sample_name" ASC,
            "data_stage01_quantification_mqresultstable"."component_name" ASC;
            '''
            result = self.session.execute(cmd);
            data = result.fetchall();
            # convert each RowProxy to a plain dict
            data_O = [dict(d) for d in data];
            return data_O;
        except SQLAlchemyError as e:
            print(e);
# query sample names from data_stage01_quantification_mqresultstable
    def getGroupJoin_experimentAndQuantitationMethodAndMQResultsTable_experimentID_dataStage01QuantificationMQResultsTable(self,
                experiment_id_I,
                sample_types_I=[],
                sample_names_I=[],
                sample_ids_I=[],
                component_names_I=[],
                ):
        '''Query the grouped join of experiment, quantitation_method, and
        the used, non-internal-standard MQ results rows for one experiment.

        The optional *_I lists are applied as post-query filters via
        listDict.filterIn_byDictList (empty list = no restriction).

        NOTE(review): experiment_id_I is interpolated directly into the SQL
        text, so only trusted values may be passed in.

        NOTE(review): when rows are found this returns a listDict object
        (project helper), not a plain list of dicts; when no rows are found
        it returns an empty plain list -- confirm callers handle both.
        '''
        try:
            cmd = '''SELECT quantitation_method.use_area, subquery1.sample_name, subquery1.sample_type,
            subquery1.use_calculated_concentration, subquery1.sample_id, subquery1.component_name,
            subquery1.component_group_name, subquery1.quantitation_method_id, subquery1.acquisition_date_and_time,
            subquery1.calculated_concentration, subquery1.height, subquery1.height_ratio, subquery1.area_ratio, subquery1.conc_units
            FROM quantitation_method, (
            SELECT data_stage01_quantification_mqresultstable.sample_name, data_stage01_quantification_mqresultstable.sample_type,
            data_stage01_quantification_mqresultstable.use_calculated_concentration, sample.sample_id,
            data_stage01_quantification_mqresultstable.component_name, data_stage01_quantification_mqresultstable.component_group_name,
            experiment.quantitation_method_id, data_stage01_quantification_mqresultstable.acquisition_date_and_time,
            data_stage01_quantification_mqresultstable.calculated_concentration, data_stage01_quantification_mqresultstable.height,
            data_stage01_quantification_mqresultstable.height_ratio, data_stage01_quantification_mqresultstable.area_ratio,
            data_stage01_quantification_mqresultstable.conc_units
            FROM data_stage01_quantification_mqresultstable, sample, experiment
            WHERE experiment.id LIKE '%s' AND data_stage01_quantification_mqresultstable.used_ IS true AND data_stage01_quantification_mqresultstable.is_ IS false AND experiment.sample_name LIKE data_stage01_quantification_mqresultstable.sample_name AND experiment.sample_name LIKE sample.sample_name
            GROUP BY data_stage01_quantification_mqresultstable.sample_name, data_stage01_quantification_mqresultstable.sample_type,
            data_stage01_quantification_mqresultstable.use_calculated_concentration, sample.sample_id,
            data_stage01_quantification_mqresultstable.component_name, data_stage01_quantification_mqresultstable.component_group_name,
            experiment.quantitation_method_id, data_stage01_quantification_mqresultstable.acquisition_date_and_time,
            data_stage01_quantification_mqresultstable.calculated_concentration, data_stage01_quantification_mqresultstable.height,
            data_stage01_quantification_mqresultstable.height_ratio, data_stage01_quantification_mqresultstable.area_ratio,
            data_stage01_quantification_mqresultstable.conc_units
            ORDER BY data_stage01_quantification_mqresultstable.sample_name ASC, sample.sample_id ASC, data_stage01_quantification_mqresultstable.component_name ASC, data_stage01_quantification_mqresultstable.component_group_name ASC
            ) subquery1
            WHERE quantitation_method.component_name LIKE subquery1.component_name AND quantitation_method.id LIKE subquery1.quantitation_method_id
            GROUP BY subquery1.sample_name, subquery1.sample_type, subquery1.use_calculated_concentration,
            subquery1.sample_id, subquery1.component_name, subquery1.component_group_name, quantitation_method.use_area, subquery1.quantitation_method_id, subquery1.acquisition_date_and_time,
            subquery1.calculated_concentration, subquery1.height, subquery1.height_ratio, subquery1.area_ratio, subquery1.conc_units
            ORDER BY subquery1.sample_name ASC, subquery1.sample_id ASC, subquery1.component_name ASC, subquery1.component_group_name ASC, subquery1.acquisition_date_and_time ASC
            ''' % (experiment_id_I);
            result = self.session.execute(cmd);
            data = result.fetchall();
            # the equivalent ORM formulation is kept below for reference
            #data = self.session.query(data_stage01_quantification_MQResultsTable.sample_name,
            #        data_stage01_quantification_MQResultsTable.sample_type,
            #        data_stage01_quantification_MQResultsTable.use_calculated_concentration,
            #        sample.sample_id,
            #        data_stage01_quantification_MQResultsTable.component_name,
            #        data_stage01_quantification_MQResultsTable.component_group_name,
            #        #quantitation_method.use_area,
            #        experiment.quantitation_method_id
            #        ).filter(
            #        experiment.id.like(experiment_id_I),
            #        data_stage01_quantification_MQResultsTable.used_.is_(True),
            #        data_stage01_quantification_MQResultsTable.is_.is_(False),
            #        experiment.sample_name.like(data_stage01_quantification_MQResultsTable.sample_name),
            #        experiment.sample_name.like(sample.sample_name),
            #        #data_stage01_quantification_MQResultsTable.component_name.like(quantitation_method.component_name),
            #        #experiment.quantitation_method_id.like(quantitation_method.id)
            #        ).group_by(
            #        data_stage01_quantification_MQResultsTable.sample_name,
            #        data_stage01_quantification_MQResultsTable.sample_type,
            #        data_stage01_quantification_MQResultsTable.use_calculated_concentration,
            #        sample.sample_id,
            #        data_stage01_quantification_MQResultsTable.component_name,
            #        data_stage01_quantification_MQResultsTable.component_group_name,
            #        #quantitation_method.use_area,
            #        experiment.quantitation_method_id
            #        ).order_by(
            #        data_stage01_quantification_MQResultsTable.sample_name.asc(),
            #        sample.sample_id.asc(),
            #        data_stage01_quantification_MQResultsTable.component_name.asc(),
            #        data_stage01_quantification_MQResultsTable.component_group_name.asc()
            #        ).all();
            data_O = [];
            if data:
                # wrap in the project listDict helper and apply the optional
                # in-memory filters
                data_O = listDict(record_I=data);
                data_O.convert_record2DataFrame();
                data_O.filterIn_byDictList({
                    'sample_id':sample_ids_I,
                    'sample_name':sample_names_I,
                    'sample_type':sample_types_I,
                    'component_name':component_names_I,
                    });
            return data_O;
        except SQLAlchemyError as e:
            print(e);
# Join between data_stage01_quantification_mqresultstable and data_stage01_quantification_analysis
    def getRowsJoin_analysisID_dataStage01QuantificationMQResultsTable(self,
                analysis_id_I,
                experiment_ids_I=[],
                sample_types_I=[],
                sample_names_I=[],
                sample_ids_I=[],
                sample_name_shorts_I=[],
                sample_name_abbreviations_I=[],
                component_names_I=[],
                component_group_names_I=[],
                ):
        '''Query used, non-internal-standard MQ results rows for an
        analysis_id, joined (on sample_name) to
        data_stage01_quantification_analysis.

        Returns a list of dicts (via each row's __repr__dict__), sorted by
        acquisition time, sample name, component name, and component group
        name; returns None on a SQLAlchemyError.

        NOTE(review): all of the list parameters after analysis_id_I are
        accepted but never applied as filters in the query below -- confirm
        whether callers expect them to restrict the result set.
        '''
        try:
            data = self.session.query(
                data_stage01_quantification_MQResultsTable,
                #data_stage01_quantification_analysis.experiment_id,
                #data_stage01_quantification_analysis.analysis_id,
                #data_stage01_quantification_analysis.sample_name_short,
                #data_stage01_quantification_analysis.sample_name_abbreviation,
                ).filter(
                data_stage01_quantification_analysis.analysis_id.like(analysis_id_I),
                data_stage01_quantification_analysis.sample_name.like(data_stage01_quantification_MQResultsTable.sample_name),
                data_stage01_quantification_MQResultsTable.used_.is_(True),
                data_stage01_quantification_MQResultsTable.is_.is_(False),
                #).group_by(
                ).order_by(
                data_stage01_quantification_MQResultsTable.acquisition_date_and_time.asc(),
                data_stage01_quantification_MQResultsTable.sample_name.asc(),
                data_stage01_quantification_MQResultsTable.component_name.asc(),
                data_stage01_quantification_MQResultsTable.component_group_name.asc()
                ).all();
            # serialize each ORM row via the project __repr__dict__ helper
            data_O = [d.__repr__dict__() for d in data];
            return data_O
        except SQLAlchemyError as e:
            print(e);
    def getRowsJoin_analysisID_dataStage01QuantificationMQResultsTable_limsQuantitationMethod(self,
        analysis_id_I
        ):
        '''Query mqresultstable and quantitation_method rows by analysis_id

        Resolves the experiments belonging to analysis_id_I, joins their
        'Standard' sample rows from data_stage01_quantification_mqresultstable
        to the matching quantitation_method calibration entries, and returns
        a list of dicts (one per quantitation-method/component/sample row).

        NOTE(review): the SQL is built with %-string interpolation
        (LIKE '%s'), so analysis_id_I is injected unescaped — acceptable
        only for trusted internal callers; consider bound parameters.
        '''
        try:
            # Raw SQL (three nested subqueries):
            #   subquery1: experiment_ids for the analysis
            #   subquery2: 'Standard' mqresultstable rows for those experiments
            #   subquery3/outer: join to quantitation_method by method id + component
            cmd = '''
SELECT subquery3.experiment_id,
    subquery3.quantitation_method_id,
    quantitation_method.q1_mass,
    quantitation_method.q3_mass,
    quantitation_method.met_id,
    quantitation_method.component_name,
    quantitation_method.is_name,
    quantitation_method.fit,
    quantitation_method.weighting,
    quantitation_method.intercept,
    quantitation_method.slope,
    quantitation_method.correlation,
    quantitation_method.use_area,
    quantitation_method.lloq,
    quantitation_method.uloq,
    quantitation_method.points,
    subquery3.sample_name,
    subquery3.component_name,
    subquery3.concentration_ratio,
    subquery3.area_ratio,
    subquery3.height_ratio
FROM quantitation_method, (
    SELECT subquery2.experiment_id,
        subquery2.quantitation_method_id,
        data_stage01_quantification_mqresultstable.sample_name,
        data_stage01_quantification_mqresultstable.component_name,
        data_stage01_quantification_mqresultstable.concentration_ratio,
        data_stage01_quantification_mqresultstable.area_ratio,
        data_stage01_quantification_mqresultstable.height_ratio
    FROM data_stage01_quantification_mqresultstable, (
        SELECT experiment.quantitation_method_id,
            subquery1.experiment_id,
            experiment.sample_name
        FROM experiment, (
            SELECT experiment_id
            FROM data_stage01_quantification_analysis
            WHERE analysis_id LIKE '%s'
            GROUP BY experiment_id
            ORDER BY experiment_id ASC
            ) subquery1
        WHERE experiment.id LIKE subquery1.experiment_id
        GROUP BY experiment.quantitation_method_id,
            subquery1.experiment_id,
            experiment.sample_name
        ORDER BY experiment.quantitation_method_id ASC
        ) subquery2
    WHERE data_stage01_quantification_mqresultstable.sample_type LIKE '%s' AND
        data_stage01_quantification_mqresultstable.sample_name LIKE subquery2.sample_name AND
        NOT (data_stage01_quantification_mqresultstable.is_) AND
        data_stage01_quantification_mqresultstable.used_
    GROUP BY subquery2.experiment_id,
        subquery2.quantitation_method_id,
        data_stage01_quantification_mqresultstable.sample_name,
        data_stage01_quantification_mqresultstable.component_name,
        data_stage01_quantification_mqresultstable.concentration_ratio,
        data_stage01_quantification_mqresultstable.area_ratio,
        data_stage01_quantification_mqresultstable.height_ratio
    ORDER BY subquery2.quantitation_method_id ASC,
        data_stage01_quantification_mqresultstable.component_name ASC,
        data_stage01_quantification_mqresultstable.sample_name ASC
    ) subquery3
WHERE quantitation_method.id LIKE subquery3.quantitation_method_id AND
    subquery3.component_name LIKE quantitation_method.component_name
ORDER BY quantitation_method.id ASC,
    quantitation_method.component_name ASC,
    subquery3.sample_name
''' %(analysis_id_I,'Standard')
            result = self.session.execute(cmd);
            data = result.fetchall();
            # convert each result row to a plain dict
            data_O = [dict(d) for d in data];
            return data_O;
        except SQLAlchemyError as e:
            print(e);
#lims
from .lims_quantitationMethod_postgresql_models import *
from SBaaS_LIMS.lims_experiment_postgresql_models import *
from SBaaS_LIMS.lims_sample_postgresql_models import *
from .stage01_quantification_MQResultsTable_postgresql_models import *
from .stage01_quantification_analysis_postgresql_models import data_stage01_quantification_analysis
from SBaaS_base.sbaas_base_query_update import sbaas_base_query_update
from SBaaS_base.sbaas_base_query_drop import sbaas_base_query_drop
from SBaaS_base.sbaas_base_query_initialize import sbaas_base_query_initialize
from SBaaS_base.sbaas_base_query_insert import sbaas_base_query_insert
from SBaaS_base.sbaas_base_query_select import sbaas_base_query_select
from SBaaS_base.sbaas_base_query_delete import sbaas_base_query_delete
from SBaaS_base.sbaas_template_query import sbaas_template_query
#resources
from listDict.listDict import listDict
class stage01_quantification_MQResultsTable_query(sbaas_template_query):
def initialize_supportedTables(self):
'''Set the supported tables dict for
'''
tables_supported = {'data_stage01_quantification_mqresultstable':data_stage01_quantification_MQResultsTable,
};
self.set_supportedTables(tables_supported);
def initialize_dataStage01_quantification_MQResultsTable(self,
tables_I = [],):
try:
if not tables_I:
tables_I = list(self.get_supportedTables().keys());
queryinitialize = sbaas_base_query_initialize(session_I=self.session,engine_I=self.engine,settings_I=self.settings,data_I=self.data);
for table in tables_I:
model_I = self.convert_tableString2SqlalchemyModel(table);
queryinitialize.initialize_table_sqlalchemyModel(model_I);
except Exception as e:
print(e);
def drop_dataStage01_quantification_MQResultsTable(self,
tables_I = [],):
try:
if not tables_I:
tables_I = list(self.get_supportedTables().keys());
querydrop = sbaas_base_query_drop(session_I=self.session,engine_I=self.engine,settings_I=self.settings,data_I=self.data);
for table in tables_I:
model_I = self.convert_tableString2SqlalchemyModel(table);
querydrop.drop_table_sqlalchemyModel(model_I);
except Exception as e:
print(e);
def reset_dataStage01_quantification_MQResultsTable(self,
component_name,sample_name,acquisition_date_and_time,
tables_I = [],
warn_I=True):
try:
if not tables_I:
tables_I = list(self.get_supportedTables().keys());
querydelete = sbaas_base_query_delete(session_I=self.session,engine_I=self.engine,settings_I=self.settings,data_I=self.data);
for table in tables_I:
query = {};
query['delete_from'] = [{'table_name':table}];
query['where'] = [{
'table_name':table,
'column_name':'component_name',
'value':analysis_id_I,
'operator':'LIKE',
'connector':'AND'
},{
'table_name':table,
'column_name':'sample_name',
'value':analysis_id_I,
'operator':'LIKE',
'connector':'AND'
},{
'table_name':table,
'column_name':'acquisition_date_and_time',
'value':analysis_id_I,
'operator':'LIKE',
'connector':'AND'
},
];
table_model = self.convert_tableStringList2SqlalchemyModelDict([table]);
query = querydelete.make_queryFromString(table_model,query);
querydelete.reset_table_sqlalchemyModel(query_I=query,warn_I=warn_I);
except Exception as e:
print(e);
def add_dataStage01MQResultsTable(self,data_I):
'''add rows of data_stage01_quantification_MQResultsTable'''
if data_I:
cnt = 0;
for d in data_I:
try:
if 'Index' in d:
d['index_']=d['Index'];
d['sample_index']=d['Sample Index'];
d['original_filename']=d['Original Filename'];
d['sample_name']=d['Sample Name'];
d['sample_id']=d['Sample ID'];
d['sample_comment']=d['Sample Comment'];
d['sample_type']=d['Sample Type'];
d['acquisition_date_and_time']=d['Acquisition Date & Time'];
d['rack_number']=d['Rack Number'];
d['plate_number']=d['Plate Number'];
d['vial_number']=d['Vial Number'];
d['dilution_factor']=d['Dilution Factor'];
d['injection_volume']=d['Injection Volume'];
d['operator_name']=d['Operator Name'];
d['acq_method_name']=d['Acq. Method Name'];
d['is_']=d['IS'];
d['component_name']=d['Component Name'];
d['component_index']=d['Component Index'];
d['component_comment']=d['Component Comment'];
d['is_comment']=d['IS Comment'];
d['mass_info']=d['Mass Info'];
d['is_mass']=d['IS Mass Info'];
d['is_name']=d['IS Name'];
d['component_group_name']=d['Component Group Name'];
d['conc_units']=d['Conc. Units'];
d['failed_query']=d['Failed Query'];
d['is_failed_query']=d['IS Failed Query'];
d['peak_comment']=d['Peak Comment'];
d['is_peak_comment']=d['IS Peak Comment'];
d['actual_concentration']=d['Actual Concentration'];
d['is_actual_concentration']=d['IS Actual Concentration'];
d['concentration_ratio']=d['Concentration Ratio'];
d['expected_rt']=d['Expected RT'];
d['is_expected_rt']=d['IS Expected RT'];
d['integration_type']=d['Integration Type'];
d['is_integration_type']=d['IS Integration Type'];
d['area']=d['Area'];
d['is_area']=d['IS Area'];
d['corrected_area']=d['Corrected Area'];
d['is_corrected_area']=d['IS Corrected Area'];
d['area_ratio']=d['Area Ratio'];
d['height']=d['Height'];
d['is_height']=d['IS Height'];
d['corrected_height']=d['Corrected Height'];
d['is_corrected_height']=d['IS Corrected Height'];
d['height_ratio']=d['Height Ratio'];
d['area_2_height']=d['Area / Height'];
d['is_area_2_height']=d['IS Area / Height'];
d['corrected_area2height']=d['Corrected Area/Height'];
d['is_corrected_area2height']=d['IS Corrected Area/Height'];
d['region_height']=d['Region Height'];
d['is_region_height']=d['IS Region Height'];
d['quality']=d['Quality'];
d['is_quality']=d['IS Quality'];
d['retention_time']=d['Retention Time'];
d['is_retention_time']=d['IS Retention Time'];
d['start_time']=d['Start Time'];
d['is_start_time']=d['IS Start Time'];
d['end_time']=d['End Time'];
d['is_end_time']=d['IS End Time'];
d['total_width']=d['Total Width'];
d['is_total_width']=d['IS Total Width'];
d['width_at_50']=d['Width at 50%'];
d['is_width_at_50']=d['IS Width at 50%'];
d['signal_2_noise']=d['Signal / Noise'];
d['is_signal_2_noise']=d['IS Signal / Noise'];
d['baseline_delta_2_height']=d['Baseline Delta / Height'];
d['is_baseline_delta_2_height']=d['IS Baseline Delta / Height'];
d['modified_']=d['Modified'];
d['relative_rt']=d['Relative RT'];
d['used_']=d['Used'];
d['calculated_concentration']=d['Calculated Concentration'];
d['accuracy_']=d['Accuracy'];
d['comment_']=d['Comment'];
d['use_calculated_concentration']=d['Use_Calculated_Concentration'];
d['start_time_at_5']=d['Start Time at 5%'];
d['end_time_at_5']=d['End Time at 5%'];
d['width_at_5']=d['Width at 5%'];
d['start_time_at_10']=d['Start Time at 10%'];
d['end_time_at_10']=d['End Time at 10%'];
d['width_at_10']=d['Width at 10%'];
d['slope_of_baseline']=d['Slope of Baseline'];
d['tailing_factor']=d['Tailing Factor'];
d['asymmetry_factor']=d['Asymmetry Factor'];
d['ion_ratio']=d['Ion Ratio'];
d['expected_ion_ratio']=d['Expected Ion Ratio'];
d['points_across_baseline']=d['Points Across Baseline'];
d['points_across_half_height']=d['Points Across Half Height'];
data_add = data_stage01_quantification_MQResultsTable(d
#d['Index'],
#d['Sample Index'],
#d['Original Filename'],
#d['Sample Name'],
#d['Sample ID'],
#d['Sample Comment'],
#d['Sample Type'],
#d['Acquisition Date & Time'],
#d['Rack Number'],
#d['Plate Number'],
#d['Vial Number'],
#d['Dilution Factor'],
#d['Injection Volume'],
#d['Operator Name'],
#d['Acq. Method Name'],
#d['IS'],
#d['Component Name'],
#d['Component Index'],
#d['Component Comment'],
#d['IS Comment'],
#d['Mass Info'],
#d['IS Mass Info'],
#d['IS Name'],
#d['Component Group Name'],
#d['Conc. Units'],
#d['Failed Query'],
#d['IS Failed Query'],
#d['Peak Comment'],
#d['IS Peak Comment'],
#d['Actual Concentration'],
#d['IS Actual Concentration'],
#d['Concentration Ratio'],
#d['Expected RT'],
#d['IS Expected RT'],
#d['Integration Type'],
#d['IS Integration Type'],
#d['Area'],
#d['IS Area'],
#d['Corrected Area'],
#d['IS Corrected Area'],
#d['Area Ratio'],
#d['Height'],
#d['IS Height'],
#d['Corrected Height'],
#d['IS Corrected Height'],
#d['Height Ratio'],
#d['Area / Height'],
#d['IS Area / Height'],
#d['Corrected Area/Height'],
#d['IS Corrected Area/Height'],
#d['Region Height'],
#d['IS Region Height'],
#d['Quality'],
#d['IS Quality'],
#d['Retention Time'],
#d['IS Retention Time'],
#d['Start Time'],
#d['IS Start Time'],
#d['End Time'],
#d['IS End Time'],
#d['Total Width'],
#d['IS Total Width'],
#d['Width at 50%'],
#d['IS Width at 50%'],
#d['Signal / Noise'],
#d['IS Signal / Noise'],
#d['Baseline Delta / Height'],
#d['IS Baseline Delta / Height'],
#d['Modified'],
#d['Relative RT'],
#d['Used'],
#d['Calculated Concentration'],
#d['Accuracy'],
#d['Comment'],
#d['Use_Calculated_Concentration']
);
elif 'index_' in d:
data_add = data_stage01_quantification_MQResultsTable(d
#d['index_'],
#d['sample_index'],
#d['original_filename'],
#d['sample_name'],
#d['sample_id'],
#d['sample_comment'],
#d['sample_type'],
#d['acquisition_date_and_time'],
#d['rack_number'],
#d['plate_number'],
#d['vial_number'],
#d['dilution_factor'],
#d['injection_volume'],
#d['operator_name'],
#d['acq_method_name'],
#d['is_'],
#d['component_name'],
#d['component_index'],
#d['component_comment'],
#d['is_comment'],
#d['mass_info'],
#d['is_mass'],
#d['is_name'],
#d['component_group_name'],
#d['conc_units'],
#d['failed_query'],
#d['is_failed_query'],
#d['peak_comment'],
#d['is_peak_comment'],
#d['actual_concentration'],
#d['is_actual_concentration'],
#d['concentration_ratio'],
#d['expected_rt'],
#d['is_expected_rt'],
#d['integration_type'],
#d['is_integration_type'],
#d['area'],
#d['is_area'],
#d['corrected_area'],
#d['is_corrected_area'],
#d['area_ratio'],
#d['height'],
#d['is_height'],
#d['corrected_height'],
#d['is_corrected_height'],
#d['height_ratio'],
#d['area_2_height'],
#d['is_area_2_height'],
#d['corrected_area2height'],
#d['is_corrected_area2height'],
#d['region_height'],
#d['is_region_height'],
#d['quality'],
#d['is_quality'],
#d['retention_time'],
#d['is_retention_time'],
#d['start_time'],
#d['is_start_time'],
#d['end_time'],
#d['is_end_time'],
#d['total_width'],
#d['is_total_width'],
#d['width_at_50'],
#d['is_width_at_50'],
#d['signal_2_noise'],
#d['is_signal_2_noise'],
#d['baseline_delta_2_height'],
#d['is_baseline_delta_2_height'],
#d['modified_'],
#d['relative_rt'],
#d['used_'],
#d['calculated_concentration'],
#d['accuracy_'],
#d['comment_'],
#d['use_calculated_concentration'],
);
self.session.add(data_add);
cnt = cnt + 1;
if cnt > 1000:
self.session.commit();
cnt = 0;
except IntegrityError as e:
print(e);
except SQLAlchemyError as e:
print(e);
self.session.commit();
def update_dataStage01MQResultsTable(self,data_I):
'''update rows of data_stage01_quantification_MQResultsTable'''
if data_I:
for d in data_I:
try:
data_update = self.session.query(data_stage01_quantification_MQResultsTable).filter(
data_stage01_quantification_MQResultsTable.component_name.like(d['Component Name']),
data_stage01_quantification_MQResultsTable.sample_name.like(d['Sample Name']),
data_stage01_quantification_MQResultsTable.acquisition_date_and_time == d['Acquisition Date & Time']).update(
{'index_':d['Index'],
'sample_index':d['Sample Index'],
'original_filename':d['Original Filename'],
'sample_name':d['Sample Name'],
'sample_id':d['Sample ID'],
'sample_comment':d['Sample Comment'],
'sample_type':d['Sample Type'],
'acquisition_date_and_time':d['Acquisition Date & Time'],
'rack_number':d['Rack Number'],
'plate_number':d['Plate Number'],
'vial_number':d['Vial Number'],
'dilution_factor':d['Dilution Factor'],
'injection_volume':d['Injection Volume'],
'operator_name':d['Operator Name'],
'acq_method_name':d['Acq. Method Name'],
'is_':d['IS'],
'component_name':d['Component Name'],
'component_index':d['Component Index'],
'component_comment':d['Component Comment'],
'is_comment':d['IS Comment'],
'mass_info':d['Mass Info'],
'is_mass':d['IS Mass Info'],
'is_name':d['IS Name'],
'component_group_name':d['Component Group Name'],
'conc_units':d['Conc. Units'],
'failed_query':d['Failed Query'],
'is_failed_query':d['IS Failed Query'],
'peak_comment':d['Peak Comment'],
'is_peak_comment':d['IS Peak Comment'],
'actual_concentration':d['Actual Concentration'],
'is_actual_concentration':d['IS Actual Concentration'],
'concentration_ratio':d['Concentration Ratio'],
'expected_rt':d['Expected RT'],
'is_expected_rt':d['IS Expected RT'],
'integration_type':d['Integration Type'],
'is_integration_type':d['IS Integration Type'],
'area':d['Area'],
'is_area':d['IS Area'],
'corrected_area':d['Corrected Area'],
'is_corrected_area':d['IS Corrected Area'],
'area_ratio':d['Area Ratio'],
'height':d['Height'],
'is_height':d['IS Height'],
'corrected_height':d['Corrected Height'],
'is_corrected_height':d['IS Corrected Height'],
'height_ratio':d['Height Ratio'],
'area_2_height':d['Area / Height'],
'is_area_2_height':d['IS Area / Height'],
'corrected_area2height':d['Corrected Area/Height'],
'is_corrected_area2height':d['IS Corrected Area/Height'],
'region_height':d['Region Height'],
'is_region_height':d['IS Region Height'],
'quality':d['Quality'],
'is_quality':d['IS Quality'],
'retention_time':d['Retention Time'],
'is_retention_time':d['IS Retention Time'],
'start_time':d['Start Time'],
'is_start_time':d['IS Start Time'],
'end_time':d['End Time'],
'is_end_time':d['IS End Time'],
'total_width':d['Total Width'],
'is_total_width':d['IS Total Width'],
'width_at_50':d['Width at 50%'],
'is_width_at_50':d['IS Width at 50%'],
'signal_2_noise':d['Signal / Noise'],
'is_signal_2_noise':d['IS Signal / Noise'],
'baseline_delta_2_height':d['Baseline Delta / Height'],
'is_baseline_delta_2_height':d['IS Baseline Delta / Height'],
'modified_':d['Modified'],
'relative_rt':d['Relative RT'],
'used_':d['Used'],
'calculated_concentration':d['Calculated Concentration'],
'accuracy_':d['Accuracy'],
'comment_':d['Comment'],
'use_calculated_concentration':d['Use_Calculated_Concentration'],
'start_time_at_5':d['Start Time at 5%'],
'end_time_at_5':d['End Time at 5%'],
'width_at_5':d['Width at 5%'],
'start_time_at_10':d['Start Time at 10%'],
'end_time_at_10':d['End Time at 10%'],
'width_at_10':d['Width at 10%'],
'slope_of_baseline':d['Slope of Baseline'],
'tailing_factor':d['Tailing Factor'],
'asymmetry_factor':d['Asymmetry Factor'],
'ion_ratio':d['Ion Ratio'],
'expected_ion_ratio':d['Expected Ion Ratio'],
'points_across_baseline':d['Points Across Baseline'],
'points_across_half_height':d['Points Across Half Height'],},
synchronize_session=False);
except SQLAlchemyError as e:
print(e);
self.session.commit();
# query data from data_stage01_quantification_mqresultstable
# no other table dependencies
def get_peakHeight_sampleNameAndComponentName(self,sample_name_I,component_name_I):
'''Query peak height from sample name and component name
NOTE: intended to be used within a for loop'''
try:
data = self.session.query(data_stage01_quantification_MQResultsTable.height).filter(
data_stage01_quantification_MQResultsTable.sample_name.like(sample_name_I),
data_stage01_quantification_MQResultsTable.component_name.like(component_name_I),
data_stage01_quantification_MQResultsTable.used_.is_(True)).all();
if data:
conc_O = data[0][0];
conc_units_O = 'height';
else:
conc_O = None;
conc_units_O = None;
return conc_O, conc_units_O;
except SQLAlchemyError as e:
print(e);
def get_used_sampleNameAndComponentName(self,sample_name_I,component_name_I):
'''Query used from sample name and component name
NOTE: intended to be used within a for loop'''
try:
data = self.session.query(data_stage01_quantification_MQResultsTable.used_).filter(
data_stage01_quantification_MQResultsTable.sample_name.like(sample_name_I),
data_stage01_quantification_MQResultsTable.component_name_name.like(component_name_name_I)).all();
if data:
used_O = data[0];
else: used_O = None;
return used_O;
except SQLAlchemyError as e:
print(e);
def get_row_sampleNameAndComponentName(self,sample_name_I,component_name_I):
'''Query peak information from sample name and component name
NOTE: intended to be used within a for loop'''
try:
data = self.session.query(data_stage01_quantification_MQResultsTable).filter(
data_stage01_quantification_MQResultsTable.sample_name.like(sample_name_I),
data_stage01_quantification_MQResultsTable.component_name.like(component_name_I),
data_stage01_quantification_MQResultsTable.used_.is_(True)).all();
data_O = {};
if data:
for d in data:
used_O=d.__repr__dict__();
else: used_O = None;
return used_O;
except SQLAlchemyError as e:
print(e);
def get_peakInfo_sampleNameAndComponentName(self,sample_name_I,component_name_I,acquisition_date_and_time_I):
'''Query peak information from sample name and component name
NOTE: intended to be used within a for loop'''
try:
if acquisition_date_and_time_I[0] and acquisition_date_and_time_I[1]:
data = self.session.query(data_stage01_quantification_MQResultsTable).filter(
data_stage01_quantification_MQResultsTable.sample_name.like(sample_name_I),
data_stage01_quantification_MQResultsTable.component_name.like(component_name_I),
data_stage01_quantification_MQResultsTable.acquisition_date_and_time>=acquisition_date_and_time_I[0],
data_stage01_quantification_MQResultsTable.acquisition_date_and_time<=acquisition_date_and_time_I[1],
data_stage01_quantification_MQResultsTable.used_.is_(True)).all();
else:
data = self.session.query(data_stage01_quantification_MQResultsTable).filter(
data_stage01_quantification_MQResultsTable.sample_name.like(sample_name_I),
data_stage01_quantification_MQResultsTable.component_name.like(component_name_I),
data_stage01_quantification_MQResultsTable.used_.is_(True)).all();
data_O = {};
if data:
for d in data:
used_O={'acquisition_date_and_time':d.acquisition_date_and_time,
'component_name':d.component_name,
'component_group_name':d.component_group_name,
'area':d.area,
'height':d.height,
'retention_time':d.retention_time,
'start_time':d.start_time,
'end_time':d.end_time,
'total_width':d.total_width,
'width_at_50':d.width_at_50,
'signal_2_noise':d.signal_2_noise,
'baseline_delta_2_height':d.baseline_delta_2_height,
'relative_rt':d.relative_rt};
else: used_O = None;
return used_O;
except SQLAlchemyError as e:
print(e);
# delete data from data_stage01_quantification_mqresultstable
# no other table dependencies
def delete_row_sampleName(self,sampleNames_I):
'''Delete specific samples from an experiment by their sample ID from sample_physiologicalparameters'''
deletes = [];
for d in sampleNames_I:
try:
delete = self.session.query(data_stage01_quantification_MQResultsTable).filter(
data_stage01_quantification_MQResultsTable.sample_name.like(d['sample_name'])).delete(
synchronize_session=False);
if delete == 0:
print('row not found')
print(d);
deletes.append(delete);
except SQLAlchemyError as e:
print(e);
self.session.commit();
# query data from data_stage01_quantification_mqresultstable
# requires quantitation_method
    def get_concAndConcUnits_sampleNameAndComponentName(self,sample_name_I,component_name_I):
        '''Query data (i.e. concentration, area/peak height ratio) from sample name and component name
        NOTE: intended to be used within a for loop

        Resolution order:
        1. if the row's use_calculated_concentration flag is set, return
           (calculated_concentration, conc_units);
        2. otherwise consult quantitation_method.use_area for the
           experiment's method: return (area_ratio, 'area_ratio') when set,
           else (height_ratio, 'height_ratio').
        Returns (None, None) when the chosen query matches no rows.
        NOTE(review): if the first query raises, use_conc_O is referenced
        unbound below (NameError) — pre-existing behavior, not changed here.
        '''
        # check for absolute or relative quantitation (i.e. area/peak height ratio)
        try:
            use_conc = self.session.query(data_stage01_quantification_MQResultsTable.use_calculated_concentration).filter(
                data_stage01_quantification_MQResultsTable.sample_name.like(sample_name_I),
                data_stage01_quantification_MQResultsTable.component_name.like(component_name_I),
                data_stage01_quantification_MQResultsTable.used_.is_(True)).all();
            if use_conc:
                use_conc_O = use_conc[0][0];
            else:
                use_conc_O = None;
        except SQLAlchemyError as e:
            print(e);
        if use_conc_O:
            # absolute quantitation: return the calculated concentration
            try:
                data = self.session.query(data_stage01_quantification_MQResultsTable.calculated_concentration,
                        data_stage01_quantification_MQResultsTable.conc_units).filter(
                        data_stage01_quantification_MQResultsTable.sample_name.like(sample_name_I),
                        data_stage01_quantification_MQResultsTable.component_name.like(component_name_I),
                        data_stage01_quantification_MQResultsTable.used_.is_(True)).all();
                if data:
                    conc_O = data[0][0];
                    conc_units_O = data[0][1];
                else:
                    conc_O = None;
                    conc_units_O = None;
                return conc_O, conc_units_O;
            except SQLAlchemyError as e:
                print(e);
        else:
            # check for area or peak height ratio from quantitation_method
            try:
                data = self.session.query(quantitation_method.use_area).filter(
                    experiment.sample_name.like(sample_name_I),
                    experiment.quantitation_method_id.like(quantitation_method.id),
                    quantitation_method.component_name.like(component_name_I)).all();
                if data:
                    ratio_O = data[0][0];
                else:
                    ratio_O = None;
            except SQLAlchemyError as e:
                print(e);
            if ratio_O:
                # method uses the area: return the area ratio
                try:
                    data = self.session.query(data_stage01_quantification_MQResultsTable.area_ratio).filter(
                        data_stage01_quantification_MQResultsTable.sample_name.like(sample_name_I),
                        data_stage01_quantification_MQResultsTable.component_name.like(component_name_I),
                        data_stage01_quantification_MQResultsTable.used_.is_(True)).all();
                    if data:
                        conc_O = data[0][0];
                        conc_units_O = 'area_ratio';
                    else:
                        conc_O = None;
                        conc_units_O = None;
                    return conc_O, conc_units_O;
                except SQLAlchemyError as e:
                    print(e);
            else:
                # method uses the peak height: return the height ratio
                try:
                    data = self.session.query(data_stage01_quantification_MQResultsTable.height_ratio).filter(
                        data_stage01_quantification_MQResultsTable.sample_name.like(sample_name_I),
                        data_stage01_quantification_MQResultsTable.component_name.like(component_name_I),
                        data_stage01_quantification_MQResultsTable.used_.is_(True)).all();
                    if data:
                        conc_O = data[0][0];
                        conc_units_O = 'height_ratio';
                    else:
                        conc_O = None;
                        conc_units_O = None;
                    return conc_O, conc_units_O;
                except SQLAlchemyError as e:
                    print(e);
# query component group names from data_stage01_quantification_mqresultstable
def get_componentGroupNames_sampleName(self,sample_name_I):
'''Query component group names that are used from the sample name
NOTE: intended to be used within a for loop'''
try:
component_group_names = self.session.query(data_stage01_quantification_MQResultsTable.component_group_name).filter(
data_stage01_quantification_MQResultsTable.sample_name.like(sample_name_I),
data_stage01_quantification_MQResultsTable.used_.is_(True)).group_by(
data_stage01_quantification_MQResultsTable.component_group_name).order_by(
data_stage01_quantification_MQResultsTable.component_group_name.asc()).all();
component_group_names_O = [];
for cgn in component_group_names: component_group_names_O.append(cgn.component_group_name);
return component_group_names_O;
except SQLAlchemyError as e:
print(e);
def get_componentGroupName_experimentIDAndComponentName(self,experiment_id_I,component_name_I,exp_type_I=4):
'''Query component group names that are used from the component name
NOTE: intended to be used within a for loop'''
try:
component_group_name = self.session.query(data_stage01_quantification_MQResultsTable.component_group_name).filter(
experiment.id.like(experiment_id_I),
experiment.exp_type_id == exp_type_I,
experiment.sample_name.like(data_stage01_quantification_MQResultsTable.sample_name),
data_stage01_quantification_MQResultsTable.component_name.like(component_name_I),
data_stage01_quantification_MQResultsTable.used_.is_(True)).group_by(
data_stage01_quantification_MQResultsTable.component_group_name).all();
if len(component_group_name)>1:
print('more than 1 component_group_name retrieved per component_name')
component_group_name_O = component_group_name[0].component_group_name;
return component_group_name_O;
except SQLAlchemyError as e:
print(e);
# query sample names from data_stage01_quantification_mqresultstable
def get_sampleNames_experimentIDAndSampleType(self,experiment_id_I,sample_type_I,exp_type_I=4):
'''Query sample names (i.e. unknowns) that are used from
the experiment'''
try:
sample_names = self.session.query(data_stage01_quantification_MQResultsTable.sample_name).filter(
data_stage01_quantification_MQResultsTable.sample_type.like(sample_type_I),
experiment.id.like(experiment_id_I),
#experiment.exp_type_id == exp_type_I,
data_stage01_quantification_MQResultsTable.used_.is_(True),
experiment.sample_name.like(data_stage01_quantification_MQResultsTable.sample_name)).group_by(
data_stage01_quantification_MQResultsTable.sample_name).order_by(
data_stage01_quantification_MQResultsTable.sample_name.asc()).all();
sample_names_O = [];
for sn in sample_names: sample_names_O.append(sn.sample_name);
return sample_names_O;
except SQLAlchemyError as e:
print(e);
# query sample names from data_stage01_quantification_mqresultstable
def get_sampleNamesAndSampleIDs_experimentIDAndSampleType(self,experiment_id_I,sample_type_I,exp_type_I=4):
'''Query sample names and sample ids (i.e. unknowns) that are used from
the experiment'''
try:
sample_names = self.session.query(data_stage01_quantification_MQResultsTable.sample_name,
sample.sample_id).filter(
data_stage01_quantification_MQResultsTable.sample_type.like(sample_type_I),
experiment.id.like(experiment_id_I),
#experiment.exp_type_id == exp_type_I,
data_stage01_quantification_MQResultsTable.used_.is_(True),
experiment.sample_name.like(data_stage01_quantification_MQResultsTable.sample_name),
experiment.sample_name.like(sample.sample_name)).group_by(
data_stage01_quantification_MQResultsTable.sample_name,
sample.sample_id).order_by(
data_stage01_quantification_MQResultsTable.sample_name.asc(),
sample.sample_id.asc()).all();
sample_names_O = [];
sample_ids_O = [];
for sn in sample_names:
sample_names_O.append(sn.sample_name);
sample_ids_O.append(sn.sample_id);
return sample_names_O,sample_ids_O;
except SQLAlchemyError as e:
print(e);
def get_sampleNames_experimentIDAndSampleID(self,experiment_id_I,sample_id_I,exp_type_I=4):
'''Query sample names (i.e. unknowns) that are used from
the experiment'''
try:
sample_names = self.session.query(sample.sample_name).filter(
sample.sample_id.like(sample_id_I),
experiment.id.like(experiment_id_I),
#experiment.exp_type_id == exp_type_I,
data_stage01_quantification_MQResultsTable.used_.is_(True),
experiment.sample_name.like(sample.sample_name),
experiment.sample_name.like(data_stage01_quantification_MQResultsTable.sample_name)).group_by(
sample.sample_name).order_by(
sample.sample_name.asc()).all();
sample_names_O = [];
for sn in sample_names: sample_names_O.append(sn.sample_name);
return sample_names_O;
except SQLAlchemyError as e:
print(e);
def get_sampleNames_experimentIDAndSampleIDAndSampleDilution(self,experiment_id_I,sample_id_I,sample_dilution_I,exp_type_I=4):
'''Query sample names (i.e. unknowns) that are used from
the experiment'''
try:
sample_names = self.session.query(sample.sample_name).filter(
sample.sample_id.like(sample_id_I),
sample.sample_dilution == sample_dilution_I,
experiment.id.like(experiment_id_I),
#experiment.exp_type_id == exp_type_I,
data_stage01_quantification_MQResultsTable.used_.is_(True),
experiment.sample_name.like(sample.sample_name),
experiment.sample_name.like(data_stage01_quantification_MQResultsTable.sample_name)).group_by(
sample.sample_name).order_by(
sample.sample_name.asc()).all();
sample_names_O = [];
for sn in sample_names: sample_names_O.append(sn.sample_name);
return sample_names_O;
except SQLAlchemyError as e:
print(e);
def get_sampleNames_experimentIDAndSampleNameShortAndSampleDescription(self,experiment_id_I,sample_name_short_I,sample_decription_I,exp_type_I=4):
'''Query sample names that are used from
the experiment'''
try:
sample_names = self.session.query(sample.sample_name).filter(
sample_description.sample_name_short.like(sample_name_short_I),
sample_description.sample_desc.like(sample_decription_I),
experiment.id.like(experiment_id_I),
experiment.exp_type_id == exp_type_I,
sample.sample_id.like(sample_description.sample_id),
experiment.sample_name.like(sample.sample_name),
experiment.sample_name.like(data_stage01_quantification_MQResultsTable.sample_name),
data_stage01_quantification_MQResultsTable.used_.is_(True)).group_by(
sample.sample_name).order_by(
sample.sample_name.asc()).all();
sample_names_O = [];
for sn in sample_names: sample_names_O.append(sn.sample_name);
return sample_names_O;
except SQLAlchemyError as e:
print(e);
def get_sampleNames_experimentIDAndSampleNameAbbreviationAndSampleDescription(self,experiment_id_I,sample_name_abbreviation_I,sample_decription_I,exp_type_I=4):
'''Query sample names that are used from
the experiment'''
try:
sample_names = self.session.query(sample.sample_name).filter(
sample_description.sample_name_abbreviation.like(sample_name_abbreviation_I),
sample_description.sample_desc.like(sample_decription_I),
experiment.id.like(experiment_id_I),
experiment.exp_type_id == exp_type_I,
sample.sample_id.like(sample_description.sample_id),
experiment.sample_name.like(sample.sample_name),
experiment.sample_name.like(data_stage01_quantification_MQResultsTable.sample_name),
data_stage01_quantification_MQResultsTable.used_.is_(True)).group_by(
sample.sample_name).order_by(
sample.sample_name.asc()).all();
sample_names_O = [];
for sn in sample_names: sample_names_O.append(sn.sample_name);
return sample_names_O;
except SQLAlchemyError as e:
print(e);
def get_sampleNames_experimentIDAndSampleNameAbbreviationAndSampleDilution(self,experiment_id_I,sample_name_abbreviation_I,sample_dilution_I,exp_type_I=4):
'''Query sample names that are used from
the experiment'''
try:
sample_names = self.session.query(sample.sample_name).filter(
sample_description.sample_name_abbreviation.like(sample_name_abbreviation_I),
sample.sample_dilution == sample_dilution_I,
experiment.id.like(experiment_id_I),
experiment.exp_type_id == exp_type_I,
sample.sample_id.like(sample_description.sample_id),
experiment.sample_name.like(sample.sample_name),
experiment.sample_name.like(data_stage01_quantification_MQResultsTable.sample_name),
data_stage01_quantification_MQResultsTable.used_.is_(True)).group_by(
sample.sample_name).order_by(
sample.sample_name.asc()).all();
sample_names_O = [];
for sn in sample_names: sample_names_O.append(sn.sample_name);
return sample_names_O;
except SQLAlchemyError as e:
print(e);
# query sample ids from data_stage01_quantification_mqresultstable
def get_sampleIDs_experimentIDAndSampleType(self,experiment_id_I,sample_type_I,exp_type_I=4):
'''Query sample names (i.e. unknowns) that are used from
the experiment'''
try:
sample_ids = self.session.query(sample.sample_id).filter(
data_stage01_quantification_MQResultsTable.sample_type.like(sample_type_I),
experiment.id.like(experiment_id_I),
experiment.exp_type_id == exp_type_I,
data_stage01_quantification_MQResultsTable.used_.is_(True),
experiment.sample_name.like(sample.sample_name),
experiment.sample_name.like(data_stage01_quantification_MQResultsTable.sample_name)).group_by(
sample.sample_id).order_by(
sample.sample_id.asc()).all();
sample_ids_O = [];
for si in sample_ids: sample_ids_O.append(si.sample_id);
return sample_ids_O;
except SQLAlchemyError as e:
print(e);
def get_sampleIDs_experimentID(self,experiment_id_I,exp_type_I=4):
'''Query sample names that are used from the experiment'''
try:
sample_ids = self.session.query(sample.sample_id).filter(
experiment.id.like(experiment_id_I),
experiment.exp_type_id == exp_type_I,
data_stage01_quantification_MQResultsTable.used_.is_(True),
experiment.sample_name.like(sample.sample_name),
experiment.sample_name.like(data_stage01_quantification_MQResultsTable.sample_name)).group_by(
sample.sample_id).order_by(
sample.sample_id.asc()).all();
sample_ids_O = [];
for si in sample_ids: sample_ids_O.append(si.sample_id);
return sample_ids_O;
except SQLAlchemyError as e:
print(e);
def get_sampleID_experimentIDAndSampleName(self,experiment_id_I,sample_name_I,exp_type_I=4):
'''Query sample names (i.e. unknowns) that are used from
the experiment'''
try:
sample_id = self.session.query(sample.sample_id).filter(
sample.sample_name.like(sample_name_I),
experiment.id.like(experiment_id_I),
experiment.exp_type_id == exp_type_I,
data_stage01_quantification_MQResultsTable.used_.is_(True),
experiment.sample_name.like(sample.sample_name),
experiment.sample_name.like(data_stage01_quantification_MQResultsTable.sample_name)).group_by(
sample.sample_id).all();
sample_id_O = sample_id[0][0];
return sample_id_O;
except SQLAlchemyError as e:
print(e);
# query sample name short from data_stage01_quantification_mqresultstable
def get_sampleNameShort_experimentIDAndSampleType(self,experiment_id_I,sample_type_I,exp_type_I=4):
'''Query sample name short that are used from
the experiment'''
try:
sample_name_short = self.session.query(sample_description.sample_name_short).filter(
sample.sample_type.like(sample_type_I),
experiment.id.like(experiment_id_I),
experiment.exp_type_id == exp_type_I,
data_stage01_quantification_MQResultsTable.used_.is_(True),
experiment.sample_name.like(sample.sample_name),
sample.sample_id.like(sample_description.sample_id),
experiment.sample_name.like(data_stage01_quantification_MQResultsTable.sample_name)).group_by(
sample_description.sample_name_short).order_by(
sample_description.sample_name_short.asc()).all();
sample_name_short_O = [];
for sns in sample_name_short: sample_name_short_O.append(sns.sample_name_short);
return sample_name_short_O;
except SQLAlchemyError as e:
print(e);
    def get_sampleNameShort_experimentIDAndSampleName(self,experiment_id_I,sample_name_I,exp_type_I=4):
        '''Query the sample_name_short matching an experiment id and sample name.

        NOTE(review): this returns the first result row object, not the bare
        sample_name_short string -- unlike the sibling
        get_sampleID_experimentIDAndSampleName, which unpacks with [0][0].
        Callers may depend on the current return shape, so it is left
        unchanged; confirm before "fixing".  Also note an empty result
        raises an unhandled IndexError at the [0] index below.
        '''
        try:
            sample_name_short = self.session.query(sample_description.sample_name_short).filter(
                sample.sample_name.like(sample_name_I),
                experiment.id.like(experiment_id_I),
                experiment.exp_type_id == exp_type_I,
                data_stage01_quantification_MQResultsTable.used_.is_(True),
                experiment.sample_name.like(sample.sample_name),
                sample.sample_id.like(sample_description.sample_id),
                experiment.sample_name.like(data_stage01_quantification_MQResultsTable.sample_name)).group_by(
                sample_description.sample_name_short).all();
            # first row of the grouped result (row object, not scalar -- see NOTE)
            sample_name_short_O = sample_name_short[0];
            return sample_name_short_O;
        except SQLAlchemyError as e:
            print(e);
# query sample name abbreviations from data_stage01_quantification_mqresultstable
def get_sampleNameAbbreviations_experimentIDAndSampleType(self,experiment_id_I,sample_type_I,exp_type_I=4):
'''Query sample name abbreviations that are used from
the experiment'''
try:
sample_name_abbreviations = self.session.query(sample_description.sample_name_abbreviation).filter(
sample.sample_type.like(sample_type_I),
experiment.id.like(experiment_id_I),
experiment.exp_type_id == exp_type_I,
data_stage01_quantification_MQResultsTable.used_.is_(True),
experiment.sample_name.like(sample.sample_name),
sample.sample_id.like(sample_description.sample_id),
experiment.sample_name.like(data_stage01_quantification_MQResultsTable.sample_name)).group_by(
sample_description.sample_name_abbreviation).order_by(
sample_description.sample_name_abbreviation.asc()).all();
sample_name_abbreviations_O = [];
for sna in sample_name_abbreviations: sample_name_abbreviations_O.append(sna.sample_name_abbreviation);
return sample_name_abbreviations_O;
except SQLAlchemyError as e:
print(e);
# query dilutions from data_stage01_quantification_mqresultstable
def get_sampleDilution_experimentIDAndSampleID(self,experiment_id_I,sample_id_I,exp_type_I=4):
'''Query dilutions that are used from the experiment'''
try:
sample_dilutions = self.session.query(sample.sample_dilution).filter(
sample.sample_id.like(sample_id_I),
experiment.id.like(experiment_id_I),
experiment.exp_type_id == exp_type_I,
data_stage01_quantification_MQResultsTable.used_.is_(True),
experiment.sample_name.like(sample.sample_name),
experiment.sample_name.like(data_stage01_quantification_MQResultsTable.sample_name)).group_by(
sample.sample_dilution).order_by(
sample.sample_dilution.asc()).all();
sample_dilutions_O = [];
for sd in sample_dilutions: sample_dilutions_O.append(sd.sample_dilution);
return sample_dilutions_O;
except SQLAlchemyError as e:
print(e);
def get_sampleDilution_experimentIDAndSampleNameAbbreviation(self,experiment_id_I,sample_name_abbreviation_I,exp_type_I=4):
'''Query dilutions that are used from the experiment'''
try:
sample_dilutions = self.session.query(sample.sample_dilution).filter(
experiment.id.like(experiment_id_I),
experiment.exp_type_id == exp_type_I,
experiment.sample_name.like(sample.sample_name),
sample.sample_id.like(sample_description.sample_id),
sample_description.sample_name_abbreviation.like(sample_name_abbreviation_I),
data_stage01_quantification_MQResultsTable.used_.is_(True),
experiment.sample_name.like(data_stage01_quantification_MQResultsTable.sample_name)).group_by(
sample.sample_dilution).order_by(
sample.sample_dilution.asc()).all();
sample_dilutions_O = [];
for sd in sample_dilutions: sample_dilutions_O.append(sd.sample_dilution);
return sample_dilutions_O;
except SQLAlchemyError as e:
print(e);
# query time points from data_stage01_quantification_mqresultstable
def get_timePoint_experimentIDAndSampleNameAbbreviation(self,experiment_id_I,sample_name_abbreviation_I,exp_type_I=4):
'''Query time points that are used from the experiment and sample name abbreviation'''
try:
time_points = self.session.query(sample_description.time_point).filter(
sample_description.sample_name_abbreviation.like(sample_name_abbreviation_I),
experiment.id.like(experiment_id_I),
experiment.exp_type_id == exp_type_I,
data_stage01_quantification_MQResultsTable.used_.is_(True),
experiment.sample_name.like(sample.sample_name),
sample.sample_id.like(sample_description.sample_id),
experiment.sample_name.like(data_stage01_quantification_MQResultsTable.sample_name)).group_by(
sample_description.time_point).order_by(
sample_description.time_point.asc()).all();
time_points_O = [];
for tp in time_points: time_points_O.append(tp.time_point);
return time_points_O;
except SQLAlchemyError as e:
print(e);
# query component names from data_stage01_quantification_mqresultstable
def get_componentsNames_experimentIDAndSampleID(self,experiment_id_I,sample_id_I,exp_type_I=4):
'''Query component names that are used and are not IS from
the experiment and sample_id'''
try:
component_names = self.session.query(data_stage01_quantification_MQResultsTable.component_name).filter(
experiment.id.like(experiment_id_I),
experiment.exp_type_id == exp_type_I,
data_stage01_quantification_MQResultsTable.used_.is_(True),
data_stage01_quantification_MQResultsTable.is_.is_(False),
experiment.sample_name.like(sample.sample_name),
sample.sample_id.like(sample_id_I),
experiment.sample_name.like(data_stage01_quantification_MQResultsTable.sample_name)).group_by(
data_stage01_quantification_MQResultsTable.component_name).order_by(
data_stage01_quantification_MQResultsTable.component_name.asc()).all();
component_names_O = [];
for cn in component_names: component_names_O.append(cn.component_name);
return component_names_O;
except SQLAlchemyError as e:
print(e);
def get_componentsNames_experimentIDAndSampleNameAbbreviation(self,experiment_id_I,sample_name_abbreviation_I,exp_type_I=4):
'''Query component names that are used from
the experiment and sample_name_abbreviation'''
try:
component_names = self.session.query(data_stage01_quantification_MQResultsTable.component_name).filter(
sample_description.sample_name_abbreviation.like(sample_name_abbreviation_I),
experiment.id.like(experiment_id_I),
experiment.exp_type_id == exp_type_I,
sample.sample_id.like(sample_description.sample_id),
experiment.sample_name.like(sample.sample_name),
experiment.sample_name.like(data_stage01_quantification_MQResultsTable.sample_name),
data_stage01_quantification_MQResultsTable.used_.is_(True),
data_stage01_quantification_MQResultsTable.is_.is_(False)).group_by(
data_stage01_quantification_MQResultsTable.component_name).order_by(
data_stage01_quantification_MQResultsTable.component_name.asc()).all();
component_names_O = [];
for cn in component_names: component_names_O.append(cn.component_name);
return component_names_O;
except SQLAlchemyError as e:
print(e);
def get_componentsNames_experimentIDAndSampleName(self,experiment_id_I,sample_name_I,exp_type_I=4):
'''Query component names that are used and not internal standards from
the experiment and sample_name'''
try:
component_names = self.session.query(data_stage01_quantification_MQResultsTable.component_name).filter(
experiment.sample_name.like(sample_name_I),
experiment.id.like(experiment_id_I),
experiment.exp_type_id == exp_type_I,
experiment.sample_name.like(data_stage01_quantification_MQResultsTable.sample_name),
data_stage01_quantification_MQResultsTable.used_.is_(True),
data_stage01_quantification_MQResultsTable.is_.is_(False)).group_by(
data_stage01_quantification_MQResultsTable.component_name).order_by(
data_stage01_quantification_MQResultsTable.component_name.asc()).all();
component_names_O = [];
for cn in component_names: component_names_O.append(cn.component_name);
return component_names_O;
except SQLAlchemyError as e:
print(e);
def get_componentsNamesAndComponentGroupNames_experimentIDAndSampleName(self,experiment_id_I,sample_name_I,exp_type_I=4):
'''Query component names that are used and not internal standards from
the experiment and sample_name'''
try:
component_names = self.session.query(data_stage01_quantification_MQResultsTable.component_name,
data_stage01_quantification_MQResultsTable.component_group_name).filter(
experiment.sample_name.like(sample_name_I),
experiment.id.like(experiment_id_I),
experiment.exp_type_id == exp_type_I,
experiment.sample_name.like(data_stage01_quantification_MQResultsTable.sample_name),
data_stage01_quantification_MQResultsTable.used_.is_(True),
data_stage01_quantification_MQResultsTable.is_.is_(False)).group_by(
data_stage01_quantification_MQResultsTable.component_name,
data_stage01_quantification_MQResultsTable.component_group_name).order_by(
data_stage01_quantification_MQResultsTable.component_name.asc(),
data_stage01_quantification_MQResultsTable.component_group_name.asc()).all();
component_names_O = [];
component_group_names_O = [];
for cn in component_names:
component_names_O.append(cn.component_name);
component_group_names_O.append(cn.component_group_name);
return component_names_O,component_group_names_O;
except SQLAlchemyError as e:
print(e);
def get_componentsNames_experimentIDAndSampleType(self,experiment_id_I,sample_type_I):
'''Query component names that are used and not internal standards from
the experiment and sample_name'''
try:
component_names = self.session.query(data_stage01_quantification_MQResultsTable.component_name).filter(
data_stage01_quantification_MQResultsTable.sample_type.like(sample_type_I),
experiment.id.like(experiment_id_I),
experiment.sample_name.like(data_stage01_quantification_MQResultsTable.sample_name),
data_stage01_quantification_MQResultsTable.used_.is_(True),
data_stage01_quantification_MQResultsTable.is_.is_(False)).group_by(
data_stage01_quantification_MQResultsTable.component_name).order_by(
data_stage01_quantification_MQResultsTable.component_name.asc()).all();
component_names_O = [];
for cn in component_names: component_names_O.append(cn.component_name);
return component_names_O;
except SQLAlchemyError as e:
print(e);#,quant_method_id_I
def get_sampleNames_QMethodIDAndComponentNameAndSampleType(self,quantitation_method_id_I,component_name_I,sample_type_I='Standard'):
'''Query sample names (i.e. unknowns) that are used from
the experiment by quantitation_method_id, component_name, and sample_type'''
try:
sample_names = self.session.query(data_stage01_quantification_MQResultsTable.sample_name).filter(
data_stage01_quantification_MQResultsTable.sample_type.like(sample_type_I),
data_stage01_quantification_MQResultsTable.component_name.like(component_name_I),
experiment.quantitation_method_id.like(quantitation_method_id_I),
data_stage01_quantification_MQResultsTable.used_.is_(True),
experiment.sample_name.like(data_stage01_quantification_MQResultsTable.sample_name)).group_by(
data_stage01_quantification_MQResultsTable.sample_name).order_by(
data_stage01_quantification_MQResultsTable.sample_name.asc()).all();
sample_names_O = [];
for sn in sample_names: sample_names_O.append(sn.sample_name);
return sample_names_O;
except SQLAlchemyError as e:
print(e);
def get_rows_dataStage01QuantificationMQResultsTable(
self,
analysis_id_I = [],
experiment_id_I = [],
sample_name_I = [],
sample_id_I = [],
sample_name_abbreviation_I = [],
sample_type_I = [],
component_name_I = [],
acquisition_date_and_time_I = [],
):
'''Query rows from data_stage01_quantification_MQResultsTable
'''
try:
subquery1 = '''SELECT
"data_stage01_quantification_analysis"."analysis_id",
"data_stage01_quantification_analysis"."experiment_id",
"data_stage01_quantification_analysis"."sample_name",
"data_stage01_quantification_analysis"."sample_id",
"data_stage01_quantification_analysis"."sample_name_short",
"data_stage01_quantification_analysis"."sample_name_abbreviation",
"data_stage01_quantification_analysis"."time_point",
"data_stage01_quantification_analysis"."analysis_type",
"data_stage01_quantification_analysis"."sample_desc",
"data_stage01_quantification_analysis"."used_",
"data_stage01_quantification_analysis"."comment_"
'''
subquery1 += '''FROM "data_stage01_quantification_analysis"
'''
subquery1 += '''
WHERE "data_stage01_quantification_analysis"."used_"
'''
if analysis_id_I:
cmd_q = '''AND "data_stage01_quantification_analysis".analysis_id =ANY ('{%s}'::text[]) '''%(
self.convert_list2string(analysis_id_I));
subquery1+=cmd_q;
if experiment_id_I:
cmd_q = '''AND "data_stage01_quantification_analysis".experiment_id =ANY ('{%s}'::text[]) '''%(
self.convert_list2string(experiment_id_I));
subquery1+=cmd_q;
if sample_name_I:
cmd_q = '''AND "data_stage01_quantification_analysis".sample_name =ANY ('{%s}'::text[]) '''%(
self.convert_list2string(sample_name_I));
subquery1+=cmd_q;
if sample_id_I:
cmd_q = '''AND "data_stage01_quantification_analysis".sample_id =ANY ('{%s}'::text[]) '''%(
self.convert_list2string(sample_id_I));
subquery1+=cmd_q;
if sample_name_abbreviation_I:
cmd_q = '''AND "data_stage01_quantification_analysis".sample_name_abbreviation =ANY ('{%s}'::text[]) '''%(
self.convert_list2string(sample_name_abbreviation_I));
subquery1+=cmd_q;
subquery1 += '''
ORDER BY "data_stage01_quantification_analysis"."analysis_id" ASC,
"data_stage01_quantification_analysis"."experiment_id" ASC,
"data_stage01_quantification_analysis"."sample_name" ASC
'''
cmd = '''SELECT "data_stage01_quantification_mqresultstable"."id",
"data_stage01_quantification_mqresultstable"."index_",
"data_stage01_quantification_mqresultstable"."sample_index",
"data_stage01_quantification_mqresultstable"."original_filename",
"data_stage01_quantification_mqresultstable"."sample_name",
"data_stage01_quantification_mqresultstable"."sample_comment",
"data_stage01_quantification_mqresultstable"."sample_type",
"data_stage01_quantification_mqresultstable"."acquisition_date_and_time",
"data_stage01_quantification_mqresultstable"."rack_number",
"data_stage01_quantification_mqresultstable"."plate_number",
"data_stage01_quantification_mqresultstable"."vial_number",
"data_stage01_quantification_mqresultstable"."dilution_factor",
"data_stage01_quantification_mqresultstable"."injection_volume",
"data_stage01_quantification_mqresultstable"."operator_name",
"data_stage01_quantification_mqresultstable"."acq_method_name",
"data_stage01_quantification_mqresultstable"."is_",
"data_stage01_quantification_mqresultstable"."component_name",
"data_stage01_quantification_mqresultstable"."component_index",
"data_stage01_quantification_mqresultstable"."component_comment",
"data_stage01_quantification_mqresultstable"."is_comment",
"data_stage01_quantification_mqresultstable"."mass_info",
"data_stage01_quantification_mqresultstable"."is_mass",
"data_stage01_quantification_mqresultstable"."is_name",
"data_stage01_quantification_mqresultstable"."component_group_name",
"data_stage01_quantification_mqresultstable"."conc_units",
"data_stage01_quantification_mqresultstable"."failed_query",
"data_stage01_quantification_mqresultstable"."is_failed_query",
"data_stage01_quantification_mqresultstable"."peak_comment",
"data_stage01_quantification_mqresultstable"."is_peak_comment",
"data_stage01_quantification_mqresultstable"."actual_concentration",
"data_stage01_quantification_mqresultstable"."is_actual_concentration",
"data_stage01_quantification_mqresultstable"."concentration_ratio",
"data_stage01_quantification_mqresultstable"."expected_rt",
"data_stage01_quantification_mqresultstable"."is_expected_rt",
"data_stage01_quantification_mqresultstable"."integration_type",
"data_stage01_quantification_mqresultstable"."is_integration_type",
"data_stage01_quantification_mqresultstable"."area",
"data_stage01_quantification_mqresultstable"."is_area",
"data_stage01_quantification_mqresultstable"."corrected_area",
"data_stage01_quantification_mqresultstable"."is_corrected_area",
"data_stage01_quantification_mqresultstable"."area_ratio",
"data_stage01_quantification_mqresultstable"."height",
"data_stage01_quantification_mqresultstable"."is_height",
"data_stage01_quantification_mqresultstable"."corrected_height",
"data_stage01_quantification_mqresultstable"."is_corrected_height",
"data_stage01_quantification_mqresultstable"."height_ratio",
"data_stage01_quantification_mqresultstable"."area_2_height",
"data_stage01_quantification_mqresultstable"."is_area_2_height",
"data_stage01_quantification_mqresultstable"."corrected_area2height",
"data_stage01_quantification_mqresultstable"."is_corrected_area2height",
"data_stage01_quantification_mqresultstable"."region_height",
"data_stage01_quantification_mqresultstable"."is_region_height",
"data_stage01_quantification_mqresultstable"."quality",
"data_stage01_quantification_mqresultstable"."is_quality",
"data_stage01_quantification_mqresultstable"."retention_time",
"data_stage01_quantification_mqresultstable"."is_retention_time",
"data_stage01_quantification_mqresultstable"."start_time",
"data_stage01_quantification_mqresultstable"."is_start_time",
"data_stage01_quantification_mqresultstable"."end_time",
"data_stage01_quantification_mqresultstable"."is_end_time",
"data_stage01_quantification_mqresultstable"."total_width",
"data_stage01_quantification_mqresultstable"."is_total_width",
"data_stage01_quantification_mqresultstable"."width_at_50",
"data_stage01_quantification_mqresultstable"."is_width_at_50",
"data_stage01_quantification_mqresultstable"."signal_2_noise",
"data_stage01_quantification_mqresultstable"."is_signal_2_noise",
"data_stage01_quantification_mqresultstable"."baseline_delta_2_height",
"data_stage01_quantification_mqresultstable"."is_baseline_delta_2_height",
"data_stage01_quantification_mqresultstable"."modified_",
"data_stage01_quantification_mqresultstable"."relative_rt",
"data_stage01_quantification_mqresultstable"."used_",
"data_stage01_quantification_mqresultstable"."calculated_concentration",
"data_stage01_quantification_mqresultstable"."accuracy_",
"data_stage01_quantification_mqresultstable"."comment_",
"data_stage01_quantification_mqresultstable"."use_calculated_concentration",
"data_stage01_quantification_mqresultstable"."start_time_at_5",
"data_stage01_quantification_mqresultstable"."end_time_at_5",
"data_stage01_quantification_mqresultstable"."width_at_5",
"data_stage01_quantification_mqresultstable"."start_time_at_10",
"data_stage01_quantification_mqresultstable"."end_time_at_10",
"data_stage01_quantification_mqresultstable"."width_at_10",
"data_stage01_quantification_mqresultstable"."slope_of_baseline",
"data_stage01_quantification_mqresultstable"."tailing_factor",
"data_stage01_quantification_mqresultstable"."asymmetry_factor",
"data_stage01_quantification_mqresultstable"."ion_ratio",
"data_stage01_quantification_mqresultstable"."expected_ion_ratio",
"data_stage01_quantification_mqresultstable"."points_across_baseline",
"data_stage01_quantification_mqresultstable"."points_across_half_height",
"subquery1"."analysis_id",
"subquery1"."experiment_id",
"subquery1"."sample_id",
"subquery1"."sample_name_short",
"subquery1"."sample_name_abbreviation",
"subquery1"."time_point",
"subquery1"."analysis_type",
"subquery1"."sample_desc"
'''
cmd += '''
FROM "data_stage01_quantification_mqresultstable",
(%s) AS subquery1
''' %(subquery1)
cmd += '''WHERE "data_stage01_quantification_mqresultstable"."used_"
AND "subquery1".sample_name = "data_stage01_quantification_mqresultstable"."sample_name"
'''
if component_name_I:
cmd_q = '''AND "data_stage01_quantification_mqresultstable".component_name =ANY ('{%s}'::text[]) '''%(
self.convert_list2string(component_name_I));
cmd+=cmd_q;
if sample_type_I:
cmd_q = '''AND "data_stage01_quantification_mqresultstable".sample_type =ANY ('{%s}'::text[]) '''%(
self.convert_list2string(sample_type_I));
cmd+=cmd_q;
if acquisition_date_and_time_I and not acquisition_date_and_time_I[0] is None:
cmd_q = '''AND "data_stage01_quantification_mqresultstable".acquisition_date_and_time >= %s'''%(
acquisition_date_and_time_I[0]);
cmd+=cmd_q;
cmd_q = '''AND "data_stage01_quantification_mqresultstable".acquisition_date_and_time <= %s'''%(
acquisition_date_and_time_I[1]);
cmd+=cmd_q;
cmd += '''
ORDER BY "subquery1"."analysis_id" ASC,
"subquery1"."experiment_id" ASC,
"subquery1"."sample_name" ASC,
"data_stage01_quantification_mqresultstable"."component_name" ASC;
'''
result = self.session.execute(cmd);
data = result.fetchall();
data_O = [dict(d) for d in data];
return data_O;
except SQLAlchemyError as e:
print(e);
    # group join of experiment, quantitation_method, and data_stage01_quantification_mqresultstable
    def getGroupJoin_experimentAndQuantitationMethodAndMQResultsTable_experimentID_dataStage01QuantificationMQResultsTable(self,
                experiment_id_I,
                sample_types_I=[],
                sample_names_I=[],
                sample_ids_I=[],
                component_names_I=[],
                ):
        '''Group-join query of experiment, quantitation_method, and
        mqresultstable rows (used_, non-internal-standard) for an experiment.

        INPUT:
        experiment_id_I = experiment id (SQL LIKE pattern; interpolated
            directly into the SQL string -- pass trusted input only)
        sample_types_I / sample_names_I / sample_ids_I / component_names_I =
            optional post-query filters applied in Python via listDict

        OUTPUT:
        a listDict object (project-defined wrapper; its DataFrame is
        populated and filtered in place), or an empty list when the query
        returns no rows, or None on SQLAlchemyError (the error is printed).
        '''
        try:
            cmd = '''SELECT quantitation_method.use_area, subquery1.sample_name, subquery1.sample_type, 
                subquery1.use_calculated_concentration, subquery1.sample_id, subquery1.component_name, 
                subquery1.component_group_name, subquery1.quantitation_method_id, subquery1.acquisition_date_and_time,
                subquery1.calculated_concentration, subquery1.height, subquery1.height_ratio, subquery1.area_ratio, subquery1.conc_units
                FROM quantitation_method, ( 
                SELECT data_stage01_quantification_mqresultstable.sample_name, data_stage01_quantification_mqresultstable.sample_type, 
                data_stage01_quantification_mqresultstable.use_calculated_concentration, sample.sample_id, 
                data_stage01_quantification_mqresultstable.component_name, data_stage01_quantification_mqresultstable.component_group_name, 
                experiment.quantitation_method_id, data_stage01_quantification_mqresultstable.acquisition_date_and_time,
                data_stage01_quantification_mqresultstable.calculated_concentration, data_stage01_quantification_mqresultstable.height,
                data_stage01_quantification_mqresultstable.height_ratio, data_stage01_quantification_mqresultstable.area_ratio,
                data_stage01_quantification_mqresultstable.conc_units
                FROM data_stage01_quantification_mqresultstable, sample, experiment 
                WHERE experiment.id LIKE '%s' AND data_stage01_quantification_mqresultstable.used_ IS true AND data_stage01_quantification_mqresultstable.is_ IS false AND experiment.sample_name LIKE data_stage01_quantification_mqresultstable.sample_name AND experiment.sample_name LIKE sample.sample_name 
                GROUP BY data_stage01_quantification_mqresultstable.sample_name, data_stage01_quantification_mqresultstable.sample_type, 
                data_stage01_quantification_mqresultstable.use_calculated_concentration, sample.sample_id, 
                data_stage01_quantification_mqresultstable.component_name, data_stage01_quantification_mqresultstable.component_group_name, 
                experiment.quantitation_method_id, data_stage01_quantification_mqresultstable.acquisition_date_and_time,
                data_stage01_quantification_mqresultstable.calculated_concentration, data_stage01_quantification_mqresultstable.height,
                data_stage01_quantification_mqresultstable.height_ratio, data_stage01_quantification_mqresultstable.area_ratio,
                data_stage01_quantification_mqresultstable.conc_units
                ORDER BY data_stage01_quantification_mqresultstable.sample_name ASC, sample.sample_id ASC, data_stage01_quantification_mqresultstable.component_name ASC, data_stage01_quantification_mqresultstable.component_group_name ASC 
                ) subquery1 
                WHERE quantitation_method.component_name LIKE subquery1.component_name AND quantitation_method.id LIKE subquery1.quantitation_method_id 
                GROUP BY subquery1.sample_name, subquery1.sample_type, subquery1.use_calculated_concentration, 
                subquery1.sample_id, subquery1.component_name, subquery1.component_group_name, quantitation_method.use_area, subquery1.quantitation_method_id, subquery1.acquisition_date_and_time,
                subquery1.calculated_concentration, subquery1.height, subquery1.height_ratio, subquery1.area_ratio, subquery1.conc_units
                ORDER BY subquery1.sample_name ASC, subquery1.sample_id ASC, subquery1.component_name ASC, subquery1.component_group_name ASC, subquery1.acquisition_date_and_time ASC
                ''' % (experiment_id_I);
            result = self.session.execute(cmd);
            data = result.fetchall();
            data_O = [];
            if data:
                # listDict is project-defined (imported elsewhere in this file);
                # it wraps the rows and supports in-place filtering
                data_O = listDict(record_I=data);
                data_O.convert_record2DataFrame();
                data_O.filterIn_byDictList({
                    'sample_id':sample_ids_I,
                    'sample_name':sample_names_I,
                    'sample_type':sample_types_I,
                    'component_name':component_names_I,
                    });
            return data_O;
        except SQLAlchemyError as e:
            print(e);
# Join between data_stage01_quantification_mqresultstable and data_stage01_quantification_analysis
def getRowsJoin_analysisID_dataStage01QuantificationMQResultsTable(self,
analysis_id_I,
experiment_ids_I=[],
sample_types_I=[],
sample_names_I=[],
sample_ids_I=[],
sample_name_shorts_I=[],
sample_name_abbreviations_I=[],
component_names_I=[],
component_group_names_I=[],
):
'''Query mqresultstable rows by analysis_id'''
try:
data = self.session.query(
data_stage01_quantification_MQResultsTable,
#data_stage01_quantification_analysis.experiment_id,
#data_stage01_quantification_analysis.analysis_id,
#data_stage01_quantification_analysis.sample_name_short,
#data_stage01_quantification_analysis.sample_name_abbreviation,
).filter(
data_stage01_quantification_analysis.analysis_id.like(analysis_id_I),
data_stage01_quantification_analysis.sample_name.like(data_stage01_quantification_MQResultsTable.sample_name),
data_stage01_quantification_MQResultsTable.used_.is_(True),
data_stage01_quantification_MQResultsTable.is_.is_(False),
#).group_by(
).order_by(
data_stage01_quantification_MQResultsTable.acquisition_date_and_time.asc(),
data_stage01_quantification_MQResultsTable.sample_name.asc(),
data_stage01_quantification_MQResultsTable.component_name.asc(),
data_stage01_quantification_MQResultsTable.component_group_name.asc()
).all();
data_O = [d.__repr__dict__() for d in data];
return data_O
except SQLAlchemyError as e:
print(e);
def getRowsJoin_analysisID_dataStage01QuantificationMQResultsTable_limsQuantitationMethod(self,
analysis_id_I
):
'''Query mqresultstable and quantitation_method rows by analysis_id'''
try:
cmd = '''
SELECT subquery3.experiment_id,
subquery3.quantitation_method_id,
quantitation_method.q1_mass,
quantitation_method.q3_mass,
quantitation_method.met_id,
quantitation_method.component_name,
quantitation_method.is_name,
quantitation_method.fit,
quantitation_method.weighting,
quantitation_method.intercept,
quantitation_method.slope,
quantitation_method.correlation,
quantitation_method.use_area,
quantitation_method.lloq,
quantitation_method.uloq,
quantitation_method.points,
subquery3.sample_name,
subquery3.component_name,
subquery3.concentration_ratio,
subquery3.area_ratio,
subquery3.height_ratio
FROM quantitation_method, (
SELECT subquery2.experiment_id,
subquery2.quantitation_method_id,
data_stage01_quantification_mqresultstable.sample_name,
data_stage01_quantification_mqresultstable.component_name,
data_stage01_quantification_mqresultstable.concentration_ratio,
data_stage01_quantification_mqresultstable.area_ratio,
data_stage01_quantification_mqresultstable.height_ratio
FROM data_stage01_quantification_mqresultstable, (
SELECT experiment.quantitation_method_id,
subquery1.experiment_id,
experiment.sample_name
FROM experiment, (
SELECT experiment_id
FROM data_stage01_quantification_analysis
WHERE analysis_id LIKE '%s'
GROUP BY experiment_id
ORDER BY experiment_id ASC
) subquery1
WHERE experiment.id LIKE subquery1.experiment_id
GROUP BY experiment.quantitation_method_id,
subquery1.experiment_id,
experiment.sample_name
ORDER BY experiment.quantitation_method_id ASC
) subquery2
WHERE data_stage01_quantification_mqresultstable.sample_type LIKE '%s' AND
data_stage01_quantification_mqresultstable.sample_name LIKE subquery2.sample_name AND
NOT (data_stage01_quantification_mqresultstable.is_) AND
data_stage01_quantification_mqresultstable.used_
GROUP BY subquery2.experiment_id,
subquery2.quantitation_method_id,
data_stage01_quantification_mqresultstable.sample_name,
data_stage01_quantification_mqresultstable.component_name,
data_stage01_quantification_mqresultstable.concentration_ratio,
data_stage01_quantification_mqresultstable.area_ratio,
data_stage01_quantification_mqresultstable.height_ratio
ORDER BY subquery2.quantitation_method_id ASC,
data_stage01_quantification_mqresultstable.component_name ASC,
data_stage01_quantification_mqresultstable.sample_name ASC
) subquery3
WHERE quantitation_method.id LIKE subquery3.quantitation_method_id AND
subquery3.component_name LIKE quantitation_method.component_name
ORDER BY quantitation_method.id ASC,
quantitation_method.component_name ASC,
subquery3.sample_name
''' %(analysis_id_I,'Standard')
result = self.session.execute(cmd);
data = result.fetchall();
data_O = [dict(d) for d in data];
return data_O;
except SQLAlchemyError as e:
print(e);
| en | 0.401051 | #lims #resources Set the supported tables dict for add rows of data_stage01_quantification_MQResultsTable #d['Index'], #d['Sample Index'], #d['Original Filename'], #d['Sample Name'], #d['Sample ID'], #d['Sample Comment'], #d['Sample Type'], #d['Acquisition Date & Time'], #d['Rack Number'], #d['Plate Number'], #d['Vial Number'], #d['Dilution Factor'], #d['Injection Volume'], #d['Operator Name'], #d['Acq. Method Name'], #d['IS'], #d['Component Name'], #d['Component Index'], #d['Component Comment'], #d['IS Comment'], #d['Mass Info'], #d['IS Mass Info'], #d['IS Name'], #d['Component Group Name'], #d['Conc. Units'], #d['Failed Query'], #d['IS Failed Query'], #d['Peak Comment'], #d['IS Peak Comment'], #d['Actual Concentration'], #d['IS Actual Concentration'], #d['Concentration Ratio'], #d['Expected RT'], #d['IS Expected RT'], #d['Integration Type'], #d['IS Integration Type'], #d['Area'], #d['IS Area'], #d['Corrected Area'], #d['IS Corrected Area'], #d['Area Ratio'], #d['Height'], #d['IS Height'], #d['Corrected Height'], #d['IS Corrected Height'], #d['Height Ratio'], #d['Area / Height'], #d['IS Area / Height'], #d['Corrected Area/Height'], #d['IS Corrected Area/Height'], #d['Region Height'], #d['IS Region Height'], #d['Quality'], #d['IS Quality'], #d['Retention Time'], #d['IS Retention Time'], #d['Start Time'], #d['IS Start Time'], #d['End Time'], #d['IS End Time'], #d['Total Width'], #d['IS Total Width'], #d['Width at 50%'], #d['IS Width at 50%'], #d['Signal / Noise'], #d['IS Signal / Noise'], #d['Baseline Delta / Height'], #d['IS Baseline Delta / Height'], #d['Modified'], #d['Relative RT'], #d['Used'], #d['Calculated Concentration'], #d['Accuracy'], #d['Comment'], #d['Use_Calculated_Concentration'] #d['index_'], #d['sample_index'], #d['original_filename'], #d['sample_name'], #d['sample_id'], #d['sample_comment'], #d['sample_type'], #d['acquisition_date_and_time'], #d['rack_number'], #d['plate_number'], #d['vial_number'], #d['dilution_factor'], 
#d['injection_volume'], #d['operator_name'], #d['acq_method_name'], #d['is_'], #d['component_name'], #d['component_index'], #d['component_comment'], #d['is_comment'], #d['mass_info'], #d['is_mass'], #d['is_name'], #d['component_group_name'], #d['conc_units'], #d['failed_query'], #d['is_failed_query'], #d['peak_comment'], #d['is_peak_comment'], #d['actual_concentration'], #d['is_actual_concentration'], #d['concentration_ratio'], #d['expected_rt'], #d['is_expected_rt'], #d['integration_type'], #d['is_integration_type'], #d['area'], #d['is_area'], #d['corrected_area'], #d['is_corrected_area'], #d['area_ratio'], #d['height'], #d['is_height'], #d['corrected_height'], #d['is_corrected_height'], #d['height_ratio'], #d['area_2_height'], #d['is_area_2_height'], #d['corrected_area2height'], #d['is_corrected_area2height'], #d['region_height'], #d['is_region_height'], #d['quality'], #d['is_quality'], #d['retention_time'], #d['is_retention_time'], #d['start_time'], #d['is_start_time'], #d['end_time'], #d['is_end_time'], #d['total_width'], #d['is_total_width'], #d['width_at_50'], #d['is_width_at_50'], #d['signal_2_noise'], #d['is_signal_2_noise'], #d['baseline_delta_2_height'], #d['is_baseline_delta_2_height'], #d['modified_'], #d['relative_rt'], #d['used_'], #d['calculated_concentration'], #d['accuracy_'], #d['comment_'], #d['use_calculated_concentration'], update rows of data_stage01_quantification_MQResultsTable # query data from data_stage01_quantification_mqresultstable # no other table dependencies Query peak height from sample name and component name NOTE: intended to be used within a for loop Query used from sample name and component name NOTE: intended to be used within a for loop Query peak information from sample name and component name NOTE: intended to be used within a for loop Query peak information from sample name and component name NOTE: intended to be used within a for loop # delete data from data_stage01_quantification_mqresultstable # no other table 
dependencies Delete specific samples from an experiment by their sample ID from sample_physiologicalparameters # query data from data_stage01_quantification_mqresultstable # requires quantitation_method Query data (i.e. concentration, area/peak height ratio) from sample name and component name NOTE: intended to be used within a for loop # check for absolute or relative quantitation (i.e. area/peak height ratio) # check for area or peak height ratio from quantitation_method # query component group names from data_stage01_quantification_mqresultstable Query component group names that are used from the sample name NOTE: intended to be used within a for loop Query component group names that are used from the component name NOTE: intended to be used within a for loop # query sample names from data_stage01_quantification_mqresultstable Query sample names (i.e. unknowns) that are used from the experiment #experiment.exp_type_id == exp_type_I, # query sample names from data_stage01_quantification_mqresultstable Query sample names and sample ids (i.e. unknowns) that are used from the experiment #experiment.exp_type_id == exp_type_I, Query sample names (i.e. unknowns) that are used from the experiment #experiment.exp_type_id == exp_type_I, Query sample names (i.e. unknowns) that are used from the experiment #experiment.exp_type_id == exp_type_I, Query sample names that are used from the experiment Query sample names that are used from the experiment Query sample names that are used from the experiment # query sample ids from data_stage01_quantification_mqresultstable Query sample names (i.e. unknowns) that are used from the experiment Query sample names that are used from the experiment Query sample names (i.e. 
unknowns) that are used from the experiment # query sample name short from data_stage01_quantification_mqresultstable Query sample name short that are used from the experiment Query sample name short that are used from the experiment # query sample name abbreviations from data_stage01_quantification_mqresultstable Query sample name abbreviations that are used from the experiment # query dilutions from data_stage01_quantification_mqresultstable Query dilutions that are used from the experiment Query dilutions that are used from the experiment # query time points from data_stage01_quantification_mqresultstable Query time points that are used from the experiment and sample name abbreviation # query component names from data_stage01_quantification_mqresultstable Query component names that are used and are not IS from the experiment and sample_id Query component names that are used from the experiment and sample_name_abbreviation Query component names that are used and not internal standards from the experiment and sample_name Query component names that are used and not internal standards from the experiment and sample_name Query component names that are used and not internal standards from the experiment and sample_name #,quant_method_id_I Query sample names (i.e. 
unknowns) that are used from the experiment by quantitation_method_id, component_name, and sample_type Query rows from data_stage01_quantification_MQResultsTable SELECT "data_stage01_quantification_analysis"."analysis_id", "data_stage01_quantification_analysis"."experiment_id", "data_stage01_quantification_analysis"."sample_name", "data_stage01_quantification_analysis"."sample_id", "data_stage01_quantification_analysis"."sample_name_short", "data_stage01_quantification_analysis"."sample_name_abbreviation", "data_stage01_quantification_analysis"."time_point", "data_stage01_quantification_analysis"."analysis_type", "data_stage01_quantification_analysis"."sample_desc", "data_stage01_quantification_analysis"."used_", "data_stage01_quantification_analysis"."comment_" FROM "data_stage01_quantification_analysis" WHERE "data_stage01_quantification_analysis"."used_" AND "data_stage01_quantification_analysis".analysis_id =ANY ('{%s}'::text[]) AND "data_stage01_quantification_analysis".experiment_id =ANY ('{%s}'::text[]) AND "data_stage01_quantification_analysis".sample_name =ANY ('{%s}'::text[]) AND "data_stage01_quantification_analysis".sample_id =ANY ('{%s}'::text[]) AND "data_stage01_quantification_analysis".sample_name_abbreviation =ANY ('{%s}'::text[]) ORDER BY "data_stage01_quantification_analysis"."analysis_id" ASC, "data_stage01_quantification_analysis"."experiment_id" ASC, "data_stage01_quantification_analysis"."sample_name" ASC SELECT "data_stage01_quantification_mqresultstable"."id", "data_stage01_quantification_mqresultstable"."index_", "data_stage01_quantification_mqresultstable"."sample_index", "data_stage01_quantification_mqresultstable"."original_filename", "data_stage01_quantification_mqresultstable"."sample_name", "data_stage01_quantification_mqresultstable"."sample_comment", "data_stage01_quantification_mqresultstable"."sample_type", "data_stage01_quantification_mqresultstable"."acquisition_date_and_time", 
"data_stage01_quantification_mqresultstable"."rack_number", "data_stage01_quantification_mqresultstable"."plate_number", "data_stage01_quantification_mqresultstable"."vial_number", "data_stage01_quantification_mqresultstable"."dilution_factor", "data_stage01_quantification_mqresultstable"."injection_volume", "data_stage01_quantification_mqresultstable"."operator_name", "data_stage01_quantification_mqresultstable"."acq_method_name", "data_stage01_quantification_mqresultstable"."is_", "data_stage01_quantification_mqresultstable"."component_name", "data_stage01_quantification_mqresultstable"."component_index", "data_stage01_quantification_mqresultstable"."component_comment", "data_stage01_quantification_mqresultstable"."is_comment", "data_stage01_quantification_mqresultstable"."mass_info", "data_stage01_quantification_mqresultstable"."is_mass", "data_stage01_quantification_mqresultstable"."is_name", "data_stage01_quantification_mqresultstable"."component_group_name", "data_stage01_quantification_mqresultstable"."conc_units", "data_stage01_quantification_mqresultstable"."failed_query", "data_stage01_quantification_mqresultstable"."is_failed_query", "data_stage01_quantification_mqresultstable"."peak_comment", "data_stage01_quantification_mqresultstable"."is_peak_comment", "data_stage01_quantification_mqresultstable"."actual_concentration", "data_stage01_quantification_mqresultstable"."is_actual_concentration", "data_stage01_quantification_mqresultstable"."concentration_ratio", "data_stage01_quantification_mqresultstable"."expected_rt", "data_stage01_quantification_mqresultstable"."is_expected_rt", "data_stage01_quantification_mqresultstable"."integration_type", "data_stage01_quantification_mqresultstable"."is_integration_type", "data_stage01_quantification_mqresultstable"."area", "data_stage01_quantification_mqresultstable"."is_area", "data_stage01_quantification_mqresultstable"."corrected_area", "data_stage01_quantification_mqresultstable"."is_corrected_area", 
"data_stage01_quantification_mqresultstable"."area_ratio", "data_stage01_quantification_mqresultstable"."height", "data_stage01_quantification_mqresultstable"."is_height", "data_stage01_quantification_mqresultstable"."corrected_height", "data_stage01_quantification_mqresultstable"."is_corrected_height", "data_stage01_quantification_mqresultstable"."height_ratio", "data_stage01_quantification_mqresultstable"."area_2_height", "data_stage01_quantification_mqresultstable"."is_area_2_height", "data_stage01_quantification_mqresultstable"."corrected_area2height", "data_stage01_quantification_mqresultstable"."is_corrected_area2height", "data_stage01_quantification_mqresultstable"."region_height", "data_stage01_quantification_mqresultstable"."is_region_height", "data_stage01_quantification_mqresultstable"."quality", "data_stage01_quantification_mqresultstable"."is_quality", "data_stage01_quantification_mqresultstable"."retention_time", "data_stage01_quantification_mqresultstable"."is_retention_time", "data_stage01_quantification_mqresultstable"."start_time", "data_stage01_quantification_mqresultstable"."is_start_time", "data_stage01_quantification_mqresultstable"."end_time", "data_stage01_quantification_mqresultstable"."is_end_time", "data_stage01_quantification_mqresultstable"."total_width", "data_stage01_quantification_mqresultstable"."is_total_width", "data_stage01_quantification_mqresultstable"."width_at_50", "data_stage01_quantification_mqresultstable"."is_width_at_50", "data_stage01_quantification_mqresultstable"."signal_2_noise", "data_stage01_quantification_mqresultstable"."is_signal_2_noise", "data_stage01_quantification_mqresultstable"."baseline_delta_2_height", "data_stage01_quantification_mqresultstable"."is_baseline_delta_2_height", "data_stage01_quantification_mqresultstable"."modified_", "data_stage01_quantification_mqresultstable"."relative_rt", "data_stage01_quantification_mqresultstable"."used_", 
"data_stage01_quantification_mqresultstable"."calculated_concentration", "data_stage01_quantification_mqresultstable"."accuracy_", "data_stage01_quantification_mqresultstable"."comment_", "data_stage01_quantification_mqresultstable"."use_calculated_concentration", "data_stage01_quantification_mqresultstable"."start_time_at_5", "data_stage01_quantification_mqresultstable"."end_time_at_5", "data_stage01_quantification_mqresultstable"."width_at_5", "data_stage01_quantification_mqresultstable"."start_time_at_10", "data_stage01_quantification_mqresultstable"."end_time_at_10", "data_stage01_quantification_mqresultstable"."width_at_10", "data_stage01_quantification_mqresultstable"."slope_of_baseline", "data_stage01_quantification_mqresultstable"."tailing_factor", "data_stage01_quantification_mqresultstable"."asymmetry_factor", "data_stage01_quantification_mqresultstable"."ion_ratio", "data_stage01_quantification_mqresultstable"."expected_ion_ratio", "data_stage01_quantification_mqresultstable"."points_across_baseline", "data_stage01_quantification_mqresultstable"."points_across_half_height", "subquery1"."analysis_id", "subquery1"."experiment_id", "subquery1"."sample_id", "subquery1"."sample_name_short", "subquery1"."sample_name_abbreviation", "subquery1"."time_point", "subquery1"."analysis_type", "subquery1"."sample_desc" FROM "data_stage01_quantification_mqresultstable", (%s) AS subquery1 WHERE "data_stage01_quantification_mqresultstable"."used_" AND "subquery1".sample_name = "data_stage01_quantification_mqresultstable"."sample_name" AND "data_stage01_quantification_mqresultstable".component_name =ANY ('{%s}'::text[]) AND "data_stage01_quantification_mqresultstable".sample_type =ANY ('{%s}'::text[]) AND "data_stage01_quantification_mqresultstable".acquisition_date_and_time >= %s AND "data_stage01_quantification_mqresultstable".acquisition_date_and_time <= %s ORDER BY "subquery1"."analysis_id" ASC, "subquery1"."experiment_id" ASC, "subquery1"."sample_name" ASC, 
"data_stage01_quantification_mqresultstable"."component_name" ASC; # query sample names from data_stage01_quantification_mqresultstable Query sample names and sample ids (i.e. unknowns) that are used from the experiment SELECT quantitation_method.use_area, subquery1.sample_name, subquery1.sample_type, subquery1.use_calculated_concentration, subquery1.sample_id, subquery1.component_name, subquery1.component_group_name, subquery1.quantitation_method_id, subquery1.acquisition_date_and_time, subquery1.calculated_concentration, subquery1.height, subquery1.height_ratio, subquery1.area_ratio, subquery1.conc_units FROM quantitation_method, ( SELECT data_stage01_quantification_mqresultstable.sample_name, data_stage01_quantification_mqresultstable.sample_type, data_stage01_quantification_mqresultstable.use_calculated_concentration, sample.sample_id, data_stage01_quantification_mqresultstable.component_name, data_stage01_quantification_mqresultstable.component_group_name, experiment.quantitation_method_id, data_stage01_quantification_mqresultstable.acquisition_date_and_time, data_stage01_quantification_mqresultstable.calculated_concentration, data_stage01_quantification_mqresultstable.height, data_stage01_quantification_mqresultstable.height_ratio, data_stage01_quantification_mqresultstable.area_ratio, data_stage01_quantification_mqresultstable.conc_units FROM data_stage01_quantification_mqresultstable, sample, experiment WHERE experiment.id LIKE '%s' AND data_stage01_quantification_mqresultstable.used_ IS true AND data_stage01_quantification_mqresultstable.is_ IS false AND experiment.sample_name LIKE data_stage01_quantification_mqresultstable.sample_name AND experiment.sample_name LIKE sample.sample_name GROUP BY data_stage01_quantification_mqresultstable.sample_name, data_stage01_quantification_mqresultstable.sample_type, data_stage01_quantification_mqresultstable.use_calculated_concentration, sample.sample_id, data_stage01_quantification_mqresultstable.component_name, 
data_stage01_quantification_mqresultstable.component_group_name, experiment.quantitation_method_id, data_stage01_quantification_mqresultstable.acquisition_date_and_time, data_stage01_quantification_mqresultstable.calculated_concentration, data_stage01_quantification_mqresultstable.height, data_stage01_quantification_mqresultstable.height_ratio, data_stage01_quantification_mqresultstable.area_ratio, data_stage01_quantification_mqresultstable.conc_units ORDER BY data_stage01_quantification_mqresultstable.sample_name ASC, sample.sample_id ASC, data_stage01_quantification_mqresultstable.component_name ASC, data_stage01_quantification_mqresultstable.component_group_name ASC ) subquery1 WHERE quantitation_method.component_name LIKE subquery1.component_name AND quantitation_method.id LIKE subquery1.quantitation_method_id GROUP BY subquery1.sample_name, subquery1.sample_type, subquery1.use_calculated_concentration, subquery1.sample_id, subquery1.component_name, subquery1.component_group_name, quantitation_method.use_area, subquery1.quantitation_method_id, subquery1.acquisition_date_and_time, subquery1.calculated_concentration, subquery1.height, subquery1.height_ratio, subquery1.area_ratio, subquery1.conc_units ORDER BY subquery1.sample_name ASC, subquery1.sample_id ASC, subquery1.component_name ASC, subquery1.component_group_name ASC, subquery1.acquisition_date_and_time ASC #data = self.session.query(data_stage01_quantification_MQResultsTable.sample_name, # data_stage01_quantification_MQResultsTable.sample_type, # data_stage01_quantification_MQResultsTable.use_calculated_concentration, # sample.sample_id, # data_stage01_quantification_MQResultsTable.component_name, # data_stage01_quantification_MQResultsTable.component_group_name, # #quantitation_method.use_area, # experiment.quantitation_method_id # ).filter( # experiment.id.like(experiment_id_I), # data_stage01_quantification_MQResultsTable.used_.is_(True), # data_stage01_quantification_MQResultsTable.is_.is_(False), # 
experiment.sample_name.like(data_stage01_quantification_MQResultsTable.sample_name), # experiment.sample_name.like(sample.sample_name), # #data_stage01_quantification_MQResultsTable.component_name.like(quantitation_method.component_name), # #experiment.quantitation_method_id.like(quantitation_method.id) # ).group_by( # data_stage01_quantification_MQResultsTable.sample_name, # data_stage01_quantification_MQResultsTable.sample_type, # data_stage01_quantification_MQResultsTable.use_calculated_concentration, # sample.sample_id, # data_stage01_quantification_MQResultsTable.component_name, # data_stage01_quantification_MQResultsTable.component_group_name, # #quantitation_method.use_area, # experiment.quantitation_method_id # ).order_by( # data_stage01_quantification_MQResultsTable.sample_name.asc(), # sample.sample_id.asc(), # data_stage01_quantification_MQResultsTable.component_name.asc(), # data_stage01_quantification_MQResultsTable.component_group_name.asc() # ).all(); # Join between data_stage01_quantification_mqresultstable and data_stage01_quantification_analysis Query mqresultstable rows by analysis_id #data_stage01_quantification_analysis.experiment_id, #data_stage01_quantification_analysis.analysis_id, #data_stage01_quantification_analysis.sample_name_short, #data_stage01_quantification_analysis.sample_name_abbreviation, #).group_by( Query mqresultstable and quantitation_method rows by analysis_id SELECT subquery3.experiment_id, subquery3.quantitation_method_id, quantitation_method.q1_mass, quantitation_method.q3_mass, quantitation_method.met_id, quantitation_method.component_name, quantitation_method.is_name, quantitation_method.fit, quantitation_method.weighting, quantitation_method.intercept, quantitation_method.slope, quantitation_method.correlation, quantitation_method.use_area, quantitation_method.lloq, quantitation_method.uloq, quantitation_method.points, subquery3.sample_name, subquery3.component_name, subquery3.concentration_ratio, subquery3.area_ratio, 
subquery3.height_ratio FROM quantitation_method, ( SELECT subquery2.experiment_id, subquery2.quantitation_method_id, data_stage01_quantification_mqresultstable.sample_name, data_stage01_quantification_mqresultstable.component_name, data_stage01_quantification_mqresultstable.concentration_ratio, data_stage01_quantification_mqresultstable.area_ratio, data_stage01_quantification_mqresultstable.height_ratio FROM data_stage01_quantification_mqresultstable, ( SELECT experiment.quantitation_method_id, subquery1.experiment_id, experiment.sample_name FROM experiment, ( SELECT experiment_id FROM data_stage01_quantification_analysis WHERE analysis_id LIKE '%s' GROUP BY experiment_id ORDER BY experiment_id ASC ) subquery1 WHERE experiment.id LIKE subquery1.experiment_id GROUP BY experiment.quantitation_method_id, subquery1.experiment_id, experiment.sample_name ORDER BY experiment.quantitation_method_id ASC ) subquery2 WHERE data_stage01_quantification_mqresultstable.sample_type LIKE '%s' AND data_stage01_quantification_mqresultstable.sample_name LIKE subquery2.sample_name AND NOT (data_stage01_quantification_mqresultstable.is_) AND data_stage01_quantification_mqresultstable.used_ GROUP BY subquery2.experiment_id, subquery2.quantitation_method_id, data_stage01_quantification_mqresultstable.sample_name, data_stage01_quantification_mqresultstable.component_name, data_stage01_quantification_mqresultstable.concentration_ratio, data_stage01_quantification_mqresultstable.area_ratio, data_stage01_quantification_mqresultstable.height_ratio ORDER BY subquery2.quantitation_method_id ASC, data_stage01_quantification_mqresultstable.component_name ASC, data_stage01_quantification_mqresultstable.sample_name ASC ) subquery3 WHERE quantitation_method.id LIKE subquery3.quantitation_method_id AND subquery3.component_name LIKE quantitation_method.component_name ORDER BY quantitation_method.id ASC, quantitation_method.component_name ASC, subquery3.sample_name | 1.789865 | 2 |
cogs/practical.py | Saphielle-Akiyama/testing-crew | 21 | 6619242 | """
MIT License
Copyright (c) 2021 - µYert
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import datetime
from typing import Optional, List, Tuple
import config
import main
from discord.ext import commands, menus
from packages import aiogooglesearch, aiomagmachain, aiotranslator, aioweather
from utils import converters
from utils.containers import DieEval
class DiceListMenu(menus.ListPageSource):
    """Paginated view over a list of dice results, five entries per page."""

    def __init__(self, die):
        self.die = die
        super().__init__(self.die, per_page=5)

    async def format_page(self, menu, page):
        # Number the entries with their absolute position in the full list,
        # not their position within the current page.
        offset = menu.current_page * self.per_page
        lines = []
        for position, entry in enumerate(page, start=offset):
            lines.append(f"{position} : {entry}")
        return '\n'.join(lines)
class Practical(commands.Cog):
    """Everyday utility commands: weather, translation, google search,
    website screenshots and dice rolling."""

    # Bounds handed to DieEval.generate when rolling random dice; the owner
    # can adjust them at runtime through the `dice settings` subcommand.
    settings = {
        'num_min': 1,
        'num_max': 20,
        'size_min': 2,
        'size_max': 20,
        'mod_min': 0,
        'mod_max': 20
    }

    def __init__(self, bot):
        self.bot = bot
        # Every helper client shares the bot's HTTP session.
        self.aioweather = aioweather.AioWeather(
            session=bot.session, api_key=config.WEATHER_TOKEN
        )
        self.aiotranslator = aiotranslator.AioTranslator(session=bot.session)
        self.aiogoogle = aiogooglesearch.AioSearchEngine(
            api_keys=config.GOOGLE_TOKENS, session=bot.session
        )
        self.aioscreen = aiomagmachain.AioMagmaChain(
            session=bot.session, google_client=self.aiogoogle
        )

    def make_dice(self, iters: int, *args) -> List[str]:
        """Roll `iters` dice built from `args` and return their formatted results.

        Fixed: the previous implementation created each die inside the loop
        but never collected a result or returned anything, despite the
        List[str] annotation.
        """
        return [converters.DieEval(*args).print() for _ in range(iters)]

    @commands.command(name="weather")
    @commands.cooldown(1, 30, type=commands.BucketType.channel)
    async def weather(self, ctx: main.NewCtx, *, city: str):
        """Displays the weather at a particular location"""
        if not (embed := ctx.cached_data):
            res = await self.aioweather.fetch_weather(city)
            embed = self.aioweather.format_weather(res)
            # Weather changes slowly; reuse the formatted embed for 10 minutes.
            ctx.add_to_cache(value=embed, timeout=datetime.timedelta(minutes=10))
        await ctx.send(embed=embed)

    @commands.group(name="translate", invoke_without_command=True)
    async def translate(
        self,
        ctx: main.NewCtx,
        language: Optional[aiotranslator.to_language] = "auto",
        *,
        text: str,
    ):
        """Translates from another language"""
        if not (embed := ctx.cached_data):
            # the embed is implicitely cached there, since it's used by both subcommnands
            embed = await self.aiotranslator.do_translation(
                ctx=ctx, text=text, translation_kwarg={"src": language}
            )
        await ctx.send(embed=embed)

    @translate.command(name="to")
    async def translate_to(
        self, ctx: main.NewCtx, language: aiotranslator.to_language, *, text: str
    ):
        """Translate something to another language"""
        if not (embed := ctx.cached_data):
            embed = await self.aiotranslator.do_translation(
                ctx=ctx, text=text, translation_kwarg={"dest": language}
            )
        await ctx.send(embed=embed)

    @commands.group(name="google", invoke_without_command=True)
    @commands.cooldown(1, 15, commands.BucketType.user)
    async def google(self, ctx: main.NewCtx, *, query: str):
        """Searches something on google"""
        # NSFW channels and SFW channels must not share cache entries.
        is_nsfw = ctx.channel.is_nsfw()
        ctx.cache_key += [is_nsfw]
        if not (source := ctx.cached_data):
            source = await self.aiogoogle.do_search(ctx, query=query, is_nsfw=is_nsfw)
        menu = menus.MenuPages(source, delete_message_after=True)
        await menu.start(ctx)

    @google.command(name="image", aliases=["-i"])
    @commands.cooldown(1, 15, commands.BucketType.user)
    async def google_image(self, ctx: main.NewCtx, *, query: str):
        """Searches an image on google"""
        is_nsfw = ctx.channel.is_nsfw()
        ctx.cache_key += [is_nsfw]
        if not (source := ctx.cached_data):
            source = await self.aiogoogle.do_search(
                ctx, query=query, is_nsfw=is_nsfw, image_search=True
            )
        menu = menus.MenuPages(source, clear_reactions_after=True)
        await menu.start(ctx)

    @commands.command(name="screenshot")
    @commands.cooldown(1, 15, commands.BucketType.user)
    async def screenshot(self, ctx: main.NewCtx, url: str):
        """Screenshots a website"""
        is_nsfw = ctx.channel.is_nsfw()
        ctx.cache_key += [is_nsfw]
        if not (embed := ctx.cached_data):
            # Validate the target in SFW channels, or when the string does not
            # even look like a domain (no dot).
            if not is_nsfw or len(url.split(".")) < 2:
                url = await self.aioscreen.check_url(url=url, is_nsfw=is_nsfw)
            response = await self.aioscreen.fetch_snapshot(url)
            embed = self.aioscreen.format_snapshot(response=response, is_nsfw=is_nsfw)
            ctx.add_to_cache(embed, timeout=datetime.timedelta(minutes=5))
        await ctx.send(embed=embed)

    @commands.group(invoke_without_command=True, aliases=['d'])
    async def dice(self, ctx, *dice: converters.Dice):
        """Takes the typical die+/-mod format to output the results"""
        results = [die.print() for die in dice]
        die_menu = menus.MenuPages(source=DiceListMenu(results), clear_reactions_after=True)
        await die_menu.start(ctx)

    @dice.command(aliases=['make', 'generate'])
    async def gen_rand(self, ctx, number: int):
        """Generates <number> of die and rolls them"""
        if 1 <= number <= 25:
            res = [DieEval.generate(**self.settings) for _ in range(number)]
            out = [die.print() for die in res]
            die_menu = menus.MenuPages(source=DiceListMenu(out), clear_reactions_after=True)
            return await die_menu.start(ctx)
        raise commands.BadArgument('Number of different die formats to roll must be between 1 and 25 inclusive')

    @commands.is_owner()
    @dice.command(aliases=['settings', 'bounds'])
    async def _setting(self, ctx, settings: commands.Greedy[int], *names):
        """Owner only way to toggle the generator settings for die, to make them lower or higher"""
        if len(settings) == len(names):
            # Pair each name with its value positionally. Fixed: the previous
            # cross-product comprehension ({k: v for k in names for v in settings})
            # assigned the same (last) value to every key.
            new = dict(zip(names, settings))
            try:
                self.settings.update(new)
            except (KeyError,):
                return await ctx.send('Snek messed up, bug him, issa KeyError though')
            return await ctx.send('\n'.join(f'{k} set to {v}' for k, v in new.items()))
        raise commands.BadArgument("Number of settings and number of names don't match.")
def setup(bot):
    """Extension entry point: register the Practical cog with the bot."""
    cog = Practical(bot)
    bot.add_cog(cog)
| """
MIT License
Copyright (c) 2021 - µYert
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import datetime
from typing import Optional, List, Tuple
import config
import main
from discord.ext import commands, menus
from packages import aiogooglesearch, aiomagmachain, aiotranslator, aioweather
from utils import converters
from utils.containers import DieEval
class DiceListMenu(menus.ListPageSource):
    """Paginated menu source showing five dice results per page."""

    def __init__(self, die):
        self.die = die
        super().__init__(self.die, per_page=5)

    async def format_page(self, menu, page):
        # Number entries continuously across pages, not per page.
        start = menu.current_page * self.per_page
        lines = [f"{count} : {item}" for count, item in enumerate(page, start=start)]
        return '\n'.join(lines)
class Practical(commands.Cog):
    """Practical utility commands: weather, translation, google search,
    website screenshots and dice rolling."""

    # Bounds handed to DieEval.generate() when creating random dice
    # (count, die size and flat modifier; min/max for each).
    settings = {
        'num_min' : 1,
        'num_max' : 20,
        'size_min' : 2,
        'size_max' : 20,
        'mod_min' : 0,
        'mod_max' : 20
    }

    def __init__(self, bot):
        self.bot = bot
        # All API helpers share the bot's aiohttp session.
        self.aioweather = aioweather.AioWeather(
            session=bot.session, api_key=config.WEATHER_TOKEN
        )
        self.aiotranslator = aiotranslator.AioTranslator(session=bot.session)
        self.aiogoogle = aiogooglesearch.AioSearchEngine(
            api_keys=config.GOOGLE_TOKENS, session=bot.session
        )
        self.aioscreen = aiomagmachain.AioMagmaChain(
            session=bot.session, google_client=self.aiogoogle
        )

    def make_dice(self, iters, *args) -> List[str]:
        """Build *iters* dice from *args* and return their printed rolls.

        BUGFIX: the old body created a die on each iteration but never
        collected or returned anything, so it always returned None despite
        the List[str] annotation.
        """
        # NOTE(review): assumes converters.DieEval(*args) yields an object
        # exposing .print(), like the DieEval instances used below -- confirm.
        return [converters.DieEval(*args).print() for _ in range(iters)]

    @commands.command(name="weather")
    @commands.cooldown(1, 30, type=commands.BucketType.channel)
    async def weather(self, ctx: main.NewCtx, *, city: str):
        """Displays the weather at a particular location"""
        if not (embed := ctx.cached_data):
            res = await self.aioweather.fetch_weather(city)
            embed = self.aioweather.format_weather(res)
            # Weather changes slowly; keep the formatted embed for 10 minutes.
            ctx.add_to_cache(value=embed, timeout=datetime.timedelta(minutes=10))
        await ctx.send(embed=embed)

    @commands.group(name="translate", invoke_without_command=True)
    async def translate(
        self,
        ctx: main.NewCtx,
        language: Optional[aiotranslator.to_language] = "auto",
        *,
        text: str,
    ):
        """Translates from another language"""
        if not (embed := ctx.cached_data):
            # The embed is implicitly cached there, since it's used by both subcommands.
            embed = await self.aiotranslator.do_translation(
                ctx=ctx, text=text, translation_kwarg={"src": language}
            )
        await ctx.send(embed=embed)

    @translate.command(name="to")
    async def translate_to(
        self, ctx: main.NewCtx, language: aiotranslator.to_language, *, text: str
    ):
        """Translate something to another language"""
        if not (embed := ctx.cached_data):
            embed = await self.aiotranslator.do_translation(
                ctx=ctx, text=text, translation_kwarg={"dest": language}
            )
        await ctx.send(embed=embed)

    @commands.group(name="google", invoke_without_command=True)
    @commands.cooldown(1, 15, commands.BucketType.user)
    async def google(self, ctx: main.NewCtx, *, query: str):
        """Searches something on google"""
        # NSFW status is part of the cache key so SFW channels never reuse
        # results fetched for NSFW channels (and vice versa).
        is_nsfw = ctx.channel.is_nsfw()
        ctx.cache_key += [is_nsfw]
        if not (source := ctx.cached_data):
            source = await self.aiogoogle.do_search(ctx, query=query, is_nsfw=is_nsfw)
        menu = menus.MenuPages(source, delete_message_after=True)
        await menu.start(ctx)

    @google.command(name="image", aliases=["-i"])
    @commands.cooldown(1, 15, commands.BucketType.user)
    async def google_image(self, ctx: main.NewCtx, *, query: str):
        """Searches an image on google"""
        is_nsfw = ctx.channel.is_nsfw()
        ctx.cache_key += [is_nsfw]
        if not (source := ctx.cached_data):
            source = await self.aiogoogle.do_search(
                ctx, query=query, is_nsfw=is_nsfw, image_search=True
            )
        menu = menus.MenuPages(source, clear_reactions_after=True)
        await menu.start(ctx)

    @commands.command(name="screenshot")
    @commands.cooldown(1, 15, commands.BucketType.user)
    async def screenshot(self, ctx: main.NewCtx, url: str):
        """Screenshots a website"""
        is_nsfw = ctx.channel.is_nsfw()
        ctx.cache_key += [is_nsfw]
        if not (embed := ctx.cached_data):
            # Re-check the URL when the channel is SFW or when the argument
            # contains no dot and therefore cannot be a hostname.
            if not is_nsfw or len(url.split(".")) < 2:
                url = await self.aioscreen.check_url(url=url, is_nsfw=is_nsfw)
            response = await self.aioscreen.fetch_snapshot(url)
            embed = self.aioscreen.format_snapshot(response=response, is_nsfw=is_nsfw)
            ctx.add_to_cache(embed, timeout=datetime.timedelta(minutes=5))
        await ctx.send(embed=embed)

    @commands.group(invoke_without_command=True, aliases=['d'])
    async def dice(self, ctx, *dice: converters.Dice):
        """Takes the typical die+/-mod format to output the results"""
        results = [die.print() for die in dice]
        die_menu = menus.MenuPages(source=DiceListMenu(results), clear_reactions_after=True)
        await die_menu.start(ctx)

    @dice.command(aliases=['make', 'generate'])
    async def gen_rand(self, ctx, number: int):
        """Generates <number> of die and rolls them"""
        if 1 <= number <= 25:
            res = [DieEval.generate(**self.settings) for _ in range(number)]
            out = [die.print() for die in res]
            die_menu = menus.MenuPages(source=DiceListMenu(out), clear_reactions_after=True)
            return await die_menu.start(ctx)
        raise commands.BadArgument('Number of different die formats to roll must be between 1 and 25 inclusive')

    @commands.is_owner()
    @dice.command(aliases=['settings', 'bounds'])
    async def _setting(self, ctx, settings: commands.Greedy[int], *names):
        """Owner only way to toggle the generator settings for die, to make them lower or higher"""
        # Each positional integer pairs with the setting name at the same index.
        if len(settings) != len(names):
            raise commands.BadArgument("Number of settings and number of names don't match.")
        # BUGFIX: the old comprehension {k: v for k in names for v in settings}
        # mapped *every* name to the last value; zip pairs them positionally.
        new = dict(zip(names, settings))
        # Reject unknown setting names explicitly: dict.update never raises
        # KeyError, so the old `except KeyError` handler was dead code and
        # unknown names were silently inserted into the settings dict.
        if any(key not in self.settings for key in new):
            return await ctx.send('Snek messed up, bug him, issa KeyError though')
        self.settings.update(new)
        return await ctx.send('\n'.join([f'{k} set to {v}' for k, v in new.items()]))
def setup(bot):
    """discord.py extension loader hook: attach the Practical cog."""
    bot.add_cog(Practical(bot))
| en | 0.812318 | MIT License Copyright (c) 2021 - µYert Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. Displays the weather at a particular location Translates from another language # the embed is implicitely cached there, since it's used by both subcommnands Translate something to another language Searches something on google Searches an image on google Screenshots a website Takes the typical die+/-mod format to output the results Generates <number> of die and rolls them Owner only way to toggle the generator settings for die, to make them lower or higher | 1.984697 | 2 |
topology_construction/topo_utils.py | KofClubs/DeepMG | 27 | 6619243 | <filename>topology_construction/topo_utils.py
from tptk.common.spatial_func import SPoint
import numpy as np
def unit_vector(vector):
""" Returns the unit vector of the vector. """
return vector / np.linalg.norm(vector)
def angle_between(v1, v2):
v1_u = unit_vector(v1)
v2_u = unit_vector(v2)
return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))
def magnitude(vector):
return np.sqrt(np.dot(np.array(vector),np.array(vector)))
def norm(vector):
return np.array(vector)/magnitude(np.array(vector))
def ccw(A,B,C):
return (C.lat-A.lat) * (B.lng-A.lng) > (B.lat-A.lat) * (C.lng-A.lng)
def is_line_line_intersected(A, B, C, D):
return ccw(A, C, D) != ccw(B, C, D) and ccw(A, B, C) != ccw(A, B, D)
def line_ray_intersection_test(o, f, a, b):
"""
:param o: ray original point SPoint
:param f: ray from point SPoint ray: f->o
:param a: line segment point 1 SPoint
:param b: line segment point 2 SPoint
:return:
"""
o = np.array((o.lng, o.lat), dtype=np.float)
dir = np.array(norm((o[0] - f.lng, o[1] - f.lat)), dtype=np.float)
a = np.array((a.lng, a.lat), dtype=np.float)
b = np.array((b.lng, b.lat), dtype=np.float)
v1 = o - a
v2 = b - a
v3 = np.asarray([-dir[1], dir[0]])
t1 = np.cross(v2, v1) / np.dot(v2, v3)
t2 = np.dot(v1, v3) / np.dot(v2, v3)
# t1=inf parallel
if t1 == np.inf or t1 < 0:
# ray has no intersection with line segment
return None
else:
pt = o + t1 * dir
# 1. t2<0, in extension of a; 2. t2 in [0,1], within ab; 3. t2>1, in extension of b
return t2, SPoint(pt[1], pt[0])
| <filename>topology_construction/topo_utils.py
from tptk.common.spatial_func import SPoint
import numpy as np
def unit_vector(vector):
""" Returns the unit vector of the vector. """
return vector / np.linalg.norm(vector)
def angle_between(v1, v2):
v1_u = unit_vector(v1)
v2_u = unit_vector(v2)
return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))
def magnitude(vector):
return np.sqrt(np.dot(np.array(vector),np.array(vector)))
def norm(vector):
return np.array(vector)/magnitude(np.array(vector))
def ccw(A,B,C):
return (C.lat-A.lat) * (B.lng-A.lng) > (B.lat-A.lat) * (C.lng-A.lng)
def is_line_line_intersected(A, B, C, D):
return ccw(A, C, D) != ccw(B, C, D) and ccw(A, B, C) != ccw(A, B, D)
def line_ray_intersection_test(o, f, a, b):
"""
:param o: ray original point SPoint
:param f: ray from point SPoint ray: f->o
:param a: line segment point 1 SPoint
:param b: line segment point 2 SPoint
:return:
"""
o = np.array((o.lng, o.lat), dtype=np.float)
dir = np.array(norm((o[0] - f.lng, o[1] - f.lat)), dtype=np.float)
a = np.array((a.lng, a.lat), dtype=np.float)
b = np.array((b.lng, b.lat), dtype=np.float)
v1 = o - a
v2 = b - a
v3 = np.asarray([-dir[1], dir[0]])
t1 = np.cross(v2, v1) / np.dot(v2, v3)
t2 = np.dot(v1, v3) / np.dot(v2, v3)
# t1=inf parallel
if t1 == np.inf or t1 < 0:
# ray has no intersection with line segment
return None
else:
pt = o + t1 * dir
# 1. t2<0, in extension of a; 2. t2 in [0,1], within ab; 3. t2>1, in extension of b
return t2, SPoint(pt[1], pt[0])
| en | 0.716315 | Returns the unit vector of the vector. :param o: ray original point SPoint :param f: ray from point SPoint ray: f->o :param a: line segment point 1 SPoint :param b: line segment point 2 SPoint :return: # t1=inf parallel # ray has no intersection with line segment # 1. t2<0, in extension of a; 2. t2 in [0,1], within ab; 3. t2>1, in extension of b | 2.466686 | 2 |
next.py | ericgreenwell/turret | 0 | 6619244 | <gh_stars>0
#!/usr/bin/env python
import pygame
import time
from datetime import datetime
import pygame.camera
from pygame.locals import *
import cv2
# Define some colors
BLACK = ( 0, 0, 0)
WHITE = ( 255, 255, 255)
RED = ( 255, 0, 0)
DEVICE = '/dev/video0'
SIZE = (640, 480)
FILENAME = 'capture.png'
# This is a simple class that will help us print to the screen
# It has nothing to do with the joysticks, just outputting the
# information.
class TextPrint:
def __init__(self):
self.reset()
self.font = pygame.font.Font(None, 20)
def printy(self, display, textString):
textBitmap = self.font.render(textString, True, RED)
display.blit(textBitmap, [self.x, self.y])
self.y += self.line_height
def reset(self):
self.x = 10
self.y = 10
self.line_height = 15
def indent(self):
self.x += 10
def unindent(self):
self.x -= 10
pygame.init()
pygame.camera.init()
# Set the width and height of the screen [width,height]
#size = [800, 480]
#screen = pygame.display.set_mode(size)
display = pygame.display.set_mode(SIZE,0)
camera = pygame.camera.Camera(DEVICE, SIZE)
camera.start()
screen = pygame.surface.Surface((640,480), 0, display)
pygame.display.set_caption("Range Capture")
#Loop until the user clicks the close button.
done = False
# Used to manage how fast the screen updates
clock = pygame.time.Clock()
# Get ready to print
textPrint = TextPrint()
count = 0
# -------- Main Program Loop -----------
while done==False:
# EVENT PROCESSING STEP
for event in pygame.event.get(): # User did something
if event.type == pygame.QUIT: # If user clicked close
done=True # Flag that we are done so we exit this loop
# DRAWING STEP
# First, clear the screen to white. Don't put other drawing commands
# above this, or they will be erased with this command.
screen.fill(WHITE)
display.fill(WHITE)
textPrint.reset()
screen = camera.get_image(screen)
#cv2.putText(screen, "time:{}".format(datetime.now()), (0,0), cv2.FONT_HERSHEY_SIMPLEX,2,255)
display.blit(screen, (0,0))
# Get count of joysticks
#joystick_count = pygame.joystick.get_count()
textPrint.printy(display, "Time: {}".format(datetime.now()))
textPrint.printy(display, "Count:{}".format(count))
# ALL CODE TO DRAW SHOULD GO ABOVE THIS COMMENT
# Go ahead and update the screen with what we've drawn.
pygame.display.flip()
# Limit to 20 frames per second
clock.tick(20)
count += 1
# Close the window and quit.
# If you forget this line, the program will 'hang'
# on exit if running from IDLE.
pygame.quit ()
camera.stop()
| #!/usr/bin/env python
import pygame
import time
from datetime import datetime
import pygame.camera
from pygame.locals import *
import cv2
# Define some colors
BLACK = ( 0, 0, 0)
WHITE = ( 255, 255, 255)
RED = ( 255, 0, 0)
DEVICE = '/dev/video0'
SIZE = (640, 480)
FILENAME = 'capture.png'
# This is a simple class that will help us print to the screen
# It has nothing to do with the joysticks, just outputting the
# information.
class TextPrint:
def __init__(self):
self.reset()
self.font = pygame.font.Font(None, 20)
def printy(self, display, textString):
textBitmap = self.font.render(textString, True, RED)
display.blit(textBitmap, [self.x, self.y])
self.y += self.line_height
def reset(self):
self.x = 10
self.y = 10
self.line_height = 15
def indent(self):
self.x += 10
def unindent(self):
self.x -= 10
pygame.init()
pygame.camera.init()
# Set the width and height of the screen [width,height]
#size = [800, 480]
#screen = pygame.display.set_mode(size)
display = pygame.display.set_mode(SIZE,0)
camera = pygame.camera.Camera(DEVICE, SIZE)
camera.start()
screen = pygame.surface.Surface((640,480), 0, display)
pygame.display.set_caption("Range Capture")
#Loop until the user clicks the close button.
done = False
# Used to manage how fast the screen updates
clock = pygame.time.Clock()
# Get ready to print
textPrint = TextPrint()
count = 0
# -------- Main Program Loop -----------
while done==False:
# EVENT PROCESSING STEP
for event in pygame.event.get(): # User did something
if event.type == pygame.QUIT: # If user clicked close
done=True # Flag that we are done so we exit this loop
# DRAWING STEP
# First, clear the screen to white. Don't put other drawing commands
# above this, or they will be erased with this command.
screen.fill(WHITE)
display.fill(WHITE)
textPrint.reset()
screen = camera.get_image(screen)
#cv2.putText(screen, "time:{}".format(datetime.now()), (0,0), cv2.FONT_HERSHEY_SIMPLEX,2,255)
display.blit(screen, (0,0))
# Get count of joysticks
#joystick_count = pygame.joystick.get_count()
textPrint.printy(display, "Time: {}".format(datetime.now()))
textPrint.printy(display, "Count:{}".format(count))
# ALL CODE TO DRAW SHOULD GO ABOVE THIS COMMENT
# Go ahead and update the screen with what we've drawn.
pygame.display.flip()
# Limit to 20 frames per second
clock.tick(20)
count += 1
# Close the window and quit.
# If you forget this line, the program will 'hang'
# on exit if running from IDLE.
pygame.quit ()
camera.stop() | en | 0.78643 | #!/usr/bin/env python # Define some colors # This is a simple class that will help us print to the screen # It has nothing to do with the joysticks, just outputting the # information. # Set the width and height of the screen [width,height] #size = [800, 480] #screen = pygame.display.set_mode(size) #Loop until the user clicks the close button. # Used to manage how fast the screen updates # Get ready to print # -------- Main Program Loop ----------- # EVENT PROCESSING STEP # User did something # If user clicked close # Flag that we are done so we exit this loop # DRAWING STEP # First, clear the screen to white. Don't put other drawing commands # above this, or they will be erased with this command. #cv2.putText(screen, "time:{}".format(datetime.now()), (0,0), cv2.FONT_HERSHEY_SIMPLEX,2,255) # Get count of joysticks #joystick_count = pygame.joystick.get_count() # ALL CODE TO DRAW SHOULD GO ABOVE THIS COMMENT # Go ahead and update the screen with what we've drawn. # Limit to 20 frames per second # Close the window and quit. # If you forget this line, the program will 'hang' # on exit if running from IDLE. | 3.340505 | 3 |
xblock/utils/image_processing.py | ImperialNLP/X-Block | 0 | 6619245 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# File : image_processing.py
# Author : <NAME> <<EMAIL>>
# Date : 01.11.2020
# Last Modified Date: 09.11.2021
# Last Modified By : <NAME> <<EMAIL>>
#
# Copyright (c) 2020, Imperial College, London
# All rights reserved.
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of Imperial College nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# image pre-processing codebase
import os
import time
import cv2
import numpy as np
import peakutils
from PIL import Image
def scale(img, xScale, yScale):
out = cv2.resize(img, None, fx=xScale, fy=yScale, interpolation=cv2.INTER_AREA)
return out
def crop(infile, height, width):
image = Image.open(infile)
imgwidth, imgheight = im.size
for i in range(imgheight // height):
for j in range(imgwidth // width):
box = (j * width, i * height, (j + 1) * width, (i + 1) * height)
yield im.crop(box)
def convert_frame_to_grayscale(frame):
grayframe = None
gray = None
if frame is not None:
cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
gray = scale(gray, 1, 1)
grayframe = scale(gray, 1, 1)
gray = cv2.GaussianBlur(gray, (9, 9), 0.0)
return grayframe, gray
def keyframe_extractor(video, threshold=0.25):
"""
Video: video filepath
threshold: image difference threshold
"""
keyframesdir = '/tmp/keyframes/{}'.format(time.time())
if not os.path.exists(keyframesdir):
os.makedirs(keyframesdir)
source = cv2.VideoCapture(video)
length = int(source.get(cv2.CAP_PROP_FRAME_COUNT))
listframes = []
listdiffs = []
images = []
colored = []
lastframe = None
if source.isOpened():
for i in range(length):
ret, frame = source.read()
grayframe, blur_gray = convert_frame_to_grayscale(frame)
frame_number = source.get(cv2.CAP_PROP_POS_FRAMES) - 1
listframes.append(frame_number)
images.append(grayframe)
colored.append(frame)
if frame_number == 0:
lastframe = blur_gray
diff = cv2.subtract(blur_gray, lastframe)
difference = cv2.countNonZero(diff)
listdiffs.append(difference)
lastframe = blur_gray
source.release()
y = np.array(listdiffs)
base = peakutils.baseline(y, 2)
indices = peakutils.indexes(y - base, threshold, min_dist=1)
for k in indices:
cv2.imwrite(os.path.join('{}/keyframe_{}.jpg'.format(keyframesdir, k)), colored[k])
else:
print('error in the file')
cv2.destroyAllWindows()
return keyframesdir
if __name__ == '__main__':
import plac
plac.call(keyframe_extractor)
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# File : image_processing.py
# Author : <NAME> <<EMAIL>>
# Date : 01.11.2020
# Last Modified Date: 09.11.2021
# Last Modified By : <NAME> <<EMAIL>>
#
# Copyright (c) 2020, Imperial College, London
# All rights reserved.
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of Imperial College nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# image pre-processing codebase
import os
import time
import cv2
import numpy as np
import peakutils
from PIL import Image
def scale(img, xScale, yScale):
out = cv2.resize(img, None, fx=xScale, fy=yScale, interpolation=cv2.INTER_AREA)
return out
def crop(infile, height, width):
image = Image.open(infile)
imgwidth, imgheight = im.size
for i in range(imgheight // height):
for j in range(imgwidth // width):
box = (j * width, i * height, (j + 1) * width, (i + 1) * height)
yield im.crop(box)
def convert_frame_to_grayscale(frame):
grayframe = None
gray = None
if frame is not None:
cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
gray = scale(gray, 1, 1)
grayframe = scale(gray, 1, 1)
gray = cv2.GaussianBlur(gray, (9, 9), 0.0)
return grayframe, gray
def keyframe_extractor(video, threshold=0.25):
"""
Video: video filepath
threshold: image difference threshold
"""
keyframesdir = '/tmp/keyframes/{}'.format(time.time())
if not os.path.exists(keyframesdir):
os.makedirs(keyframesdir)
source = cv2.VideoCapture(video)
length = int(source.get(cv2.CAP_PROP_FRAME_COUNT))
listframes = []
listdiffs = []
images = []
colored = []
lastframe = None
if source.isOpened():
for i in range(length):
ret, frame = source.read()
grayframe, blur_gray = convert_frame_to_grayscale(frame)
frame_number = source.get(cv2.CAP_PROP_POS_FRAMES) - 1
listframes.append(frame_number)
images.append(grayframe)
colored.append(frame)
if frame_number == 0:
lastframe = blur_gray
diff = cv2.subtract(blur_gray, lastframe)
difference = cv2.countNonZero(diff)
listdiffs.append(difference)
lastframe = blur_gray
source.release()
y = np.array(listdiffs)
base = peakutils.baseline(y, 2)
indices = peakutils.indexes(y - base, threshold, min_dist=1)
for k in indices:
cv2.imwrite(os.path.join('{}/keyframe_{}.jpg'.format(keyframesdir, k)), colored[k])
else:
print('error in the file')
cv2.destroyAllWindows()
return keyframesdir
if __name__ == '__main__':
import plac
plac.call(keyframe_extractor)
| en | 0.685486 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- # File : image_processing.py # Author : <NAME> <<EMAIL>> # Date : 01.11.2020 # Last Modified Date: 09.11.2021 # Last Modified By : <NAME> <<EMAIL>> # # Copyright (c) 2020, Imperial College, London # All rights reserved. # Redistribution and use in source and binary forms, with or without modification, # are permitted provided that the following conditions are met: # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # 3. Neither the name of Imperial College nor the names of its contributors may # be used to endorse or promote products derived from this software without # specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR # TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # image pre-processing codebase Video: video filepath threshold: image difference threshold | 1.874882 | 2 |
setup.py | w0rp/tox-travis-example | 0 | 6619246 | <gh_stars>0
from __future__ import absolute_import, division, print_function, unicode_literals # isort:skip # noqa
import os
from setuptools import find_packages, setup
README = ''
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='tox-travis-example',
version='0.1',
install_requires=['six'],
packages=find_packages(),
include_package_data=True,
license='Public domain',
description='Just an example project',
long_description=README,
url='https://www.example.com/',
author='w0rp',
author_email='<EMAIL>',
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Framework :: Django :: 1.10',
'Framework :: Django :: 1.11',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
)
| from __future__ import absolute_import, division, print_function, unicode_literals # isort:skip # noqa
import os
from setuptools import find_packages, setup
README = ''
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='tox-travis-example',
version='0.1',
install_requires=['six'],
packages=find_packages(),
include_package_data=True,
license='Public domain',
description='Just an example project',
long_description=README,
url='https://www.example.com/',
author='w0rp',
author_email='<EMAIL>',
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Framework :: Django :: 1.10',
'Framework :: Django :: 1.11',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
) | en | 0.643589 | # isort:skip # noqa # allow setup.py to be run from any path | 1.348267 | 1 |
collect_numasvm.py | zuevmaxim/hogwildpp | 0 | 6619247 | <gh_stars>0
#!/usr/bin/env python2
import os, sys, math, time, subprocess, multiprocessing
from subprocess import check_call
dryrun = False
datasets = [
# "covtype",
# "webspam",
# "music",
"rcv1",
# "epsilon",
# "news20"
]
# settings used for grid size search
'''
nthreads = [10]
iterations = { "default" : 200, "news20" : 350, "epsilon" : 150}
maxstepsize = { "covtype" : 5e-03,
"webspam" : 2e-01,
"music" : 5e-08,
"rcv1" : 5e-01,
"epsilon" : 1e-01,
"news20" : 5e-01,
}
stepdecay = [1, 0.95, 0.9, 0.85, 0.8]
stepdecay_per_dataset = {}
step_search_range = 10
'''
nthreads = [1, 2, 4, 8, 16, 32, 48, 64]
cluster_size = [1, 2, 4, 8, 16]
maxstepsize = { "covtype" : 5e-03,
"webspam" : 2e-01,
"music" : 5e-08,
"rcv1" : 5e-01,
"epsilon" : 1e-01,
"news20" : 5e-01,
}
target_accuracy = { "covtype" : 0.76291,
"webspam" : 0.92700,
"rcv1" : 0.97713,
"epsilon" : 0.89740,
"news20" : 0.96425,
}
stepdecay = []
stepdecay_per_dataset = { "covtype" : [0.85],
"webspam" : [0.8],
"music" : [0.8],
"rcv1" : [0.8],
"epsilon" : [0.85],
"news20" : [0.8],
}
iterations = { "default" : 50, "epsilon" : 25}
step_search_range = 0
outputdir = "numasvm_" + time.strftime("%m%d-%H%M%S")
if len(sys.argv) > 1:
if sys.argv[1] == "-n":
dryrun = True
if sys.argv[1] == "-y":
dryrun = False
if not dryrun:
check_call("mkdir -p {}/".format(outputdir), shell=True)
def GenerateSteps(max_step_size):
return [max_step_size]
def GenerateUpdateDelay(nweights):
if nweights <= 4:
update_delay = 64
elif nweights <= 10:
update_delay = 16
else:
update_delay = 4
return update_delay
for d in datasets:
# Find a step size from table
steps = GenerateSteps(maxstepsize[d])
if d in iterations:
epochs = iterations[d]
else:
epochs = iterations["default"]
print "For dataset {} we will use {} epochs and step size:\n {}\n".format(d, epochs, steps)
for s in steps:
for n in nthreads[::-1]:
for c in cluster_size[::-1]:
nweights = n / c
if (n % c) != 0 or nweights < 2:
continue
effective_epochs = epochs * nweights
effective_epochs = min(1000, effective_epochs)
effective_epochs = max(150, effective_epochs)
u = GenerateUpdateDelay(nweights)
if d in stepdecay_per_dataset:
stepdecay_trials = stepdecay_per_dataset[d]
else:
stepdecay_trials = stepdecay
for b in stepdecay_trials:
effective_b = math.pow(b, (1.0/nweights))
result_name = os.path.join(outputdir, "{}_{}_{}_{}_{}.txt".format(d, n, c, s, b))
cmdline = "bin/numasvm --epoch {} --stepinitial {} --step_decay {} --update_delay {} --cluster_size {} --split {} --target_accuracy {} data/{}_train.tsv data/{}_test.tsv | tee {}".format(effective_epochs, s, effective_b, u, c, n, target_accuracy[d], d, d, result_name)
print "Executing HogWild++ with {} threads, c={}:\n{}\nResults at {}".format(n, c, cmdline, result_name)
if not dryrun:
subprocess.Popen(cmdline, shell=True).wait()
else:
print "*** This is a dry run. No results will be produced. ***"
print
| #!/usr/bin/env python2
import os, sys, math, time, subprocess, multiprocessing
from subprocess import check_call
dryrun = False  # default; may be overridden by the -n / -y flags below
# Datasets to benchmark; uncomment entries to enable them.
datasets = [
# "covtype",
# "webspam",
# "music",
"rcv1",
# "epsilon",
# "news20"
]
# settings used for grid size search
'''
nthreads = [10]
iterations = { "default" : 200, "news20" : 350, "epsilon" : 150}
maxstepsize = { "covtype" : 5e-03,
"webspam" : 2e-01,
"music" : 5e-08,
"rcv1" : 5e-01,
"epsilon" : 1e-01,
"news20" : 5e-01,
}
stepdecay = [1, 0.95, 0.9, 0.85, 0.8]
stepdecay_per_dataset = {}
step_search_range = 10
'''
# Thread counts and HogWild++ cluster sizes to sweep.
nthreads = [1, 2, 4, 8, 16, 32, 48, 64]
cluster_size = [1, 2, 4, 8, 16]
# Hand-tuned maximum step size per dataset.
maxstepsize = { "covtype" : 5e-03,
"webspam" : 2e-01,
"music" : 5e-08,
"rcv1" : 5e-01,
"epsilon" : 1e-01,
"news20" : 5e-01,
}
# Accuracy at which a run may stop early.
# NOTE(review): "music" has no entry here; enabling it in `datasets`
# would raise a KeyError when the command line is built.
target_accuracy = { "covtype" : 0.76291,
"webspam" : 0.92700,
"rcv1" : 0.97713,
"epsilon" : 0.89740,
"news20" : 0.96425,
}
# Step-decay factors to sweep; empty because per-dataset values follow.
stepdecay = []
stepdecay_per_dataset = { "covtype" : [0.85],
"webspam" : [0.8],
"music" : [0.8],
"rcv1" : [0.8],
"epsilon" : [0.85],
"news20" : [0.8],
}
# Base epoch budget, with a per-dataset override for epsilon.
iterations = { "default" : 50, "epsilon" : 25}
step_search_range = 0  # no step-size grid search in this configuration
# Timestamped directory for all result files.
outputdir = "numasvm_" + time.strftime("%m%d-%H%M%S")
# -n forces a dry run (commands only printed); -y forces real execution.
if len(sys.argv) > 1:
    if sys.argv[1] == "-n":
        dryrun = True
    if sys.argv[1] == "-y":
        dryrun = False
if not dryrun:
    # Create the output directory up front.
    check_call("mkdir -p {}/".format(outputdir), shell=True)
def GenerateSteps(max_step_size):
    """Return the list of step sizes to sweep (currently just the tuned maximum)."""
    steps = [max_step_size]
    return steps
def GenerateUpdateDelay(nweights):
    """Map the number of weight replicas to the HogWild++ update delay."""
    if nweights > 10:
        return 4
    if nweights > 4:
        return 16
    return 64
for d in datasets:
    # Find a step size from table
    steps = GenerateSteps(maxstepsize[d])
    # Per-dataset epoch budget, falling back to the default.
    if d in iterations:
        epochs = iterations[d]
    else:
        epochs = iterations["default"]
    print "For dataset {} we will use {} epochs and step size:\n {}\n".format(d, epochs, steps)
    for s in steps:
        # Sweep thread counts and cluster sizes, largest first.
        for n in nthreads[::-1]:
            for c in cluster_size[::-1]:
                # Number of weight replicas (integer division under Python 2).
                # Skip configurations where c does not divide n or fewer
                # than two replicas would result.
                nweights = n / c
                if (n % c) != 0 or nweights < 2:
                    continue
                # Scale the epoch budget by the replica count, clamped to [150, 1000].
                effective_epochs = epochs * nweights
                effective_epochs = min(1000, effective_epochs)
                effective_epochs = max(150, effective_epochs)
                u = GenerateUpdateDelay(nweights)
                # Prefer per-dataset decay factors when available.
                if d in stepdecay_per_dataset:
                    stepdecay_trials = stepdecay_per_dataset[d]
                else:
                    stepdecay_trials = stepdecay
                for b in stepdecay_trials:
                    # Per-replica decay: the nweights-th root, so the combined
                    # decay across all replicas equals b.
                    effective_b = math.pow(b, (1.0/nweights))
                    result_name = os.path.join(outputdir, "{}_{}_{}_{}_{}.txt".format(d, n, c, s, b))
                    cmdline = "bin/numasvm --epoch {} --stepinitial {} --step_decay {} --update_delay {} --cluster_size {} --split {} --target_accuracy {} data/{}_train.tsv data/{}_test.tsv | tee {}".format(effective_epochs, s, effective_b, u, c, n, target_accuracy[d], d, d, result_name)
                    print "Executing HogWild++ with {} threads, c={}:\n{}\nResults at {}".format(n, c, cmdline, result_name)
                    if not dryrun:
                        # Run the benchmark and wait for it to finish.
                        subprocess.Popen(cmdline, shell=True).wait()
                    else:
                        print "*** This is a dry run. No results will be produced. ***"
                        print
tests/python_to_cpp/test_deep_imports.py | 11l-lang/_11l_to_cpp | 9 | 6619248 | import syntax_highlighter_for_pqmarkup
print() | import syntax_highlighter_for_pqmarkup
print() | none | 1 | 1.120566 | 1 | |
scripts/nao_gesture_action_server_node.py | jdddog/nao_hri | 2 | 6619249 | <filename>scripts/nao_gesture_action_server_node.py
#!/usr/bin/env python
# Copyright (c) 2014, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import rospy
import actionlib
from hri_framework import IGestureActionServer, GestureHandle
from nao_hri import NaoNode, Gesture
from hri_msgs.msg import TargetAction, TargetGoal
from threading import Timer, RLock
from nao_hri import AnimationType
from threading import Thread
class NaoGestureHandle(GestureHandle):
    """Gesture bookkeeping for the Nao: stores either the ALMotion task id
    of a keyframe animation or the action client driving a pointing arm."""
    def __init__(self, goal_handle, gesture, motion_id=None, client=None):
        GestureHandle.__init__(self, goal_handle, gesture)
        self.client = client
        self.motion_id = motion_id
class NaoGestureActionServer(IGestureActionServer, NaoNode):
    """Gesture action server for the Nao robot.
    Keyframe gestures are played through the ALMotion proxy; pointing
    gestures (PointLarm/PointRarm) are delegated to the per-arm pointing
    action servers. All goal bookkeeping is guarded by self.lock.
    """
    def __init__(self):
        IGestureActionServer.__init__(self, Gesture)
        self.motion_proxy = None  # ALMotion proxy, acquired in start()
        self.lock = RLock()  # guards goal bookkeeping across timer/action callbacks
        # Pointing-action clients; *_gh hold the goal handle currently
        # occupying each arm (None means the arm is free).
        self.larm_client = actionlib.SimpleActionClient('nao_point_left', TargetAction)
        self.larm_gh = None
        self.rarm_client = actionlib.SimpleActionClient('nao_point_right', TargetAction)
        self.rarm_gh = None
    def start(self):
        """Register with NAOqi, acquire the ALMotion proxy and start serving."""
        module_name = self.get_instance_name(globals())
        NaoNode.__init__(self, module_name)
        self.motion_proxy = self.get_proxy('ALMotion')
        super(NaoGestureActionServer, self).start()
    @staticmethod
    def get_actual_duration(times):
        """Return the largest timestamp over all per-joint time tracks."""
        maxTime = 0.0
        for time in times:  # NOTE: loop variable shadows the `time` name locally
            tempMax = max(time)
            if tempMax > maxTime:
                maxTime = tempMax
        return maxTime
    def start_gesture(self, goal_handle):
        """Start executing the gesture requested by goal_handle.
        Keyframe gestures are posted to ALMotion as Bezier joint
        trajectories; pointing gestures are forwarded to the arm action
        servers. Invalid gestures and already-busy arms abort the goal.
        """
        with self.lock:
            goal = goal_handle.get_goal()
            if self.is_valid_gesture(goal.gesture):
                gesture = Gesture[goal.gesture]
                # duration == -1 means "use the gesture's default duration".
                if goal.duration == -1:
                    duration = gesture.default_duration
                else:
                    duration = goal.duration
                if gesture.animation_type is AnimationType.Keyframe:
                    # Concatenate all keyframe animations into one
                    # names/times/keys triple scaled to the requested duration.
                    animations = gesture.keyframe_animations()
                    names = []
                    times = []
                    keys = []
                    durations = []
                    for a in animations:
                        durations.append(a.get_end_time())
                        (n_temp, t_temp, k_temp) = a.get_ntk(duration)
                        names += n_temp
                        times += t_temp
                        keys += k_temp
                    actual_duration = NaoGestureActionServer.get_actual_duration(times)
                    # post.* runs asynchronously and returns an id for cancellation.
                    motion_id = self.motion_proxy.post.angleInterpolationBezier(names, times, keys)
                    gesture_handle = NaoGestureHandle(goal_handle, gesture, motion_id=motion_id)
                    self.add_gesture_handle(gesture_handle)
                    # Succeed the goal once the trajectory has played out.
                    gesture_handle.start_timer(actual_duration, self.set_succeeded, [goal_handle])
                else:
                    # Pointing gesture: hand the target to an arm action server.
                    target_goal = TargetGoal()
                    target_goal.target = goal.target
                    target_goal.speed = 0.5
                    target_goal.acceleration = 0.3
                    if gesture is Gesture.PointLarm:
                        if self.larm_gh is None:
                            self.larm_gh = goal_handle
                            client = self.larm_client
                            done_cb = self.larm_succeeded
                        else:
                            self.set_aborted(goal_handle)
                            rospy.logwarn('Left arm is already busy performing a gesture, please cancel it first')
                            return
                    elif gesture is Gesture.PointRarm:
                        if self.rarm_gh is None:
                            self.rarm_gh = goal_handle
                            client = self.rarm_client
                            done_cb = self.rarm_succeeded
                        else:
                            self.set_aborted(goal_handle)
                            rospy.logwarn('Right arm is already busy performing a gesture, please cancel it first')
                            return
                    gesture_handle = NaoGestureHandle(goal_handle, gesture, client=client)
                    self.add_gesture_handle(gesture_handle)
                    if goal.duration == -1:
                        # No explicit duration: finish when the arm action reports done.
                        # NOTE(review): actionlib invokes done_cb(terminal_state, result),
                        # but larm_succeeded/rarm_succeeded accept no arguments -- confirm.
                        client.send_goal(target_goal, done_cb=done_cb)
                    else:
                        client.send_goal(target_goal)
                        gesture_handle.start_timer(duration, self.set_succeeded, [goal_handle])
            else:
                self.set_aborted(goal_handle)
    def larm_succeeded(self):
        """Left-arm pointing finished: succeed the goal and free the arm."""
        with self.lock:
            self.set_succeeded(self.larm_gh)
            self.larm_gh = None
    def rarm_succeeded(self):
        """Right-arm pointing finished: succeed the goal and free the arm."""
        with self.lock:
            self.set_succeeded(self.rarm_gh)
            self.rarm_gh = None
    def larm_cancelled(self):
        """Cancel the gesture currently occupying the left arm."""
        with self.lock:
            self.cancel_gesture(self.larm_gh)
            self.larm_gh = None
    def rarm_cancelled(self):
        """Cancel the gesture currently occupying the right arm."""
        with self.lock:
            self.cancel_gesture(self.rarm_gh)
            self.rarm_gh = None
    def cancel_gesture(self, goal_handle):
        """Stop the running animation or pointing action behind goal_handle."""
        with self.lock:
            gesture_handle = self.get_gesture_handle(goal_handle)
            gesture_handle.stop_timer()
            if gesture_handle.gesture.animation_type is AnimationType.Keyframe:
                self.motion_proxy.stop(gesture_handle.motion_id)
            else:
                gesture_handle.client.cancel_goal()
if __name__ == "__main__":
    # Start the gesture action server and keep the node alive until shutdown.
    rospy.init_node('gesture_action_server')
    gesture_server = NaoGestureActionServer()
    gesture_server.start()
    rospy.spin()
| <filename>scripts/nao_gesture_action_server_node.py
#!/usr/bin/env python
# Copyright (c) 2014, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import rospy
import actionlib
from hri_framework import IGestureActionServer, GestureHandle
from nao_hri import NaoNode, Gesture
from hri_msgs.msg import TargetAction, TargetGoal
from threading import Timer, RLock
from nao_hri import AnimationType
from threading import Thread
class NaoGestureHandle(GestureHandle):
    """Gesture bookkeeping for the Nao: stores either the ALMotion task id
    of a keyframe animation or the action client driving a pointing arm."""
    def __init__(self, goal_handle, gesture, motion_id=None, client=None):
        GestureHandle.__init__(self, goal_handle, gesture)
        self.client = client
        self.motion_id = motion_id
class NaoGestureActionServer(IGestureActionServer, NaoNode):
    """Gesture action server for the Nao robot.
    Keyframe gestures are played through the ALMotion proxy; pointing
    gestures (PointLarm/PointRarm) are delegated to the per-arm pointing
    action servers. All goal bookkeeping is guarded by self.lock.
    """
    def __init__(self):
        IGestureActionServer.__init__(self, Gesture)
        self.motion_proxy = None  # ALMotion proxy, acquired in start()
        self.lock = RLock()  # guards goal bookkeeping across timer/action callbacks
        # Pointing-action clients; *_gh hold the goal handle currently
        # occupying each arm (None means the arm is free).
        self.larm_client = actionlib.SimpleActionClient('nao_point_left', TargetAction)
        self.larm_gh = None
        self.rarm_client = actionlib.SimpleActionClient('nao_point_right', TargetAction)
        self.rarm_gh = None
    def start(self):
        """Register with NAOqi, acquire the ALMotion proxy and start serving."""
        module_name = self.get_instance_name(globals())
        NaoNode.__init__(self, module_name)
        self.motion_proxy = self.get_proxy('ALMotion')
        super(NaoGestureActionServer, self).start()
    @staticmethod
    def get_actual_duration(times):
        """Return the largest timestamp over all per-joint time tracks."""
        maxTime = 0.0
        for time in times:  # NOTE: loop variable shadows the `time` name locally
            tempMax = max(time)
            if tempMax > maxTime:
                maxTime = tempMax
        return maxTime
    def start_gesture(self, goal_handle):
        """Start executing the gesture requested by goal_handle.
        Keyframe gestures are posted to ALMotion as Bezier joint
        trajectories; pointing gestures are forwarded to the arm action
        servers. Invalid gestures and already-busy arms abort the goal.
        """
        with self.lock:
            goal = goal_handle.get_goal()
            if self.is_valid_gesture(goal.gesture):
                gesture = Gesture[goal.gesture]
                # duration == -1 means "use the gesture's default duration".
                if goal.duration == -1:
                    duration = gesture.default_duration
                else:
                    duration = goal.duration
                if gesture.animation_type is AnimationType.Keyframe:
                    # Concatenate all keyframe animations into one
                    # names/times/keys triple scaled to the requested duration.
                    animations = gesture.keyframe_animations()
                    names = []
                    times = []
                    keys = []
                    durations = []
                    for a in animations:
                        durations.append(a.get_end_time())
                        (n_temp, t_temp, k_temp) = a.get_ntk(duration)
                        names += n_temp
                        times += t_temp
                        keys += k_temp
                    actual_duration = NaoGestureActionServer.get_actual_duration(times)
                    # post.* runs asynchronously and returns an id for cancellation.
                    motion_id = self.motion_proxy.post.angleInterpolationBezier(names, times, keys)
                    gesture_handle = NaoGestureHandle(goal_handle, gesture, motion_id=motion_id)
                    self.add_gesture_handle(gesture_handle)
                    # Succeed the goal once the trajectory has played out.
                    gesture_handle.start_timer(actual_duration, self.set_succeeded, [goal_handle])
                else:
                    # Pointing gesture: hand the target to an arm action server.
                    target_goal = TargetGoal()
                    target_goal.target = goal.target
                    target_goal.speed = 0.5
                    target_goal.acceleration = 0.3
                    if gesture is Gesture.PointLarm:
                        if self.larm_gh is None:
                            self.larm_gh = goal_handle
                            client = self.larm_client
                            done_cb = self.larm_succeeded
                        else:
                            self.set_aborted(goal_handle)
                            rospy.logwarn('Left arm is already busy performing a gesture, please cancel it first')
                            return
                    elif gesture is Gesture.PointRarm:
                        if self.rarm_gh is None:
                            self.rarm_gh = goal_handle
                            client = self.rarm_client
                            done_cb = self.rarm_succeeded
                        else:
                            self.set_aborted(goal_handle)
                            rospy.logwarn('Right arm is already busy performing a gesture, please cancel it first')
                            return
                    gesture_handle = NaoGestureHandle(goal_handle, gesture, client=client)
                    self.add_gesture_handle(gesture_handle)
                    if goal.duration == -1:
                        # No explicit duration: finish when the arm action reports done.
                        # NOTE(review): actionlib invokes done_cb(terminal_state, result),
                        # but larm_succeeded/rarm_succeeded accept no arguments -- confirm.
                        client.send_goal(target_goal, done_cb=done_cb)
                    else:
                        client.send_goal(target_goal)
                        gesture_handle.start_timer(duration, self.set_succeeded, [goal_handle])
            else:
                self.set_aborted(goal_handle)
    def larm_succeeded(self):
        """Left-arm pointing finished: succeed the goal and free the arm."""
        with self.lock:
            self.set_succeeded(self.larm_gh)
            self.larm_gh = None
    def rarm_succeeded(self):
        """Right-arm pointing finished: succeed the goal and free the arm."""
        with self.lock:
            self.set_succeeded(self.rarm_gh)
            self.rarm_gh = None
    def larm_cancelled(self):
        """Cancel the gesture currently occupying the left arm."""
        with self.lock:
            self.cancel_gesture(self.larm_gh)
            self.larm_gh = None
    def rarm_cancelled(self):
        """Cancel the gesture currently occupying the right arm."""
        with self.lock:
            self.cancel_gesture(self.rarm_gh)
            self.rarm_gh = None
    def cancel_gesture(self, goal_handle):
        """Stop the running animation or pointing action behind goal_handle."""
        with self.lock:
            gesture_handle = self.get_gesture_handle(goal_handle)
            gesture_handle.stop_timer()
            if gesture_handle.gesture.animation_type is AnimationType.Keyframe:
                self.motion_proxy.stop(gesture_handle.motion_id)
            else:
                gesture_handle.client.cancel_goal()
if __name__ == "__main__":
    # Start the gesture action server and keep the node alive until shutdown.
    rospy.init_node('gesture_action_server')
    gesture_server = NaoGestureActionServer()
    gesture_server.start()
    rospy.spin()
| en | 0.714801 | #!/usr/bin/env python # Copyright (c) 2014, <NAME> # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 1.551161 | 2 |
util/divider/plottypes/1dscalar.py | JGU-VC/activation-pattern-analysis | 0 | 6619250 | <reponame>JGU-VC/activation-pattern-analysis<filename>util/divider/plottypes/1dscalar.py
import re
import json
from subprocess import Popen, PIPE
import numpy as np
from util.names import Jaccard2last_mean_over_time, train_H_over_time
from util.extract import get_data, get_expname, compile_filename
def register(parser):
    """Register this plot type's positional command-line arguments."""
    add = parser.add_argument
    add('files', nargs='+', type=str, help='number of files')
    add('scalarname', help='plotname', type=str)
def moving_average(a, n=3):
    """Simple moving average over windows of width *n* via a cumulative sum."""
    csum = np.cumsum(a, dtype=float)
    csum[n:] = csum[n:] - csum[:-n]
    return csum[n - 1:] / n
def plot(plt, args):
    """Plot one 1-D scalar curve extracted from experiment JSON dumps.
    args.files are result files queried through the external `jq` binary;
    args.scalarname selects either a raw "scalar-<name>" curve or one of
    the derived plots "meanji", "dLdJI" or "dEdJI".
    """
    files = filter  # NOTE(review): dead assignment shadowing the builtin; never used
    print(args.files)
    plotname = args.scalarname
    if plotname.endswith(".json") or plotname.endswith(".bin"):
        raise ValueError("No plotname specified.")
    for full_filename in args.files:
        expdir = "/".join(full_filename.split("/")[:-1])
        filename = full_filename.split("/")[-1]
        expname = filename[:-5]  # strip the ".json" extension
        # Helpers that shell out to `jq` to query the result file.
        jq = lambda cmd: Popen("jq '%s' %s " % (cmd,full_filename), shell=True, stdout=PIPE, stderr=PIPE).communicate()[0].decode('utf-8')
        jq_json = lambda cmd: json.loads(jq(cmd))
        jq_array = lambda cmd: np.array(jq_json(cmd))
        keys = jq_json('.jsons | keys')
        # Extract the "<mode>|<data>" tag (e.g. "tm|trd") from the scalar2d keys.
        mode_data = re.compile(".*scalar2d-\[(\w+\|\w+)\].*").match(",".join(keys))[1]
        x_type = "%i"  # x-column format used by the (commented-out) csv exports
        if "flr" in filename and "mcmc" in filename:
            # flr/mcmc experiments: aggregate all runs matched by file name.
            name_re = compile_filename("flr-mcmcstats-{word}-{value}-{value}_{value}")
            def name_match_fn(d,m):
                d["net"], d["perlayer"], d["initlr"], d["seed"] = m[1], m[2], m[3], m[4]
            data = get_data(args.files, name_re, name_match_fn, expname=expdir+"/"+expname, exclude_unfinished=True, cache=True)
        else:
            # print(keys)
            # NOTE(review): `data` is only bound on the flr/mcmc branch above,
            # so the dLdJI/dEdJI plots below fail for other files.
            test_acc = float(jq('.jsons["scalar-test_acc_1"].content.data[-1].y[-1]'))
            print(filename, test_acc)
        if plotname == "meanji":
            # Mean (over layers) Jaccard index vs. iteration.
            data = jq_json('.jsons["scalar2d-['+mode_data+'][sinceLast] JI(last,current)"].content.data[0]')
            x = data["x"]
            x = np.array(x, dtype=np.int)  # NOTE(review): np.int was removed in NumPy 1.24
            y = np.mean(data["z"],0)
        elif plotname == "dLdJI":
            # Loss variance across runs, normalised by the squared mean JI,
            # plotted against the interpolated mean JI curve.
            any_d = next(iter(data.values()))
            any_len = len(any_d["scalar-loss"]["y"])
            # Stack the loss curves of all runs sharing the common length.
            losses = np.stack([d["scalar-loss"]["y"] for d in data.values() if len(d["scalar-loss"]["y"]) == any_len])
            # Per-run mean-JI-to-last curves, truncated to a common length.
            jis = [Jaccard2last_mean_over_time(d) for d in data.values()]
            min_len = np.min([ji.shape[0] for ji in jis])
            jis = [ji[:min_len] for ji in jis]
            jis = np.stack(jis)
            ji_x = np.array(any_d["scalar2d-[tm|trd][sinceLast] JI(last,current)"]["x"],dtype=np.int)
            ji_y = np.mean(np.array(any_d["scalar2d-[tm|trd][sinceLast] JI(last,current)"]["z"]),0)
            ji_x = ji_x[:min_len]
            ji_y = ji_y[:min_len]
            # ji_y = np.stack([d["scalar-learning rate"]["y"] for d in data.values() if len(d["scalar-loss"]["y"]) == any_len])
            # jis = np.array([ji_y])
            # ji_x = any_d["scalar-learning rate"]["x"]
            loss_x = np.array(any_d["scalar-loss"]["x"],dtype=np.int)
            losses_var = losses.var(0)
            jis_mean = jis.mean(0)
            # Interpolate the mean JI onto the loss x-grid before dividing.
            jis_mean = np.interp(loss_x, ji_x, jis_mean)
            x, y = loss_x[1:], losses_var[1:]/jis_mean[1:]**2
            # x = np.linspace(0.2*len(y),len(y),len(y))/len(y)
            # x = ji_y
            x = np.interp(loss_x, ji_x, ji_y)
            x = x[1:]
            x_type = "%.8f"
            # Smooth both axes with a width-25 moving average.
            y = moving_average(y, n=25)
            x = moving_average(x, n=25)
        elif plotname == "dEdJI":
            # Same construction as dLdJI, but against the entropy curve.
            any_d = next(iter(data.values()))
            any_len = len(any_d["scalar-loss"]["y"])
            losses = np.stack([d["scalar-loss"]["y"] for d in data.values() if len(d["scalar-loss"]["y"]) == any_len])
            jis = [train_H_over_time(d) for d in data.values()]
            min_len = np.min([ji.shape[0] for ji in jis])
            jis = [ji[:min_len] for ji in jis]
            jis = np.stack(jis)
            ji_x = np.array(any_d["scalar2d-[tm|trd] % max Entropy"]["x"],dtype=np.int)
            ji_y = np.mean(np.array(any_d["scalar2d-[tm|trd] % max Entropy"]["z"]),0)
            ji_x = ji_x[:min_len]
            ji_y = ji_y[:min_len]
            # ji_y = np.stack([d["scalar-learning rate"]["y"] for d in data.values() if len(d["scalar-loss"]["y"]) == any_len])
            # jis = np.array([ji_y])
            # ji_x = any_d["scalar-learning rate"]["x"]
            loss_x = np.array(any_d["scalar-loss"]["x"],dtype=np.int)
            losses_var = losses.var(0)
            jis_mean = jis.mean(0)
            jis_mean = np.interp(loss_x, ji_x, jis_mean)
            x, y = loss_x[1:], losses_var[1:]/jis_mean[1:]**2
            # x = np.linspace(0.2*len(y),len(y),len(y))/len(y)
            # x = ji_y
            x = np.interp(loss_x, ji_x, ji_y)
            x = x[1:]
            x_type = "%.8f"
            y = moving_average(y, n=25)
            x = moving_average(x, n=25)
        else:
            # Default: plot the raw "scalar-<plotname>" curve.
            data = jq_json(".jsons[\"scalar-"+plotname+"\"].content.data[0]")
            x = data["x"]
            y = data["y"]
        plt.plot(x,y)
        # np.savetext("paper/fantasticlr/data/%s.csv" % plotname)
        # np.savetxt("paper/fantasticlr-cifar10/data/%s-%s.csv" % (expname,plotname), np.array([x,y]).T, header="x y", fmt=" ".join([x_type,'%.8f']), comments="")
        # Smoothed copies (only used by the commented-out csv export).
        y2 = moving_average(y, n=25)
        x2 = moving_average(x, n=25)
        # np.savetxt("paper/fantasticlr-cifar10/data/%s-%s-smooth.csv" % (expname,plotname), np.array([x2,y2]).T, header="x y", fmt=" ".join([x_type,'%.8f']), comments="")
        # The derived plots only use the first file.
        if plotname == "dLdJI" or plotname == "dEdJI":
            break
    fontsize = 2  # NOTE(review): unused
    plt.tight_layout()
    # plt.legend()
    plt.title(plotname)
    # np.savetxt("/tmp/scalar1d-%s.txt" % (plotname), [x,y])
    # plt.savefig("paper/fantasticlr/img/scalar1d-%s.pdf" % (plotname))
    plt.show()
    # plt.savefig("paper/fantasticlr/img/scalar1d-%s.pdf" % (plotname))
    # save as csv
    # np.savetxt("paper/measures/data/%s-%s.csv" % (filename,plotname), data, header="x y z", fmt=" ".join(['%s','%s','%.8f']))
| import re
import json
from subprocess import Popen, PIPE
import numpy as np
from util.names import Jaccard2last_mean_over_time, train_H_over_time
from util.extract import get_data, get_expname, compile_filename
def register(parser):
    """Register this plot type's positional command-line arguments."""
    add = parser.add_argument
    add('files', nargs='+', type=str, help='number of files')
    add('scalarname', help='plotname', type=str)
def moving_average(a, n=3):
    """Simple moving average over windows of width *n* via a cumulative sum."""
    csum = np.cumsum(a, dtype=float)
    csum[n:] = csum[n:] - csum[:-n]
    return csum[n - 1:] / n
def plot(plt, args):
    """Plot one 1-D scalar curve extracted from experiment JSON dumps.
    args.files are result files queried through the external `jq` binary;
    args.scalarname selects either a raw "scalar-<name>" curve or one of
    the derived plots "meanji", "dLdJI" or "dEdJI".
    """
    files = filter  # NOTE(review): dead assignment shadowing the builtin; never used
    print(args.files)
    plotname = args.scalarname
    if plotname.endswith(".json") or plotname.endswith(".bin"):
        raise ValueError("No plotname specified.")
    for full_filename in args.files:
        expdir = "/".join(full_filename.split("/")[:-1])
        filename = full_filename.split("/")[-1]
        expname = filename[:-5]  # strip the ".json" extension
        # Helpers that shell out to `jq` to query the result file.
        jq = lambda cmd: Popen("jq '%s' %s " % (cmd,full_filename), shell=True, stdout=PIPE, stderr=PIPE).communicate()[0].decode('utf-8')
        jq_json = lambda cmd: json.loads(jq(cmd))
        jq_array = lambda cmd: np.array(jq_json(cmd))
        keys = jq_json('.jsons | keys')
        # Extract the "<mode>|<data>" tag (e.g. "tm|trd") from the scalar2d keys.
        mode_data = re.compile(".*scalar2d-\[(\w+\|\w+)\].*").match(",".join(keys))[1]
        x_type = "%i"  # x-column format used by the (commented-out) csv exports
        if "flr" in filename and "mcmc" in filename:
            # flr/mcmc experiments: aggregate all runs matched by file name.
            name_re = compile_filename("flr-mcmcstats-{word}-{value}-{value}_{value}")
            def name_match_fn(d,m):
                d["net"], d["perlayer"], d["initlr"], d["seed"] = m[1], m[2], m[3], m[4]
            data = get_data(args.files, name_re, name_match_fn, expname=expdir+"/"+expname, exclude_unfinished=True, cache=True)
        else:
            # print(keys)
            # NOTE(review): `data` is only bound on the flr/mcmc branch above,
            # so the dLdJI/dEdJI plots below fail for other files.
            test_acc = float(jq('.jsons["scalar-test_acc_1"].content.data[-1].y[-1]'))
            print(filename, test_acc)
        if plotname == "meanji":
            # Mean (over layers) Jaccard index vs. iteration.
            data = jq_json('.jsons["scalar2d-['+mode_data+'][sinceLast] JI(last,current)"].content.data[0]')
            x = data["x"]
            x = np.array(x, dtype=np.int)  # NOTE(review): np.int was removed in NumPy 1.24
            y = np.mean(data["z"],0)
        elif plotname == "dLdJI":
            # Loss variance across runs, normalised by the squared mean JI,
            # plotted against the interpolated mean JI curve.
            any_d = next(iter(data.values()))
            any_len = len(any_d["scalar-loss"]["y"])
            # Stack the loss curves of all runs sharing the common length.
            losses = np.stack([d["scalar-loss"]["y"] for d in data.values() if len(d["scalar-loss"]["y"]) == any_len])
            # Per-run mean-JI-to-last curves, truncated to a common length.
            jis = [Jaccard2last_mean_over_time(d) for d in data.values()]
            min_len = np.min([ji.shape[0] for ji in jis])
            jis = [ji[:min_len] for ji in jis]
            jis = np.stack(jis)
            ji_x = np.array(any_d["scalar2d-[tm|trd][sinceLast] JI(last,current)"]["x"],dtype=np.int)
            ji_y = np.mean(np.array(any_d["scalar2d-[tm|trd][sinceLast] JI(last,current)"]["z"]),0)
            ji_x = ji_x[:min_len]
            ji_y = ji_y[:min_len]
            # ji_y = np.stack([d["scalar-learning rate"]["y"] for d in data.values() if len(d["scalar-loss"]["y"]) == any_len])
            # jis = np.array([ji_y])
            # ji_x = any_d["scalar-learning rate"]["x"]
            loss_x = np.array(any_d["scalar-loss"]["x"],dtype=np.int)
            losses_var = losses.var(0)
            jis_mean = jis.mean(0)
            # Interpolate the mean JI onto the loss x-grid before dividing.
            jis_mean = np.interp(loss_x, ji_x, jis_mean)
            x, y = loss_x[1:], losses_var[1:]/jis_mean[1:]**2
            # x = np.linspace(0.2*len(y),len(y),len(y))/len(y)
            # x = ji_y
            x = np.interp(loss_x, ji_x, ji_y)
            x = x[1:]
            x_type = "%.8f"
            # Smooth both axes with a width-25 moving average.
            y = moving_average(y, n=25)
            x = moving_average(x, n=25)
        elif plotname == "dEdJI":
            # Same construction as dLdJI, but against the entropy curve.
            any_d = next(iter(data.values()))
            any_len = len(any_d["scalar-loss"]["y"])
            losses = np.stack([d["scalar-loss"]["y"] for d in data.values() if len(d["scalar-loss"]["y"]) == any_len])
            jis = [train_H_over_time(d) for d in data.values()]
            min_len = np.min([ji.shape[0] for ji in jis])
            jis = [ji[:min_len] for ji in jis]
            jis = np.stack(jis)
            ji_x = np.array(any_d["scalar2d-[tm|trd] % max Entropy"]["x"],dtype=np.int)
            ji_y = np.mean(np.array(any_d["scalar2d-[tm|trd] % max Entropy"]["z"]),0)
            ji_x = ji_x[:min_len]
            ji_y = ji_y[:min_len]
            # ji_y = np.stack([d["scalar-learning rate"]["y"] for d in data.values() if len(d["scalar-loss"]["y"]) == any_len])
            # jis = np.array([ji_y])
            # ji_x = any_d["scalar-learning rate"]["x"]
            loss_x = np.array(any_d["scalar-loss"]["x"],dtype=np.int)
            losses_var = losses.var(0)
            jis_mean = jis.mean(0)
            jis_mean = np.interp(loss_x, ji_x, jis_mean)
            x, y = loss_x[1:], losses_var[1:]/jis_mean[1:]**2
            # x = np.linspace(0.2*len(y),len(y),len(y))/len(y)
            # x = ji_y
            x = np.interp(loss_x, ji_x, ji_y)
            x = x[1:]
            x_type = "%.8f"
            y = moving_average(y, n=25)
            x = moving_average(x, n=25)
        else:
            # Default: plot the raw "scalar-<plotname>" curve.
            data = jq_json(".jsons[\"scalar-"+plotname+"\"].content.data[0]")
            x = data["x"]
            y = data["y"]
        plt.plot(x,y)
        # np.savetext("paper/fantasticlr/data/%s.csv" % plotname)
        # np.savetxt("paper/fantasticlr-cifar10/data/%s-%s.csv" % (expname,plotname), np.array([x,y]).T, header="x y", fmt=" ".join([x_type,'%.8f']), comments="")
        # Smoothed copies (only used by the commented-out csv export).
        y2 = moving_average(y, n=25)
        x2 = moving_average(x, n=25)
        # np.savetxt("paper/fantasticlr-cifar10/data/%s-%s-smooth.csv" % (expname,plotname), np.array([x2,y2]).T, header="x y", fmt=" ".join([x_type,'%.8f']), comments="")
        # The derived plots only use the first file.
        if plotname == "dLdJI" or plotname == "dEdJI":
            break
    fontsize = 2  # NOTE(review): unused
    plt.tight_layout()
    # plt.legend()
    plt.title(plotname)
    # np.savetxt("/tmp/scalar1d-%s.txt" % (plotname), [x,y])
    # plt.savefig("paper/fantasticlr/img/scalar1d-%s.pdf" % (plotname))
    plt.show()
    # plt.savefig("paper/fantasticlr/img/scalar1d-%s.pdf" % (plotname))
    # save as csv
    # np.savetxt("paper/measures/data/%s-%s.csv" % (filename,plotname), data, header="x y z", fmt=" ".join(['%s','%s','%.8f']))